Hi… I am well aware that this diff view is very suboptimal. It will be fixed when the refactored server comes along!
Remove the mess
/hookc/hookc
/git2d/git2d
/static
/templates
/LICENSE*
/forged
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package embed provides embedded filesystems created at build time.
package embed

import "embed"

// Source contains the licenses collected at build time.
// It is intended to be served to the user.
//
//go:embed LICENSE*
var Source embed.FS

// Resources contains the templates and static files used by the web interface,
// as well as the git backend daemon and the hookc helper.
//
//go:embed forged/templates/* forged/static/*
//go:embed hookc/hookc git2d/git2d
var Resources embed.FS
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package oldgit
import (
"bytes"
"fmt"
"strings"
"time"
"github.com/go-git/go-git/v5/plumbing/object"
)
// FmtCommitPatch formats a commit object as if it were returned by
// git-format-patch.
func FmtCommitPatch(commit *object.Commit) (final string, err error) {
var patch *object.Patch
var buf bytes.Buffer
var author object.Signature
var date string
var commitTitle, commitDetails string
if _, patch, err = CommitToPatch(commit); err != nil {
return "", err
}
author = commit.Author
date = author.When.Format(time.RFC1123Z)
commitTitle, commitDetails, _ = strings.Cut(commit.Message, "\n")
// This date is hardcoded in Git.
fmt.Fprintf(&buf, "From %s Mon Sep 17 00:00:00 2001\n", commit.Hash)
fmt.Fprintf(&buf, "From: %s <%s>\n", author.Name, author.Email)
fmt.Fprintf(&buf, "Date: %s\n", date)
fmt.Fprintf(&buf, "Subject: [PATCH] %s\n\n", commitTitle)
if commitDetails != "" {
commitDetails1, commitDetails2, _ := strings.Cut(commitDetails, "\n")
if strings.TrimSpace(commitDetails1) == "" {
commitDetails = commitDetails2
}
buf.WriteString(commitDetails)
buf.WriteString("\n")
}
buf.WriteString("---\n")
fmt.Fprint(&buf, patch.Stats().String())
fmt.Fprintln(&buf)
buf.WriteString(patch.String())
fmt.Fprintf(&buf, "\n-- \n2.48.1\n")
return buf.String(), nil
}
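// Illustrative usage sketch (not part of the original source): given an
// *object.Commit already resolved from a go-git repository, the formatted
// patch can be written out as plain text, as the commit handler does when a
// ".patch" suffix is requested. Variable names here are hypothetical.
//
//	commitObj, err := repo.CommitObject(plumbing.NewHash(commitIDStr))
//	if err != nil {
//		return err
//	}
//	patchStr, err := FmtCommitPatch(commitObj)
//	if err != nil {
//		return err
//	}
//	fmt.Fprintln(w, patchStr)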
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package oldgit provides deprecated functions that depend on go-git.
package oldgit
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package oldgit
import (
"errors"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
)
// CommitToPatch creates an [object.Patch] from the first parent of a given
// [object.Commit].
//
// TODO: This function should be deprecated as it only diffs with the first
// parent and does not correctly handle merge commits.
func CommitToPatch(commit *object.Commit) (parentCommitHash plumbing.Hash, patch *object.Patch, err error) {
var parentCommit *object.Commit
var commitTree *object.Tree
parentCommit, err = commit.Parent(0)
switch {
case errors.Is(err, object.ErrParentNotFound):
if commitTree, err = commit.Tree(); err != nil {
return
}
if patch, err = NullTree.Patch(commitTree); err != nil {
return
}
case err != nil:
return
default:
parentCommitHash = parentCommit.Hash
if patch, err = parentCommit.Patch(commit); err != nil {
return
}
}
return
}
// NullTree is a tree object that is empty and has no hash.
var NullTree object.Tree //nolint:gochecknoglobals
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package render
import (
"bytes"
"html/template"
chromaHTML "github.com/alecthomas/chroma/v2/formatters/html"
chromaLexers "github.com/alecthomas/chroma/v2/lexers"
chromaStyles "github.com/alecthomas/chroma/v2/styles"
)
// Highlight returns HTML with syntax highlighting for the given file content,
// using Chroma. The lexer is selected based on the filename.
// If tokenization or formatting fails, a fallback <pre> block is returned with the error.
func Highlight(filename, content string) template.HTML {
lexer := chromaLexers.Match(filename)
if lexer == nil {
lexer = chromaLexers.Fallback
}
iterator, err := lexer.Tokenise(nil, content)
if err != nil {
return template.HTML("<pre>Error tokenizing file: " + err.Error() + "</pre>") //#nosec G203`
}
var buf bytes.Buffer
style := chromaStyles.Get("autumn")
formatter := chromaHTML.New(
chromaHTML.WithClasses(true),
chromaHTML.TabWidth(8),
)
if err := formatter.Format(&buf, style, iterator); err != nil {
return template.HTML("<pre>Error formatting file: " + err.Error() + "</pre>") //#nosec G203
}
return template.HTML(buf.Bytes()) //#nosec G203
}
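// Illustrative usage sketch (not part of the original source): Highlight can
// be called directly when preparing template data, since it always returns
// renderable HTML even when tokenization or formatting fails. The filename,
// content, and parameter name below are hypothetical.
//
//	rendered := Highlight("main.go", "package main\n\nfunc main() {}\n")
//	params["file_contents"] = rendered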
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package render
import (
"html"
"html/template"
)
// EscapeHTML just escapes a string and wraps it in [template.HTML].
func EscapeHTML(s string) template.HTML {
return template.HTML(html.EscapeString(s)) //#nosec G203
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package render
import (
"bytes"
"html"
"html/template"
"strings"
"github.com/microcosm-cc/bluemonday"
"github.com/yuin/goldmark"
"github.com/yuin/goldmark/extension"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
var markdownConverter = goldmark.New(goldmark.WithExtensions(extension.GFM)) //nolint:gochecknoglobals
// Readme renders and sanitizes README content from a byte slice and filename.
func Readme(data []byte, filename string) (string, template.HTML) {
switch strings.ToLower(filename) {
case "readme":
return "README", template.HTML("<pre>" + html.EscapeString(misc.BytesToString(data)) + "</pre>") //#nosec G203
case "readme.md":
var buf bytes.Buffer
if err := markdownConverter.Convert(data, &buf); err != nil {
return "Error fetching README", EscapeHTML("Unable to render README: " + err.Error())
}
return "README.md", template.HTML(bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes())) //#nosec G203
default:
return filename, template.HTML("<pre>" + html.EscapeString(misc.BytesToString(data)) + "</pre>") //#nosec G203
}
}
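// Illustrative usage sketch (not part of the original source): the returned
// pair is the display title and the sanitized HTML body. The variable and
// parameter names below are hypothetical.
//
//	title, body := Readme(readmeBytes, "README.md")
//	params["readme_filename"] = title
//	params["readme"] = body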
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>

// Package render provides functions to render code and READMEs.
package render
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
// getRepoInfo returns the repo ID, filesystem path, direct access permission,
// contribution requirement, and the user type and ID associated with a given
// repo and a provided SSH public key.
//
// TODO: Revamp.
func (s *Server) getRepoInfo(ctx context.Context, groupPath []string, repoName, sshPubkey string) (repoID int, fsPath string, access bool, contribReq, userType string, userID int, err error) {
err = s.database.QueryRow(ctx, `
WITH RECURSIVE group_path_cte AS (
-- Start: match the first name in the path where parent_group IS NULL
SELECT
id,
parent_group,
name,
1 AS depth
FROM groups
WHERE name = ($1::text[])[1]
AND parent_group IS NULL
UNION ALL
-- Recurse: join next segment of the path
SELECT
g.id,
g.parent_group,
g.name,
group_path_cte.depth + 1
FROM groups g
JOIN group_path_cte ON g.parent_group = group_path_cte.id
WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
AND group_path_cte.depth + 1 <= cardinality($1::text[])
)
SELECT
r.id,
r.filesystem_path,
CASE WHEN ugr.user_id IS NOT NULL THEN TRUE ELSE FALSE END AS has_role_in_group,
r.contrib_requirements,
COALESCE(u.type, ''),
COALESCE(u.id, 0)
FROM group_path_cte g
JOIN repos r ON r.group_id = g.id
LEFT JOIN ssh_public_keys s ON s.key_string = $3
LEFT JOIN users u ON u.id = s.user_id
LEFT JOIN user_group_roles ugr ON ugr.group_id = g.id AND ugr.user_id = u.id
WHERE g.depth = cardinality($1::text[])
AND r.name = $2
`, pgtype.FlatArray[string](groupPath), repoName, sshPubkey,
).Scan(&repoID, &fsPath, &access, &contribReq, &userType, &userID)
return
}
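// Illustrative usage sketch (not part of the original source): a typical
// caller resolves the repo for an incoming SSH session and then decides
// whether to grant direct push access. The group path, repo name, and
// variable names below are hypothetical.
//
//	repoID, fsPath, access, contribReq, userType, userID, err :=
//		s.getRepoInfo(ctx, []string{"example-group"}, "example-repo", pubkey)
//	if err != nil {
//		return err
//	}
//	_ = repoID // these values are typically carried into the pack pass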
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"bufio"
"errors"
"log/slog"
"os"
"go.lindenii.runxiyu.org/forge/forged/internal/database"
"go.lindenii.runxiyu.org/forge/forged/internal/irc"
"go.lindenii.runxiyu.org/forge/forged/internal/scfg"
)
type Config struct {
HTTP struct {
Net string `scfg:"net"`
Addr string `scfg:"addr"`
CookieExpiry int `scfg:"cookie_expiry"`
Root string `scfg:"root"`
ReadTimeout uint32 `scfg:"read_timeout"`
WriteTimeout uint32 `scfg:"write_timeout"`
IdleTimeout uint32 `scfg:"idle_timeout"`
ReverseProxy bool `scfg:"reverse_proxy"`
} `scfg:"http"`
Hooks struct {
Socket string `scfg:"socket"`
Execs string `scfg:"execs"`
} `scfg:"hooks"`
LMTP struct {
Socket string `scfg:"socket"`
Domain string `scfg:"domain"`
MaxSize int64 `scfg:"max_size"`
WriteTimeout uint32 `scfg:"write_timeout"`
ReadTimeout uint32 `scfg:"read_timeout"`
} `scfg:"lmtp"`
Git struct {
RepoDir string `scfg:"repo_dir"`
Socket string `scfg:"socket"`
DaemonPath string `scfg:"daemon_path"`
} `scfg:"git"`
SSH struct {
Net string `scfg:"net"`
Addr string `scfg:"addr"`
Key string `scfg:"key"`
Root string `scfg:"root"`
} `scfg:"ssh"`
IRC irc.Config `scfg:"irc"`
General struct {
Title string `scfg:"title"`
} `scfg:"general"`
DB struct {
Type string `scfg:"type"`
Conn string `scfg:"conn"`
} `scfg:"db"`
Pprof struct {
Net string `scfg:"net"`
Addr string `scfg:"addr"`
} `scfg:"pprof"`
}
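// Illustrative configuration fragment (not part of the original source),
// assuming standard scfg syntax; the directive names mirror the struct tags
// above, and every value shown is a hypothetical example.
//
//	http {
//		net tcp
//		addr :8080
//		cookie_expiry 604800
//		root https://forge.example.org
//		reverse_proxy true
//	}
//	db {
//		type postgres
//		conn postgresql:///lindenii-forge?host=/var/run/postgresql
//	}
//	general {
//		title "Example Forge"
//	}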
// loadConfig loads a configuration file from the specified path and unmarshals
// it into the server's config struct. This may race with concurrent reads from
// the config; additional synchronization is necessary if the configuration is
// to be made reloadable.
func (s *Server) loadConfig(path string) (err error) {
var configFile *os.File
if configFile, err = os.Open(path); err != nil {
return err
}
defer configFile.Close()
decoder := scfg.NewDecoder(bufio.NewReader(configFile))
if err = decoder.Decode(&s.config); err != nil {
return err
}
for _, u := range decoder.UnknownDirectives() {
slog.Warn("unknown configuration directive", "directive", u)
}
if s.config.DB.Type != "postgres" {
return errors.New("unsupported database type")
}
if s.database, err = database.Open(s.config.DB.Conn); err != nil {
return err
}
s.globalData["forge_title"] = s.config.General.Title
return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"context"
"github.com/jackc/pgx/v5"
)
// TODO: All database handling logic in all request handlers must be revamped.
// We must ensure that each request has all logic in one transaction (subject
// to exceptions if appropriate) so they get a consistent view of the database
// at a single point. A failure to do so may cause things as serious as
// privilege escalation.
// queryNameDesc is a helper function that executes a query and returns a
// list of nameDesc results. The query must return two string columns, i.e. a
// name and a description.
func (s *Server) queryNameDesc(ctx context.Context, query string, args ...any) (result []nameDesc, err error) {
var rows pgx.Rows
if rows, err = s.database.Query(ctx, query, args...); err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var name, description string
if err = rows.Scan(&name, &description); err != nil {
return nil, err
}
result = append(result, nameDesc{name, description})
}
return result, rows.Err()
}
// nameDesc holds a name and a description.
type nameDesc struct {
Name string
Description string
}
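// Illustrative usage sketch (not part of the original source): the helper is
// intended for simple two-column listings; the query below mirrors the one
// used by the index handler.
//
//	groups, err := s.queryNameDesc(ctx,
//		"SELECT name, COALESCE(description, '') FROM groups WHERE parent_group IS NULL")
//	if err != nil {
//		return err
//	}
//	_ = groups // []nameDesc in query order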
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"bufio"
"context"
"errors"
"io"
"net/http"
"net/url"
"strings"
"github.com/jackc/pgx/v5"
)
// fedauth checks whether a user's SSH public key appears in the public key
// listing of the remote username they claim to have on the given service.
// If so, the association is recorded.
func (s *Server) fedauth(ctx context.Context, userID int, service, remoteUsername, pubkey string) (bool, error) {
var err error
matched := false
usernameEscaped := url.PathEscape(remoteUsername)
var req *http.Request
switch service {
// TODO: Services should be configurable by the instance administrator
// and should not be hardcoded in the source code.
case "sr.ht":
req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://meta.sr.ht/~"+usernameEscaped+".keys", nil)
case "github":
req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://github.com/"+usernameEscaped+".keys", nil)
case "codeberg":
req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://codeberg.org/"+usernameEscaped+".keys", nil)
case "tangled":
req, err = http.NewRequestWithContext(ctx, http.MethodGet, "https://tangled.sh/keys/"+usernameEscaped, nil)
// TODO: Don't rely on one webview
default:
return false, errors.New("unknown federated service")
}
if err != nil {
return false, err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false, err
}
defer func() {
_ = resp.Body.Close()
}()
buf := bufio.NewReader(resp.Body)
for {
line, err := buf.ReadString('\n')
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return false, err
}
lineSplit := strings.Split(line, " ")
if len(lineSplit) < 2 {
continue
}
line = strings.Join(lineSplit[:2], " ")
if line == pubkey {
matched = true
break
}
}
if !matched {
return false, nil
}
var txn pgx.Tx
if txn, err = s.database.Begin(ctx); err != nil {
return false, err
}
defer func() {
_ = txn.Rollback(ctx)
}()
if _, err = txn.Exec(ctx, `UPDATE users SET type = 'federated' WHERE id = $1 AND type = 'pubkey_only'`, userID); err != nil {
return false, err
}
if _, err = txn.Exec(ctx, `INSERT INTO federated_identities (user_id, service, remote_username) VALUES ($1, $2, $3)`, userID, service, remoteUsername); err != nil {
return false, err
}
if err = txn.Commit(ctx); err != nil {
return false, err
}
return true, nil
}
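// Note on the expected response format (not part of the original source,
// summarized from the parsing loop above): each *.keys endpoint returns one
// authorized_keys-style line per key, of which only the first two fields (key
// type and base64 blob) are compared, so trailing comments on either side do
// not affect matching. A hypothetical line looks like:
//
//	ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIExampleExampleExampleExampleExample user@host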
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
//
//go:build linux
package unsorted
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/jackc/pgx/v5"
"go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
var (
errGetFD = errors.New("unable to get file descriptor")
errGetUcred = errors.New("failed getsockopt")
)
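// Wire format note (not part of the original source, summarized from the
// handler below): after the 64-byte cookie, hookc sends argc as a
// native-endian uint64, then argc NUL-terminated arguments, then
// NUL-terminated KEY=VALUE environment entries ended by an empty entry, and
// finally the hook's standard input until EOF. The handler replies with a
// single status byte, where 0 means the hook passed.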
// hooksHandler handles a connection from hookc via the
// unix socket.
func (s *Server) hooksHandler(conn net.Conn) {
var ctx context.Context
var cancel context.CancelFunc
var ucred *syscall.Ucred
var err error
var cookie []byte
var packPass packPass
var sshStderr io.Writer
var hookRet byte
defer conn.Close()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
// There aren't reasonable cases where someone would run this as
// another user.
if ucred, err = getUcred(conn); err != nil {
if _, err = conn.Write([]byte{1}); err != nil {
return
}
writeRedError(conn, "\nUnable to get peer credentials: %v", err)
return
}
uint32uid := uint32(os.Getuid()) //#nosec G115
if ucred.Uid != uint32uid {
if _, err = conn.Write([]byte{1}); err != nil {
return
}
writeRedError(conn, "\nUID mismatch")
return
}
cookie = make([]byte, 64)
if _, err = conn.Read(cookie); err != nil {
if _, err = conn.Write([]byte{1}); err != nil {
return
}
writeRedError(conn, "\nFailed to read cookie: %v", err)
return
}
{
var ok bool
packPass, ok = s.packPasses.Load(misc.BytesToString(cookie))
if !ok {
if _, err = conn.Write([]byte{1}); err != nil {
return
}
writeRedError(conn, "\nInvalid handler cookie")
return
}
}
sshStderr = packPass.session.Stderr()
_, _ = sshStderr.Write([]byte{'\n'})
hookRet = func() byte {
var argc64 uint64
if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil {
writeRedError(sshStderr, "Failed to read argc: %v", err)
return 1
}
var args []string
for range argc64 {
var arg bytes.Buffer
for {
nextByte := make([]byte, 1)
n, err := conn.Read(nextByte)
if err != nil || n != 1 {
writeRedError(sshStderr, "Failed to read arg: %v", err)
return 1
}
if nextByte[0] == 0 {
break
}
arg.WriteByte(nextByte[0])
}
args = append(args, arg.String())
}
gitEnv := make(map[string]string)
for {
var envLine bytes.Buffer
for {
nextByte := make([]byte, 1)
n, err := conn.Read(nextByte)
if err != nil || n != 1 {
writeRedError(sshStderr, "Failed to read environment variable: %v", err)
return 1
}
if nextByte[0] == 0 {
break
}
envLine.WriteByte(nextByte[0])
}
if envLine.Len() == 0 {
break
}
kv := envLine.String()
parts := strings.SplitN(kv, "=", 2)
if len(parts) < 2 {
writeRedError(sshStderr, "Invalid environment variable line: %v", kv)
return 1
}
gitEnv[parts[0]] = parts[1]
}
var stdin bytes.Buffer
if _, err = io.Copy(&stdin, conn); err != nil {
writeRedError(conn, "Failed to read to the stdin buffer: %v", err)
}
switch filepath.Base(args[0]) {
case "pre-receive":
if packPass.directAccess {
return 0
}
allOK := true
for {
var line, oldOID, rest, newIOID, refName string
var found bool
var oldHash, newHash plumbing.Hash
var oldCommit, newCommit *object.Commit
var pushOptCount int
pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"])
if err != nil {
writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err)
return 1
}
// TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface?
// Also it'd be nice to be able to combine users or whatever
if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" {
if pushOptCount == 0 {
writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
return 1
}
for pushOptIndex := range pushOptCount {
pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)]
if !ok {
writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex)
return 1
}
if strings.HasPrefix(pushOpt, "fedid=") {
fedUserID := strings.TrimPrefix(pushOpt, "fedid=")
service, username, found := strings.Cut(fedUserID, ":")
if !found {
writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID)
return 1
}
ok, err := s.fedauth(ctx, packPass.userID, service, username, packPass.pubkey)
if err != nil {
writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err)
return 1
}
if !ok {
writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID)
return 1
}
break
}
if pushOptIndex == pushOptCount-1 {
writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
return 1
}
}
}
line, err = stdin.ReadString('\n')
if errors.Is(err, io.EOF) {
break
} else if err != nil {
writeRedError(sshStderr, "Failed to read pre-receive line: %v", err)
return 1
}
line = line[:len(line)-1]
oldOID, rest, found = strings.Cut(line, " ")
if !found {
writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
return 1
}
newIOID, refName, found = strings.Cut(rest, " ")
if !found {
writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
return 1
}
if strings.HasPrefix(refName, "refs/heads/contrib/") {
if allZero(oldOID) { // New branch
fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
var newMRLocalID int
if packPass.userID != 0 {
err = s.database.QueryRow(ctx,
"INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id",
packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"),
).Scan(&newMRLocalID)
} else {
err = s.database.QueryRow(ctx,
"INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id",
packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"),
).Scan(&newMRLocalID)
}
if err != nil {
writeRedError(sshStderr, "Error creating merge request: %v", err)
return 1
}
mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", s.genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID)
fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset)
s.ircBot.Send("PRIVMSG #chat :New merge request at " + mergeRequestWebURL)
} else { // Existing contrib branch
var existingMRUser int
var isAncestor bool
err = s.database.QueryRow(ctx,
"SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2",
strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID,
).Scan(&existingMRUser)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err)
} else {
writeRedError(sshStderr, "Error querying for existing merge request: %v", err)
}
return 1
}
if existingMRUser == 0 {
allOK = false
fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)")
continue
}
if existingMRUser != packPass.userID {
allOK = false
fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)")
continue
}
oldHash = plumbing.NewHash(oldOID)
if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil {
writeRedError(sshStderr, "Daemon failed to get old commit: %v", err)
return 1
}
// Potential BUG: I'm not sure if new_commit is guaranteed to be
// detectable as they haven't been merged into the main repo's
// objects yet. But it seems to work, and I don't think there's
// any reason for this to only work intermittently.
newHash = plumbing.NewHash(newIOID)
if newCommit, err = packPass.repo.CommitObject(newHash); err != nil {
writeRedError(sshStderr, "Daemon failed to get new commit: %v", err)
return 1
}
if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil {
writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err)
return 1
}
if !isAncestor {
// TODO: Create MR snapshot ref instead
allOK = false
fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)")
continue
}
fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
}
} else { // Non-contrib branch
allOK = false
fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)")
}
}
fmt.Fprintln(sshStderr)
if allOK {
fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)")
return 0
}
fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)")
return 1
default:
fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset)
return 1
}
}()
fmt.Fprintln(sshStderr)
_, _ = conn.Write([]byte{hookRet})
}
// serveGitHooks handles connections on the specified network listener and
// treats incoming connections as those from git hook handlers by spawning
// sessions. The listener must be a SOCK_STREAM UNIX domain socket. The
// function itself blocks.
func (s *Server) serveGitHooks(listener net.Listener) error {
for {
conn, err := listener.Accept()
if err != nil {
return err
}
go s.hooksHandler(conn)
}
}
// getUcred fetches connection credentials as a [syscall.Ucred] from a given
// [net.Conn]. It panics when conn is not a [net.UnixConn].
func getUcred(conn net.Conn) (ucred *syscall.Ucred, err error) {
unixConn := conn.(*net.UnixConn)
var unixConnFD *os.File
if unixConnFD, err = unixConn.File(); err != nil {
return nil, errGetFD
}
defer unixConnFD.Close()
if ucred, err = syscall.GetsockoptUcred(int(unixConnFD.Fd()), syscall.SOL_SOCKET, syscall.SO_PEERCRED); err != nil {
return nil, errGetUcred
}
return ucred, nil
}
// allZero returns true if all runes in a given string are '0'. The comparison
// is not constant time and must not be used in contexts where time-based side
// channel attacks are a concern.
func allZero(s string) bool {
for _, r := range s {
if r != '0' {
return false
}
}
return true
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
//
//go:build !linux
package unsorted
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"path/filepath"
"strconv"
"strings"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/jackc/pgx/v5"
"go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
// hooksHandler handles a connection from hookc via the
// unix socket.
func (s *Server) hooksHandler(conn net.Conn) {
var ctx context.Context
var cancel context.CancelFunc
var err error
var cookie []byte
var packPass packPass
var sshStderr io.Writer
var hookRet byte
defer conn.Close()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
// TODO: ucred-like checks
cookie = make([]byte, 64)
if _, err = conn.Read(cookie); err != nil {
if _, err = conn.Write([]byte{1}); err != nil {
return
}
writeRedError(conn, "\nFailed to read cookie: %v", err)
return
}
{
var ok bool
packPass, ok = s.packPasses.Load(misc.BytesToString(cookie))
if !ok {
if _, err = conn.Write([]byte{1}); err != nil {
return
}
writeRedError(conn, "\nInvalid handler cookie")
return
}
}
sshStderr = packPass.session.Stderr()
_, _ = sshStderr.Write([]byte{'\n'})
hookRet = func() byte {
var argc64 uint64
if err = binary.Read(conn, binary.NativeEndian, &argc64); err != nil {
writeRedError(sshStderr, "Failed to read argc: %v", err)
return 1
}
var args []string
for range argc64 {
var arg bytes.Buffer
for {
nextByte := make([]byte, 1)
n, err := conn.Read(nextByte)
if err != nil || n != 1 {
writeRedError(sshStderr, "Failed to read arg: %v", err)
return 1
}
if nextByte[0] == 0 {
break
}
arg.WriteByte(nextByte[0])
}
args = append(args, arg.String())
}
gitEnv := make(map[string]string)
for {
var envLine bytes.Buffer
for {
nextByte := make([]byte, 1)
n, err := conn.Read(nextByte)
if err != nil || n != 1 {
writeRedError(sshStderr, "Failed to read environment variable: %v", err)
return 1
}
if nextByte[0] == 0 {
break
}
envLine.WriteByte(nextByte[0])
}
if envLine.Len() == 0 {
break
}
kv := envLine.String()
parts := strings.SplitN(kv, "=", 2)
if len(parts) < 2 {
writeRedError(sshStderr, "Invalid environment variable line: %v", kv)
return 1
}
gitEnv[parts[0]] = parts[1]
}
var stdin bytes.Buffer
if _, err = io.Copy(&stdin, conn); err != nil {
writeRedError(conn, "Failed to read to the stdin buffer: %v", err)
}
switch filepath.Base(args[0]) {
case "pre-receive":
if packPass.directAccess {
return 0
}
allOK := true
for {
var line, oldOID, rest, newIOID, refName string
var found bool
var oldHash, newHash plumbing.Hash
var oldCommit, newCommit *object.Commit
var pushOptCount int
pushOptCount, err = strconv.Atoi(gitEnv["GIT_PUSH_OPTION_COUNT"])
if err != nil {
writeRedError(sshStderr, "Failed to parse GIT_PUSH_OPTION_COUNT: %v", err)
return 1
}
// TODO: Allow existing users (even if they are already federated or registered) to add a federated user ID... though perhaps this should be in the normal SSH interface instead of the git push interface?
// Also it'd be nice to be able to combine users or whatever
if packPass.contribReq == "federated" && packPass.userType != "federated" && packPass.userType != "registered" {
if pushOptCount == 0 {
writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
return 1
}
for pushOptIndex := range pushOptCount {
pushOpt, ok := gitEnv[fmt.Sprintf("GIT_PUSH_OPTION_%d", pushOptIndex)]
if !ok {
writeRedError(sshStderr, "Failed to get push option %d", pushOptIndex)
return 1
}
if strings.HasPrefix(pushOpt, "fedid=") {
fedUserID := strings.TrimPrefix(pushOpt, "fedid=")
service, username, found := strings.Cut(fedUserID, ":")
if !found {
writeRedError(sshStderr, "Invalid federated user identifier %#v does not contain a colon", fedUserID)
return 1
}
ok, err := s.fedauth(ctx, packPass.userID, service, username, packPass.pubkey)
if err != nil {
writeRedError(sshStderr, "Failed to verify federated user identifier %#v: %v", fedUserID, err)
return 1
}
if !ok {
writeRedError(sshStderr, "Failed to verify federated user identifier %#v: you don't seem to be on the list", fedUserID)
return 1
}
break
}
if pushOptIndex == pushOptCount-1 {
writeRedError(sshStderr, "This repo requires contributors to be either federated or registered users. You must supply your federated user ID as a push option. For example, git push -o fedid=sr.ht:runxiyu")
return 1
}
}
}
line, err = stdin.ReadString('\n')
if errors.Is(err, io.EOF) {
break
} else if err != nil {
writeRedError(sshStderr, "Failed to read pre-receive line: %v", err)
return 1
}
line = line[:len(line)-1]
oldOID, rest, found = strings.Cut(line, " ")
if !found {
writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
return 1
}
newIOID, refName, found = strings.Cut(rest, " ")
if !found {
writeRedError(sshStderr, "Invalid pre-receive line: %v", line)
return 1
}
if strings.HasPrefix(refName, "refs/heads/contrib/") {
if allZero(oldOID) { // New branch
fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
var newMRLocalID int
if packPass.userID != 0 {
err = s.database.QueryRow(ctx,
"INSERT INTO merge_requests (repo_id, creator, source_ref, status) VALUES ($1, $2, $3, 'open') RETURNING repo_local_id",
packPass.repoID, packPass.userID, strings.TrimPrefix(refName, "refs/heads/"),
).Scan(&newMRLocalID)
} else {
err = s.database.QueryRow(ctx,
"INSERT INTO merge_requests (repo_id, source_ref, status) VALUES ($1, $2, 'open') RETURNING repo_local_id",
packPass.repoID, strings.TrimPrefix(refName, "refs/heads/"),
).Scan(&newMRLocalID)
}
if err != nil {
writeRedError(sshStderr, "Error creating merge request: %v", err)
return 1
}
mergeRequestWebURL := fmt.Sprintf("%s/contrib/%d/", s.genHTTPRemoteURL(packPass.groupPath, packPass.repoName), newMRLocalID)
fmt.Fprintln(sshStderr, ansiec.Blue+"Created merge request at", mergeRequestWebURL+ansiec.Reset)
s.ircBot.Send("PRIVMSG #chat :New merge request at " + mergeRequestWebURL)
} else { // Existing contrib branch
var existingMRUser int
var isAncestor bool
err = s.database.QueryRow(ctx,
"SELECT COALESCE(creator, 0) FROM merge_requests WHERE source_ref = $1 AND repo_id = $2",
strings.TrimPrefix(refName, "refs/heads/"), packPass.repoID,
).Scan(&existingMRUser)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
writeRedError(sshStderr, "No existing merge request for existing contrib branch: %v", err)
} else {
writeRedError(sshStderr, "Error querying for existing merge request: %v", err)
}
return 1
}
if existingMRUser == 0 {
allOK = false
fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs to unowned MR)")
continue
}
if existingMRUser != packPass.userID {
allOK = false
fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(branch belongs another user's MR)")
continue
}
oldHash = plumbing.NewHash(oldOID)
if oldCommit, err = packPass.repo.CommitObject(oldHash); err != nil {
writeRedError(sshStderr, "Daemon failed to get old commit: %v", err)
return 1
}
// Potential BUG: I'm not sure if new_commit is guaranteed to be
// detectable as they haven't been merged into the main repo's
// objects yet. But it seems to work, and I don't think there's
// any reason for this to only work intermittently.
newHash = plumbing.NewHash(newIOID)
if newCommit, err = packPass.repo.CommitObject(newHash); err != nil {
writeRedError(sshStderr, "Daemon failed to get new commit: %v", err)
return 1
}
if isAncestor, err = oldCommit.IsAncestor(newCommit); err != nil {
writeRedError(sshStderr, "Daemon failed to check if old commit is ancestor: %v", err)
return 1
}
if !isAncestor {
// TODO: Create MR snapshot ref instead
allOK = false
fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(force pushes are not supported yet)")
continue
}
fmt.Fprintln(sshStderr, ansiec.Blue+"POK"+ansiec.Reset, refName)
}
} else { // Non-contrib branch
allOK = false
fmt.Fprintln(sshStderr, ansiec.Red+"NAK"+ansiec.Reset, refName, "(you cannot push to branches outside of contrib/*)")
}
}
fmt.Fprintln(sshStderr)
if allOK {
fmt.Fprintln(sshStderr, "Overall "+ansiec.Green+"ACK"+ansiec.Reset+" (all checks passed)")
return 0
}
fmt.Fprintln(sshStderr, "Overall "+ansiec.Red+"NAK"+ansiec.Reset+" (one or more branches failed checks)")
return 1
default:
fmt.Fprintln(sshStderr, ansiec.Red+"Invalid hook:", args[0]+ansiec.Reset)
return 1
}
}()
fmt.Fprintln(sshStderr)
_, _ = conn.Write([]byte{hookRet})
}
// serveGitHooks handles connections on the specified network listener and
// treats incoming connections as those from git hook handlers by spawning
// sessions. The listener must be a SOCK_STREAM UNIX domain socket. The
// function itself blocks.
func (s *Server) serveGitHooks(listener net.Listener) error {
for {
conn, err := listener.Accept()
if err != nil {
return err
}
go s.hooksHandler(conn)
}
}
// allZero returns true if all runes in a given string are '0'. The comparison
// is not constant time and must not be used in contexts where time-based side
// channel attacks are a concern.
func allZero(s string) bool {
for _, r := range s {
if r != '0' {
return false
}
}
return true
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"github.com/go-git/go-git/v5"
gitConfig "github.com/go-git/go-git/v5/config"
gitFmtConfig "github.com/go-git/go-git/v5/plumbing/format/config"
)
// gitInit initializes a bare git repository with the forge-deployed hooks
// directory as the hooksPath.
func (s *Server) gitInit(repoPath string) (err error) {
var repo *git.Repository
var gitConf *gitConfig.Config
if repo, err = git.PlainInit(repoPath, true); err != nil {
return err
}
if gitConf, err = repo.Config(); err != nil {
return err
}
gitConf.Raw.SetOption("core", gitFmtConfig.NoSubsection, "hooksPath", s.config.Hooks.Execs)
gitConf.Raw.SetOption("receive", gitFmtConfig.NoSubsection, "advertisePushOptions", "true")
if err = repo.SetConfig(gitConf); err != nil {
return err
}
return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"context"
"errors"
"io"
"iter"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/jackc/pgx/v5/pgtype"
)
// openRepo opens a git repository by group and repo name.
//
// TODO: This should be deprecated in favor of doing it in the relevant
// request/router context in the future, as it cannot cover the nuance of
// fields needed.
func (s *Server) openRepo(ctx context.Context, groupPath []string, repoName string) (repo *git.Repository, description string, repoID int, fsPath string, err error) {
err = s.database.QueryRow(ctx, `
WITH RECURSIVE group_path_cte AS (
-- Start: match the first name in the path where parent_group IS NULL
SELECT
id,
parent_group,
name,
1 AS depth
FROM groups
WHERE name = ($1::text[])[1]
AND parent_group IS NULL
UNION ALL
-- Recurse: join next segment of the path
SELECT
g.id,
g.parent_group,
g.name,
group_path_cte.depth + 1
FROM groups g
JOIN group_path_cte ON g.parent_group = group_path_cte.id
WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
AND group_path_cte.depth + 1 <= cardinality($1::text[])
)
SELECT
r.filesystem_path,
COALESCE(r.description, ''),
r.id
FROM group_path_cte g
JOIN repos r ON r.group_id = g.id
WHERE g.depth = cardinality($1::text[])
AND r.name = $2
`, pgtype.FlatArray[string](groupPath), repoName).Scan(&fsPath, &description, &repoID)
if err != nil {
return
}
repo, err = git.PlainOpen(fsPath)
return
}
// commitIterSeqErr creates an [iter.Seq[*object.Commit]] from an
// [object.CommitIter], and additionally returns a pointer to error.
// The pointer to error is guaranteed to be populated with either nil or the
// error returned by the commit iterator after the returned iterator is
// finished.
func commitIterSeqErr(ctx context.Context, commitIter object.CommitIter) (iter.Seq[*object.Commit], *error) {
var err error
return func(yield func(*object.Commit) bool) {
for {
commit, err2 := commitIter.Next()
if err2 != nil {
if errors.Is(err2, io.EOF) {
return
}
err = err2
return
}
select {
case <-ctx.Done():
err = ctx.Err()
return
default:
}
if !yield(commit) {
return
}
}
}, &err
}
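// Illustrative usage sketch (not part of the original source): the returned
// error pointer must only be inspected after the range loop finishes.
// Variable names here are hypothetical.
//
//	commitIter, err := repo.Log(&git.LogOptions{From: refHash})
//	if err != nil {
//		return err
//	}
//	seq, seqErr := commitIterSeqErr(ctx, commitIter)
//	for commit := range seq {
//		_ = commit // render, collect, etc.
//	}
//	if *seqErr != nil {
//		return *seqErr
//	}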
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"bytes"
"context"
"encoding/hex"
"errors"
"os"
"os/exec"
"path"
"sort"
"strings"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
func writeTree(ctx context.Context, repoPath string, entries []treeEntry) (string, error) {
var buf bytes.Buffer
sort.Slice(entries, func(i, j int) bool {
nameI, nameJ := entries[i].name, entries[j].name
if nameI == nameJ { // meh
return !(entries[i].mode == "40000") && (entries[j].mode == "40000")
}
if strings.HasPrefix(nameJ, nameI) && len(nameI) < len(nameJ) {
return !(entries[i].mode == "40000")
}
if strings.HasPrefix(nameI, nameJ) && len(nameJ) < len(nameI) {
return entries[j].mode == "40000"
}
return nameI < nameJ
})
for _, e := range entries {
buf.WriteString(e.mode)
buf.WriteByte(' ')
buf.WriteString(e.name)
buf.WriteByte(0)
buf.Write(e.sha)
}
cmd := exec.CommandContext(ctx, "git", "hash-object", "-w", "-t", "tree", "--stdin")
cmd.Env = append(os.Environ(), "GIT_DIR="+repoPath)
cmd.Stdin = &buf
var out bytes.Buffer
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
return "", err
}
return strings.TrimSpace(out.String()), nil
}
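// Format note (not part of the original source, describing what writeTree
// pipes to `git hash-object -w -t tree --stdin`): each entry is encoded as
//
//	<mode> <name>\0<20-byte binary SHA-1>
//
// with no separator between entries, matching Git's canonical tree object
// encoding ("40000" for a subtree, "100644" for a regular blob).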
func buildTreeRecursive(ctx context.Context, repoPath, baseTree string, updates map[string][]byte) (string, error) {
treeCache := make(map[string][]treeEntry)
var walk func(string, string) error
walk = func(prefix, sha string) error {
cmd := exec.CommandContext(ctx, "git", "cat-file", "tree", sha)
cmd.Env = append(os.Environ(), "GIT_DIR="+repoPath)
var out bytes.Buffer
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
return err
}
data := out.Bytes()
i := 0
var entries []treeEntry
for i < len(data) {
modeEnd := bytes.IndexByte(data[i:], ' ')
if modeEnd < 0 {
return errors.New("invalid tree format")
}
mode := misc.BytesToString(data[i : i+modeEnd])
i += modeEnd + 1
nameEnd := bytes.IndexByte(data[i:], 0)
if nameEnd < 0 {
return errors.New("missing null after filename")
}
name := misc.BytesToString(data[i : i+nameEnd])
i += nameEnd + 1
if i+20 > len(data) {
return errors.New("unexpected EOF in SHA")
}
shaBytes := data[i : i+20]
i += 20
entries = append(entries, treeEntry{
mode: mode,
name: name,
sha: shaBytes,
})
if mode == "40000" {
subPrefix := path.Join(prefix, name)
if err := walk(subPrefix, hex.EncodeToString(shaBytes)); err != nil {
return err
}
}
}
treeCache[prefix] = entries
return nil
}
if err := walk("", baseTree); err != nil {
return "", err
}
for filePath, blobSha := range updates {
parts := strings.Split(filePath, "/")
dir := strings.Join(parts[:len(parts)-1], "/")
name := parts[len(parts)-1]
entries := treeCache[dir]
found := false
for i, e := range entries {
if e.name == name {
if blobSha == nil {
// Remove TODO
entries = append(entries[:i], entries[i+1:]...)
} else {
entries[i].sha = blobSha
}
found = true
break
}
}
if !found && blobSha != nil {
entries = append(entries, treeEntry{
mode: "100644",
name: name,
sha: blobSha,
})
}
treeCache[dir] = entries
}
built := make(map[string][]byte)
var build func(string) ([]byte, error)
build = func(prefix string) ([]byte, error) {
entries := treeCache[prefix]
for i, e := range entries {
if e.mode == "40000" {
subPrefix := path.Join(prefix, e.name)
if sha, ok := built[subPrefix]; ok {
entries[i].sha = sha
continue
}
newShaStr, err := build(subPrefix)
if err != nil {
return nil, err
}
entries[i].sha = newShaStr
}
}
shaStr, err := writeTree(ctx, repoPath, entries)
if err != nil {
return nil, err
}
shaBytes, err := hex.DecodeString(shaStr)
if err != nil {
return nil, err
}
built[prefix] = shaBytes
return shaBytes, nil
}
rootShaBytes, err := build("")
if err != nil {
return "", err
}
return hex.EncodeToString(rootShaBytes), nil
}
type treeEntry struct {
mode string // like "100644"
name string // individual name
sha []byte
}
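// Illustrative usage sketch (not part of the original source): updates maps a
// slash-separated path to the 20-byte binary SHA-1 of an already-written blob,
// or to nil to delete the entry. Variable names here are hypothetical.
//
//	blobSHA, err := hex.DecodeString(blobHexSHA) // e.g. from `git hash-object -w`
//	if err != nil {
//		return err
//	}
//	newTreeHex, err := buildTreeRecursive(ctx, repoPath, baseTreeHex, map[string][]byte{
//		"docs/README.md": blobSHA,
//		"old/file.txt":   nil, // removal
//	})
//	if err != nil {
//		return err
//	}
//	_ = newTreeHex // hex SHA-1 of the rebuilt root tree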
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
)
// getRefHash returns the hash of a reference given its
// type and name as supplied in URL queries.
func getRefHash(repo *git.Repository, refType, refName string) (refHash plumbing.Hash, err error) {
var ref *plumbing.Reference
switch refType {
case "":
if ref, err = repo.Head(); err != nil {
return
}
refHash = ref.Hash()
case "commit":
refHash = plumbing.NewHash(refName)
case "branch":
if ref, err = repo.Reference(plumbing.NewBranchReferenceName(refName), true); err != nil {
return
}
refHash = ref.Hash()
case "tag":
if ref, err = repo.Reference(plumbing.NewTagReferenceName(refName), true); err != nil {
return
}
refHash = ref.Hash()
default:
panic("Invalid ref type " + refType)
}
return
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/http"
)
// getUserFromRequest returns the user ID and username associated with the
// session cookie in a given [http.Request].
func (s *Server) getUserFromRequest(request *http.Request) (id int, username string, err error) {
var sessionCookie *http.Cookie
if sessionCookie, err = request.Cookie("session"); err != nil {
return
}
err = s.database.QueryRow(
request.Context(),
"SELECT user_id, COALESCE(username, '') FROM users u JOIN sessions s ON u.id = s.user_id WHERE s.session_id = $1;",
sessionCookie.Value,
).Scan(&id, &username)
return
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/http"
"strings"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
// httpHandleRepoBranches provides the branches page in repos.
func (s *Server) httpHandleRepoBranches(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
var repo *git.Repository
var repoName string
var groupPath []string
var err error
var notes []string
var branches []string
var branchesIter storer.ReferenceIter
repo, repoName, groupPath = params["repo"].(*git.Repository), params["repo_name"].(string), params["group_path"].([]string)
if strings.Contains(repoName, "\n") || misc.SliceContainsNewlines(groupPath) {
notes = append(notes, "Path contains newlines; HTTP Git access impossible")
}
branchesIter, err = repo.Branches()
if err == nil {
_ = branchesIter.ForEach(func(branch *plumbing.Reference) error {
branches = append(branches, branch.Name().Short())
return nil
})
}
params["branches"] = branches
params["http_clone_url"] = s.genHTTPRemoteURL(groupPath, repoName)
params["ssh_clone_url"] = s.genSSHRemoteURL(groupPath, repoName)
params["notes"] = notes
s.renderTemplate(writer, "repo_branches", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"errors"
"net/http"
"path/filepath"
"strconv"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleGroupIndex provides index pages for groups, which includes a list
// of its subgroups and repos, as well as a form for group maintainers to
// create repos.
func (s *Server) httpHandleGroupIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
var groupPath []string
var repos []nameDesc
var subgroups []nameDesc
var err error
var groupID int
var groupDesc string
groupPath = params["group_path"].([]string)
// The group itself
err = s.database.QueryRow(request.Context(), `
WITH RECURSIVE group_path_cte AS (
SELECT
id,
parent_group,
name,
1 AS depth
FROM groups
WHERE name = ($1::text[])[1]
AND parent_group IS NULL
UNION ALL
SELECT
g.id,
g.parent_group,
g.name,
group_path_cte.depth + 1
FROM groups g
JOIN group_path_cte ON g.parent_group = group_path_cte.id
WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
AND group_path_cte.depth + 1 <= cardinality($1::text[])
)
SELECT c.id, COALESCE(g.description, '')
FROM group_path_cte c
JOIN groups g ON g.id = c.id
WHERE c.depth = cardinality($1::text[])
`,
pgtype.FlatArray[string](groupPath),
).Scan(&groupID, &groupDesc)
if errors.Is(err, pgx.ErrNoRows) {
web.ErrorPage404(s.templates, writer, params)
return
} else if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting group: "+err.Error())
return
}
// ACL
var count int
err = s.database.QueryRow(request.Context(), `
SELECT COUNT(*)
FROM user_group_roles
WHERE user_id = $1
AND group_id = $2
`, params["user_id"].(int), groupID).Scan(&count)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error checking access: "+err.Error())
return
}
directAccess := (count > 0)
if request.Method == http.MethodPost {
if !directAccess {
web.ErrorPage403(s.templates, writer, params, "You do not have direct access to this group")
return
}
repoName := request.FormValue("repo_name")
repoDesc := request.FormValue("repo_desc")
contribReq := request.FormValue("repo_contrib")
if repoName == "" {
web.ErrorPage400(s.templates, writer, params, "Repo name is required")
return
}
var newRepoID int
err := s.database.QueryRow(
request.Context(),
`INSERT INTO repos (name, description, group_id, contrib_requirements)
VALUES ($1, $2, $3, $4)
RETURNING id`,
repoName,
repoDesc,
groupID,
contribReq,
).Scan(&newRepoID)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error creating repo: "+err.Error())
return
}
filePath := filepath.Join(s.config.Git.RepoDir, strconv.Itoa(newRepoID)+".git")
_, err = s.database.Exec(
request.Context(),
`UPDATE repos
SET filesystem_path = $1
WHERE id = $2`,
filePath,
newRepoID,
)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error updating repo path: "+err.Error())
return
}
if err = s.gitInit(filePath); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error initializing repo: "+err.Error())
return
}
misc.RedirectUnconditionally(writer, request)
return
}
// Repos
var rows pgx.Rows
rows, err = s.database.Query(request.Context(), `
SELECT name, COALESCE(description, '')
FROM repos
WHERE group_id = $1
`, groupID)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
return
}
defer rows.Close()
for rows.Next() {
var name, description string
if err = rows.Scan(&name, &description); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
return
}
repos = append(repos, nameDesc{name, description})
}
if err = rows.Err(); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting repos: "+err.Error())
return
}
// Subgroups
rows, err = s.database.Query(request.Context(), `
SELECT name, COALESCE(description, '')
FROM groups
WHERE parent_group = $1
`, groupID)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
return
}
defer rows.Close()
for rows.Next() {
var name, description string
if err = rows.Scan(&name, &description); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
return
}
subgroups = append(subgroups, nameDesc{name, description})
}
if err = rows.Err(); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting subgroups: "+err.Error())
return
}
params["repos"] = repos
params["subgroups"] = subgroups
params["description"] = groupDesc
params["direct_access"] = directAccess
s.renderTemplate(writer, "group", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/http"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleIndex provides the main index page which includes a list of groups
// and some global information such as SSH keys.
func (s *Server) httpHandleIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
var err error
var groups []nameDesc
groups, err = s.queryNameDesc(request.Context(), "SELECT name, COALESCE(description, '') FROM groups WHERE parent_group IS NULL")
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error querying groups: "+err.Error())
return
}
params["groups"] = groups
s.renderTemplate(writer, "index", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"net/http"
"time"
"github.com/jackc/pgx/v5"
"go.lindenii.runxiyu.org/forge/forged/internal/argon2id"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleLogin provides the login page for local users.
func (s *Server) httpHandleLogin(writer http.ResponseWriter, request *http.Request, params map[string]any) {
var username, password string
var userID int
var passwordHash string
var err error
var passwordMatches bool
var cookieValue string
var now time.Time
var expiry time.Time
var cookie http.Cookie
if request.Method != http.MethodPost {
s.renderTemplate(writer, "login", params)
return
}
username = request.PostFormValue("username")
password = request.PostFormValue("password")
err = s.database.QueryRow(request.Context(),
"SELECT id, COALESCE(password, '') FROM users WHERE username = $1",
username,
).Scan(&userID, &passwordHash)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
params["login_error"] = "Unknown username"
s.renderTemplate(writer, "login", params)
return
}
web.ErrorPage500(s.templates, writer, params, "Error querying user information: "+err.Error())
return
}
if passwordHash == "" {
params["login_error"] = "User has no password"
s.renderTemplate(writer, "login", params)
return
}
if passwordMatches, err = argon2id.ComparePasswordAndHash(password, passwordHash); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error comparing password and hash: "+err.Error())
return
}
if !passwordMatches {
params["login_error"] = "Invalid password"
s.renderTemplate(writer, "login", params)
return
}
if cookieValue, err = randomUrlsafeStr(16); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting random string: "+err.Error())
return
}
now = time.Now()
expiry = now.Add(time.Duration(s.config.HTTP.CookieExpiry) * time.Second)
cookie = http.Cookie{
Name: "session",
Value: cookieValue,
SameSite: http.SameSiteLaxMode,
HttpOnly: true,
Secure: false, // TODO
Expires: expiry,
Path: "/",
} //exhaustruct:ignore
http.SetCookie(writer, &cookie)
_, err = s.database.Exec(request.Context(), "INSERT INTO sessions (user_id, session_id) VALUES ($1, $2)", userID, cookieValue)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error inserting session: "+err.Error())
return
}
http.Redirect(writer, request, "/", http.StatusSeeOther)
}
// randomUrlsafeStr generates a random string of the given entropic size
// using the URL-safe base64 encoding. The actual size of the string returned
// will be 4*sz.
func randomUrlsafeStr(sz int) (string, error) {
r := make([]byte, 3*sz)
_, err := rand.Read(r)
if err != nil {
return "", fmt.Errorf("error generating random string: %w", err)
}
return base64.RawURLEncoding.EncodeToString(r), nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"fmt"
"net/http"
"strings"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/format/diff"
"github.com/go-git/go-git/v5/plumbing/object"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
"go.lindenii.runxiyu.org/forge/forged/internal/oldgit"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// usableFilePatch is a [diff.FilePatch] that is structured in a way more
// friendly for use in HTML templates.
type usableFilePatch struct {
From diff.File
To diff.File
Chunks []usableChunk
}
// usableChunk is a [diff.Chunk] that is structured in a way more friendly for
// use in HTML templates.
type usableChunk struct {
Operation diff.Operation
Content string
}
func (s *Server) httpHandleRepoCommit(writer http.ResponseWriter, request *http.Request, params map[string]any) {
var repo *git.Repository
var commitIDStrSpec, commitIDStrSpecNoSuffix string
var commitID plumbing.Hash
var parentCommitHash plumbing.Hash
var commitObj *object.Commit
var commitIDStr string
var err error
var patch *object.Patch
repo, commitIDStrSpec = params["repo"].(*git.Repository), params["commit_id"].(string)
commitIDStrSpecNoSuffix = strings.TrimSuffix(commitIDStrSpec, ".patch")
commitID = plumbing.NewHash(commitIDStrSpecNoSuffix)
if commitObj, err = repo.CommitObject(commitID); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting commit object: "+err.Error())
return
}
if commitIDStrSpecNoSuffix != commitIDStrSpec {
var patchStr string
if patchStr, err = oldgit.FmtCommitPatch(commitObj); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error formatting patch: "+err.Error())
return
}
fmt.Fprintln(writer, patchStr)
return
}
commitIDStr = commitObj.Hash.String()
if commitIDStr != commitIDStrSpec {
http.Redirect(writer, request, commitIDStr, http.StatusSeeOther)
return
}
params["commit_object"] = commitObj
params["commit_id"] = commitIDStr
parentCommitHash, patch, err = oldgit.CommitToPatch(commitObj)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting patch from commit: "+err.Error())
return
}
params["parent_commit_hash"] = parentCommitHash.String()
params["patch"] = patch
params["file_patches"] = makeUsableFilePatches(patch)
s.renderTemplate(writer, "repo_commit", params)
}
type fakeDiffFile struct {
hash plumbing.Hash
mode filemode.FileMode
path string
}
func (f fakeDiffFile) Hash() plumbing.Hash {
return f.hash
}
func (f fakeDiffFile) Mode() filemode.FileMode {
return f.mode
}
func (f fakeDiffFile) Path() string {
return f.path
}
var nullFakeDiffFile = fakeDiffFile{ //nolint:gochecknoglobals
hash: plumbing.NewHash("0000000000000000000000000000000000000000"),
mode: misc.FirstOrPanic(filemode.New("100644")),
path: "",
}
func makeUsableFilePatches(patch diff.Patch) (usableFilePatches []usableFilePatch) {
// TODO: Remove unnecessary context
// TODO: Prepend "+"/"-"/" " instead of solely distinguishing based on color
for _, filePatch := range patch.FilePatches() {
var fromFile, toFile diff.File
var ufp usableFilePatch
chunks := []usableChunk{}
fromFile, toFile = filePatch.Files()
if fromFile == nil {
fromFile = nullFakeDiffFile
}
if toFile == nil {
toFile = nullFakeDiffFile
}
for _, chunk := range filePatch.Chunks() {
var content string
content = chunk.Content()
if len(content) > 0 && content[0] == '\n' {
content = "\n" + content
} // Horrible hack to fix how browsers handle newlines that immediately follow <pre>
chunks = append(chunks, usableChunk{
Operation: chunk.Type(),
Content: content,
})
}
ufp = usableFilePatch{
Chunks: chunks,
From: fromFile,
To: toFile,
}
usableFilePatches = append(usableFilePatches, ufp)
}
return
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/http"
"github.com/jackc/pgx/v5"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// idTitleStatus describes properties of a merge request that need to be
// present in MR listings.
type idTitleStatus struct {
ID int
Title string
Status string
}
// httpHandleRepoContribIndex provides an index to merge requests of a repo.
func (s *Server) httpHandleRepoContribIndex(writer http.ResponseWriter, request *http.Request, params map[string]any) {
var rows pgx.Rows
var result []idTitleStatus
var err error
if rows, err = s.database.Query(request.Context(),
"SELECT repo_local_id, COALESCE(title, 'Untitled'), status FROM merge_requests WHERE repo_id = $1",
params["repo_id"],
); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error querying merge requests: "+err.Error())
return
}
defer rows.Close()
for rows.Next() {
var mrID int
var mrTitle, mrStatus string
if err = rows.Scan(&mrID, &mrTitle, &mrStatus); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error scanning merge request: "+err.Error())
return
}
result = append(result, idTitleStatus{mrID, mrTitle, mrStatus})
}
if err = rows.Err(); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error ranging over merge requests: "+err.Error())
return
}
params["merge_requests"] = result
s.renderTemplate(writer, "repo_contrib_index", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/http"
"strconv"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleRepoContribOne provides an interface to each merge request of a
// repo.
func (s *Server) httpHandleRepoContribOne(writer http.ResponseWriter, request *http.Request, params map[string]any) {
var mrIDStr string
var mrIDInt int
var err error
var title, status, srcRefStr, dstBranchStr string
var repo *git.Repository
var srcRefHash plumbing.Hash
var dstBranchHash plumbing.Hash
var srcCommit, dstCommit, mergeBaseCommit *object.Commit
var mergeBases []*object.Commit
mrIDStr = params["mr_id"].(string)
mrIDInt64, err := strconv.ParseInt(mrIDStr, 10, strconv.IntSize)
if err != nil {
web.ErrorPage400(s.templates, writer, params, "Merge request ID not an integer")
return
}
mrIDInt = int(mrIDInt64)
if err = s.database.QueryRow(request.Context(),
"SELECT COALESCE(title, ''), status, source_ref, COALESCE(destination_branch, '') FROM merge_requests WHERE repo_id = $1 AND repo_local_id = $2",
params["repo_id"], mrIDInt,
).Scan(&title, &status, &srcRefStr, &dstBranchStr); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error querying merge request: "+err.Error())
return
}
repo = params["repo"].(*git.Repository)
if srcRefHash, err = getRefHash(repo, "branch", srcRefStr); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting source ref hash: "+err.Error())
return
}
if srcCommit, err = repo.CommitObject(srcRefHash); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting source commit: "+err.Error())
return
}
params["source_commit"] = srcCommit
if dstBranchStr == "" {
dstBranchStr = "HEAD"
dstBranchHash, err = getRefHash(repo, "", "")
} else {
dstBranchHash, err = getRefHash(repo, "branch", dstBranchStr)
}
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting destination branch hash: "+err.Error())
return
}
if dstCommit, err = repo.CommitObject(dstBranchHash); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting destination commit: "+err.Error())
return
}
params["destination_commit"] = dstCommit
if mergeBases, err = srcCommit.MergeBase(dstCommit); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting merge base: "+err.Error())
return
}
if len(mergeBases) < 1 {
web.ErrorPage500(s.templates, writer, params, "No merge base found for this merge request; these two branches do not share any common history")
// TODO
return
}
mergeBaseCommit = mergeBases[0]
params["merge_base"] = mergeBaseCommit
patch, err := mergeBaseCommit.Patch(srcCommit)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting patch: "+err.Error())
return
}
params["file_patches"] = makeUsableFilePatches(patch)
params["mr_title"], params["mr_status"], params["mr_source_ref"], params["mr_destination_branch"] = title, status, srcRefStr, dstBranchStr
s.renderTemplate(writer, "repo_contrib_one", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/http"
"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
"go.lindenii.runxiyu.org/forge/forged/internal/render"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleRepoIndex provides the front page of a repo using git2d.
func (s *Server) httpHandleRepoIndex(w http.ResponseWriter, req *http.Request, params map[string]any) {
repoName := params["repo_name"].(string)
groupPath := params["group_path"].([]string)
_, repoPath, _, _, _, _, _ := s.getRepoInfo(req.Context(), groupPath, repoName, "") // TODO: Don't use getRepoInfo
client, err := git2c.NewClient(s.config.Git.Socket)
if err != nil {
web.ErrorPage500(s.templates, w, params, err.Error())
return
}
defer client.Close()
commits, readme, err := client.CmdIndex(repoPath)
if err != nil {
web.ErrorPage500(s.templates, w, params, err.Error())
return
}
params["commits"] = commits
params["readme_filename"] = readme.Filename
_, params["readme"] = render.Readme(readme.Content, readme.Filename)
s.renderTemplate(w, "repo_index", params)
// TODO: Caching
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"fmt"
"io"
"net/http"
"os/exec"
"github.com/jackc/pgx/v5/pgtype"
)
// httpHandleRepoInfo provides advertised refs of a repo for use in Git's Smart
// HTTP protocol.
//
// TODO: Reject access from web browsers.
func (s *Server) httpHandleRepoInfo(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) {
groupPath := params["group_path"].([]string)
repoName := params["repo_name"].(string)
var repoPath string
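// Resolve the nested group path to the repo's filesystem path with a
// recursive CTE over the groups table.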
if err := s.database.QueryRow(request.Context(), `
WITH RECURSIVE group_path_cte AS (
-- Start: match the first name in the path where parent_group IS NULL
SELECT
id,
parent_group,
name,
1 AS depth
FROM groups
WHERE name = ($1::text[])[1]
AND parent_group IS NULL
UNION ALL
-- Recurse: join the next segment of the path
SELECT
g.id,
g.parent_group,
g.name,
group_path_cte.depth + 1
FROM groups g
JOIN group_path_cte ON g.parent_group = group_path_cte.id
WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
AND group_path_cte.depth + 1 <= cardinality($1::text[])
)
SELECT r.filesystem_path
FROM group_path_cte c
JOIN repos r ON r.group_id = c.id
WHERE c.depth = cardinality($1::text[])
AND r.name = $2
`,
pgtype.FlatArray[string](groupPath),
repoName,
).Scan(&repoPath); err != nil {
return err
}
writer.Header().Set("Content-Type", "application/x-git-upload-pack-advertisement")
writer.WriteHeader(http.StatusOK)
cmd := exec.Command("git", "upload-pack", "--stateless-rpc", "--advertise-refs", repoPath)
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
defer func() {
_ = stdout.Close()
}()
cmd.Stderr = cmd.Stdout
if err = cmd.Start(); err != nil {
return err
}
if err = packLine(writer, "# service=git-upload-pack\n"); err != nil {
return err
}
if err = packFlush(writer); err != nil {
return
}
if _, err = io.Copy(writer, stdout); err != nil {
return err
}
if err = cmd.Wait(); err != nil {
return err
}
return nil
}
// Taken from https://github.com/icyphox/legit, MIT license.
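// packLine writes a pkt-line: a four-digit hexadecimal length prefix (which
// counts the four prefix bytes themselves) followed by the payload.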
func packLine(w io.Writer, s string) error {
_, err := fmt.Fprintf(w, "%04x%s", len(s)+4, s)
return err
}
// Taken from https://github.com/icyphox/legit, MIT license.
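// packFlush writes a flush-pkt ("0000"), which delimits sections of the
// pkt-line stream.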
func packFlush(w io.Writer) error {
_, err := fmt.Fprint(w, "0000")
return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/http"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleRepoLog provides a page with a complete Git log.
//
// TODO: This currently provides all commits in the branch. It should be
// paginated and cached instead.
func (s *Server) httpHandleRepoLog(writer http.ResponseWriter, req *http.Request, params map[string]any) {
var repo *git.Repository
var refHash plumbing.Hash
var err error
repo = params["repo"].(*git.Repository)
if refHash, err = getRefHash(repo, params["ref_type"].(string), params["ref_name"].(string)); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting ref hash: "+err.Error())
return
}
logOptions := git.LogOptions{From: refHash} //exhaustruct:ignore
commitIter, err := repo.Log(&logOptions)
if err != nil {
web.ErrorPage500(s.templates, writer, params, "Error getting recent commits: "+err.Error())
return
}
params["commits"], params["commits_err"] = commitIterSeqErr(req.Context(), commitIter)
s.renderTemplate(writer, "repo_log", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"fmt"
"html/template"
"net/http"
"strings"
"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleRepoRaw serves raw files, or directory listings that point to raw
// files.
func (s *Server) httpHandleRepoRaw(writer http.ResponseWriter, request *http.Request, params map[string]any) {
repoName := params["repo_name"].(string)
groupPath := params["group_path"].([]string)
rawPathSpec := params["rest"].(string)
pathSpec := strings.TrimSuffix(rawPathSpec, "/")
params["path_spec"] = pathSpec
_, repoPath, _, _, _, _, _ := s.getRepoInfo(request.Context(), groupPath, repoName, "")
client, err := git2c.NewClient(s.config.Git.Socket)
if err != nil {
web.ErrorPage500(s.templates, writer, params, err.Error())
return
}
defer client.Close()
files, content, err := client.CmdTreeRaw(repoPath, pathSpec)
if err != nil {
web.ErrorPage500(s.templates, writer, params, err.Error())
return
}
switch {
case files != nil:
params["files"] = files
params["readme_filename"] = "README.md"
params["readme"] = template.HTML("<p>README rendering here is WIP again</p>") // TODO
s.renderTemplate(writer, "repo_raw_dir", params)
case content != "":
if misc.RedirectNoDir(writer, request) {
return
}
writer.Header().Set("Content-Type", "application/octet-stream")
fmt.Fprint(writer, content)
default:
web.ErrorPage500(s.templates, writer, params, "Unknown error fetching repo raw data")
}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"html/template"
"net/http"
"strings"
"go.lindenii.runxiyu.org/forge/forged/internal/git2c"
"go.lindenii.runxiyu.org/forge/forged/internal/render"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleRepoTree provides a friendly, syntax-highlighted view of
// individual files, and provides directory views that link to these files.
//
// TODO: Do not highlight files that are too large.
func (s *Server) httpHandleRepoTree(writer http.ResponseWriter, request *http.Request, params map[string]any) {
repoName := params["repo_name"].(string)
groupPath := params["group_path"].([]string)
rawPathSpec := params["rest"].(string)
pathSpec := strings.TrimSuffix(rawPathSpec, "/")
params["path_spec"] = pathSpec
_, repoPath, _, _, _, _, _ := s.getRepoInfo(request.Context(), groupPath, repoName, "")
client, err := git2c.NewClient(s.config.Git.Socket)
if err != nil {
web.ErrorPage500(s.templates, writer, params, err.Error())
return
}
defer client.Close()
files, content, err := client.CmdTreeRaw(repoPath, pathSpec)
if err != nil {
web.ErrorPage500(s.templates, writer, params, err.Error())
return
}
switch {
case files != nil:
params["files"] = files
params["readme_filename"] = "README.md"
params["readme"] = template.HTML("<p>README rendering here is WIP again</p>") // TODO
s.renderTemplate(writer, "repo_tree_dir", params)
case content != "":
rendered := render.Highlight(pathSpec, content)
params["file_contents"] = rendered
s.renderTemplate(writer, "repo_tree_file", params)
default:
web.ErrorPage500(s.templates, writer, params, "Unknown object type, something is seriously wrong")
}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"bytes"
"compress/gzip"
"compress/zlib"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
"strings"
"github.com/jackc/pgx/v5/pgtype"
)
// httpHandleUploadPack handles incoming Git fetches, pulls, and clones over
// the Smart HTTP protocol.
func (s *Server) httpHandleUploadPack(writer http.ResponseWriter, request *http.Request, params map[string]any) (err error) {
if ct := request.Header.Get("Content-Type"); !strings.HasPrefix(ct, "application/x-git-upload-pack-request") {
http.Error(writer, "bad content-type", http.StatusUnsupportedMediaType)
return nil
}
decoded, err := decodeBody(request)
if err != nil {
http.Error(writer, "cannot decode request body", http.StatusBadRequest)
return err
}
defer decoded.Close()
var groupPath []string
var repoName string
var repoPath string
var cmd *exec.Cmd
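// Resolve the repo's filesystem path by walking the group path with a
// recursive CTE (same query as in httpHandleRepoInfo).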
groupPath, repoName = params["group_path"].([]string), params["repo_name"].(string)
if err := s.database.QueryRow(request.Context(), `
WITH RECURSIVE group_path_cte AS (
-- Start: match the first name in the path where parent_group IS NULL
SELECT
id,
parent_group,
name,
1 AS depth
FROM groups
WHERE name = ($1::text[])[1]
AND parent_group IS NULL
UNION ALL
-- Recurse: join the next segment of the path
SELECT
g.id,
g.parent_group,
g.name,
group_path_cte.depth + 1
FROM groups g
JOIN group_path_cte ON g.parent_group = group_path_cte.id
WHERE g.name = ($1::text[])[group_path_cte.depth + 1]
AND group_path_cte.depth + 1 <= cardinality($1::text[])
)
SELECT r.filesystem_path
FROM group_path_cte c
JOIN repos r ON r.group_id = c.id
WHERE c.depth = cardinality($1::text[])
AND r.name = $2
`,
pgtype.FlatArray[string](groupPath),
repoName,
).Scan(&repoPath); err != nil {
return err
}
writer.Header().Set("Content-Type", "application/x-git-upload-pack-result")
// writer.Header().Set("Connection", "Keep-Alive")
// writer.Header().Set("Transfer-Encoding", "chunked")
cmd = exec.CommandContext(request.Context(), "git", "upload-pack", "--stateless-rpc", repoPath)
cmd.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket)
var stderrBuf bytes.Buffer
cmd.Stderr = &stderrBuf
cmd.Stdout = writer
cmd.Stdin = decoded
if gp := request.Header.Get("Git-Protocol"); gp != "" {
cmd.Env = append(cmd.Env, "GIT_PROTOCOL="+gp)
}
if err = cmd.Run(); err != nil {
log.Println(stderrBuf.String())
return err
}
return nil
}
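// decodeBody returns a reader for the request body, transparently
// decompressing it when the Content-Encoding is gzip or deflate.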
func decodeBody(r *http.Request) (io.ReadCloser, error) {
switch ce := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Encoding"))); ce {
case "", "identity":
return r.Body, nil
case "gzip":
zr, err := gzip.NewReader(r.Body)
if err != nil {
return nil, err
}
return zr, nil
case "deflate":
zr, err := zlib.NewReader(r.Body)
if err != nil {
return nil, err
}
return zr, nil
default:
return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce)
}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/http"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// httpHandleUsers is a useless stub.
func (s *Server) httpHandleUsers(writer http.ResponseWriter, _ *http.Request, params map[string]any) {
web.ErrorPage501(s.templates, writer, params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"errors"
"log/slog"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/jackc/pgx/v5"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
"go.lindenii.runxiyu.org/forge/forged/internal/web"
)
// ServeHTTP handles all incoming HTTP requests and routes them to the correct
// location.
//
// TODO: This function is way too large.
func (s *Server) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
var remoteAddr string
if s.config.HTTP.ReverseProxy {
remoteAddrs, ok := request.Header["X-Forwarded-For"]
if ok && len(remoteAddrs) == 1 {
remoteAddr = remoteAddrs[0]
} else {
remoteAddr = request.RemoteAddr
}
} else {
remoteAddr = request.RemoteAddr
}
slog.Info("incoming http", "addr", remoteAddr, "method", request.Method, "uri", request.RequestURI)
var segments []string
var err error
var sepIndex int
params := make(map[string]any)
if segments, _, err = misc.ParseReqURI(request.RequestURI); err != nil {
web.ErrorPage400(s.templates, writer, params, "Error parsing request URI: "+err.Error())
return
}
dirMode := false
if segments[len(segments)-1] == "" {
dirMode = true
segments = segments[:len(segments)-1]
}
params["url_segments"] = segments
params["dir_mode"] = dirMode
params["global"] = s.globalData
var userID int // 0 for none
userID, params["username"], err = s.getUserFromRequest(request)
params["user_id"] = userID
if err != nil && !errors.Is(err, http.ErrNoCookie) && !errors.Is(err, pgx.ErrNoRows) {
web.ErrorPage500(s.templates, writer, params, "Error getting user info from request: "+err.Error())
return
}
if userID == 0 {
params["user_id_string"] = ""
} else {
params["user_id_string"] = strconv.Itoa(userID)
}
for _, v := range segments {
if strings.Contains(v, ":") {
web.ErrorPage400Colon(s.templates, writer, params)
return
}
}
if len(segments) == 0 {
s.httpHandleIndex(writer, request, params)
return
}
if segments[0] == "-" {
if len(segments) < 2 {
web.ErrorPage404(s.templates, writer, params)
return
} else if len(segments) == 2 && misc.RedirectDir(writer, request) {
return
}
switch segments[1] {
case "static":
s.staticHandler.ServeHTTP(writer, request)
return
case "source":
s.sourceHandler.ServeHTTP(writer, request)
return
}
}
if segments[0] == "-" {
switch segments[1] {
case "login":
s.httpHandleLogin(writer, request, params)
return
case "users":
s.httpHandleUsers(writer, request, params)
return
default:
web.ErrorPage404(s.templates, writer, params)
return
}
}
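// Locate the "-" separator segment, which splits the group path from the
// module specification (e.g. /group/subgroup/-/repos/name).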
sepIndex = -1
for i, part := range segments {
if part == "-" {
sepIndex = i
break
}
}
params["separator_index"] = sepIndex
var groupPath []string
var moduleType string
var moduleName string
if sepIndex > 0 {
groupPath = segments[:sepIndex]
} else {
groupPath = segments
}
params["group_path"] = groupPath
switch {
case sepIndex == -1:
if misc.RedirectDir(writer, request) {
return
}
s.httpHandleGroupIndex(writer, request, params)
case len(segments) == sepIndex+1:
web.ErrorPage404(s.templates, writer, params)
return
case len(segments) == sepIndex+2:
web.ErrorPage404(s.templates, writer, params)
return
default:
moduleType = segments[sepIndex+1]
moduleName = segments[sepIndex+2]
switch moduleType {
case "repos":
params["repo_name"] = moduleName
if len(segments) > sepIndex+3 {
switch segments[sepIndex+3] {
case "info":
if err = s.httpHandleRepoInfo(writer, request, params); err != nil {
web.ErrorPage500(s.templates, writer, params, err.Error())
}
return
case "git-upload-pack":
if err = s.httpHandleUploadPack(writer, request, params); err != nil {
web.ErrorPage500(s.templates, writer, params, err.Error())
}
return
}
}
if params["ref_type"], params["ref_name"], err = misc.GetParamRefTypeName(request); err != nil {
if errors.Is(err, misc.ErrNoRefSpec) {
params["ref_type"] = ""
} else {
web.ErrorPage400(s.templates, writer, params, "Error querying ref type: "+err.Error())
return
}
}
if params["repo"], params["repo_description"], params["repo_id"], _, err = s.openRepo(request.Context(), groupPath, moduleName); err != nil {
web.ErrorPage500(s.templates, writer, params, "Error opening repo: "+err.Error())
return
}
repoURLRoot := "/"
for _, part := range segments[:sepIndex+3] {
repoURLRoot = repoURLRoot + url.PathEscape(part) + "/"
}
params["repo_url_root"] = repoURLRoot
params["repo_patch_mailing_list"] = repoURLRoot[1:len(repoURLRoot)-1] + "@" + s.config.LMTP.Domain
params["http_clone_url"] = s.genHTTPRemoteURL(groupPath, moduleName)
params["ssh_clone_url"] = s.genSSHRemoteURL(groupPath, moduleName)
if len(segments) == sepIndex+3 {
if misc.RedirectDir(writer, request) {
return
}
s.httpHandleRepoIndex(writer, request, params)
return
}
repoFeature := segments[sepIndex+3]
switch repoFeature {
case "tree":
if misc.AnyContain(segments[sepIndex+4:], "/") {
web.ErrorPage400(s.templates, writer, params, "Repo tree paths may not contain slashes in any segments")
return
}
if dirMode {
params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/"
} else {
params["rest"] = strings.Join(segments[sepIndex+4:], "/")
}
if len(segments) < sepIndex+5 && misc.RedirectDir(writer, request) {
return
}
s.httpHandleRepoTree(writer, request, params)
case "branches":
if misc.RedirectDir(writer, request) {
return
}
s.httpHandleRepoBranches(writer, request, params)
return
case "raw":
if misc.AnyContain(segments[sepIndex+4:], "/") {
web.ErrorPage400(s.templates, writer, params, "Repo tree paths may not contain slashes in any segments")
return
}
if dirMode {
params["rest"] = strings.Join(segments[sepIndex+4:], "/") + "/"
} else {
params["rest"] = strings.Join(segments[sepIndex+4:], "/")
}
if len(segments) < sepIndex+5 && misc.RedirectDir(writer, request) {
return
}
s.httpHandleRepoRaw(writer, request, params)
case "log":
if len(segments) > sepIndex+4 {
web.ErrorPage400(s.templates, writer, params, "Too many parameters")
return
}
if misc.RedirectDir(writer, request) {
return
}
s.httpHandleRepoLog(writer, request, params)
case "commit":
if len(segments) != sepIndex+5 {
web.ErrorPage400(s.templates, writer, params, "Incorrect number of parameters")
return
}
if misc.RedirectNoDir(writer, request) {
return
}
params["commit_id"] = segments[sepIndex+4]
s.httpHandleRepoCommit(writer, request, params)
case "contrib":
if misc.RedirectDir(writer, request) {
return
}
switch len(segments) {
case sepIndex + 4:
s.httpHandleRepoContribIndex(writer, request, params)
case sepIndex + 5:
params["mr_id"] = segments[sepIndex+4]
s.httpHandleRepoContribOne(writer, request, params)
default:
web.ErrorPage400(s.templates, writer, params, "Too many parameters")
}
default:
web.ErrorPage404(s.templates, writer, params)
return
}
default:
web.ErrorPage404(s.templates, writer, params)
return
}
}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"log/slog"
"net/http"
)
// renderTemplate abstracts out the annoyances of reporting template rendering
// errors.
func (s *Server) renderTemplate(w http.ResponseWriter, templateName string, params map[string]any) {
if err := s.templates.ExecuteTemplate(w, templateName, params); err != nil {
http.Error(w, "error rendering template: "+err.Error(), http.StatusInternalServerError)
slog.Error("error rendering template", "error", err.Error())
}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"bytes"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"os"
"os/exec"
"strings"
"time"
"github.com/bluekeyes/go-gitdiff/gitdiff"
"github.com/go-git/go-git/v5"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
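// lmtpHandlePatch parses an mbox-formatted patch received over LMTP, applies
// it on top of the repository's HEAD, and records the result as a new commit
// on a randomly named contrib/ branch.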
func (s *Server) lmtpHandlePatch(session *lmtpSession, groupPath []string, repoName string, mbox io.Reader) (err error) {
var diffFiles []*gitdiff.File
var preamble string
if diffFiles, preamble, err = gitdiff.Parse(mbox); err != nil {
return fmt.Errorf("failed to parse patch: %w", err)
}
var header *gitdiff.PatchHeader
if header, err = gitdiff.ParsePatchHeader(preamble); err != nil {
return fmt.Errorf("failed to parse patch headers: %w", err)
}
var repo *git.Repository
var fsPath string
repo, _, _, fsPath, err = s.openRepo(session.ctx, groupPath, repoName)
if err != nil {
return fmt.Errorf("failed to open repo: %w", err)
}
headRef, err := repo.Head()
if err != nil {
return fmt.Errorf("failed to get repo head hash: %w", err)
}
headCommit, err := repo.CommitObject(headRef.Hash())
if err != nil {
return fmt.Errorf("failed to get repo head commit: %w", err)
}
headTree, err := headCommit.Tree()
if err != nil {
return fmt.Errorf("failed to get repo head tree: %w", err)
}
headTreeHash := headTree.Hash.String()
blobUpdates := make(map[string][]byte)
for _, diffFile := range diffFiles {
sourceFile, err := headTree.File(diffFile.OldName)
if err != nil {
return fmt.Errorf("failed to get file at old name %#v: %w", diffFile.OldName, err)
}
sourceString, err := sourceFile.Contents()
if err != nil {
return fmt.Errorf("failed to get contents: %w", err)
}
sourceBuf := bytes.NewReader(misc.StringToBytes(sourceString))
var patchedBuf bytes.Buffer
if err := gitdiff.Apply(&patchedBuf, sourceBuf, diffFile); err != nil {
return fmt.Errorf("failed to apply patch: %w", err)
}
var hashBuf bytes.Buffer
// It's really difficult to do this via go-git so we're just
// going to use upstream git for now.
// TODO
cmd := exec.CommandContext(session.ctx, "git", "hash-object", "-w", "-t", "blob", "--stdin")
cmd.Env = append(os.Environ(), "GIT_DIR="+fsPath)
cmd.Stdout = &hashBuf
cmd.Stdin = &patchedBuf
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to run git hash-object: %w", err)
}
newHashStr := strings.TrimSpace(hashBuf.String())
newHash, err := hex.DecodeString(newHashStr)
if err != nil {
return fmt.Errorf("failed to decode hex string from git: %w", err)
}
blobUpdates[diffFile.NewName] = newHash
if diffFile.NewName != diffFile.OldName {
blobUpdates[diffFile.OldName] = nil // Mark for deletion.
}
}
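// Build a new tree containing the updated blobs, create a commit on top of
// HEAD with the patch's author information, and point a new contrib/ branch
// at it.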
newTreeSha, err := buildTreeRecursive(session.ctx, fsPath, headTreeHash, blobUpdates)
if err != nil {
return fmt.Errorf("failed to recursively build a tree: %w", err)
}
commitMsg := header.Title
if header.Body != "" {
commitMsg += "\n\n" + header.Body
}
env := append(os.Environ(),
"GIT_DIR="+fsPath,
"GIT_AUTHOR_NAME="+header.Author.Name,
"GIT_AUTHOR_EMAIL="+header.Author.Email,
"GIT_AUTHOR_DATE="+header.AuthorDate.Format(time.RFC3339),
)
commitCmd := exec.CommandContext(session.ctx, "git", "commit-tree", newTreeSha, "-p", headCommit.Hash.String(), "-m", commitMsg)
commitCmd.Env = env
var commitOut bytes.Buffer
commitCmd.Stdout = &commitOut
if err := commitCmd.Run(); err != nil {
return fmt.Errorf("failed to commit tree: %w", err)
}
newCommitSha := strings.TrimSpace(commitOut.String())
newBranchName := rand.Text()
refCmd := exec.CommandContext(session.ctx, "git", "update-ref", "refs/heads/contrib/"+newBranchName, newCommitSha) //#nosec G204
refCmd.Env = append(os.Environ(), "GIT_DIR="+fsPath)
if err := refCmd.Run(); err != nil {
return fmt.Errorf("failed to update ref: %w", err)
}
return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
// SPDX-FileCopyrightText: Copyright (c) 2024 Robin Jarry <robin@jarry.cc>
package unsorted
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"log/slog"
"net"
"strings"
"time"
"github.com/emersion/go-message"
"github.com/emersion/go-smtp"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
type lmtpHandler struct{}
type lmtpSession struct {
from string
to []string
ctx context.Context
cancel context.CancelFunc
s Server
}
func (session *lmtpSession) Reset() {
session.from = ""
session.to = nil
}
func (session *lmtpSession) Logout() error {
session.cancel()
return nil
}
func (session *lmtpSession) AuthPlain(_, _ string) error {
return nil
}
func (session *lmtpSession) Mail(from string, _ *smtp.MailOptions) error {
session.from = from
return nil
}
func (session *lmtpSession) Rcpt(to string, _ *smtp.RcptOptions) error {
session.to = append(session.to, to)
return nil
}
func (*lmtpHandler) NewSession(_ *smtp.Conn) (smtp.Session, error) {
ctx, cancel := context.WithCancel(context.Background())
session := &lmtpSession{
ctx: ctx,
cancel: cancel,
}
return session, nil
}
func (s *Server) serveLMTP(listener net.Listener) error {
smtpServer := smtp.NewServer(&lmtpHandler{})
smtpServer.LMTP = true
smtpServer.Domain = s.config.LMTP.Domain
smtpServer.Addr = s.config.LMTP.Socket
smtpServer.WriteTimeout = time.Duration(s.config.LMTP.WriteTimeout) * time.Second
smtpServer.ReadTimeout = time.Duration(s.config.LMTP.ReadTimeout) * time.Second
smtpServer.EnableSMTPUTF8 = true
return smtpServer.Serve(listener)
}
func (session *lmtpSession) Data(r io.Reader) error {
var (
email *message.Entity
from string
to []string
err error
buf bytes.Buffer
data []byte
n int64
)
n, err = io.CopyN(&buf, r, session.s.config.LMTP.MaxSize)
switch {
case n == session.s.config.LMTP.MaxSize:
err = errors.New("Message too big.")
// drain whatever is left in the pipe
_, _ = io.Copy(io.Discard, r)
goto end
case errors.Is(err, io.EOF):
// message was smaller than max size
break
case err != nil:
goto end
}
data = buf.Bytes()
email, err = message.Read(bytes.NewReader(data))
// Unknown-charset errors are tolerable; bail out only on other parse errors.
if err != nil && !message.IsUnknownCharset(err) {
goto end
}
switch strings.ToLower(email.Header.Get("Auto-Submitted")) {
case "auto-generated", "auto-replied":
// Disregard automatic emails like OOO replies
slog.Info("ignoring automatic message",
"from", session.from,
"to", strings.Join(session.to, ","),
"message-id", email.Header.Get("Message-Id"),
"subject", email.Header.Get("Subject"),
)
goto end
}
slog.Info("message received",
"from", session.from,
"to", strings.Join(session.to, ","),
"message-id", email.Header.Get("Message-Id"),
"subject", email.Header.Get("Subject"),
)
// Make local copies of the values beforehand to ensure the references remain
// valid when the task is run.
from = session.from
to = session.to
_ = from
for _, to := range to {
if !strings.HasSuffix(to, "@"+session.s.config.LMTP.Domain) {
continue
}
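// The local part of the recipient address encodes the target, e.g.
// group/subgroup/-/repos/name for a repo's patch mailing list.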
localPart := to[:len(to)-len("@"+session.s.config.LMTP.Domain)]
var segments []string
segments, err = misc.PathToSegments(localPart)
if err != nil {
// TODO: Should the entire email fail or should we just
// notify them out of band?
err = fmt.Errorf("cannot parse path: %w", err)
goto end
}
sepIndex := -1
for i, part := range segments {
if part == "-" {
sepIndex = i
break
}
}
if segments[len(segments)-1] == "" {
segments = segments[:len(segments)-1] // We don't care about dir or not.
}
if sepIndex == -1 || len(segments) <= sepIndex+2 {
err = errors.New("illegal path")
goto end
}
mbox := bytes.Buffer{}
if _, err = fmt.Fprint(&mbox, "From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001\r\n"); err != nil {
slog.Error("error handling patch... malloc???", "error", err)
goto end
}
data = bytes.ReplaceAll(data, []byte("\r\n"), []byte("\n"))
if _, err = mbox.Write(data); err != nil {
slog.Error("error handling patch... malloc???", "error", err)
goto end
}
// TODO: Is mbox's From escaping necessary here?
groupPath := segments[:sepIndex]
moduleType := segments[sepIndex+1]
moduleName := segments[sepIndex+2]
switch moduleType {
case "repos":
err = session.s.lmtpHandlePatch(session, groupPath, moduleName, &mbox)
if err != nil {
slog.Error("error handling patch", "error", err)
goto end
}
default:
err = errors.New("Emailing any endpoint other than repositories, is not supported yet.") // TODO
goto end
}
}
end:
session.to = nil
session.from = ""
switch err {
case nil:
return nil
default:
return &smtp.SMTPError{
Code: 550,
Message: "Permanent failure: " + err.Error(),
EnhancedCode: [3]int{5, 7, 1},
}
}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"net/url"
"strings"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
// We don't use path.Join because it collapses multiple slashes into one.
// genSSHRemoteURL generates SSH remote URLs from a given group path and repo
// name.
func (s *Server) genSSHRemoteURL(groupPath []string, repoName string) string {
return strings.TrimSuffix(s.config.SSH.Root, "/") + "/" + misc.SegmentsToURL(groupPath) + "/-/repos/" + url.PathEscape(repoName)
}
// genHTTPRemoteURL generates HTTP remote URLs from a given group path and repo
// name.
func (s *Server) genHTTPRemoteURL(groupPath []string, repoName string) string {
return strings.TrimSuffix(s.config.HTTP.Root, "/") + "/" + misc.SegmentsToURL(groupPath) + "/-/repos/" + url.PathEscape(repoName)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"html/template"
"io/fs"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/minify/v2/html"
"go.lindenii.runxiyu.org/forge/forged/internal/embed"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
// loadTemplates minifies and loads HTML templates.
func (s *Server) loadTemplates() (err error) {
minifier := minify.New()
minifierOptions := html.Minifier{
TemplateDelims: [2]string{"{{", "}}"},
KeepDefaultAttrVals: true,
} //exhaustruct:ignore
minifier.Add("text/html", &minifierOptions)
s.templates = template.New("templates").Funcs(template.FuncMap{
"first_line": misc.FirstLine,
"path_escape": misc.PathEscape,
"query_escape": misc.QueryEscape,
"dereference_error": misc.DereferenceOrZero[error],
"minus": misc.Minus,
})
err = fs.WalkDir(embed.Resources, "forged/templates", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.IsDir() {
content, err := fs.ReadFile(embed.Resources, path)
if err != nil {
return err
}
minified, err := minifier.Bytes("text/html", content)
if err != nil {
return err
}
_, err = s.templates.Parse(misc.BytesToString(minified))
if err != nil {
return err
}
}
return nil
})
return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"errors"
"html/template"
"io/fs"
"log"
"log/slog"
"net"
"net/http"
_ "net/http/pprof"
"os"
"os/exec"
"path/filepath"
"syscall"
"time"
"go.lindenii.runxiyu.org/forge/forged/internal/cmap"
"go.lindenii.runxiyu.org/forge/forged/internal/database"
"go.lindenii.runxiyu.org/forge/forged/internal/embed"
"go.lindenii.runxiyu.org/forge/forged/internal/irc"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
goSSH "golang.org/x/crypto/ssh"
)
type Server struct {
config Config
database database.Database
sourceHandler http.Handler
staticHandler http.Handler
// globalData is passed as "global" when rendering HTML templates.
globalData map[string]any
serverPubkeyString string
serverPubkeyFP string
serverPubkey goSSH.PublicKey
// packPasses contains hook cookies mapped to their packPass.
packPasses cmap.Map[string, packPass]
templates *template.Template
ircBot *irc.Bot
ready bool
}
func NewServer(configPath string) (*Server, error) {
s := &Server{
globalData: make(map[string]any),
} //exhaustruct:ignore
s.sourceHandler = http.StripPrefix(
"/-/source/",
http.FileServer(http.FS(embed.Source)),
)
staticFS, err := fs.Sub(embed.Resources, "forged/static")
if err != nil {
return s, err
}
s.staticHandler = http.StripPrefix("/-/static/", http.FileServer(http.FS(staticFS)))
s.globalData = map[string]any{
"server_public_key_string": &s.serverPubkeyString,
"server_public_key_fingerprint": &s.serverPubkeyFP,
"forge_version": version,
// Some other entries are populated after config parsing
}
if err := s.loadConfig(configPath); err != nil {
return s, err
}
misc.NoneOrPanic(s.loadTemplates())
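// Extract the embedded git2d and hookc binaries; hookc is installed as the
// pre-receive hook in the configured hooks directory.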
misc.NoneOrPanic(misc.DeployBinary(misc.FirstOrPanic(embed.Resources.Open("git2d/git2d")), s.config.Git.DaemonPath))
misc.NoneOrPanic(misc.DeployBinary(misc.FirstOrPanic(embed.Resources.Open("hookc/hookc")), filepath.Join(s.config.Hooks.Execs, "pre-receive")))
misc.NoneOrPanic(os.Chmod(filepath.Join(s.config.Hooks.Execs, "pre-receive"), 0o755))
s.ready = true
return s, nil
}
func (s *Server) Run() error {
if !s.ready {
return errors.New("not ready")
}
// Launch Git2D
go func() {
cmd := exec.Command(s.config.Git.DaemonPath, s.config.Git.Socket) //#nosec G204
cmd.Stderr = log.Writer()
cmd.Stdout = log.Writer()
if err := cmd.Run(); err != nil {
panic(err)
}
}()
// UNIX socket listener for hooks
{
hooksListener, err := net.Listen("unix", s.config.Hooks.Socket)
if errors.Is(err, syscall.EADDRINUSE) {
slog.Warn("removing existing socket", "path", s.config.Hooks.Socket)
if err = syscall.Unlink(s.config.Hooks.Socket); err != nil {
slog.Error("removing existing socket", "path", s.config.Hooks.Socket, "error", err)
os.Exit(1)
}
if hooksListener, err = net.Listen("unix", s.config.Hooks.Socket); err != nil {
slog.Error("listening hooks", "error", err)
os.Exit(1)
}
} else if err != nil {
slog.Error("listening hooks", "error", err)
os.Exit(1)
}
slog.Info("listening hooks on unix", "path", s.config.Hooks.Socket)
go func() {
if err = s.serveGitHooks(hooksListener); err != nil {
slog.Error("serving hooks", "error", err)
os.Exit(1)
}
}()
}
// UNIX socket listener for LMTP
{
lmtpListener, err := net.Listen("unix", s.config.LMTP.Socket)
if errors.Is(err, syscall.EADDRINUSE) {
slog.Warn("removing existing socket", "path", s.config.LMTP.Socket)
if err = syscall.Unlink(s.config.LMTP.Socket); err != nil {
slog.Error("removing existing socket", "path", s.config.LMTP.Socket, "error", err)
os.Exit(1)
}
if lmtpListener, err = net.Listen("unix", s.config.LMTP.Socket); err != nil {
slog.Error("listening LMTP", "error", err)
os.Exit(1)
}
} else if err != nil {
slog.Error("listening LMTP", "error", err)
os.Exit(1)
}
slog.Info("listening LMTP on unix", "path", s.config.LMTP.Socket)
go func() {
if err = s.serveLMTP(lmtpListener); err != nil {
slog.Error("serving LMTP", "error", err)
os.Exit(1)
}
}()
}
// SSH listener
{
sshListener, err := net.Listen(s.config.SSH.Net, s.config.SSH.Addr)
if errors.Is(err, syscall.EADDRINUSE) && s.config.SSH.Net == "unix" {
slog.Warn("removing existing socket", "path", s.config.SSH.Addr)
if err = syscall.Unlink(s.config.SSH.Addr); err != nil {
slog.Error("removing existing socket", "path", s.config.SSH.Addr, "error", err)
os.Exit(1)
}
if sshListener, err = net.Listen(s.config.SSH.Net, s.config.SSH.Addr); err != nil {
slog.Error("listening SSH", "error", err)
os.Exit(1)
}
} else if err != nil {
slog.Error("listening SSH", "error", err)
os.Exit(1)
}
slog.Info("listening SSH on", "net", s.config.SSH.Net, "addr", s.config.SSH.Addr)
go func() {
if err = s.serveSSH(sshListener); err != nil {
slog.Error("serving SSH", "error", err)
os.Exit(1)
}
}()
}
// HTTP listener
{
httpListener, err := net.Listen(s.config.HTTP.Net, s.config.HTTP.Addr)
if errors.Is(err, syscall.EADDRINUSE) && s.config.HTTP.Net == "unix" {
slog.Warn("removing existing socket", "path", s.config.HTTP.Addr)
if err = syscall.Unlink(s.config.HTTP.Addr); err != nil {
slog.Error("removing existing socket", "path", s.config.HTTP.Addr, "error", err)
os.Exit(1)
}
if httpListener, err = net.Listen(s.config.HTTP.Net, s.config.HTTP.Addr); err != nil {
slog.Error("listening HTTP", "error", err)
os.Exit(1)
}
} else if err != nil {
slog.Error("listening HTTP", "error", err)
os.Exit(1)
}
server := http.Server{
Handler: s,
ReadTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
WriteTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
IdleTimeout: time.Duration(s.config.HTTP.ReadTimeout) * time.Second,
} //exhaustruct:ignore
slog.Info("listening HTTP on", "net", s.config.HTTP.Net, "addr", s.config.HTTP.Addr)
go func() {
if err = server.Serve(httpListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
slog.Error("serving HTTP", "error", err)
os.Exit(1)
}
}()
}
// Pprof listener
{
pprofListener, err := net.Listen(s.config.Pprof.Net, s.config.Pprof.Addr)
if err != nil {
slog.Error("listening pprof", "error", err)
os.Exit(1)
}
slog.Info("listening pprof on", "net", s.config.Pprof.Net, "addr", s.config.Pprof.Addr)
go func() {
if err := http.Serve(pprofListener, nil); err != nil {
slog.Error("serving pprof", "error", err)
os.Exit(1)
}
}()
}
s.ircBot = irc.NewBot(&s.config.IRC)
// IRC bot
go s.ircBot.ConnectLoop()
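// Block forever; all listeners and the IRC bot run in their own goroutines.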
select {}
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"errors"
"fmt"
"os"
"os/exec"
gliderSSH "github.com/gliderlabs/ssh"
"github.com/go-git/go-git/v5"
)
// packPass contains information known when handling incoming SSH connections
// that then needs to be used in hook socket connection handlers. See hookc(1).
type packPass struct {
session gliderSSH.Session
repo *git.Repository
pubkey string
directAccess bool
repoPath string
userID int
userType string
repoID int
groupPath []string
repoName string
contribReq string
}
// sshHandleRecvPack handles attempts to push to repos.
func (s *Server) sshHandleRecvPack(session gliderSSH.Session, pubkey, repoIdentifier string) (err error) {
groupPath, repoName, repoID, repoPath, directAccess, contribReq, userType, userID, err := s.getRepoInfo2(session.Context(), repoIdentifier, pubkey)
if err != nil {
return err
}
repo, err := git.PlainOpen(repoPath)
if err != nil {
return err
}
repoConf, err := repo.Config()
if err != nil {
return err
}
repoConfCore := repoConf.Raw.Section("core")
if repoConfCore == nil {
return errors.New("repository has no core section in config")
}
hooksPath := repoConfCore.OptionAll("hooksPath")
if len(hooksPath) != 1 || hooksPath[0] != s.config.Hooks.Execs {
return errors.New("repository has hooksPath set to an unexpected value")
}
if !directAccess {
switch contribReq {
case "closed":
if !directAccess {
return errors.New("you need direct access to push to this repo")
}
case "registered_user":
if userType != "registered" {
return errors.New("you need to be a registered user to push to this repo")
}
case "ssh_pubkey":
fallthrough
case "federated":
if pubkey == "" {
return errors.New("you need to have an SSH public key to push to this repo")
}
if userType == "" {
userID, err = s.addUserSSH(session.Context(), pubkey)
if err != nil {
return err
}
fmt.Fprintln(session.Stderr(), "you are now registered as user ID", userID)
userType = "pubkey_only"
}
case "public":
default:
panic("unknown contrib_requirements value " + contribReq)
}
}
cookie, err := randomUrlsafeStr(16)
if err != nil {
fmt.Fprintln(session.Stderr(), "Error while generating cookie:", err)
}
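// Store the pack pass under the cookie so the pre-receive hook (hookc) can
// identify this session over the hooks socket.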
s.packPasses.Store(cookie, packPass{
session: session,
pubkey: pubkey,
directAccess: directAccess,
repoPath: repoPath,
userID: userID,
repoID: repoID,
groupPath: groupPath,
repoName: repoName,
repo: repo,
contribReq: contribReq,
userType: userType,
})
defer s.packPasses.Delete(cookie)
// The Delete won't execute until proc.Wait returns, unless something goes
// horribly wrong, such as a panic.
proc := exec.CommandContext(session.Context(), "git-receive-pack", repoPath)
proc.Env = append(os.Environ(),
"LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket,
"LINDENII_FORGE_HOOKS_COOKIE="+cookie,
)
proc.Stdin = session
proc.Stdout = session
proc.Stderr = session.Stderr()
if err = proc.Start(); err != nil {
fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
return err
}
err = proc.Wait()
if err != nil {
fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
}
return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"fmt"
"os"
"os/exec"
glider_ssh "github.com/gliderlabs/ssh"
)
// sshHandleUploadPack handles clones/fetches. It just uses git-upload-pack
// and has no ACL checks.
func (s *Server) sshHandleUploadPack(session glider_ssh.Session, pubkey, repoIdentifier string) (err error) {
var repoPath string
if _, _, _, repoPath, _, _, _, _, err = s.getRepoInfo2(session.Context(), repoIdentifier, pubkey); err != nil {
return err
}
proc := exec.CommandContext(session.Context(), "git-upload-pack", repoPath)
proc.Env = append(os.Environ(), "LINDENII_FORGE_HOOKS_SOCKET_PATH="+s.config.Hooks.Socket)
proc.Stdin = session
proc.Stdout = session
proc.Stderr = session.Stderr()
if err = proc.Start(); err != nil {
fmt.Fprintln(session.Stderr(), "Error while starting process:", err)
return err
}
err = proc.Wait()
if err != nil {
fmt.Fprintln(session.Stderr(), "Error while waiting for process:", err)
}
return err
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"fmt"
"log/slog"
"net"
"os"
"strings"
gliderSSH "github.com/gliderlabs/ssh"
"go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
goSSH "golang.org/x/crypto/ssh"
)
// serveSSH serves SSH on a [net.Listener]. The listener should generally be a
// TCP listener, although AF_UNIX SOCK_STREAM listeners may be appropriate in
// rare cases.
func (s *Server) serveSSH(listener net.Listener) error {
var hostKeyBytes []byte
var hostKey goSSH.Signer
var err error
var server *gliderSSH.Server
if hostKeyBytes, err = os.ReadFile(s.config.SSH.Key); err != nil {
return err
}
if hostKey, err = goSSH.ParsePrivateKey(hostKeyBytes); err != nil {
return err
}
s.serverPubkey = hostKey.PublicKey()
s.serverPubkeyString = misc.BytesToString(goSSH.MarshalAuthorizedKey(s.serverPubkey))
s.serverPubkeyFP = goSSH.FingerprintSHA256(s.serverPubkey)
server = &gliderSSH.Server{
Handler: func(session gliderSSH.Session) {
clientPubkey := session.PublicKey()
var clientPubkeyStr string
if clientPubkey != nil {
clientPubkeyStr = strings.TrimSuffix(misc.BytesToString(goSSH.MarshalAuthorizedKey(clientPubkey)), "\n")
}
slog.Info("incoming ssh", "addr", session.RemoteAddr().String(), "key", clientPubkeyStr, "command", session.RawCommand())
fmt.Fprintln(session.Stderr(), ansiec.Blue+"Lindenii Forge "+version+", source at "+strings.TrimSuffix(s.config.HTTP.Root, "/")+"/-/source/"+ansiec.Reset+"\r")
cmd := session.Command()
if len(cmd) < 2 {
fmt.Fprintln(session.Stderr(), "Insufficient arguments\r")
return
}
switch cmd[0] {
case "git-upload-pack":
if len(cmd) > 2 {
fmt.Fprintln(session.Stderr(), "Too many arguments\r")
return
}
err = s.sshHandleUploadPack(session, clientPubkeyStr, cmd[1])
case "git-receive-pack":
if len(cmd) > 2 {
fmt.Fprintln(session.Stderr(), "Too many arguments\r")
return
}
err = s.sshHandleRecvPack(session, clientPubkeyStr, cmd[1])
default:
fmt.Fprintln(session.Stderr(), "Unsupported command: "+cmd[0]+"\r")
return
}
if err != nil {
fmt.Fprintln(session.Stderr(), err.Error())
return
}
},
PublicKeyHandler: func(_ gliderSSH.Context, _ gliderSSH.PublicKey) bool { return true },
KeyboardInteractiveHandler: func(_ gliderSSH.Context, _ goSSH.KeyboardInteractiveChallenge) bool { return true },
// It is intentional that we do not check any credentials and accept all connections.
// This allows all users to connect and clone repositories. However, the public key
// is passed to handlers, so e.g. the push handler could check the key and reject the
// push if it needs to.
} //exhaustruct:ignore
server.AddHostKey(hostKey)
if err = server.Serve(listener); err != nil {
slog.Error("error serving SSH", "error", err.Error())
os.Exit(1)
}
return nil
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"go.lindenii.runxiyu.org/forge/forged/internal/ansiec"
"go.lindenii.runxiyu.org/forge/forged/internal/misc"
)
var errIllegalSSHRepoPath = errors.New("illegal SSH repo path")
// getRepoInfo2 also fetches repo information; it should be deprecated and its
// logic implemented in individual handlers instead.
func (s *Server) getRepoInfo2(ctx context.Context, sshPath, sshPubkey string) (groupPath []string, repoName string, repoID int, repoPath string, directAccess bool, contribReq, userType string, userID int, err error) {
var segments []string
var sepIndex int
var moduleType, moduleName string
segments, err = misc.PathToSegments(sshPath)
if err != nil {
return
}
for i, segment := range segments {
var err error
segments[i], err = url.PathUnescape(segment)
if err != nil {
return []string{}, "", 0, "", false, "", "", 0, err
}
}
if segments[0] == "-" {
return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
}
sepIndex = -1
for i, part := range segments {
if part == "-" {
sepIndex = i
break
}
}
if segments[len(segments)-1] == "" {
segments = segments[:len(segments)-1]
}
switch {
case sepIndex == -1:
return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
case len(segments) <= sepIndex+2:
return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
}
groupPath = segments[:sepIndex]
moduleType = segments[sepIndex+1]
moduleName = segments[sepIndex+2]
repoName = moduleName
switch moduleType {
case "repos":
_1, _2, _3, _4, _5, _6, _7 := s.getRepoInfo(ctx, groupPath, moduleName, sshPubkey)
return groupPath, repoName, _1, _2, _3, _4, _5, _6, _7
default:
return []string{}, "", 0, "", false, "", "", 0, errIllegalSSHRepoPath
}
}
// writeRedError is a helper that behaves like Fprintf but wraps the entire
// message in red ANSI escape sequences. It's useful when producing error
// messages on SSH connections.
func writeRedError(w io.Writer, format string, args ...any) {
fmt.Fprintln(w, ansiec.Red+fmt.Sprintf(format, args...)+ansiec.Reset)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
// Package unsorted is where unsorted Go files from the old structure are kept.
package unsorted
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
import (
"context"
"github.com/jackc/pgx/v5"
)
// addUserSSH adds a new user solely based on their SSH public key.
//
// TODO: Audit all users of this function.
func (s *Server) addUserSSH(ctx context.Context, pubkey string) (userID int, err error) {
var txn pgx.Tx
if txn, err = s.database.Begin(ctx); err != nil {
return
}
defer func() {
_ = txn.Rollback(ctx)
}()
if err = txn.QueryRow(ctx, `INSERT INTO users (type) VALUES ('pubkey_only') RETURNING id`).Scan(&userID); err != nil {
return
}
if _, err = txn.Exec(ctx, `INSERT INTO ssh_public_keys (key_string, user_id) VALUES ($1, $2)`, pubkey, userID); err != nil {
return
}
err = txn.Commit(ctx)
return
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package unsorted
var version = "unknown"
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
package web
import (
"html/template"
"net/http"
)
// ErrorPage404 renders a 404 Not Found error page using the "404" template.
func ErrorPage404(templates *template.Template, w http.ResponseWriter, params map[string]any) {
w.WriteHeader(http.StatusNotFound)
_ = templates.ExecuteTemplate(w, "404", params)
}
// ErrorPage400 renders a 400 Bad Request error page using the "400" template.
// The error message is passed via the "complete_error_msg" template param.
func ErrorPage400(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
w.WriteHeader(http.StatusBadRequest)
params["complete_error_msg"] = msg
_ = templates.ExecuteTemplate(w, "400", params)
}
// ErrorPage400Colon renders a 400 Bad Request error page telling the user
// that we migrated from : to -.
func ErrorPage400Colon(templates *template.Template, w http.ResponseWriter, params map[string]any) {
w.WriteHeader(http.StatusBadRequest)
_ = templates.ExecuteTemplate(w, "400_colon", params)
}
// ErrorPage403 renders a 403 Forbidden error page using the "403" template.
// The error message is passed via the "complete_error_msg" template param.
func ErrorPage403(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
w.WriteHeader(http.StatusForbidden)
params["complete_error_msg"] = msg
_ = templates.ExecuteTemplate(w, "403", params)
}
// ErrorPage451 renders a 451 Unavailable For Legal Reasons error page using the "451" template.
// The error message is passed via the "complete_error_msg" template param.
func ErrorPage451(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
w.WriteHeader(http.StatusUnavailableForLegalReasons)
params["complete_error_msg"] = msg
_ = templates.ExecuteTemplate(w, "451", params)
}
// ErrorPage500 renders a 500 Internal Server Error page using the "500" template.
// The error message is passed via the "complete_error_msg" template param.
func ErrorPage500(templates *template.Template, w http.ResponseWriter, params map[string]any, msg string) {
w.WriteHeader(http.StatusInternalServerError)
params["complete_error_msg"] = msg
_ = templates.ExecuteTemplate(w, "500", params)
}
// ErrorPage501 renders a 501 Not Implemented error page using the "501" template.
func ErrorPage501(templates *template.Template, w http.ResponseWriter, params map[string]any) {
w.WriteHeader(http.StatusNotImplemented)
_ = templates.ExecuteTemplate(w, "501", params)
}
// SPDX-License-Identifier: AGPL-3.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 Runxi Yu <https://runxiyu.org>
// Package web provides web-facing components of the forge.
package web