Merge remote-tracking branch 'origin/main' into forgejo-federation

Anthony Wang 2023-02-10 00:24:43 +00:00
commit 1a54d5e897
Signed by: a
GPG key ID: 42A5B952E6DD8D38
450 changed files with 12583 additions and 1654 deletions


@ -25,7 +25,7 @@ steps:
- make deps-frontend
- name: deps-backend
image: golang:1.19
image: golang:1.20
pull: always
commands:
- make deps-backend
@ -88,7 +88,7 @@ steps:
depends_on: [deps-frontend]
- name: checks-backend
image: golang:1.19
image: golang:1.20
commands:
- make --always-make checks-backend # ensure the 'go-licenses' make target runs
depends_on: [deps-backend]
@ -109,7 +109,7 @@ steps:
depends_on: [deps-frontend]
- name: build-backend-no-gcc
image: golang:1.18 # this step is kept as the lowest version of golang that we support
image: golang:1.19 # this step is kept as the lowest version of golang that we support
pull: always
environment:
GO111MODULE: on
@ -122,7 +122,7 @@ steps:
path: /go
- name: build-backend-arm64
image: golang:1.19
image: golang:1.20
environment:
GO111MODULE: on
GOPROXY: https://goproxy.io
@ -138,7 +138,7 @@ steps:
path: /go
- name: build-backend-windows
image: golang:1.19
image: golang:1.20
environment:
GO111MODULE: on
GOPROXY: https://goproxy.io
@ -153,7 +153,7 @@ steps:
path: /go
- name: build-backend-386
image: golang:1.19
image: golang:1.20
environment:
GO111MODULE: on
GOPROXY: https://goproxy.io
@ -247,7 +247,7 @@ steps:
- pull_request
- name: deps-backend
image: golang:1.19
image: golang:1.20
pull: always
commands:
- make deps-backend
@ -364,7 +364,7 @@ steps:
path: /go
- name: generate-coverage
image: golang:1.19
image: golang:1.20
commands:
- make coverage
environment:
@ -440,7 +440,7 @@ steps:
- pull_request
- name: deps-backend
image: golang:1.19
image: golang:1.20
pull: always
commands:
- make deps-backend
@ -557,7 +557,7 @@ steps:
- name: test-e2e
image: mcr.microsoft.com/playwright:v1.29.2-focal
commands:
- curl -sLO https://go.dev/dl/go1.19.linux-amd64.tar.gz && tar -C /usr/local -xzf go1.19.linux-amd64.tar.gz
- curl -sLO https://go.dev/dl/go1.20.linux-amd64.tar.gz && tar -C /usr/local -xzf go1.20.linux-amd64.tar.gz
- groupadd --gid 1001 gitea && useradd -m --gid 1001 --uid 1001 gitea
- apt-get -qq update && apt-get -qqy install build-essential
- export TEST_PGSQL_SCHEMA=''
@ -656,7 +656,7 @@ trigger:
steps:
- name: download
image: golang:1.19
image: golang:1.20
pull: always
commands:
- timeout -s ABRT 40m make generate-license generate-gitignore
@ -720,7 +720,7 @@ steps:
- make deps-frontend
- name: deps-backend
image: golang:1.19
image: golang:1.20
pull: always
commands:
- make deps-backend
@ -729,7 +729,7 @@ steps:
path: /go
- name: static
image: techknowlogick/xgo:go-1.19.x
image: techknowlogick/xgo:go-1.20.x
pull: always
commands:
# Upgrade to node 18 once https://github.com/techknowlogick/xgo/issues/163 is resolved
@ -841,7 +841,7 @@ steps:
- make deps-frontend
- name: deps-backend
image: golang:1.19
image: golang:1.20
pull: always
commands:
- make deps-backend
@ -850,7 +850,7 @@ steps:
path: /go
- name: static
image: techknowlogick/xgo:go-1.19.x
image: techknowlogick/xgo:go-1.20.x
pull: always
commands:
# Upgrade to node 18 once https://github.com/techknowlogick/xgo/issues/163 is resolved
@ -932,7 +932,7 @@ trigger:
steps:
- name: build-docs
image: golang:1.19
image: golang:1.20
commands:
- cd docs
- make trans-copy clean build


@ -28,7 +28,7 @@ linters:
fast: false
run:
go: 1.19
go: 1.20
timeout: 10m
skip-dirs:
- node_modules
@ -74,7 +74,7 @@ linters-settings:
- name: modifies-value-receiver
gofumpt:
extra-rules: true
lang-version: "1.19"
lang-version: "1.20"
depguard:
list-type: denylist
# Check the list against standard lib.
@ -84,6 +84,7 @@ linters-settings:
- github.com/unknwon/com: "use gitea's util and replacements"
- io/ioutil: "use os or io instead"
- golang.org/x/exp: "it's experimental and unreliable."
- code.gitea.io/gitea/modules/git/internal: "do not use the internal package, use AddXxx function instead"
issues:
max-issues-per-linter: 0


@ -267,26 +267,10 @@ with the rest of the summary matching the original PR. Similarly for frontports
---
The below is a script that may be helpful in creating backports. YMMV.
A command to help create backports can be found in `contrib/backport` and can be installed (from inside the gitea repo root directory) using:
```bash
#!/bin/sh
PR="$1"
SHA="$2"
VERSION="$3"
if [ -z "$SHA" ]; then
SHA=$(gh api /repos/go-gitea/gitea/pulls/$PR -q '.merge_commit_sha')
fi
if [ -z "$VERSION" ]; then
VERSION="v1.16"
fi
echo git checkout origin/release/"$VERSION" -b backport-$PR-$VERSION
git checkout origin/release/"$VERSION" -b backport-$PR-$VERSION
git cherry-pick $SHA && git commit --amend && git push zeripath backport-$PR-$VERSION && xdg-open https://github.com/go-gitea/gitea/compare/release/"$VERSION"...zeripath:backport-$PR-$VERSION
go install contrib/backport/backport.go
```
## Developer Certificate of Origin (DCO)


@ -1,5 +1,5 @@
#Build stage
FROM golang:1.19-alpine3.17 AS build-env
FROM golang:1.20-alpine3.17 AS build-env
ARG GOPROXY
ENV GOPROXY ${GOPROXY:-direct}


@ -1,5 +1,5 @@
#Build stage
FROM golang:1.19-alpine3.17 AS build-env
FROM golang:1.20-alpine3.17 AS build-env
ARG GOPROXY
ENV GOPROXY ${GOPROXY:-direct}


@ -23,13 +23,13 @@ SHASUM ?= shasum -a 256
HAS_GO = $(shell hash $(GO) > /dev/null 2>&1 && echo "GO" || echo "NOGO" )
COMMA := ,
XGO_VERSION := go-1.19.x
XGO_VERSION := go-1.20.x
AIR_PACKAGE ?= github.com/cosmtrek/air@v1.40.4
EDITORCONFIG_CHECKER_PACKAGE ?= github.com/editorconfig-checker/editorconfig-checker/cmd/editorconfig-checker@2.6.0
ERRCHECK_PACKAGE ?= github.com/kisielk/errcheck@v1.6.2
GOFUMPT_PACKAGE ?= mvdan.cc/gofumpt@v0.4.0
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.1
GOLANGCI_LINT_PACKAGE ?= github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.0
GXZ_PAGAGE ?= github.com/ulikunitz/xz/cmd/gxz@v0.5.10
MISSPELL_PACKAGE ?= github.com/client9/misspell/cmd/misspell@v0.3.4
SWAGGER_PACKAGE ?= github.com/go-swagger/go-swagger/cmd/swagger@v0.30.3

130 assets/go-licenses.json generated

File diff suppressed because one or more lines are too long


@ -9,6 +9,7 @@ import (
"encoding/json"
"io/fs"
"os"
goPath "path"
"path/filepath"
"regexp"
"sort"
@ -47,13 +48,15 @@ func main() {
entries := []LicenseEntry{}
for _, path := range paths {
path := filepath.ToSlash(path)
licenseText, err := os.ReadFile(path)
if err != nil {
panic(err)
}
path := strings.Replace(path, base+string(os.PathSeparator), "", 1)
name := filepath.Dir(path)
path = strings.Replace(path, base+"/", "", 1)
name := goPath.Dir(path)
// There might be a bug somewhere in go-licenses that sometimes interprets the
// root package as "." and sometimes as "code.gitea.io/gitea". Workaround by


@ -180,6 +180,11 @@ var (
Name: "raw",
Usage: "Display only the token value",
},
cli.StringFlag{
Name: "scopes",
Value: "",
Usage: "Comma separated list of scopes to apply to access token",
},
},
Action: runGenerateAccessToken,
}
@ -698,9 +703,15 @@ func runGenerateAccessToken(c *cli.Context) error {
return err
}
accessTokenScope, err := auth_model.AccessTokenScope(c.String("scopes")).Normalize()
if err != nil {
return err
}
t := &auth_model.AccessToken{
Name: c.String("token-name"),
UID: user.ID,
Name: c.String("token-name"),
UID: user.ID,
Scope: accessTokenScope,
}
if err := auth_model.NewAccessToken(t); err != nil {
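With the `--scopes` flag added above, a scoped token can be generated from the command line. A sketch of the invocation — the `--username` flag and the scope names shown are assumptions based on the existing command, not taken from this diff:

```shell
# Generate a token limited to the (assumed) "repo" and "user" scopes and print only its value
gitea admin user generate-access-token --username exampleuser \
    --token-name ci-token --scopes repo,user --raw
```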


@ -185,6 +185,7 @@ Gitea or set your environment appropriately.`, "")
userID, _ := strconv.ParseInt(os.Getenv(repo_module.EnvPusherID), 10, 64)
prID, _ := strconv.ParseInt(os.Getenv(repo_module.EnvPRID), 10, 64)
deployKeyID, _ := strconv.ParseInt(os.Getenv(repo_module.EnvDeployKeyID), 10, 64)
actionPerm, _ := strconv.ParseInt(os.Getenv(repo_module.EnvActionPerm), 10, 64)
hookOptions := private.HookOptions{
UserID: userID,
@ -194,6 +195,7 @@ Gitea or set your environment appropriately.`, "")
GitPushOptions: pushOptions(),
PullRequestID: prID,
DeployKeyID: deployKeyID,
ActionPerm: int(actionPerm),
}
scanner := bufio.NewScanner(os.Stdin)

41 contrib/backport/README Normal file

@ -0,0 +1,41 @@
`backport`
==========
`backport` is a command to help create backports of PRs. It backports a
provided PR from main on to a released version.
It will create a backport branch, cherry-pick the PR's merge commit, adjust
the commit message and then push this back up to your fork's remote.
The default version will read from `docs/config.yml`. You can override this
using the option `--version`.
The upstream branches will be fetched using the remote `origin`. This can
be overridden using `--upstream`, and fetching can be avoided using
`--no-fetch`.
By default the branch created will be called `backport-$PR-$VERSION`. You
can override this using the option `--backport-branch`. This branch will
be created from `--release-branch` which is `release/$(VERSION)`
by default and will be pulled from `$(UPSTREAM)`.
The merge-commit as determined by the github API will be used as the SHA to
cherry-pick. You can override this using `--cherry-pick`.
The commit message will be amended to add the `Backport` header.
`--no-amend-message` can be set to stop this from happening.
If cherry-pick is successful the backported branch will be pushed up to your
fork using your remote. These will be determined using `git remote -v`. You
can set your fork name using `--fork-user` and your remote name using
`--remote`. You can avoid pushing using `--no-push`.
If the push is successful, `xdg-open` will be called to open a backport url.
You can stop this using `--no-xdg-open`.
Installation
============
```bash
go install contrib/backport/backport.go
```
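Once installed (the resulting binary is named `backport`), a typical invocation might look like the following; the PR number and fork user are hypothetical, and the flags are the ones documented above:

```bash
# Backport hypothetical PR 12345 onto release/v1.18 and push the branch to your fork
backport --version v1.18 --fork-user yourname 12345
```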


@ -0,0 +1,438 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"path"
"strconv"
"strings"
"syscall"
"github.com/google/go-github/v45/github"
"github.com/urfave/cli"
"gopkg.in/yaml.v3"
)
const defaultVersion = "v1.18" // to backport to
func main() {
app := cli.NewApp()
app.Name = "backport"
app.Usage = "Backport provided PR-number on to the current or previous released version"
app.Description = `Backport will look-up the PR in Gitea's git log and attempt to cherry-pick it on the current version`
app.ArgsUsage = "<PR-to-backport>"
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "version",
Usage: "Version branch to backport on to",
},
cli.StringFlag{
Name: "upstream",
Value: "origin",
Usage: "Upstream remote for the Gitea upstream",
},
cli.StringFlag{
Name: "release-branch",
Value: "",
Usage: "Release branch to backport on. Will default to release/<version>",
},
cli.StringFlag{
Name: "cherry-pick",
Usage: "SHA to cherry-pick as backport",
},
cli.StringFlag{
Name: "backport-branch",
Usage: "Backport branch to backport on to (default: backport-<pr>-<version>",
},
cli.StringFlag{
Name: "remote",
Value: "",
Usage: "Remote for your fork of the Gitea upstream",
},
cli.StringFlag{
Name: "fork-user",
Value: "",
Usage: "Forked user name on Github",
},
cli.BoolFlag{
Name: "no-fetch",
Usage: "Set this flag to prevent fetch of remote branches",
},
cli.BoolFlag{
Name: "no-amend-message",
Usage: "Set this flag to prevent automatic amendment of the commit message",
},
cli.BoolFlag{
Name: "no-push",
Usage: "Set this flag to prevent pushing the backport up to your fork",
},
cli.BoolFlag{
Name: "no-xdg-open",
Usage: "Set this flag to not use xdg-open to open the PR URL",
},
}
cli.AppHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.HelpName}} {{if .VisibleFlags}}[options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
{{if len .Authors}}
AUTHOR:
{{range .Authors}}{{ . }}{{end}}
{{end}}{{if .Commands}}
OPTIONS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
`
app.Action = runBackport
if err := app.Run(os.Args); err != nil {
fmt.Fprintf(os.Stderr, "Unable to backport: %v\n", err)
}
}
func runBackport(c *cli.Context) error {
ctx, cancel := installSignals()
defer cancel()
version := c.String("version")
if version == "" {
version = readVersion()
}
if version == "" {
version = defaultVersion
}
upstream := c.String("upstream")
if upstream == "" {
upstream = "origin"
}
forkUser := c.String("fork-user")
remote := c.String("remote")
if remote == "" && !c.Bool("--no-push") {
var err error
remote, forkUser, err = determineRemote(ctx, forkUser)
if err != nil {
return err
}
}
upstreamReleaseBranch := c.String("release-branch")
if upstreamReleaseBranch == "" {
upstreamReleaseBranch = path.Join("release", version)
}
localReleaseBranch := path.Join(upstream, upstreamReleaseBranch)
args := c.Args()
if len(args) == 0 {
return fmt.Errorf("no PR number provided\nProvide a PR number to backport")
} else if len(args) != 1 {
return fmt.Errorf("multiple PRs provided %v\nOnly a single PR can be backported at a time", args)
}
pr := args[0]
backportBranch := c.String("backport-branch")
if backportBranch == "" {
backportBranch = "backport-" + pr + "-" + version
}
fmt.Printf("* Backporting %s to %s as %s\n", pr, localReleaseBranch, backportBranch)
sha := c.String("cherry-pick")
if sha == "" {
var err error
sha, err = determineSHAforPR(ctx, pr)
if err != nil {
return err
}
}
if sha == "" {
return fmt.Errorf("unable to determine sha for cherry-pick of %s", pr)
}
if !c.Bool("no-fetch") {
if err := fetchRemoteAndMain(ctx, upstream, upstreamReleaseBranch); err != nil {
return err
}
}
if err := checkoutBackportBranch(ctx, backportBranch, localReleaseBranch); err != nil {
return err
}
if err := cherrypick(ctx, sha); err != nil {
return err
}
if !c.Bool("no-amend-message") {
if err := amendCommit(ctx, pr); err != nil {
return err
}
}
if !c.Bool("no-push") {
url := "https://github.com/go-gitea/gitea/compare/" + upstreamReleaseBranch + "..." + forkUser + ":" + backportBranch
if err := gitPushUp(ctx, remote, backportBranch); err != nil {
return err
}
if !c.Bool("no-xdg-open") {
if err := xdgOpen(ctx, url); err != nil {
return err
}
} else {
fmt.Printf("* Navigate to %s to open PR\n", url)
}
}
return nil
}
func xdgOpen(ctx context.Context, url string) error {
fmt.Printf("* `xdg-open %s`\n", url)
out, err := exec.CommandContext(ctx, "xdg-open", url).Output()
if err != nil {
fmt.Fprintf(os.Stderr, "%s", string(out))
return fmt.Errorf("unable to xdg-open to %s: %w", url, err)
}
return nil
}
func gitPushUp(ctx context.Context, remote, backportBranch string) error {
fmt.Printf("* `git push -u %s %s`\n", remote, backportBranch)
out, err := exec.CommandContext(ctx, "git", "push", "-u", remote, backportBranch).Output()
if err != nil {
fmt.Fprintf(os.Stderr, "%s", string(out))
return fmt.Errorf("unable to push up to %s: %w", remote, err)
}
return nil
}
func amendCommit(ctx context.Context, pr string) error {
fmt.Printf("* Amending commit to prepend `Backport #%s` to body\n", pr)
out, err := exec.CommandContext(ctx, "git", "log", "-1", "--pretty=format:%B").Output()
if err != nil {
fmt.Fprintf(os.Stderr, "%s", string(out))
return fmt.Errorf("unable to get last log message: %w", err)
}
parts := strings.SplitN(string(out), "\n", 2)
if len(parts) != 2 {
return fmt.Errorf("unable to interpret log message:\n%s", string(out))
}
subject, body := parts[0], parts[1]
if !strings.HasSuffix(subject, " (#"+pr+")") {
subject = subject + " (#" + pr + ")"
}
out, err = exec.CommandContext(ctx, "git", "commit", "--amend", "-m", subject+"\n\nBackport #"+pr+"\n"+body).Output()
if err != nil {
fmt.Fprintf(os.Stderr, "%s", string(out))
return fmt.Errorf("unable to amend last log message: %w", err)
}
return nil
}
func cherrypick(ctx context.Context, sha string) error {
// Check if a CHERRY_PICK_HEAD exists
if _, err := os.Stat(".git/CHERRY_PICK_HEAD"); err == nil {
// Assume that we are in the middle of cherry-pick - continue it
fmt.Println("* Attempting git cherry-pick --continue")
out, err := exec.CommandContext(ctx, "git", "cherry-pick", "--continue").Output()
if err != nil {
fmt.Fprintf(os.Stderr, "git cherry-pick --continue failed:\n%s\n", string(out))
return fmt.Errorf("unable to continue cherry-pick: %w", err)
}
return nil
}
fmt.Printf("* Attempting git cherry-pick %s\n", sha)
out, err := exec.CommandContext(ctx, "git", "cherry-pick", sha).Output()
if err != nil {
fmt.Fprintf(os.Stderr, "git cherry-pick %s failed:\n%s\n", sha, string(out))
return fmt.Errorf("git cherry-pick %s failed: %w", sha, err)
}
return nil
}
func checkoutBackportBranch(ctx context.Context, backportBranch, releaseBranch string) error {
out, err := exec.CommandContext(ctx, "git", "branch", "--show-current").Output()
if err != nil {
return fmt.Errorf("unable to check current branch %w", err)
}
currentBranch := strings.TrimSpace(string(out))
fmt.Printf("* Current branch is %s\n", currentBranch)
if currentBranch == backportBranch {
fmt.Printf("* Current branch is %s - not checking out\n", currentBranch)
return nil
}
if _, err := exec.CommandContext(ctx, "git", "rev-list", "-1", backportBranch).Output(); err == nil {
fmt.Printf("* Branch %s already exists. Checking it out...\n", backportBranch)
return exec.CommandContext(ctx, "git", "checkout", "-f", backportBranch).Run()
}
fmt.Printf("* `git checkout -b %s %s`\n", backportBranch, releaseBranch)
return exec.CommandContext(ctx, "git", "checkout", "-b", backportBranch, releaseBranch).Run()
}
func fetchRemoteAndMain(ctx context.Context, remote, releaseBranch string) error {
fmt.Printf("* `git fetch %s main`\n", remote)
out, err := exec.CommandContext(ctx, "git", "fetch", remote, "main").Output()
if err != nil {
fmt.Println(string(out))
return fmt.Errorf("unable to fetch %s from %s: %w", "main", remote, err)
}
fmt.Println(string(out))
fmt.Printf("* `git fetch %s %s`\n", remote, releaseBranch)
out, err = exec.CommandContext(ctx, "git", "fetch", remote, releaseBranch).Output()
if err != nil {
fmt.Println(string(out))
return fmt.Errorf("unable to fetch %s from %s: %w", releaseBranch, remote, err)
}
fmt.Println(string(out))
return nil
}
func determineRemote(ctx context.Context, forkUser string) (string, string, error) {
out, err := exec.CommandContext(ctx, "git", "remote", "-v").Output()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to list git remotes:\n%s\n", string(out))
return "", "", fmt.Errorf("unable to determine forked remote: %w", err)
}
lines := strings.Split(string(out), "\n")
for _, line := range lines {
fields := strings.Split(line, "\t")
// skip malformed or empty lines (e.g. the trailing newline from `git remote -v`)
if len(fields) < 2 {
continue
}
name, remote := fields[0], fields[1]
// only look at pushers
if !strings.HasSuffix(remote, " (push)") {
continue
}
// only look at github.com pushes
if !strings.Contains(remote, "github.com") {
continue
}
// ignore go-gitea/gitea
if strings.Contains(remote, "go-gitea/gitea") {
continue
}
if !strings.Contains(remote, forkUser) {
continue
}
if strings.HasPrefix(remote, "git@github.com:") {
forkUser = strings.TrimPrefix(remote, "git@github.com:")
} else if strings.HasPrefix(remote, "https://github.com/") {
forkUser = strings.TrimPrefix(remote, "https://github.com/")
} else if strings.HasPrefix(remote, "https://www.github.com/") {
forkUser = strings.TrimPrefix(remote, "https://www.github.com/")
} else if forkUser == "" {
return "", "", fmt.Errorf("unable to extract forkUser from remote %s: %s", name, remote)
}
idx := strings.Index(forkUser, "/")
if idx >= 0 {
forkUser = forkUser[:idx]
}
return name, forkUser, nil
}
return "", "", fmt.Errorf("unable to find appropriate remote in:\n%s", string(out))
}
func readVersion() string {
bs, err := os.ReadFile("docs/config.yaml")
if err != nil {
if os.IsNotExist(err) {
log.Println("`docs/config.yaml` not present")
return ""
}
fmt.Fprintf(os.Stderr, "Unable to read `docs/config.yaml`: %v\n", err)
return ""
}
type params struct {
Version string
}
type docConfig struct {
Params params
}
dc := &docConfig{}
if err := yaml.Unmarshal(bs, dc); err != nil {
fmt.Fprintf(os.Stderr, "Unable to read `docs/config.yaml`: %v\n", err)
return ""
}
if dc.Params.Version == "" {
fmt.Fprintf(os.Stderr, "No version in `docs/config.yaml`")
return ""
}
version := dc.Params.Version
if version[0] != 'v' {
version = "v" + version
}
split := strings.SplitN(version, ".", 3)
return strings.Join(split[:2], ".")
}
func determineSHAforPR(ctx context.Context, prStr string) (string, error) {
prNum, err := strconv.Atoi(prStr)
if err != nil {
return "", err
}
client := github.NewClient(http.DefaultClient)
pr, _, err := client.PullRequests.Get(ctx, "go-gitea", "gitea", prNum)
if err != nil {
return "", err
}
if pr.Merged == nil || !*pr.Merged {
return "", fmt.Errorf("PR #%d is not yet merged - cannot determine sha to backport", prNum)
}
if pr.MergeCommitSHA != nil {
return *pr.MergeCommitSHA, nil
}
return "", nil
}
func installSignals() (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
// install notify
signalChannel := make(chan os.Signal, 1)
signal.Notify(
signalChannel,
syscall.SIGINT,
syscall.SIGTERM,
)
select {
case <-signalChannel:
case <-ctx.Done():
}
cancel()
signal.Reset()
}()
return ctx, cancel
}


@ -927,14 +927,18 @@ ROUTER = console
;USE_COMPAT_SSH_URI = false
;;
;; Close issues as long as a commit on any branch marks it as fixed
;; Comma separated list of globally disabled repo units. Allowed values: repo.issues, repo.ext_issues, repo.pulls, repo.wiki, repo.ext_wiki, repo.projects
;; Comma separated list of globally disabled repo units. Allowed values: repo.issues, repo.ext_issues, repo.pulls, repo.wiki, repo.ext_wiki, repo.projects, repo.packages
;DISABLED_REPO_UNITS =
;;
;; Comma separated list of default repo units. Allowed values: repo.code, repo.releases, repo.issues, repo.pulls, repo.wiki, repo.projects.
;; Comma separated list of default new repo units. Allowed values: repo.code, repo.releases, repo.issues, repo.pulls, repo.wiki, repo.projects, repo.packages.
;; Note: Code and Releases can currently not be deactivated. If you specify default repo units you should still list them for future compatibility.
;; External wiki and issue tracker can't be enabled by default as it requires additional settings.
;; Disabled repo units will not be added to new repositories regardless if it is in the default list.
;DEFAULT_REPO_UNITS = repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects
;DEFAULT_REPO_UNITS = repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages
;;
;; Comma separated list of default forked repo units.
;; The set of allowed values and rules are the same as DEFAULT_REPO_UNITS.
;DEFAULT_FORK_REPO_UNITS = repo.code,repo.pulls
;;
;; Prefix archive files by placing them in a directory named after the repository
;PREFIX_ARCHIVE_FILES = true
@ -1218,10 +1222,6 @@ ROUTER = console
;;
;; Whether to enable a Service Worker to cache frontend assets
;USE_SERVICE_WORKER = false
;;
;; Whether to only show relevant repos on the explore page when no keyword is specified and default sorting is used.
;; A repo is considered irrelevant if it's a fork or if it has no metadata (no description, no icon, no topic).
;ONLY_SHOW_RELEVANT_REPOS = false
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@ -2458,6 +2458,8 @@ ROUTER = console
;LIMIT_SIZE_COMPOSER = -1
;; Maximum size of a Conan upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_CONAN = -1
;; Maximum size of a Conda upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_CONDA = -1
;; Maximum size of a Container upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
;LIMIT_SIZE_CONTAINER = -1
;; Maximum size of a Generic upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
@ -2550,3 +2552,19 @@ ROUTER = console
;PROXY_URL =
;; Comma separated list of host names requiring proxy. Glob patterns (*) are accepted; use ** to match all hosts.
;PROXY_HOSTS =
; [actions]
;; Enable/Disable actions capabilities
;ENABLED = false
;; Default address to get action plugins, e.g. the default value means downloading from "https://gitea.com/actions/checkout" for "uses: actions/checkout@v3"
;DEFAULT_ACTIONS_URL = https://gitea.com
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; settings for action logs, will override storage setting
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;[storage.actions_log]
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; storage type
;STORAGE_TYPE = local


@ -37,7 +37,7 @@ PROVIDER_CONFIG = $GITEA_WORK_DIR/data/sessions
[picture]
AVATAR_UPLOAD_PATH = $GITEA_WORK_DIR/data/avatars
REPOSITORY_AVATAR_UPLOAD_PATH = $GITEA_WORK_DIR/data/gitea/repo-avatars
REPOSITORY_AVATAR_UPLOAD_PATH = $GITEA_WORK_DIR/data/repo-avatars
[attachment]
PATH = $GITEA_WORK_DIR/data/attachments


@ -19,8 +19,8 @@ params:
author: The Gitea Authors
website: https://docs.gitea.io
version: 1.18.1
minGoVersion: 1.18
goVersion: 1.19
minGoVersion: 1.19
goVersion: 1.20
minNodeVersion: 16
search: nav
repo: "https://github.com/go-gitea/gitea"


@ -104,7 +104,8 @@ In addition there is _`StaticRootPath`_ which can be set as a built-in at build
- `ENABLE_PUSH_CREATE_USER`: **false**: Allow users to push local repositories to Gitea and have them automatically created for a user.
- `ENABLE_PUSH_CREATE_ORG`: **false**: Allow users to push local repositories to Gitea and have them automatically created for an org.
- `DISABLED_REPO_UNITS`: **_empty_**: Comma separated list of globally disabled repo units. Allowed values: \[repo.issues, repo.ext_issues, repo.pulls, repo.wiki, repo.ext_wiki, repo.projects\]
- `DEFAULT_REPO_UNITS`: **repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects**: Comma separated list of default repo units. Allowed values: \[repo.code, repo.releases, repo.issues, repo.pulls, repo.wiki, repo.projects\]. Note: Code and Releases can currently not be deactivated. If you specify default repo units you should still list them for future compatibility. External wiki and issue tracker can't be enabled by default as it requires additional settings. Disabled repo units will not be added to new repositories regardless if it is in the default list.
- `DEFAULT_REPO_UNITS`: **repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages**: Comma separated list of default new repo units. Allowed values: \[repo.code, repo.releases, repo.issues, repo.pulls, repo.wiki, repo.projects\]. Note: Code and Releases can currently not be deactivated. If you specify default repo units you should still list them for future compatibility. External wiki and issue tracker can't be enabled by default as it requires additional settings. Disabled repo units will not be added to new repositories regardless if it is in the default list.
- `DEFAULT_FORK_REPO_UNITS`: **repo.code,repo.pulls**: Comma separated list of default forked repo units. The set of allowed values and rules is the same as `DEFAULT_REPO_UNITS`.
- `PREFIX_ARCHIVE_FILES`: **true**: Prefix archive files by placing them in a directory named after the repository.
- `DISABLE_MIGRATIONS`: **false**: Disable migrating feature.
- `DISABLE_STARS`: **false**: Disable stars feature.
@ -230,8 +231,6 @@ The following configuration set `Content-Type: application/vnd.android.package-a
- `DEFAULT_SHOW_FULL_NAME`: **false**: Whether the full name of the users should be shown where possible. If the full name isn't set, the username will be used.
- `SEARCH_REPO_DESCRIPTION`: **true**: Whether to search within description at repository search on explore page.
- `USE_SERVICE_WORKER`: **false**: Whether to enable a Service Worker to cache frontend assets.
- `ONLY_SHOW_RELEVANT_REPOS`: **false** Whether to only show relevant repos on the explore page when no keyword is specified and default sorting is used.
A repo is considered irrelevant if it's a fork or if it has no metadata (no description, no icon, no topic).
### UI - Admin (`ui.admin`)
@ -1214,6 +1213,7 @@ Task queue configuration has been moved to `queue.task`. However, the below conf
- `LIMIT_TOTAL_OWNER_SIZE`: **-1**: Maximum size of packages a single owner can use (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_COMPOSER`: **-1**: Maximum size of a Composer upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_CONAN`: **-1**: Maximum size of a Conan upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_CONDA`: **-1**: Maximum size of a Conda upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_CONTAINER`: **-1**: Maximum size of a Container upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_GENERIC`: **-1**: Maximum size of a Generic upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
- `LIMIT_SIZE_HELM`: **-1**: Maximum size of a Helm upload (`-1` means no limits, format `1000`, `1 MB`, `1 GiB`)
@ -1314,6 +1314,41 @@ PROXY_URL = socks://127.0.0.1:1080
PROXY_HOSTS = *.github.com
```
## Actions (`actions`)
- `ENABLED`: **false**: Enable/Disable actions capabilities
- `DEFAULT_ACTIONS_URL`: **https://gitea.com**: Default address to get action plugins, e.g. the default value means downloading from "https://gitea.com/actions/checkout" for "uses: actions/checkout@v3"
`DEFAULT_ACTIONS_URL` indicates where relative-path actions are fetched from, i.e. when a workflow file uses an action like
```yaml
name: versions
on:
push:
branches:
- main
- releases/*
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
```
Gitea then needs to know where to get `actions/checkout`: this setting is the default Git server it is fetched from, i.e. the repository is obtained via `git clone ${DEFAULT_ACTIONS_URL}/actions/checkout` and tag `v3` is fetched.
The default value is https://gitea.com, so that people who don't want to mirror these actions on their own Git instance don't have to.
People who want to run actions entirely inside their own network can change the value and copy all necessary action repositories onto their Git server.
A fully qualified form may be supported in future PRs, such as:
```yaml
steps:
- uses: gitea.com/actions/checkout@v3
```
although GitHub doesn't support this form.
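Putting the `actions` options together, a minimal `app.ini` snippet to enable the feature might look like this (values other than `ENABLED` are the documented defaults):

```ini
[actions]
; Enable the actions feature (disabled by default)
ENABLED = true
; Where "uses: actions/checkout@v3" is cloned from
DEFAULT_ACTIONS_URL = https://gitea.com
```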
## Other (`other`)
- `SHOW_FOOTER_BRANDING`: **false**: Show Gitea branding in the footer.


@ -54,7 +54,7 @@ To maintain understandable code and avoid circular dependencies it is important
### Package Dependencies
Since Golang don't support import cycles, we have to decide the package dependencies carefully. There are some levels between those packages. Below is the ideal package dependencies direction.
Since Golang doesn't support import cycles, we have to decide the package dependencies carefully. There are some levels between those packages. Below is the ideal package dependencies direction.
`cmd` -> `routers` -> `services` -> `models` -> `modules`


@ -329,3 +329,22 @@ Before activating SSPI single sign-on authentication (SSO) you have to prepare y
- You have added the URL of the web app to the `Local intranet zone`
- The clocks of the server and client should not differ with more than 5 minutes (depends on group policy)
- `Integrated Windows Authentication` should be enabled in Internet Explorer (under `Advanced settings`)
## Reverse Proxy
Gitea supports reverse proxy header authentication: it reads trusted headers for the login user name or user email address. This is not enabled by default; you can enable it with:
```ini
[service]
ENABLE_REVERSE_PROXY_AUTHENTICATION = true
```
The login user name is read from the `X-WEBAUTH-USER` header by default; you can change the header name with `REVERSE_PROXY_AUTHENTICATION_USER` in app.ini. If the user doesn't exist, you can enable automatic registration with `ENABLE_REVERSE_PROXY_AUTO_REGISTRATION=true`.
The login user email is read from the `X-WEBAUTH-EMAIL` header by default; you can change the header name with `REVERSE_PROXY_AUTHENTICATION_EMAIL` in app.ini, and the email header can be disabled with `ENABLE_REVERSE_PROXY_EMAIL`.
If `ENABLE_REVERSE_PROXY_FULL_NAME=true` is set, the full name expected in the `X-WEBAUTH-FULLNAME` header will be assigned to the user when the account is auto-created. You can also change the header name with `REVERSE_PROXY_AUTHENTICATION_FULL_NAME`.
You can also limit which reverse proxy IP addresses are trusted with `REVERSE_PROXY_TRUSTED_PROXIES`, whose default value is `127.0.0.0/8,::1/128`. With `REVERSE_PROXY_LIMIT` you can limit how many levels of trusted proxies are allowed.
Notice: Reverse Proxy Auth doesn't support the API. You still need an access token or basic auth to make API requests.
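Putting the options above together, a sketch of the relevant `app.ini` settings — the values shown are the documented defaults, and the `[service]`/`[security]` placement follows the config cheat sheet, so double-check it against your Gitea version:

```ini
[service]
ENABLE_REVERSE_PROXY_AUTHENTICATION = true
ENABLE_REVERSE_PROXY_AUTO_REGISTRATION = true
ENABLE_REVERSE_PROXY_EMAIL = true
ENABLE_REVERSE_PROXY_FULL_NAME = true

[security]
REVERSE_PROXY_AUTHENTICATION_USER = X-WEBAUTH-USER
REVERSE_PROXY_AUTHENTICATION_EMAIL = X-WEBAUTH-EMAIL
REVERSE_PROXY_AUTHENTICATION_FULL_NAME = X-WEBAUTH-FULLNAME
REVERSE_PROXY_TRUSTED_PROXIES = 127.0.0.0/8,::1/128
REVERSE_PROXY_LIMIT = 1
```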


@ -15,4 +15,21 @@ menu:
# Authentication
## TBD
## Reverse Proxy Authentication
Gitea supports reverse proxy authentication by reading the login user name or email address from HTTP headers set by a reverse proxy. It is disabled by default; you can enable it with the following configuration:
```ini
[service]
ENABLE_REVERSE_PROXY_AUTHENTICATION = true
```
The default HTTP header for the login user name is `X-WEBAUTH-USER`; you can change it via `REVERSE_PROXY_AUTHENTICATION_USER`. If the user does not exist, the account can be created automatically once you set `ENABLE_REVERSE_PROXY_AUTO_REGISTRATION=true`.
The default HTTP header for the login user email is `X-WEBAUTH-EMAIL`; you can change it via `REVERSE_PROXY_AUTHENTICATION_EMAIL`. If the user does not exist, the account can be created automatically once you set `ENABLE_REVERSE_PROXY_AUTO_REGISTRATION=true`. This header can also be enabled or disabled via `ENABLE_REVERSE_PROXY_EMAIL`.
If `ENABLE_REVERSE_PROXY_FULL_NAME=true` is set, the user's full name is read from `X-WEBAUTH-FULLNAME` and used as the full name when the account is auto-created; you can change the header name via `REVERSE_PROXY_AUTHENTICATION_FULL_NAME`.
You can also restrict the reverse proxy's IP address range via `REVERSE_PROXY_TRUSTED_PROXIES` for better security; the default value is `127.0.0.0/8,::1/128`. `REVERSE_PROXY_LIMIT` sets how many levels of reverse proxies are trusted.
Note: reverse proxy authentication does not support the API; API requests still need an access token for authentication.


@ -56,7 +56,7 @@ _Symbols used in table:_
| Deploy Tokens | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Repository Tokens with write rights | ✓ | ✘ | ✓ | ✓ | ✓ | ✓ | ✓ |
| RSS Feeds | ✓ | ✘ | ✓ | ✘ | ✘ | ✘ | ✘ |
| Built-in CI/CD | [](https://github.com/go-gitea/gitea/issues/13539) | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ |
| Built-in CI/CD | | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ |
| Subgroups: groups within groups | [](https://github.com/go-gitea/gitea/issues/1872) | ✘ | ✘ | ✓ | ✓ | ✘ | ✓ |
| Interaction with other instances | [/](https://github.com/go-gitea/gitea/issues/18240) | ✘ | ✘ | ✘ | ✘ | ✘ | ✘ |
| Mermaid diagrams in Markdown | ✓ | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ |


@ -49,7 +49,7 @@ _表格中的符号含义:_
| 内置容器 Registry | ✓ | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ |
| 外部 Git 镜像 | ✓ | ✓ | ✘ | ✘ | ✓ | ✓ | ✓ |
| WebAuthn (2FA) | ✓ | ✘ | ✓ | ✓ | ✓ | ✓ | ? |
| 内置 CI/CD | | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ |
| 内置 CI/CD | | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ |
| 子组织:组织内的组织 | [](https://github.com/go-gitea/gitea/issues/1872) | ✘ | ✘ | ✓ | ✓ | ✘ | ✓ |
#### 代码管理


@ -51,7 +51,7 @@ menu:
| 內建 Container Registry | [](https://github.com/go-gitea/gitea/issues/2316) | ✘ | ✘ | ✓ | ✓ | ✘ | ✘ |
| 對外部 Git 鏡像 | ✓ | ✓ | ✘ | ✘ | ✓ | ✓ | ✓ |
| FIDO (2FA) | ✓ | ✘ | ✓ | ✓ | ✓ | ✓ | ✘ |
| 內建 CI/CD | | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ |
| 內建 CI/CD | | ✘ | ✓ | ✓ | ✓ | ✘ | ✘ |
| 子群組: 群組中的群組 | ✘ | ✘ | ✘ | ✓ | ✓ | ✘ | ✓ |
## 程式碼管理


@ -0,0 +1,85 @@
---
date: "2022-12-28T00:00:00+00:00"
title: "Conda Packages Repository"
slug: "packages/conda"
draft: false
toc: false
menu:
sidebar:
parent: "packages"
name: "Conda"
weight: 25
identifier: "conda"
---
# Conda Packages Repository
Publish [Conda](https://docs.conda.io/en/latest/) packages for your user or organization.
**Table of Contents**
{{< toc >}}
## Requirements
To work with the Conda package registry, you need to use [conda](https://docs.conda.io/projects/conda/en/stable/user-guide/install/index.html).
## Configuring the package registry
To register the package registry and provide credentials, edit your `.condarc` file:
```yaml
channel_alias: https://gitea.example.com/api/packages/{owner}/conda
channels:
- https://gitea.example.com/api/packages/{owner}/conda
default_channels:
- https://gitea.example.com/api/packages/{owner}/conda
```
| Placeholder | Description |
| ------------ | ----------- |
| `owner` | The owner of the package. |
See the [official documentation](https://conda.io/projects/conda/en/latest/user-guide/configuration/use-condarc.html) for explanations of the individual settings.
If you need to provide credentials, you may embed them as part of the channel url (`https://user:password@gitea.example.com/...`).
## Publish a package
To publish a package, perform an HTTP `PUT` operation with the package content in the request body.
```
PUT https://gitea.example.com/api/packages/{owner}/conda/{channel}/{filename}
```
| Placeholder | Description |
| ------------ | ----------- |
| `owner` | The owner of the package. |
| `channel` | The [channel](https://conda.io/projects/conda/en/latest/user-guide/concepts/channels.html) of the package. (optional) |
| `filename` | The name of the file. |
Example request using HTTP Basic authentication:
```shell
curl --user your_username:your_password_or_token \
--upload-file path/to/package-1.0.conda \
https://gitea.example.com/api/packages/testuser/conda/package-1.0.conda
```
You cannot publish a package if a package of the same name and version already exists. You must delete the existing package first.
## Install a package
To install a package from the package registry, execute one of the following commands:
```shell
conda install {package_name}
conda install {package_name}={package_version}
conda install -c {channel} {package_name}
```
| Parameter | Description |
| ----------------- | ----------- |
| `package_name` | The package name. |
| `package_version` | The package version. |
| `channel` | The channel of the package. (optional) |
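For a concrete, hypothetical example with the `.condarc` above pointing at the `testuser` owner (package and channel names are illustrative):

```shell
# Install version 1.0 of a hypothetical package from the default channel
conda install mypackage=1.0
# Install it from an explicit sub-channel named "stable"
conda install -c stable mypackage
```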


@ -28,6 +28,7 @@ The following package managers are currently supported:
| ---- | -------- | -------------- |
| [Composer]({{< relref "doc/packages/composer.en-us.md" >}}) | PHP | `composer` |
| [Conan]({{< relref "doc/packages/conan.en-us.md" >}}) | C++ | `conan` |
| [Conda]({{< relref "doc/packages/conda.en-us.md" >}}) | - | `conda` |
| [Container]({{< relref "doc/packages/container.en-us.md" >}}) | - | any OCI compliant client |
| [Generic]({{< relref "doc/packages/generic.en-us.md" >}}) | - | any HTTP client |
| [Helm]({{< relref "doc/packages/helm.en-us.md" >}}) | - | any HTTP client, `cm-push` |


@ -71,7 +71,7 @@ The patterns are case-insensitive which matches the behaviour of the package reg
### How the cleanup rules work
The cleanup rules are part of the [clean up job]({{< relref "doc/advanced/config-cheat-sheet.en-us.md#cron---cleanup-expired-packages-croncleanup_packages" >}}) and run periodicly.
The cleanup rules are part of the [clean up job]({{< relref "doc/advanced/config-cheat-sheet.en-us.md#cron---cleanup-expired-packages-croncleanup_packages" >}}) and run periodically.
The cleanup rule:


@ -1,6 +1,6 @@
---
date: "2022-12-19T21:26:00+08:00"
title: "Encrypted secrets"
title: "Secrets"
slug: "secrets/overview"
draft: false
toc: false
@ -12,24 +12,24 @@ menu:
identifier: "overview"
---
# Encrypted secrets
# Secrets
Encrypted secrets allow you to store sensitive information in your organization or repository.
Secrets allow you to store sensitive information in your user, organization or repository.
Secrets are available on Gitea 1.19+.
# Naming your secrets
The following rules apply to secret names:
Secret names can only contain alphanumeric characters (`[a-z]`, `[A-Z]`, `[0-9]`) or underscores (`_`). Spaces are not allowed.
- Secret names can only contain alphanumeric characters (`[a-z]`, `[A-Z]`, `[0-9]`) or underscores (`_`). Spaces are not allowed.
Secret names must not start with the `GITHUB_` and `GITEA_` prefix.
- Secret names must not start with the `GITHUB_` and `GITEA_` prefix.
Secret names must not start with a number.
- Secret names must not start with a number.
Secret names are not case-sensitive.
- Secret names are not case-sensitive.
Secret names must be unique at the level they are created at.
- Secret names must be unique at the level they are created at.
For example, a secret created at the repository level must have a unique name in that repository, and a secret created at the organization level must have a unique name at that level.
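For illustration, a secret named `DEPLOY_TOKEN` could then be referenced from a workflow step. The syntax shown is the GitHub-Actions-compatible form used by Gitea Actions; the workflow, script and secret name are hypothetical:

```yaml
name: deploy
on: [push]
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Deploy using a stored secret
        run: ./deploy.sh
        env:
          DEPLOY_TOKEN: ${{ secrets.DEPLOY_TOKEN }}
```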


@ -382,7 +382,7 @@ Currently there are a check list below:
Sometimes, if you moved or renamed your Gitea binary during an upgrade and haven't run `Update the '.ssh/authorized_keys' file with Gitea SSH keys. (Not needed for the built-in SSH server.)` from your Admin Panel, then all pulls/pushes via SSH will stop working.
This check will help you to check if it works well.
For contributors, if you want to add more checks, you can wrie ad new function like `func(ctx *cli.Context) ([]string, error)` and
For contributors, if you want to add more checks, you can write a new function like `func(ctx *cli.Context) ([]string, error)` and
append it to `doctor.go`.
```go


@ -24,7 +24,7 @@ in search for references. These references will be shown as links in the Issue V
and, in some cases, produce certain _actions_.
Likewise, commit messages are parsed when they are listed, and _actions_
are can be triggered when they are pushed to the main branch.
can be triggered when they are pushed to the main branch.
To prevent the creation of unintended references, there are certain rules
for them to be recognized. For example, they should not be included inside code

21 go.mod

@ -1,8 +1,9 @@
module code.gitea.io/gitea
go 1.18
go 1.19
require (
code.gitea.io/actions-proto-go v0.2.0
code.gitea.io/gitea-vet v0.2.2
code.gitea.io/sdk/gitea v0.15.1
codeberg.org/gusted/mcaptcha v0.0.0-20220723083913-4f3072e1d570
@ -17,6 +18,7 @@ require (
github.com/PuerkitoBio/goquery v1.8.0
github.com/alecthomas/chroma/v2 v2.4.0
github.com/blevesearch/bleve/v2 v2.3.6
github.com/bufbuild/connect-go v1.3.1
github.com/buildkite/terminal-to-html/v3 v3.7.0
github.com/caddyserver/certmagic v0.17.2
github.com/chi-middleware/proxy v1.1.1
@ -24,6 +26,7 @@ require (
github.com/dimiro1/reply v0.0.0-20200315094148-d0136a4c9e21
github.com/djherbis/buffer v1.2.0
github.com/djherbis/nio/v3 v3.0.1
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5
github.com/dustin/go-humanize v1.0.0
github.com/editorconfig/editorconfig-core-go/v2 v2.5.1
github.com/emersion/go-imap v1.2.1
@ -74,6 +77,7 @@ require (
github.com/microcosm-cc/bluemonday v1.0.21
github.com/minio/minio-go/v7 v7.0.46
github.com/msteinert/pam v1.1.0
github.com/nektos/act v0.0.0
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/niklasfasching/go-org v1.6.5
github.com/oliamb/cutter v0.2.2
@ -97,14 +101,14 @@ require (
github.com/yuin/goldmark v1.5.3
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20220924101305-151362477c87
github.com/yuin/goldmark-meta v1.1.0
go.jolheiser.com/hcaptcha v0.0.4
go.jolheiser.com/pwn v0.0.3
golang.org/x/crypto v0.4.0
golang.org/x/net v0.4.0
golang.org/x/oauth2 v0.3.0
golang.org/x/sys v0.3.0
golang.org/x/text v0.5.0
golang.org/x/tools v0.1.12
google.golang.org/grpc v1.47.0
google.golang.org/protobuf v1.28.1
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
gopkg.in/ini.v1 v1.67.0
gopkg.in/yaml.v3 v3.0.1
@ -159,14 +163,15 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
github.com/go-ap/errors v0.0.0-20221205040414-01c1adfc98ea // indirect
github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
github.com/go-enry/go-oniguruma v1.2.1 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.3 // indirect
github.com/go-openapi/inflect v0.19.0 // indirect
@ -208,6 +213,7 @@ require (
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/markbates/going v1.0.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mholt/acmez v1.0.4 // indirect
@ -232,7 +238,9 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rhysd/actionlint v1.6.22 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/robfig/cron v1.2.0 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/rs/xid v1.4.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@ -261,9 +269,10 @@ require (
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.28.1 // indirect
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@ -277,6 +286,8 @@ replace github.com/go-ap/activitypub => gitea.com/xy/activitypub v0.0.0-20221126
replace github.com/blevesearch/zapx/v15 v15.3.6 => github.com/zeripath/zapx/v15 v15.3.6-alignment-fix
replace github.com/nektos/act => gitea.com/gitea/act v0.234.2-0.20230131074955-e46ede1b1744
exclude github.com/gofrs/uuid v3.2.0+incompatible
exclude github.com/gofrs/uuid v4.0.0+incompatible

28 go.sum

@ -58,6 +58,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
code.gitea.io/actions-proto-go v0.2.0 h1:nYh9nhhfk67YA4wVNLsCzd//RCvXnljwXClJ33+HPVk=
code.gitea.io/actions-proto-go v0.2.0/go.mod h1:00ys5QDo1iHN1tHNvvddAcy2W/g+425hQya1cCSvq9A=
code.gitea.io/gitea-vet v0.2.1/go.mod h1:zcNbT/aJEmivCAhfmkHOlT645KNOf9W2KnkLgFjGGfE=
code.gitea.io/gitea-vet v0.2.2 h1:TEOV/Glf38iGmKzKP0EB++Z5OSL4zGg3RrAvlwaMuvk=
code.gitea.io/gitea-vet v0.2.2/go.mod h1:zcNbT/aJEmivCAhfmkHOlT645KNOf9W2KnkLgFjGGfE=
@ -68,6 +70,8 @@ codeberg.org/gusted/mcaptcha v0.0.0-20220723083913-4f3072e1d570/go.mod h1:IIAjsi
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.sr.ht/~mariusor/go-xsd-duration v0.0.0-20220703122237-02e73435a078 h1:cliQ4HHsCo6xi2oWZYKWW4bly/Ory9FuTpFPRxj/mAg=
git.sr.ht/~mariusor/go-xsd-duration v0.0.0-20220703122237-02e73435a078/go.mod h1:g/V2Hjas6Z1UHUp4yIx6bATpNzJ7DYtD0FG3+xARWxs=
gitea.com/gitea/act v0.234.2-0.20230131074955-e46ede1b1744 h1:cqzKmGlX0wynSXO04NILpL25eBGwogDrKpkkbwmIpj4=
gitea.com/gitea/act v0.234.2-0.20230131074955-e46ede1b1744/go.mod h1:2C/WbTalu1VPNgbVaZJaZDzlOtAKqkXJhdOClxkMy14=
gitea.com/go-chi/binding v0.0.0-20221013104517-b29891619681 h1:MMSPgnVULVwV9kEBgvyEUhC9v/uviZ55hPJEMjpbNR4=
gitea.com/go-chi/binding v0.0.0-20221013104517-b29891619681/go.mod h1:77TZu701zMXWJFvB8gvTbQ92zQ3DQq/H7l5wAEjQRKc=
gitea.com/go-chi/cache v0.0.0-20210110083709-82c4c9ce2d5e/go.mod h1:k2V/gPDEtXGjjMGuBJiapffAXTv76H4snSmlJRLUhH0=
@ -227,6 +231,8 @@ github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/bufbuild/connect-go v1.3.1 h1:doJP6Q8Ypg6haUT2IAZJPWHUN9rAUp+F9MfK7yhu1zs=
github.com/bufbuild/connect-go v1.3.1/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I=
github.com/buildkite/terminal-to-html/v3 v3.7.0 h1:chdLUSpiOj/A4v3dzxyOqixXI6aw7IDA6Dk77FXsvNU=
github.com/buildkite/terminal-to-html/v3 v3.7.0/go.mod h1:g0ME1XqbkBSgXR9YmlIHcJIjzaMyWW+HbsG0rPb5puo=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
@ -354,6 +360,7 @@ github.com/ethantkoenig/rupture v1.0.1 h1:6aAXghmvtnngMgQzy7SMGdicMvkV86V4n9fT0m
github.com/ethantkoenig/rupture v1.0.1/go.mod h1:Sjqo/nbffZp1pVVXNGhpugIjsWmuS9KiIB4GtpEBur4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@ -404,6 +411,8 @@ github.com/go-git/go-git/v5 v5.5.2/go.mod h1:BE5hUJ5yaV2YMxhmaP4l6RBQ08kMxKSPD4B
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
@ -881,7 +890,9 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@ -889,6 +900,8 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@ -1082,11 +1095,15 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rhysd/actionlint v1.6.22 h1:cAEf2PGNwJXhdcTVF2xS/0ORqWS+ueUHwjQYsqFsGSk=
github.com/rhysd/actionlint v1.6.22/go.mod h1:gIKOdxtV40mBOcD0ZR8EBa8NqjEXToAZioroS3oedMg=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -1267,10 +1284,6 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.jolheiser.com/hcaptcha v0.0.4 h1:RrDERcr/Tz/kWyJenjVtI+V09RtLinXxlAemiwN5F+I=
go.jolheiser.com/hcaptcha v0.0.4/go.mod h1:aw32WQOxnQZ6E06C0LypCf+sxNxPACyOnq+ZGnrIYho=
go.jolheiser.com/pwn v0.0.3 h1:MQowb3QvCL5r5NmHmCPxw93SdjfgJ0q6rAwYn4i1Hjg=
go.jolheiser.com/pwn v0.0.3/go.mod h1:/j5Dl8ftNqqJ8Dlx3YTrJV1wIR2lWOTyrNU3Qe7rk6I=
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
@ -1483,8 +1496,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde h1:ejfdSekXMDxDLbRrJMwUk6KnSLZ2McaUCVcIKM+N6jc=
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1828,6 +1842,7 @@ google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90 h1:4SPz2GL2CXJt28MTF8V6Ap/9ZiVbQlJeGSd9qtA7DLs=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -1865,6 +1880,7 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=

274
models/actions/run.go Normal file
View file

@ -0,0 +1,274 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"fmt"
"strings"
"time"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/json"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
webhook_module "code.gitea.io/gitea/modules/webhook"
"github.com/nektos/act/pkg/jobparser"
"xorm.io/builder"
)
// ActionRun represents a run of a workflow file
type ActionRun struct {
ID int64
Title string
RepoID int64 `xorm:"index unique(repo_index)"`
Repo *repo_model.Repository `xorm:"-"`
OwnerID int64 `xorm:"index"`
WorkflowID string `xorm:"index"` // the name of the workflow file
Index int64 `xorm:"index unique(repo_index)"` // a unique number for each run of a repository
TriggerUserID int64
TriggerUser *user_model.User `xorm:"-"`
Ref string
CommitSHA string
IsForkPullRequest bool
Event webhook_module.HookEventType
EventPayload string `xorm:"LONGTEXT"`
Status Status `xorm:"index"`
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated"`
}
func init() {
db.RegisterModel(new(ActionRun))
db.RegisterModel(new(ActionRunIndex))
}
func (run *ActionRun) HTMLURL() string {
if run.Repo == nil {
return ""
}
return fmt.Sprintf("%s/actions/runs/%d", run.Repo.HTMLURL(), run.Index)
}
func (run *ActionRun) Link() string {
if run.Repo == nil {
return ""
}
return fmt.Sprintf("%s/actions/runs/%d", run.Repo.Link(), run.Index)
}
// RefLink returns the URL of the run's ref
func (run *ActionRun) RefLink() string {
refName := git.RefName(run.Ref)
if refName.RefGroup() == "pull" {
return run.Repo.Link() + "/pulls/" + refName.ShortName()
}
return git.RefURL(run.Repo.Link(), run.Ref)
}
// PrettyRef returns "#id" for a pull request ref, or the ref's short name otherwise
func (run *ActionRun) PrettyRef() string {
refName := git.RefName(run.Ref)
if refName.RefGroup() == "pull" {
return "#" + strings.TrimSuffix(strings.TrimPrefix(run.Ref, git.PullPrefix), "/head")
}
return refName.ShortName()
}
// LoadAttributes loads Repo and TriggerUser if they are not already loaded
func (run *ActionRun) LoadAttributes(ctx context.Context) error {
if run == nil {
return nil
}
if run.Repo == nil {
repo, err := repo_model.GetRepositoryByID(ctx, run.RepoID)
if err != nil {
return err
}
run.Repo = repo
}
if err := run.Repo.LoadAttributes(ctx); err != nil {
return err
}
if run.TriggerUser == nil {
u, err := user_model.GetPossibleUserByID(ctx, run.TriggerUserID)
if err != nil {
return err
}
run.TriggerUser = u
}
return nil
}
func (run *ActionRun) Duration() time.Duration {
return calculateDuration(run.Started, run.Stopped, run.Status)
}
func (run *ActionRun) GetPushEventPayload() (*api.PushPayload, error) {
if run.Event == webhook_module.HookEventPush {
var payload api.PushPayload
if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
return nil, err
}
return &payload, nil
}
return nil, fmt.Errorf("event %s is not a push event", run.Event)
}
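// updateRepoRunsNumbers recalculates the repository's total and closed action run counters.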
func updateRepoRunsNumbers(ctx context.Context, repo *repo_model.Repository) error {
_, err := db.GetEngine(ctx).ID(repo.ID).
SetExpr("num_action_runs",
builder.Select("count(*)").From("action_run").
Where(builder.Eq{"repo_id": repo.ID}),
).
SetExpr("num_closed_action_runs",
builder.Select("count(*)").From("action_run").
Where(builder.Eq{
"repo_id": repo.ID,
}.And(
builder.In("status",
StatusSuccess,
StatusFailure,
StatusCancelled,
StatusSkipped,
),
),
),
).
Update(repo)
return err
}
// InsertRun inserts a run and its jobs in a single transaction
func InsertRun(ctx context.Context, run *ActionRun, jobs []*jobparser.SingleWorkflow) error {
ctx, commiter, err := db.TxContext(ctx)
if err != nil {
return err
}
defer commiter.Close()
index, err := db.GetNextResourceIndex(ctx, "action_run_index", run.RepoID)
if err != nil {
return err
}
run.Index = index
if run.Status.IsUnknown() {
run.Status = StatusWaiting
}
if err := db.Insert(ctx, run); err != nil {
return err
}
if run.Repo == nil {
repo, err := repo_model.GetRepositoryByID(ctx, run.RepoID)
if err != nil {
return err
}
run.Repo = repo
}
if err := updateRepoRunsNumbers(ctx, run.Repo); err != nil {
return err
}
runJobs := make([]*ActionRunJob, 0, len(jobs))
for _, v := range jobs {
id, job := v.Job()
needs := job.Needs()
job.EraseNeeds()
payload, _ := v.Marshal()
status := StatusWaiting
if len(needs) > 0 {
status = StatusBlocked
}
runJobs = append(runJobs, &ActionRunJob{
RunID: run.ID,
RepoID: run.RepoID,
OwnerID: run.OwnerID,
CommitSHA: run.CommitSHA,
IsForkPullRequest: run.IsForkPullRequest,
Name: job.Name,
WorkflowPayload: payload,
JobID: id,
Needs: needs,
RunsOn: job.RunsOn(),
Status: status,
})
}
if err := db.Insert(ctx, runJobs); err != nil {
return err
}
return commiter.Commit()
}
func GetRunByID(ctx context.Context, id int64) (*ActionRun, error) {
var run ActionRun
has, err := db.GetEngine(ctx).Where("id=?", id).Get(&run)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("run with id %d: %w", id, util.ErrNotExist)
}
return &run, nil
}
func GetRunByIndex(ctx context.Context, repoID, index int64) (*ActionRun, error) {
run := &ActionRun{
RepoID: repoID,
Index: index,
}
has, err := db.GetEngine(ctx).Get(run)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("run with index %d %d: %w", repoID, index, util.ErrNotExist)
}
return run, nil
}
func UpdateRun(ctx context.Context, run *ActionRun, cols ...string) error {
sess := db.GetEngine(ctx).ID(run.ID)
if len(cols) > 0 {
sess.Cols(cols...)
}
_, err := sess.Update(run)
if run.Status != 0 || util.SliceContains(cols, "status") {
if run.RepoID == 0 {
run, err = GetRunByID(ctx, run.ID)
if err != nil {
return err
}
}
if run.Repo == nil {
repo, err := repo_model.GetRepositoryByID(ctx, run.RepoID)
if err != nil {
return err
}
run.Repo = repo
}
if err := updateRepoRunsNumbers(ctx, run.Repo); err != nil {
return err
}
}
return err
}
type ActionRunIndex db.ResourceIndex

163
models/actions/run_job.go Normal file
View file

@ -0,0 +1,163 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"fmt"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
// ActionRunJob represents a job of a run
type ActionRunJob struct {
ID int64
RunID int64 `xorm:"index"`
Run *ActionRun `xorm:"-"`
RepoID int64 `xorm:"index"`
OwnerID int64 `xorm:"index"`
CommitSHA string `xorm:"index"`
IsForkPullRequest bool
Name string `xorm:"VARCHAR(255)"`
Attempt int64
WorkflowPayload []byte
JobID string `xorm:"VARCHAR(255)"` // the job id defined in the workflow file, not this record's id
Needs []string `xorm:"JSON TEXT"`
RunsOn []string `xorm:"JSON TEXT"`
TaskID int64 // the latest task of the job
Status Status `xorm:"index"`
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated index"`
}
func init() {
db.RegisterModel(new(ActionRunJob))
}
func (job *ActionRunJob) Duration() time.Duration {
return calculateDuration(job.Started, job.Stopped, job.Status)
}
func (job *ActionRunJob) LoadRun(ctx context.Context) error {
if job.Run == nil {
run, err := GetRunByID(ctx, job.RunID)
if err != nil {
return err
}
job.Run = run
}
return nil
}
// LoadAttributes loads Run if it is not already loaded
func (job *ActionRunJob) LoadAttributes(ctx context.Context) error {
if job == nil {
return nil
}
if err := job.LoadRun(ctx); err != nil {
return err
}
return job.Run.LoadAttributes(ctx)
}
func GetRunJobByID(ctx context.Context, id int64) (*ActionRunJob, error) {
var job ActionRunJob
has, err := db.GetEngine(ctx).Where("id=?", id).Get(&job)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("run job with id %d: %w", id, util.ErrNotExist)
}
return &job, nil
}
func GetRunJobsByRunID(ctx context.Context, runID int64) ([]*ActionRunJob, error) {
var jobs []*ActionRunJob
if err := db.GetEngine(ctx).Where("run_id=?", runID).OrderBy("id").Find(&jobs); err != nil {
return nil, err
}
return jobs, nil
}
func UpdateRunJob(ctx context.Context, job *ActionRunJob, cond builder.Cond, cols ...string) (int64, error) {
e := db.GetEngine(ctx)
sess := e.ID(job.ID)
if len(cols) > 0 {
sess.Cols(cols...)
}
if cond != nil {
sess.Where(cond)
}
affected, err := sess.Update(job)
if err != nil {
return 0, err
}
if affected == 0 || (!util.SliceContains(cols, "status") && job.Status == 0) {
return affected, nil
}
if job.RunID == 0 {
var err error
if job, err = GetRunJobByID(ctx, job.ID); err != nil {
return affected, err
}
}
jobs, err := GetRunJobsByRunID(ctx, job.RunID)
if err != nil {
return affected, err
}
runStatus := aggregateJobStatus(jobs)
run := &ActionRun{
ID: job.RunID,
Status: runStatus,
}
if runStatus.IsDone() {
run.Stopped = timeutil.TimeStampNow()
}
return affected, UpdateRun(ctx, run)
}
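// aggregateJobStatus derives a run's overall status from its jobs: once all jobs are done,
// the run is a failure if any job failed or was cancelled and a success otherwise;
// if all jobs are still waiting the run is waiting, otherwise it is running.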
func aggregateJobStatus(jobs []*ActionRunJob) Status {
allDone := true
allWaiting := true
hasFailure := false
for _, job := range jobs {
if !job.Status.IsDone() {
allDone = false
}
if job.Status != StatusWaiting {
allWaiting = false
}
if job.Status == StatusFailure || job.Status == StatusCancelled {
hasFailure = true
}
}
if allDone {
if hasFailure {
return StatusFailure
}
return StatusSuccess
}
if allWaiting {
return StatusWaiting
}
return StatusRunning
}

View file

@ -0,0 +1,99 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)
type ActionJobList []*ActionRunJob
func (jobs ActionJobList) GetRunIDs() []int64 {
ids := make(container.Set[int64], len(jobs))
for _, j := range jobs {
if j.RunID == 0 {
continue
}
ids.Add(j.RunID)
}
return ids.Values()
}
func (jobs ActionJobList) LoadRuns(ctx context.Context, withRepo bool) error {
runIDs := jobs.GetRunIDs()
runs := make(map[int64]*ActionRun, len(runIDs))
if err := db.GetEngine(ctx).In("id", runIDs).Find(&runs); err != nil {
return err
}
for _, j := range jobs {
if j.RunID > 0 && j.Run == nil {
j.Run = runs[j.RunID]
}
}
if withRepo {
var runsList RunList = make([]*ActionRun, 0, len(runs))
for _, r := range runs {
runsList = append(runsList, r)
}
return runsList.LoadRepos()
}
return nil
}
func (jobs ActionJobList) LoadAttributes(ctx context.Context, withRepo bool) error {
return jobs.LoadRuns(ctx, withRepo)
}
type FindRunJobOptions struct {
db.ListOptions
RunID int64
RepoID int64
OwnerID int64
CommitSHA string
Statuses []Status
UpdatedBefore timeutil.TimeStamp
}
func (opts FindRunJobOptions) toConds() builder.Cond {
cond := builder.NewCond()
if opts.RunID > 0 {
cond = cond.And(builder.Eq{"run_id": opts.RunID})
}
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if opts.OwnerID > 0 {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
}
if opts.CommitSHA != "" {
cond = cond.And(builder.Eq{"commit_sha": opts.CommitSHA})
}
if len(opts.Statuses) > 0 {
cond = cond.And(builder.In("status", opts.Statuses))
}
if opts.UpdatedBefore > 0 {
cond = cond.And(builder.Lt{"updated": opts.UpdatedBefore})
}
return cond
}
func FindRunJobs(ctx context.Context, opts FindRunJobOptions) (ActionJobList, int64, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if opts.PageSize > 0 && opts.Page >= 1 {
e.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
var tasks ActionJobList
total, err := e.FindAndCount(&tasks)
return tasks, total, err
}
func CountRunJobs(ctx context.Context, opts FindRunJobOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(ActionRunJob))
}

107
models/actions/run_list.go Normal file
View file

@ -0,0 +1,107 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/util"
"xorm.io/builder"
)
type RunList []*ActionRun
// GetUserIDs returns the IDs of the users who triggered the runs
func (runs RunList) GetUserIDs() []int64 {
ids := make(container.Set[int64], len(runs))
for _, run := range runs {
ids.Add(run.TriggerUserID)
}
return ids.Values()
}
func (runs RunList) GetRepoIDs() []int64 {
ids := make(container.Set[int64], len(runs))
for _, run := range runs {
ids.Add(run.RepoID)
}
return ids.Values()
}
func (runs RunList) LoadTriggerUser(ctx context.Context) error {
userIDs := runs.GetUserIDs()
users := make(map[int64]*user_model.User, len(userIDs))
if err := db.GetEngine(ctx).In("id", userIDs).Find(&users); err != nil {
return err
}
for _, run := range runs {
if run.TriggerUserID == user_model.ActionsUserID {
run.TriggerUser = user_model.NewActionsUser()
} else {
run.TriggerUser = users[run.TriggerUserID]
}
}
return nil
}
func (runs RunList) LoadRepos() error {
repoIDs := runs.GetRepoIDs()
repos, err := repo_model.GetRepositoriesMapByIDs(repoIDs)
if err != nil {
return err
}
for _, run := range runs {
run.Repo = repos[run.RepoID]
}
return nil
}
type FindRunOptions struct {
db.ListOptions
RepoID int64
OwnerID int64
IsClosed util.OptionalBool
WorkflowFileName string
}
func (opts FindRunOptions) toConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if opts.OwnerID > 0 {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
}
if opts.IsClosed.IsFalse() {
cond = cond.And(builder.Eq{"status": StatusWaiting}.Or(
builder.Eq{"status": StatusRunning}))
} else if opts.IsClosed.IsTrue() {
cond = cond.And(
builder.Neq{"status": StatusWaiting}.And(
builder.Neq{"status": StatusRunning}))
}
if opts.WorkflowFileName != "" {
cond = cond.And(builder.Eq{"workflow_id": opts.WorkflowFileName})
}
return cond
}
func FindRuns(ctx context.Context, opts FindRunOptions) (RunList, int64, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if opts.PageSize > 0 && opts.Page >= 1 {
e.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
var runs RunList
total, err := e.Desc("id").FindAndCount(&runs)
return runs, total, err
}
func CountRuns(ctx context.Context, opts FindRunOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(ActionRun))
}

252
models/actions/runner.go Normal file
View file

@ -0,0 +1,252 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"fmt"
"strings"
"time"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/translation"
"code.gitea.io/gitea/modules/util"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"xorm.io/builder"
)
// ActionRunner represents a runner machine
type ActionRunner struct {
ID int64
UUID string `xorm:"CHAR(36) UNIQUE"`
Name string `xorm:"VARCHAR(255)"`
OwnerID int64 `xorm:"index"` // org level runner, 0 means system
Owner *user_model.User `xorm:"-"`
RepoID int64 `xorm:"index"` // repo level runner; if OwnerID is also zero, it is a global runner
Repo *repo_model.Repository `xorm:"-"`
Description string `xorm:"TEXT"`
Base int // 0: native, 1: docker, 2: virtual machine
RepoRange string // glob pattern matching which repositories may use this runner
Token string `xorm:"-"`
TokenHash string `xorm:"UNIQUE"` // sha256 of token
TokenSalt string
// TokenLastEight string `xorm:"token_last_eight"` // it's unnecessary because we don't find runners by token
LastOnline timeutil.TimeStamp `xorm:"index"`
LastActive timeutil.TimeStamp `xorm:"index"`
// AgentLabels store the OS and architecture reported by the runner agent.
AgentLabels []string
// CustomLabels store labels defined by the user.
CustomLabels []string
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated"`
Deleted timeutil.TimeStamp `xorm:"deleted"`
}
func (r *ActionRunner) OwnType() string {
if r.RepoID != 0 {
return fmt.Sprintf("Repo(%s)", r.Repo.FullName())
}
if r.OwnerID != 0 {
return fmt.Sprintf("Org(%s)", r.Owner.Name)
}
return "Global"
}
func (r *ActionRunner) Status() runnerv1.RunnerStatus {
if time.Since(r.LastOnline.AsTime()) > time.Minute {
return runnerv1.RunnerStatus_RUNNER_STATUS_OFFLINE
}
if time.Since(r.LastActive.AsTime()) > 10*time.Second {
return runnerv1.RunnerStatus_RUNNER_STATUS_IDLE
}
return runnerv1.RunnerStatus_RUNNER_STATUS_ACTIVE
}
func (r *ActionRunner) StatusName() string {
return strings.ToLower(strings.TrimPrefix(r.Status().String(), "RUNNER_STATUS_"))
}
func (r *ActionRunner) StatusLocaleName(lang translation.Locale) string {
return lang.Tr("actions.runners.status." + r.StatusName())
}
func (r *ActionRunner) IsOnline() bool {
status := r.Status()
if status == runnerv1.RunnerStatus_RUNNER_STATUS_IDLE || status == runnerv1.RunnerStatus_RUNNER_STATUS_ACTIVE {
return true
}
return false
}
// AllLabels returns agent and custom labels
func (r *ActionRunner) AllLabels() []string {
return append(r.AgentLabels, r.CustomLabels...)
}
// Editable checks if the runner is editable by the user
func (r *ActionRunner) Editable(ownerID, repoID int64) bool {
if ownerID == 0 && repoID == 0 {
return true
}
if ownerID > 0 && r.OwnerID == ownerID {
return true
}
return repoID > 0 && r.RepoID == repoID
}
// LoadAttributes loads the attributes of the runner
func (r *ActionRunner) LoadAttributes(ctx context.Context) error {
if r.OwnerID > 0 {
var user user_model.User
has, err := db.GetEngine(ctx).ID(r.OwnerID).Get(&user)
if err != nil {
return err
}
if has {
r.Owner = &user
}
}
if r.RepoID > 0 {
var repo repo_model.Repository
has, err := db.GetEngine(ctx).ID(r.RepoID).Get(&repo)
if err != nil {
return err
}
if has {
r.Repo = &repo
}
}
return nil
}
func (r *ActionRunner) GenerateToken() (err error) {
r.Token, r.TokenSalt, r.TokenHash, _, err = generateSaltedToken()
return err
}
func init() {
db.RegisterModel(&ActionRunner{})
}
type FindRunnerOptions struct {
db.ListOptions
RepoID int64
OwnerID int64
Sort string
Filter string
WithAvailable bool // include not only the runners that belong to the repo/owner, but also the runners available to them
}
func (opts FindRunnerOptions) toCond() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
c := builder.NewCond().And(builder.Eq{"repo_id": opts.RepoID})
if opts.WithAvailable {
c = c.Or(builder.Eq{"owner_id": builder.Select("owner_id").From("repository").Where(builder.Eq{"id": opts.RepoID})})
c = c.Or(builder.Eq{"repo_id": 0, "owner_id": 0})
}
cond = cond.And(c)
}
if opts.OwnerID > 0 {
c := builder.NewCond().And(builder.Eq{"owner_id": opts.OwnerID})
if opts.WithAvailable {
c = c.Or(builder.Eq{"repo_id": 0, "owner_id": 0})
}
cond = cond.And(c)
}
if opts.Filter != "" {
cond = cond.And(builder.Like{"name", opts.Filter})
}
return cond
}
func (opts FindRunnerOptions) toOrder() string {
switch opts.Sort {
case "online":
return "last_online DESC"
case "offline":
return "last_online ASC"
case "alphabetically":
return "name ASC"
}
return "last_online DESC"
}
func CountRunners(ctx context.Context, opts FindRunnerOptions) (int64, error) {
return db.GetEngine(ctx).
Where(opts.toCond()).
Count(ActionRunner{})
}
func FindRunners(ctx context.Context, opts FindRunnerOptions) (runners RunnerList, err error) {
sess := db.GetEngine(ctx).
Where(opts.toCond()).
OrderBy(opts.toOrder())
if opts.Page > 0 {
sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
return runners, sess.Find(&runners)
}
// GetRunnerByUUID returns a runner via uuid
func GetRunnerByUUID(ctx context.Context, uuid string) (*ActionRunner, error) {
var runner ActionRunner
has, err := db.GetEngine(ctx).Where("uuid=?", uuid).Get(&runner)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("runner with uuid %s: %w", uuid, util.ErrNotExist)
}
return &runner, nil
}
// GetRunnerByID returns a runner via id
func GetRunnerByID(ctx context.Context, id int64) (*ActionRunner, error) {
var runner ActionRunner
has, err := db.GetEngine(ctx).Where("id=?", id).Get(&runner)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("runner with id %d: %w", id, util.ErrNotExist)
}
return &runner, nil
}
// UpdateRunner updates runner's information.
func UpdateRunner(ctx context.Context, r *ActionRunner, cols ...string) error {
e := db.GetEngine(ctx)
var err error
if len(cols) == 0 {
_, err = e.ID(r.ID).AllCols().Update(r)
} else {
_, err = e.ID(r.ID).Cols(cols...).Update(r)
}
return err
}
// DeleteRunner deletes a runner by given ID.
func DeleteRunner(ctx context.Context, id int64) error {
if _, err := GetRunnerByID(ctx, id); err != nil {
return err
}
_, err := db.GetEngine(ctx).Delete(&ActionRunner{ID: id})
return err
}
// CreateRunner creates new runner.
func CreateRunner(ctx context.Context, t *ActionRunner) error {
_, err := db.GetEngine(ctx).Insert(t)
return err
}

View file

@ -0,0 +1,77 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
)
type RunnerList []*ActionRunner
// GetUserIDs returns the IDs of the runners' owners
func (runners RunnerList) GetUserIDs() []int64 {
ids := make(container.Set[int64], len(runners))
for _, runner := range runners {
if runner.OwnerID == 0 {
continue
}
ids.Add(runner.OwnerID)
}
return ids.Values()
}
func (runners RunnerList) LoadOwners(ctx context.Context) error {
userIDs := runners.GetUserIDs()
users := make(map[int64]*user_model.User, len(userIDs))
if err := db.GetEngine(ctx).In("id", userIDs).Find(&users); err != nil {
return err
}
for _, runner := range runners {
if runner.OwnerID > 0 && runner.Owner == nil {
runner.Owner = users[runner.OwnerID]
}
}
return nil
}
func (runners RunnerList) getRepoIDs() []int64 {
repoIDs := make(container.Set[int64], len(runners))
for _, runner := range runners {
if runner.RepoID == 0 {
continue
}
if _, ok := repoIDs[runner.RepoID]; !ok {
repoIDs[runner.RepoID] = struct{}{}
}
}
return repoIDs.Values()
}
func (runners RunnerList) LoadRepos(ctx context.Context) error {
repoIDs := runners.getRepoIDs()
repos := make(map[int64]*repo_model.Repository, len(repoIDs))
if err := db.GetEngine(ctx).In("id", repoIDs).Find(&repos); err != nil {
return err
}
for _, runner := range runners {
if runner.RepoID > 0 && runner.Repo == nil {
runner.Repo = repos[runner.RepoID]
}
}
return nil
}
func (runners RunnerList) LoadAttributes(ctx context.Context) error {
if err := runners.LoadOwners(ctx); err != nil {
return err
}
return runners.LoadRepos(ctx)
}

View file

@ -0,0 +1,86 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"fmt"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
)
// ActionRunnerToken represents a runner token
type ActionRunnerToken struct {
ID int64
Token string `xorm:"UNIQUE"`
OwnerID int64 `xorm:"index"` // org level runner, 0 means system
Owner *user_model.User `xorm:"-"`
RepoID int64 `xorm:"index"` // repo level runner; if OwnerID is also zero, it is a global runner
Repo *repo_model.Repository `xorm:"-"`
IsActive bool
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated"`
Deleted timeutil.TimeStamp `xorm:"deleted"`
}
func init() {
db.RegisterModel(new(ActionRunnerToken))
}
// GetRunnerToken returns an action runner token by its token string
func GetRunnerToken(ctx context.Context, token string) (*ActionRunnerToken, error) {
var runnerToken ActionRunnerToken
has, err := db.GetEngine(ctx).Where("token=?", token).Get(&runnerToken)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("runner token %q: %w", token, util.ErrNotExist)
}
return &runnerToken, nil
}
// UpdateRunnerToken updates runner token information.
func UpdateRunnerToken(ctx context.Context, r *ActionRunnerToken, cols ...string) (err error) {
e := db.GetEngine(ctx)
if len(cols) == 0 {
_, err = e.ID(r.ID).AllCols().Update(r)
} else {
_, err = e.ID(r.ID).Cols(cols...).Update(r)
}
return err
}
// NewRunnerToken creates a new runner token
func NewRunnerToken(ctx context.Context, ownerID, repoID int64) (*ActionRunnerToken, error) {
token, err := util.CryptoRandomString(40)
if err != nil {
return nil, err
}
runnerToken := &ActionRunnerToken{
OwnerID: ownerID,
RepoID: repoID,
IsActive: false,
Token: token,
}
_, err = db.GetEngine(ctx).Insert(runnerToken)
return runnerToken, err
}
// GetUnactivatedRunnerToken returns the latest unactivated runner token
func GetUnactivatedRunnerToken(ctx context.Context, ownerID, repoID int64) (*ActionRunnerToken, error) {
var runnerToken ActionRunnerToken
has, err := db.GetEngine(ctx).Where("owner_id=? AND repo_id=? AND is_active=?", ownerID, repoID, false).OrderBy("id DESC").Get(&runnerToken)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("runner token: %w", util.ErrNotExist)
}
return &runnerToken, nil
}

100
models/actions/status.go Normal file
View file

@ -0,0 +1,100 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"code.gitea.io/gitea/modules/translation"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
)
// Status represents the status of ActionRun, ActionRunJob, ActionTask, or ActionTaskStep
type Status int
const (
StatusUnknown Status = iota // 0, consistent with runnerv1.Result_RESULT_UNSPECIFIED
StatusSuccess // 1, consistent with runnerv1.Result_RESULT_SUCCESS
StatusFailure // 2, consistent with runnerv1.Result_RESULT_FAILURE
StatusCancelled // 3, consistent with runnerv1.Result_RESULT_CANCELLED
StatusSkipped // 4, consistent with runnerv1.Result_RESULT_SKIPPED
StatusWaiting // 5, isn't a runnerv1.Result
StatusRunning // 6, isn't a runnerv1.Result
StatusBlocked // 7, isn't a runnerv1.Result
)
var statusNames = map[Status]string{
StatusUnknown: "unknown",
StatusWaiting: "waiting",
StatusRunning: "running",
StatusSuccess: "success",
StatusFailure: "failure",
StatusCancelled: "cancelled",
StatusSkipped: "skipped",
StatusBlocked: "blocked",
}
// String returns the string name of the Status
func (s Status) String() string {
return statusNames[s]
}
// LocaleString returns the locale string name of the Status
func (s Status) LocaleString(lang translation.Locale) string {
return lang.Tr("actions.status." + s.String())
}
// IsDone returns whether the Status is final
func (s Status) IsDone() bool {
return s.In(StatusSuccess, StatusFailure, StatusCancelled, StatusSkipped)
}
// HasRun returns whether the Status is a result of running
func (s Status) HasRun() bool {
return s.In(StatusSuccess, StatusFailure)
}
func (s Status) IsUnknown() bool {
return s == StatusUnknown
}
func (s Status) IsSuccess() bool {
return s == StatusSuccess
}
func (s Status) IsFailure() bool {
return s == StatusFailure
}
func (s Status) IsCancelled() bool {
return s == StatusCancelled
}
func (s Status) IsSkipped() bool {
return s == StatusSkipped
}
func (s Status) IsWaiting() bool {
return s == StatusWaiting
}
func (s Status) IsRunning() bool {
return s == StatusRunning
}
// In returns whether s is one of the given statuses
func (s Status) In(statuses ...Status) bool {
for _, v := range statuses {
if s == v {
return true
}
}
return false
}
func (s Status) AsResult() runnerv1.Result {
if s.IsDone() {
return runnerv1.Result(s)
}
return runnerv1.Result_RESULT_UNSPECIFIED
}

504
models/actions/task.go Normal file
View file

@ -0,0 +1,504 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"crypto/subtle"
"fmt"
"time"
auth_model "code.gitea.io/gitea/models/auth"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
lru "github.com/hashicorp/golang-lru"
"github.com/nektos/act/pkg/jobparser"
"google.golang.org/protobuf/types/known/timestamppb"
"xorm.io/builder"
)
// ActionTask represents the distribution of a job to a runner
type ActionTask struct {
ID int64
JobID int64
Job *ActionRunJob `xorm:"-"`
Steps []*ActionTaskStep `xorm:"-"`
Attempt int64
RunnerID int64 `xorm:"index"`
Status Status `xorm:"index"`
Started timeutil.TimeStamp `xorm:"index"`
Stopped timeutil.TimeStamp
RepoID int64 `xorm:"index"`
OwnerID int64 `xorm:"index"`
CommitSHA string `xorm:"index"`
IsForkPullRequest bool
Token string `xorm:"-"`
TokenHash string `xorm:"UNIQUE"` // sha256 of token
TokenSalt string
TokenLastEight string `xorm:"index token_last_eight"`
LogFilename string // file name of log
LogInStorage bool // whether the log has been moved to storage (otherwise it is read from the database)
LogLength int64 // lines count
LogSize int64 // blob size
LogIndexes LogIndexes `xorm:"LONGBLOB"` // line number to offset
LogExpired bool // files that are too old will be deleted
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated index"`
}
var successfulTokenTaskCache *lru.Cache
func init() {
db.RegisterModel(new(ActionTask), func() error {
if setting.SuccessfulTokensCacheSize > 0 {
var err error
successfulTokenTaskCache, err = lru.New(setting.SuccessfulTokensCacheSize)
if err != nil {
return fmt.Errorf("unable to allocate Task cache: %v", err)
}
} else {
successfulTokenTaskCache = nil
}
return nil
})
}
func (task *ActionTask) Duration() time.Duration {
return calculateDuration(task.Started, task.Stopped, task.Status)
}
func (task *ActionTask) IsStopped() bool {
return task.Stopped > 0
}
func (task *ActionTask) GetRunLink() string {
if task.Job == nil || task.Job.Run == nil {
return ""
}
return task.Job.Run.Link()
}
func (task *ActionTask) GetCommitLink() string {
if task.Job == nil || task.Job.Run == nil || task.Job.Run.Repo == nil {
return ""
}
return task.Job.Run.Repo.CommitLink(task.CommitSHA)
}
func (task *ActionTask) GetRepoName() string {
if task.Job == nil || task.Job.Run == nil || task.Job.Run.Repo == nil {
return ""
}
return task.Job.Run.Repo.FullName()
}
func (task *ActionTask) GetRepoLink() string {
if task.Job == nil || task.Job.Run == nil || task.Job.Run.Repo == nil {
return ""
}
return task.Job.Run.Repo.Link()
}
func (task *ActionTask) LoadJob(ctx context.Context) error {
if task.Job == nil {
job, err := GetRunJobByID(ctx, task.JobID)
if err != nil {
return err
}
task.Job = job
}
return nil
}
// LoadAttributes loads Job and Steps if they are not already loaded
func (task *ActionTask) LoadAttributes(ctx context.Context) error {
if task == nil {
return nil
}
if err := task.LoadJob(ctx); err != nil {
return err
}
if err := task.Job.LoadAttributes(ctx); err != nil {
return err
}
if task.Steps == nil { // be careful, an empty slice (not nil) also means loaded
steps, err := GetTaskStepsByTaskID(ctx, task.ID)
if err != nil {
return err
}
task.Steps = steps
}
return nil
}
func (task *ActionTask) GenerateToken() (err error) {
task.Token, task.TokenSalt, task.TokenHash, task.TokenLastEight, err = generateSaltedToken()
return err
}
func GetTaskByID(ctx context.Context, id int64) (*ActionTask, error) {
var task ActionTask
has, err := db.GetEngine(ctx).Where("id=?", id).Get(&task)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("task with id %d: %w", id, util.ErrNotExist)
}
return &task, nil
}
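// GetRunningTaskByToken looks up a running task by its authentication token, consulting
// a small LRU cache of previously verified tokens before comparing salted token hashes.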
func GetRunningTaskByToken(ctx context.Context, token string) (*ActionTask, error) {
errNotExist := fmt.Errorf("task with token %q: %w", token, util.ErrNotExist)
if token == "" {
return nil, errNotExist
}
// A token is a SHA1 sum, which is 40 hexadecimal characters long
if len(token) != 40 {
return nil, errNotExist
}
for _, x := range []byte(token) {
if x < '0' || (x > '9' && x < 'a') || x > 'f' {
return nil, errNotExist
}
}
lastEight := token[len(token)-8:]
if id := getTaskIDFromCache(token); id > 0 {
task := &ActionTask{
TokenLastEight: lastEight,
}
// Re-get the task from the db in case it has been deleted in the intervening period
has, err := db.GetEngine(ctx).ID(id).Get(task)
if err != nil {
return nil, err
}
if has {
return task, nil
}
successfulTokenTaskCache.Remove(token)
}
var tasks []*ActionTask
err := db.GetEngine(ctx).Where("token_last_eight = ? AND status = ?", lastEight, StatusRunning).Find(&tasks)
if err != nil {
return nil, err
} else if len(tasks) == 0 {
return nil, errNotExist
}
for _, t := range tasks {
tempHash := auth_model.HashToken(token, t.TokenSalt)
if subtle.ConstantTimeCompare([]byte(t.TokenHash), []byte(tempHash)) == 1 {
if successfulTokenTaskCache != nil {
successfulTokenTaskCache.Add(token, t.ID)
}
return t, nil
}
}
return nil, errNotExist
}
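// CreateTaskForRunner tries to assign a waiting job to the given runner: it picks the
// oldest waiting job whose runs-on labels are a subset of the runner's labels, creates
// a task (and its steps) for it, and marks the job and run as running. The boolean
// return value reports whether a task was actually created.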
func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask, bool, error) {
dbCtx, commiter, err := db.TxContext(ctx)
if err != nil {
return nil, false, err
}
defer commiter.Close()
ctx = dbCtx.WithContext(ctx)
e := db.GetEngine(ctx)
jobCond := builder.NewCond()
if runner.RepoID != 0 {
jobCond = builder.Eq{"repo_id": runner.RepoID}
} else if runner.OwnerID != 0 {
jobCond = builder.In("repo_id", builder.Select("id").From("repository").Where(builder.Eq{"owner_id": runner.OwnerID}))
}
if jobCond.IsValid() {
jobCond = builder.In("run_id", builder.Select("id").From("action_run").Where(jobCond))
}
var jobs []*ActionRunJob
if err := e.Where("task_id=? AND status=?", 0, StatusWaiting).And(jobCond).Asc("id").Find(&jobs); err != nil {
return nil, false, err
}
// TODO: a more efficient way to filter labels
var job *ActionRunJob
labels := runner.AgentLabels
labels = append(labels, runner.CustomLabels...)
log.Trace("runner labels: %v", labels)
for _, v := range jobs {
if isSubset(labels, v.RunsOn) {
job = v
break
}
}
if job == nil {
return nil, false, nil
}
if err := job.LoadAttributes(ctx); err != nil {
return nil, false, err
}
now := timeutil.TimeStampNow()
job.Attempt++
job.Started = now
job.Status = StatusRunning
task := &ActionTask{
JobID: job.ID,
Attempt: job.Attempt,
RunnerID: runner.ID,
Started: now,
Status: StatusRunning,
RepoID: job.RepoID,
OwnerID: job.OwnerID,
CommitSHA: job.CommitSHA,
IsForkPullRequest: job.IsForkPullRequest,
}
if err := task.GenerateToken(); err != nil {
return nil, false, err
}
var workflowJob *jobparser.Job
if gots, err := jobparser.Parse(job.WorkflowPayload); err != nil {
return nil, false, fmt.Errorf("parse workflow of job %d: %w", job.ID, err)
} else if len(gots) != 1 {
return nil, false, fmt.Errorf("workflow of job %d: not signle workflow", job.ID)
} else {
_, workflowJob = gots[0].Job()
}
if _, err := e.Insert(task); err != nil {
return nil, false, err
}
task.LogFilename = logFileName(job.Run.Repo.FullName(), task.ID)
if _, err := e.ID(task.ID).Cols("log_filename").Update(task); err != nil {
return nil, false, err
}
if len(workflowJob.Steps) > 0 {
steps := make([]*ActionTaskStep, len(workflowJob.Steps))
for i, v := range workflowJob.Steps {
steps[i] = &ActionTaskStep{
Name: v.String(),
TaskID: task.ID,
Index: int64(i),
RepoID: task.RepoID,
Status: StatusWaiting,
}
}
if _, err := e.Insert(steps); err != nil {
return nil, false, err
}
task.Steps = steps
}
job.TaskID = task.ID
if n, err := UpdateRunJob(ctx, job, builder.Eq{"task_id": 0}); err != nil {
return nil, false, err
} else if n != 1 {
return nil, false, nil
}
if job.Run.Status.IsWaiting() {
job.Run.Status = StatusRunning
job.Run.Started = now
if err := UpdateRun(ctx, job.Run, "status", "started"); err != nil {
return nil, false, err
}
}
task.Job = job
if err := commiter.Commit(); err != nil {
return nil, false, err
}
return task, true, nil
}
func UpdateTask(ctx context.Context, task *ActionTask, cols ...string) error {
sess := db.GetEngine(ctx).ID(task.ID)
if len(cols) > 0 {
sess.Cols(cols...)
}
_, err := sess.Update(task)
return err
}
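// UpdateTaskByState updates a task and its steps from the state reported by the runner,
// propagating a final result to the task's job.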
func UpdateTaskByState(ctx context.Context, state *runnerv1.TaskState) (*ActionTask, error) {
stepStates := map[int64]*runnerv1.StepState{}
for _, v := range state.Steps {
stepStates[v.Id] = v
}
ctx, commiter, err := db.TxContext(ctx)
if err != nil {
return nil, err
}
defer commiter.Close()
e := db.GetEngine(ctx)
task := &ActionTask{}
if has, err := e.ID(state.Id).Get(task); err != nil {
return nil, err
} else if !has {
return nil, util.ErrNotExist
}
if state.Result != runnerv1.Result_RESULT_UNSPECIFIED {
task.Status = Status(state.Result)
task.Stopped = timeutil.TimeStamp(state.StoppedAt.AsTime().Unix())
if _, err := UpdateRunJob(ctx, &ActionRunJob{
ID: task.JobID,
Status: task.Status,
Stopped: task.Stopped,
}, nil); err != nil {
return nil, err
}
}
if _, err := e.ID(task.ID).Update(task); err != nil {
return nil, err
}
if err := task.LoadAttributes(ctx); err != nil {
return nil, err
}
for _, step := range task.Steps {
var result runnerv1.Result
if v, ok := stepStates[step.Index]; ok {
result = v.Result
step.LogIndex = v.LogIndex
step.LogLength = v.LogLength
step.Started = convertTimestamp(v.StartedAt)
step.Stopped = convertTimestamp(v.StoppedAt)
}
if result != runnerv1.Result_RESULT_UNSPECIFIED {
step.Status = Status(result)
} else if step.Started != 0 {
step.Status = StatusRunning
}
if _, err := e.ID(step.ID).Update(step); err != nil {
return nil, err
}
}
if err := commiter.Commit(); err != nil {
return nil, err
}
return task, nil
}
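// StopTask stops a task with the given done status and propagates it to the
// task's job and to any steps that have not finished yet.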
func StopTask(ctx context.Context, taskID int64, status Status) error {
if !status.IsDone() {
return fmt.Errorf("cannot stop task with status %v", status)
}
e := db.GetEngine(ctx)
task := &ActionTask{}
if has, err := e.ID(taskID).Get(task); err != nil {
return err
} else if !has {
return util.ErrNotExist
}
if task.Status.IsDone() {
return nil
}
now := timeutil.TimeStampNow()
task.Status = status
task.Stopped = now
if _, err := UpdateRunJob(ctx, &ActionRunJob{
ID: task.JobID,
Status: task.Status,
Stopped: task.Stopped,
}, nil); err != nil {
return err
}
if _, err := e.ID(task.ID).Update(task); err != nil {
return err
}
if err := task.LoadAttributes(ctx); err != nil {
return err
}
for _, step := range task.Steps {
if !step.Status.IsDone() {
step.Status = status
if step.Started == 0 {
step.Started = now
}
step.Stopped = now
}
if _, err := e.ID(step.ID).Update(step); err != nil {
return err
}
}
return nil
}
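// isSubset reports whether every element of subset is contained in set.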
func isSubset(set, subset []string) bool {
m := make(container.Set[string], len(set))
for _, v := range set {
m.Add(v)
}
for _, v := range subset {
if !m.Contains(v) {
return false
}
}
return true
}
func convertTimestamp(timestamp *timestamppb.Timestamp) timeutil.TimeStamp {
if timestamp.GetSeconds() == 0 && timestamp.GetNanos() == 0 {
return timeutil.TimeStamp(0)
}
return timeutil.TimeStamp(timestamp.AsTime().Unix())
}
func logFileName(repoFullName string, taskID int64) string {
return fmt.Sprintf("%s/%02x/%d.log", repoFullName, taskID%256, taskID)
}
func getTaskIDFromCache(token string) int64 {
if successfulTokenTaskCache == nil {
return 0
}
tInterface, ok := successfulTokenTaskCache.Get(token)
if !ok {
return 0
}
t, ok := tInterface.(int64)
if !ok {
return 0
}
return t
}

105
models/actions/task_list.go Normal file
View file

@ -0,0 +1,105 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/builder"
)
type TaskList []*ActionTask
func (tasks TaskList) GetJobIDs() []int64 {
ids := make(container.Set[int64], len(tasks))
for _, t := range tasks {
if t.JobID == 0 {
continue
}
ids.Add(t.JobID)
}
return ids.Values()
}
func (tasks TaskList) LoadJobs(ctx context.Context) error {
jobIDs := tasks.GetJobIDs()
jobs := make(map[int64]*ActionRunJob, len(jobIDs))
if err := db.GetEngine(ctx).In("id", jobIDs).Find(&jobs); err != nil {
return err
}
for _, t := range tasks {
if t.JobID > 0 && t.Job == nil {
t.Job = jobs[t.JobID]
}
}
// TODO: Replace with "ActionJobList(maps.Values(jobs))" once available
var jobsList ActionJobList = make([]*ActionRunJob, 0, len(jobs))
for _, j := range jobs {
jobsList = append(jobsList, j)
}
return jobsList.LoadAttributes(ctx, true)
}
func (tasks TaskList) LoadAttributes(ctx context.Context) error {
return tasks.LoadJobs(ctx)
}
type FindTaskOptions struct {
db.ListOptions
RepoID int64
OwnerID int64
CommitSHA string
Status Status
UpdatedBefore timeutil.TimeStamp
StartedBefore timeutil.TimeStamp
RunnerID int64
IDOrderDesc bool
}
func (opts FindTaskOptions) toConds() builder.Cond {
cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
}
if opts.OwnerID > 0 {
cond = cond.And(builder.Eq{"owner_id": opts.OwnerID})
}
if opts.CommitSHA != "" {
cond = cond.And(builder.Eq{"commit_sha": opts.CommitSHA})
}
if opts.Status > StatusUnknown {
cond = cond.And(builder.Eq{"status": opts.Status})
}
if opts.UpdatedBefore > 0 {
cond = cond.And(builder.Lt{"updated": opts.UpdatedBefore})
}
if opts.StartedBefore > 0 {
cond = cond.And(builder.Lt{"started": opts.StartedBefore})
}
if opts.RunnerID > 0 {
cond = cond.And(builder.Eq{"runner_id": opts.RunnerID})
}
return cond
}
func FindTasks(ctx context.Context, opts FindTaskOptions) (TaskList, error) {
e := db.GetEngine(ctx).Where(opts.toConds())
if opts.PageSize > 0 && opts.Page >= 1 {
e.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
}
if opts.IDOrderDesc {
e.OrderBy("id DESC")
}
var tasks TaskList
return tasks, e.Find(&tasks)
}
func CountTasks(ctx context.Context, opts FindTaskOptions) (int64, error) {
return db.GetEngine(ctx).Where(opts.toConds()).Count(new(ActionTask))
}

View file

@ -0,0 +1,41 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"context"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/timeutil"
)
// ActionTaskStep represents a step of ActionTask
type ActionTaskStep struct {
ID int64
Name string `xorm:"VARCHAR(255)"`
TaskID int64 `xorm:"index unique(task_index)"`
Index int64 `xorm:"index unique(task_index)"`
RepoID int64 `xorm:"index"`
Status Status `xorm:"index"`
LogIndex int64
LogLength int64
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated"`
}
func (step *ActionTaskStep) Duration() time.Duration {
return calculateDuration(step.Started, step.Stopped, step.Status)
}
func init() {
db.RegisterModel(new(ActionTaskStep))
}
func GetTaskStepsByTaskID(ctx context.Context, taskID int64) ([]*ActionTaskStep, error) {
var steps []*ActionTaskStep
return steps, db.GetEngine(ctx).Where("task_id=?", taskID).OrderBy("`index` ASC").Find(&steps)
}

84
models/actions/utils.go Normal file
View file

@ -0,0 +1,84 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"bytes"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"time"
auth_model "code.gitea.io/gitea/models/auth"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
)
func generateSaltedToken() (string, string, string, string, error) {
salt, err := util.CryptoRandomString(10)
if err != nil {
return "", "", "", "", err
}
buf, err := util.CryptoRandomBytes(20)
if err != nil {
return "", "", "", "", err
}
token := hex.EncodeToString(buf)
hash := auth_model.HashToken(token, salt)
return token, salt, hash, token[len(token)-8:], nil
}
/*
LogIndexes is the index for mapping log line number to buffer offset.
Because it uses varint encoding, its exact size cannot be predicted.
But we can make a simple estimate, assuming that each log line is about 200 bytes:
| lines | file size | index size |
|-----------|---------------------|--------------------|
| 100 | 20 KiB(20000) | 258 B(258) |
| 1000 | 195 KiB(200000) | 2.9 KiB(2958) |
| 10000 | 1.9 MiB(2000000) | 34 KiB(34715) |
| 100000 | 19 MiB(20000000) | 386 KiB(394715) |
| 1000000 | 191 MiB(200000000) | 4.1 MiB(4323626) |
| 10000000 | 1.9 GiB(2000000000) | 47 MiB(49323626) |
| 100000000 | 19 GiB(20000000000) | 490 MiB(513424280) |
*/
type LogIndexes []int64
func (indexes *LogIndexes) FromDB(b []byte) error {
reader := bytes.NewReader(b)
for {
v, err := binary.ReadVarint(reader)
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return fmt.Errorf("binary ReadVarint: %w", err)
}
*indexes = append(*indexes, v)
}
}
func (indexes *LogIndexes) ToDB() ([]byte, error) {
buf, i := make([]byte, binary.MaxVarintLen64*len(*indexes)), 0
for _, v := range *indexes {
n := binary.PutVarint(buf[i:], v)
i += n
}
return buf[:i], nil
}
var timeSince = time.Since
func calculateDuration(started, stopped timeutil.TimeStamp, status Status) time.Duration {
if started == 0 {
return 0
}
s := started.AsTime()
if status.IsDone() {
return stopped.AsTime().Sub(s)
}
return timeSince(s).Truncate(time.Second)
}
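// Illustrative sketch (not part of this change): one way the index above could be used
// to fetch a single log line. readLogLine is a hypothetical helper; r is any
// io.ReadSeeker over the log content, and logSize would come from ActionTask.LogSize.
func readLogLine(r io.ReadSeeker, indexes LogIndexes, logSize, line int64) ([]byte, error) {
	if line < 0 || line >= int64(len(indexes)) {
		return nil, fmt.Errorf("line %d out of range", line)
	}
	start := indexes[line] // byte offset where the line begins
	end := logSize
	if line+1 < int64(len(indexes)) {
		end = indexes[line+1] // the next line's offset marks the end of this line
	}
	if _, err := r.Seek(start, io.SeekStart); err != nil {
		return nil, err
	}
	buf := make([]byte, end-start)
	_, err := io.ReadFull(r, buf)
	return buf, err
}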

View file

@ -0,0 +1,90 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"math"
"testing"
"time"
"code.gitea.io/gitea/modules/timeutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLogIndexes_ToDB(t *testing.T) {
tests := []struct {
indexes LogIndexes
}{
{
indexes: []int64{1, 2, 0, -1, -2, math.MaxInt64, math.MinInt64},
},
}
for _, tt := range tests {
t.Run("", func(t *testing.T) {
got, err := tt.indexes.ToDB()
require.NoError(t, err)
indexes := LogIndexes{}
require.NoError(t, indexes.FromDB(got))
assert.Equal(t, tt.indexes, indexes)
})
}
}
func Test_calculateDuration(t *testing.T) {
oldTimeSince := timeSince
defer func() {
timeSince = oldTimeSince
}()
timeSince = func(t time.Time) time.Duration {
return timeutil.TimeStamp(1000).AsTime().Sub(t)
}
type args struct {
started timeutil.TimeStamp
stopped timeutil.TimeStamp
status Status
}
tests := []struct {
name string
args args
want time.Duration
}{
{
name: "unknown",
args: args{
started: 0,
stopped: 0,
status: StatusUnknown,
},
want: 0,
},
{
name: "running",
args: args{
started: 500,
stopped: 0,
status: StatusRunning,
},
want: 500 * time.Second,
},
{
name: "done",
args: args{
started: 500,
stopped: 600,
status: StatusSuccess,
},
want: 100 * time.Second,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equalf(t, tt.want, calculateDuration(tt.args.started, tt.args.stopped, tt.args.status), "calculateDuration(%v, %v, %v)", tt.args.started, tt.args.stopped, tt.args.status)
})
}
}

View file

@ -151,7 +151,7 @@ func CreateRepoTransferNotification(ctx context.Context, doer, newOwner *user_mo
}
for i := range users {
notify = append(notify, &Notification{
UserID: users[i].ID,
UserID: i,
RepoID: repo.ID,
Status: NotificationStatusUnread,
UpdatedBy: doer.ID,

View file

@ -24,6 +24,9 @@ func (err ErrKeyUnableVerify) Error() string {
return fmt.Sprintf("Unable to verify key content [result: %s]", err.Result)
}
// ErrKeyIsPrivate is returned when the provided key is a private key not a public key
var ErrKeyIsPrivate = util.NewSilentWrapErrorf(util.ErrInvalidArgument, "the provided key is a private key")
// ErrKeyNotExist represents a "KeyNotExist" kind of error.
type ErrKeyNotExist struct {
ID int64

View file

@ -96,6 +96,9 @@ func parseKeyString(content string) (string, error) {
if block == nil {
return "", fmt.Errorf("failed to parse PEM block containing the public key")
}
if strings.Contains(block.Type, "PRIVATE") {
return "", ErrKeyIsPrivate
}
pub, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {

View file

@ -27,3 +27,9 @@ const (
SearchOrderByForks SearchOrderBy = "num_forks ASC"
SearchOrderByForksReverse SearchOrderBy = "num_forks DESC"
)
const (
// NoneID is a sentinel value used in conditions to match no record's id.
// It's different from zero, which means the condition can be ignored.
NoneID = -1
)

View file

@ -37,9 +37,7 @@ func (d *postgresSchemaDriver) Open(name string) (driver.Conn, error) {
}
schemaValue, _ := driver.String.ConvertValue(setting.Database.Schema)
// golangci lint is incorrect here - there is no benefit to using driver.ExecerContext here
// and in any case pq does not implement it
if execer, ok := conn.(driver.Execer); ok { //nolint
if execer, ok := conn.(driver.Execer); ok {
_, err := execer.Exec(`SELECT set_config(
'search_path',
$1 || ',' || current_setting('search_path'),
@ -63,8 +61,7 @@ func (d *postgresSchemaDriver) Open(name string) (driver.Conn, error) {
// driver.String.ConvertValue will never return err for string
// golangci lint is incorrect here - there is no benefit to using stmt.ExecWithContext here
_, err = stmt.Exec([]driver.Value{schemaValue}) //nolint
_, err = stmt.Exec([]driver.Value{schemaValue})
if err != nil {
_ = conn.Close()
return nil, err

357
models/dbfs/dbfile.go Normal file
View file

@ -0,0 +1,357 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package dbfs
import (
"context"
"errors"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/models/db"
)
var defaultFileBlockSize int64 = 32 * 1024
type File interface {
io.ReadWriteCloser
io.Seeker
}
type file struct {
ctx context.Context
metaID int64
fullPath string
blockSize int64
allowRead bool
allowWrite bool
offset int64
}
var _ File = (*file)(nil)
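// readAt reads into p from the block containing offset; at most one block is read per
// call, and positions not backed by stored data are returned as zero bytes.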
func (f *file) readAt(fileMeta *dbfsMeta, offset int64, p []byte) (n int, err error) {
if offset >= fileMeta.FileSize {
return 0, io.EOF
}
blobPos := int(offset % f.blockSize)
blobOffset := offset - int64(blobPos)
blobRemaining := int(f.blockSize) - blobPos
needRead := len(p)
if needRead > blobRemaining {
needRead = blobRemaining
}
if blobOffset+int64(blobPos)+int64(needRead) > fileMeta.FileSize {
needRead = int(fileMeta.FileSize - blobOffset - int64(blobPos))
}
if needRead <= 0 {
return 0, io.EOF
}
var fileData dbfsData
ok, err := db.GetEngine(f.ctx).Where("meta_id = ? AND blob_offset = ?", f.metaID, blobOffset).Get(&fileData)
if err != nil {
return 0, err
}
blobData := fileData.BlobData
if !ok {
blobData = nil
}
canCopy := len(blobData) - blobPos
if canCopy <= 0 {
canCopy = 0
}
realRead := needRead
if realRead > canCopy {
realRead = canCopy
}
if realRead > 0 {
copy(p[:realRead], fileData.BlobData[blobPos:blobPos+realRead])
}
for i := realRead; i < needRead; i++ {
p[i] = 0
}
return needRead, nil
}
func (f *file) Read(p []byte) (n int, err error) {
if f.metaID == 0 || !f.allowRead {
return 0, os.ErrInvalid
}
fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
if err != nil {
return 0, err
}
n, err = f.readAt(fileMeta, f.offset, p)
f.offset += int64(n)
return n, err
}
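// Write writes p at the current offset, block by block: each affected block is read,
// patched and upserted into dbfs_data, and the file size is extended when the write
// goes past the current end of the file.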
func (f *file) Write(p []byte) (n int, err error) {
if f.metaID == 0 || !f.allowWrite {
return 0, os.ErrInvalid
}
fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
if err != nil {
return 0, err
}
needUpdateSize := false
written := 0
for len(p) > 0 {
blobPos := int(f.offset % f.blockSize)
blobOffset := f.offset - int64(blobPos)
blobRemaining := int(f.blockSize) - blobPos
needWrite := len(p)
if needWrite > blobRemaining {
needWrite = blobRemaining
}
buf := make([]byte, f.blockSize)
readBytes, err := f.readAt(fileMeta, blobOffset, buf)
if err != nil && !errors.Is(err, io.EOF) {
return written, err
}
copy(buf[blobPos:blobPos+needWrite], p[:needWrite])
if blobPos+needWrite > readBytes {
buf = buf[:blobPos+needWrite]
} else {
buf = buf[:readBytes]
}
fileData := dbfsData{
MetaID: fileMeta.ID,
BlobOffset: blobOffset,
BlobData: buf,
}
if res, err := db.GetEngine(f.ctx).Exec("UPDATE dbfs_data SET revision=revision+1, blob_data=? WHERE meta_id=? AND blob_offset=?", buf, fileMeta.ID, blobOffset); err != nil {
return written, err
} else if updated, err := res.RowsAffected(); err != nil {
return written, err
} else if updated == 0 {
if _, err = db.GetEngine(f.ctx).Insert(&fileData); err != nil {
return written, err
}
}
written += needWrite
f.offset += int64(needWrite)
if f.offset > fileMeta.FileSize {
fileMeta.FileSize = f.offset
needUpdateSize = true
}
p = p[needWrite:]
}
fileMetaUpdate := dbfsMeta{
ModifyTimestamp: timeToFileTimestamp(time.Now()),
}
if needUpdateSize {
fileMetaUpdate.FileSize = f.offset
}
if _, err := db.GetEngine(f.ctx).ID(fileMeta.ID).Update(fileMetaUpdate); err != nil {
return written, err
}
return written, nil
}
func (f *file) Seek(n int64, whence int) (int64, error) {
if f.metaID == 0 {
return 0, os.ErrInvalid
}
newOffset := f.offset
switch whence {
case io.SeekStart:
newOffset = n
case io.SeekCurrent:
newOffset += n
case io.SeekEnd:
size, err := f.size()
if err != nil {
return f.offset, err
}
newOffset = size + n
default:
return f.offset, os.ErrInvalid
}
if newOffset < 0 {
return f.offset, os.ErrInvalid
}
f.offset = newOffset
return newOffset, nil
}
func (f *file) Close() error {
return nil
}
func timeToFileTimestamp(t time.Time) int64 {
return t.UnixMicro()
}
func (f *file) loadMetaByPath() (*dbfsMeta, error) {
var fileMeta dbfsMeta
if ok, err := db.GetEngine(f.ctx).Where("full_path = ?", f.fullPath).Get(&fileMeta); err != nil {
return nil, err
} else if ok {
f.metaID = fileMeta.ID
f.blockSize = fileMeta.BlockSize
return &fileMeta, nil
}
return nil, nil
}
func (f *file) open(flag int) (err error) {
// see os.OpenFile for flag values
if flag&os.O_WRONLY != 0 {
f.allowWrite = true
} else if flag&os.O_RDWR != 0 {
f.allowRead = true
f.allowWrite = true
} else /* O_RDONLY */ {
f.allowRead = true
}
if f.allowWrite {
if flag&os.O_CREATE != 0 {
if flag&os.O_EXCL != 0 {
// file must not exist.
if f.metaID != 0 {
return os.ErrExist
}
} else {
// create a new file if none exists.
if f.metaID == 0 {
if err = f.createEmpty(); err != nil {
return err
}
}
}
}
if flag&os.O_TRUNC != 0 {
if err = f.truncate(); err != nil {
return err
}
}
if flag&os.O_APPEND != 0 {
if _, err = f.Seek(0, io.SeekEnd); err != nil {
return err
}
}
return nil
}
// read only mode
if f.metaID == 0 {
return os.ErrNotExist
}
return nil
}
func (f *file) createEmpty() error {
if f.metaID != 0 {
return os.ErrExist
}
now := time.Now()
_, err := db.GetEngine(f.ctx).Insert(&dbfsMeta{
FullPath: f.fullPath,
BlockSize: f.blockSize,
CreateTimestamp: timeToFileTimestamp(now),
ModifyTimestamp: timeToFileTimestamp(now),
})
if err != nil {
return err
}
if _, err = f.loadMetaByPath(); err != nil {
return err
}
return nil
}
func (f *file) truncate() error {
if f.metaID == 0 {
return os.ErrNotExist
}
return db.WithTx(f.ctx, func(ctx context.Context) error {
if _, err := db.GetEngine(ctx).Exec("UPDATE dbfs_meta SET file_size = 0 WHERE id = ?", f.metaID); err != nil {
return err
}
if _, err := db.GetEngine(ctx).Delete(&dbfsData{MetaID: f.metaID}); err != nil {
return err
}
return nil
})
}
func (f *file) renameTo(newPath string) error {
if f.metaID == 0 {
return os.ErrNotExist
}
newPath = buildPath(newPath)
return db.WithTx(f.ctx, func(ctx context.Context) error {
if _, err := db.GetEngine(ctx).Exec("UPDATE dbfs_meta SET full_path = ? WHERE id = ?", newPath, f.metaID); err != nil {
return err
}
return nil
})
}
func (f *file) delete() error {
if f.metaID == 0 {
return os.ErrNotExist
}
return db.WithTx(f.ctx, func(ctx context.Context) error {
if _, err := db.GetEngine(ctx).Delete(&dbfsMeta{ID: f.metaID}); err != nil {
return err
}
if _, err := db.GetEngine(ctx).Delete(&dbfsData{MetaID: f.metaID}); err != nil {
return err
}
return nil
})
}
func (f *file) size() (int64, error) {
if f.metaID == 0 {
return 0, os.ErrNotExist
}
fileMeta, err := findFileMetaByID(f.ctx, f.metaID)
if err != nil {
return 0, err
}
return fileMeta.FileSize, nil
}
func findFileMetaByID(ctx context.Context, metaID int64) (*dbfsMeta, error) {
var fileMeta dbfsMeta
if ok, err := db.GetEngine(ctx).Where("id = ?", metaID).Get(&fileMeta); err != nil {
return nil, err
} else if ok {
return &fileMeta, nil
}
return nil, nil
}
func buildPath(path string) string {
path = filepath.Clean(path)
path = strings.ReplaceAll(path, "\\", "/")
path = strings.TrimPrefix(path, "/")
return strconv.Itoa(strings.Count(path, "/")) + ":" + path
}
func newDbFile(ctx context.Context, path string) (*file, error) {
path = buildPath(path)
f := &file{ctx: ctx, fullPath: path, blockSize: defaultFileBlockSize}
if _, err := f.loadMetaByPath(); err != nil {
return nil, err
}
return f, nil
}

102
models/dbfs/dbfs.go Normal file
View file

@ -0,0 +1,102 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package dbfs
import (
"context"
"os"
"code.gitea.io/gitea/models/db"
)
/*
The reasons behind the DBFS (database-filesystem) package:
When a Gitea action is running, the Gitea action server should collect and store all the logs.
The requirements are:
* The running logs must be stored across the cluster if the Gitea servers are deployed as a cluster.
* The logs will be archived to Object Storage (S3/MinIO, etc.) after a period of time.
* The Gitea action UI should be able to render both the running logs and the archived logs.
Some possible solutions for the running logs:
* [Not ideal] Use a local temp file: it cannot be shared across the cluster.
* [Not ideal] Use a shared file in the filesystem of the git repositories: although at the moment the Gitea cluster's
git repositories must be stored in a shared filesystem, in the future Gitea may need a dedicated Git Service Server
to decouple the shared filesystem. The action logs would then become a blocker.
* [Not ideal] Record the logs in a database table line by line: it has a couple of problems:
- It's difficult to maintain multiple increasing sequences (log line numbers) across different databases.
- The table would accumulate a huge number of rows and suffer from the big-table performance problem.
- It's difficult to load the logs through the same interface as the other storages.
- It's difficult to calculate the size of the logs.
The DBFS solution:
* It can be used in a cluster.
* It shares the same interface (Read/Write/Seek) as the other storages.
* It's friendly to the database because it stores far fewer rows than the line-by-line solution.
* In the future, when Gitea Actions needs to limit the log size (other CI/CD services do so as well), it's easy to calculate the log file size.
* Even when the UI needs to render the tail lines, they can be found by seeking to the end of the file and counting the "\n" characters backwards (see the illustrative sketch after this file).
Seeking and counting is not the fastest approach, but it's acceptable and won't affect performance too much.
*/
type dbfsMeta struct {
ID int64 `xorm:"pk autoincr"`
FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"`
BlockSize int64 `xorm:"BIGINT NOT NULL"`
FileSize int64 `xorm:"BIGINT NOT NULL"`
CreateTimestamp int64 `xorm:"BIGINT NOT NULL"`
ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"`
}
type dbfsData struct {
ID int64 `xorm:"pk autoincr"`
Revision int64 `xorm:"BIGINT NOT NULL"`
MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
BlobOffset int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
BlobSize int64 `xorm:"BIGINT NOT NULL"`
BlobData []byte `xorm:"BLOB NOT NULL"`
}
func init() {
db.RegisterModel(new(dbfsMeta))
db.RegisterModel(new(dbfsData))
}
func OpenFile(ctx context.Context, name string, flag int) (File, error) {
f, err := newDbFile(ctx, name)
if err != nil {
return nil, err
}
err = f.open(flag)
if err != nil {
_ = f.Close()
return nil, err
}
return f, nil
}
func Open(ctx context.Context, name string) (File, error) {
return OpenFile(ctx, name, os.O_RDONLY)
}
func Create(ctx context.Context, name string) (File, error) {
return OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
}
func Rename(ctx context.Context, oldPath, newPath string) error {
f, err := newDbFile(ctx, oldPath)
if err != nil {
return err
}
defer f.Close()
return f.renameTo(newPath)
}
func Remove(ctx context.Context, name string) error {
f, err := newDbFile(ctx, name)
if err != nil {
return err
}
defer f.Close()
return f.delete()
}
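
The package comment above mentions that the UI can locate the tail lines of a log by seeking to the end of the file and counting "\n" backwards. Below is a minimal sketch of how a caller of this package might do that, using only the Open/Seek/Read/Close behaviour exercised by the tests that follow; the package name, helper name, and chunk size are illustrative and not part of this change.

package dbfsexample // illustrative only, not part of the dbfs package

import (
	"context"
	"io"

	"code.gitea.io/gitea/models/dbfs"
)

// tailOffset returns the offset at which the last n lines of the named dbfs file
// begin, found by scanning backwards for '\n' in fixed-size chunks.
func tailOffset(ctx context.Context, name string, n int) (int64, error) {
	f, err := dbfs.Open(ctx, name)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	end, err := f.Seek(0, io.SeekEnd) // seek to the end to learn the file size
	if err != nil {
		return 0, err
	}
	buf := make([]byte, 4096) // chunk size chosen arbitrarily for the sketch
	newlines := 0
	for pos := end; pos > 0; {
		chunk := int64(len(buf))
		if pos < chunk {
			chunk = pos
		}
		pos -= chunk
		if _, err := f.Seek(pos, io.SeekStart); err != nil {
			return 0, err
		}
		if _, err := io.ReadFull(f, buf[:chunk]); err != nil {
			return 0, err
		}
		for i := chunk - 1; i >= 0; i-- {
			if buf[i] == '\n' {
				newlines++
				if newlines > n { // the (n+1)-th '\n' from the end terminates the line just before the tail
					return pos + i + 1, nil
				}
			}
		}
	}
	return 0, nil // fewer than n lines in total: the tail starts at the beginning
}

A caller would then Seek to the returned offset and read forward, so only the tail of the file is ever loaded.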

179
models/dbfs/dbfs_test.go Normal file
View file

@ -0,0 +1,179 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package dbfs
import (
"bufio"
"io"
"os"
"testing"
"code.gitea.io/gitea/models/db"
"github.com/stretchr/testify/assert"
_ "github.com/mattn/go-sqlite3"
)
func changeDefaultFileBlockSize(n int64) (restore func()) {
old := defaultFileBlockSize
defaultFileBlockSize = n
return func() {
defaultFileBlockSize = old
}
}
func TestDbfsBasic(t *testing.T) {
defer changeDefaultFileBlockSize(4)()
// test basic write/read
f, err := OpenFile(db.DefaultContext, "test.txt", os.O_RDWR|os.O_CREATE)
assert.NoError(t, err)
n, err := f.Write([]byte("0123456789")) // blocks: 0123 4567 89
assert.NoError(t, err)
assert.EqualValues(t, 10, n)
_, err = f.Seek(0, io.SeekStart)
assert.NoError(t, err)
buf, err := io.ReadAll(f)
assert.NoError(t, err)
assert.EqualValues(t, 10, len(buf))
assert.EqualValues(t, "0123456789", string(buf))
// write some new data
_, err = f.Seek(1, io.SeekStart)
assert.NoError(t, err)
_, err = f.Write([]byte("bcdefghi")) // blocks: 0bcd efgh i9
assert.NoError(t, err)
// read from offset
buf, err = io.ReadAll(f)
assert.NoError(t, err)
assert.EqualValues(t, "9", string(buf))
// read all
_, err = f.Seek(0, io.SeekStart)
assert.NoError(t, err)
buf, err = io.ReadAll(f)
assert.NoError(t, err)
assert.EqualValues(t, "0bcdefghi9", string(buf))
// write to new size
_, err = f.Seek(-1, io.SeekEnd)
assert.NoError(t, err)
_, err = f.Write([]byte("JKLMNOP")) // blocks: 0bcd efgh iJKL MNOP
assert.NoError(t, err)
_, err = f.Seek(0, io.SeekStart)
assert.NoError(t, err)
buf, err = io.ReadAll(f)
assert.NoError(t, err)
assert.EqualValues(t, "0bcdefghiJKLMNOP", string(buf))
// write beyond EOF and fill with zero
_, err = f.Seek(5, io.SeekCurrent)
assert.NoError(t, err)
_, err = f.Write([]byte("xyzu")) // blocks: 0bcd efgh iJKL MNOP 0000 0xyz u
assert.NoError(t, err)
_, err = f.Seek(0, io.SeekStart)
assert.NoError(t, err)
buf, err = io.ReadAll(f)
assert.NoError(t, err)
assert.EqualValues(t, "0bcdefghiJKLMNOP\x00\x00\x00\x00\x00xyzu", string(buf))
// write to the block with zeros
_, err = f.Seek(-6, io.SeekCurrent)
assert.NoError(t, err)
_, err = f.Write([]byte("ABCD")) // blocks: 0bcd efgh iJKL MNOP 000A BCDz u
assert.NoError(t, err)
_, err = f.Seek(0, io.SeekStart)
assert.NoError(t, err)
buf, err = io.ReadAll(f)
assert.NoError(t, err)
assert.EqualValues(t, "0bcdefghiJKLMNOP\x00\x00\x00ABCDzu", string(buf))
assert.NoError(t, f.Close())
// test rename
err = Rename(db.DefaultContext, "test.txt", "test2.txt")
assert.NoError(t, err)
_, err = OpenFile(db.DefaultContext, "test.txt", os.O_RDONLY)
assert.Error(t, err)
f, err = OpenFile(db.DefaultContext, "test2.txt", os.O_RDONLY)
assert.NoError(t, err)
assert.NoError(t, f.Close())
// test remove
err = Remove(db.DefaultContext, "test2.txt")
assert.NoError(t, err)
_, err = OpenFile(db.DefaultContext, "test2.txt", os.O_RDONLY)
assert.Error(t, err)
}
func TestDbfsReadWrite(t *testing.T) {
defer changeDefaultFileBlockSize(4)()
f1, err := OpenFile(db.DefaultContext, "test.log", os.O_RDWR|os.O_CREATE)
assert.NoError(t, err)
defer f1.Close()
f2, err := OpenFile(db.DefaultContext, "test.log", os.O_RDONLY)
assert.NoError(t, err)
defer f2.Close()
_, err = f1.Write([]byte("line 1\n"))
assert.NoError(t, err)
f2r := bufio.NewReader(f2)
line, err := f2r.ReadString('\n')
assert.NoError(t, err)
assert.EqualValues(t, "line 1\n", line)
_, err = f2r.ReadString('\n')
assert.ErrorIs(t, err, io.EOF)
_, err = f1.Write([]byte("line 2\n"))
assert.NoError(t, err)
line, err = f2r.ReadString('\n')
assert.NoError(t, err)
assert.EqualValues(t, "line 2\n", line)
_, err = f2r.ReadString('\n')
assert.ErrorIs(t, err, io.EOF)
}
func TestDbfsSeekWrite(t *testing.T) {
defer changeDefaultFileBlockSize(4)()
f, err := OpenFile(db.DefaultContext, "test2.log", os.O_RDWR|os.O_CREATE)
assert.NoError(t, err)
defer f.Close()
n, err := f.Write([]byte("111"))
assert.NoError(t, err)
_, err = f.Seek(int64(n), io.SeekStart)
assert.NoError(t, err)
_, err = f.Write([]byte("222"))
assert.NoError(t, err)
_, err = f.Seek(int64(n), io.SeekStart)
assert.NoError(t, err)
_, err = f.Write([]byte("333"))
assert.NoError(t, err)
fr, err := OpenFile(db.DefaultContext, "test2.log", os.O_RDONLY)
assert.NoError(t, err)
defer f.Close()
buf, err := io.ReadAll(fr)
assert.NoError(t, err)
assert.EqualValues(t, "111333", string(buf))
}

23
models/dbfs/main_test.go Normal file
View file

@ -0,0 +1,23 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package dbfs
import (
"path/filepath"
"testing"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/setting"
)
func init() {
setting.SetCustomPathAndConf("", "", "")
setting.LoadForTest()
}
func TestMain(m *testing.M) {
unittest.MainTest(m, &unittest.TestOptions{
GiteaRootPath: filepath.Join("..", ".."),
})
}

View file

@ -4,6 +4,7 @@
owner_name: user2
lower_name: repo1
name: repo1
default_branch: master
num_watches: 4
num_stars: 0
num_forks: 0
@ -34,6 +35,7 @@
owner_name: user2
lower_name: repo2
name: repo2
default_branch: master
num_watches: 0
num_stars: 1
num_forks: 0
@ -64,6 +66,7 @@
owner_name: user3
lower_name: repo3
name: repo3
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -94,6 +97,7 @@
owner_name: user5
lower_name: repo4
name: repo4
default_branch: master
num_watches: 0
num_stars: 1
num_forks: 0
@ -274,6 +278,7 @@
owner_name: user12
lower_name: repo10
name: repo10
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 1
@ -304,6 +309,7 @@
owner_name: user13
lower_name: repo11
name: repo11
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -425,6 +431,7 @@
owner_name: user2
lower_name: repo15
name: repo15
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -455,6 +462,7 @@
owner_name: user2
lower_name: repo16
name: repo16
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -905,6 +913,7 @@
owner_name: user2
lower_name: repo20
name: repo20
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -965,6 +974,7 @@
owner_name: user2
lower_name: utf8
name: utf8
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1055,6 +1065,7 @@
owner_name: user2
lower_name: commits_search_test
name: commits_search_test
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1085,6 +1096,7 @@
owner_name: user2
lower_name: git_hooks_test
name: git_hooks_test
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1115,6 +1127,7 @@
owner_name: limited_org
lower_name: public_repo_on_limited_org
name: public_repo_on_limited_org
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1145,6 +1158,7 @@
owner_name: limited_org
lower_name: private_repo_on_limited_org
name: private_repo_on_limited_org
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1175,6 +1189,7 @@
owner_name: privated_org
lower_name: public_repo_on_private_org
name: public_repo_on_private_org
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1205,6 +1220,7 @@
owner_name: privated_org
lower_name: private_repo_on_private_org
name: private_repo_on_private_org
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1235,6 +1251,7 @@
owner_name: user2
lower_name: glob
name: glob
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1295,6 +1312,7 @@
owner_name: user27
lower_name: template1
name: template1
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1355,6 +1373,7 @@
owner_name: org26
lower_name: repo_external_tracker
name: repo_external_tracker
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1385,6 +1404,7 @@
owner_name: org26
lower_name: repo_external_tracker_numeric
name: repo_external_tracker_numeric
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1415,6 +1435,7 @@
owner_name: org26
lower_name: repo_external_tracker_alpha
name: repo_external_tracker_alpha
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1445,6 +1466,7 @@
owner_name: user27
lower_name: repo49
name: repo49
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1475,6 +1497,7 @@
owner_name: user30
lower_name: repo50
name: repo50
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1505,6 +1528,7 @@
owner_name: user30
lower_name: repo51
name: repo51
default_branch: master
num_watches: 0
num_stars: 0
num_forks: 0
@ -1565,6 +1589,7 @@
owner_name: user30
lower_name: renderer
name: renderer
default_branch: master
is_archived: false
is_empty: false
is_private: false
@ -1592,6 +1617,7 @@
owner_name: user2
lower_name: lfs
name: lfs
default_branch: master
is_empty: false
is_archived: false
is_private: true

View file

@ -140,3 +140,14 @@
num_members: 1
includes_all_repositories: false
can_create_org_repo: false
-
id: 14
org_id: 3
lower_name: teamcreaterepo
name: teamCreateRepo
authorize: 2 # write
num_repos: 0
num_members: 1
includes_all_repositories: false
can_create_org_repo: true

View file

@ -93,3 +93,9 @@
org_id: 19
team_id: 6
uid: 31
-
id: 17
org_id: 3
team_id: 14
uid: 2

View file

@ -104,7 +104,7 @@
num_following: 0
num_stars: 0
num_repos: 3
num_teams: 4
num_teams: 5
num_members: 3
visibility: 0
repo_admin_change_team_access: false

View file

@ -357,7 +357,7 @@ func (c *Comment) LoadPoster(ctx context.Context) (err error) {
return nil
}
c.Poster, err = user_model.GetUserByID(ctx, c.PosterID)
c.Poster, err = user_model.GetPossibleUserByID(ctx, c.PosterID)
if err != nil {
if user_model.IsErrUserNotExist(err) {
c.PosterID = -1

View file

@ -29,32 +29,13 @@ func (comments CommentList) LoadPosters(ctx context.Context) error {
return nil
}
posterIDs := comments.getPosterIDs()
posterMaps := make(map[int64]*user_model.User, len(posterIDs))
left := len(posterIDs)
for left > 0 {
limit := db.DefaultMaxInSize
if left < limit {
limit = left
}
err := db.GetEngine(ctx).
In("id", posterIDs[:limit]).
Find(&posterMaps)
if err != nil {
return err
}
left -= limit
posterIDs = posterIDs[limit:]
posterMaps, err := getPosters(ctx, comments.getPosterIDs())
if err != nil {
return err
}
for _, comment := range comments {
if comment.PosterID <= 0 {
continue
}
var ok bool
if comment.Poster, ok = posterMaps[comment.PosterID]; !ok {
comment.Poster = user_model.NewGhostUser()
}
comment.Poster = getPoster(comment.PosterID, posterMaps)
}
return nil
}

View file

@ -237,7 +237,7 @@ func (issue *Issue) LoadLabels(ctx context.Context) (err error) {
// LoadPoster loads poster
func (issue *Issue) LoadPoster(ctx context.Context) (err error) {
if issue.Poster == nil {
issue.Poster, err = user_model.GetUserByID(ctx, issue.PosterID)
issue.Poster, err = user_model.GetPossibleUserByID(ctx, issue.PosterID)
if err != nil {
issue.PosterID = -1
issue.Poster = user_model.NewGhostUser()
@ -1253,6 +1253,8 @@ func (opts *IssuesOptions) setupSessionNoLimit(sess *xorm.Session) {
if opts.ProjectID > 0 {
sess.Join("INNER", "project_issue", "issue.id = project_issue.issue_id").
And("project_issue.project_id=?", opts.ProjectID)
} else if opts.ProjectID == db.NoneID { // show those that are in no project
sess.And(builder.NotIn("issue.id", builder.Select("issue_id").From("project_issue")))
}
if opts.ProjectBoardID != 0 {
@ -1574,6 +1576,7 @@ type IssueStatsOptions struct {
RepoID int64
Labels string
MilestoneID int64
ProjectID int64
AssigneeID int64
MentionedID int64
PosterID int64
@ -1652,6 +1655,11 @@ func getIssueStatsChunk(opts *IssueStatsOptions, issueIDs []int64) (*IssueStats,
sess.And("issue.milestone_id = ?", opts.MilestoneID)
}
if opts.ProjectID > 0 {
sess.Join("INNER", "project_issue", "issue.id = project_issue.issue_id").
And("project_issue.project_id=?", opts.ProjectID)
}
if opts.AssigneeID > 0 {
applyAssigneeCondition(sess, opts.AssigneeID)
}

View file

@ -86,7 +86,18 @@ func (issues IssueList) loadPosters(ctx context.Context) error {
return nil
}
posterIDs := issues.getPosterIDs()
posterMaps, err := getPosters(ctx, issues.getPosterIDs())
if err != nil {
return err
}
for _, issue := range issues {
issue.Poster = getPoster(issue.PosterID, posterMaps)
}
return nil
}
func getPosters(ctx context.Context, posterIDs []int64) (map[int64]*user_model.User, error) {
posterMaps := make(map[int64]*user_model.User, len(posterIDs))
left := len(posterIDs)
for left > 0 {
@ -98,22 +109,26 @@ func (issues IssueList) loadPosters(ctx context.Context) error {
In("id", posterIDs[:limit]).
Find(&posterMaps)
if err != nil {
return err
return nil, err
}
left -= limit
posterIDs = posterIDs[limit:]
}
return posterMaps, nil
}
for _, issue := range issues {
if issue.PosterID <= 0 {
continue
}
var ok bool
if issue.Poster, ok = posterMaps[issue.PosterID]; !ok {
issue.Poster = user_model.NewGhostUser()
}
func getPoster(posterID int64, posterMaps map[int64]*user_model.User) *user_model.User {
if posterID == user_model.ActionsUserID {
return user_model.NewActionsUser()
}
return nil
if posterID <= 0 {
return nil
}
poster, ok := posterMaps[posterID]
if !ok {
return user_model.NewGhostUser()
}
return poster
}
func (issues IssueList) getIssueIDs() []int64 {

View file

@ -8,6 +8,7 @@ import (
"context"
"fmt"
"io"
"strconv"
"strings"
"code.gitea.io/gitea/models/db"
@ -132,6 +133,27 @@ const (
PullRequestStatusAncestor
)
func (status PullRequestStatus) String() string {
switch status {
case PullRequestStatusConflict:
return "CONFLICT"
case PullRequestStatusChecking:
return "CHECKING"
case PullRequestStatusMergeable:
return "MERGEABLE"
case PullRequestStatusManuallyMerged:
return "MANUALLY_MERGED"
case PullRequestStatusError:
return "ERROR"
case PullRequestStatusEmpty:
return "EMPTY"
case PullRequestStatusAncestor:
return "ANCESTOR"
default:
return strconv.Itoa(int(status))
}
}
// PullRequestFlow the flow of pull request
type PullRequestFlow int
@ -202,6 +224,42 @@ func DeletePullsByBaseRepoID(ctx context.Context, repoID int64) error {
return err
}
// ColorFormat writes a colored string to identify this struct
func (pr *PullRequest) ColorFormat(s fmt.State) {
if pr == nil {
log.ColorFprintf(s, "PR[%d]%s#%d[%s...%s:%s]",
log.NewColoredIDValue(0),
log.NewColoredValue("<nil>/<nil>"),
log.NewColoredIDValue(0),
log.NewColoredValue("<nil>"),
log.NewColoredValue("<nil>/<nil>"),
log.NewColoredValue("<nil>"),
)
return
}
log.ColorFprintf(s, "PR[%d]", log.NewColoredIDValue(pr.ID))
if pr.BaseRepo != nil {
log.ColorFprintf(s, "%s#%d[%s...", log.NewColoredValue(pr.BaseRepo.FullName()),
log.NewColoredIDValue(pr.Index), log.NewColoredValue(pr.BaseBranch))
} else {
log.ColorFprintf(s, "Repo[%d]#%d[%s...", log.NewColoredIDValue(pr.BaseRepoID),
log.NewColoredIDValue(pr.Index), log.NewColoredValue(pr.BaseBranch))
}
if pr.HeadRepoID == pr.BaseRepoID {
log.ColorFprintf(s, "%s]", log.NewColoredValue(pr.HeadBranch))
} else if pr.HeadRepo != nil {
log.ColorFprintf(s, "%s:%s]", log.NewColoredValue(pr.HeadRepo.FullName()), log.NewColoredValue(pr.HeadBranch))
} else {
log.ColorFprintf(s, "Repo[%d]:%s]", log.NewColoredIDValue(pr.HeadRepoID), log.NewColoredValue(pr.HeadBranch))
}
}
// String represents the pr as a simple string
func (pr *PullRequest) String() string {
return log.ColorFormatAsString(pr)
}
// MustHeadUserName returns the HeadRepo's username if failed return blank
func (pr *PullRequest) MustHeadUserName(ctx context.Context) string {
if err := pr.LoadHeadRepo(ctx); err != nil {
@ -234,7 +292,8 @@ func (pr *PullRequest) LoadAttributes(ctx context.Context) (err error) {
return nil
}
// LoadHeadRepo loads the head repository
// LoadHeadRepo loads the head repository, pr.HeadRepo will remain nil if it does not exist
// and thus ErrRepoNotExist will never be returned
func (pr *PullRequest) LoadHeadRepo(ctx context.Context) (err error) {
if !pr.isHeadRepoLoaded && pr.HeadRepo == nil && pr.HeadRepoID > 0 {
if pr.HeadRepoID == pr.BaseRepoID {
@ -249,14 +308,14 @@ func (pr *PullRequest) LoadHeadRepo(ctx context.Context) (err error) {
pr.HeadRepo, err = repo_model.GetRepositoryByID(ctx, pr.HeadRepoID)
if err != nil && !repo_model.IsErrRepoNotExist(err) { // Head repo may be deleted, but it should still work
return fmt.Errorf("GetRepositoryByID(head): %w", err)
return fmt.Errorf("pr[%d].LoadHeadRepo[%d]: %w", pr.ID, pr.HeadRepoID, err)
}
pr.isHeadRepoLoaded = true
}
return nil
}
// LoadBaseRepo loads the target repository
// LoadBaseRepo loads the target repository. ErrRepoNotExist may be returned.
func (pr *PullRequest) LoadBaseRepo(ctx context.Context) (err error) {
if pr.BaseRepo != nil {
return nil
@ -274,7 +333,7 @@ func (pr *PullRequest) LoadBaseRepo(ctx context.Context) (err error) {
pr.BaseRepo, err = repo_model.GetRepositoryByID(ctx, pr.BaseRepoID)
if err != nil {
return fmt.Errorf("repo_model.GetRepositoryByID(base): %w", err)
return fmt.Errorf("pr[%d].LoadBaseRepo[%d]: %w", pr.ID, pr.BaseRepoID, err)
}
return nil
}
@ -394,6 +453,11 @@ func (pr *PullRequest) IsAncestor() bool {
return pr.Status == PullRequestStatusAncestor
}
// IsFromFork returns true if this PR is from a fork.
func (pr *PullRequest) IsFromFork() bool {
return pr.HeadRepoID != pr.BaseRepoID
}
// SetMerged sets a pull request to merged and closes the corresponding issue
func (pr *PullRequest) SetMerged(ctx context.Context) (bool, error) {
if pr.HasMerged {

View file

@ -173,8 +173,9 @@ func (prs PullRequestList) loadAttributes(ctx context.Context) error {
for i := range issues {
set[issues[i].ID] = issues[i]
}
for i := range prs {
prs[i].Issue = set[prs[i].IssueID]
for _, pr := range prs {
pr.Issue = set[pr.IssueID]
pr.Issue.PullRequest = pr // panic here means issueIDs and prs are not in sync
}
return nil
}

View file

@ -158,7 +158,7 @@ func (r *Review) LoadReviewer(ctx context.Context) (err error) {
if r.ReviewerID == 0 || r.Reviewer != nil {
return
}
r.Reviewer, err = user_model.GetUserByID(ctx, r.ReviewerID)
r.Reviewer, err = user_model.GetPossibleUserByID(ctx, r.ReviewerID)
return err
}

View file

@ -453,6 +453,8 @@ var migrations = []Migration{
NewMigration("Add updated unix to LFSMetaObject", v1_19.AddUpdatedUnixToLFSMetaObject),
// v239 -> v240
NewMigration("Add scope for access_token", v1_19.AddScopeForAccessTokens),
// v240 -> v241
NewMigration("Add actions tables", v1_19.AddActionsTables),
}
// GetCurrentDBVersion returns the current db version

View file

@ -0,0 +1,176 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_19 //nolint
import (
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/xorm"
)
func AddActionsTables(x *xorm.Engine) error {
type ActionRunner struct {
ID int64
UUID string `xorm:"CHAR(36) UNIQUE"`
Name string `xorm:"VARCHAR(255)"`
OwnerID int64 `xorm:"index"` // org level runner, 0 means system
RepoID int64 `xorm:"index"` // repo level runner; if OwnerID is also zero, then it's a global runner
Description string `xorm:"TEXT"`
Base int // 0 native 1 docker 2 virtual machine
RepoRange string // glob match which repositories could use this runner
Token string `xorm:"-"`
TokenHash string `xorm:"UNIQUE"` // sha256 of token
TokenSalt string
// TokenLastEight string `xorm:"token_last_eight"` // it's unnecessary because we don't find runners by token
LastOnline timeutil.TimeStamp `xorm:"index"`
LastActive timeutil.TimeStamp `xorm:"index"`
// Store OS and Arch.
AgentLabels []string
// Store custom labels defined by the user.
CustomLabels []string
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated"`
Deleted timeutil.TimeStamp `xorm:"deleted"`
}
type ActionRunnerToken struct {
ID int64
Token string `xorm:"UNIQUE"`
OwnerID int64 `xorm:"index"` // org level runner, 0 means system
RepoID int64 `xorm:"index"` // repo level runner; if OwnerID is also zero, then it's a global runner
IsActive bool
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated"`
Deleted timeutil.TimeStamp `xorm:"deleted"`
}
type ActionRun struct {
ID int64
Title string
RepoID int64 `xorm:"index unique(repo_index)"`
OwnerID int64 `xorm:"index"`
WorkflowID string `xorm:"index"` // the name of workflow file
Index int64 `xorm:"index unique(repo_index)"` // a unique number for each run of a repository
TriggerUserID int64
Ref string
CommitSHA string
Event string
IsForkPullRequest bool
EventPayload string `xorm:"LONGTEXT"`
Status int `xorm:"index"`
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated"`
}
type ActionRunJob struct {
ID int64
RunID int64 `xorm:"index"`
RepoID int64 `xorm:"index"`
OwnerID int64 `xorm:"index"`
CommitSHA string `xorm:"index"`
IsForkPullRequest bool
Name string `xorm:"VARCHAR(255)"`
Attempt int64
WorkflowPayload []byte
JobID string `xorm:"VARCHAR(255)"` // job id in workflow, not job's id
Needs []string `xorm:"JSON TEXT"`
RunsOn []string `xorm:"JSON TEXT"`
TaskID int64 // the latest task of the job
Status int `xorm:"index"`
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated index"`
}
type Repository struct {
NumActionRuns int `xorm:"NOT NULL DEFAULT 0"`
NumClosedActionRuns int `xorm:"NOT NULL DEFAULT 0"`
}
type ActionRunIndex db.ResourceIndex
type ActionTask struct {
ID int64
JobID int64
Attempt int64
RunnerID int64 `xorm:"index"`
Status int `xorm:"index"`
Started timeutil.TimeStamp `xorm:"index"`
Stopped timeutil.TimeStamp
RepoID int64 `xorm:"index"`
OwnerID int64 `xorm:"index"`
CommitSHA string `xorm:"index"`
IsForkPullRequest bool
TokenHash string `xorm:"UNIQUE"` // sha256 of token
TokenSalt string
TokenLastEight string `xorm:"index token_last_eight"`
LogFilename string // file name of log
LogInStorage bool // read log from database or from storage
LogLength int64 // lines count
LogSize int64 // blob size
LogIndexes []int64 `xorm:"LONGBLOB"` // line number to offset
LogExpired bool // files that are too old will be deleted
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated index"`
}
type ActionTaskStep struct {
ID int64
Name string `xorm:"VARCHAR(255)"`
TaskID int64 `xorm:"index unique(task_index)"`
Index int64 `xorm:"index unique(task_index)"`
RepoID int64 `xorm:"index"`
Status int `xorm:"index"`
LogIndex int64
LogLength int64
Started timeutil.TimeStamp
Stopped timeutil.TimeStamp
Created timeutil.TimeStamp `xorm:"created"`
Updated timeutil.TimeStamp `xorm:"updated"`
}
type dbfsMeta struct {
ID int64 `xorm:"pk autoincr"`
FullPath string `xorm:"VARCHAR(500) UNIQUE NOT NULL"`
BlockSize int64 `xorm:"BIGINT NOT NULL"`
FileSize int64 `xorm:"BIGINT NOT NULL"`
CreateTimestamp int64 `xorm:"BIGINT NOT NULL"`
ModifyTimestamp int64 `xorm:"BIGINT NOT NULL"`
}
type dbfsData struct {
ID int64 `xorm:"pk autoincr"`
Revision int64 `xorm:"BIGINT NOT NULL"`
MetaID int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
BlobOffset int64 `xorm:"BIGINT index(meta_offset) NOT NULL"`
BlobSize int64 `xorm:"BIGINT NOT NULL"`
BlobData []byte `xorm:"BLOB NOT NULL"`
}
return x.Sync(
new(ActionRunner),
new(ActionRunnerToken),
new(ActionRun),
new(ActionRunJob),
new(Repository),
new(ActionRunIndex),
new(ActionTask),
new(ActionTaskStep),
new(dbfsMeta),
new(dbfsData),
)
}

View file

@ -397,13 +397,14 @@ func (org *Organization) GetOrgUserMaxAuthorizeLevel(uid int64) (perm.AccessMode
}
// GetUsersWhoCanCreateOrgRepo returns users which are able to create repo in organization
func GetUsersWhoCanCreateOrgRepo(ctx context.Context, orgID int64) ([]*user_model.User, error) {
users := make([]*user_model.User, 0, 10)
func GetUsersWhoCanCreateOrgRepo(ctx context.Context, orgID int64) (map[int64]*user_model.User, error) {
// Use a map, in order to de-duplicate users.
users := make(map[int64]*user_model.User)
return users, db.GetEngine(ctx).
Join("INNER", "`team_user`", "`team_user`.uid=`user`.id").
Join("INNER", "`team`", "`team`.id=`team_user`.team_id").
Where(builder.Eq{"team.can_create_org_repo": true}.Or(builder.Eq{"team.authorize": perm.AccessModeOwner})).
And("team_user.org_id = ?", orgID).Asc("`user`.name").Find(&users)
And("team_user.org_id = ?", orgID).Find(&users)
}
// SearchOrganizationsOptions options to filter organizations

View file

@ -91,11 +91,12 @@ func TestUser_GetTeams(t *testing.T) {
org := unittest.AssertExistsAndLoadBean(t, &organization.Organization{ID: 3})
teams, err := org.LoadTeams()
assert.NoError(t, err)
if assert.Len(t, teams, 4) {
if assert.Len(t, teams, 5) {
assert.Equal(t, int64(1), teams[0].ID)
assert.Equal(t, int64(2), teams[1].ID)
assert.Equal(t, int64(12), teams[2].ID)
assert.Equal(t, int64(7), teams[3].ID)
assert.Equal(t, int64(14), teams[3].ID)
assert.Equal(t, int64(7), teams[4].ID)
}
}
@ -292,7 +293,7 @@ func TestUser_GetUserTeamIDs(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, expected, teamIDs)
}
testSuccess(2, []int64{1, 2})
testSuccess(2, []int64{1, 2, 14})
testSuccess(4, []int64{2})
testSuccess(unittest.NonexistentID, []int64{})
}
@ -447,7 +448,7 @@ func TestGetUsersWhoCanCreateOrgRepo(t *testing.T) {
users, err = organization.GetUsersWhoCanCreateOrgRepo(db.DefaultContext, 7)
assert.NoError(t, err)
assert.Len(t, users, 1)
assert.EqualValues(t, 5, users[0].ID)
assert.NotNil(t, users[5])
}
func TestUser_RemoveOrgRepo(t *testing.T) {

View file

@ -0,0 +1,63 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package conda
import (
"context"
"strings"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/packages"
conda_module "code.gitea.io/gitea/modules/packages/conda"
"xorm.io/builder"
)
type FileSearchOptions struct {
OwnerID int64
Channel string
Subdir string
Filename string
}
// SearchFiles gets all files matching the search options
func SearchFiles(ctx context.Context, opts *FileSearchOptions) ([]*packages.PackageFile, error) {
var cond builder.Cond = builder.Eq{
"package.type": packages.TypeConda,
"package.owner_id": opts.OwnerID,
"package_version.is_internal": false,
}
if opts.Filename != "" {
cond = cond.And(builder.Eq{
"package_file.lower_name": strings.ToLower(opts.Filename),
})
}
var versionPropsCond builder.Cond = builder.Eq{
"package_property.ref_type": packages.PropertyTypePackage,
"package_property.name": conda_module.PropertyChannel,
"package_property.value": opts.Channel,
}
cond = cond.And(builder.In("package.id", builder.Select("package_property.ref_id").Where(versionPropsCond).From("package_property")))
var filePropsCond builder.Cond = builder.Eq{
"package_property.ref_type": packages.PropertyTypeFile,
"package_property.name": conda_module.PropertySubdir,
"package_property.value": opts.Subdir,
}
cond = cond.And(builder.In("package_file.id", builder.Select("package_property.ref_id").Where(filePropsCond).From("package_property")))
sess := db.GetEngine(ctx).
Select("package_file.*").
Table("package_file").
Join("INNER", "package_version", "package_version.id = package_file.version_id").
Join("INNER", "package", "package.id = package_version.package_id").
Where(cond)
pfs := make([]*packages.PackageFile, 0, 10)
return pfs, sess.Find(&pfs)
}

View file

@ -13,6 +13,7 @@ import (
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/packages/composer"
"code.gitea.io/gitea/modules/packages/conan"
"code.gitea.io/gitea/modules/packages/conda"
"code.gitea.io/gitea/modules/packages/container"
"code.gitea.io/gitea/modules/packages/helm"
"code.gitea.io/gitea/modules/packages/maven"
@ -132,6 +133,8 @@ func GetPackageDescriptor(ctx context.Context, pv *PackageVersion) (*PackageDesc
metadata = &composer.Metadata{}
case TypeConan:
metadata = &conan.Metadata{}
case TypeConda:
metadata = &conda.VersionMetadata{}
case TypeContainer:
metadata = &container.Metadata{}
case TypeGeneric:

View file

@ -32,6 +32,7 @@ type Type string
const (
TypeComposer Type = "composer"
TypeConan Type = "conan"
TypeConda Type = "conda"
TypeContainer Type = "container"
TypeGeneric Type = "generic"
TypeHelm Type = "helm"
@ -47,6 +48,7 @@ const (
var TypeList = []Type{
TypeComposer,
TypeConan,
TypeConda,
TypeContainer,
TypeGeneric,
TypeHelm,
@ -66,6 +68,8 @@ func (pt Type) Name() string {
return "Composer"
case TypeConan:
return "Conan"
case TypeConda:
return "Conda"
case TypeContainer:
return "Container"
case TypeGeneric:
@ -97,6 +101,8 @@ func (pt Type) SVGName() string {
return "gitea-composer"
case TypeConan:
return "gitea-conan"
case TypeConda:
return "gitea-conda"
case TypeContainer:
return "octicon-container"
case TypeGeneric:

View file

@ -11,6 +11,7 @@ import (
_ "image/jpeg" // Needed for jpeg support
actions_model "code.gitea.io/gitea/models/actions"
activities_model "code.gitea.io/gitea/models/activities"
admin_model "code.gitea.io/gitea/models/admin"
asymkey_model "code.gitea.io/gitea/models/asymkey"
@ -26,6 +27,7 @@ import (
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/models/webhook"
actions_module "code.gitea.io/gitea/modules/actions"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/storage"
@ -52,6 +54,12 @@ func DeleteRepository(doer *user_model.User, uid, repoID int64) error {
defer committer.Close()
sess := db.GetEngine(ctx)
// Query the action tasks of this repo first; they are needed later, after their rows have been deleted, to remove the log files
tasks, err := actions_model.FindTasks(ctx, actions_model.FindTaskOptions{RepoID: repoID})
if err != nil {
return fmt.Errorf("find actions tasks of repo %v: %w", repoID, err)
}
// In case it is an organization.
org, err := user_model.GetUserByID(ctx, uid)
if err != nil {
@ -152,6 +160,11 @@ func DeleteRepository(doer *user_model.User, uid, repoID int64) error {
&repo_model.Watch{RepoID: repoID},
&webhook.Webhook{RepoID: repoID},
&secret_model.Secret{RepoID: repoID},
&actions_model.ActionTaskStep{RepoID: repoID},
&actions_model.ActionTask{RepoID: repoID},
&actions_model.ActionRunJob{RepoID: repoID},
&actions_model.ActionRun{RepoID: repoID},
&actions_model.ActionRunner{RepoID: repoID},
); err != nil {
return fmt.Errorf("deleteBeans: %w", err)
}
@ -315,6 +328,15 @@ func DeleteRepository(doer *user_model.User, uid, repoID int64) error {
}
}
// Finally, delete action logs after the actions have already been deleted to avoid new log files
for _, task := range tasks {
err := actions_module.RemoveLogs(ctx, task.LogInStorage, task.LogFilename)
if err != nil {
log.Error("remove log file %q: %v", task.LogFilename, err)
// go on
}
}
return nil
}

View file

@ -142,6 +142,9 @@ type Repository struct {
NumProjects int `xorm:"NOT NULL DEFAULT 0"`
NumClosedProjects int `xorm:"NOT NULL DEFAULT 0"`
NumOpenProjects int `xorm:"-"`
NumActionRuns int `xorm:"NOT NULL DEFAULT 0"`
NumClosedActionRuns int `xorm:"NOT NULL DEFAULT 0"`
NumOpenActionRuns int `xorm:"-"`
IsPrivate bool `xorm:"INDEX"`
IsEmpty bool `xorm:"INDEX"`
@ -225,15 +228,11 @@ func (repo *Repository) IsBroken() bool {
// AfterLoad is invoked from XORM after setting the values of all fields of this object.
func (repo *Repository) AfterLoad() {
// FIXME: use models migration to solve all at once.
if len(repo.DefaultBranch) == 0 {
repo.DefaultBranch = setting.Repository.DefaultBranch
}
repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls
repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones
repo.NumOpenProjects = repo.NumProjects - repo.NumClosedProjects
repo.NumOpenActionRuns = repo.NumActionRuns - repo.NumClosedActionRuns
}
// LoadAttributes loads attributes of the repository.

View file

@ -494,7 +494,7 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
}
if opts.OnlyShowRelevant {
// Only show a repo that either has a topic or description.
// Only show a repo that has at least a topic, an icon, or a description
subQueryCond := builder.NewCond()
// Topic checking. Topics are present.
@ -504,13 +504,13 @@ func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
subQueryCond = subQueryCond.Or(builder.And(builder.Neq{"topics": "null"}, builder.Neq{"topics": "[]"}))
}
// Description checking. Description not empty.
// Description checking. Description not empty
subQueryCond = subQueryCond.Or(builder.Neq{"description": ""})
// Repo has a avatar.
// Repo has an avatar
subQueryCond = subQueryCond.Or(builder.Neq{"avatar": ""})
// Always hide repo's that are empty.
// Always hide repos that are empty
subQueryCond = subQueryCond.And(builder.Eq{"is_empty": false})
cond = cond.And(subQueryCond)

View file

@ -174,7 +174,7 @@ func (r *RepoUnit) BeforeSet(colName string, val xorm.Cell) {
r.Config = new(PullRequestsConfig)
case unit.TypeIssues:
r.Config = new(IssuesConfig)
case unit.TypeCode, unit.TypeReleases, unit.TypeWiki, unit.TypeProjects, unit.TypePackages:
case unit.TypeCode, unit.TypeReleases, unit.TypeWiki, unit.TypeProjects, unit.TypePackages, unit.TypeActions:
fallthrough
default:
r.Config = new(UnitConfig)

View file

@ -27,6 +27,7 @@ const (
TypeExternalTracker // 7 ExternalTracker
TypeProjects // 8 Kanban board
TypePackages // 9 Packages
TypeActions // 10 Actions
)
// Value returns integer value for unit type
@ -54,6 +55,8 @@ func (u Type) String() string {
return "TypeProjects"
case TypePackages:
return "TypePackages"
case TypeActions:
return "TypeActions"
}
return fmt.Sprintf("Unknown Type %d", u)
}
@ -77,6 +80,7 @@ var (
TypeExternalTracker,
TypeProjects,
TypePackages,
TypeActions,
}
// DefaultRepoUnits contains the default unit types
@ -90,6 +94,12 @@ var (
TypePackages,
}
// DefaultForkRepoUnits contains the default unit types for forks
DefaultForkRepoUnits = []Type{
TypeCode,
TypePullRequests,
}
// NotAllowedDefaultRepoUnits contains units that can't be default
NotAllowedDefaultRepoUnits = []Type{
TypeExternalWiki,
@ -106,26 +116,41 @@ var (
DisabledRepoUnits = []Type{}
)
// LoadUnitConfig load units from settings
func LoadUnitConfig() {
setDefaultRepoUnits := FindUnitTypes(setting.Repository.DefaultRepoUnits...)
// Default repo units set if setting is not empty
if len(setDefaultRepoUnits) > 0 {
// Get valid set of default repository units from settings
func validateDefaultRepoUnits(defaultUnits, settingDefaultUnits []Type) []Type {
units := defaultUnits
// Use setting if not empty
if len(settingDefaultUnits) > 0 {
// MustRepoUnits required as default
DefaultRepoUnits = make([]Type, len(MustRepoUnits))
copy(DefaultRepoUnits, MustRepoUnits)
for _, defaultU := range setDefaultRepoUnits {
if !defaultU.CanBeDefault() {
log.Warn("Not allowed as default unit: %s", defaultU.String())
units = make([]Type, len(MustRepoUnits))
copy(units, MustRepoUnits)
for _, settingUnit := range settingDefaultUnits {
if !settingUnit.CanBeDefault() {
log.Warn("Not allowed as default unit: %s", settingUnit.String())
continue
}
// MustRepoUnits already added
if defaultU.CanDisable() {
DefaultRepoUnits = append(DefaultRepoUnits, defaultU)
if settingUnit.CanDisable() {
units = append(units, settingUnit)
}
}
}
// Remove disabled units
for _, disabledUnit := range DisabledRepoUnits {
for i, unit := range units {
if unit == disabledUnit {
units = append(units[:i], units[i+1:]...)
}
}
}
return units
}
// LoadUnitConfig load units from settings
func LoadUnitConfig() {
DisabledRepoUnits = FindUnitTypes(setting.Repository.DisabledRepoUnits...)
// Check that must units are not disabled
for i, disabledU := range DisabledRepoUnits {
@ -134,14 +159,11 @@ func LoadUnitConfig() {
DisabledRepoUnits = append(DisabledRepoUnits[:i], DisabledRepoUnits[i+1:]...)
}
}
// Remove disabled units from default units
for _, disabledU := range DisabledRepoUnits {
for i, defaultU := range DefaultRepoUnits {
if defaultU == disabledU {
DefaultRepoUnits = append(DefaultRepoUnits[:i], DefaultRepoUnits[i+1:]...)
}
}
}
setDefaultRepoUnits := FindUnitTypes(setting.Repository.DefaultRepoUnits...)
DefaultRepoUnits = validateDefaultRepoUnits(DefaultRepoUnits, setDefaultRepoUnits)
setDefaultForkRepoUnits := FindUnitTypes(setting.Repository.DefaultForkRepoUnits...)
DefaultForkRepoUnits = validateDefaultRepoUnits(DefaultForkRepoUnits, setDefaultForkRepoUnits)
}
// UnitGlobalDisabled checks if unit type is global disabled
@ -288,6 +310,15 @@ var (
perm.AccessModeRead,
}
UnitActions = Unit{
TypeActions,
"actions.actions",
"/actions",
"actions.unit.desc",
7,
perm.AccessModeOwner,
}
// Units contains all the units
Units = map[Type]Unit{
TypeCode: UnitCode,
@ -299,6 +330,7 @@ var (
TypeExternalWiki: UnitExternalWiki,
TypeProjects: UnitProjects,
TypePackages: UnitPackages,
TypeActions: UnitActions,
}
)

View file

@ -104,6 +104,8 @@ func MainTest(m *testing.M, testOpts *TestOptions) {
setting.Packages.Storage.Path = filepath.Join(setting.AppDataPath, "packages")
setting.Actions.Storage.Path = filepath.Join(setting.AppDataPath, "actions_log")
setting.Git.HomePath = filepath.Join(setting.AppDataPath, "home")
setting.IncomingEmail.ReplyToAddress = "incoming+%{token}@localhost"

View file

@ -560,32 +560,6 @@ func GetUserSalt() (string, error) {
return hex.EncodeToString(rBytes), nil
}
// NewGhostUser creates and returns a fake user for someone has deleted their account.
func NewGhostUser() *User {
return &User{
ID: -1,
Name: "Ghost",
LowerName: "ghost",
}
}
// NewReplaceUser creates and returns a fake user for external user
func NewReplaceUser(name string) *User {
return &User{
ID: -1,
Name: name,
LowerName: strings.ToLower(name),
}
}
// IsGhost check if user is fake user for a deleted account
func (u *User) IsGhost() bool {
if u == nil {
return false
}
return u.ID == -1 && u.Name == "Ghost"
}
var (
reservedUsernames = []string{
".",
@ -623,6 +597,7 @@ var (
"swagger.v1.json",
"user",
"v2",
"gitea-actions",
}
reservedUserPatterns = []string{"*.keys", "*.gpg", "*.rss", "*.atom"}
@ -1014,6 +989,20 @@ func GetUserByID(ctx context.Context, id int64) (*User, error) {
return u, nil
}
// GetPossibleUserByID returns the user if id > 0, or returns a system user (ghost or actions user) if id < 0
func GetPossibleUserByID(ctx context.Context, id int64) (*User, error) {
switch id {
case -1:
return NewGhostUser(), nil
case ActionsUserID:
return NewActionsUser(), nil
case 0:
return nil, ErrUserNotExist{}
default:
return GetUserByID(ctx, id)
}
}
// GetUserByNameCtx returns user by given name.
func GetUserByName(ctx context.Context, name string) (*User, error) {
if len(name) == 0 {

View file

@ -0,0 +1,64 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package user
import (
"strings"
"code.gitea.io/gitea/modules/structs"
)
// NewGhostUser creates and returns a fake user for someone who has deleted their account.
func NewGhostUser() *User {
return &User{
ID: -1,
Name: "Ghost",
LowerName: "ghost",
}
}
// IsGhost checks if the user is the fake user for a deleted account
func (u *User) IsGhost() bool {
if u == nil {
return false
}
return u.ID == -1 && u.Name == "Ghost"
}
// NewReplaceUser creates and returns a fake user for an external user
func NewReplaceUser(name string) *User {
return &User{
ID: -1,
Name: name,
LowerName: strings.ToLower(name),
}
}
const (
ActionsUserID = -2
ActionsUserName = "gitea-actions"
ActionsFullName = "Gitea Actions"
ActionsEmail = "teabot@gitea.io"
)
// NewActionsUser creates and returns a fake user for running the actions.
func NewActionsUser() *User {
return &User{
ID: ActionsUserID,
Name: ActionsUserName,
LowerName: ActionsUserName,
IsActive: true,
FullName: ActionsFullName,
Email: ActionsEmail,
KeepEmailPrivate: true,
LoginName: ActionsUserName,
Type: UserTypeIndividual,
AllowCreateOrganization: true,
Visibility: structs.VisibleTypePublic,
}
}
func (u *User) IsActions() bool {
return u != nil && u.ID == ActionsUserID
}

View file

@ -463,41 +463,6 @@ func CountWebhooksByOpts(opts *ListWebhookOptions) (int64, error) {
return db.GetEngine(db.DefaultContext).Where(opts.toCond()).Count(&Webhook{})
}
// GetDefaultWebhooks returns all admin-default webhooks.
func GetDefaultWebhooks(ctx context.Context) ([]*Webhook, error) {
webhooks := make([]*Webhook, 0, 5)
return webhooks, db.GetEngine(ctx).
Where("repo_id=? AND org_id=? AND is_system_webhook=?", 0, 0, false).
Find(&webhooks)
}
// GetSystemOrDefaultWebhook returns admin system or default webhook by given ID.
func GetSystemOrDefaultWebhook(id int64) (*Webhook, error) {
webhook := &Webhook{ID: id}
has, err := db.GetEngine(db.DefaultContext).
Where("repo_id=? AND org_id=?", 0, 0).
Get(webhook)
if err != nil {
return nil, err
} else if !has {
return nil, ErrWebhookNotExist{ID: id}
}
return webhook, nil
}
// GetSystemWebhooks returns all admin system webhooks.
func GetSystemWebhooks(ctx context.Context, isActive util.OptionalBool) ([]*Webhook, error) {
webhooks := make([]*Webhook, 0, 5)
if isActive.IsNone() {
return webhooks, db.GetEngine(ctx).
Where("repo_id=? AND org_id=? AND is_system_webhook=?", 0, 0, true).
Find(&webhooks)
}
return webhooks, db.GetEngine(ctx).
Where("repo_id=? AND org_id=? AND is_system_webhook=? AND is_active = ?", 0, 0, true, isActive.IsTrue()).
Find(&webhooks)
}
// UpdateWebhook updates information of webhook.
func UpdateWebhook(w *Webhook) error {
_, err := db.GetEngine(db.DefaultContext).ID(w.ID).AllCols().Update(w)
@ -545,44 +510,3 @@ func DeleteWebhookByOrgID(orgID, id int64) error {
OrgID: orgID,
})
}
// DeleteDefaultSystemWebhook deletes an admin-configured default or system webhook (where Org and Repo ID both 0)
func DeleteDefaultSystemWebhook(id int64) error {
ctx, committer, err := db.TxContext(db.DefaultContext)
if err != nil {
return err
}
defer committer.Close()
count, err := db.GetEngine(ctx).
Where("repo_id=? AND org_id=?", 0, 0).
Delete(&Webhook{ID: id})
if err != nil {
return err
} else if count == 0 {
return ErrWebhookNotExist{ID: id}
}
if _, err := db.DeleteByBean(ctx, &HookTask{HookID: id}); err != nil {
return err
}
return committer.Commit()
}
// CopyDefaultWebhooksToRepo creates copies of the default webhooks in a new repo
func CopyDefaultWebhooksToRepo(ctx context.Context, repoID int64) error {
ws, err := GetDefaultWebhooks(ctx)
if err != nil {
return fmt.Errorf("GetDefaultWebhooks: %w", err)
}
for _, w := range ws {
w.ID = 0
w.RepoID = repoID
if err := CreateWebhook(ctx, w); err != nil {
return fmt.Errorf("CreateWebhook: %w", err)
}
}
return nil
}

View file

@ -0,0 +1,81 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package webhook
import (
"context"
"fmt"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/util"
)
// GetDefaultWebhooks returns all admin-default webhooks.
func GetDefaultWebhooks(ctx context.Context) ([]*Webhook, error) {
webhooks := make([]*Webhook, 0, 5)
return webhooks, db.GetEngine(ctx).
Where("repo_id=? AND org_id=? AND is_system_webhook=?", 0, 0, false).
Find(&webhooks)
}
// GetSystemOrDefaultWebhook returns admin system or default webhook by given ID.
func GetSystemOrDefaultWebhook(ctx context.Context, id int64) (*Webhook, error) {
webhook := &Webhook{ID: id}
has, err := db.GetEngine(ctx).
Where("repo_id=? AND org_id=?", 0, 0).
Get(webhook)
if err != nil {
return nil, err
} else if !has {
return nil, ErrWebhookNotExist{ID: id}
}
return webhook, nil
}
// GetSystemWebhooks returns all admin system webhooks.
func GetSystemWebhooks(ctx context.Context, isActive util.OptionalBool) ([]*Webhook, error) {
webhooks := make([]*Webhook, 0, 5)
if isActive.IsNone() {
return webhooks, db.GetEngine(ctx).
Where("repo_id=? AND org_id=? AND is_system_webhook=?", 0, 0, true).
Find(&webhooks)
}
return webhooks, db.GetEngine(ctx).
Where("repo_id=? AND org_id=? AND is_system_webhook=? AND is_active = ?", 0, 0, true, isActive.IsTrue()).
Find(&webhooks)
}
// DeleteDefaultSystemWebhook deletes an admin-configured default or system webhook (where Org and Repo ID both 0)
func DeleteDefaultSystemWebhook(ctx context.Context, id int64) error {
return db.WithTx(ctx, func(ctx context.Context) error {
count, err := db.GetEngine(ctx).
Where("repo_id=? AND org_id=?", 0, 0).
Delete(&Webhook{ID: id})
if err != nil {
return err
} else if count == 0 {
return ErrWebhookNotExist{ID: id}
}
_, err = db.DeleteByBean(ctx, &HookTask{HookID: id})
return err
})
}
// CopyDefaultWebhooksToRepo creates copies of the default webhooks in a new repo
func CopyDefaultWebhooksToRepo(ctx context.Context, repoID int64) error {
ws, err := GetDefaultWebhooks(ctx)
if err != nil {
return fmt.Errorf("GetDefaultWebhooks: %v", err)
}
for _, w := range ws {
w.ID = 0
w.RepoID = repoID
if err := CreateWebhook(ctx, w); err != nil {
return fmt.Errorf("CreateWebhook: %v", err)
}
}
return nil
}

163
modules/actions/log.go Normal file
View file

@ -0,0 +1,163 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"bufio"
"context"
"fmt"
"io"
"os"
"strings"
"time"
"code.gitea.io/gitea/models/dbfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/storage"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
MaxLineSize = 64 * 1024
DBFSPrefix = "actions_log/"
timeFormat = "2006-01-02T15:04:05.0000000Z07:00"
defaultBufSize = MaxLineSize
)
func WriteLogs(ctx context.Context, filename string, offset int64, rows []*runnerv1.LogRow) ([]int, error) {
name := DBFSPrefix + filename
f, err := dbfs.OpenFile(ctx, name, os.O_WRONLY|os.O_CREATE)
if err != nil {
return nil, fmt.Errorf("dbfs OpenFile %q: %w", name, err)
}
defer f.Close()
if _, err := f.Seek(offset, io.SeekStart); err != nil {
return nil, fmt.Errorf("dbfs Seek %q: %w", name, err)
}
writer := bufio.NewWriterSize(f, defaultBufSize)
ns := make([]int, 0, len(rows))
for _, row := range rows {
n, err := writer.WriteString(FormatLog(row.Time.AsTime(), row.Content) + "\n")
if err != nil {
return nil, err
}
ns = append(ns, n)
}
if err := writer.Flush(); err != nil {
return nil, err
}
return ns, nil
}
func ReadLogs(ctx context.Context, inStorage bool, filename string, offset, limit int64) ([]*runnerv1.LogRow, error) {
f, err := openLogs(ctx, inStorage, filename)
if err != nil {
return nil, err
}
defer f.Close()
if _, err := f.Seek(offset, io.SeekStart); err != nil {
return nil, fmt.Errorf("file seek: %w", err)
}
scanner := bufio.NewScanner(f)
maxLineSize := len(timeFormat) + MaxLineSize + 1
scanner.Buffer(make([]byte, maxLineSize), maxLineSize)
var rows []*runnerv1.LogRow
for scanner.Scan() && (int64(len(rows)) < limit || limit < 0) {
t, c, err := ParseLog(scanner.Text())
if err != nil {
return nil, fmt.Errorf("parse log %q: %w", scanner.Text(), err)
}
rows = append(rows, &runnerv1.LogRow{
Time: timestamppb.New(t),
Content: c,
})
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("scan: %w", err)
}
return rows, nil
}
func TransferLogs(ctx context.Context, filename string) (func(), error) {
name := DBFSPrefix + filename
remove := func() {
if err := dbfs.Remove(ctx, name); err != nil {
log.Warn("dbfs remove %q: %v", name, err)
}
}
f, err := dbfs.Open(ctx, name)
if err != nil {
return nil, fmt.Errorf("dbfs open %q: %w", name, err)
}
defer f.Close()
if _, err := storage.Actions.Save(filename, f, -1); err != nil {
return nil, fmt.Errorf("storage save %q: %w", filename, err)
}
return remove, nil
}
func RemoveLogs(ctx context.Context, inStorage bool, filename string) error {
if !inStorage {
name := DBFSPrefix + filename
err := dbfs.Remove(ctx, name)
if err != nil {
return fmt.Errorf("dbfs remove %q: %w", name, err)
}
return nil
}
err := storage.Actions.Delete(filename)
if err != nil {
return fmt.Errorf("storage delete %q: %w", filename, err)
}
return nil
}
func openLogs(ctx context.Context, inStorage bool, filename string) (io.ReadSeekCloser, error) {
if !inStorage {
name := DBFSPrefix + filename
f, err := dbfs.Open(ctx, name)
if err != nil {
return nil, fmt.Errorf("dbfs open %q: %w", name, err)
}
return f, nil
}
f, err := storage.Actions.Open(filename)
if err != nil {
return nil, fmt.Errorf("storage open %q: %w", filename, err)
}
return f, nil
}
func FormatLog(timestamp time.Time, content string) string {
// Content shouldn't contain a newline, as it would break the log indexes; other control chars are safe.
content = strings.ReplaceAll(content, "\n", `\n`)
if len(content) > MaxLineSize {
content = content[:MaxLineSize]
}
return fmt.Sprintf("%s %s", timestamp.UTC().Format(timeFormat), content)
}
func ParseLog(in string) (time.Time, string, error) {
index := strings.IndexRune(in, ' ')
if index < 0 {
return time.Time{}, "", fmt.Errorf("invalid log: %q", in)
}
timestamp, err := time.Parse(timeFormat, in[:index])
if err != nil {
return time.Time{}, "", err
}
return timestamp, in[index+1:], nil
}
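
FormatLog and ParseLog above define the line format that WriteLogs and ReadLogs rely on: a UTC timestamp with seven fractional digits (100ns precision), a single space, then the content with embedded newlines escaped and over-long content truncated to MaxLineSize. The following test-style sketch of the round trip assumes nothing beyond the two functions above; the test itself is illustrative and not part of this change.

package actions

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestFormatParseLogRoundTrip sketches the on-disk log line format.
func TestFormatParseLogRoundTrip(t *testing.T) {
	// timeFormat keeps 7 fractional digits (100ns), so truncate first to make the
	// round trip exact; UTC() also drops the monotonic clock reading.
	now := time.Now().UTC().Truncate(100 * time.Nanosecond)

	line := FormatLog(now, "hello\nworld") // the newline is escaped to a literal `\n`
	ts, content, err := ParseLog(line)

	assert.NoError(t, err)
	assert.True(t, ts.Equal(now))
	assert.Equal(t, `hello\nworld`, content)
}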

View file

@ -0,0 +1,101 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
actions_model "code.gitea.io/gitea/models/actions"
)
const (
preStepName = "Set up job"
postStepName = "Complete job"
)
// FullSteps returns steps with "Set up job" and "Complete job"
func FullSteps(task *actions_model.ActionTask) []*actions_model.ActionTaskStep {
if len(task.Steps) == 0 {
return fullStepsOfEmptySteps(task)
}
firstStep := task.Steps[0]
var logIndex int64
preStep := &actions_model.ActionTaskStep{
Name: preStepName,
LogLength: task.LogLength,
Started: task.Started,
Status: actions_model.StatusRunning,
}
if firstStep.Status.HasRun() || firstStep.Status.IsRunning() {
preStep.LogLength = firstStep.LogIndex
preStep.Stopped = firstStep.Started
preStep.Status = actions_model.StatusSuccess
} else if task.Status.IsDone() {
preStep.Stopped = task.Stopped
preStep.Status = actions_model.StatusFailure
}
logIndex += preStep.LogLength
var lastHasRunStep *actions_model.ActionTaskStep
for _, step := range task.Steps {
if step.Status.HasRun() {
lastHasRunStep = step
}
logIndex += step.LogLength
}
if lastHasRunStep == nil {
lastHasRunStep = preStep
}
postStep := &actions_model.ActionTaskStep{
Name: postStepName,
Status: actions_model.StatusWaiting,
}
if task.Status.IsDone() {
postStep.LogIndex = logIndex
postStep.LogLength = task.LogLength - postStep.LogIndex
postStep.Status = task.Status
postStep.Started = lastHasRunStep.Stopped
postStep.Stopped = task.Stopped
}
ret := make([]*actions_model.ActionTaskStep, 0, len(task.Steps)+2)
ret = append(ret, preStep)
ret = append(ret, task.Steps...)
ret = append(ret, postStep)
return ret
}
func fullStepsOfEmptySteps(task *actions_model.ActionTask) []*actions_model.ActionTaskStep {
preStep := &actions_model.ActionTaskStep{
Name: preStepName,
LogLength: task.LogLength,
Started: task.Started,
Stopped: task.Stopped,
Status: actions_model.StatusRunning,
}
postStep := &actions_model.ActionTaskStep{
Name: postStepName,
LogIndex: task.LogLength,
Started: task.Stopped,
Stopped: task.Stopped,
Status: actions_model.StatusWaiting,
}
if task.Status.IsDone() {
preStep.Status = task.Status
if preStep.Status.IsSuccess() {
postStep.Status = actions_model.StatusSuccess
} else {
postStep.Status = actions_model.StatusCancelled
}
}
return []*actions_model.ActionTaskStep{
preStep,
postStep,
}
}

View file

@ -0,0 +1,112 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"testing"
actions_model "code.gitea.io/gitea/models/actions"
"github.com/stretchr/testify/assert"
)
func TestFullSteps(t *testing.T) {
tests := []struct {
name string
task *actions_model.ActionTask
want []*actions_model.ActionTaskStep
}{
{
name: "regular",
task: &actions_model.ActionTask{
Steps: []*actions_model.ActionTaskStep{
{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 10090},
},
Status: actions_model.StatusSuccess,
Started: 10000,
Stopped: 10100,
LogLength: 100,
},
want: []*actions_model.ActionTaskStep{
{Name: preStepName, Status: actions_model.StatusSuccess, LogIndex: 0, LogLength: 10, Started: 10000, Stopped: 10010},
{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 10090},
{Name: postStepName, Status: actions_model.StatusSuccess, LogIndex: 90, LogLength: 10, Started: 10090, Stopped: 10100},
},
},
{
name: "failed step",
task: &actions_model.ActionTask{
Steps: []*actions_model.ActionTaskStep{
{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 20, Started: 10010, Stopped: 10020},
{Status: actions_model.StatusFailure, LogIndex: 30, LogLength: 60, Started: 10020, Stopped: 10090},
{Status: actions_model.StatusCancelled, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
},
Status: actions_model.StatusFailure,
Started: 10000,
Stopped: 10100,
LogLength: 100,
},
want: []*actions_model.ActionTaskStep{
{Name: preStepName, Status: actions_model.StatusSuccess, LogIndex: 0, LogLength: 10, Started: 10000, Stopped: 10010},
{Status: actions_model.StatusSuccess, LogIndex: 10, LogLength: 20, Started: 10010, Stopped: 10020},
{Status: actions_model.StatusFailure, LogIndex: 30, LogLength: 60, Started: 10020, Stopped: 10090},
{Status: actions_model.StatusCancelled, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
{Name: postStepName, Status: actions_model.StatusFailure, LogIndex: 90, LogLength: 10, Started: 10090, Stopped: 10100},
},
},
{
name: "first step is running",
task: &actions_model.ActionTask{
Steps: []*actions_model.ActionTaskStep{
{Status: actions_model.StatusRunning, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 0},
},
Status: actions_model.StatusRunning,
Started: 10000,
Stopped: 10100,
LogLength: 100,
},
want: []*actions_model.ActionTaskStep{
{Name: preStepName, Status: actions_model.StatusSuccess, LogIndex: 0, LogLength: 10, Started: 10000, Stopped: 10010},
{Status: actions_model.StatusRunning, LogIndex: 10, LogLength: 80, Started: 10010, Stopped: 0},
{Name: postStepName, Status: actions_model.StatusWaiting, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
},
},
{
name: "first step has canceled",
task: &actions_model.ActionTask{
Steps: []*actions_model.ActionTaskStep{
{Status: actions_model.StatusCancelled, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
},
Status: actions_model.StatusFailure,
Started: 10000,
Stopped: 10100,
LogLength: 100,
},
want: []*actions_model.ActionTaskStep{
{Name: preStepName, Status: actions_model.StatusFailure, LogIndex: 0, LogLength: 100, Started: 10000, Stopped: 10100},
{Status: actions_model.StatusCancelled, LogIndex: 0, LogLength: 0, Started: 0, Stopped: 0},
{Name: postStepName, Status: actions_model.StatusFailure, LogIndex: 100, LogLength: 0, Started: 10100, Stopped: 10100},
},
},
{
name: "empty steps",
task: &actions_model.ActionTask{
Steps: []*actions_model.ActionTaskStep{},
Status: actions_model.StatusSuccess,
Started: 10000,
Stopped: 10100,
LogLength: 100,
},
want: []*actions_model.ActionTaskStep{
{Name: preStepName, Status: actions_model.StatusSuccess, LogIndex: 0, LogLength: 100, Started: 10000, Stopped: 10100},
{Name: postStepName, Status: actions_model.StatusSuccess, LogIndex: 100, LogLength: 0, Started: 10100, Stopped: 10100},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equalf(t, tt.want, FullSteps(tt.task), "FullSteps(%v)", tt.task)
})
}
}

View file

@ -0,0 +1,222 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"bytes"
"io"
"strings"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
api "code.gitea.io/gitea/modules/structs"
webhook_module "code.gitea.io/gitea/modules/webhook"
"github.com/gobwas/glob"
"github.com/nektos/act/pkg/jobparser"
"github.com/nektos/act/pkg/model"
)
func ListWorkflows(commit *git.Commit) (git.Entries, error) {
tree, err := commit.SubTree(".gitea/workflows")
if _, ok := err.(git.ErrNotExist); ok {
tree, err = commit.SubTree(".github/workflows")
}
if _, ok := err.(git.ErrNotExist); ok {
return nil, nil
}
if err != nil {
return nil, err
}
entries, err := tree.ListEntriesRecursiveFast()
if err != nil {
return nil, err
}
ret := make(git.Entries, 0, len(entries))
for _, entry := range entries {
if strings.HasSuffix(entry.Name(), ".yml") || strings.HasSuffix(entry.Name(), ".yaml") {
ret = append(ret, entry)
}
}
return ret, nil
}
func DetectWorkflows(commit *git.Commit, triggedEvent webhook_module.HookEventType, payload api.Payloader) (map[string][]byte, error) {
entries, err := ListWorkflows(commit)
if err != nil {
return nil, err
}
workflows := make(map[string][]byte, len(entries))
for _, entry := range entries {
f, err := entry.Blob().DataAsync()
if err != nil {
return nil, err
}
content, err := io.ReadAll(f)
_ = f.Close()
if err != nil {
return nil, err
}
workflow, err := model.ReadWorkflow(bytes.NewReader(content))
if err != nil {
log.Warn("ignore invalid workflow %q: %v", entry.Name(), err)
continue
}
events, err := jobparser.ParseRawOn(&workflow.RawOn)
if err != nil {
log.Warn("ignore invalid workflow %q: %v", entry.Name(), err)
continue
}
for _, evt := range events {
if evt.Name != triggedEvent.Event() {
continue
}
if detectMatched(commit, triggedEvent, payload, evt) {
workflows[entry.Name()] = content
}
}
}
return workflows, nil
}
func detectMatched(commit *git.Commit, triggedEvent webhook_module.HookEventType, payload api.Payloader, evt *jobparser.Event) bool {
if len(evt.Acts) == 0 {
return true
}
switch triggedEvent {
case webhook_module.HookEventCreate:
fallthrough
case webhook_module.HookEventDelete:
fallthrough
case webhook_module.HookEventFork:
log.Warn("unsupported event %q", triggedEvent.Event())
return false
case webhook_module.HookEventPush:
pushPayload := payload.(*api.PushPayload)
matchTimes := 0
// all acts conditions should be satisfied
for cond, vals := range evt.Acts {
switch cond {
case "branches", "tags":
refShortName := git.RefName(pushPayload.Ref).ShortName()
for _, val := range vals {
if glob.MustCompile(val, '/').Match(refShortName) {
matchTimes++
break
}
}
case "paths":
filesChanged, err := commit.GetFilesChangedSinceCommit(pushPayload.Before)
if err != nil {
log.Error("GetFilesChangedSinceCommit [commit_sha1: %s]: %v", commit.ID.String(), err)
} else {
for _, val := range vals {
matched := false
for _, file := range filesChanged {
if glob.MustCompile(val, '/').Match(file) {
matched = true
break
}
}
if matched {
matchTimes++
break
}
}
}
default:
log.Warn("unsupported condition %q", cond)
}
}
return matchTimes == len(evt.Acts)
case webhook_module.HookEventIssues:
fallthrough
case webhook_module.HookEventIssueAssign:
fallthrough
case webhook_module.HookEventIssueLabel:
fallthrough
case webhook_module.HookEventIssueMilestone:
fallthrough
case webhook_module.HookEventIssueComment:
fallthrough
case webhook_module.HookEventPullRequest:
prPayload := payload.(*api.PullRequestPayload)
matchTimes := 0
// all acts conditions should be satisfied
for cond, vals := range evt.Acts {
switch cond {
case "types":
for _, val := range vals {
if glob.MustCompile(val, '/').Match(string(prPayload.Action)) {
matchTimes++
break
}
}
case "branches":
refShortName := git.RefName(prPayload.PullRequest.Base.Ref).ShortName()
for _, val := range vals {
if glob.MustCompile(val, '/').Match(refShortName) {
matchTimes++
break
}
}
case "paths":
filesChanged, err := commit.GetFilesChangedSinceCommit(prPayload.PullRequest.Base.Ref)
if err != nil {
log.Error("GetFilesChangedSinceCommit [commit_sha1: %s]: %v", commit.ID.String(), err)
} else {
for _, val := range vals {
matched := false
for _, file := range filesChanged {
if glob.MustCompile(val, '/').Match(file) {
matched = true
break
}
}
if matched {
matchTimes++
break
}
}
}
default:
log.Warn("unsupported condition %q", cond)
}
}
return matchTimes == len(evt.Acts)
case webhook_module.HookEventPullRequestAssign:
fallthrough
case webhook_module.HookEventPullRequestLabel:
fallthrough
case webhook_module.HookEventPullRequestMilestone:
fallthrough
case webhook_module.HookEventPullRequestComment:
fallthrough
case webhook_module.HookEventPullRequestReviewApproved:
fallthrough
case webhook_module.HookEventPullRequestReviewRejected:
fallthrough
case webhook_module.HookEventPullRequestReviewComment:
fallthrough
case webhook_module.HookEventPullRequestSync:
fallthrough
case webhook_module.HookEventWiki:
fallthrough
case webhook_module.HookEventRepository:
fallthrough
case webhook_module.HookEventRelease:
fallthrough
case webhook_module.HookEventPackage:
fallthrough
default:
log.Warn("unsupported event %q", triggedEvent.Event())
}
return false
}

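For illustration, a minimal standalone sketch (not part of this commit) of the matching rule detectMatched applies above: every act condition must be satisfied by at least one of its values, compared with gobwas/glob using '/' as the separator. The ref name and branch patterns below are hypothetical.
package main
import (
	"fmt"

	"github.com/gobwas/glob"
)
func main() {
	// Hypothetical push to refs/heads/release/v1, i.e. the short ref name "release/v1".
	refShortName := "release/v1"
	branches := []string{"main", "release/*"}

	matched := false
	for _, pattern := range branches {
		// '/' as separator: '*' does not cross path segments, mirroring detectMatched.
		if glob.MustCompile(pattern, '/').Match(refShortName) {
			matched = true
			break
		}
	}
	fmt.Println(matched) // prints: true
}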
View file

@ -28,7 +28,7 @@ func Init() {
Config: &webauthn.Config{
RPDisplayName: setting.AppName,
RPID: setting.Domain,
RPOrigin: appURL,
RPOrigins: []string{appURL},
AuthenticatorSelection: protocol.AuthenticatorSelection{
UserVerification: "discouraged",
},

View file

@ -15,11 +15,11 @@ func TestInit(t *testing.T) {
setting.Domain = "domain"
setting.AppName = "AppName"
setting.AppURL = "https://domain/"
rpOrigin := "https://domain"
rpOrigin := []string{"https://domain"}
Init()
assert.Equal(t, setting.Domain, WebAuthn.Config.RPID)
assert.Equal(t, setting.AppName, WebAuthn.Config.RPDisplayName)
assert.Equal(t, rpOrigin, WebAuthn.Config.RPOrigin)
assert.Equal(t, rpOrigin, WebAuthn.Config.RPOrigins)
}

View file

@ -44,7 +44,7 @@ func EscapeControlReader(reader io.Reader, writer io.Writer, locale translation.
return streamer.escaped, err
}
// EscapeControlStringReader escapes the unicode control sequences in a provided reader of string content and writer in a locale and returns the findings as an EscapeStatus and the escaped []byte
// EscapeControlStringReader escapes the Unicode control sequences from the provided reader of string content, writes the escaped output to the writer using the given locale, and returns the findings as an EscapeStatus together with the escaped []byte. This method does not insert HTML line breaks after every newline.
func EscapeControlStringReader(reader io.Reader, writer io.Writer, locale translation.Locale, allowed ...rune) (escaped *EscapeStatus, err error) {
bufRd := bufio.NewReader(reader)
outputStream := &HTMLStreamerWriter{Writer: writer}
@ -65,10 +65,6 @@ func EscapeControlStringReader(reader io.Reader, writer io.Writer, locale transl
}
break
}
if err := streamer.SelfClosingTag("br"); err != nil {
streamer.escaped.HasError = true
return streamer.escaped, err
}
}
return streamer.escaped, err
}

View file

@ -805,6 +805,7 @@ func Contexter(ctx context.Context) func(next http.Handler) http.Handler {
ctx.Data["EnableOpenIDSignIn"] = setting.Service.EnableOpenIDSignIn
ctx.Data["DisableMigrations"] = setting.Repository.DisableMigrations
ctx.Data["DisableStars"] = setting.Repository.DisableStars
ctx.Data["EnableActions"] = setting.Actions.Enabled
ctx.Data["ManifestData"] = setting.ManifestData
@ -812,6 +813,7 @@ func Contexter(ctx context.Context) func(next http.Handler) http.Handler {
ctx.Data["UnitIssuesGlobalDisabled"] = unit.TypeIssues.UnitGlobalDisabled()
ctx.Data["UnitPullsGlobalDisabled"] = unit.TypePullRequests.UnitGlobalDisabled()
ctx.Data["UnitProjectsGlobalDisabled"] = unit.TypeProjects.UnitGlobalDisabled()
ctx.Data["UnitActionsGlobalDisabled"] = unit.TypeActions.UnitGlobalDisabled()
ctx.Data["locale"] = locale
ctx.Data["AllLangs"] = translation.AllLangs()

View file

@ -1043,6 +1043,7 @@ func UnitTypes() func(ctx *Context) {
ctx.Data["UnitTypeExternalTracker"] = unit_model.TypeExternalTracker
ctx.Data["UnitTypeProjects"] = unit_model.TypeProjects
ctx.Data["UnitTypePackages"] = unit_model.TypePackages
ctx.Data["UnitTypeActions"] = unit_model.TypeActions
}
}

View file

@ -16,14 +16,20 @@ import (
"time"
"unsafe"
"code.gitea.io/gitea/modules/git/internal" //nolint:depguard // only this file can use the internal type CmdArg, other files and packages should use AddXxx functions
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/util"
)
// TrustedCmdArgs is a list of arguments that are trusted to be passed to the git command directly.
// It's mainly for passing user-provided but already-trusted arguments to the git command.
// In most cases it shouldn't be used; use the AddXxx functions instead.
type TrustedCmdArgs []internal.CmdArg
var (
// globalCommandArgs global command args for external package setting
globalCommandArgs []CmdArg
globalCommandArgs TrustedCmdArgs
// defaultCommandExecutionTimeout default command execution timeout duration
defaultCommandExecutionTimeout = 360 * time.Second
@ -42,8 +48,6 @@ type Command struct {
brokenArgs []string
}
type CmdArg string
func (c *Command) String() string {
if len(c.args) == 0 {
return c.name
@ -53,7 +57,7 @@ func (c *Command) String() string {
// NewCommand creates and returns a new Git Command based on given command and arguments.
// Each argument should be safe to be trusted. User-provided arguments should be passed to AddDynamicArguments instead.
func NewCommand(ctx context.Context, args ...CmdArg) *Command {
func NewCommand(ctx context.Context, args ...internal.CmdArg) *Command {
// Make an explicit copy of globalCommandArgs, otherwise append might overwrite it
cargs := make([]string, 0, len(globalCommandArgs)+len(args))
for _, arg := range globalCommandArgs {
@ -70,15 +74,9 @@ func NewCommand(ctx context.Context, args ...CmdArg) *Command {
}
}
// NewCommandNoGlobals creates and returns a new Git Command based on given command and arguments only with the specify args and don't care global command args
// Each argument should be safe to be trusted. User-provided arguments should be passed to AddDynamicArguments instead.
func NewCommandNoGlobals(args ...CmdArg) *Command {
return NewCommandContextNoGlobals(DefaultContext, args...)
}
// NewCommandContextNoGlobals creates and returns a new Git Command based on the given command and arguments, using only the specified args and ignoring the global command args.
// Each argument should be safe to be trusted. User-provided arguments should be passed to AddDynamicArguments instead.
func NewCommandContextNoGlobals(ctx context.Context, args ...CmdArg) *Command {
func NewCommandContextNoGlobals(ctx context.Context, args ...internal.CmdArg) *Command {
cargs := make([]string, 0, len(args))
for _, arg := range args {
cargs = append(cargs, string(arg))
@ -96,27 +94,70 @@ func (c *Command) SetParentContext(ctx context.Context) *Command {
return c
}
// SetDescription sets the description for this command which be returned on
// c.String()
// SetDescription sets the description for this command, which will be returned by c.String()
func (c *Command) SetDescription(desc string) *Command {
c.desc = desc
return c
}
// AddArguments adds new git argument(s) to the command. Each argument must be safe to be trusted.
// User-provided arguments should be passed to AddDynamicArguments instead.
func (c *Command) AddArguments(args ...CmdArg) *Command {
// isSafeArgumentValue checks if the argument is safe to be used as a value (not an option)
func isSafeArgumentValue(s string) bool {
return s == "" || s[0] != '-'
}
// isValidArgumentOption checks if the argument is a valid option (starting with '-').
// It doesn't check whether the option is supported or not
func isValidArgumentOption(s string) bool {
return s != "" && s[0] == '-'
}
// AddArguments adds new git arguments (options/values) to the command. It only accepts string literals or trusted CmdArg values.
// The CmdArg type lives in the internal package, so it cannot be used outside of this package directly;
// this makes sure that user-provided arguments won't introduce RCE risks.
// User-provided arguments should be passed through the other AddXxx functions.
func (c *Command) AddArguments(args ...internal.CmdArg) *Command {
for _, arg := range args {
c.args = append(c.args, string(arg))
}
return c
}
// AddDynamicArguments adds new dynamic argument(s) to the command.
// The arguments may come from user input and can not be trusted, so no leading '-' is allowed to avoid passing options
// AddOptionValues adds a new option with a list of non-option values
// For example: AddOptionValues("--opt", val) means 2 arguments: {"--opt", val}.
// The values are treated as dynamic argument values. It is equivalent to AddArguments("--opt") followed by AddDynamicArguments(val).
func (c *Command) AddOptionValues(opt internal.CmdArg, args ...string) *Command {
if !isValidArgumentOption(string(opt)) {
c.brokenArgs = append(c.brokenArgs, string(opt))
return c
}
c.args = append(c.args, string(opt))
c.AddDynamicArguments(args...)
return c
}
// AddOptionFormat adds a new option with a format string and arguments
// For example: AddOptionFormat("--opt=%s %s", val1, val2) means 1 argument: {"--opt=val1 val2"}.
func (c *Command) AddOptionFormat(opt string, args ...any) *Command {
if !isValidArgumentOption(opt) {
c.brokenArgs = append(c.brokenArgs, opt)
return c
}
// a quick check to make sure the format string matches the number of arguments, to find low-level mistakes ASAP
if strings.Count(strings.ReplaceAll(opt, "%%", ""), "%") != len(args) {
c.brokenArgs = append(c.brokenArgs, opt)
return c
}
s := fmt.Sprintf(opt, args...)
c.args = append(c.args, s)
return c
}
// AddDynamicArguments adds new dynamic argument values to the command.
// The arguments may come from user input and cannot be trusted, so no leading '-' is allowed, to avoid passing options.
// TODO: in the future, this function can be renamed to AddArgumentValues
func (c *Command) AddDynamicArguments(args ...string) *Command {
for _, arg := range args {
if arg != "" && arg[0] == '-' {
if !isSafeArgumentValue(arg) {
c.brokenArgs = append(c.brokenArgs, arg)
}
}
@ -137,14 +178,14 @@ func (c *Command) AddDashesAndList(list ...string) *Command {
return c
}
// CmdArgCheck checks whether the string is safe to be used as a dynamic argument.
// It panics if the check fails. Usually it should not be used, it's just for refactoring purpose
// deprecated
func CmdArgCheck(s string) CmdArg {
if s != "" && s[0] == '-' {
panic("invalid git cmd argument: " + s)
// ToTrustedCmdArgs converts a list of strings (trusted as arguments) to TrustedCmdArgs.
// In most cases it shouldn't be used; use the AddXxx functions instead.
func ToTrustedCmdArgs(args []string) TrustedCmdArgs {
ret := make(TrustedCmdArgs, len(args))
for i, arg := range args {
ret[i] = internal.CmdArg(arg)
}
return CmdArg(s)
return ret
}
// RunOpts represents parameters to run the command. If UseContextTimeout is specified, then Timeout is ignored.
@ -364,9 +405,9 @@ func (c *Command) RunStdBytes(opts *RunOpts) (stdout, stderr []byte, runErr RunS
}
// AllowLFSFiltersArgs returns globalCommandArgs with the LFS filter allowed; it should only be used for tests
func AllowLFSFiltersArgs() []CmdArg {
func AllowLFSFiltersArgs() TrustedCmdArgs {
// Now here we should explicitly allow lfs filters to run
filteredLFSGlobalArgs := make([]CmdArg, len(globalCommandArgs))
filteredLFSGlobalArgs := make(TrustedCmdArgs, len(globalCommandArgs))
j := 0
for _, arg := range globalCommandArgs {
if strings.Contains(string(arg), "lfs") {

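To illustrate the new argument helpers above, here is a minimal sketch of how trusted literals and untrusted input are kept apart; the function name, repoPath and userBranch are hypothetical and not from this commit. String literals (and internal.CmdArg values) are accepted by NewCommand/AddArguments, while AddOptionValues, AddOptionFormat and AddDynamicArguments carry the variable parts, with dynamic values rejected if they start with '-'.
package example
import (
	"context"

	"code.gitea.io/gitea/modules/git"
)
// latestSubjects lists the most recent commit subjects of a user-chosen branch.
func latestSubjects(ctx context.Context, repoPath, userBranch string) (string, error) {
	cmd := git.NewCommand(ctx, "log", "--pretty=format:%s"). // string literals become trusted CmdArg values
		AddOptionFormat("--max-count=%d", 10).           // formatted option; the '%' count is checked
		AddDynamicArguments(userBranch)                  // untrusted value; a leading '-' is rejected
	stdout, _, err := cmd.RunStdString(&git.RunOpts{Dir: repoPath})
	return stdout, err
}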
View file

@ -41,3 +41,14 @@ func TestRunWithContextStd(t *testing.T) {
assert.Empty(t, stderr)
assert.Contains(t, stdout, "git version")
}
func TestGitArgument(t *testing.T) {
assert.True(t, isValidArgumentOption("-x"))
assert.True(t, isValidArgumentOption("--xx"))
assert.False(t, isValidArgumentOption(""))
assert.False(t, isValidArgumentOption("x"))
assert.True(t, isSafeArgumentValue(""))
assert.True(t, isSafeArgumentValue("x"))
assert.False(t, isSafeArgumentValue("-x"))
}

View file

@ -9,7 +9,6 @@ import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os/exec"
"strconv"
@ -91,8 +90,8 @@ func AddChanges(repoPath string, all bool, files ...string) error {
}
// AddChangesWithArgs marks local changes to be ready for commit.
func AddChangesWithArgs(repoPath string, globalArgs []CmdArg, all bool, files ...string) error {
cmd := NewCommandNoGlobals(append(globalArgs, "add")...)
func AddChangesWithArgs(repoPath string, globalArgs TrustedCmdArgs, all bool, files ...string) error {
cmd := NewCommandContextNoGlobals(DefaultContext, globalArgs...).AddArguments("add")
if all {
cmd.AddArguments("--all")
}
@ -111,17 +110,18 @@ type CommitChangesOptions struct {
// CommitChanges commits local changes with given committer, author and message.
// If author is nil, it will be the same as committer.
func CommitChanges(repoPath string, opts CommitChangesOptions) error {
cargs := make([]CmdArg, len(globalCommandArgs))
cargs := make(TrustedCmdArgs, len(globalCommandArgs))
copy(cargs, globalCommandArgs)
return CommitChangesWithArgs(repoPath, cargs, opts)
}
// CommitChangesWithArgs commits local changes with given committer, author and message.
// If author is nil, it will be the same as committer.
func CommitChangesWithArgs(repoPath string, args []CmdArg, opts CommitChangesOptions) error {
cmd := NewCommandNoGlobals(args...)
func CommitChangesWithArgs(repoPath string, args TrustedCmdArgs, opts CommitChangesOptions) error {
cmd := NewCommandContextNoGlobals(DefaultContext, args...)
if opts.Committer != nil {
cmd.AddArguments("-c", CmdArg("user.name="+opts.Committer.Name), "-c", CmdArg("user.email="+opts.Committer.Email))
cmd.AddOptionValues("-c", "user.name="+opts.Committer.Name)
cmd.AddOptionValues("-c", "user.email="+opts.Committer.Email)
}
cmd.AddArguments("commit")
@ -129,9 +129,9 @@ func CommitChangesWithArgs(repoPath string, args []CmdArg, opts CommitChangesOpt
opts.Author = opts.Committer
}
if opts.Author != nil {
cmd.AddArguments(CmdArg(fmt.Sprintf("--author='%s <%s>'", opts.Author.Name, opts.Author.Email)))
cmd.AddOptionFormat("--author='%s <%s>'", opts.Author.Name, opts.Author.Email)
}
cmd.AddArguments("-m").AddDynamicArguments(opts.Message)
cmd.AddOptionValues("-m", opts.Message)
_, _, err := cmd.RunStdString(&RunOpts{Dir: repoPath})
// No stderr but exit status 1 means nothing to commit.

View file
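A small, hypothetical sketch of how a caller might combine ToTrustedCmdArgs with the rewritten CommitChangesWithArgs; the repository path, signature, message and the extra '-c' setting are made up, and CommitChangesOptions is assumed to expose the usual Committer/Author/Message fields.
package example
import (
	"code.gitea.io/gitea/modules/git"
)
func commitAll(repoPath string) error {
	// Stage every local change first.
	if err := git.AddChanges(repoPath, true); err != nil {
		return err
	}
	// Strings the caller already trusts can be promoted to TrustedCmdArgs.
	trusted := git.ToTrustedCmdArgs([]string{"-c", "commit.gpgsign=false"})
	return git.CommitChangesWithArgs(repoPath, trusted, git.CommitChangesOptions{
		Committer: &git.Signature{Name: "Gitea", Email: "gitea@example.com"},
		Message:   "hypothetical commit message",
	})
}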

@ -383,6 +383,6 @@ func configUnsetAll(key, value string) error {
}
// Fsck verifies the connectivity and validity of the objects in the database
func Fsck(ctx context.Context, repoPath string, timeout time.Duration, args ...CmdArg) error {
func Fsck(ctx context.Context, repoPath string, timeout time.Duration, args TrustedCmdArgs) error {
return NewCommand(ctx, "fsck").AddArguments(args...).Run(&RunOpts{Timeout: timeout, Dir: repoPath})
}

View file

@ -0,0 +1,9 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package internal
// CmdArg represents an argument for the git command; it is passed to the git command directly without any further processing.
// In most cases you should use the "AddXxx" functions to add arguments rather than using this type directly.
// Casting a risky (user-provided) string to CmdArg would cause security issues if a "--xxx" option were injected.
type CmdArg string

View file

@ -115,7 +115,7 @@ func Clone(ctx context.Context, from, to string, opts CloneRepoOptions) error {
}
// CloneWithArgs clones the original repository to the target path.
func CloneWithArgs(ctx context.Context, args []CmdArg, from, to string, opts CloneRepoOptions) (err error) {
func CloneWithArgs(ctx context.Context, args TrustedCmdArgs, from, to string, opts CloneRepoOptions) (err error) {
toDir := path.Dir(to)
if err = os.MkdirAll(toDir, os.ModePerm); err != nil {
return err

View file

@ -57,9 +57,9 @@ func (repo *Repository) CreateArchive(ctx context.Context, format ArchiveType, t
cmd := NewCommand(ctx, "archive")
if usePrefix {
cmd.AddArguments(CmdArg("--prefix=" + filepath.Base(strings.TrimSuffix(repo.Path, ".git")) + "/"))
cmd.AddOptionFormat("--prefix=%s", filepath.Base(strings.TrimSuffix(repo.Path, ".git"))+"/")
}
cmd.AddArguments(CmdArg("--format=" + format.String()))
cmd.AddOptionFormat("--format=%s", format.String())
cmd.AddDynamicArguments(commitID)
var stderr strings.Builder

View file

@ -17,7 +17,7 @@ import (
type CheckAttributeOpts struct {
CachedOnly bool
AllAttributes bool
Attributes []CmdArg
Attributes []string
Filenames []string
IndexFile string
WorkTree string
@ -48,7 +48,7 @@ func (repo *Repository) CheckAttribute(opts CheckAttributeOpts) (map[string]map[
} else {
for _, attribute := range opts.Attributes {
if attribute != "" {
cmd.AddArguments(attribute)
cmd.AddDynamicArguments(attribute)
}
}
}
@ -95,7 +95,7 @@ func (repo *Repository) CheckAttribute(opts CheckAttributeOpts) (map[string]map[
// CheckAttributeReader provides a reader for check-attribute content that can be long running
type CheckAttributeReader struct {
// params
Attributes []CmdArg
Attributes []string
Repo *Repository
IndexFile string
WorkTree string
@ -111,19 +111,6 @@ type CheckAttributeReader struct {
// Init initializes the CheckAttributeReader
func (c *CheckAttributeReader) Init(ctx context.Context) error {
cmdArgs := []CmdArg{"check-attr", "--stdin", "-z"}
if len(c.IndexFile) > 0 {
cmdArgs = append(cmdArgs, "--cached")
c.env = append(c.env, "GIT_INDEX_FILE="+c.IndexFile)
}
if len(c.WorkTree) > 0 {
c.env = append(c.env, "GIT_WORK_TREE="+c.WorkTree)
}
c.env = append(c.env, "GIT_FLUSH=1")
if len(c.Attributes) == 0 {
lw := new(nulSeparatedAttributeWriter)
lw.attributes = make(chan attributeTriple)
@ -134,11 +121,22 @@ func (c *CheckAttributeReader) Init(ctx context.Context) error {
return fmt.Errorf("no provided Attributes to check")
}
cmdArgs = append(cmdArgs, c.Attributes...)
cmdArgs = append(cmdArgs, "--")
c.ctx, c.cancel = context.WithCancel(ctx)
c.cmd = NewCommand(c.ctx, cmdArgs...)
c.cmd = NewCommand(c.ctx, "check-attr", "--stdin", "-z")
if len(c.IndexFile) > 0 {
c.cmd.AddArguments("--cached")
c.env = append(c.env, "GIT_INDEX_FILE="+c.IndexFile)
}
if len(c.WorkTree) > 0 {
c.env = append(c.env, "GIT_WORK_TREE="+c.WorkTree)
}
c.env = append(c.env, "GIT_FLUSH=1")
// The empty "--" comes from #16773, and it seems unnecessary because nothing else would be added later.
c.cmd.AddDynamicArguments(c.Attributes...).AddArguments("--")
var err error
@ -294,7 +292,7 @@ func (repo *Repository) CheckAttributeReader(commitID string) (*CheckAttributeRe
}
checker := &CheckAttributeReader{
Attributes: []CmdArg{"linguist-vendored", "linguist-generated", "linguist-language", "gitlab-language"},
Attributes: []string{"linguist-vendored", "linguist-generated", "linguist-language", "gitlab-language"},
Repo: repo,
IndexFile: indexFilename,
WorkTree: worktree,

View file
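To show the attribute API after this change, where attributes are plain strings passed through AddDynamicArguments, here is a minimal hypothetical sketch of the one-shot CheckAttribute call; the repository handle and the file path are assumptions.
package example
import (
	"code.gitea.io/gitea/modules/git"
)
func detectLanguage(repo *git.Repository) (string, error) {
	// Attributes and Filenames are ordinary strings now; no CmdArg casting is needed.
	attrs, err := repo.CheckAttribute(git.CheckAttributeOpts{
		Attributes: []string{"linguist-language", "linguist-vendored"},
		Filenames:  []string{"modules/git/command.go"},
	})
	if err != nil {
		return "", err
	}
	return attrs["modules/git/command.go"]["linguist-language"], nil
}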

@ -3,7 +3,9 @@
package git
import "fmt"
import (
"fmt"
)
// FileBlame returns the Blame object of the file
func (repo *Repository) FileBlame(revision, path, file string) ([]byte, error) {
@ -14,8 +16,8 @@ func (repo *Repository) FileBlame(revision, path, file string) ([]byte, error) {
// LineBlame returns the latest commit at the given line
func (repo *Repository) LineBlame(revision, path, file string, line uint) (*Commit, error) {
res, _, err := NewCommand(repo.Ctx, "blame").
AddArguments(CmdArg(fmt.Sprintf("-L %d,%d", line, line))).
AddArguments("-p").AddDynamicArguments(revision).
AddOptionFormat("-L %d,%d", line, line).
AddOptionValues("-p", revision).
AddDashesAndList(file).RunStdString(&RunOpts{Dir: path})
if err != nil {
return nil, err

View file

@ -50,8 +50,8 @@ func (repo *Repository) IsBranchExist(name string) bool {
return reference.Type() != plumbing.InvalidReference
}
// GetBranches returns branches from the repository, skipping skip initial branches and
// returning at most limit branches, or all branches if limit is 0.
// GetBranchNames returns branches from the repository, skipping "skip" initial branches and
// returning at most "limit" branches, or all branches if "limit" is 0.
func (repo *Repository) GetBranchNames(skip, limit int) ([]string, int, error) {
var branchNames []string

View file

@ -59,10 +59,10 @@ func (repo *Repository) IsBranchExist(name string) bool {
return repo.IsReferenceExist(BranchPrefix + name)
}
// GetBranchNames returns branches from the repository, skipping skip initial branches and
// returning at most limit branches, or all branches if limit is 0.
// GetBranchNames returns branches from the repository, skipping "skip" initial branches and
// returning at most "limit" branches, or all branches if "limit" is 0.
func (repo *Repository) GetBranchNames(skip, limit int) ([]string, int, error) {
return callShowRef(repo.Ctx, repo.Path, BranchPrefix, []CmdArg{BranchPrefix, "--sort=-committerdate"}, skip, limit)
return callShowRef(repo.Ctx, repo.Path, BranchPrefix, TrustedCmdArgs{BranchPrefix, "--sort=-committerdate"}, skip, limit)
}
// WalkReferences walks all the references from the repository
@ -73,19 +73,19 @@ func WalkReferences(ctx context.Context, repoPath string, walkfn func(sha1, refn
// WalkReferences walks all the references from the repository
// refType should be empty, ObjectTag or ObjectBranch. All other values are equivalent to empty.
func (repo *Repository) WalkReferences(refType ObjectType, skip, limit int, walkfn func(sha1, refname string) error) (int, error) {
var args []CmdArg
var args TrustedCmdArgs
switch refType {
case ObjectTag:
args = []CmdArg{TagPrefix, "--sort=-taggerdate"}
args = TrustedCmdArgs{TagPrefix, "--sort=-taggerdate"}
case ObjectBranch:
args = []CmdArg{BranchPrefix, "--sort=-committerdate"}
args = TrustedCmdArgs{BranchPrefix, "--sort=-committerdate"}
}
return walkShowRef(repo.Ctx, repo.Path, args, skip, limit, walkfn)
}
// callShowRef returns refs; if limit is 0 it does not limit the number of results
func callShowRef(ctx context.Context, repoPath, trimPrefix string, extraArgs []CmdArg, skip, limit int) (branchNames []string, countAll int, err error) {
func callShowRef(ctx context.Context, repoPath, trimPrefix string, extraArgs TrustedCmdArgs, skip, limit int) (branchNames []string, countAll int, err error) {
countAll, err = walkShowRef(ctx, repoPath, extraArgs, skip, limit, func(_, branchName string) error {
branchName = strings.TrimPrefix(branchName, trimPrefix)
branchNames = append(branchNames, branchName)
@ -95,7 +95,7 @@ func callShowRef(ctx context.Context, repoPath, trimPrefix string, extraArgs []C
return branchNames, countAll, err
}
func walkShowRef(ctx context.Context, repoPath string, extraArgs []CmdArg, skip, limit int, walkfn func(sha1, refname string) error) (countAll int, err error) {
func walkShowRef(ctx context.Context, repoPath string, extraArgs TrustedCmdArgs, skip, limit int, walkfn func(sha1, refname string) error) (countAll int, err error) {
stdoutReader, stdoutWriter := io.Pipe()
defer func() {
_ = stdoutReader.Close()
@ -104,7 +104,7 @@ func walkShowRef(ctx context.Context, repoPath string, extraArgs []CmdArg, skip,
go func() {
stderrBuilder := &strings.Builder{}
args := []CmdArg{"for-each-ref", "--format=%(objectname) %(refname)"}
args := TrustedCmdArgs{"for-each-ref", "--format=%(objectname) %(refname)"}
args = append(args, extraArgs...)
err := NewCommand(ctx, args...).Run(&RunOpts{
Dir: repoPath,

Some files were not shown because too many files have changed in this diff.