Mirror of https://github.com/go-gitea/gitea.git (synced 2025-02-20 11:43:57 +08:00)

Compare commits: 50 commits, 96fc8c33b7 ... 1b7dcc3550
Commits (newest first):
1b7dcc3550, c2e23d3301, 84d2159ef6, ce65613690, 748b731612, 241f799edf,
9f560d47c9, 15e020eec8, 7df09e31fa, f5a81f9636, f35850f48e, 69de5a65c2,
5df9fd3e9c, 50a5d6bf5d, 3bbacac62c, 37c4f3760c, 58c124cc4f, 62389dd08b,
950abfe8ee, fc1b383da9, 2b8cfb557d, 01bf8da02e, 57997f1518, 1ba7cbbfd6,
8aede14b1d, 70327d6a92, f232d8f530, b426e383fe, d88b012525, fba365b425,
42d817e814, 3e39583bb5, e741448a14, bcd1317d17, f58f5bb3d8, 06f1065636,
245ac321c3, e9b98aef44, 217ffe5492, b3302748fa, c422f179dd, e3adb686bb,
30993e9508, 085f273d19, 72518a8dab, 704b65e012, 523751dc82, 06088ec672,
a52720b5b4, af5b1503ec
.eslintrc.cjs (115 changes)

@@ -1,3 +1,4 @@
+const vitestPlugin = require('@vitest/eslint-plugin');
 const restrictedSyntax = ['WithStatement', 'ForInStatement', 'LabeledStatement', 'SequenceExpression'];
 
 module.exports = {
@@ -37,8 +38,6 @@ module.exports = {
     'eslint-plugin-regexp',
     'eslint-plugin-sonarjs',
     'eslint-plugin-unicorn',
-    'eslint-plugin-vitest',
-    'eslint-plugin-vitest-globals',
     'eslint-plugin-wc',
   ],
   env: {
@@ -46,6 +45,13 @@ module.exports = {
     node: true,
   },
   overrides: [
+    {
+      files: ['**/*.cjs'],
+      rules: {
+        'import-x/no-commonjs': [0],
+        '@typescript-eslint/no-require-imports': [0],
+      },
+    },
     {
       files: ['web_src/**/*'],
       globals: {
@@ -82,59 +88,58 @@ module.exports = {
     },
     {
       files: ['**/*.test.*', 'web_src/js/test/setup.ts'],
-      env: {
-        'vitest-globals/env': true,
-      },
+      plugins: ['@vitest/eslint-plugin'],
+      globals: vitestPlugin.environments.env.globals,
       rules: {
-        'vitest/consistent-test-filename': [0],
-        'vitest/consistent-test-it': [0],
-        'vitest/expect-expect': [0],
-        'vitest/max-expects': [0],
-        'vitest/max-nested-describe': [0],
-        'vitest/no-alias-methods': [0],
-        'vitest/no-commented-out-tests': [0],
-        'vitest/no-conditional-expect': [0],
-        'vitest/no-conditional-in-test': [0],
-        'vitest/no-conditional-tests': [0],
-        'vitest/no-disabled-tests': [0],
-        'vitest/no-done-callback': [0],
-        'vitest/no-duplicate-hooks': [0],
-        'vitest/no-focused-tests': [0],
-        'vitest/no-hooks': [0],
-        'vitest/no-identical-title': [2],
-        'vitest/no-interpolation-in-snapshots': [0],
-        'vitest/no-large-snapshots': [0],
-        'vitest/no-mocks-import': [0],
-        'vitest/no-restricted-matchers': [0],
-        'vitest/no-restricted-vi-methods': [0],
-        'vitest/no-standalone-expect': [0],
-        'vitest/no-test-prefixes': [0],
-        'vitest/no-test-return-statement': [0],
-        'vitest/prefer-called-with': [0],
-        'vitest/prefer-comparison-matcher': [0],
-        'vitest/prefer-each': [0],
-        'vitest/prefer-equality-matcher': [0],
-        'vitest/prefer-expect-resolves': [0],
-        'vitest/prefer-hooks-in-order': [0],
-        'vitest/prefer-hooks-on-top': [2],
-        'vitest/prefer-lowercase-title': [0],
-        'vitest/prefer-mock-promise-shorthand': [0],
-        'vitest/prefer-snapshot-hint': [0],
-        'vitest/prefer-spy-on': [0],
-        'vitest/prefer-strict-equal': [0],
-        'vitest/prefer-to-be': [0],
-        'vitest/prefer-to-be-falsy': [0],
-        'vitest/prefer-to-be-object': [0],
-        'vitest/prefer-to-be-truthy': [0],
-        'vitest/prefer-to-contain': [0],
-        'vitest/prefer-to-have-length': [0],
-        'vitest/prefer-todo': [0],
-        'vitest/require-hook': [0],
-        'vitest/require-to-throw-message': [0],
-        'vitest/require-top-level-describe': [0],
-        'vitest/valid-describe-callback': [2],
-        'vitest/valid-expect': [2],
-        'vitest/valid-title': [2],
+        '@vitest/consistent-test-filename': [0],
+        '@vitest/consistent-test-it': [0],
+        '@vitest/expect-expect': [0],
+        '@vitest/max-expects': [0],
+        '@vitest/max-nested-describe': [0],
+        '@vitest/no-alias-methods': [0],
+        '@vitest/no-commented-out-tests': [0],
+        '@vitest/no-conditional-expect': [0],
+        '@vitest/no-conditional-in-test': [0],
+        '@vitest/no-conditional-tests': [0],
+        '@vitest/no-disabled-tests': [0],
+        '@vitest/no-done-callback': [0],
+        '@vitest/no-duplicate-hooks': [0],
+        '@vitest/no-focused-tests': [0],
+        '@vitest/no-hooks': [0],
+        '@vitest/no-identical-title': [2],
+        '@vitest/no-interpolation-in-snapshots': [0],
+        '@vitest/no-large-snapshots': [0],
+        '@vitest/no-mocks-import': [0],
+        '@vitest/no-restricted-matchers': [0],
+        '@vitest/no-restricted-vi-methods': [0],
+        '@vitest/no-standalone-expect': [0],
+        '@vitest/no-test-prefixes': [0],
+        '@vitest/no-test-return-statement': [0],
+        '@vitest/prefer-called-with': [0],
+        '@vitest/prefer-comparison-matcher': [0],
+        '@vitest/prefer-each': [0],
+        '@vitest/prefer-equality-matcher': [0],
+        '@vitest/prefer-expect-resolves': [0],
+        '@vitest/prefer-hooks-in-order': [0],
+        '@vitest/prefer-hooks-on-top': [2],
+        '@vitest/prefer-lowercase-title': [0],
+        '@vitest/prefer-mock-promise-shorthand': [0],
+        '@vitest/prefer-snapshot-hint': [0],
+        '@vitest/prefer-spy-on': [0],
+        '@vitest/prefer-strict-equal': [0],
+        '@vitest/prefer-to-be': [0],
+        '@vitest/prefer-to-be-falsy': [0],
+        '@vitest/prefer-to-be-object': [0],
+        '@vitest/prefer-to-be-truthy': [0],
+        '@vitest/prefer-to-contain': [0],
+        '@vitest/prefer-to-have-length': [0],
+        '@vitest/prefer-todo': [0],
+        '@vitest/require-hook': [0],
+        '@vitest/require-to-throw-message': [0],
+        '@vitest/require-top-level-describe': [0],
+        '@vitest/valid-describe-callback': [2],
+        '@vitest/valid-expect': [2],
+        '@vitest/valid-title': [2],
       },
     },
     {
@@ -163,7 +168,7 @@ module.exports = {
     {
       files: ['tests/e2e/**'],
       plugins: [
-        'eslint-plugin-playwright'
+        'eslint-plugin-playwright',
      ],
      extends: [
        'plugin:playwright/recommended',
.github/workflows/cron-licenses.yml (vendored, 4 changes)

@@ -1,8 +1,8 @@
 name: cron-licenses
 
 on:
-  #schedule:
-  #  - cron: "7 0 * * 1" # every Monday at 00:07 UTC
+  # schedule:
+  #   - cron: "7 0 * * 1" # every Monday at 00:07 UTC
   workflow_dispatch:
 
 jobs:
.mailmap (new file, 2 lines)

@@ -0,0 +1,2 @@
+Unknwon <u@gogs.io> <joe2010xtmf@163.com>
+Unknwon <u@gogs.io> 无闻 <u@gogs.io>
@@ -1,5 +1,5 @@
 # Build stage
-FROM docker.io/library/golang:1.23-alpine3.21 AS build-env
+FROM docker.io/library/golang:1.24-alpine3.21 AS build-env
 
 ARG GOPROXY
 ENV GOPROXY=${GOPROXY:-direct}
@@ -1,5 +1,5 @@
 # Build stage
-FROM docker.io/library/golang:1.23-alpine3.21 AS build-env
+FROM docker.io/library/golang:1.24-alpine3.21 AS build-env
 
 ARG GOPROXY
 ENV GOPROXY=${GOPROXY:-direct}
Makefile (8 changes)

@@ -23,7 +23,7 @@ SHASUM ?= shasum -a 256
 HAS_GO := $(shell hash $(GO) > /dev/null 2>&1 && echo yes)
 COMMA := ,
 
-XGO_VERSION := go-1.23.x
+XGO_VERSION := go-1.24.x
 
 AIR_PACKAGE ?= github.com/air-verse/air@v1
 EDITORCONFIG_CHECKER_PACKAGE ?= github.com/editorconfig-checker/editorconfig-checker/v3/cmd/editorconfig-checker@v3.1.2
@@ -144,9 +144,9 @@ TAR_EXCLUDES := .git data indexers queues log node_modules $(EXECUTABLE) $(FOMAN
 GO_DIRS := build cmd models modules routers services tests
 WEB_DIRS := web_src/js web_src/css
 
-ESLINT_FILES := web_src/js tools *.js *.ts tests/e2e
+ESLINT_FILES := web_src/js tools *.js *.ts *.cjs tests/e2e
 STYLELINT_FILES := web_src/css web_src/js/components/*.vue
-SPELLCHECK_FILES := $(GO_DIRS) $(WEB_DIRS) templates options/locale/locale_en-US.ini .github $(filter-out CHANGELOG.md, $(wildcard *.go *.js *.md *.yml *.yaml *.toml))
+SPELLCHECK_FILES := $(GO_DIRS) $(WEB_DIRS) templates options/locale/locale_en-US.ini .github $(filter-out CHANGELOG.md, $(wildcard *.go *.js *.md *.yml *.yaml *.toml)) $(filter-out tools/misspellings.csv, $(wildcard tools/*))
 EDITORCONFIG_FILES := templates .github/workflows options/locale/locale_en-US.ini
 
 GO_SOURCES := $(wildcard *.go)
@@ -393,7 +393,7 @@ lint-templates: .venv node_modules ## lint template files
 
 .PHONY: lint-yaml
 lint-yaml: .venv ## lint yaml files
-	@poetry run yamllint .
+	@poetry run yamllint -s .
 
 .PHONY: watch
 watch: ## watch everything and continuously rebuild
README.md (64 changes)

@@ -150,10 +150,64 @@ for the full license text.
 <details>
 <summary>Looking for an overview of the interface? Check it out!</summary>
 
-||||
-|:---:|:---:|:---:|
-||||
-||||
-||||
+### Login/Register Page
+
+![]()
+![]()
+
+### User Dashboard
+
+![]()
+![]()
+![]()
+![]()
+
+### User Profile
+
+![]()
+
+### Explore
+
+![]()
+![]()
+![]()
+
+### Repository
+
+![]()
+![]()
+![]()
+![]()
+![]()
+![]()
+![]()
+
+#### Repository Issue
+
+![]()
+![]()
+
+#### Repository Pull Requests
+
+![]()
+![]()
+![]()
+![]()
+
+#### Repository Actions
+
+![]()
+![]()
+
+#### Repository Activity
+
+![]()
+![]()
+![]()
+![]()
+
+### Organization
+
+![]()
+
 </details>
 
README_ZH.md (64 changes)

@@ -93,10 +93,64 @@ Gitea 提供官方的 [go-sdk](https://gitea.com/gitea/go-sdk)，以及名为 [t
 <details>
 <summary>截图</summary>
 
-||||
-|:---:|:---:|:---:|
-||||
-||||
-||||
+### 登录界面
+
+![]()
+![]()
+
+### 用户首页
+
+![]()
+![]()
+![]()
+![]()
+
+### 用户资料
+
+![]()
+
+### 探索
+
+![]()
+![]()
+![]()
+
+### 仓库
+
+![]()
+![]()
+![]()
+![]()
+![]()
+![]()
+![]()
+
+#### 仓库工单
+
+![]()
+![]()
+
+#### 仓库合并请求
+
+![]()
+![]()
+![]()
+![]()
+
+#### 仓库 Actions
+
+![]()
+![]()
+
+#### 仓库动态
+
+![]()
+![]()
+![]()
+![]()
+
+### 组织
+
+![]()
+
 </details>
 
@@ -196,7 +196,7 @@ func migrateActionsLog(ctx context.Context, dstStorage storage.ObjectStorage) er
 
 func migrateActionsArtifacts(ctx context.Context, dstStorage storage.ObjectStorage) error {
     return db.Iterate(ctx, nil, func(ctx context.Context, artifact *actions_model.ActionArtifact) error {
-        if artifact.Status == int64(actions_model.ArtifactStatusExpired) {
+        if artifact.Status == actions_model.ArtifactStatusExpired {
             return nil
         }
 
flake.lock (generated, 6 changes)

@@ -20,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1736798957,
-        "narHash": "sha256-qwpCtZhSsSNQtK4xYGzMiyEDhkNzOCz/Vfu4oL2ETsQ=",
+        "lastModified": 1739214665,
+        "narHash": "sha256-26L8VAu3/1YRxS8MHgBOyOM8xALdo6N0I04PgorE7UM=",
         "owner": "nixos",
         "repo": "nixpkgs",
-        "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3",
+        "rev": "64e75cd44acf21c7933d61d7721e812eac1b5a0a",
         "type": "github"
       },
       "original": {
@@ -29,13 +29,13 @@
       poetry
 
       # backend
-      go_1_23
+      go_1_24
       gofumpt
       sqlite
     ];
     shellHook = ''
-      export GO="${pkgs.go_1_23}/bin/go"
-      export GOROOT="${pkgs.go_1_23}/share/go"
+      export GO="${pkgs.go_1_24}/bin/go"
+      export GOROOT="${pkgs.go_1_24}/share/go"
     '';
   };
 }
go.mod (2 changes)

@@ -1,6 +1,6 @@
 module code.gitea.io/gitea
 
-go 1.23
+go 1.24
 
 // rfc5280 said: "The serial number is an integer assigned by the CA to each certificate."
 // But some CAs use negative serial number, just relax the check. related:
@@ -48,7 +48,7 @@ type ActionArtifact struct {
     ContentEncoding string             // The content encoding of the artifact
     ArtifactPath    string             `xorm:"index unique(runid_name_path)"` // The path to the artifact when runner uploads it
     ArtifactName    string             `xorm:"index unique(runid_name_path)"` // The name of the artifact when runner uploads it
-    Status          int64              `xorm:"index"` // The status of the artifact, uploading, expired or need-delete
+    Status          ArtifactStatus     `xorm:"index"` // The status of the artifact, uploading, expired or need-delete
     CreatedUnix     timeutil.TimeStamp `xorm:"created"`
     UpdatedUnix     timeutil.TimeStamp `xorm:"updated index"`
     ExpiredUnix     timeutil.TimeStamp `xorm:"index"` // The time when the artifact will be expired
@@ -68,7 +68,7 @@ func CreateArtifact(ctx context.Context, t *ActionTask, artifactName, artifactPa
         RepoID:      t.RepoID,
         OwnerID:     t.OwnerID,
         CommitSHA:   t.CommitSHA,
-        Status:      int64(ArtifactStatusUploadPending),
+        Status:      ArtifactStatusUploadPending,
         ExpiredUnix: timeutil.TimeStamp(time.Now().Unix() + timeutil.Day*expiredDays),
     }
     if _, err := db.GetEngine(ctx).Insert(artifact); err != nil {
@@ -108,12 +108,19 @@ func UpdateArtifactByID(ctx context.Context, id int64, art *ActionArtifact) erro
 
 type FindArtifactsOptions struct {
     db.ListOptions
-    RepoID       int64
-    RunID        int64
-    ArtifactName string
-    Status       int
+    RepoID               int64
+    RunID                int64
+    ArtifactName         string
+    Status               int
+    FinalizedArtifactsV4 bool
 }
 
+func (opts FindArtifactsOptions) ToOrders() string {
+    return "id"
+}
+
+var _ db.FindOptionsOrder = (*FindArtifactsOptions)(nil)
+
 func (opts FindArtifactsOptions) ToConds() builder.Cond {
     cond := builder.NewCond()
     if opts.RepoID > 0 {
@@ -128,11 +135,15 @@ func (opts FindArtifactsOptions) ToConds() builder.Cond {
     if opts.Status > 0 {
         cond = cond.And(builder.Eq{"status": opts.Status})
     }
+    if opts.FinalizedArtifactsV4 {
+        cond = cond.And(builder.Eq{"status": ArtifactStatusUploadConfirmed}.Or(builder.Eq{"status": ArtifactStatusExpired}))
+        cond = cond.And(builder.Eq{"content_encoding": "application/zip"})
+    }
 
     return cond
 }
 
-// ActionArtifactMeta is the meta data of an artifact
+// ActionArtifactMeta is the meta-data of an artifact
 type ActionArtifactMeta struct {
     ArtifactName string
     FileSize     int64
@@ -166,18 +177,18 @@ func ListPendingDeleteArtifacts(ctx context.Context, limit int) ([]*ActionArtifa
 
 // SetArtifactExpired sets an artifact to expired
 func SetArtifactExpired(ctx context.Context, artifactID int64) error {
-    _, err := db.GetEngine(ctx).Where("id=? AND status = ?", artifactID, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: int64(ArtifactStatusExpired)})
+    _, err := db.GetEngine(ctx).Where("id=? AND status = ?", artifactID, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: ArtifactStatusExpired})
     return err
 }
 
 // SetArtifactNeedDelete sets an artifact to need-delete, cron job will delete it
 func SetArtifactNeedDelete(ctx context.Context, runID int64, name string) error {
-    _, err := db.GetEngine(ctx).Where("run_id=? AND artifact_name=? AND status = ?", runID, name, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: int64(ArtifactStatusPendingDeletion)})
+    _, err := db.GetEngine(ctx).Where("run_id=? AND artifact_name=? AND status = ?", runID, name, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: ArtifactStatusPendingDeletion})
     return err
 }
 
 // SetArtifactDeleted sets an artifact to deleted
 func SetArtifactDeleted(ctx context.Context, artifactID int64) error {
-    _, err := db.GetEngine(ctx).ID(artifactID).Cols("status").Update(&ActionArtifact{Status: int64(ArtifactStatusDeleted)})
+    _, err := db.GetEngine(ctx).ID(artifactID).Cols("status").Update(&ActionArtifact{Status: ArtifactStatusDeleted})
    return err
 }
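The Status change above swaps a raw int64 column for the named ArtifactStatus type, which is what lets this patch delete every int64(...) cast around the status constants: the compiler now rejects mixing plain integers with statuses. A minimal compilable sketch of the pattern, with constant values assumed for the example (the real definitions live elsewhere in models/actions):

```go
package main

import "fmt"

// ArtifactStatus is stored as an integer column; giving the type a name
// means a stray untyped int64 no longer compiles where a status is expected.
type ArtifactStatus int64

const (
	ArtifactStatusUploadPending ArtifactStatus = iota + 1 // values assumed for this sketch
	ArtifactStatusUploadConfirmed
	ArtifactStatusExpired
)

type ActionArtifact struct {
	ID     int64          `xorm:"pk autoincr"`
	Status ArtifactStatus `xorm:"index"` // xorm still maps the named type to an integer column
}

func main() {
	a := ActionArtifact{Status: ArtifactStatusExpired}
	// No int64(...) conversion needed on either side of the comparison.
	if a.Status == ArtifactStatusExpired {
		fmt.Println("skip expired artifact")
	}
}
```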
|
@ -10,6 +10,7 @@ import (
|
||||
repo_model "code.gitea.io/gitea/models/repo"
|
||||
user_model "code.gitea.io/gitea/models/user"
|
||||
"code.gitea.io/gitea/modules/container"
|
||||
"code.gitea.io/gitea/modules/translation"
|
||||
webhook_module "code.gitea.io/gitea/modules/webhook"
|
||||
|
||||
"xorm.io/builder"
|
||||
@ -112,14 +113,14 @@ type StatusInfo struct {
|
||||
}
|
||||
|
||||
// GetStatusInfoList returns a slice of StatusInfo
|
||||
func GetStatusInfoList(ctx context.Context) []StatusInfo {
|
||||
func GetStatusInfoList(ctx context.Context, lang translation.Locale) []StatusInfo {
|
||||
// same as those in aggregateJobStatus
|
||||
allStatus := []Status{StatusSuccess, StatusFailure, StatusWaiting, StatusRunning}
|
||||
statusInfoList := make([]StatusInfo, 0, 4)
|
||||
for _, s := range allStatus {
|
||||
statusInfoList = append(statusInfoList, StatusInfo{
|
||||
Status: int(s),
|
||||
DisplayedStatus: s.String(),
|
||||
DisplayedStatus: s.LocaleString(lang),
|
||||
})
|
||||
}
|
||||
return statusInfoList
|
||||
|
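The signature change threads a locale through so the displayed status is resolved per-user instead of always being the English s.String(). A rough sketch of the idea, assuming a minimal Locale interface and translation keys invented for the example (Gitea's real locale type is modules/translation.Locale):

```go
package main

import "fmt"

// Locale is a minimal stand-in for Gitea's translation.Locale.
type Locale interface {
	TrString(key string) string
}

type Status int

const (
	StatusSuccess Status = iota + 1
	StatusFailure
)

func (s Status) String() string {
	return [...]string{"", "success", "failure"}[s]
}

// LocaleString resolves the status through the locale instead of
// returning the hard-coded English name.
func (s Status) LocaleString(lang Locale) string {
	return lang.TrString("actions.status." + s.String())
}

// mapLocale is a toy locale backed by a map, just for the demo.
type mapLocale map[string]string

func (m mapLocale) TrString(key string) string { return m[key] }

func main() {
	de := mapLocale{"actions.status.success": "Erfolgreich"}
	fmt.Println(StatusSuccess.LocaleString(de)) // Erfolgreich
}
```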
@@ -167,6 +167,7 @@ func init() {
 
 type FindRunnerOptions struct {
     db.ListOptions
+    IDs     []int64
     RepoID  int64
     OwnerID int64 // it will be ignored if RepoID is set
     Sort    string
@@ -178,6 +179,14 @@ type FindRunnerOptions struct {
 func (opts FindRunnerOptions) ToConds() builder.Cond {
     cond := builder.NewCond()
 
+    if len(opts.IDs) > 0 {
+        if len(opts.IDs) == 1 {
+            cond = cond.And(builder.Eq{"id": opts.IDs[0]})
+        } else {
+            cond = cond.And(builder.In("id", opts.IDs))
+        }
+    }
+
     if opts.RepoID > 0 {
         c := builder.NewCond().And(builder.Eq{"repo_id": opts.RepoID})
         if opts.WithAvailable {
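This hunk and the FindVariablesOpts hunk below add the same filter shape: one ID becomes an equality test, several become an IN clause, and an empty slice adds nothing. A runnable sketch with xorm.io/builder showing the SQL each branch yields (the column name is a placeholder):

```go
package main

import (
	"fmt"

	"xorm.io/builder"
)

// idCond turns a slice of IDs into the cheapest matching condition:
// `id = ?` for one ID, `id IN (...)` for several, a no-op for none.
func idCond(ids []int64) builder.Cond {
	cond := builder.NewCond()
	if len(ids) > 0 {
		if len(ids) == 1 {
			cond = cond.And(builder.Eq{"id": ids[0]})
		} else {
			cond = cond.And(builder.In("id", ids))
		}
	}
	return cond
}

func main() {
	for _, ids := range [][]int64{{42}, {1, 2, 3}} {
		sql, args, err := builder.ToSQL(idCond(ids))
		if err != nil {
			panic(err)
		}
		fmt.Println(sql, args) // id=? [42]  /  id IN (?,?,?) [1 2 3]
	}
}
```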
@@ -58,6 +58,7 @@ func InsertVariable(ctx context.Context, ownerID, repoID int64, name, data strin
 
 type FindVariablesOpts struct {
     db.ListOptions
+    IDs     []int64
     RepoID  int64
     OwnerID int64 // it will be ignored if RepoID is set
     Name    string
@@ -65,6 +66,15 @@ type FindVariablesOpts struct {
 
 func (opts FindVariablesOpts) ToConds() builder.Cond {
     cond := builder.NewCond()
 
+    if len(opts.IDs) > 0 {
+        if len(opts.IDs) == 1 {
+            cond = cond.And(builder.Eq{"id": opts.IDs[0]})
+        } else {
+            cond = cond.And(builder.In("id", opts.IDs))
+        }
+    }
+
     // Since we now support instance-level variables,
     // there is no need to check for null values for `owner_id` and `repo_id`
     cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
@@ -85,12 +95,12 @@ func FindVariables(ctx context.Context, opts FindVariablesOpts) ([]*ActionVariab
     return db.Find[ActionVariable](ctx, opts)
 }
 
-func UpdateVariable(ctx context.Context, variable *ActionVariable) (bool, error) {
-    count, err := db.GetEngine(ctx).ID(variable.ID).Cols("name", "data").
-        Update(&ActionVariable{
-            Name: variable.Name,
-            Data: variable.Data,
-        })
+func UpdateVariableCols(ctx context.Context, variable *ActionVariable, cols ...string) (bool, error) {
+    variable.Name = strings.ToUpper(variable.Name)
+    count, err := db.GetEngine(ctx).
+        ID(variable.ID).
+        Cols(cols...).
+        Update(variable)
     return count != 0, err
 }
 
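UpdateVariableCols replaces the old UpdateVariable, whose hard-coded Cols("name", "data") forced every caller to rewrite both columns; callers now name exactly the columns they touched. A sketch of a call site, assuming an import of the models/actions package (the package name of the wrapper is hypothetical):

```go
package sketch

import (
	"context"

	actions_model "code.gitea.io/gitea/models/actions"
)

// renameVariable persists only the name column; data and every other field
// of the row stay untouched. (UpdateVariableCols upper-cases the name
// before writing, per the diff above.)
func renameVariable(ctx context.Context, v *actions_model.ActionVariable, newName string) error {
	v.Name = newName
	_, err := actions_model.UpdateVariableCols(ctx, v, "name")
	return err
}
```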
|
@ -106,7 +106,7 @@ func GPGKeyToEntity(ctx context.Context, k *GPGKey) (*openpgp.Entity, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keys, err := checkArmoredGPGKeyString(impKey.Content)
|
||||
keys, err := CheckArmoredGPGKeyString(impKey.Content)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -115,7 +115,7 @@ func GPGKeyToEntity(ctx context.Context, k *GPGKey) (*openpgp.Entity, error) {
|
||||
|
||||
// parseSubGPGKey parse a sub Key
|
||||
func parseSubGPGKey(ownerID int64, primaryID string, pubkey *packet.PublicKey, expiry time.Time) (*GPGKey, error) {
|
||||
content, err := base64EncPubKey(pubkey)
|
||||
content, err := Base64EncPubKey(pubkey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -183,7 +183,7 @@ func parseGPGKey(ctx context.Context, ownerID int64, e *openpgp.Entity, verified
|
||||
}
|
||||
}
|
||||
|
||||
content, err := base64EncPubKey(pubkey)
|
||||
content, err := Base64EncPubKey(pubkey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -239,33 +239,3 @@ func DeleteGPGKey(ctx context.Context, doer *user_model.User, id int64) (err err
|
||||
|
||||
return committer.Commit()
|
||||
}
|
||||
|
||||
func checkKeyEmails(ctx context.Context, email string, keys ...*GPGKey) (bool, string) {
|
||||
uid := int64(0)
|
||||
var userEmails []*user_model.EmailAddress
|
||||
var user *user_model.User
|
||||
for _, key := range keys {
|
||||
for _, e := range key.Emails {
|
||||
if e.IsActivated && (email == "" || strings.EqualFold(e.Email, email)) {
|
||||
return true, e.Email
|
||||
}
|
||||
}
|
||||
if key.Verified && key.OwnerID != 0 {
|
||||
if uid != key.OwnerID {
|
||||
userEmails, _ = user_model.GetEmailAddresses(ctx, key.OwnerID)
|
||||
uid = key.OwnerID
|
||||
user = &user_model.User{ID: uid}
|
||||
_, _ = user_model.GetUser(ctx, user)
|
||||
}
|
||||
for _, e := range userEmails {
|
||||
if e.IsActivated && (email == "" || strings.EqualFold(e.Email, email)) {
|
||||
return true, e.Email
|
||||
}
|
||||
}
|
||||
if user.KeepEmailPrivate && strings.EqualFold(email, user.GetEmail()) {
|
||||
return true, user.GetEmail()
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, email
|
||||
}
|
||||
|
@ -67,7 +67,7 @@ func addGPGSubKey(ctx context.Context, key *GPGKey) (err error) {
|
||||
|
||||
// AddGPGKey adds new public key to database.
|
||||
func AddGPGKey(ctx context.Context, ownerID int64, content, token, signature string) ([]*GPGKey, error) {
|
||||
ekeys, err := checkArmoredGPGKeyString(content)
|
||||
ekeys, err := CheckArmoredGPGKeyString(content)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -4,17 +4,12 @@
 package asymkey
 
 import (
-    "context"
     "fmt"
     "hash"
-    "strings"
 
     "code.gitea.io/gitea/models/db"
     repo_model "code.gitea.io/gitea/models/repo"
     user_model "code.gitea.io/gitea/models/user"
-    "code.gitea.io/gitea/modules/git"
-    "code.gitea.io/gitea/modules/log"
-    "code.gitea.io/gitea/modules/setting"
 
     "github.com/ProtonMail/go-crypto/openpgp/packet"
 )
@@ -70,263 +65,6 @@ const (
     NoKeyFound = "gpg.error.no_gpg_keys_found"
 )
 
-// ParseCommitsWithSignature checks if signaute of commits are corresponding to users gpg keys.
-func ParseCommitsWithSignature(ctx context.Context, oldCommits []*user_model.UserCommit, repoTrustModel repo_model.TrustModelType, isOwnerMemberCollaborator func(*user_model.User) (bool, error)) []*SignCommit {
-    newCommits := make([]*SignCommit, 0, len(oldCommits))
-    keyMap := map[string]bool{}
-
-    for _, c := range oldCommits {
-        signCommit := &SignCommit{
-            UserCommit:   c,
-            Verification: ParseCommitWithSignature(ctx, c.Commit),
-        }
-
-        _ = CalculateTrustStatus(signCommit.Verification, repoTrustModel, isOwnerMemberCollaborator, &keyMap)
-
-        newCommits = append(newCommits, signCommit)
-    }
-    return newCommits
-}
-
-// ParseCommitWithSignature check if signature is good against keystore.
-func ParseCommitWithSignature(ctx context.Context, c *git.Commit) *CommitVerification {
-    var committer *user_model.User
-    if c.Committer != nil {
-        var err error
-        // Find Committer account
-        committer, err = user_model.GetUserByEmail(ctx, c.Committer.Email) // This finds the user by primary email or activated email so commit will not be valid if email is not
-        if err != nil { // Skipping not user for committer
-            committer = &user_model.User{
-                Name:  c.Committer.Name,
-                Email: c.Committer.Email,
-            }
-            // We can expect this to often be an ErrUserNotExist. in the case
-            // it is not, however, it is important to log it.
-            if !user_model.IsErrUserNotExist(err) {
-                log.Error("GetUserByEmail: %v", err)
-                return &CommitVerification{
-                    CommittingUser: committer,
-                    Verified:       false,
-                    Reason:         "gpg.error.no_committer_account",
-                }
-            }
-        }
-    }
-
-    // If no signature just report the committer
-    if c.Signature == nil {
-        return &CommitVerification{
-            CommittingUser: committer,
-            Verified:       false,                         // Default value
-            Reason:         "gpg.error.not_signed_commit", // Default value
-        }
-    }
-
-    // If this a SSH signature handle it differently
-    if strings.HasPrefix(c.Signature.Signature, "-----BEGIN SSH SIGNATURE-----") {
-        return ParseCommitWithSSHSignature(ctx, c, committer)
-    }
-
-    // Parsing signature
-    sig, err := extractSignature(c.Signature.Signature)
-    if err != nil { // Skipping failed to extract sign
-        log.Error("SignatureRead err: %v", err)
-        return &CommitVerification{
-            CommittingUser: committer,
-            Verified:       false,
-            Reason:         "gpg.error.extract_sign",
-        }
-    }
-
-    keyID := tryGetKeyIDFromSignature(sig)
-    defaultReason := NoKeyFound
-
-    // First check if the sig has a keyID and if so just look at that
-    if commitVerification := hashAndVerifyForKeyID(
-        ctx,
-        sig,
-        c.Signature.Payload,
-        committer,
-        keyID,
-        setting.AppName,
-        ""); commitVerification != nil {
-        if commitVerification.Reason == BadSignature {
-            defaultReason = BadSignature
-        } else {
-            return commitVerification
-        }
-    }
-
-    // Now try to associate the signature with the committer, if present
-    if committer.ID != 0 {
-        keys, err := db.Find[GPGKey](ctx, FindGPGKeyOptions{
-            OwnerID: committer.ID,
-        })
-        if err != nil { // Skipping failed to get gpg keys of user
-            log.Error("ListGPGKeys: %v", err)
-            return &CommitVerification{
-                CommittingUser: committer,
-                Verified:       false,
-                Reason:         "gpg.error.failed_retrieval_gpg_keys",
-            }
-        }
-
-        if err := GPGKeyList(keys).LoadSubKeys(ctx); err != nil {
-            log.Error("LoadSubKeys: %v", err)
-            return &CommitVerification{
-                CommittingUser: committer,
-                Verified:       false,
-                Reason:         "gpg.error.failed_retrieval_gpg_keys",
-            }
-        }
-
-        committerEmailAddresses, _ := user_model.GetEmailAddresses(ctx, committer.ID)
-        activated := false
-        for _, e := range committerEmailAddresses {
-            if e.IsActivated && strings.EqualFold(e.Email, c.Committer.Email) {
-                activated = true
-                break
-            }
-        }
-
-        for _, k := range keys {
-            // Pre-check (& optimization) that emails attached to key can be attached to the committer email and can validate
-            canValidate := false
-            email := ""
-            if k.Verified && activated {
-                canValidate = true
-                email = c.Committer.Email
-            }
-            if !canValidate {
-                for _, e := range k.Emails {
-                    if e.IsActivated && strings.EqualFold(e.Email, c.Committer.Email) {
-                        canValidate = true
-                        email = e.Email
-                        break
-                    }
-                }
-            }
-            if !canValidate {
-                continue // Skip this key
-            }
-
-            commitVerification := hashAndVerifyWithSubKeysCommitVerification(sig, c.Signature.Payload, k, committer, committer, email)
-            if commitVerification != nil {
-                return commitVerification
-            }
-        }
-    }
-
-    if setting.Repository.Signing.SigningKey != "" && setting.Repository.Signing.SigningKey != "default" && setting.Repository.Signing.SigningKey != "none" {
-        // OK we should try the default key
-        gpgSettings := git.GPGSettings{
-            Sign:  true,
-            KeyID: setting.Repository.Signing.SigningKey,
-            Name:  setting.Repository.Signing.SigningName,
-            Email: setting.Repository.Signing.SigningEmail,
-        }
-        if err := gpgSettings.LoadPublicKeyContent(); err != nil {
-            log.Error("Error getting default signing key: %s %v", gpgSettings.KeyID, err)
-        } else if commitVerification := verifyWithGPGSettings(ctx, &gpgSettings, sig, c.Signature.Payload, committer, keyID); commitVerification != nil {
-            if commitVerification.Reason == BadSignature {
-                defaultReason = BadSignature
-            } else {
-                return commitVerification
-            }
-        }
-    }
-
-    defaultGPGSettings, err := c.GetRepositoryDefaultPublicGPGKey(false)
-    if err != nil {
-        log.Error("Error getting default public gpg key: %v", err)
-    } else if defaultGPGSettings == nil {
-        log.Warn("Unable to get defaultGPGSettings for unattached commit: %s", c.ID.String())
-    } else if defaultGPGSettings.Sign {
-        if commitVerification := verifyWithGPGSettings(ctx, defaultGPGSettings, sig, c.Signature.Payload, committer, keyID); commitVerification != nil {
-            if commitVerification.Reason == BadSignature {
-                defaultReason = BadSignature
-            } else {
-                return commitVerification
-            }
-        }
-    }
-
-    return &CommitVerification{ // Default at this stage
-        CommittingUser: committer,
-        Verified:       false,
-        Warning:        defaultReason != NoKeyFound,
-        Reason:         defaultReason,
-        SigningKey: &GPGKey{
-            KeyID: keyID,
-        },
-    }
-}
-
-func verifyWithGPGSettings(ctx context.Context, gpgSettings *git.GPGSettings, sig *packet.Signature, payload string, committer *user_model.User, keyID string) *CommitVerification {
-    // First try to find the key in the db
-    if commitVerification := hashAndVerifyForKeyID(ctx, sig, payload, committer, gpgSettings.KeyID, gpgSettings.Name, gpgSettings.Email); commitVerification != nil {
-        return commitVerification
-    }
-
-    // Otherwise we have to parse the key
-    ekeys, err := checkArmoredGPGKeyString(gpgSettings.PublicKeyContent)
-    if err != nil {
-        log.Error("Unable to get default signing key: %v", err)
-        return &CommitVerification{
-            CommittingUser: committer,
-            Verified:       false,
-            Reason:         "gpg.error.generate_hash",
-        }
-    }
-    for _, ekey := range ekeys {
-        pubkey := ekey.PrimaryKey
-        content, err := base64EncPubKey(pubkey)
-        if err != nil {
-            return &CommitVerification{
-                CommittingUser: committer,
-                Verified:       false,
-                Reason:         "gpg.error.generate_hash",
-            }
-        }
-        k := &GPGKey{
-            Content: content,
-            CanSign: pubkey.CanSign(),
-            KeyID:   pubkey.KeyIdString(),
-        }
-        for _, subKey := range ekey.Subkeys {
-            content, err := base64EncPubKey(subKey.PublicKey)
-            if err != nil {
-                return &CommitVerification{
-                    CommittingUser: committer,
-                    Verified:       false,
-                    Reason:         "gpg.error.generate_hash",
-                }
-            }
-            k.SubsKey = append(k.SubsKey, &GPGKey{
-                Content: content,
-                CanSign: subKey.PublicKey.CanSign(),
-                KeyID:   subKey.PublicKey.KeyIdString(),
-            })
-        }
-        if commitVerification := hashAndVerifyWithSubKeysCommitVerification(sig, payload, k, committer, &user_model.User{
-            Name:  gpgSettings.Name,
-            Email: gpgSettings.Email,
-        }, gpgSettings.Email); commitVerification != nil {
-            return commitVerification
-        }
-        if keyID == k.KeyID {
-            // This is a bad situation ... We have a key id that matches our default key but the signature doesn't match.
-            return &CommitVerification{
-                CommittingUser: committer,
-                Verified:       false,
-                Warning:        true,
-                Reason:         BadSignature,
-            }
-        }
-    }
-    return nil
-}
-
 func verifySign(s *packet.Signature, h hash.Hash, k *GPGKey) error {
     // Check if key can sign
     if !k.CanSign {
@@ -369,7 +107,7 @@ func hashAndVerifyWithSubKeys(sig *packet.Signature, payload string, k *GPGKey)
     return nil, nil
 }
 
-func hashAndVerifyWithSubKeysCommitVerification(sig *packet.Signature, payload string, k *GPGKey, committer, signer *user_model.User, email string) *CommitVerification {
+func HashAndVerifyWithSubKeysCommitVerification(sig *packet.Signature, payload string, k *GPGKey, committer, signer *user_model.User, email string) *CommitVerification {
     key, err := hashAndVerifyWithSubKeys(sig, payload, k)
     if err != nil { // Skipping failed to generate hash
         return &CommitVerification{
@@ -392,78 +130,6 @@ func hashAndVerifyWithSubKeysCommitVerification(sig *packet.Signature, payload s
     return nil
 }
 
-func hashAndVerifyForKeyID(ctx context.Context, sig *packet.Signature, payload string, committer *user_model.User, keyID, name, email string) *CommitVerification {
-    if keyID == "" {
-        return nil
-    }
-    keys, err := db.Find[GPGKey](ctx, FindGPGKeyOptions{
-        KeyID:          keyID,
-        IncludeSubKeys: true,
-    })
-    if err != nil {
-        log.Error("GetGPGKeysByKeyID: %v", err)
-        return &CommitVerification{
-            CommittingUser: committer,
-            Verified:       false,
-            Reason:         "gpg.error.failed_retrieval_gpg_keys",
-        }
-    }
-    if len(keys) == 0 {
-        return nil
-    }
-    for _, key := range keys {
-        var primaryKeys []*GPGKey
-        if key.PrimaryKeyID != "" {
-            primaryKeys, err = db.Find[GPGKey](ctx, FindGPGKeyOptions{
-                KeyID:          key.PrimaryKeyID,
-                IncludeSubKeys: true,
-            })
-            if err != nil {
-                log.Error("GetGPGKeysByKeyID: %v", err)
-                return &CommitVerification{
-                    CommittingUser: committer,
-                    Verified:       false,
-                    Reason:         "gpg.error.failed_retrieval_gpg_keys",
-                }
-            }
-        }
-
-        activated, email := checkKeyEmails(ctx, email, append([]*GPGKey{key}, primaryKeys...)...)
-        if !activated {
-            continue
-        }
-
-        signer := &user_model.User{
-            Name:  name,
-            Email: email,
-        }
-        if key.OwnerID != 0 {
-            owner, err := user_model.GetUserByID(ctx, key.OwnerID)
-            if err == nil {
-                signer = owner
-            } else if !user_model.IsErrUserNotExist(err) {
-                log.Error("Failed to user_model.GetUserByID: %d for key ID: %d (%s) %v", key.OwnerID, key.ID, key.KeyID, err)
-                return &CommitVerification{
-                    CommittingUser: committer,
-                    Verified:       false,
-                    Reason:         "gpg.error.no_committer_account",
-                }
-            }
-        }
-        commitVerification := hashAndVerifyWithSubKeysCommitVerification(sig, payload, key, committer, signer, email)
-        if commitVerification != nil {
-            return commitVerification
-        }
-    }
-    // This is a bad situation ... We have a key id that is in our database but the signature doesn't match.
-    return &CommitVerification{
-        CommittingUser: committer,
-        Verified:       false,
-        Warning:        true,
-        Reason:         BadSignature,
-    }
-}
-
 // CalculateTrustStatus will calculate the TrustStatus for a commit verification within a repository
 // There are several trust models in Gitea
 func CalculateTrustStatus(verification *CommitVerification, repoTrustModel repo_model.TrustModelType, isOwnerMemberCollaborator func(*user_model.User) (bool, error), keyMap *map[string]bool) error {
@@ -33,9 +33,9 @@ import (
 
 // This file provides common functions relating to GPG Keys
 
-// checkArmoredGPGKeyString checks if the given key string is a valid GPG armored key.
+// CheckArmoredGPGKeyString checks if the given key string is a valid GPG armored key.
 // The function returns the actual public key on success
-func checkArmoredGPGKeyString(content string) (openpgp.EntityList, error) {
+func CheckArmoredGPGKeyString(content string) (openpgp.EntityList, error) {
     list, err := openpgp.ReadArmoredKeyRing(strings.NewReader(content))
     if err != nil {
         return nil, ErrGPGKeyParsing{err}
@@ -43,8 +43,8 @@ func checkArmoredGPGKeyString(content string) (openpgp.EntityList, error) {
     return list, nil
 }
 
-// base64EncPubKey encode public key content to base 64
-func base64EncPubKey(pubkey *packet.PublicKey) (string, error) {
+// Base64EncPubKey encode public key content to base 64
+func Base64EncPubKey(pubkey *packet.PublicKey) (string, error) {
     var w bytes.Buffer
     err := pubkey.Serialize(&w)
     if err != nil {
@@ -119,7 +119,7 @@ func readArmoredSign(r io.Reader) (body io.Reader, err error) {
     return block.Body, nil
 }
 
-func extractSignature(s string) (*packet.Signature, error) {
+func ExtractSignature(s string) (*packet.Signature, error) {
     r, err := readArmoredSign(strings.NewReader(s))
     if err != nil {
         return nil, fmt.Errorf("Failed to read signature armor")
@@ -135,7 +135,7 @@ func extractSignature(s string) (*packet.Signature, error) {
     return sig, nil
 }
 
-func tryGetKeyIDFromSignature(sig *packet.Signature) string {
+func TryGetKeyIDFromSignature(sig *packet.Signature) string {
     if sig.IssuerKeyId != nil && (*sig.IssuerKeyId) != 0 {
         return fmt.Sprintf("%016X", *sig.IssuerKeyId)
     }
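These helpers are exported (CheckArmoredGPGKeyString, Base64EncPubKey, ExtractSignature, TryGetKeyIDFromSignature) so the commit-verification code this changeset moves out of models can call them from a service package. Base64EncPubKey itself is just packet serialization plus base64, matching the body shown above; a compilable sketch (package name hypothetical):

```go
package gpgutil

import (
	"bytes"
	"encoding/base64"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// Base64EncPubKey mirrors the newly exported helper from this diff:
// serialize the OpenPGP public-key packet, then base64 the raw bytes.
func Base64EncPubKey(pubkey *packet.PublicKey) (string, error) {
	var w bytes.Buffer
	if err := pubkey.Serialize(&w); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(w.Bytes()), nil
}
```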
@@ -51,7 +51,7 @@ MkM/fdpyc2hY7Dl/+qFmN5MG5yGmMpQcX+RNNR222ibNC1D3wg==
 =i9b7
 -----END PGP PUBLIC KEY BLOCK-----`
 
-    key, err := checkArmoredGPGKeyString(testGPGArmor)
+    key, err := CheckArmoredGPGKeyString(testGPGArmor)
     assert.NoError(t, err, "Could not parse a valid GPG public armored rsa key", key)
     // TODO verify value of key
 }
@@ -72,7 +72,7 @@ OyjLLnFQiVmq7kEA/0z0CQe3ZQiQIq5zrs7Nh1XRkFAo8GlU/SGC9XFFi722
 =ZiSe
 -----END PGP PUBLIC KEY BLOCK-----`
 
-    key, err := checkArmoredGPGKeyString(testGPGArmor)
+    key, err := CheckArmoredGPGKeyString(testGPGArmor)
     assert.NoError(t, err, "Could not parse a valid GPG public armored brainpoolP256r1 key", key)
     // TODO verify value of key
 }
@@ -108,14 +108,14 @@ Av844q/BfRuVsJsK1NDNG09LC30B0l3LKBqlrRmRTUMHtgchdX2dY+p7GPOoSzlR
 MkM/fdpyc2hY7Dl/+qFmN5MG5yGmMpQcX+RNNR222ibNC1D3wg==
 =i9b7
 -----END PGP PUBLIC KEY BLOCK-----`
-    keys, err := checkArmoredGPGKeyString(testGPGArmor)
+    keys, err := CheckArmoredGPGKeyString(testGPGArmor)
     require.NotEmpty(t, keys)
 
     ekey := keys[0]
     assert.NoError(t, err, "Could not parse a valid GPG armored key", ekey)
 
     pubkey := ekey.PrimaryKey
-    content, err := base64EncPubKey(pubkey)
+    content, err := Base64EncPubKey(pubkey)
     assert.NoError(t, err, "Could not base64 encode a valid PublicKey content", ekey)
 
     key := &GPGKey{
@@ -176,9 +176,9 @@ committer Antoine GIRARD <sapk@sapk.fr> 1489013107 +0100
 Unknown GPG key with good email
 `
     // Reading Sign
-    goodSig, err := extractSignature(testGoodSigArmor)
+    goodSig, err := ExtractSignature(testGoodSigArmor)
     assert.NoError(t, err, "Could not parse a valid GPG armored signature", testGoodSigArmor)
-    badSig, err := extractSignature(testBadSigArmor)
+    badSig, err := ExtractSignature(testBadSigArmor)
     assert.NoError(t, err, "Could not parse a valid GPG armored signature", testBadSigArmor)
 
     // Generating hash of commit
@@ -386,7 +386,7 @@ epiDVQ==
 =VSKJ
 -----END PGP PUBLIC KEY BLOCK-----
 `
-    keys, err := checkArmoredGPGKeyString(testIssue6599)
+    keys, err := CheckArmoredGPGKeyString(testIssue6599)
     assert.NoError(t, err)
     if assert.NotEmpty(t, keys) {
         ekey := keys[0]
@@ -396,11 +396,11 @@ epiDVQ==
 }
 
 func TestTryGetKeyIDFromSignature(t *testing.T) {
-    assert.Empty(t, tryGetKeyIDFromSignature(&packet.Signature{}))
-    assert.Equal(t, "038D1A3EADDBEA9C", tryGetKeyIDFromSignature(&packet.Signature{
+    assert.Empty(t, TryGetKeyIDFromSignature(&packet.Signature{}))
+    assert.Equal(t, "038D1A3EADDBEA9C", TryGetKeyIDFromSignature(&packet.Signature{
         IssuerKeyId: util.ToPointer(uint64(0x38D1A3EADDBEA9C)),
     }))
-    assert.Equal(t, "038D1A3EADDBEA9C", tryGetKeyIDFromSignature(&packet.Signature{
+    assert.Equal(t, "038D1A3EADDBEA9C", TryGetKeyIDFromSignature(&packet.Signature{
         IssuerFingerprint: []uint8{0xb, 0x23, 0x24, 0xc7, 0xe6, 0xfe, 0x4f, 0x3a, 0x6, 0x26, 0xc1, 0x21, 0x3, 0x8d, 0x1a, 0x3e, 0xad, 0xdb, 0xea, 0x9c},
     }))
 }
@@ -50,7 +50,7 @@ func VerifyGPGKey(ctx context.Context, ownerID int64, keyID, token, signature st
         return "", err
     }
 
-    sig, err := extractSignature(signature)
+    sig, err := ExtractSignature(signature)
     if err != nil {
         return "", ErrGPGInvalidTokenSignature{
             ID: key.KeyID,
@@ -69,3 +69,21 @@
   created_unix: 1730330775
   updated_unix: 1730330775
   expired_unix: 1738106775
+
+-
+  id: 23
+  run_id: 793
+  runner_id: 1
+  repo_id: 2
+  owner_id: 2
+  commit_sha: c2d72f548424103f01ee1dc02889c1e2bff816b0
+  storage_path: "27/5/1730330775594233150.chunk"
+  file_size: 1024
+  file_compressed_size: 1024
+  content_encoding: "application/zip"
+  artifact_path: "artifact-v4-download.zip"
+  artifact_name: "artifact-v4-download"
+  status: 2
+  created_unix: 1730330775
+  updated_unix: 1730330775
+  expired_unix: 1738106775
models/fixtures/issue_pin.yml (new file, 6 lines)

@@ -0,0 +1,6 @@
+-
+  id: 1
+  repo_id: 2
+  issue_id: 4
+  is_pull: false
+  pin_order: 1
@@ -496,47 +496,11 @@ type SignCommitWithStatuses struct {
     *asymkey_model.SignCommit
 }
 
-// ParseCommitsWithStatus checks commits latest statuses and calculates its worst status state
-func ParseCommitsWithStatus(ctx context.Context, oldCommits []*asymkey_model.SignCommit, repo *repo_model.Repository) []*SignCommitWithStatuses {
-    newCommits := make([]*SignCommitWithStatuses, 0, len(oldCommits))
-
-    for _, c := range oldCommits {
-        commit := &SignCommitWithStatuses{
-            SignCommit: c,
-        }
-        statuses, _, err := GetLatestCommitStatus(ctx, repo.ID, commit.ID.String(), db.ListOptions{})
-        if err != nil {
-            log.Error("GetLatestCommitStatus: %v", err)
-        } else {
-            commit.Statuses = statuses
-            commit.Status = CalcCommitStatus(statuses)
-        }
-
-        newCommits = append(newCommits, commit)
-    }
-    return newCommits
-}
-
 // hashCommitStatusContext hash context
 func hashCommitStatusContext(context string) string {
     return fmt.Sprintf("%x", sha1.Sum([]byte(context)))
 }
 
-// ConvertFromGitCommit converts git commits into SignCommitWithStatuses
-func ConvertFromGitCommit(ctx context.Context, commits []*git.Commit, repo *repo_model.Repository) []*SignCommitWithStatuses {
-    return ParseCommitsWithStatus(ctx,
-        asymkey_model.ParseCommitsWithSignature(
-            ctx,
-            user_model.ValidateCommitsWithEmails(ctx, commits),
-            repo.GetTrustModel(),
-            func(user *user_model.User) (bool, error) {
-                return repo_model.IsOwnerMemberCollaborator(ctx, repo, user.ID)
-            },
-        ),
-        repo,
-    )
-}
-
 // CommitStatusesHideActionsURL hide Gitea Actions urls
 func CommitStatusesHideActionsURL(ctx context.Context, statuses []*CommitStatus) {
     idToRepos := make(map[int64]*repo_model.Repository)
@@ -19,8 +19,6 @@ import (
     repo_model "code.gitea.io/gitea/models/repo"
     user_model "code.gitea.io/gitea/models/user"
     "code.gitea.io/gitea/modules/container"
-    "code.gitea.io/gitea/modules/gitrepo"
-    "code.gitea.io/gitea/modules/json"
     "code.gitea.io/gitea/modules/log"
     "code.gitea.io/gitea/modules/optional"
     "code.gitea.io/gitea/modules/references"
@@ -774,41 +772,6 @@ func (c *Comment) CodeCommentLink(ctx context.Context) string {
     return fmt.Sprintf("%s/files#%s", c.Issue.Link(), c.HashTag())
 }
 
-// LoadPushCommits Load push commits
-func (c *Comment) LoadPushCommits(ctx context.Context) (err error) {
-    if c.Content == "" || c.Commits != nil || c.Type != CommentTypePullRequestPush {
-        return nil
-    }
-
-    var data PushActionContent
-
-    err = json.Unmarshal([]byte(c.Content), &data)
-    if err != nil {
-        return err
-    }
-
-    c.IsForcePush = data.IsForcePush
-
-    if c.IsForcePush {
-        if len(data.CommitIDs) != 2 {
-            return nil
-        }
-        c.OldCommit = data.CommitIDs[0]
-        c.NewCommit = data.CommitIDs[1]
-    } else {
-        gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, c.Issue.Repo)
-        if err != nil {
-            return err
-        }
-        defer closer.Close()
-
-        c.Commits = git_model.ConvertFromGitCommit(ctx, gitRepo.GetCommitsFromIDs(data.CommitIDs), c.Issue.Repo)
-        c.CommitsNum = int64(len(c.Commits))
-    }
-
-    return err
-}
-
 // CreateComment creates comment with context
 func CreateComment(ctx context.Context, opts *CreateCommentOptions) (_ *Comment, err error) {
     ctx, committer, err := db.TxContext(ctx)
@@ -86,8 +86,10 @@ func findCodeComments(ctx context.Context, opts FindCommentsOptions, issue *Issu
             ids = append(ids, comment.ReviewID)
         }
     }
-    if err := e.In("id", ids).Find(&reviews); err != nil {
-        return nil, err
+    if len(ids) > 0 {
+        if err := e.In("id", ids).Find(&reviews); err != nil {
+            return nil, err
+        }
     }
 
     n := 0
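The new len(ids) > 0 guard exists because issuing the query with an empty slice can produce a degenerate or invalid empty IN clause (a syntax error on most engines), so the lookup is now skipped entirely when there is nothing to find. A small sketch of the same guard expressed with xorm.io/builder; the column name is a placeholder:

```go
package main

import (
	"fmt"

	"xorm.io/builder"
)

// reviewIDsCond mirrors the fix in findCodeComments: only build the IN
// condition when there is at least one ID; nil tells the caller to skip
// the query instead of sending an empty IN clause to the database.
func reviewIDsCond(ids []int64) builder.Cond {
	if len(ids) == 0 {
		return nil
	}
	return builder.In("id", ids)
}

func main() {
	for _, ids := range [][]int64{nil, {7, 9}} {
		cond := reviewIDsCond(ids)
		if cond == nil {
			fmt.Println("no review IDs; skip the lookup")
			continue
		}
		sql, args, err := builder.ToSQL(cond)
		if err != nil {
			panic(err)
		}
		fmt.Println(sql, args) // id IN (?,?) [7 9]
	}
}
```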
@@ -17,6 +17,7 @@ import (
     user_model "code.gitea.io/gitea/models/user"
     "code.gitea.io/gitea/modules/container"
     "code.gitea.io/gitea/modules/log"
+    "code.gitea.io/gitea/modules/optional"
     "code.gitea.io/gitea/modules/setting"
     api "code.gitea.io/gitea/modules/structs"
     "code.gitea.io/gitea/modules/timeutil"
@@ -96,7 +97,7 @@ type Issue struct {
     // TODO: RemoveIssueRef: see "repo/issue/branch_selector_field.tmpl"
     Ref string
 
-    PinOrder int `xorm:"DEFAULT 0"`
+    PinOrder int `xorm:"-"` // 0 means not loaded, -1 means loaded but not pinned
 
     DeadlineUnix timeutil.TimeStamp `xorm:"INDEX"`
 
@@ -290,6 +291,23 @@ func (issue *Issue) LoadMilestone(ctx context.Context) (err error) {
     return nil
 }
 
+func (issue *Issue) LoadPinOrder(ctx context.Context) error {
+    if issue.PinOrder != 0 {
+        return nil
+    }
+    issuePin, err := GetIssuePin(ctx, issue)
+    if err != nil && !db.IsErrNotExist(err) {
+        return err
+    }
+
+    if issuePin != nil {
+        issue.PinOrder = issuePin.PinOrder
+    } else {
+        issue.PinOrder = -1
+    }
+    return nil
+}
+
 // LoadAttributes loads the attribute of this issue.
 func (issue *Issue) LoadAttributes(ctx context.Context) (err error) {
     if err = issue.LoadRepo(ctx); err != nil {
@@ -329,6 +347,10 @@ func (issue *Issue) LoadAttributes(ctx context.Context) (err error) {
         return err
     }
 
+    if err = issue.LoadPinOrder(ctx); err != nil {
+        return err
+    }
+
     if err = issue.Comments.LoadAttributes(ctx); err != nil {
         return err
     }
@@ -341,6 +363,14 @@ func (issue *Issue) LoadAttributes(ctx context.Context) (err error) {
     return issue.loadReactions(ctx)
 }
 
+// IsPinned returns if a Issue is pinned
+func (issue *Issue) IsPinned() bool {
+    if issue.PinOrder == 0 {
+        setting.PanicInDevOrTesting("issue's pinorder has not been loaded")
+    }
+    return issue.PinOrder > 0
+}
+
 func (issue *Issue) ResetAttributesLoaded() {
     issue.isLabelsLoaded = false
     issue.isMilestoneLoaded = false
@@ -501,6 +531,45 @@ func GetIssueByIndex(ctx context.Context, repoID, index int64) (*Issue, error) {
     return issue, nil
 }
 
+func isPullToCond(isPull optional.Option[bool]) builder.Cond {
+    if isPull.Has() {
+        return builder.Eq{"is_pull": isPull.Value()}
+    }
+    return builder.NewCond()
+}
+
+func FindLatestUpdatedIssues(ctx context.Context, repoID int64, isPull optional.Option[bool], pageSize int) (IssueList, error) {
+    issues := make([]*Issue, 0, pageSize)
+    err := db.GetEngine(ctx).Where("repo_id = ?", repoID).
+        And(isPullToCond(isPull)).
+        OrderBy("updated_unix DESC").
+        Limit(pageSize).
+        Find(&issues)
+    return issues, err
+}
+
+func FindIssuesSuggestionByKeyword(ctx context.Context, repoID int64, keyword string, isPull optional.Option[bool], excludedID int64, pageSize int) (IssueList, error) {
+    cond := builder.NewCond()
+    if excludedID > 0 {
+        cond = cond.And(builder.Neq{"`id`": excludedID})
+    }
+
+    // It seems that GitHub searches both title and content (maybe sorting by the search engine's ranking system?)
+    // The first PR (https://github.com/go-gitea/gitea/pull/32327) uses "search indexer" to search "name(title) + content"
+    // But it seems that searching "content" (especially LIKE by DB engine) generates worse (unusable) results.
+    // So now (https://github.com/go-gitea/gitea/pull/33538) it only searches "name(title)", leave the improvements to the future.
+    cond = cond.And(db.BuildCaseInsensitiveLike("`name`", keyword))
+
+    issues := make([]*Issue, 0, pageSize)
+    err := db.GetEngine(ctx).Where("repo_id = ?", repoID).
+        And(isPullToCond(isPull)).
+        And(cond).
+        OrderBy("updated_unix DESC, `index` DESC").
+        Limit(pageSize).
+        Find(&issues)
+    return issues, err
+}
+
 // GetIssueWithAttrsByIndex returns issue by index in a repository.
 func GetIssueWithAttrsByIndex(ctx context.Context, repoID, index int64) (*Issue, error) {
     issue, err := GetIssueByIndex(ctx, repoID, index)
@@ -680,190 +749,6 @@ func (issue *Issue) HasOriginalAuthor() bool {
     return issue.OriginalAuthor != "" && issue.OriginalAuthorID != 0
 }
 
-var ErrIssueMaxPinReached = util.NewInvalidArgumentErrorf("the max number of pinned issues has been readched")
-
-// IsPinned returns if a Issue is pinned
-func (issue *Issue) IsPinned() bool {
-    return issue.PinOrder != 0
-}
-
-// Pin pins a Issue
-func (issue *Issue) Pin(ctx context.Context, user *user_model.User) error {
-    // If the Issue is already pinned, we don't need to pin it twice
-    if issue.IsPinned() {
-        return nil
-    }
-
-    var maxPin int
-    _, err := db.GetEngine(ctx).SQL("SELECT MAX(pin_order) FROM issue WHERE repo_id = ? AND is_pull = ?", issue.RepoID, issue.IsPull).Get(&maxPin)
-    if err != nil {
-        return err
-    }
-
-    // Check if the maximum allowed Pins reached
-    if maxPin >= setting.Repository.Issue.MaxPinned {
-        return ErrIssueMaxPinReached
-    }
-
-    _, err = db.GetEngine(ctx).Table("issue").
-        Where("id = ?", issue.ID).
-        Update(map[string]any{
-            "pin_order": maxPin + 1,
-        })
-    if err != nil {
-        return err
-    }
-
-    // Add the pin event to the history
-    opts := &CreateCommentOptions{
-        Type:  CommentTypePin,
-        Doer:  user,
-        Repo:  issue.Repo,
-        Issue: issue,
-    }
-    if _, err = CreateComment(ctx, opts); err != nil {
-        return err
-    }
-
-    return nil
-}
-
-// UnpinIssue unpins a Issue
-func (issue *Issue) Unpin(ctx context.Context, user *user_model.User) error {
-    // If the Issue is not pinned, we don't need to unpin it
-    if !issue.IsPinned() {
-        return nil
-    }
-
-    // This sets the Pin for all Issues that come after the unpined Issue to the correct value
-    _, err := db.GetEngine(ctx).Exec("UPDATE issue SET pin_order = pin_order - 1 WHERE repo_id = ? AND is_pull = ? AND pin_order > ?", issue.RepoID, issue.IsPull, issue.PinOrder)
-    if err != nil {
-        return err
-    }
-
-    _, err = db.GetEngine(ctx).Table("issue").
-        Where("id = ?", issue.ID).
-        Update(map[string]any{
-            "pin_order": 0,
-        })
-    if err != nil {
-        return err
-    }
-
-    // Add the unpin event to the history
-    opts := &CreateCommentOptions{
-        Type:  CommentTypeUnpin,
-        Doer:  user,
-        Repo:  issue.Repo,
-        Issue: issue,
-    }
-    if _, err = CreateComment(ctx, opts); err != nil {
-        return err
-    }
-
-    return nil
-}
-
-// PinOrUnpin pins or unpins a Issue
-func (issue *Issue) PinOrUnpin(ctx context.Context, user *user_model.User) error {
-    if !issue.IsPinned() {
-        return issue.Pin(ctx, user)
-    }
-
-    return issue.Unpin(ctx, user)
-}
-
-// MovePin moves a Pinned Issue to a new Position
-func (issue *Issue) MovePin(ctx context.Context, newPosition int) error {
-    // If the Issue is not pinned, we can't move them
-    if !issue.IsPinned() {
-        return nil
-    }
-
-    if newPosition < 1 {
-        return fmt.Errorf("The Position can't be lower than 1")
-    }
-
-    dbctx, committer, err := db.TxContext(ctx)
-    if err != nil {
-        return err
-    }
-    defer committer.Close()
-
-    var maxPin int
-    _, err = db.GetEngine(dbctx).SQL("SELECT MAX(pin_order) FROM issue WHERE repo_id = ? AND is_pull = ?", issue.RepoID, issue.IsPull).Get(&maxPin)
-    if err != nil {
-        return err
-    }
-
-    // If the new Position bigger than the current Maximum, set it to the Maximum
-    if newPosition > maxPin+1 {
-        newPosition = maxPin + 1
-    }
-
-    // Lower the Position of all Pinned Issue that came after the current Position
-    _, err = db.GetEngine(dbctx).Exec("UPDATE issue SET pin_order = pin_order - 1 WHERE repo_id = ? AND is_pull = ? AND pin_order > ?", issue.RepoID, issue.IsPull, issue.PinOrder)
-    if err != nil {
-        return err
-    }
-
-    // Higher the Position of all Pinned Issues that comes after the new Position
-    _, err = db.GetEngine(dbctx).Exec("UPDATE issue SET pin_order = pin_order + 1 WHERE repo_id = ? AND is_pull = ? AND pin_order >= ?", issue.RepoID, issue.IsPull, newPosition)
-    if err != nil {
-        return err
-    }
-
-    _, err = db.GetEngine(dbctx).Table("issue").
-        Where("id = ?", issue.ID).
-        Update(map[string]any{
-            "pin_order": newPosition,
-        })
-    if err != nil {
-        return err
-    }
-
-    return committer.Commit()
-}
-
-// GetPinnedIssues returns the pinned Issues for the given Repo and type
-func GetPinnedIssues(ctx context.Context, repoID int64, isPull bool) (IssueList, error) {
-    issues := make(IssueList, 0)
-
-    err := db.GetEngine(ctx).
-        Table("issue").
-        Where("repo_id = ?", repoID).
-        And("is_pull = ?", isPull).
-        And("pin_order > 0").
-        OrderBy("pin_order").
-        Find(&issues)
-    if err != nil {
-        return nil, err
-    }
-
-    err = issues.LoadAttributes(ctx)
-    if err != nil {
-        return nil, err
-    }
-
-    return issues, nil
-}
-
-// IsNewPinAllowed returns if a new Issue or Pull request can be pinned
-func IsNewPinAllowed(ctx context.Context, repoID int64, isPull bool) (bool, error) {
-    var maxPin int
-    _, err := db.GetEngine(ctx).SQL("SELECT COUNT(pin_order) FROM issue WHERE repo_id = ? AND is_pull = ? AND pin_order > 0", repoID, isPull).Get(&maxPin)
-    if err != nil {
-        return false, err
-    }
-
-    return maxPin < setting.Repository.Issue.MaxPinned, nil
-}
-
-// IsErrIssueMaxPinReached returns if the error is, that the User can't pin more Issues
-func IsErrIssueMaxPinReached(err error) bool {
-    return err == ErrIssueMaxPinReached
-}
-
 // InsertIssues insert issues to database
 func InsertIssues(ctx context.Context, issues ...*Issue) error {
     ctx, committer, err := db.TxContext(ctx)
@@ -506,6 +506,39 @@ func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) {
	return nil
}

func (issues IssueList) LoadPinOrder(ctx context.Context) error {
	if len(issues) == 0 {
		return nil
	}

	issueIDs := container.FilterSlice(issues, func(issue *Issue) (int64, bool) {
		return issue.ID, issue.PinOrder == 0
	})
	if len(issueIDs) == 0 {
		return nil
	}
	issuePins, err := GetIssuePinsByIssueIDs(ctx, issueIDs)
	if err != nil {
		return err
	}

	for _, issue := range issues {
		if issue.PinOrder != 0 {
			continue
		}
		for _, pin := range issuePins {
			if pin.IssueID == issue.ID {
				issue.PinOrder = pin.PinOrder
				break
			}
		}
		if issue.PinOrder == 0 {
			issue.PinOrder = -1
		}
	}
	return nil
}

// LoadAttributes loads all attributes, except for attachments and comments
func (issues IssueList) LoadAttributes(ctx context.Context) error {
	if _, err := issues.LoadRepositories(ctx); err != nil {
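Reviewer note, not part of the diff: a minimal usage sketch for LoadPinOrder, assuming the package is imported as issues_model and that IssuesOptions carries a RepoIDs filter as elsewhere in Gitea (the wrapper function is hypothetical):

	// listWithPins loads a repo's issues and resolves their pin order.
	// After LoadPinOrder, pinned issues carry PinOrder > 0 and unpinned ones -1.
	func listWithPins(ctx context.Context, repoID int64) (issues_model.IssueList, error) {
		issues, err := issues_model.Issues(ctx, &issues_model.IssuesOptions{RepoIDs: []int64{repoID}})
		if err != nil {
			return nil, err
		}
		if err := issues.LoadPinOrder(ctx); err != nil {
			return nil, err
		}
		return issues, nil
	}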
246	models/issues/issue_pin.go (new file)
@@ -0,0 +1,246 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package issues

import (
	"context"
	"errors"
	"sort"

	"code.gitea.io/gitea/models/db"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/util"
)

type IssuePin struct {
	ID       int64 `xorm:"pk autoincr"`
	RepoID   int64 `xorm:"UNIQUE(s) NOT NULL"`
	IssueID  int64 `xorm:"UNIQUE(s) NOT NULL"`
	IsPull   bool  `xorm:"NOT NULL"`
	PinOrder int   `xorm:"DEFAULT 0"`
}

var ErrIssueMaxPinReached = util.NewInvalidArgumentErrorf("the max number of pinned issues has been reached")

// IsErrIssueMaxPinReached returns whether the error means the user can't pin more Issues
func IsErrIssueMaxPinReached(err error) bool {
	return err == ErrIssueMaxPinReached
}

func init() {
	db.RegisterModel(new(IssuePin))
}

func GetIssuePin(ctx context.Context, issue *Issue) (*IssuePin, error) {
	pin := new(IssuePin)
	has, err := db.GetEngine(ctx).
		Where("repo_id = ?", issue.RepoID).
		And("issue_id = ?", issue.ID).Get(pin)
	if err != nil {
		return nil, err
	} else if !has {
		return nil, db.ErrNotExist{
			Resource: "IssuePin",
			ID:       issue.ID,
		}
	}
	return pin, nil
}

func GetIssuePinsByIssueIDs(ctx context.Context, issueIDs []int64) ([]IssuePin, error) {
	var pins []IssuePin
	if err := db.GetEngine(ctx).In("issue_id", issueIDs).Find(&pins); err != nil {
		return nil, err
	}
	return pins, nil
}

// PinIssue pins an Issue
func PinIssue(ctx context.Context, issue *Issue, user *user_model.User) error {
	return db.WithTx(ctx, func(ctx context.Context) error {
		pinnedIssuesNum, err := getPinnedIssuesNum(ctx, issue.RepoID, issue.IsPull)
		if err != nil {
			return err
		}

		// Check if the maximum allowed number of pins is reached
		if pinnedIssuesNum >= setting.Repository.Issue.MaxPinned {
			return ErrIssueMaxPinReached
		}

		pinnedIssuesMaxPinOrder, err := getPinnedIssuesMaxPinOrder(ctx, issue.RepoID, issue.IsPull)
		if err != nil {
			return err
		}

		if _, err = db.GetEngine(ctx).Insert(&IssuePin{
			RepoID:   issue.RepoID,
			IssueID:  issue.ID,
			IsPull:   issue.IsPull,
			PinOrder: pinnedIssuesMaxPinOrder + 1,
		}); err != nil {
			return err
		}

		// Add the pin event to the history
		_, err = CreateComment(ctx, &CreateCommentOptions{
			Type:  CommentTypePin,
			Doer:  user,
			Repo:  issue.Repo,
			Issue: issue,
		})
		return err
	})
}

// UnpinIssue unpins an Issue
func UnpinIssue(ctx context.Context, issue *Issue, user *user_model.User) error {
	return db.WithTx(ctx, func(ctx context.Context) error {
		// Remove the pin record; if nothing was deleted, the Issue was not pinned
		cnt, err := db.GetEngine(ctx).Where("issue_id=?", issue.ID).Delete(new(IssuePin))
		if err != nil {
			return err
		}
		if cnt == 0 {
			return nil
		}

		// Add the unpin event to the history
		_, err = CreateComment(ctx, &CreateCommentOptions{
			Type:  CommentTypeUnpin,
			Doer:  user,
			Repo:  issue.Repo,
			Issue: issue,
		})
		return err
	})
}

func getPinnedIssuesNum(ctx context.Context, repoID int64, isPull bool) (int, error) {
	var pinnedIssuesNum int
	_, err := db.GetEngine(ctx).SQL("SELECT count(pin_order) FROM issue_pin WHERE repo_id = ? AND is_pull = ?", repoID, isPull).Get(&pinnedIssuesNum)
	return pinnedIssuesNum, err
}

func getPinnedIssuesMaxPinOrder(ctx context.Context, repoID int64, isPull bool) (int, error) {
	var maxPinnedIssuesMaxPinOrder int
	_, err := db.GetEngine(ctx).SQL("SELECT max(pin_order) FROM issue_pin WHERE repo_id = ? AND is_pull = ?", repoID, isPull).Get(&maxPinnedIssuesMaxPinOrder)
	return maxPinnedIssuesMaxPinOrder, err
}

// MovePin moves a pinned Issue to a new position
func MovePin(ctx context.Context, issue *Issue, newPosition int) error {
	if newPosition < 1 {
		return errors.New("the position can't be lower than 1")
	}

	issuePin, err := GetIssuePin(ctx, issue)
	if err != nil {
		return err
	}
	if issuePin.PinOrder == newPosition {
		return nil
	}

	return db.WithTx(ctx, func(ctx context.Context) error {
		if issuePin.PinOrder > newPosition { // the Issue is moved to a smaller pin_order (towards the front)
			_, err = db.GetEngine(ctx).Exec("UPDATE issue_pin SET pin_order = pin_order + 1 WHERE repo_id = ? AND is_pull = ? AND pin_order >= ? AND pin_order < ?", issue.RepoID, issue.IsPull, newPosition, issuePin.PinOrder)
		} else { // the Issue is moved to a larger pin_order (towards the end)
			_, err = db.GetEngine(ctx).Exec("UPDATE issue_pin SET pin_order = pin_order - 1 WHERE repo_id = ? AND is_pull = ? AND pin_order > ? AND pin_order <= ?", issue.RepoID, issue.IsPull, issuePin.PinOrder, newPosition)
		}
		if err != nil {
			return err
		}

		_, err = db.GetEngine(ctx).
			Table("issue_pin").
			Where("id = ?", issuePin.ID).
			Update(map[string]any{
				"pin_order": newPosition,
			})
		return err
	})
}

func GetPinnedIssueIDs(ctx context.Context, repoID int64, isPull bool) ([]int64, error) {
	var issuePins []IssuePin
	if err := db.GetEngine(ctx).
		Table("issue_pin").
		Where("repo_id = ?", repoID).
		And("is_pull = ?", isPull).
		Find(&issuePins); err != nil {
		return nil, err
	}

	sort.Slice(issuePins, func(i, j int) bool {
		return issuePins[i].PinOrder < issuePins[j].PinOrder
	})

	var ids []int64
	for _, pin := range issuePins {
		ids = append(ids, pin.IssueID)
	}
	return ids, nil
}

func GetIssuePinsByRepoID(ctx context.Context, repoID int64, isPull bool) ([]*IssuePin, error) {
	var pins []*IssuePin
	if err := db.GetEngine(ctx).Where("repo_id = ? AND is_pull = ?", repoID, isPull).Find(&pins); err != nil {
		return nil, err
	}
	return pins, nil
}

// GetPinnedIssues returns the pinned Issues for the given repo and type
func GetPinnedIssues(ctx context.Context, repoID int64, isPull bool) (IssueList, error) {
	issuePins, err := GetIssuePinsByRepoID(ctx, repoID, isPull)
	if err != nil {
		return nil, err
	}
	if len(issuePins) == 0 {
		return IssueList{}, nil
	}
	ids := make([]int64, 0, len(issuePins))
	for _, pin := range issuePins {
		ids = append(ids, pin.IssueID)
	}

	issues := make(IssueList, 0, len(ids))
	if err := db.GetEngine(ctx).In("id", ids).Find(&issues); err != nil {
		return nil, err
	}
	for _, issue := range issues {
		for _, pin := range issuePins {
			if pin.IssueID == issue.ID {
				issue.PinOrder = pin.PinOrder
				break
			}
		}
		if (!setting.IsProd || setting.IsInTesting) && issue.PinOrder == 0 {
			panic("It should not happen that a pinned Issue has no PinOrder")
		}
	}
	sort.Slice(issues, func(i, j int) bool {
		return issues[i].PinOrder < issues[j].PinOrder
	})

	if err = issues.LoadAttributes(ctx); err != nil {
		return nil, err
	}

	return issues, nil
}

// IsNewPinAllowed returns whether a new Issue or Pull request can be pinned
func IsNewPinAllowed(ctx context.Context, repoID int64, isPull bool) (bool, error) {
	var maxPin int
	_, err := db.GetEngine(ctx).SQL("SELECT COUNT(pin_order) FROM issue_pin WHERE repo_id = ? AND is_pull = ?", repoID, isPull).Get(&maxPin)
	if err != nil {
		return false, err
	}

	return maxPin < setting.Repository.Issue.MaxPinned, nil
}
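Reviewer note, not part of the diff: a rough sketch of how the new pin helpers chain together, assuming a valid ctx, issue, and doer, and the usual issues_model/user_model import aliases (the wrapper function is hypothetical):

	// demoPinFlow pins an issue, moves the pin to the front, then lists pins.
	func demoPinFlow(ctx context.Context, issue *issues_model.Issue, doer *user_model.User) error {
		if err := issues_model.PinIssue(ctx, issue, doer); err != nil {
			return err // may be ErrIssueMaxPinReached when the repo hit MaxPinned
		}
		if err := issues_model.MovePin(ctx, issue, 1); err != nil {
			return err
		}
		pinned, err := issues_model.GetPinnedIssues(ctx, issue.RepoID, issue.IsPull)
		if err != nil {
			return err
		}
		_ = pinned // would be rendered by the caller
		return nil
	}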
@@ -49,6 +49,21 @@ func (issue *Issue) ProjectColumnID(ctx context.Context) (int64, error) {
	return ip.ProjectColumnID, nil
}

func LoadProjectIssueColumnMap(ctx context.Context, projectID, defaultColumnID int64) (map[int64]int64, error) {
	issues := make([]project_model.ProjectIssue, 0)
	if err := db.GetEngine(ctx).Where("project_id=?", projectID).Find(&issues); err != nil {
		return nil, err
	}
	result := make(map[int64]int64, len(issues))
	for _, issue := range issues {
		if issue.ProjectColumnID == 0 {
			issue.ProjectColumnID = defaultColumnID
		}
		result[issue.IssueID] = issue.ProjectColumnID
	}
	return result, nil
}

// LoadIssuesFromColumn loads the issues assigned to this column
func LoadIssuesFromColumn(ctx context.Context, b *project_model.Column, opts *IssuesOptions) (IssueList, error) {
	issueList, err := Issues(ctx, opts.Copy(func(o *IssuesOptions) {
@@ -61,11 +76,11 @@ func LoadIssuesFromColumn(ctx context.Context, b *project_model.Column, opts *Is
	}

	if b.Default {
		issues, err := Issues(ctx, &IssuesOptions{
			ProjectColumnID: db.NoConditionID,
			ProjectID:       b.ProjectID,
			SortType:        "project-column-sorting",
		})
		issues, err := Issues(ctx, opts.Copy(func(o *IssuesOptions) {
			o.ProjectColumnID = db.NoConditionID
			o.ProjectID = b.ProjectID
			o.SortType = "project-column-sorting"
		}))
		if err != nil {
			return nil, err
		}
@@ -79,19 +94,6 @@ func LoadIssuesFromColumn(ctx context.Context, b *project_model.Column, opts *Is
	return issueList, nil
}

// LoadIssuesFromColumnList loads the issues assigned to the columns
func LoadIssuesFromColumnList(ctx context.Context, bs project_model.ColumnList, opts *IssuesOptions) (map[int64]IssueList, error) {
	issuesMap := make(map[int64]IssueList, len(bs))
	for i := range bs {
		il, err := LoadIssuesFromColumn(ctx, bs[i], opts)
		if err != nil {
			return nil, err
		}
		issuesMap[bs[i].ID] = il
	}
	return issuesMap, nil
}

// IssueAssignOrRemoveProject changes the project associated with an issue
// If newProjectID is 0, the issue is removed from the project
func IssueAssignOrRemoveProject(ctx context.Context, issue *Issue, doer *user_model.User, newProjectID, newColumnID int64) error {
@@ -112,7 +114,7 @@ func IssueAssignOrRemoveProject(ctx context.Context, issue *Issue, doer *user_mo
		return util.NewPermissionDeniedErrorf("issue %d can't be accessed by project %d", issue.ID, newProject.ID)
	}
	if newColumnID == 0 {
		newDefaultColumn, err := newProject.GetDefaultColumn(ctx)
		newDefaultColumn, err := newProject.MustDefaultColumn(ctx)
		if err != nil {
			return err
		}
@@ -49,9 +49,9 @@ type IssuesOptions struct { //nolint
	// prioritize issues from this repo
	PriorityRepoID int64
	IsArchived     optional.Option[bool]
	Org            *organization.Organization // issues permission scope
	Team           *organization.Team         // issues permission scope
	User           *user_model.User           // issues permission scope
	Owner          *user_model.User           // issues permission scope, it could be an organization or a user
	Team           *organization.Team         // issues permission scope
	Doer           *user_model.User           // issues permission scope
}

// Copy returns a copy of the options.
@@ -273,8 +273,12 @@ func applyConditions(sess *xorm.Session, opts *IssuesOptions) {

	applyLabelsCondition(sess, opts)

	if opts.User != nil {
		sess.And(issuePullAccessibleRepoCond("issue.repo_id", opts.User.ID, opts.Org, opts.Team, opts.IsPull.Value()))
	if opts.Owner != nil {
		sess.And(repo_model.UserOwnedRepoCond(opts.Owner.ID))
	}

	if opts.Doer != nil && !opts.Doer.IsAdmin {
		sess.And(issuePullAccessibleRepoCond("issue.repo_id", opts.Doer.ID, opts.Owner, opts.Team, opts.IsPull.Value()))
	}
}

@@ -321,20 +325,20 @@ func teamUnitsRepoCond(id string, userID, orgID, teamID int64, units ...unit.Typ
}

// issuePullAccessibleRepoCond: userID must not be zero; this condition requires joining the repository table
func issuePullAccessibleRepoCond(repoIDstr string, userID int64, org *organization.Organization, team *organization.Team, isPull bool) builder.Cond {
func issuePullAccessibleRepoCond(repoIDstr string, userID int64, owner *user_model.User, team *organization.Team, isPull bool) builder.Cond {
	cond := builder.NewCond()
	unitType := unit.TypeIssues
	if isPull {
		unitType = unit.TypePullRequests
	}
	if org != nil {
	if owner != nil && owner.IsOrganization() {
		if team != nil {
			cond = cond.And(teamUnitsRepoCond(repoIDstr, userID, org.ID, team.ID, unitType)) // special team member repos
			cond = cond.And(teamUnitsRepoCond(repoIDstr, userID, owner.ID, team.ID, unitType)) // special team member repos
		} else {
			cond = cond.And(
				builder.Or(
					repo_model.UserOrgUnitRepoCond(repoIDstr, userID, org.ID, unitType), // team member repos
					repo_model.UserOrgPublicUnitRepoCond(userID, org.ID), // user org public non-member repos, TODO: check repo has issues
					repo_model.UserOrgUnitRepoCond(repoIDstr, userID, owner.ID, unitType), // team member repos
					repo_model.UserOrgPublicUnitRepoCond(userID, owner.ID), // user org public non-member repos, TODO: check repo has issues
				),
			)
		}

@@ -373,6 +373,7 @@ func prepareMigrationTasks() []*migration {

		// Gitea 1.23.0-rc0 ends at migration ID number 311 (database version 312)
		newMigration(312, "Add DeleteBranchAfterMerge to AutoMerge", v1_24.AddDeleteBranchAfterMergeForAutoMerge),
		newMigration(313, "Move PinOrder from issue table to a new table issue_pin", v1_24.MovePinOrderToTableIssuePin),
	}
	return preparedMigrations
}

31	models/migrations/v1_24/v313.go (new file)
@@ -0,0 +1,31 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package v1_24 //nolint

import (
	"code.gitea.io/gitea/models/migrations/base"

	"xorm.io/xorm"
)

func MovePinOrderToTableIssuePin(x *xorm.Engine) error {
	type IssuePin struct {
		ID       int64 `xorm:"pk autoincr"`
		RepoID   int64 `xorm:"UNIQUE(s) NOT NULL"`
		IssueID  int64 `xorm:"UNIQUE(s) NOT NULL"`
		IsPull   bool  `xorm:"NOT NULL"`
		PinOrder int   `xorm:"DEFAULT 0"`
	}

	if err := x.Sync(new(IssuePin)); err != nil {
		return err
	}

	if _, err := x.Exec("INSERT INTO issue_pin (repo_id, issue_id, is_pull, pin_order) SELECT repo_id, id, is_pull, pin_order FROM issue WHERE pin_order > 0"); err != nil {
		return err
	}
	sess := x.NewSession()
	defer sess.Close()
	return base.DropTableColumns(sess, "issue", "pin_order")
}
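Reviewer note, not part of the diff: the migration only copies rows with pin_order > 0, so unpinned issues end up with no issue_pin row at all. A hedged sketch of a sanity check one could run in a migration test (the helper and its wiring are hypothetical):

	// checkPinMigration asserts that no invalid pin_order values were copied over.
	func checkPinMigration(x *xorm.Engine) error {
		var invalid int
		if _, err := x.SQL("SELECT COUNT(*) FROM issue_pin WHERE pin_order <= 0").Get(&invalid); err != nil {
			return err
		}
		if invalid > 0 {
			return fmt.Errorf("found %d issue_pin rows with non-positive pin_order", invalid)
		}
		return nil
	}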
@@ -228,6 +228,11 @@ func SetRepositoryLink(ctx context.Context, packageID, repoID int64) error {
	return err
}

func UnlinkRepository(ctx context.Context, packageID int64) error {
	_, err := db.GetEngine(ctx).ID(packageID).Cols("repo_id").Update(&Package{RepoID: 0})
	return err
}

// UnlinkRepositoryFromAllPackages unlinks every package from the repository
func UnlinkRepositoryFromAllPackages(ctx context.Context, repoID int64) error {
	_, err := db.GetEngine(ctx).Where("repo_id = ?", repoID).Cols("repo_id").Update(&Package{})

@@ -152,7 +152,7 @@ func (p *Permission) ReadableUnitTypes() []unit.Type {
}

func (p *Permission) LogString() string {
	format := "<Permission AccessMode=%s, %d Units, %d UnitsMode(s): [ "
	format := "<Permission AccessMode=%s, %d Units, %d UnitsMode(s): ["
	args := []any{p.AccessMode.ToString(), len(p.units), len(p.unitsMode)}

	for i, u := range p.units {
@@ -164,14 +164,16 @@ func (p *Permission) LogString() string {
			config = err.Error()
		}
	}
	format += "\nUnits[%d]: ID: %d RepoID: %d Type: %s Config: %s"
	format += "\n\tunits[%d]: ID=%d RepoID=%d Type=%s Config=%s"
	args = append(args, i, u.ID, u.RepoID, u.Type.LogString(), config)
	}
	for key, value := range p.unitsMode {
	format += "\nUnitMode[%-v]: %-v"
	format += "\n\tunitsMode[%-v]: %-v"
	args = append(args, key.LogString(), value.LogString())
	}
	format += " ]>"
	format += "\n\teveryoneAccessMode: %-v"
	args = append(args, p.everyoneAccessMode)
	format += "\n\t]>"
	return fmt.Sprintf(format, args...)
}

@@ -48,6 +48,8 @@ type Column struct {
	ProjectID int64 `xorm:"INDEX NOT NULL"`
	CreatorID int64 `xorm:"NOT NULL"`

	NumIssues int64 `xorm:"-"`

	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}
@@ -57,20 +59,6 @@ func (Column) TableName() string {
	return "project_board" // TODO: the legacy table name should be project_column
}

// NumIssues returns the number of all issues assigned to the column
func (c *Column) NumIssues(ctx context.Context) int {
	total, err := db.GetEngine(ctx).Table("project_issue").
		Where("project_id=?", c.ProjectID).
		And("project_board_id=?", c.ID).
		GroupBy("issue_id").
		Cols("issue_id").
		Count()
	if err != nil {
		return 0
	}
	return int(total)
}

func (c *Column) GetIssues(ctx context.Context) ([]*ProjectIssue, error) {
	issues := make([]*ProjectIssue, 0, 5)
	if err := db.GetEngine(ctx).Where("project_id=?", c.ProjectID).
@@ -192,7 +180,7 @@ func deleteColumnByID(ctx context.Context, columnID int64) error {
	if err != nil {
		return err
	}
	defaultColumn, err := project.GetDefaultColumn(ctx)
	defaultColumn, err := project.MustDefaultColumn(ctx)
	if err != nil {
		return err
	}
@@ -257,8 +245,8 @@ func (p *Project) GetColumns(ctx context.Context) (ColumnList, error) {
	return columns, nil
}

// GetDefaultColumn returns the default column and ensures only one exists
func (p *Project) GetDefaultColumn(ctx context.Context) (*Column, error) {
// getDefaultColumn returns the default column and ensures only one exists
func (p *Project) getDefaultColumn(ctx context.Context) (*Column, error) {
	var column Column
	has, err := db.GetEngine(ctx).
		Where("project_id=? AND `default` = ?", p.ID, true).
@@ -270,6 +258,33 @@ func (p *Project) GetDefaultColumn(ctx context.Context) (*Column, error) {
	if has {
		return &column, nil
	}
	return nil, ErrProjectColumnNotExist{ColumnID: 0}
}

// MustDefaultColumn returns the default column for a project.
// If one exists, it is returned.
// If none exists, the first column will be elevated to the default column of this project.
func (p *Project) MustDefaultColumn(ctx context.Context) (*Column, error) {
	c, err := p.getDefaultColumn(ctx)
	if err != nil && !IsErrProjectColumnNotExist(err) {
		return nil, err
	}
	if c != nil {
		return c, nil
	}

	var column Column
	has, err := db.GetEngine(ctx).Where("project_id=?", p.ID).OrderBy("sorting, id").Get(&column)
	if err != nil {
		return nil, err
	}
	if has {
		column.Default = true
		if _, err := db.GetEngine(ctx).ID(column.ID).Cols("`default`").Update(&column); err != nil {
			return nil, err
		}
		return &column, nil
	}

	// create a default column if none is found
	column = Column{

@@ -20,19 +20,19 @@ func TestGetDefaultColumn(t *testing.T) {
	assert.NoError(t, err)

	// check if default column was added
	column, err := projectWithoutDefault.GetDefaultColumn(db.DefaultContext)
	column, err := projectWithoutDefault.MustDefaultColumn(db.DefaultContext)
	assert.NoError(t, err)
	assert.Equal(t, int64(5), column.ProjectID)
	assert.Equal(t, "Uncategorized", column.Title)
	assert.Equal(t, "Done", column.Title)

	projectWithMultipleDefaults, err := GetProjectByID(db.DefaultContext, 6)
	assert.NoError(t, err)

	// check if multiple defaults were removed
	column, err = projectWithMultipleDefaults.GetDefaultColumn(db.DefaultContext)
	column, err = projectWithMultipleDefaults.MustDefaultColumn(db.DefaultContext)
	assert.NoError(t, err)
	assert.Equal(t, int64(6), column.ProjectID)
	assert.Equal(t, int64(9), column.ID)
	assert.Equal(t, int64(9), column.ID) // there are 2 default columns in the test data, use the latest one

	// set 8 as default column
	assert.NoError(t, SetDefaultColumn(db.DefaultContext, column.ProjectID, 8))

@@ -8,7 +8,6 @@ import (
	"fmt"

	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/util"
)

@@ -34,48 +33,6 @@ func deleteProjectIssuesByProjectID(ctx context.Context, projectID int64) error
	return err
}

// NumIssues returns the number of all issues assigned to a project
func (p *Project) NumIssues(ctx context.Context) int {
	c, err := db.GetEngine(ctx).Table("project_issue").
		Where("project_id=?", p.ID).
		GroupBy("issue_id").
		Cols("issue_id").
		Count()
	if err != nil {
		log.Error("NumIssues: %v", err)
		return 0
	}
	return int(c)
}

// NumClosedIssues returns the number of closed issues assigned to a project
func (p *Project) NumClosedIssues(ctx context.Context) int {
	c, err := db.GetEngine(ctx).Table("project_issue").
		Join("INNER", "issue", "project_issue.issue_id=issue.id").
		Where("project_issue.project_id=? AND issue.is_closed=?", p.ID, true).
		Cols("issue_id").
		Count()
	if err != nil {
		log.Error("NumClosedIssues: %v", err)
		return 0
	}
	return int(c)
}

// NumOpenIssues returns the number of open issues assigned to a project
func (p *Project) NumOpenIssues(ctx context.Context) int {
	c, err := db.GetEngine(ctx).Table("project_issue").
		Join("INNER", "issue", "project_issue.issue_id=issue.id").
		Where("project_issue.project_id=? AND issue.is_closed=?", p.ID, false).
		Cols("issue_id").
		Count()
	if err != nil {
		log.Error("NumOpenIssues: %v", err)
		return 0
	}
	return int(c)
}

func (c *Column) moveIssuesToAnotherColumn(ctx context.Context, newColumn *Column) error {
	if c.ProjectID != newColumn.ProjectID {
		return fmt.Errorf("columns have to be in the same project")

@@ -97,6 +97,9 @@ type Project struct {
	Type Type

	RenderedContent template.HTML `xorm:"-"`
	NumOpenIssues   int64         `xorm:"-"`
	NumClosedIssues int64         `xorm:"-"`
	NumIssues       int64         `xorm:"-"`

	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`

@@ -1129,28 +1129,89 @@ func ValidateCommitWithEmail(ctx context.Context, c *git.Commit) *User {
}

// ValidateCommitsWithEmails checks whether the authors' e-mails of commits correspond to users.
func ValidateCommitsWithEmails(ctx context.Context, oldCommits []*git.Commit) []*UserCommit {
func ValidateCommitsWithEmails(ctx context.Context, oldCommits []*git.Commit) ([]*UserCommit, error) {
	var (
		emails     = make(map[string]*User)
		newCommits = make([]*UserCommit, 0, len(oldCommits))
		emailSet   = make(container.Set[string])
	)
	for _, c := range oldCommits {
		var u *User
		if c.Author != nil {
			if v, ok := emails[c.Author.Email]; !ok {
				u, _ = GetUserByEmail(ctx, c.Author.Email)
				emails[c.Author.Email] = u
			} else {
				u = v
			emailSet.Add(c.Author.Email)
			}
		}

	emailUserMap, err := GetUsersByEmails(ctx, emailSet.Values())
	if err != nil {
		return nil, err
	}

	for _, c := range oldCommits {
		user, ok := emailUserMap[c.Author.Email]
		if !ok {
			user = &User{
				Name:  c.Author.Name,
				Email: c.Author.Email,
			}
		}

		newCommits = append(newCommits, &UserCommit{
			User:   u,
			User:   user,
			Commit: c,
		})
	}
	return newCommits
	return newCommits, nil
}

func GetUsersByEmails(ctx context.Context, emails []string) (map[string]*User, error) {
	if len(emails) == 0 {
		return nil, nil
	}

	needCheckEmails := make(container.Set[string])
	needCheckUserNames := make(container.Set[string])
	for _, email := range emails {
		if strings.HasSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress)) {
			username := strings.TrimSuffix(email, fmt.Sprintf("@%s", setting.Service.NoReplyAddress))
			needCheckUserNames.Add(username)
		} else {
			needCheckEmails.Add(strings.ToLower(email))
		}
	}

	emailAddresses := make([]*EmailAddress, 0, len(needCheckEmails))
	if err := db.GetEngine(ctx).In("lower_email", needCheckEmails.Values()).
		And("is_activated=?", true).
		Find(&emailAddresses); err != nil {
		return nil, err
	}
	userIDs := make(container.Set[int64])
	for _, email := range emailAddresses {
		userIDs.Add(email.UID)
	}
	users, err := GetUsersMapByIDs(ctx, userIDs.Values())
	if err != nil {
		return nil, err
	}

	results := make(map[string]*User, len(emails))
	for _, email := range emailAddresses {
		user := users[email.UID]
		if user != nil {
			if user.KeepEmailPrivate {
				results[user.LowerName+"@"+setting.Service.NoReplyAddress] = user
			} else {
				results[email.Email] = user
			}
		}
	}

	users = make(map[int64]*User, len(needCheckUserNames))
	if err := db.GetEngine(ctx).In("lower_name", needCheckUserNames.Values()).Find(&users); err != nil {
		return nil, err
	}
	for _, user := range users {
		results[user.LowerName+"@"+setting.Service.NoReplyAddress] = user
	}
	return results, nil
}
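Reviewer note, not part of the diff: a small usage sketch for the batch lookup. Keep in mind that for users with KeepEmailPrivate the result map is keyed by their noreply address rather than the queried email (the wrapper function is hypothetical):

	// resolveAuthors maps commit author emails to known users in one query.
	func resolveAuthors(ctx context.Context, emails []string) (map[string]*user_model.User, error) {
		userMap, err := user_model.GetUsersByEmails(ctx, emails)
		if err != nil {
			return nil, err
		}
		return userMap, nil // emails with no activated match are simply absent
	}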
// GetUserByEmail returns the user object by given e-mail if exists.

48	modules/actions/artifacts.go (new file)
@@ -0,0 +1,48 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package actions

import (
	"net/http"

	actions_model "code.gitea.io/gitea/models/actions"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/services/context"
)

// Artifacts using the v4 backend are stored as a single combined zip file per artifact on the backend.
// The v4 backend ensures ContentEncoding is set to "application/zip", which is not the case for the old backend.
func IsArtifactV4(art *actions_model.ActionArtifact) bool {
	return art.ArtifactName+".zip" == art.ArtifactPath && art.ContentEncoding == "application/zip"
}

func DownloadArtifactV4ServeDirectOnly(ctx *context.Base, art *actions_model.ActionArtifact) (bool, error) {
	if setting.Actions.ArtifactStorage.ServeDirect() {
		u, err := storage.ActionsArtifacts.URL(art.StoragePath, art.ArtifactPath, nil)
		if u != nil && err == nil {
			ctx.Redirect(u.String(), http.StatusFound)
			return true, nil
		}
	}
	return false, nil
}

func DownloadArtifactV4Fallback(ctx *context.Base, art *actions_model.ActionArtifact) error {
	f, err := storage.ActionsArtifacts.Open(art.StoragePath)
	if err != nil {
		return err
	}
	defer f.Close()
	http.ServeContent(ctx.Resp, ctx.Req, art.ArtifactName+".zip", art.CreatedUnix.AsLocalTime(), f)
	return nil
}

func DownloadArtifactV4(ctx *context.Base, art *actions_model.ActionArtifact) error {
	ok, err := DownloadArtifactV4ServeDirectOnly(ctx, art)
	if ok || err != nil {
		return err
	}
	return DownloadArtifactV4Fallback(ctx, art)
}
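Reviewer note, not part of the diff: a hedged sketch of how a route handler might use these helpers; only ctx.Resp is assumed here, since it is already used by DownloadArtifactV4Fallback above (the handler itself is hypothetical):

	// serveArtifact serves a v4 artifact, preferring a direct storage redirect.
	func serveArtifact(ctx *context.Base, art *actions_model.ActionArtifact) {
		if !actions.IsArtifactV4(art) {
			http.Error(ctx.Resp, "not a v4 artifact", http.StatusBadRequest)
			return
		}
		if err := actions.DownloadArtifactV4(ctx, art); err != nil {
			http.Error(ctx.Resp, err.Error(), http.StatusInternalServerError)
		}
	}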
@@ -70,11 +70,18 @@ func GuessCurrentHostURL(ctx context.Context) string {
	// 1. The reverse proxy is configured correctly, it passes "X-Forwarded-Proto/Host" headers. Perfect, Gitea can handle it correctly.
	// 2. The reverse proxy is not configured correctly, doesn't pass "X-Forwarded-Proto/Host" headers, eg: only one "proxy_pass http://gitea:3000" in Nginx.
	// 3. There is no reverse proxy.
	// Without an extra config option, it is impossible for Gitea to distinguish between case 2 and case 3,
	// so case 2 would result in a wrong guess: the guessed AppURL becomes "http://gitea:3000/", which is not accessible by end users.
	// So in the future maybe it should introduce a new config option, to let the site admin decide how to guess the AppURL.
	// With the "USE_HOST_HEADER" config option disabled (default), it is impossible for Gitea to distinguish between case 2 and case 3.
	// When enabling "USE_HOST_HEADER", any reverse proxies must be configured to properly pass "X-Forwarded-Proto/Host" headers,
	// otherwise this would result in a wrong guess: the guessed AppURL becomes "http://gitea:3000/", which is not accessible by end users.
	reqScheme := getRequestScheme(req)
	if reqScheme == "" {
		if setting.UseHostHeader && req.Host != "" {
			if req.TLS != nil {
				return "https://" + req.Host
			}
			return "http://" + req.Host
		}

		return strings.TrimSuffix(setting.AppURL, setting.AppSubURL+"/")
	}
	// X-Forwarded-Host has many problems: non-standard, not well-defined (X-Forwarded-Port or not), conflicts with Host header.

@@ -5,6 +5,7 @@ package httplib

import (
	"context"
	"crypto/tls"
	"net/http"
	"testing"

@@ -39,6 +40,32 @@ func TestIsRelativeURL(t *testing.T) {
	}
}

func TestGuessCurrentHostURL(t *testing.T) {
	defer test.MockVariableValue(&setting.AppURL, "http://cfg-host/sub/")()
	defer test.MockVariableValue(&setting.AppSubURL, "/sub")()

	ctx := context.Background()
	assert.Equal(t, "http://cfg-host", GuessCurrentHostURL(ctx))

	ctx = context.WithValue(ctx, RequestContextKey, &http.Request{
		Host: "localhost:3000",
	})
	assert.Equal(t, "http://cfg-host", GuessCurrentHostURL(ctx))

	defer test.MockVariableValue(&setting.UseHostHeader, true)()

	ctx = context.WithValue(ctx, RequestContextKey, &http.Request{
		Host: "localhost:3000",
	})
	assert.Equal(t, "http://localhost:3000", GuessCurrentHostURL(ctx))

	ctx = context.WithValue(ctx, RequestContextKey, &http.Request{
		Host: "localhost",
		TLS:  &tls.ConnectionState{},
	})
	assert.Equal(t, "https://localhost", GuessCurrentHostURL(ctx))
}

func TestMakeAbsoluteURL(t *testing.T) {
	defer test.MockVariableValue(&setting.Protocol, "http")()
	defer test.MockVariableValue(&setting.AppURL, "http://cfg-host/sub/")()

@@ -260,17 +260,28 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int
	var (
		indexerQuery query.Query
		keywordQuery query.Query
		contentQuery query.Query
	)

	pathQuery := bleve.NewPrefixQuery(strings.ToLower(opts.Keyword))
	pathQuery.FieldVal = "Filename"
	pathQuery.SetBoost(10)

	contentQuery := bleve.NewMatchQuery(opts.Keyword)
	contentQuery.FieldVal = "Content"

	if opts.IsKeywordFuzzy {
		contentQuery.Fuzziness = inner_bleve.GuessFuzzinessByKeyword(opts.Keyword)
	keywordAsPhrase, isPhrase := internal.ParseKeywordAsPhrase(opts.Keyword)
	if isPhrase {
		q := bleve.NewMatchPhraseQuery(keywordAsPhrase)
		q.FieldVal = "Content"
		if opts.IsKeywordFuzzy {
			q.Fuzziness = inner_bleve.GuessFuzzinessByKeyword(keywordAsPhrase)
		}
		contentQuery = q
	} else {
		q := bleve.NewMatchQuery(opts.Keyword)
		q.FieldVal = "Content"
		if opts.IsKeywordFuzzy {
			q.Fuzziness = inner_bleve.GuessFuzzinessByKeyword(opts.Keyword)
		}
		contentQuery = q
	}

	keywordQuery = bleve.NewDisjunctionQuery(contentQuery, pathQuery)

@@ -24,6 +24,7 @@ import (
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/typesniffer"
	"code.gitea.io/gitea/modules/util"

	"github.com/go-enry/go-enry/v2"
	"github.com/olivere/elastic/v7"
@@ -359,13 +360,19 @@ func extractAggs(searchResult *elastic.SearchResult) []*internal.SearchResultLan

// Search searches for codes and language stats by given conditions.
func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int64, []*internal.SearchResult, []*internal.SearchResultLanguages, error) {
	searchType := esMultiMatchTypePhrasePrefix
	if opts.IsKeywordFuzzy {
		searchType = esMultiMatchTypeBestFields
	var contentQuery elastic.Query
	keywordAsPhrase, isPhrase := internal.ParseKeywordAsPhrase(opts.Keyword)
	if isPhrase {
		contentQuery = elastic.NewMatchPhraseQuery("content", keywordAsPhrase)
	} else {
		// TODO: this is the old logic, but not really using "fuzziness"
		// * IsKeywordFuzzy=true: "best_fields"
		// * IsKeywordFuzzy=false: "phrase_prefix"
		contentQuery = elastic.NewMultiMatchQuery("content", opts.Keyword).
			Type(util.Iif(opts.IsKeywordFuzzy, esMultiMatchTypeBestFields, esMultiMatchTypePhrasePrefix))
	}

	kwQuery := elastic.NewBoolQuery().Should(
		elastic.NewMultiMatchQuery(opts.Keyword, "content").Type(searchType),
		contentQuery,
		elastic.NewMultiMatchQuery(opts.Keyword, "filename^10").Type(esMultiMatchTypePhrasePrefix),
	)
	query := elastic.NewBoolQuery()
59	modules/indexer/code/gitgrep/gitgrep.go (new file)
@@ -0,0 +1,59 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package gitgrep

import (
	"context"
	"fmt"
	"strings"

	"code.gitea.io/gitea/modules/git"
	code_indexer "code.gitea.io/gitea/modules/indexer/code"
	"code.gitea.io/gitea/modules/setting"
)

func indexSettingToGitGrepPathspecList() (list []string) {
	for _, expr := range setting.Indexer.IncludePatterns {
		list = append(list, ":(glob)"+expr.PatternString())
	}
	for _, expr := range setting.Indexer.ExcludePatterns {
		list = append(list, ":(glob,exclude)"+expr.PatternString())
	}
	return list
}

func PerformSearch(ctx context.Context, page int, repoID int64, gitRepo *git.Repository, ref git.RefName, keyword string, isFuzzy bool) (searchResults []*code_indexer.Result, total int, err error) {
	// TODO: it should also respect ParseKeywordAsPhrase and clarify the "fuzzy" behavior
	res, err := git.GrepSearch(ctx, gitRepo, keyword, git.GrepOptions{
		ContextLineNumber: 1,
		IsFuzzy:           isFuzzy,
		RefName:           ref.String(),
		PathspecList:      indexSettingToGitGrepPathspecList(),
	})
	if err != nil {
		// TODO: if no branch exists, it reports: exit status 128, fatal: this operation must be run in a work tree.
		return nil, 0, fmt.Errorf("git.GrepSearch: %w", err)
	}
	commitID, err := gitRepo.GetRefCommitID(ref.String())
	if err != nil {
		return nil, 0, fmt.Errorf("gitRepo.GetRefCommitID: %w", err)
	}

	total = len(res)
	pageStart := min((page-1)*setting.UI.RepoSearchPagingNum, len(res))
	pageEnd := min(page*setting.UI.RepoSearchPagingNum, len(res))
	res = res[pageStart:pageEnd]
	for _, r := range res {
		searchResults = append(searchResults, &code_indexer.Result{
			RepoID:   repoID,
			Filename: r.Filename,
			CommitID: commitID,
			// UpdatedUnix: not supported yet
			// Language:    not supported yet
			// Color:       not supported yet
			Lines: code_indexer.HighlightSearchResultCode(r.Filename, "", r.LineNumbers, strings.Join(r.LineCodes, "\n")),
		})
	}
	return searchResults, total, nil
}
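Reviewer note, not part of the diff: a rough sketch of a caller that grep-searches the default branch when the code indexer is disabled (the wiring and the RefNameFromBranch helper are assumptions based on usage elsewhere in Gitea):

	// searchRepoCode runs page 1 of a git-grep backed search on the default branch.
	func searchRepoCode(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, keyword string) ([]*code_indexer.Result, int, error) {
		ref := git.RefNameFromBranch(repo.DefaultBranch)
		return gitgrep.PerformSearch(ctx, 1, repo.ID, gitRepo, ref, keyword, false)
	}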
@@ -1,7 +1,7 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package repo
package gitgrep

import (
	"testing"
@@ -29,13 +29,11 @@ var (
	// When the real indexer is not ready, it will be a dummy indexer which will return an error to explain it's not ready.
	// So it's always safe to use it as *globalIndexer.Load() and call its methods.
	globalIndexer atomic.Pointer[internal.Indexer]
	dummyIndexer  *internal.Indexer
)

func init() {
	i := internal.NewDummyIndexer()
	dummyIndexer = &i
	globalIndexer.Store(dummyIndexer)
	dummyIndexer := internal.NewDummyIndexer()
	globalIndexer.Store(&dummyIndexer)
}

func index(ctx context.Context, indexer internal.Indexer, repoID int64) error {

@@ -35,7 +35,7 @@ func FilenameOfIndexerID(indexerID string) string {
	return indexerID[index+1:]
}

// Given the contents of file, returns the boundaries of its first seven lines.
// FilenameMatchIndexPos returns the boundaries of its first seven lines.
func FilenameMatchIndexPos(content string) (int, int) {
	count := 1
	for i, c := range content {
@@ -48,3 +48,11 @@ func FilenameMatchIndexPos(content string) (int, int) {
	}
	return 0, len(content)
}

func ParseKeywordAsPhrase(keyword string) (string, bool) {
	if strings.HasPrefix(keyword, `"`) && strings.HasSuffix(keyword, `"`) && len(keyword) > 1 {
		// only remove the prefix and suffix quotes, no need to decode the content at the moment
		return keyword[1 : len(keyword)-1], true
	}
	return "", false
}
30	modules/indexer/code/internal/util_test.go (new file)
@@ -0,0 +1,30 @@
// Copyright 2025 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package internal

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestParseKeywordAsPhrase(t *testing.T) {
	cases := []struct {
		keyword  string
		phrase   string
		isPhrase bool
	}{
		{``, "", false},
		{`a`, "", false},
		{`"`, "", false},
		{`"a`, "", false},
		{`"a"`, "a", true},
		{`""\"""`, `"\""`, true},
	}
	for _, c := range cases {
		phrase, isPhrase := ParseKeywordAsPhrase(c.keyword)
		assert.Equal(t, c.phrase, phrase, "keyword=%q", c.keyword)
		assert.Equal(t, c.isPhrase, isPhrase, "keyword=%q", c.keyword)
	}
}
@@ -73,9 +73,9 @@ func ToDBOptions(ctx context.Context, options *internal.SearchOptions) (*issue_m
	UpdatedBeforeUnix: options.UpdatedBeforeUnix.Value(),
	PriorityRepoID:    0,
	IsArchived:        options.IsArchived,
	Org:               nil,
	Owner:             nil,
	Team:              nil,
	User:              nil,
	Doer:              nil,
}

if len(options.MilestoneIDs) == 1 && options.MilestoneIDs[0] == 0 {

@@ -5,6 +5,7 @@ package log

import (
	"context"
	"reflect"
	"runtime"
	"strings"
	"sync"
@@ -175,6 +176,20 @@ func (l *LoggerImpl) IsEnabled() bool {
	return l.level.Load() < int32(FATAL) && len(l.eventWriters) > 0
}

func asLogStringer(v any) LogStringer {
	if s, ok := v.(LogStringer); ok {
		return s
	} else if a := reflect.ValueOf(v); a.Kind() == reflect.Struct {
		// in case the receiver is a pointer, but the value is a struct
		vp := reflect.New(a.Type())
		vp.Elem().Set(a)
		if s, ok := vp.Interface().(LogStringer); ok {
			return s
		}
	}
	return nil
}

// Log prepares the log event, if the level matches, the event will be sent to the writers
func (l *LoggerImpl) Log(skip int, level Level, format string, logArgs ...any) {
	if Level(l.level.Load()) > level {
@@ -207,11 +222,11 @@ func (l *LoggerImpl) Log(skip int, level Level, format string, logArgs ...any) {
	// handle LogStringer values
	for i, v := range msgArgs {
		if cv, ok := v.(*ColoredValue); ok {
			if s, ok := cv.v.(LogStringer); ok {
				cv.v = logStringFormatter{v: s}
			if ls := asLogStringer(cv.v); ls != nil {
				cv.v = logStringFormatter{v: ls}
			}
		} else if s, ok := v.(LogStringer); ok {
			msgArgs[i] = logStringFormatter{v: s}
		} else if ls := asLogStringer(v); ls != nil {
			msgArgs[i] = logStringFormatter{v: ls}
		}
	}

@@ -116,6 +116,14 @@ func (t testLogString) LogString() string {
	return "log-string"
}

type testLogStringPtrReceiver struct {
	Field string
}

func (t *testLogStringPtrReceiver) LogString() string {
	return "log-string-ptr-receiver"
}

func TestLoggerLogString(t *testing.T) {
	logger := NewLoggerWithWriters(context.Background(), "test")

@@ -124,9 +132,13 @@ func TestLoggerLogString(t *testing.T) {
	logger.AddWriters(w1)

	logger.Info("%s %s %#v %v", testLogString{}, &testLogString{}, testLogString{Field: "detail"}, NewColoredValue(testLogString{}, FgRed))
	logger.Info("%s %s %#v %v", testLogStringPtrReceiver{}, &testLogStringPtrReceiver{}, testLogStringPtrReceiver{Field: "detail"}, NewColoredValue(testLogStringPtrReceiver{}, FgRed))
	logger.Close()

	assert.Equal(t, []string{"log-string log-string log.testLogString{Field:\"detail\"} \x1b[31mlog-string\x1b[0m\n"}, w1.GetLogs())
	assert.Equal(t, []string{
		"log-string log-string log.testLogString{Field:\"detail\"} \x1b[31mlog-string\x1b[0m\n",
		"log-string-ptr-receiver log-string-ptr-receiver &log.testLogStringPtrReceiver{Field:\"detail\"} \x1b[31mlog-string-ptr-receiver\x1b[0m\n",
	}, w1.GetLogs())
}

func TestLoggerExpressionFilter(t *testing.T) {

@@ -50,6 +50,10 @@ var (
	AppSubURL string
	// UseSubURLPath makes Gitea handle requests with sub-path like "/sub-path/owner/repo/...", to make it easier to debug sub-path related problems without a reverse proxy.
	UseSubURLPath bool
	// UseHostHeader makes Gitea always use the "Host" request header for construction of absolute URLs.
	// This requires any reverse proxy to properly pass headers like "X-Forwarded-Proto" and "Host".
	// It maps to ini:"USE_HOST_HEADER" in [server] and defaults to false
	UseHostHeader bool
	// AppDataPath is the default path for storing data.
	// It maps to ini:"APP_DATA_PATH" in [server] and defaults to AppWorkPath + "/data"
	AppDataPath string
@@ -277,6 +281,7 @@ func loadServerFrom(rootCfg ConfigProvider) {
	// This value is empty if site does not have sub-url.
	AppSubURL = strings.TrimSuffix(appURL.Path, "/")
	UseSubURLPath = sec.Key("USE_SUB_URL_PATH").MustBool(false)
	UseHostHeader = sec.Key("USE_HOST_HEADER").MustBool(false)
	StaticURLPrefix = strings.TrimSuffix(sec.Key("STATIC_URL_PREFIX").MustString(AppSubURL), "/")

	// Check if Domain differs from AppURL domain, then update it to AppURL's domain

@@ -32,3 +32,67 @@ type ActionTaskResponse struct {
	Entries    []*ActionTask `json:"workflow_runs"`
	TotalCount int64         `json:"total_count"`
}

// CreateActionWorkflowDispatch represents the payload for triggering a workflow dispatch event
// swagger:model
type CreateActionWorkflowDispatch struct {
	// required: true
	// example: refs/heads/main
	Ref string `json:"ref" binding:"Required"`
	// required: false
	Inputs map[string]string `json:"inputs,omitempty"`
}

// ActionWorkflow represents an ActionWorkflow
type ActionWorkflow struct {
	ID    string `json:"id"`
	Name  string `json:"name"`
	Path  string `json:"path"`
	State string `json:"state"`
	// swagger:strfmt date-time
	CreatedAt time.Time `json:"created_at"`
	// swagger:strfmt date-time
	UpdatedAt time.Time `json:"updated_at"`
	URL       string    `json:"url"`
	HTMLURL   string    `json:"html_url"`
	BadgeURL  string    `json:"badge_url"`
	// swagger:strfmt date-time
	DeletedAt time.Time `json:"deleted_at,omitempty"`
}

// ActionWorkflowResponse returns an ActionWorkflow
type ActionWorkflowResponse struct {
	Workflows  []*ActionWorkflow `json:"workflows"`
	TotalCount int64             `json:"total_count"`
}

// ActionArtifact represents an ActionArtifact
type ActionArtifact struct {
	ID                 int64              `json:"id"`
	Name               string             `json:"name"`
	SizeInBytes        int64              `json:"size_in_bytes"`
	URL                string             `json:"url"`
	ArchiveDownloadURL string             `json:"archive_download_url"`
	Expired            bool               `json:"expired"`
	WorkflowRun        *ActionWorkflowRun `json:"workflow_run"`

	// swagger:strfmt date-time
	CreatedAt time.Time `json:"created_at"`
	// swagger:strfmt date-time
	UpdatedAt time.Time `json:"updated_at"`
	// swagger:strfmt date-time
	ExpiresAt time.Time `json:"expires_at"`
}

// ActionWorkflowRun represents a WorkflowRun
type ActionWorkflowRun struct {
	ID           int64  `json:"id"`
	RepositoryID int64  `json:"repository_id"`
	HeadSha      string `json:"head_sha"`
}

// ActionArtifactsResponse returns ActionArtifacts
type ActionArtifactsResponse struct {
	Entries    []*ActionArtifact `json:"artifacts"`
	TotalCount int64             `json:"total_count"`
}

@@ -10,13 +10,16 @@ import (

// Common Errors forming the base of our error system
//
// Many Errors returned by Gitea can be tested against these errors
// using errors.Is.
// Many Errors returned by Gitea can be tested against these errors using "errors.Is".
var (
	ErrInvalidArgument  = errors.New("invalid argument")
	ErrPermissionDenied = errors.New("permission denied")
	ErrAlreadyExist     = errors.New("resource already exists")
	ErrNotExist         = errors.New("resource does not exist")
	ErrInvalidArgument  = errors.New("invalid argument")        // also implies HTTP 400
	ErrPermissionDenied = errors.New("permission denied")       // also implies HTTP 403
	ErrNotExist         = errors.New("resource does not exist") // also implies HTTP 404
	ErrAlreadyExist     = errors.New("resource already exists") // also implies HTTP 409

	// ErrUnprocessableContent implies HTTP 422: the syntax of the request content was correct,
	// but the server was unable to process the contained instructions
	ErrUnprocessableContent = errors.New("unprocessable content")
)

// SilentWrap provides a simple wrapper for a wrapped error where the wrapped error message plays no part in the error message
@@ -36,6 +39,22 @@ func (w SilentWrap) Unwrap() error {
	return w.Err
}

type LocaleWrap struct {
	err    error
	TrKey  string
	TrArgs []any
}

// Error returns the message
func (w LocaleWrap) Error() string {
	return w.err.Error()
}

// Unwrap returns the underlying error
func (w LocaleWrap) Unwrap() error {
	return w.err
}

// NewSilentWrapErrorf returns an error that formats as the given text but unwraps as the provided error
func NewSilentWrapErrorf(unwrap error, message string, args ...any) error {
	if len(args) == 0 {
@@ -63,3 +82,16 @@ func NewAlreadyExistErrorf(message string, args ...any) error {
func NewNotExistErrorf(message string, args ...any) error {
	return NewSilentWrapErrorf(ErrNotExist, message, args...)
}

// ErrWrapLocale wraps an err with a translation key and arguments
func ErrWrapLocale(err error, trKey string, trArgs ...any) error {
	return LocaleWrap{err: err, TrKey: trKey, TrArgs: trArgs}
}

func ErrAsLocale(err error) *LocaleWrap {
	var e LocaleWrap
	if errors.As(err, &e) {
		return &e
	}
	return nil
}
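Reviewer note, not part of the diff: the intended round-trip for LocaleWrap, sketched under the assumption that the boundary layer owns a translator; the translation key is hypothetical:

	// A service wraps a sentinel error with a translation key...
	func removeProtectedBranch() error {
		return util.ErrWrapLocale(util.ErrPermissionDenied, "repo.branch.delete_protected_branch")
	}

	// ...and the HTTP/UI boundary recovers the key to localize the message.
	func localizedMessage(err error) string {
		if le := util.ErrAsLocale(err); le != nil {
			return le.TrKey // a real handler would pass TrKey/TrArgs to its translator
		}
		return err.Error()
	}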
@@ -71,3 +71,10 @@ func KeysOfMap[K comparable, V any](m map[K]V) []K {
	}
	return keys
}

func SliceNilAsEmpty[T any](a []T) []T {
	if a == nil {
		return []T{}
	}
	return a
}
@ -385,6 +385,7 @@ show_only_public=Zobrazeny pouze veřejné
|
||||
|
||||
issues.in_your_repos=Ve vašich repozitářích
|
||||
|
||||
|
||||
[explore]
|
||||
repos=Repozitáře
|
||||
users=Uživatelé
|
||||
|
@ -384,6 +384,7 @@ show_only_public=Nur öffentliche anzeigen
|
||||
|
||||
issues.in_your_repos=Eigene Repositories
|
||||
|
||||
|
||||
[explore]
|
||||
repos=Repositories
|
||||
users=Benutzer
|
||||
|
@ -335,6 +335,7 @@ show_only_public=Εμφανίζονται μόνο δημόσια
|
||||
|
||||
issues.in_your_repos=Στα αποθετήρια σας
|
||||
|
||||
|
||||
[explore]
|
||||
repos=Αποθετήρια
|
||||
users=Χρήστες
|
||||
|
@ -385,6 +385,13 @@ show_only_public = Showing only public
|
||||
|
||||
issues.in_your_repos = In your repositories
|
||||
|
||||
guide_title = No Activity
|
||||
guide_desc = You are currently not following any repositories or users, so there is no content to display. You can explore repositories or users of interest from the links below.
|
||||
explore_repos = Explore repositories
|
||||
explore_users = Explore users
|
||||
empty_org = There are no organizations yet.
|
||||
empty_repo = There are no repositories yet.
|
||||
|
||||
[explore]
|
||||
repos = Repositories
|
||||
users = Users
|
||||
@ -1695,7 +1702,9 @@ issues.time_estimate_invalid = Time estimate format is invalid
|
||||
issues.start_tracking_history = started working %s
|
||||
issues.tracker_auto_close = Timer will be stopped automatically when this issue gets closed
|
||||
issues.tracking_already_started = `You have already started time tracking on <a href="%s">another issue</a>!`
|
||||
issues.stop_tracking = Stop Timer
|
||||
issues.stop_tracking_history = worked for <b>%[1]s</b> %[2]s
|
||||
issues.cancel_tracking = Discard
|
||||
issues.cancel_tracking_history = `canceled time tracking %s`
|
||||
issues.del_time = Delete this time log
|
||||
issues.add_time_history = added spent time <b>%[1]s</b> %[2]s
|
||||
@@ -333,6 +333,7 @@ show_only_public=Mostrar sólo repositorios públicos

issues.in_your_repos=En tus repositorios

[explore]
repos=Repositorios
users=Usuarios

@@ -256,6 +256,7 @@ show_only_public=نمایش دادن موارد عمومی

issues.in_your_repos=در مخازن شما

[explore]
repos=مخازن
users=کاربران

@@ -266,6 +266,7 @@ show_only_public=Näytetään vain julkiset

issues.in_your_repos=Repoissasi

[explore]
repos=Repot
users=Käyttäjät

@@ -385,6 +385,13 @@ show_only_public=Afficher uniquement les dépôts publics

issues.in_your_repos=Dans vos dépôts

+guide_title=Aucune activité
+guide_desc=Vous n’êtes actuellement abonné à aucun dépôt ou utilisateur et il n’y a donc aucun contenu à afficher. Les liens ci-dessous vous permettront d’explorer des dépôts ou des utilisateurs susceptibles de vous intéresser.
+explore_repos=Explorer des dépôts
+explore_users=Explorer des utilisateurs
+empty_org=Il n’y a pas encore d’organisations.
+empty_repo=Il n’y a pas encore de dépôts.

[explore]
repos=Dépôts
users=Utilisateurs

@@ -2330,6 +2337,8 @@ settings.event_fork=Bifurcation
settings.event_fork_desc=Dépôt bifurqué.
+settings.event_wiki=Wiki
+settings.event_wiki_desc=Page wiki créée, renommée, modifiée ou supprimée.
settings.event_statuses=Statuts
settings.event_statuses_desc=Statut de validation mis à jour depuis l’API.
settings.event_release=Publication
settings.event_release_desc=Publication publiée, mise à jour ou supprimée.
settings.event_push=Soumission

@@ -2877,6 +2886,14 @@ view_as_role=Voir en tant que %s
view_as_public_hint=Vous visualisez le README en tant qu’utilisateur public.
view_as_member_hint=Vous visualisez le README en tant que membre de cette organisation.

+worktime=Temps de travail
+worktime.date_range_start=Date de début
+worktime.date_range_end=Date de fin
+worktime.query=Demande
+worktime.time=Durée
+worktime.by_repositories=Par dépôts
+worktime.by_milestones=Par jalons
+worktime.by_members=Par membres

[admin]
maintenance=Maintenance

@@ -385,6 +385,13 @@ show_only_public=Ag taispeáint poiblí amháin

issues.in_your_repos=I do stórais

+guide_title=Gan Ghníomhaíocht
+guide_desc=Níl aon stórtha nó úsáideoirí á leanúint agat faoi láthair, mar sin níl aon ábhar le taispeáint. Is féidir leat stórtha nó úsáideoirí spéise a iniúchadh ó na naisc thíos.
+explore_repos=Déan stórtha a iniúchadh
+explore_users=Déan iniúchadh ar úsáideoirí
+empty_org=Níl aon eagraíochtaí ann fós.
+empty_repo=Níl aon stórtha ann fós.

[explore]
repos=Stórais
users=Úsáideoirí

@@ -225,6 +225,7 @@ show_only_public=Csak publikus mutatása

issues.in_your_repos=A tárolóidban

[explore]
repos=Tárolók
users=Felhasználók

@@ -243,6 +243,7 @@ show_private=Pribadi

issues.in_your_repos=Dalam repositori anda

[explore]
repos=Repositori
users=Pengguna

@@ -240,6 +240,7 @@ show_only_public=Að sýna aðeins opinber

issues.in_your_repos=Í hugbúnaðarsöfnum þínum

[explore]
repos=Hugbúnaðarsöfn
users=Notendur

@@ -277,6 +277,7 @@ show_only_public=Mostrando solo pubblici

issues.in_your_repos=Nei tuoi repository

[explore]
repos=Repository
users=Utenti

@@ -385,6 +385,7 @@ show_only_public=公開のみ表示

issues.in_your_repos=あなたのリポジトリ

[explore]
repos=リポジトリ
users=ユーザー

@@ -212,6 +212,7 @@ show_private=비공개

issues.in_your_repos=당신의 저장소에

[explore]
repos=저장소
users=유저

@@ -338,6 +338,7 @@ show_only_public=Attēlot tikai publiskos

issues.in_your_repos=Jūsu repozitorijos

[explore]
repos=Repozitoriji
users=Lietotāji

@@ -276,6 +276,7 @@ show_only_public=Toon alleen opbenbaar

issues.in_your_repos=In uw repositories

[explore]
repos=Repositories
users=Gebruikers

@@ -272,6 +272,7 @@ show_only_public=Wyświetlanie tylko publicznych

issues.in_your_repos=W Twoich repozytoriach

[explore]
repos=Repozytoria
users=Użytkownicy

@@ -335,6 +335,7 @@ show_only_public=Mostrando somente públicos

issues.in_your_repos=Em seus repositórios

[explore]
repos=Repositórios
users=Usuários

@@ -385,6 +385,13 @@ show_only_public=Apresentando somente os públicos

issues.in_your_repos=Nos seus repositórios

+guide_title=Sem trabalho
+guide_desc=Neste momento não está a seguir repositórios nem utilizadores, por isso não há conteúdo a apresentar. Pode explorar repositórios ou utilizadores de interesse a partir das ligações abaixo.
+explore_repos=Explorar repositórios
+explore_users=Explorar utilizadores
+empty_org=Ainda não há organizações.
+empty_repo=Ainda não há repositórios.

[explore]
repos=Repositórios
users=Utilizadores

@@ -333,6 +333,7 @@ show_only_public=Показаны только публичные

issues.in_your_repos=В ваших репозиториях

[explore]
repos=Репозитории
users=Пользователи

@@ -246,6 +246,7 @@ show_only_public=ප්රසිද්ධ පමණක් පෙන්වය

issues.in_your_repos=ඔබගේ කෝෂ්ඨවල

[explore]
repos=කෝෂ්ඨ
users=පරිශීලකයින්

@@ -328,6 +328,7 @@ show_only_public=Zobrazuje sa iba verejné

issues.in_your_repos=Vo vašich repozitároch

[explore]
repos=Repozitáre
users=Používatelia

@@ -233,6 +233,7 @@ show_only_public=Visar endast publika

issues.in_your_repos=I dina utvecklingskataloger

[explore]
repos=Utvecklingskataloger
users=Användare

@@ -380,6 +380,7 @@ show_only_public=Yalnızca açık olanlar gösteriliyor

issues.in_your_repos=Depolarınızda

[explore]
repos=Depolar
users=Kullanıcılar

@@ -260,6 +260,7 @@ show_only_public=Показано тільки публічні

issues.in_your_repos=В ваших репозиторіях

[explore]
repos=Репозиторії
users=Користувачі

@@ -384,6 +384,7 @@ show_only_public=只显示公开的

issues.in_your_repos=在您的仓库中

[explore]
repos=仓库
users=用户

@@ -118,6 +118,7 @@ show_private=私有庫

issues.in_your_repos=屬於該用戶儲存庫的

[explore]
repos=儲存庫
users=使用者

@@ -383,6 +383,7 @@ show_only_public=只顯示公開

issues.in_your_repos=在您的儲存庫中

[explore]
repos=儲存庫
users=使用者
4278  package-lock.json (generated)
File diff suppressed because it is too large
55  package.json
@@ -4,29 +4,29 @@
    "node": ">= 18.0.0"
  },
  "dependencies": {
-   "@citation-js/core": "0.7.14",
-   "@citation-js/plugin-bibtex": "0.7.17",
-   "@citation-js/plugin-csl": "0.7.14",
+   "@citation-js/core": "0.7.18",
+   "@citation-js/plugin-bibtex": "0.7.18",
+   "@citation-js/plugin-csl": "0.7.18",
    "@citation-js/plugin-software-formats": "0.6.1",
    "@github/markdown-toolbar-element": "2.2.3",
    "@github/relative-time-element": "4.4.5",
    "@github/text-expander-element": "2.9.1",
    "@mcaptcha/vanilla-glue": "0.1.0-alpha-3",
-   "@primer/octicons": "19.14.0",
+   "@primer/octicons": "19.15.0",
    "@silverwind/vue3-calendar-heatmap": "2.0.6",
    "add-asset-webpack-plugin": "3.0.0",
    "ansi_up": "6.0.2",
-   "asciinema-player": "3.8.2",
+   "asciinema-player": "3.9.0",
    "chart.js": "4.4.7",
    "chartjs-adapter-dayjs-4": "1.0.4",
    "chartjs-plugin-zoom": "2.2.0",
-   "clippie": "4.1.4",
+   "clippie": "4.1.5",
    "cropperjs": "1.6.2",
    "css-loader": "7.1.2",
    "dayjs": "1.11.13",
    "dropzone": "6.0.0-beta.2",
    "easymde": "2.18.0",
-   "esbuild-loader": "4.2.2",
+   "esbuild-loader": "4.3.0",
    "escape-goat": "4.0.0",
    "fast-glob": "3.3.3",
    "htmx.org": "2.0.4",

@@ -39,13 +39,13 @@
    "minimatch": "10.0.1",
    "monaco-editor": "0.52.2",
    "monaco-editor-webpack-plugin": "7.1.0",
-   "pdfobject": "2.3.0",
+   "pdfobject": "2.3.1",
    "perfect-debounce": "1.0.0",
-   "postcss": "8.5.1",
+   "postcss": "8.5.2",
    "postcss-loader": "8.1.1",
    "postcss-nesting": "13.0.1",
    "sortablejs": "1.15.6",
-   "swagger-ui-dist": "5.18.2",
+   "swagger-ui-dist": "5.18.3",
    "tailwindcss": "3.4.17",
    "throttle-debounce": "5.0.2",
    "tinycolor2": "1.6.0",

@@ -59,7 +59,7 @@
    "vue-bar-graph": "2.2.0",
    "vue-chartjs": "5.3.2",
    "vue-loader": "17.4.2",
-   "webpack": "5.97.1",
+   "webpack": "5.98.0",
    "webpack-cli": "6.0.1",
    "wrap-ansi": "9.0.0"
  },

@@ -67,8 +67,8 @@
    "@eslint-community/eslint-plugin-eslint-comments": "4.4.1",
    "@playwright/test": "1.49.1",
    "@stoplight/spectral-cli": "6.14.2",
-   "@stylistic/eslint-plugin-js": "2.13.0",
-   "@stylistic/stylelint-plugin": "3.1.1",
+   "@stylistic/eslint-plugin-js": "3.1.0",
+   "@stylistic/stylelint-plugin": "3.1.2",
    "@types/dropzone": "5.7.9",
    "@types/jquery": "3.5.32",
    "@types/katex": "0.16.7",

@@ -79,41 +79,40 @@
    "@types/throttle-debounce": "5.0.2",
    "@types/tinycolor2": "1.4.6",
    "@types/toastify-js": "1.12.3",
-   "@typescript-eslint/eslint-plugin": "8.21.0",
-   "@typescript-eslint/parser": "8.21.0",
+   "@typescript-eslint/eslint-plugin": "8.24.0",
+   "@typescript-eslint/parser": "8.24.0",
    "@vitejs/plugin-vue": "5.2.1",
+   "@vitest/eslint-plugin": "1.1.31",
    "eslint": "8.57.0",
-   "eslint-import-resolver-typescript": "3.7.0",
+   "eslint-import-resolver-typescript": "3.8.0",
    "eslint-plugin-array-func": "4.0.0",
    "eslint-plugin-github": "5.0.2",
    "eslint-plugin-import-x": "4.6.1",
    "eslint-plugin-no-jquery": "3.1.0",
    "eslint-plugin-no-use-extend-native": "0.5.0",
-   "eslint-plugin-playwright": "2.1.0",
+   "eslint-plugin-playwright": "2.2.0",
    "eslint-plugin-regexp": "2.7.0",
-   "eslint-plugin-sonarjs": "3.0.1",
+   "eslint-plugin-sonarjs": "3.0.2",
    "eslint-plugin-unicorn": "56.0.1",
-   "eslint-plugin-vitest": "0.4.1",
-   "eslint-plugin-vitest-globals": "1.5.0",
    "eslint-plugin-vue": "9.32.0",
    "eslint-plugin-vue-scoped-css": "2.9.0",
    "eslint-plugin-wc": "2.2.0",
-   "happy-dom": "16.7.2",
-   "markdownlint-cli": "0.43.0",
+   "happy-dom": "17.1.0",
+   "markdownlint-cli": "0.44.0",
    "nolyfill": "1.0.43",
    "postcss-html": "1.8.0",
    "stylelint": "16.14.1",
    "stylelint-config-recommended": "15.0.0",
    "stylelint-declaration-block-no-ignored-properties": "2.8.0",
    "stylelint-declaration-strict-value": "1.10.7",
-   "stylelint-define-config": "16.14.0",
+   "stylelint-define-config": "16.14.1",
    "stylelint-value-no-unknown-custom-properties": "6.0.1",
    "svgo": "3.3.2",
-   "type-fest": "4.33.0",
-   "updates": "16.4.1",
-   "vite-string-plugin": "1.4.3",
-   "vitest": "3.0.3",
-   "vue-tsc": "2.2.0"
+   "type-fest": "4.34.1",
+   "updates": "16.4.2",
+   "vite-string-plugin": "1.4.4",
+   "vitest": "3.0.5",
+   "vue-tsc": "2.2.2"
  },
  "browserslist": [
    "defaults"
10  poetry.lock (generated)
@@ -29,13 +29,14 @@ files = [

[[package]]
name = "cssbeautifier"
-version = "1.15.1"
+version = "1.15.3"
description = "CSS unobfuscator and beautifier."
optional = false
python-versions = "*"
groups = ["dev"]
files = [
-    {file = "cssbeautifier-1.15.1.tar.gz", hash = "sha256:9f7064362aedd559c55eeecf6b6bed65e05f33488dcbe39044f0403c26e1c006"},
+    {file = "cssbeautifier-1.15.3-py3-none-any.whl", hash = "sha256:0dcaf5ce197743a79b3a160b84ea58fcbd9e3e767c96df1171e428125b16d410"},
+    {file = "cssbeautifier-1.15.3.tar.gz", hash = "sha256:406b04d09e7d62c0be084fbfa2cba5126fe37359ea0d8d9f7b963a6354fc8303"},
]

[package.dependencies]

@@ -102,13 +103,14 @@ files = [

[[package]]
name = "jsbeautifier"
-version = "1.15.1"
+version = "1.15.3"
description = "JavaScript unobfuscator and beautifier."
optional = false
python-versions = "*"
groups = ["dev"]
files = [
-    {file = "jsbeautifier-1.15.1.tar.gz", hash = "sha256:ebd733b560704c602d744eafc839db60a1ee9326e30a2a80c4adb8718adc1b24"},
+    {file = "jsbeautifier-1.15.3-py3-none-any.whl", hash = "sha256:b207a15ab7529eee4a35ae7790e9ec4e32a2b5026d51e2d0386c3a65e6ecfc91"},
+    {file = "jsbeautifier-1.15.3.tar.gz", hash = "sha256:5f1baf3d4ca6a615bb5417ee861b34b77609eeb12875555f8bbfabd9bf2f3457"},
]

[package.dependencies]
1  public/assets/img/svg/octicon-square-circle.svg (generated, new file)

@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" class="svg octicon-square-circle" width="16" height="16" aria-hidden="true"><path d="M8 16A8 8 0 1 1 8 0a8 8 0 0 1 0 16m0-1.5a6.5 6.5 0 1 0 0-13 6.5 6.5 0 0 0 0 13"/><path d="M5 5.75A.75.75 0 0 1 5.75 5h4.5a.75.75 0 0 1 .75.75v4.5a.75.75 0 0 1-.75.75h-4.5a.75.75 0 0 1-.75-.75Z"/></svg>
@@ -135,7 +135,7 @@ func ArtifactContexter() func(next http.Handler) http.Handler {
	// we should verify the ACTIONS_RUNTIME_TOKEN
	authHeader := req.Header.Get("Authorization")
	if len(authHeader) == 0 || !strings.HasPrefix(authHeader, "Bearer ") {
-		ctx.Error(http.StatusUnauthorized, "Bad authorization header")
+		ctx.HTTPError(http.StatusUnauthorized, "Bad authorization header")
		return
	}

@@ -147,12 +147,12 @@ func ArtifactContexter() func(next http.Handler) http.Handler {
		task, err = actions.GetTaskByID(req.Context(), tID)
		if err != nil {
			log.Error("Error runner api getting task by ID: %v", err)
-			ctx.Error(http.StatusInternalServerError, "Error runner api getting task by ID")
+			ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task by ID")
			return
		}
		if task.Status != actions.StatusRunning {
			log.Error("Error runner api getting task: task is not running")
-			ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+			ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task: task is not running")
			return
		}
	} else {

@@ -162,14 +162,14 @@ func ArtifactContexter() func(next http.Handler) http.Handler {
		task, err = actions.GetRunningTaskByToken(req.Context(), authToken)
		if err != nil {
			log.Error("Error runner api getting task: %v", err)
-			ctx.Error(http.StatusInternalServerError, "Error runner api getting task")
+			ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task")
			return
		}
	}

	if err := task.LoadJob(req.Context()); err != nil {
		log.Error("Error runner api getting job: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error runner api getting job")
+		ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting job")
		return
	}

@@ -211,7 +211,7 @@ func (ar artifactRoutes) getUploadArtifactURL(ctx *ArtifactContext) {
	var req getUploadArtifactRequest
	if err := json.NewDecoder(ctx.Req.Body).Decode(&req); err != nil {
		log.Error("Error decode request body: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error decode request body")
+		ctx.HTTPError(http.StatusInternalServerError, "Error decode request body")
		return
	}

@@ -250,7 +250,7 @@ func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
		expiredDays, err = strconv.ParseInt(queryRetentionDays, 10, 64)
		if err != nil {
			log.Error("Error parse retention days: %v", err)
-			ctx.Error(http.StatusBadRequest, "Error parse retention days")
+			ctx.HTTPError(http.StatusBadRequest, "Error parse retention days")
			return
		}
	}

@@ -261,7 +261,7 @@ func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
	artifact, err := actions.CreateArtifact(ctx, task, artifactName, artifactPath, expiredDays)
	if err != nil {
		log.Error("Error create or get artifact: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
+		ctx.HTTPError(http.StatusInternalServerError, "Error create or get artifact")
		return
	}

@@ -271,7 +271,7 @@ func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
	chunksTotalSize, err := saveUploadChunk(ar.fs, ctx, artifact, contentLength, runID)
	if err != nil {
		log.Error("Error save upload chunk: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error save upload chunk")
+		ctx.HTTPError(http.StatusInternalServerError, "Error save upload chunk")
		return
	}

@@ -285,7 +285,7 @@ func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
	artifact.ContentEncoding = ctx.Req.Header.Get("Content-Encoding")
	if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
		log.Error("Error update artifact: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error update artifact")
+		ctx.HTTPError(http.StatusInternalServerError, "Error update artifact")
		return
	}
	log.Debug("[artifact] update artifact size, artifact_id: %d, size: %d, compressed size: %d",

@@ -307,12 +307,12 @@ func (ar artifactRoutes) comfirmUploadArtifact(ctx *ArtifactContext) {
	artifactName := ctx.Req.URL.Query().Get("artifactName")
	if artifactName == "" {
		log.Error("Error artifact name is empty")
-		ctx.Error(http.StatusBadRequest, "Error artifact name is empty")
+		ctx.HTTPError(http.StatusBadRequest, "Error artifact name is empty")
		return
	}
	if err := mergeChunksForRun(ctx, ar.fs, runID, artifactName); err != nil {
		log.Error("Error merge chunks: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+		ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
		return
	}
	ctx.JSON(http.StatusOK, map[string]string{

@@ -340,12 +340,12 @@ func (ar artifactRoutes) listArtifacts(ctx *ArtifactContext) {
	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{RunID: runID})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
-		ctx.Error(http.StatusInternalServerError, err.Error())
+		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	if len(artifacts) == 0 {
		log.Debug("[artifact] handleListArtifacts, no artifacts")
-		ctx.Error(http.StatusNotFound)
+		ctx.HTTPError(http.StatusNotFound)
		return
	}

@@ -405,18 +405,18 @@ func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) {
	})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
-		ctx.Error(http.StatusInternalServerError, err.Error())
+		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	if len(artifacts) == 0 {
		log.Debug("[artifact] getDownloadArtifactURL, no artifacts")
-		ctx.Error(http.StatusNotFound)
+		ctx.HTTPError(http.StatusNotFound)
		return
	}

	if itemPath != artifacts[0].ArtifactName {
		log.Error("Error dismatch artifact name, itemPath: %v, artifact: %v", itemPath, artifacts[0].ArtifactName)
-		ctx.Error(http.StatusBadRequest, "Error dismatch artifact name")
+		ctx.HTTPError(http.StatusBadRequest, "Error dismatch artifact name")
		return
	}

@@ -460,24 +460,24 @@ func (ar artifactRoutes) downloadArtifact(ctx *ArtifactContext) {
	artifact, exist, err := db.GetByID[actions.ActionArtifact](ctx, artifactID)
	if err != nil {
		log.Error("Error getting artifact: %v", err)
-		ctx.Error(http.StatusInternalServerError, err.Error())
+		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	if !exist {
		log.Error("artifact with ID %d does not exist", artifactID)
-		ctx.Error(http.StatusNotFound, fmt.Sprintf("artifact with ID %d does not exist", artifactID))
+		ctx.HTTPError(http.StatusNotFound, fmt.Sprintf("artifact with ID %d does not exist", artifactID))
		return
	}
	if artifact.RunID != runID {
		log.Error("Error mismatch runID and artifactID, task: %v, artifact: %v", runID, artifactID)
-		ctx.Error(http.StatusBadRequest)
+		ctx.HTTPError(http.StatusBadRequest)
		return
	}

	fd, err := ar.fs.Open(artifact.StoragePath)
	if err != nil {
		log.Error("Error opening file: %v", err)
-		ctx.Error(http.StatusInternalServerError, err.Error())
+		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	defer fd.Close()
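Nearly every hunk in this file is the same mechanical rename, `ctx.Error` becoming `ctx.HTTPError`, with logging and control flow untouched. As a rough sketch of the helper's apparent shape (the types and names below are illustrative stand-ins, not Gitea's actual context implementation):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
)

// Context is a stand-in for the request context type used in the handlers above.
type Context struct {
	Resp http.ResponseWriter
}

// HTTPError writes a plain-text error response; with no message it falls back
// to the standard status text, mirroring calls like ctx.HTTPError(http.StatusNotFound).
func (ctx *Context) HTTPError(status int, msgs ...string) {
	msg := http.StatusText(status)
	if len(msgs) > 0 {
		msg = msgs[0]
	}
	log.Printf("http error %d: %s", status, msg)
	http.Error(ctx.Resp, msg, status)
}

func main() {
	rec := httptest.NewRecorder()
	ctx := &Context{Resp: rec}
	ctx.HTTPError(http.StatusBadRequest, "run-id does not match")
	fmt.Println(rec.Code, rec.Body.String()) // 400 run-id does not match
}
```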
@@ -292,7 +292,7 @@ func mergeChunksForArtifact(ctx *ArtifactContext, chunks []*chunkFileItem, st st
	}

	artifact.StoragePath = storagePath
-	artifact.Status = int64(actions.ArtifactStatusUploadConfirmed)
+	artifact.Status = actions.ArtifactStatusUploadConfirmed
	if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
		return fmt.Errorf("update artifact error: %v", err)
	}
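The one non-rename change here drops an `int64` conversion, which suggests the artifact's `Status` field now uses the constant's named type directly, so the compiler can reject stray integers. A sketch of that pattern with invented names (not the actual Gitea model definitions):

```go
package main

import "fmt"

// ArtifactStatus is a named integer type; the constants carry the type,
// so assignments no longer need an int64() cast.
type ArtifactStatus int64

const (
	ArtifactStatusUploadPending   ArtifactStatus = iota + 1 // upload requested, chunks incoming
	ArtifactStatusUploadConfirmed                           // all chunks merged
	ArtifactStatusExpired                                   // retention window passed
)

type ActionArtifact struct {
	Status ArtifactStatus // previously a bare int64, which forced casts at every assignment
}

func main() {
	a := ActionArtifact{Status: ArtifactStatusUploadConfirmed}
	fmt.Println(a.Status == ArtifactStatusUploadConfirmed) // true
}
```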
@@ -26,7 +26,7 @@ var invalidArtifactNameChars = strings.Join([]string{"\\", "/", "\"", ":", "<",
func validateArtifactName(ctx *ArtifactContext, artifactName string) bool {
	if strings.ContainsAny(artifactName, invalidArtifactNameChars) {
		log.Error("Error checking artifact name contains invalid character")
-		ctx.Error(http.StatusBadRequest, "Error checking artifact name contains invalid character")
+		ctx.HTTPError(http.StatusBadRequest, "Error checking artifact name contains invalid character")
		return false
	}
	return true

@@ -37,7 +37,7 @@ func validateRunID(ctx *ArtifactContext) (*actions.ActionTask, int64, bool) {
	runID := ctx.PathParamInt64("run_id")
	if task.Job.RunID != runID {
		log.Error("Error runID not match")
-		ctx.Error(http.StatusBadRequest, "run-id does not match")
+		ctx.HTTPError(http.StatusBadRequest, "run-id does not match")
		return nil, 0, false
	}
	return task, runID, true

@@ -48,7 +48,7 @@ func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (*actions.ActionTask
	runID, err := strconv.ParseInt(rawRunID, 10, 64)
	if err != nil || task.Job.RunID != runID {
		log.Error("Error runID not match")
-		ctx.Error(http.StatusBadRequest, "run-id does not match")
+		ctx.HTTPError(http.StatusBadRequest, "run-id does not match")
		return nil, 0, false
	}
	return task, runID, true

@@ -62,7 +62,7 @@ func validateArtifactHash(ctx *ArtifactContext, artifactName string) bool {
		return true
	}
	log.Error("Invalid artifact hash: %s", paramHash)
-	ctx.Error(http.StatusBadRequest, "Invalid artifact hash")
+	ctx.HTTPError(http.StatusBadRequest, "Invalid artifact hash")
	return false
}
@@ -25,7 +25,7 @@ package actions
// 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
// 1.4. BlockList xml payload to Blobstorage (unauthenticated request)
-// Files of about 800MB are parallel in parallel and / or out of order, this file is needed to enshure the correct order
+// Files of about 800MB are parallel in parallel and / or out of order, this file is needed to ensure the correct order
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
// Request
// <?xml version="1.0" encoding="UTF-8" standalone="yes"?>

@@ -187,29 +187,29 @@ func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (*a
	expecedsig := r.buildSignature(endp, expires, artifactName, taskID, artifactID)
	if !hmac.Equal(dsig, expecedsig) {
		log.Error("Error unauthorized")
-		ctx.Error(http.StatusUnauthorized, "Error unauthorized")
+		ctx.HTTPError(http.StatusUnauthorized, "Error unauthorized")
		return nil, "", false
	}
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", expires)
	if err != nil || t.Before(time.Now()) {
		log.Error("Error link expired")
-		ctx.Error(http.StatusUnauthorized, "Error link expired")
+		ctx.HTTPError(http.StatusUnauthorized, "Error link expired")
		return nil, "", false
	}
	task, err := actions.GetTaskByID(ctx, taskID)
	if err != nil {
		log.Error("Error runner api getting task by ID: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error runner api getting task by ID")
+		ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task by ID")
		return nil, "", false
	}
	if task.Status != actions.StatusRunning {
		log.Error("Error runner api getting task: task is not running")
-		ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+		ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task: task is not running")
		return nil, "", false
	}
	if err := task.LoadJob(ctx); err != nil {
		log.Error("Error runner api getting job: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error runner api getting job")
+		ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting job")
		return nil, "", false
	}
	return task, artifactName, true
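The check above pairs an HMAC comparison against a recomputed signature with an expiry timestamp parsed via Go's reference-time layout. A self-contained sketch of such a signed, expiring URL parameter (the secret, field encoding, and helper names are assumptions for illustration, not the actual buildSignature implementation):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"time"
)

var secret = []byte("demo-only-secret") // assumed; a server would use a per-install key

// buildSignature binds the endpoint, expiry, and artifact identity together.
func buildSignature(endp, expires, artifactName string, taskID int64) []byte {
	mac := hmac.New(sha256.New, secret)
	fmt.Fprintf(mac, "%s:%s:%s:%d", endp, expires, artifactName, taskID)
	return mac.Sum(nil)
}

// verify checks the HMAC first and only then rejects expired links,
// mirroring the order used in verifySignature above.
func verify(endp, expires, artifactName string, taskID int64, sig string) bool {
	got, err := base64.URLEncoding.DecodeString(sig)
	if err != nil || !hmac.Equal(got, buildSignature(endp, expires, artifactName, taskID)) {
		return false // "Error unauthorized"
	}
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", expires)
	if err != nil || t.Before(time.Now()) {
		return false // "Error link expired"
	}
	return true
}

func main() {
	expires := time.Now().Add(time.Hour).Format("2006-01-02 15:04:05.999999999 -0700 MST")
	sig := base64.URLEncoding.EncodeToString(buildSignature("UploadArtifact", expires, "test", 75))
	fmt.Println(verify("UploadArtifact", expires, "test", 75, sig)) // true
}
```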
@@ -230,13 +230,13 @@ func (r *artifactV4Routes) parseProtbufBody(ctx *ArtifactContext, req protorefle
	body, err := io.ReadAll(ctx.Req.Body)
	if err != nil {
		log.Error("Error decode request body: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error decode request body")
+		ctx.HTTPError(http.StatusInternalServerError, "Error decode request body")
		return false
	}
	err = protojson.Unmarshal(body, req)
	if err != nil {
		log.Error("Error decode request body: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error decode request body")
+		ctx.HTTPError(http.StatusInternalServerError, "Error decode request body")
		return false
	}
	return true

@@ -246,7 +246,7 @@ func (r *artifactV4Routes) sendProtbufBody(ctx *ArtifactContext, req protoreflec
	resp, err := protojson.Marshal(req)
	if err != nil {
		log.Error("Error encode response body: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error encode response body")
+		ctx.HTTPError(http.StatusInternalServerError, "Error encode response body")
		return
	}
	ctx.Resp.Header().Set("Content-Type", "application/json;charset=utf-8")
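Both helpers funnel protobuf messages through `protojson`, which round-trips any `proto.Message`. A self-contained sketch of the same round trip, using the well-known `structpb.Struct` type only because it needs no generated code (the real handlers decode the artifact-service request messages):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	msg, err := structpb.NewStruct(map[string]any{"name": "test", "size": 42})
	if err != nil {
		panic(err)
	}

	// Encode: what sendProtbufBody does before writing the response.
	body, err := protojson.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // e.g. {"name":"test","size":42}

	// Decode: what parseProtbufBody does with the request body.
	out := &structpb.Struct{}
	if err := protojson.Unmarshal(body, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Fields["name"].GetStringValue()) // test
}
```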
@@ -275,7 +275,7 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
	artifact, err := actions.CreateArtifact(ctx, ctx.ActionTask, artifactName, artifactName+".zip", rententionDays)
	if err != nil {
		log.Error("Error create or get artifact: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
+		ctx.HTTPError(http.StatusInternalServerError, "Error create or get artifact")
		return
	}
	artifact.ContentEncoding = ArtifactV4ContentEncoding

@@ -283,7 +283,7 @@ func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
	artifact.FileCompressedSize = 0
	if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
		log.Error("Error UpdateArtifactByID: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
+		ctx.HTTPError(http.StatusInternalServerError, "Error UpdateArtifactByID")
		return
	}

@@ -309,28 +309,28 @@ func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
		artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
		if err != nil {
			log.Error("Error artifact not found: %v", err)
-			ctx.Error(http.StatusNotFound, "Error artifact not found")
+			ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
			return
		}

		_, err = appendUploadChunk(r.fs, ctx, artifact, artifact.FileSize, ctx.Req.ContentLength, artifact.RunID)
		if err != nil {
			log.Error("Error runner api getting task: task is not running")
-			ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+			ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task: task is not running")
			return
		}
		artifact.FileCompressedSize += ctx.Req.ContentLength
		artifact.FileSize += ctx.Req.ContentLength
		if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
			log.Error("Error UpdateArtifactByID: %v", err)
-			ctx.Error(http.StatusInternalServerError, "Error UpdateArtifactByID")
+			ctx.HTTPError(http.StatusInternalServerError, "Error UpdateArtifactByID")
			return
		}
	} else {
		_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/block-%d-%d-%s", task.Job.RunID, task.Job.RunID, ctx.Req.ContentLength, base64.URLEncoding.EncodeToString([]byte(blockid))), ctx.Req.Body, -1)
		if err != nil {
			log.Error("Error runner api getting task: task is not running")
-			ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+			ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task: task is not running")
			return
		}
	}

@@ -341,7 +341,7 @@ func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
	_, err := r.fs.Save(fmt.Sprintf("tmpv4%d/%d-%d-blocklist", task.Job.RunID, task.Job.RunID, artifactID), ctx.Req.Body, -1)
	if err != nil {
		log.Error("Error runner api getting task: task is not running")
-		ctx.Error(http.StatusInternalServerError, "Error runner api getting task: task is not running")
+		ctx.HTTPError(http.StatusInternalServerError, "Error runner api getting task: task is not running")
		return
	}
	ctx.JSON(http.StatusCreated, "created")

@@ -389,7 +389,7 @@ func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
	artifact, err := r.getArtifactByName(ctx, runID, req.Name)
	if err != nil {
		log.Error("Error artifact not found: %v", err)
-		ctx.Error(http.StatusNotFound, "Error artifact not found")
+		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}

@@ -400,20 +400,20 @@ func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
		chunkMap, err := listChunksByRunID(r.fs, runID)
		if err != nil {
			log.Error("Error merge chunks: %v", err)
-			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
			return
		}
		chunks, ok = chunkMap[artifact.ID]
		if !ok {
			log.Error("Error merge chunks")
-			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
			return
		}
	} else {
		chunks, err = listChunksByRunIDV4(r.fs, runID, artifact.ID, blockList)
		if err != nil {
			log.Error("Error merge chunks: %v", err)
-			ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+			ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
			return
		}
		artifact.FileSize = chunks[len(chunks)-1].End + 1

@@ -426,7 +426,7 @@ func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
	}
	if err := mergeChunksForArtifact(ctx, chunks, r.fs, artifact, checksum); err != nil {
		log.Error("Error merge chunks: %v", err)
-		ctx.Error(http.StatusInternalServerError, "Error merge chunks")
+		ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks")
		return
	}

@@ -451,12 +451,12 @@ func (r *artifactV4Routes) listArtifacts(ctx *ArtifactContext) {
	artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{RunID: runID})
	if err != nil {
		log.Error("Error getting artifacts: %v", err)
-		ctx.Error(http.StatusInternalServerError, err.Error())
+		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
	if len(artifacts) == 0 {
		log.Debug("[artifact] handleListArtifacts, no artifacts")
-		ctx.Error(http.StatusNotFound)
+		ctx.HTTPError(http.StatusNotFound)
		return
	}

@@ -507,7 +507,7 @@ func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
	artifact, err := r.getArtifactByName(ctx, runID, artifactName)
	if err != nil {
		log.Error("Error artifact not found: %v", err)
-		ctx.Error(http.StatusNotFound, "Error artifact not found")
+		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}

@@ -535,7 +535,7 @@ func (r *artifactV4Routes) downloadArtifact(ctx *ArtifactContext) {
	artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName)
	if err != nil {
		log.Error("Error artifact not found: %v", err)
-		ctx.Error(http.StatusNotFound, "Error artifact not found")
+		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}

@@ -559,14 +559,14 @@ func (r *artifactV4Routes) deleteArtifact(ctx *ArtifactContext) {
	artifact, err := r.getArtifactByName(ctx, runID, req.Name)
	if err != nil {
		log.Error("Error artifact not found: %v", err)
-		ctx.Error(http.StatusNotFound, "Error artifact not found")
+		ctx.HTTPError(http.StatusNotFound, "Error artifact not found")
		return
	}

	err = actions.SetArtifactNeedDelete(ctx, runID, req.Name)
	if err != nil {
		log.Error("Error deleting artifacts: %v", err)
-		ctx.Error(http.StatusInternalServerError, err.Error())
+		ctx.HTTPError(http.StatusInternalServerError, err.Error())
		return
	}
@@ -156,7 +156,7 @@ func (s *Service) FetchTask(
	// if the task version in request is not equal to the version in db,
	// it means there may still be some tasks not yet assigned.
	// try to pick a task for the runner that sent the request.
-	if t, ok, err := pickTask(ctx, runner); err != nil {
+	if t, ok, err := actions_service.PickTask(ctx, runner); err != nil {
		log.Error("pick task failed: %v", err)
		return nil, status.Errorf(codes.Internal, "pick task: %v", err)
	} else if ok {
@@ -1,95 +0,0 @@
-// Copyright 2022 The Gitea Authors. All rights reserved.
-// SPDX-License-Identifier: MIT
-
-package runner
-
-import (
-	"context"
-	"fmt"
-
-	actions_model "code.gitea.io/gitea/models/actions"
-	secret_model "code.gitea.io/gitea/models/secret"
-	"code.gitea.io/gitea/modules/log"
-	"code.gitea.io/gitea/services/actions"
-
-	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
-	"google.golang.org/protobuf/types/known/structpb"
-)
-
-func pickTask(ctx context.Context, runner *actions_model.ActionRunner) (*runnerv1.Task, bool, error) {
-	t, ok, err := actions_model.CreateTaskForRunner(ctx, runner)
-	if err != nil {
-		return nil, false, fmt.Errorf("CreateTaskForRunner: %w", err)
-	}
-	if !ok {
-		return nil, false, nil
-	}
-
-	secrets, err := secret_model.GetSecretsOfTask(ctx, t)
-	if err != nil {
-		return nil, false, fmt.Errorf("GetSecretsOfTask: %w", err)
-	}
-
-	vars, err := actions_model.GetVariablesOfRun(ctx, t.Job.Run)
-	if err != nil {
-		return nil, false, fmt.Errorf("GetVariablesOfRun: %w", err)
-	}
-
-	actions.CreateCommitStatus(ctx, t.Job)
-
-	task := &runnerv1.Task{
-		Id:              t.ID,
-		WorkflowPayload: t.Job.WorkflowPayload,
-		Context:         generateTaskContext(t),
-		Secrets:         secrets,
-		Vars:            vars,
-	}
-
-	if needs, err := findTaskNeeds(ctx, t); err != nil {
-		log.Error("Cannot find needs for task %v: %v", t.ID, err)
-		// Go on with empty needs.
-		// If return error, the task will be wild, which means the runner will never get it when it has been assigned to the runner.
-		// In contrast, missing needs is less serious.
-		// And the task will fail and the runner will report the error in the logs.
-	} else {
-		task.Needs = needs
-	}
-
-	return task, true, nil
-}
-
-func generateTaskContext(t *actions_model.ActionTask) *structpb.Struct {
-	giteaRuntimeToken, err := actions.CreateAuthorizationToken(t.ID, t.Job.RunID, t.JobID)
-	if err != nil {
-		log.Error("actions.CreateAuthorizationToken failed: %v", err)
-	}
-
-	gitCtx := actions.GenerateGiteaContext(t.Job.Run, t.Job)
-	gitCtx["token"] = t.Token
-	gitCtx["gitea_runtime_token"] = giteaRuntimeToken
-
-	taskContext, err := structpb.NewStruct(gitCtx)
-	if err != nil {
-		log.Error("structpb.NewStruct failed: %v", err)
-	}
-
-	return taskContext
-}
-
-func findTaskNeeds(ctx context.Context, task *actions_model.ActionTask) (map[string]*runnerv1.TaskNeed, error) {
-	if err := task.LoadAttributes(ctx); err != nil {
-		return nil, fmt.Errorf("task LoadAttributes: %w", err)
-	}
-	taskNeeds, err := actions.FindTaskNeeds(ctx, task.Job)
-	if err != nil {
-		return nil, err
-	}
-	ret := make(map[string]*runnerv1.TaskNeed, len(taskNeeds))
-	for jobID, taskNeed := range taskNeeds {
-		ret[jobID] = &runnerv1.TaskNeed{
-			Outputs: taskNeed.Outputs,
-			Result:  runnerv1.Result(taskNeed.Result),
-		}
-	}
-	return ret, nil
-}
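The file is removed rather than lost: the `FetchTask` hunk above now calls `actions_service.PickTask`, so the helper has evidently moved to the `services/actions` package as an exported function; the new file itself is not shown in this truncated diff. Its signature can be inferred from the call site (the body below is a placeholder, not the relocated code):

```go
// Assumed signature of the relocated helper, inferred from the new call site
// `actions_service.PickTask(ctx, runner)`; the real body is not shown in this diff.
package actions

import (
	"context"

	actions_model "code.gitea.io/gitea/models/actions"

	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
)

func PickTask(ctx context.Context, runner *actions_model.ActionRunner) (*runnerv1.Task, bool, error) {
	// ... presumably the same logic as the deleted pickTask, now shared outside the router package.
	return nil, false, nil
}
```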
@@ -49,32 +49,32 @@ func reqPackageAccess(accessMode perm.AccessMode) func(ctx *context.Context) {
	if accessMode == perm.AccessModeRead {
		scopeMatched, err = scope.HasScope(auth_model.AccessTokenScopeReadPackage)
		if err != nil {
-			ctx.Error(http.StatusInternalServerError, "HasScope", err.Error())
+			ctx.HTTPError(http.StatusInternalServerError, "HasScope", err.Error())
			return
		}
	} else if accessMode == perm.AccessModeWrite {
		scopeMatched, err = scope.HasScope(auth_model.AccessTokenScopeWritePackage)
		if err != nil {
-			ctx.Error(http.StatusInternalServerError, "HasScope", err.Error())
+			ctx.HTTPError(http.StatusInternalServerError, "HasScope", err.Error())
			return
		}
	}
	if !scopeMatched {
		ctx.Resp.Header().Set("WWW-Authenticate", `Basic realm="Gitea Package API"`)
-		ctx.Error(http.StatusUnauthorized, "reqPackageAccess", "user should have specific permission or be a site admin")
+		ctx.HTTPError(http.StatusUnauthorized, "reqPackageAccess", "user should have specific permission or be a site admin")
		return
	}

	// check if scope only applies to public resources
	publicOnly, err := scope.PublicOnly()
	if err != nil {
-		ctx.Error(http.StatusForbidden, "tokenRequiresScope", "parsing public resource scope failed: "+err.Error())
+		ctx.HTTPError(http.StatusForbidden, "tokenRequiresScope", "parsing public resource scope failed: "+err.Error())
		return
	}

	if publicOnly {
		if ctx.Package != nil && ctx.Package.Owner.Visibility.IsPrivate() {
-			ctx.Error(http.StatusForbidden, "reqToken", "token scope is limited to public packages")
+			ctx.HTTPError(http.StatusForbidden, "reqToken", "token scope is limited to public packages")
			return
		}
	}

@@ -83,7 +83,7 @@ func reqPackageAccess(accessMode perm.AccessMode) func(ctx *context.Context) {

	if ctx.Package.AccessMode < accessMode && !ctx.IsUserSiteAdmin() {
		ctx.Resp.Header().Set("WWW-Authenticate", `Basic realm="Gitea Package API"`)
-		ctx.Error(http.StatusUnauthorized, "reqPackageAccess", "user should have specific permission or be a site admin")
+		ctx.HTTPError(http.StatusUnauthorized, "reqPackageAccess", "user should have specific permission or be a site admin")
		return
	}
}

@@ -100,7 +100,7 @@ func verifyAuth(r *web.Router, authMethods []auth.Method) {
	ctx.Doer, err = authGroup.Verify(ctx.Req, ctx.Resp, ctx, ctx.Session)
	if err != nil {
		log.Error("Failed to verify user: %v", err)
-		ctx.Error(http.StatusUnauthorized, "authGroup.Verify")
+		ctx.HTTPError(http.StatusUnauthorized, "authGroup.Verify")
		return
	}
	ctx.IsSigned = ctx.Doer != nil
@@ -41,14 +41,14 @@ func Person(ctx *context.APIContext) {
	person.Name = ap.NaturalLanguageValuesNew()
	err := person.Name.Set("en", ap.Content(ctx.ContextUser.FullName))
	if err != nil {
-		ctx.ServerError("Set Name", err)
+		ctx.APIErrorInternal(err)
		return
	}

	person.PreferredUsername = ap.NaturalLanguageValuesNew()
	err = person.PreferredUsername.Set("en", ap.Content(ctx.ContextUser.Name))
	if err != nil {
-		ctx.ServerError("Set PreferredUsername", err)
+		ctx.APIErrorInternal(err)
		return
	}

@@ -68,14 +68,14 @@ func Person(ctx *context.APIContext) {

	publicKeyPem, err := activitypub.GetPublicKey(ctx, ctx.ContextUser)
	if err != nil {
-		ctx.ServerError("GetPublicKey", err)
+		ctx.APIErrorInternal(err)
		return
	}
	person.PublicKey.PublicKeyPem = publicKeyPem

	binary, err := jsonld.WithContext(jsonld.IRI(ap.ActivityBaseURI), jsonld.IRI(ap.SecurityContextURI)).Marshal(person)
	if err != nil {
-		ctx.ServerError("MarshalJSON", err)
+		ctx.APIErrorInternal(err)
		return
	}
	ctx.Resp.Header().Add("Content-Type", activitypub.ActivityStreamsContentType)

@@ -89,9 +89,9 @@ func verifyHTTPSignatures(ctx *gitea_context.APIContext) (authenticated bool, er
func ReqHTTPSignature() func(ctx *gitea_context.APIContext) {
	return func(ctx *gitea_context.APIContext) {
		if authenticated, err := verifyHTTPSignatures(ctx); err != nil {
-			ctx.ServerError("verifyHttpSignatures", err)
+			ctx.APIErrorInternal(err)
		} else if !authenticated {
-			ctx.Error(http.StatusForbidden, "reqSignature", "request signature verification failed")
+			ctx.APIError(http.StatusForbidden, "request signature verification failed")
		}
	}
}
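The ActivityPub handlers show a second consolidation: `ctx.ServerError(title, err)` collapses into `ctx.APIErrorInternal(err)`, and the three-argument `ctx.Error(status, title, msg)` into `ctx.APIError(status, msg)`, dropping the title argument everywhere. A sketch of the apparent division of labor between the two (assumed shapes, not Gitea's real API context):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
)

type APIContext struct{ Resp http.ResponseWriter }

// APIError reports a caller-chosen status and message to the client,
// e.g. 403 when a request signature fails verification.
func (ctx *APIContext) APIError(status int, msg string) {
	http.Error(ctx.Resp, msg, status)
}

// APIErrorInternal logs the real error but exposes only a generic 500,
// so internal details never leak into API responses.
func (ctx *APIContext) APIErrorInternal(err error) {
	log.Printf("internal error: %v", err)
	http.Error(ctx.Resp, "internal server error", http.StatusInternalServerError)
}

func main() {
	rec := httptest.NewRecorder()
	ctx := &APIContext{Resp: rec}
	ctx.APIError(http.StatusForbidden, "request signature verification failed")
	fmt.Println(rec.Code) // 403
}
```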
Some files were not shown because too many files have changed in this diff.