Mirror of https://github.com/go-gitea/gitea.git (synced 2025-02-20 11:43:57 +08:00)

Compare commits: ec08505414...e44ddf0310 (17 commits)

Commits in this range:
e44ddf0310
c2e23d3301
84d2159ef6
e20a865471
6536f219ad
f87a5941c6
e989642bc3
9faa109ef3
cbb7b7252b
b02aa8e493
3c4e4fd2a2
6325a14e08
7bdf6869a7
293698d75d
ae6370f8f1
cd1c6657a9
a94e494d53
@@ -152,7 +152,7 @@ func (p *Permission) ReadableUnitTypes() []unit.Type {
 }
 
 func (p *Permission) LogString() string {
-	format := "<Permission AccessMode=%s, %d Units, %d UnitsMode(s): [ "
+	format := "<Permission AccessMode=%s, %d Units, %d UnitsMode(s): ["
 	args := []any{p.AccessMode.ToString(), len(p.units), len(p.unitsMode)}
 
 	for i, u := range p.units {

@@ -164,14 +164,16 @@ func (p *Permission) LogString() string {
 				config = err.Error()
 			}
 		}
-		format += "\nUnits[%d]: ID: %d RepoID: %d Type: %s Config: %s"
+		format += "\n\tunits[%d]: ID=%d RepoID=%d Type=%s Config=%s"
 		args = append(args, i, u.ID, u.RepoID, u.Type.LogString(), config)
 	}
 	for key, value := range p.unitsMode {
-		format += "\nUnitMode[%-v]: %-v"
+		format += "\n\tunitsMode[%-v]: %-v"
 		args = append(args, key.LogString(), value.LogString())
 	}
-	format += " ]>"
+	format += "\n\teveryoneAccessMode: %-v"
+	args = append(args, p.everyoneAccessMode)
+	format += "\n\t]>"
 	return fmt.Sprintf(format, args...)
 }
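The LogString change above grows a format string and a matching args slice in parallel, then renders both with a single fmt.Sprintf; the new layout puts each unit on its own tab-indented line and appends everyoneAccessMode before the closing bracket. A minimal standalone sketch of that accumulate-format-and-args technique (hypothetical Report/Item types, not Gitea's permission structs):

    package main

    import "fmt"

    // Item and Report are hypothetical stand-ins for Gitea's unit/permission types.
    type Item struct {
    	ID   int64
    	Name string
    }

    type Report struct {
    	Mode  string
    	Items []Item
    }

    // LogString builds a multi-line, tab-indented description by growing a
    // format string and an args slice in parallel, then formatting once.
    func (r Report) LogString() string {
    	format := "<Report Mode=%s, %d Items: ["
    	args := []any{r.Mode, len(r.Items)}
    	for i, it := range r.Items {
    		format += "\n\titems[%d]: ID=%d Name=%s"
    		args = append(args, i, it.ID, it.Name)
    	}
    	format += "\n\t]>"
    	return fmt.Sprintf(format, args...)
    }

    func main() {
    	r := Report{Mode: "read", Items: []Item{{ID: 1, Name: "code"}, {ID: 2, Name: "issues"}}}
    	fmt.Println(r.LogString())
    }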
@@ -55,17 +55,16 @@ func ListWorkflows(commit *git.Commit) (git.Entries, error) {
 		return nil, err
 	}
 
-	entries, err := tree.ListEntriesRecursiveFast()
-	if err != nil {
-		return nil, err
-	}
-
-	ret := make(git.Entries, 0, len(entries))
-	for _, entry := range entries {
+	ret := make(git.Entries, 0, 5)
+	if err := tree.IterateEntriesRecursive(func(entry *git.TreeEntry) error {
 		if strings.HasSuffix(entry.Name(), ".yml") || strings.HasSuffix(entry.Name(), ".yaml") {
 			ret = append(ret, entry)
 		}
+		return nil
+	}, nil); err != nil {
+		return nil, err
 	}
 
 	return ret, nil
 }
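The rewritten ListWorkflows no longer materializes every tree entry before filtering: the callback appends only .yml/.yaml entries while the walk is in progress, starting from a small capacity instead of len(entries). A rough sketch of that filter-during-iteration shape over plain strings (generic iterator, not Gitea's git API):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // iterateNames is a stand-in for a callback-based tree walk such as
    // Tree.IterateEntriesRecursive in the hunk above.
    func iterateNames(names []string, f func(name string) error) error {
    	for _, name := range names {
    		if err := f(name); err != nil {
    			return err
    		}
    	}
    	return nil
    }

    func listWorkflowNames(names []string) ([]string, error) {
    	// Small initial capacity: most repositories have only a handful of
    	// workflow files, so there is no need to size the slice to the full list.
    	ret := make([]string, 0, 5)
    	if err := iterateNames(names, func(name string) error {
    		if strings.HasSuffix(name, ".yml") || strings.HasSuffix(name, ".yaml") {
    			ret = append(ret, name)
    		}
    		return nil
    	}); err != nil {
    		return nil, err
    	}
    	return ret, nil
    }

    func main() {
    	names := []string{"ci.yml", "README.md", "release.yaml", "main.go"}
    	got, _ := listWorkflowNames(names)
    	fmt.Println(got) // [ci.yml release.yaml]
    }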
@@ -22,6 +22,13 @@ func ParseTreeEntries(data []byte) ([]*TreeEntry, error) {
 // parseTreeEntries FIXME this function's design is not right, it should make the caller read all data into memory
 func parseTreeEntries(data []byte, ptree *Tree) ([]*TreeEntry, error) {
 	entries := make([]*TreeEntry, 0, bytes.Count(data, []byte{'\n'})+1)
+	return entries, iterateTreeEntries(data, ptree, func(entry *TreeEntry) error {
+		entries = append(entries, entry)
+		return nil
+	})
+}
+
+func iterateTreeEntries(data []byte, ptree *Tree, f func(entry *TreeEntry) error) error {
 	for pos := 0; pos < len(data); {
 		posEnd := bytes.IndexByte(data[pos:], '\n')
 		if posEnd == -1 {

@@ -33,7 +40,7 @@ func parseTreeEntries(data []byte, ptree *Tree) ([]*TreeEntry, error) {
 		line := data[pos:posEnd]
 		lsTreeLine, err := parseLsTreeLine(line)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		entry := &TreeEntry{
 			ptree: ptree,

@@ -44,9 +51,11 @@ func parseTreeEntries(data []byte, ptree *Tree) ([]*TreeEntry, error) {
 			sized: lsTreeLine.Size.Has(),
 		}
 		pos = posEnd + 1
-		entries = append(entries, entry)
+		if err := f(entry); err != nil {
+			return err
+		}
 	}
-	return entries, nil
+	return nil
 }
 
 func catBatchParseTreeEntries(objectFormat ObjectFormat, ptree *Tree, rd *bufio.Reader, sz int64) ([]*TreeEntry, error) {
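parseTreeEntries keeps its slice-returning signature by collecting inside the callback it hands to the new iterateTreeEntries. A simplified sketch of that iterate-plus-collect wrapper over newline-separated input (plain strings instead of ls-tree lines; the error is captured before returning):

    package main

    import (
    	"bytes"
    	"fmt"
    )

    // iterateLines walks newline-separated data and hands each line to f,
    // mirroring the shape of iterateTreeEntries in the hunks above.
    func iterateLines(data []byte, f func(line []byte) error) error {
    	for pos := 0; pos < len(data); {
    		end := bytes.IndexByte(data[pos:], '\n')
    		if end == -1 {
    			end = len(data)
    		} else {
    			end += pos
    		}
    		if err := f(data[pos:end]); err != nil {
    			return err
    		}
    		pos = end + 1
    	}
    	return nil
    }

    // parseLines keeps a slice-returning signature by collecting inside the callback.
    func parseLines(data []byte) ([]string, error) {
    	lines := make([]string, 0, bytes.Count(data, []byte{'\n'})+1)
    	err := iterateLines(data, func(line []byte) error {
    		lines = append(lines, string(line))
    		return nil
    	})
    	return lines, err
    }

    func main() {
    	lines, err := parseLines([]byte("100644 blob a1b2 README.md\n040000 tree c3d4 docs"))
    	fmt.Println(lines, err)
    }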
@@ -57,11 +57,6 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err
 
 	tree := commit.Tree
 
-	entries, err := tree.ListEntriesRecursiveWithSize()
-	if err != nil {
-		return nil, err
-	}
-
 	checker, deferable := repo.CheckAttributeReader(commitID)
 	defer deferable()
 

@@ -77,18 +72,12 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err
 	firstExcludedLanguage := ""
 	firstExcludedLanguageSize := int64(0)
 
-	for _, f := range entries {
-		select {
-		case <-repo.Ctx.Done():
-			return sizes, repo.Ctx.Err()
-		default:
-		}
-
+	if err := tree.IterateEntriesRecursive(func(f *TreeEntry) error {
 		contentBuf.Reset()
 		content = contentBuf.Bytes()
 
 		if f.Size() == 0 {
-			continue
+			return nil
 		}
 
 		isVendored := optional.None[bool]()

@@ -101,22 +90,22 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err
 			if err == nil {
 				isVendored = AttributeToBool(attrs, AttributeLinguistVendored)
 				if isVendored.ValueOrDefault(false) {
-					continue
+					return nil
 				}
 
 				isGenerated = AttributeToBool(attrs, AttributeLinguistGenerated)
 				if isGenerated.ValueOrDefault(false) {
-					continue
+					return nil
 				}
 
 				isDocumentation = AttributeToBool(attrs, AttributeLinguistDocumentation)
 				if isDocumentation.ValueOrDefault(false) {
-					continue
+					return nil
 				}
 
 				isDetectable = AttributeToBool(attrs, AttributeLinguistDetectable)
 				if !isDetectable.ValueOrDefault(true) {
-					continue
+					return nil
 				}
 
 				hasLanguage := TryReadLanguageAttribute(attrs)

@@ -131,7 +120,7 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err
 
 					// this language will always be added to the size
 					sizes[language] += f.Size()
-					continue
+					return nil
 				}
 			}
 		}

@@ -140,19 +129,19 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err
 			enry.IsDotFile(f.Name()) ||
 			(!isDocumentation.Has() && enry.IsDocumentation(f.Name())) ||
 			enry.IsConfiguration(f.Name()) {
-			continue
+			return nil
 		}
 
 		// If content can not be read or file is too big just do detection by filename
 
 		if f.Size() <= bigFileSize {
 			if err := writeID(f.ID.String()); err != nil {
-				return nil, err
+				return err
 			}
 			_, _, size, err := ReadBatchLine(batchReader)
 			if err != nil {
 				log.Debug("Error reading blob: %s Err: %v", f.ID.String(), err)
-				return nil, err
+				return err
 			}
 
 			sizeToRead := size

@@ -164,22 +153,22 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err
 
 			_, err = contentBuf.ReadFrom(io.LimitReader(batchReader, sizeToRead))
 			if err != nil {
-				return nil, err
+				return err
 			}
 			content = contentBuf.Bytes()
 			if err := DiscardFull(batchReader, discard); err != nil {
-				return nil, err
+				return err
 			}
 		}
 		if !isGenerated.Has() && enry.IsGenerated(f.Name(), content) {
-			continue
+			return nil
 		}
 
 		// FIXME: Why can't we split this and the IsGenerated tests to avoid reading the blob unless absolutely necessary?
 		// - eg. do the all the detection tests using filename first before reading content.
 		language := analyze.GetCodeLanguage(f.Name(), content)
 		if language == "" {
-			continue
+			return nil
 		}
 
 		// group languages, such as Pug -> HTML; SCSS -> CSS

@@ -200,6 +189,9 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err
 			firstExcludedLanguage = language
 			firstExcludedLanguageSize += f.Size()
 		}
+		return nil
+	}, TrustedCmdArgs{"--long"}); err != nil { // add --long to get size
+		return sizes, err
 	}
 
 	// If there are no included languages add the first excluded language
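Moving the body of the old for-loop into the closure passed to IterateEntriesRecursive changes the control-flow keywords: `continue` becomes `return nil` (skip this entry) and `return nil, err` becomes `return err` (abort the whole walk, surfaced by the outer if). A tiny illustration of that mapping with a hypothetical integer walk:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // iterate is a stand-in for a callback-based walk; returning an error from
    // the callback aborts it, returning nil moves on to the next element.
    func iterate(nums []int, f func(n int) error) error {
    	for _, n := range nums {
    		if err := f(n); err != nil {
    			return err
    		}
    	}
    	return nil
    }

    func sumPositive(nums []int) (int, error) {
    	total := 0
    	err := iterate(nums, func(n int) error {
    		if n == 0 {
    			return nil // was "continue" in the loop version: skip this element
    		}
    		if n < 0 {
    			return errors.New("negative value") // was "return nil, err": abort the walk
    		}
    		total += n
    		return nil
    	})
    	return total, err
    }

    func main() {
    	fmt.Println(sumPositive([]int{1, 0, 2, 3})) // 6 <nil>
    	fmt.Println(sumPositive([]int{1, -1, 2}))   // 1 negative value
    }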
@@ -58,6 +58,24 @@ func (t *Tree) ListEntries() (Entries, error) {
 
 // ListEntriesRecursiveWithSize returns all entries of current tree recursively including all subtrees
 func (t *Tree) ListEntriesRecursiveWithSize() (Entries, error) {
+	var entries []*TreeEntry
+	if err := t.IterateEntriesRecursive(func(entry *TreeEntry) error {
+		entries = append(entries, entry)
+		return nil
+	}, nil); err != nil {
+		return nil, err
+	}
+	return entries, nil
+}
+
+// ListEntriesRecursiveFast is the alias of ListEntriesRecursiveWithSize for the gogit version
+func (t *Tree) ListEntriesRecursiveFast() (Entries, error) {
+	return t.ListEntriesRecursiveWithSize()
+}
+
+// IterateEntriesRecursive returns iterate entries of current tree recursively including all subtrees
+// extraArgs could be "-l" to get the size, which is slower
+func (t *Tree) IterateEntriesRecursive(f func(entry *TreeEntry) error, extraArgs TrustedCmdArgs) error {
 	if t.gogitTree == nil {
 		err := t.loadTreeObject()
 		if err != nil {

@@ -65,7 +83,6 @@ func (t *Tree) ListEntriesRecursiveWithSize() (Entries, error) {
 		}
 	}
 
-	var entries []*TreeEntry
 	seen := map[plumbing.Hash]bool{}
 	walker := object.NewTreeWalker(t.gogitTree, true, seen)
 	for {

@@ -86,13 +103,10 @@ func (t *Tree) ListEntriesRecursiveWithSize() (Entries, error) {
 			ptree:    t,
 			fullName: fullName,
 		}
-		entries = append(entries, convertedEntry)
+		if err := f(convertedEntry); err != nil {
+			return err
+		}
 	}
 
-	return entries, nil
-}
-
-// ListEntriesRecursiveFast is the alias of ListEntriesRecursiveWithSize for the gogit version
-func (t *Tree) ListEntriesRecursiveFast() (Entries, error) {
-	return t.ListEntriesRecursiveWithSize()
+	return nil
 }
@@ -6,6 +6,7 @@
 package git
 
 import (
+	"bufio"
 	"io"
 	"strings"
 )

@@ -96,21 +97,17 @@ func (t *Tree) listEntriesRecursive(extraArgs TrustedCmdArgs) (Entries, error) {
 		return t.entriesRecursive, nil
 	}
 
-	stdout, _, runErr := NewCommand(t.repo.Ctx, "ls-tree", "-t", "-r").
-		AddArguments(extraArgs...).
-		AddDynamicArguments(t.ID.String()).
-		RunStdBytes(&RunOpts{Dir: t.repo.Path})
-	if runErr != nil {
-		return nil, runErr
+	t.entriesRecursive = make([]*TreeEntry, 0)
+	if err := t.IterateEntriesRecursive(func(entry *TreeEntry) error {
+		t.entriesRecursive = append(t.entriesRecursive, entry)
+		return nil
+	}, extraArgs); err != nil {
+		t.entriesRecursive = nil
+		return nil, err
 	}
 
-	var err error
-	t.entriesRecursive, err = parseTreeEntries(stdout, t)
-	if err == nil {
-		t.entriesRecursiveParsed = true
-	}
-
-	return t.entriesRecursive, err
+	t.entriesRecursiveParsed = true
+	return t.entriesRecursive, nil
 }
 
 // ListEntriesRecursiveFast returns all entries of current tree recursively including all subtrees, no size

@@ -122,3 +119,50 @@ func (t *Tree) ListEntriesRecursiveFast() (Entries, error) {
 func (t *Tree) ListEntriesRecursiveWithSize() (Entries, error) {
 	return t.listEntriesRecursive(TrustedCmdArgs{"--long"})
 }
+
+// IterateEntriesRecursive returns iterate entries of current tree recursively including all subtrees
+// extraArgs could be "-l" to get the size, which is slower
+func (t *Tree) IterateEntriesRecursive(f func(entry *TreeEntry) error, extraArgs TrustedCmdArgs) error {
+	reader, writer := io.Pipe()
+	done := make(chan error)
+
+	go func(t *Tree, done chan error, writer *io.PipeWriter) {
+		runErr := NewCommand(t.repo.Ctx, "ls-tree", "-t", "-r").
+			AddArguments(extraArgs...).
+			AddDynamicArguments(t.ID.String()).
+			Run(&RunOpts{
+				Dir:    t.repo.Path,
+				Stdout: writer,
+			})
+
+		_ = writer.Close()
+
+		done <- runErr
+	}(t, done, writer)
+
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		if err := scanner.Err(); err != nil {
+			return err
+		}
+
+		data := scanner.Bytes()
+		if err := iterateTreeEntries(data, t, func(entry *TreeEntry) error {
+			if err := f(entry); err != nil {
+				return err
+			}
+
+			select {
+			case <-t.repo.Ctx.Done():
+				return t.repo.Ctx.Err()
+			case runErr := <-done:
+				return runErr
+			default:
+				return nil
+			}
+		}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
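The nogogit IterateEntriesRecursive above streams `git ls-tree` output through an io.Pipe: a goroutine writes the command's stdout into the pipe and sends the exit error on a channel, while the caller scans the read end line by line. A self-contained sketch of that plumbing with a plain in-process producer standing in for the git command:

    package main

    import (
    	"bufio"
    	"fmt"
    	"io"
    )

    // produce stands in for the goroutine that runs "git ls-tree" with its
    // stdout attached to the pipe writer in the hunk above.
    func produce(writer *io.PipeWriter, done chan<- error) {
    	var err error
    	for _, line := range []string{"100644 blob a1 README.md", "040000 tree b2 docs"} {
    		if _, err = fmt.Fprintln(writer, line); err != nil {
    			break
    		}
    	}
    	_ = writer.Close() // closing the writer lets the reader see EOF
    	done <- err
    }

    func main() {
    	reader, writer := io.Pipe()
    	done := make(chan error, 1)
    	go produce(writer, done)

    	scanner := bufio.NewScanner(reader)
    	for scanner.Scan() {
    		fmt.Println("line:", scanner.Text())
    	}
    	if err := scanner.Err(); err != nil {
    		fmt.Println("scan error:", err)
    	}
    	if err := <-done; err != nil {
    		fmt.Println("producer error:", err)
    	}
    }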
@@ -5,6 +5,7 @@ package log
 
 import (
 	"context"
+	"reflect"
 	"runtime"
 	"strings"
 	"sync"

@@ -175,6 +176,20 @@ func (l *LoggerImpl) IsEnabled() bool {
 	return l.level.Load() < int32(FATAL) && len(l.eventWriters) > 0
 }
 
+func asLogStringer(v any) LogStringer {
+	if s, ok := v.(LogStringer); ok {
+		return s
+	} else if a := reflect.ValueOf(v); a.Kind() == reflect.Struct {
+		// in case the receiver is a pointer, but the value is a struct
+		vp := reflect.New(a.Type())
+		vp.Elem().Set(a)
+		if s, ok := vp.Interface().(LogStringer); ok {
+			return s
+		}
+	}
+	return nil
+}
+
 // Log prepares the log event, if the level matches, the event will be sent to the writers
 func (l *LoggerImpl) Log(skip int, level Level, format string, logArgs ...any) {
 	if Level(l.level.Load()) > level {

@@ -207,11 +222,11 @@ func (l *LoggerImpl) Log(skip int, level Level, format string, logArgs ...any) {
 	// handle LogStringer values
 	for i, v := range msgArgs {
 		if cv, ok := v.(*ColoredValue); ok {
-			if s, ok := cv.v.(LogStringer); ok {
-				cv.v = logStringFormatter{v: s}
+			if ls := asLogStringer(cv.v); ls != nil {
+				cv.v = logStringFormatter{v: ls}
 			}
-		} else if s, ok := v.(LogStringer); ok {
-			msgArgs[i] = logStringFormatter{v: s}
+		} else if ls := asLogStringer(v); ls != nil {
+			msgArgs[i] = logStringFormatter{v: ls}
 		}
 	}
 
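asLogStringer covers the case where LogString is declared on a pointer receiver but the argument arrives as a struct value: the value itself does not satisfy the interface, so the helper builds an addressable copy via reflect.New and asserts the interface on the resulting pointer. A standalone sketch of that reflect technique (hypothetical record type):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type LogStringer interface{ LogString() string }

    type record struct{ Name string }

    // LogString has a pointer receiver, so a plain record value
    // does not implement LogStringer; only *record does.
    func (r *record) LogString() string { return "record:" + r.Name }

    func asLogStringer(v any) LogStringer {
    	if s, ok := v.(LogStringer); ok {
    		return s
    	}
    	if a := reflect.ValueOf(v); a.Kind() == reflect.Struct {
    		// Make an addressable copy, then try the interface on its pointer.
    		vp := reflect.New(a.Type())
    		vp.Elem().Set(a)
    		if s, ok := vp.Interface().(LogStringer); ok {
    			return s
    		}
    	}
    	return nil
    }

    func main() {
    	fmt.Println(asLogStringer(record{Name: "a"}).LogString())  // record:a
    	fmt.Println(asLogStringer(&record{Name: "b"}).LogString()) // record:b
    	fmt.Println(asLogStringer(42))                             // <nil>
    }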
@@ -116,6 +116,14 @@ func (t testLogString) LogString() string {
 	return "log-string"
 }
 
+type testLogStringPtrReceiver struct {
+	Field string
+}
+
+func (t *testLogStringPtrReceiver) LogString() string {
+	return "log-string-ptr-receiver"
+}
+
 func TestLoggerLogString(t *testing.T) {
 	logger := NewLoggerWithWriters(context.Background(), "test")
 

@@ -124,9 +132,13 @@ func TestLoggerLogString(t *testing.T) {
 	logger.AddWriters(w1)
 
 	logger.Info("%s %s %#v %v", testLogString{}, &testLogString{}, testLogString{Field: "detail"}, NewColoredValue(testLogString{}, FgRed))
+	logger.Info("%s %s %#v %v", testLogStringPtrReceiver{}, &testLogStringPtrReceiver{}, testLogStringPtrReceiver{Field: "detail"}, NewColoredValue(testLogStringPtrReceiver{}, FgRed))
 	logger.Close()
 
-	assert.Equal(t, []string{"log-string log-string log.testLogString{Field:\"detail\"} \x1b[31mlog-string\x1b[0m\n"}, w1.GetLogs())
+	assert.Equal(t, []string{
+		"log-string log-string log.testLogString{Field:\"detail\"} \x1b[31mlog-string\x1b[0m\n",
+		"log-string-ptr-receiver log-string-ptr-receiver &log.testLogStringPtrReceiver{Field:\"detail\"} \x1b[31mlog-string-ptr-receiver\x1b[0m\n",
+	}, w1.GetLogs())
 }
 
 func TestLoggerExpressionFilter(t *testing.T) {
@@ -1702,7 +1702,9 @@ issues.time_estimate_invalid = Time estimate format is invalid
 issues.start_tracking_history = started working %s
 issues.tracker_auto_close = Timer will be stopped automatically when this issue gets closed
 issues.tracking_already_started = `You have already started time tracking on <a href="%s">another issue</a>!`
+issues.stop_tracking = Stop Timer
 issues.stop_tracking_history = worked for <b>%[1]s</b> %[2]s
+issues.cancel_tracking = Discard
 issues.cancel_tracking_history = `canceled time tracking %s`
 issues.del_time = Delete this time log
 issues.add_time_history = added spent time <b>%[1]s</b> %[2]s
@@ -1196,6 +1196,10 @@ func registerRoutes(m *web.Router) {
 			})
 		})
 	}
+	// FIXME: many "pulls" requests are sent to "issues" endpoints incorrectly, so the issue endpoints have to tolerate pull request permissions at the moment
+	m.Group("/{username}/{reponame}/{type:issues}", addIssuesPullsViewRoutes, optSignIn, context.RepoAssignment, context.RequireUnitReader(unit.TypeIssues, unit.TypePullRequests))
+	m.Group("/{username}/{reponame}/{type:pulls}", addIssuesPullsViewRoutes, optSignIn, context.RepoAssignment, reqUnitPullsReader)
+
 	m.Group("/{username}/{reponame}", func() {
 		m.Get("/comments/{id}/attachments", repo.GetCommentAttachments)
 		m.Get("/labels", repo.RetrieveLabelsForList, repo.Labels)

@@ -1203,9 +1207,6 @@ func registerRoutes(m *web.Router) {
 		m.Get("/milestone/{id}", context.RepoRef(), repo.MilestoneIssuesAndPulls)
 		m.Get("/issues/suggestions", repo.IssueSuggestions)
 	}, optSignIn, context.RepoAssignment, reqRepoIssuesOrPullsReader) // issue/pull attachments, labels, milestones
-
-	m.Group("/{username}/{reponame}/{type:issues}", addIssuesPullsViewRoutes, optSignIn, context.RepoAssignment, reqUnitIssuesReader)
-	m.Group("/{username}/{reponame}/{type:pulls}", addIssuesPullsViewRoutes, optSignIn, context.RepoAssignment, reqUnitPullsReader)
 	// end "/{username}/{reponame}": view milestone, label, issue, pull, etc
 
 	m.Group("/{username}/{reponame}/{type:issues}", func() {

@@ -1224,7 +1225,7 @@ func registerRoutes(m *web.Router) {
 			m.Get("/search", repo.SearchRepoIssuesJSON)
 		}, reqUnitIssuesReader)
 
-		addIssuesPullsRoutes := func() {
+		addIssuesPullsUpdateRoutes := func() {
 			// for "/{username}/{reponame}/issues" or "/{username}/{reponame}/pulls"
 			m.Group("/{index}", func() {
 				m.Post("/title", repo.UpdateIssueTitle)

@@ -1267,8 +1268,9 @@ func registerRoutes(m *web.Router) {
 			m.Delete("/unpin/{index}", reqRepoAdmin, repo.IssueUnpin)
 			m.Post("/move_pin", reqRepoAdmin, repo.IssuePinMove)
 		}
-		m.Group("/{type:issues}", addIssuesPullsRoutes, reqUnitIssuesReader, context.RepoMustNotBeArchived())
-		m.Group("/{type:pulls}", addIssuesPullsRoutes, reqUnitPullsReader, context.RepoMustNotBeArchived())
+		// FIXME: many "pulls" requests are sent to "issues" endpoints incorrectly, so the issue endpoints have to tolerate pull request permissions at the moment
+		m.Group("/{type:issues}", addIssuesPullsUpdateRoutes, context.RequireUnitReader(unit.TypeIssues, unit.TypePullRequests), context.RepoMustNotBeArchived())
+		m.Group("/{type:pulls}", addIssuesPullsUpdateRoutes, reqUnitPullsReader, context.RepoMustNotBeArchived())
 
 		m.Group("/comments/{id}", func() {
 			m.Post("", repo.UpdateCommentContent)

@@ -1292,7 +1294,7 @@ func registerRoutes(m *web.Router) {
 			m.Post("/delete", repo.DeleteMilestone)
 		}, reqRepoIssuesOrPullsWriter, context.RepoRef())
 
-		// FIXME: need to move these routes to the proper place
+		// FIXME: many "pulls" requests are sent to "issues" endpoints incorrectly, need to move these routes to the proper place
 		m.Group("/issues", func() {
 			m.Post("/request_review", repo.UpdatePullReviewRequest)
 			m.Post("/dismiss_review", reqRepoAdmin, web.Bind(forms.DismissReviewForm{}), repo.DismissReview)