author     Anthony Wang  2023-02-10 00:24:43 +0000
committer  Anthony Wang  2023-02-10 00:24:43 +0000
commit     1a54d5e8970f2ff6ffe3aeaa19b3917f5e7dc9fd (patch)
tree       f8ff0e3f43a8d61879bb885e8f6248b95bf6ca57 /services
parent     e44c986b86200eb862d1db9c10ff44602a638554 (diff)
parent     8574a6433fab47b6f20997f024c176490dfad1c0 (diff)
Merge remote-tracking branch 'origin/main' into forgejo-federation
Diffstat (limited to 'services')
-rw-r--r--  services/actions/clear_tasks.go                94
-rw-r--r--  services/actions/commit_status.go              88
-rw-r--r--  services/actions/init.go                       22
-rw-r--r--  services/actions/job_emitter.go               140
-rw-r--r--  services/actions/job_emitter_test.go           80
-rw-r--r--  services/actions/notifier.go                  528
-rw-r--r--  services/actions/notifier_helper.go           236
-rw-r--r--  services/auth/basic.go                         14
-rw-r--r--  services/auth/oauth2.go                        19
-rw-r--r--  services/auth/source/ldap/source_search.go    137
-rw-r--r--  services/automerge/automerge.go                26
-rw-r--r--  services/cron/cron.go                           1
-rw-r--r--  services/cron/tasks_actions.go                 51
-rw-r--r--  services/cron/tasks_basic.go                    6
-rw-r--r--  services/cron/tasks_extended.go                 6
-rw-r--r--  services/forms/package_form.go                  2
-rw-r--r--  services/forms/repo_form.go                     2
-rw-r--r--  services/forms/runner.go                       25
-rw-r--r--  services/gitdiff/gitdiff.go                    80
-rw-r--r--  services/gitdiff/gitdiff_test.go                2
-rw-r--r--  services/mailer/incoming/incoming_handler.go    8
-rw-r--r--  services/mirror/mirror_pull.go                 10
-rw-r--r--  services/mirror/mirror_push.go                  4
-rw-r--r--  services/packages/packages.go                  14
-rw-r--r--  services/pull/check.go                        188
-rw-r--r--  services/pull/merge.go                         92
-rw-r--r--  services/pull/patch.go                        123
-rw-r--r--  services/pull/pull.go                          93
-rw-r--r--  services/repository/adopt.go                    2
-rw-r--r--  services/repository/check.go                   13
-rw-r--r--  services/repository/files/patch.go              8
-rw-r--r--  services/repository/files/temp_repo.go         14
-rw-r--r--  services/repository/files/update.go             2
-rw-r--r--  services/repository/files/upload.go             2
-rw-r--r--  services/repository/fork.go                     2
-rw-r--r--  services/repository/push.go                    20
36 files changed, 1700 insertions, 454 deletions
diff --git a/services/actions/clear_tasks.go b/services/actions/clear_tasks.go
new file mode 100644
index 000000000..583e588de
--- /dev/null
+++ b/services/actions/clear_tasks.go
@@ -0,0 +1,94 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/actions"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+const (
+ zombieTaskTimeout = 10 * time.Minute
+ endlessTaskTimeout = 3 * time.Hour
+ abandonedJobTimeout = 24 * time.Hour
+)
+
+// StopZombieTasks stops the tasks that have running status but haven't been updated for a long time
+func StopZombieTasks(ctx context.Context) error {
+ return stopTasks(ctx, actions_model.FindTaskOptions{
+ Status: actions_model.StatusRunning,
+ UpdatedBefore: timeutil.TimeStamp(time.Now().Add(-zombieTaskTimeout).Unix()),
+ })
+}
+
+// StopEndlessTasks stops the tasks that have running status and keep being updated, but haven't finished for a long time
+func StopEndlessTasks(ctx context.Context) error {
+ return stopTasks(ctx, actions_model.FindTaskOptions{
+ Status: actions_model.StatusRunning,
+ StartedBefore: timeutil.TimeStamp(time.Now().Add(-endlessTaskTimeout).Unix()),
+ })
+}
+
+func stopTasks(ctx context.Context, opts actions_model.FindTaskOptions) error {
+ tasks, err := actions_model.FindTasks(ctx, opts)
+ if err != nil {
+ return fmt.Errorf("find tasks: %w", err)
+ }
+
+ for _, task := range tasks {
+ if err := db.WithTx(ctx, func(ctx context.Context) error {
+ if err := actions_model.StopTask(ctx, task.ID, actions_model.StatusFailure); err != nil {
+ return err
+ }
+ if err := task.LoadJob(ctx); err != nil {
+ return err
+ }
+ return CreateCommitStatus(ctx, task.Job)
+ }); err != nil {
+ log.Warn("Cannot stop task %v: %v", task.ID, err)
+ // go on
+ } else if remove, err := actions.TransferLogs(ctx, task.LogFilename); err != nil {
+ log.Warn("Cannot transfer logs of task %v: %v", task.ID, err)
+ } else {
+ remove()
+ }
+ }
+ return nil
+}
+
+// CancelAbandonedJobs cancels the jobs that have waiting status but haven't been picked up by a runner for a long time
+func CancelAbandonedJobs(ctx context.Context) error {
+ jobs, _, err := actions_model.FindRunJobs(ctx, actions_model.FindRunJobOptions{
+ Statuses: []actions_model.Status{actions_model.StatusWaiting, actions_model.StatusBlocked},
+ UpdatedBefore: timeutil.TimeStamp(time.Now().Add(-abandonedJobTimeout).Unix()),
+ })
+ if err != nil {
+ log.Warn("find abandoned tasks: %v", err)
+ return err
+ }
+
+ now := timeutil.TimeStampNow()
+ for _, job := range jobs {
+ job.Status = actions_model.StatusCancelled
+ job.Stopped = now
+ if err := db.WithTx(ctx, func(ctx context.Context) error {
+ if _, err := actions_model.UpdateRunJob(ctx, job, nil, "status", "stopped"); err != nil {
+ return err
+ }
+ return CreateCommitStatus(ctx, job)
+ }); err != nil {
+ log.Warn("cancel abandoned job %v: %v", job.ID, err)
+ // go on
+ }
+ }
+
+ return nil
+}
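
A minimal illustration of the cutoffs above (not part of the patch): a task counts as a zombie once its last update is older than zombieTaskTimeout; the endless and abandoned checks work the same way against StartedBefore and UpdatedBefore respectively. The helper below is hypothetical and only mirrors the query condition used by StopZombieTasks.

// Hypothetical helper, for illustration only: a task is a zombie when
// its last update is older than now - zombieTaskTimeout (10 minutes).
func isZombie(lastUpdate, now time.Time) bool {
    return lastUpdate.Before(now.Add(-zombieTaskTimeout))
}

The periodic wiring that actually calls StopZombieTasks, StopEndlessTasks and CancelAbandonedJobs is registered in services/cron/tasks_actions.go further down in this diff.
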
diff --git a/services/actions/commit_status.go b/services/actions/commit_status.go
new file mode 100644
index 000000000..c17f8ef15
--- /dev/null
+++ b/services/actions/commit_status.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "fmt"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ user_model "code.gitea.io/gitea/models/user"
+ api "code.gitea.io/gitea/modules/structs"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+)
+
+func CreateCommitStatus(ctx context.Context, job *actions_model.ActionRunJob) error {
+ if err := job.LoadAttributes(ctx); err != nil {
+ return fmt.Errorf("load run: %w", err)
+ }
+
+ run := job.Run
+ if run.Event != webhook_module.HookEventPush {
+ return nil
+ }
+
+ payload, err := run.GetPushEventPayload()
+ if err != nil {
+ return fmt.Errorf("GetPushEventPayload: %w", err)
+ }
+
+ creator, err := user_model.GetUserByID(ctx, payload.Pusher.ID)
+ if err != nil {
+ return fmt.Errorf("GetUserByID: %w", err)
+ }
+
+ repo := run.Repo
+ sha := payload.HeadCommit.ID
+ ctxname := job.Name
+ state := toCommitStatus(job.Status)
+
+ if statuses, _, err := git_model.GetLatestCommitStatus(ctx, repo.ID, sha, db.ListOptions{}); err == nil {
+ for _, v := range statuses {
+ if v.Context == ctxname {
+ if v.State == state {
+ return nil
+ }
+ break
+ }
+ }
+ } else {
+ return fmt.Errorf("GetLatestCommitStatus: %w", err)
+ }
+
+ if err := git_model.NewCommitStatus(ctx, git_model.NewCommitStatusOptions{
+ Repo: repo,
+ SHA: payload.HeadCommit.ID,
+ Creator: creator,
+ CommitStatus: &git_model.CommitStatus{
+ SHA: sha,
+ TargetURL: run.HTMLURL(),
+ Description: "",
+ Context: ctxname,
+ CreatorID: payload.Pusher.ID,
+ State: state,
+ },
+ }); err != nil {
+ return fmt.Errorf("NewCommitStatus: %w", err)
+ }
+
+ return nil
+}
+
+func toCommitStatus(status actions_model.Status) api.CommitStatusState {
+ switch status {
+ case actions_model.StatusSuccess:
+ return api.CommitStatusSuccess
+ case actions_model.StatusFailure, actions_model.StatusCancelled, actions_model.StatusSkipped:
+ return api.CommitStatusFailure
+ case actions_model.StatusWaiting, actions_model.StatusBlocked:
+ return api.CommitStatusPending
+ case actions_model.StatusRunning:
+ return api.CommitStatusRunning
+ default:
+ return api.CommitStatusError
+ }
+}
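
For quick reference, a test-style sketch of the mapping above (not part of the patch; it assumes the testify assert package that job_emitter_test.go in this diff already uses):

// Illustrative only: failure-like terminal states collapse into a single
// failure commit status, while waiting/blocked map to pending.
func TestToCommitStatusSketch(t *testing.T) {
    assert.Equal(t, api.CommitStatusFailure, toCommitStatus(actions_model.StatusCancelled))
    assert.Equal(t, api.CommitStatusPending, toCommitStatus(actions_model.StatusBlocked))
    assert.Equal(t, api.CommitStatusRunning, toCommitStatus(actions_model.StatusRunning))
}
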
diff --git a/services/actions/init.go b/services/actions/init.go
new file mode 100644
index 000000000..3fd03eeb6
--- /dev/null
+++ b/services/actions/init.go
@@ -0,0 +1,22 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "code.gitea.io/gitea/modules/graceful"
+ "code.gitea.io/gitea/modules/notification"
+ "code.gitea.io/gitea/modules/queue"
+ "code.gitea.io/gitea/modules/setting"
+)
+
+func Init() {
+ if !setting.Actions.Enabled {
+ return
+ }
+
+ jobEmitterQueue = queue.CreateUniqueQueue("actions_ready_job", jobEmitterQueueHandle, new(jobUpdate))
+ go graceful.GetManager().RunWithShutdownFns(jobEmitterQueue.Run)
+
+ notification.RegisterNotifier(NewNotifier())
+}
diff --git a/services/actions/job_emitter.go b/services/actions/job_emitter.go
new file mode 100644
index 000000000..cb2cc8d1a
--- /dev/null
+++ b/services/actions/job_emitter.go
@@ -0,0 +1,140 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/graceful"
+ "code.gitea.io/gitea/modules/queue"
+
+ "xorm.io/builder"
+)
+
+var jobEmitterQueue queue.UniqueQueue
+
+type jobUpdate struct {
+ RunID int64
+}
+
+func EmitJobsIfReady(runID int64) error {
+ err := jobEmitterQueue.Push(&jobUpdate{
+ RunID: runID,
+ })
+ if errors.Is(err, queue.ErrAlreadyInQueue) {
+ return nil
+ }
+ return err
+}
+
+func jobEmitterQueueHandle(data ...queue.Data) []queue.Data {
+ ctx := graceful.GetManager().ShutdownContext()
+ var ret []queue.Data
+ for _, d := range data {
+ update := d.(*jobUpdate)
+ if err := checkJobsOfRun(ctx, update.RunID); err != nil {
+ ret = append(ret, d)
+ }
+ }
+ return ret
+}
+
+func checkJobsOfRun(ctx context.Context, runID int64) error {
+ return db.WithTx(ctx, func(ctx context.Context) error {
+ jobs, _, err := actions_model.FindRunJobs(ctx, actions_model.FindRunJobOptions{RunID: runID})
+ if err != nil {
+ return err
+ }
+ idToJobs := make(map[string][]*actions_model.ActionRunJob, len(jobs))
+ for _, job := range jobs {
+ idToJobs[job.JobID] = append(idToJobs[job.JobID], job)
+ }
+
+ updates := newJobStatusResolver(jobs).Resolve()
+ for _, job := range jobs {
+ if status, ok := updates[job.ID]; ok {
+ job.Status = status
+ if n, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"status": actions_model.StatusBlocked}, "status"); err != nil {
+ return err
+ } else if n != 1 {
+ return fmt.Errorf("no affected for updating blocked job %v", job.ID)
+ }
+ }
+ }
+ return nil
+ })
+}
+
+type jobStatusResolver struct {
+ statuses map[int64]actions_model.Status
+ needs map[int64][]int64
+}
+
+func newJobStatusResolver(jobs actions_model.ActionJobList) *jobStatusResolver {
+ idToJobs := make(map[string][]*actions_model.ActionRunJob, len(jobs))
+ for _, job := range jobs {
+ idToJobs[job.JobID] = append(idToJobs[job.JobID], job)
+ }
+
+ statuses := make(map[int64]actions_model.Status, len(jobs))
+ needs := make(map[int64][]int64, len(jobs))
+ for _, job := range jobs {
+ statuses[job.ID] = job.Status
+ for _, need := range job.Needs {
+ for _, v := range idToJobs[need] {
+ needs[job.ID] = append(needs[job.ID], v.ID)
+ }
+ }
+ }
+ return &jobStatusResolver{
+ statuses: statuses,
+ needs: needs,
+ }
+}
+
+func (r *jobStatusResolver) Resolve() map[int64]actions_model.Status {
+ ret := map[int64]actions_model.Status{}
+ for i := 0; i < len(r.statuses); i++ {
+ updated := r.resolve()
+ if len(updated) == 0 {
+ return ret
+ }
+ for k, v := range updated {
+ ret[k] = v
+ r.statuses[k] = v
+ }
+ }
+ return ret
+}
+
+func (r *jobStatusResolver) resolve() map[int64]actions_model.Status {
+ ret := map[int64]actions_model.Status{}
+ for id, status := range r.statuses {
+ if status != actions_model.StatusBlocked {
+ continue
+ }
+ allDone, allSucceed := true, true
+ for _, need := range r.needs[id] {
+ needStatus := r.statuses[need]
+ if !needStatus.IsDone() {
+ allDone = false
+ }
+ if needStatus.In(actions_model.StatusFailure, actions_model.StatusCancelled, actions_model.StatusSkipped) {
+ allSucceed = false
+ }
+ }
+ if allDone {
+ if allSucceed {
+ ret[id] = actions_model.StatusWaiting
+ } else {
+ ret[id] = actions_model.StatusSkipped
+ }
+ }
+ }
+ return ret
+}
diff --git a/services/actions/job_emitter_test.go b/services/actions/job_emitter_test.go
new file mode 100644
index 000000000..e81aa61d8
--- /dev/null
+++ b/services/actions/job_emitter_test.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "testing"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_jobStatusResolver_Resolve(t *testing.T) {
+ tests := []struct {
+ name string
+ jobs actions_model.ActionJobList
+ want map[int64]actions_model.Status
+ }{
+ {
+ name: "no blocked",
+ jobs: actions_model.ActionJobList{
+ {ID: 1, JobID: "1", Status: actions_model.StatusWaiting, Needs: []string{}},
+ {ID: 2, JobID: "2", Status: actions_model.StatusWaiting, Needs: []string{}},
+ {ID: 3, JobID: "3", Status: actions_model.StatusWaiting, Needs: []string{}},
+ },
+ want: map[int64]actions_model.Status{},
+ },
+ {
+ name: "single blocked",
+ jobs: actions_model.ActionJobList{
+ {ID: 1, JobID: "1", Status: actions_model.StatusSuccess, Needs: []string{}},
+ {ID: 2, JobID: "2", Status: actions_model.StatusBlocked, Needs: []string{"1"}},
+ {ID: 3, JobID: "3", Status: actions_model.StatusWaiting, Needs: []string{}},
+ },
+ want: map[int64]actions_model.Status{
+ 2: actions_model.StatusWaiting,
+ },
+ },
+ {
+ name: "multiple blocked",
+ jobs: actions_model.ActionJobList{
+ {ID: 1, JobID: "1", Status: actions_model.StatusSuccess, Needs: []string{}},
+ {ID: 2, JobID: "2", Status: actions_model.StatusBlocked, Needs: []string{"1"}},
+ {ID: 3, JobID: "3", Status: actions_model.StatusBlocked, Needs: []string{"1"}},
+ },
+ want: map[int64]actions_model.Status{
+ 2: actions_model.StatusWaiting,
+ 3: actions_model.StatusWaiting,
+ },
+ },
+ {
+ name: "chain blocked",
+ jobs: actions_model.ActionJobList{
+ {ID: 1, JobID: "1", Status: actions_model.StatusFailure, Needs: []string{}},
+ {ID: 2, JobID: "2", Status: actions_model.StatusBlocked, Needs: []string{"1"}},
+ {ID: 3, JobID: "3", Status: actions_model.StatusBlocked, Needs: []string{"2"}},
+ },
+ want: map[int64]actions_model.Status{
+ 2: actions_model.StatusSkipped,
+ 3: actions_model.StatusSkipped,
+ },
+ },
+ {
+ name: "loop need",
+ jobs: actions_model.ActionJobList{
+ {ID: 1, JobID: "1", Status: actions_model.StatusBlocked, Needs: []string{"3"}},
+ {ID: 2, JobID: "2", Status: actions_model.StatusBlocked, Needs: []string{"1"}},
+ {ID: 3, JobID: "3", Status: actions_model.StatusBlocked, Needs: []string{"2"}},
+ },
+ want: map[int64]actions_model.Status{},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ r := newJobStatusResolver(tt.jobs)
+ assert.Equal(t, tt.want, r.Resolve())
+ })
+ }
+}
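
The emitter is driven by status changes: when a job reaches a terminal state, pushing its run ID makes the queue handler re-evaluate every still-blocked job of that run via checkJobsOfRun and the resolver. A hedged sketch of such a call site (the function name is hypothetical; the real callers live outside this diff):

// Illustrative only: tell the emitter that a run's blocked jobs may now be ready.
func onJobStatusChanged(runID int64) {
    if err := EmitJobsIfReady(runID); err != nil {
        log.Error("EmitJobsIfReady: %v", err)
    }
}

Note that the "loop need" test case above terminates because resolve() only unblocks jobs whose needs are all done; inside a cycle nothing is ever done, so Resolve() returns an empty result after the first pass.
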
diff --git a/services/actions/notifier.go b/services/actions/notifier.go
new file mode 100644
index 000000000..0ed69097d
--- /dev/null
+++ b/services/actions/notifier.go
@@ -0,0 +1,528 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ issues_model "code.gitea.io/gitea/models/issues"
+ packages_model "code.gitea.io/gitea/models/packages"
+ perm_model "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/notification/base"
+ "code.gitea.io/gitea/modules/repository"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+ "code.gitea.io/gitea/services/convert"
+)
+
+type actionsNotifier struct {
+ base.NullNotifier
+}
+
+var _ base.Notifier = &actionsNotifier{}
+
+// NewNotifier creates a new actionsNotifier
+func NewNotifier() base.Notifier {
+ return &actionsNotifier{}
+}
+
+// NotifyNewIssue notifies that a new issue was created
+func (n *actionsNotifier) NotifyNewIssue(ctx context.Context, issue *issues_model.Issue, _ []*user_model.User) {
+ ctx = withMethod(ctx, "NotifyNewIssue")
+ if err := issue.LoadRepo(ctx); err != nil {
+ log.Error("issue.LoadRepo: %v", err)
+ return
+ }
+ if err := issue.LoadPoster(ctx); err != nil {
+ log.Error("issue.LoadPoster: %v", err)
+ return
+ }
+ mode, _ := access_model.AccessLevel(ctx, issue.Poster, issue.Repo)
+
+ newNotifyInputFromIssue(issue, webhook_module.HookEventIssues).WithPayload(&api.IssuePayload{
+ Action: api.HookIssueOpened,
+ Index: issue.Index,
+ Issue: convert.ToAPIIssue(ctx, issue),
+ Repository: convert.ToRepo(ctx, issue.Repo, mode),
+ Sender: convert.ToUser(issue.Poster, nil),
+ }).Notify(withMethod(ctx, "NotifyNewIssue"))
+}
+
+// NotifyIssueChangeStatus notifies the notifiers when an issue is closed or reopened
+func (n *actionsNotifier) NotifyIssueChangeStatus(ctx context.Context, doer *user_model.User, commitID string, issue *issues_model.Issue, _ *issues_model.Comment, isClosed bool) {
+ ctx = withMethod(ctx, "NotifyIssueChangeStatus")
+ mode, _ := access_model.AccessLevel(ctx, issue.Poster, issue.Repo)
+ if issue.IsPull {
+ if err := issue.LoadPullRequest(ctx); err != nil {
+ log.Error("LoadPullRequest: %v", err)
+ return
+ }
+ // Merging a pull request calls issue.changeStatus, so we need to handle it separately.
+ apiPullRequest := &api.PullRequestPayload{
+ Index: issue.Index,
+ PullRequest: convert.ToAPIPullRequest(db.DefaultContext, issue.PullRequest, nil),
+ Repository: convert.ToRepo(ctx, issue.Repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ CommitID: commitID,
+ }
+ if isClosed {
+ apiPullRequest.Action = api.HookIssueClosed
+ } else {
+ apiPullRequest.Action = api.HookIssueReOpened
+ }
+ newNotifyInputFromIssue(issue, webhook_module.HookEventPullRequest).
+ WithDoer(doer).
+ WithPayload(apiPullRequest).
+ Notify(ctx)
+ return
+ }
+ apiIssue := &api.IssuePayload{
+ Index: issue.Index,
+ Issue: convert.ToAPIIssue(ctx, issue),
+ Repository: convert.ToRepo(ctx, issue.Repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ }
+ if isClosed {
+ apiIssue.Action = api.HookIssueClosed
+ } else {
+ apiIssue.Action = api.HookIssueReOpened
+ }
+ newNotifyInputFromIssue(issue, webhook_module.HookEventIssues).
+ WithDoer(doer).
+ WithPayload(apiIssue).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifyIssueChangeLabels(ctx context.Context, doer *user_model.User, issue *issues_model.Issue,
+ _, _ []*issues_model.Label,
+) {
+ ctx = withMethod(ctx, "NotifyIssueChangeLabels")
+
+ var err error
+ if err = issue.LoadRepo(ctx); err != nil {
+ log.Error("LoadRepo: %v", err)
+ return
+ }
+
+ if err = issue.LoadPoster(ctx); err != nil {
+ log.Error("LoadPoster: %v", err)
+ return
+ }
+
+ mode, _ := access_model.AccessLevel(ctx, issue.Poster, issue.Repo)
+ if issue.IsPull {
+ if err = issue.LoadPullRequest(ctx); err != nil {
+ log.Error("loadPullRequest: %v", err)
+ return
+ }
+ if err = issue.PullRequest.LoadIssue(ctx); err != nil {
+ log.Error("LoadIssue: %v", err)
+ return
+ }
+ newNotifyInputFromIssue(issue, webhook_module.HookEventPullRequestLabel).
+ WithDoer(doer).
+ WithPayload(&api.PullRequestPayload{
+ Action: api.HookIssueLabelUpdated,
+ Index: issue.Index,
+ PullRequest: convert.ToAPIPullRequest(ctx, issue.PullRequest, nil),
+ Repository: convert.ToRepo(ctx, issue.Repo, perm_model.AccessModeNone),
+ Sender: convert.ToUser(doer, nil),
+ }).
+ Notify(ctx)
+ return
+ }
+ newNotifyInputFromIssue(issue, webhook_module.HookEventIssueLabel).
+ WithDoer(doer).
+ WithPayload(&api.IssuePayload{
+ Action: api.HookIssueLabelUpdated,
+ Index: issue.Index,
+ Issue: convert.ToAPIIssue(ctx, issue),
+ Repository: convert.ToRepo(ctx, issue.Repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ }).
+ Notify(ctx)
+}
+
+// NotifyCreateIssueComment notifies the notifiers when a comment is created on an issue
+func (n *actionsNotifier) NotifyCreateIssueComment(ctx context.Context, doer *user_model.User, repo *repo_model.Repository,
+ issue *issues_model.Issue, comment *issues_model.Comment, _ []*user_model.User,
+) {
+ ctx = withMethod(ctx, "NotifyCreateIssueComment")
+
+ mode, _ := access_model.AccessLevel(ctx, doer, repo)
+
+ if issue.IsPull {
+ newNotifyInputFromIssue(issue, webhook_module.HookEventPullRequestComment).
+ WithDoer(doer).
+ WithPayload(&api.IssueCommentPayload{
+ Action: api.HookIssueCommentCreated,
+ Issue: convert.ToAPIIssue(ctx, issue),
+ Comment: convert.ToComment(comment),
+ Repository: convert.ToRepo(ctx, repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ IsPull: true,
+ }).
+ Notify(ctx)
+ return
+ }
+ newNotifyInputFromIssue(issue, webhook_module.HookEventIssueComment).
+ WithDoer(doer).
+ WithPayload(&api.IssueCommentPayload{
+ Action: api.HookIssueCommentCreated,
+ Issue: convert.ToAPIIssue(ctx, issue),
+ Comment: convert.ToComment(comment),
+ Repository: convert.ToRepo(ctx, repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ IsPull: false,
+ }).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifyNewPullRequest(ctx context.Context, pull *issues_model.PullRequest, _ []*user_model.User) {
+ ctx = withMethod(ctx, "NotifyNewPullRequest")
+
+ if err := pull.LoadIssue(ctx); err != nil {
+ log.Error("pull.LoadIssue: %v", err)
+ return
+ }
+ if err := pull.Issue.LoadRepo(ctx); err != nil {
+ log.Error("pull.Issue.LoadRepo: %v", err)
+ return
+ }
+ if err := pull.Issue.LoadPoster(ctx); err != nil {
+ log.Error("pull.Issue.LoadPoster: %v", err)
+ return
+ }
+
+ mode, _ := access_model.AccessLevel(ctx, pull.Issue.Poster, pull.Issue.Repo)
+
+ newNotifyInputFromIssue(pull.Issue, webhook_module.HookEventPullRequest).
+ WithPayload(&api.PullRequestPayload{
+ Action: api.HookIssueOpened,
+ Index: pull.Issue.Index,
+ PullRequest: convert.ToAPIPullRequest(ctx, pull, nil),
+ Repository: convert.ToRepo(ctx, pull.Issue.Repo, mode),
+ Sender: convert.ToUser(pull.Issue.Poster, nil),
+ }).
+ WithPullRequest(pull).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifyCreateRepository(ctx context.Context, doer, u *user_model.User, repo *repo_model.Repository) {
+ ctx = withMethod(ctx, "NotifyCreateRepository")
+
+ newNotifyInput(repo, doer, webhook_module.HookEventRepository).WithPayload(&api.RepositoryPayload{
+ Action: api.HookRepoCreated,
+ Repository: convert.ToRepo(ctx, repo, perm_model.AccessModeOwner),
+ Organization: convert.ToUser(u, nil),
+ Sender: convert.ToUser(doer, nil),
+ }).Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifyForkRepository(ctx context.Context, doer *user_model.User, oldRepo, repo *repo_model.Repository) {
+ ctx = withMethod(ctx, "NotifyForkRepository")
+
+ oldMode, _ := access_model.AccessLevel(ctx, doer, oldRepo)
+ mode, _ := access_model.AccessLevel(ctx, doer, repo)
+
+ // forked webhook
+ newNotifyInput(oldRepo, doer, webhook_module.HookEventFork).WithPayload(&api.ForkPayload{
+ Forkee: convert.ToRepo(ctx, oldRepo, oldMode),
+ Repo: convert.ToRepo(ctx, repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ }).Notify(ctx)
+
+ u := repo.MustOwner(ctx)
+
+ // Add to hook queue for created repo after session commit.
+ if u.IsOrganization() {
+ newNotifyInput(repo, doer, webhook_module.HookEventRepository).
+ WithRef(oldRepo.DefaultBranch).
+ WithPayload(&api.RepositoryPayload{
+ Action: api.HookRepoCreated,
+ Repository: convert.ToRepo(ctx, repo, perm_model.AccessModeOwner),
+ Organization: convert.ToUser(u, nil),
+ Sender: convert.ToUser(doer, nil),
+ }).Notify(ctx)
+ }
+}
+
+func (n *actionsNotifier) NotifyPullRequestReview(ctx context.Context, pr *issues_model.PullRequest, review *issues_model.Review, _ *issues_model.Comment, _ []*user_model.User) {
+ ctx = withMethod(ctx, "NotifyPullRequestReview")
+
+ var reviewHookType webhook_module.HookEventType
+
+ switch review.Type {
+ case issues_model.ReviewTypeApprove:
+ reviewHookType = webhook_module.HookEventPullRequestReviewApproved
+ case issues_model.ReviewTypeComment:
+ reviewHookType = webhook_module.HookEventPullRequestComment
+ case issues_model.ReviewTypeReject:
+ reviewHookType = webhook_module.HookEventPullRequestReviewRejected
+ default:
+ // unsupported review webhook type here
+ log.Error("Unsupported review webhook type")
+ return
+ }
+
+ if err := pr.LoadIssue(ctx); err != nil {
+ log.Error("pr.LoadIssue: %v", err)
+ return
+ }
+
+ mode, err := access_model.AccessLevel(ctx, review.Issue.Poster, review.Issue.Repo)
+ if err != nil {
+ log.Error("models.AccessLevel: %v", err)
+ return
+ }
+
+ newNotifyInput(review.Issue.Repo, review.Reviewer, reviewHookType).
+ WithRef(review.CommitID).
+ WithPayload(&api.PullRequestPayload{
+ Action: api.HookIssueReviewed,
+ Index: review.Issue.Index,
+ PullRequest: convert.ToAPIPullRequest(db.DefaultContext, pr, nil),
+ Repository: convert.ToRepo(ctx, review.Issue.Repo, mode),
+ Sender: convert.ToUser(review.Reviewer, nil),
+ Review: &api.ReviewPayload{
+ Type: string(reviewHookType),
+ Content: review.Content,
+ },
+ }).Notify(ctx)
+}
+
+func (*actionsNotifier) NotifyMergePullRequest(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
+ ctx = withMethod(ctx, "NotifyMergePullRequest")
+
+ // Reload pull request information.
+ if err := pr.LoadAttributes(ctx); err != nil {
+ log.Error("LoadAttributes: %v", err)
+ return
+ }
+
+ if err := pr.LoadIssue(ctx); err != nil {
+ log.Error("LoadAttributes: %v", err)
+ return
+ }
+
+ if err := pr.Issue.LoadRepo(db.DefaultContext); err != nil {
+ log.Error("pr.Issue.LoadRepo: %v", err)
+ return
+ }
+
+ mode, err := access_model.AccessLevel(ctx, doer, pr.Issue.Repo)
+ if err != nil {
+ log.Error("models.AccessLevel: %v", err)
+ return
+ }
+
+ // Merging a pull request calls issue.changeStatus, so we need to handle it separately.
+ apiPullRequest := &api.PullRequestPayload{
+ Index: pr.Issue.Index,
+ PullRequest: convert.ToAPIPullRequest(db.DefaultContext, pr, nil),
+ Repository: convert.ToRepo(ctx, pr.Issue.Repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ Action: api.HookIssueClosed,
+ }
+
+ newNotifyInput(pr.Issue.Repo, doer, webhook_module.HookEventPullRequest).
+ WithRef(pr.MergedCommitID).
+ WithPayload(apiPullRequest).
+ WithPullRequest(pr).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifyPushCommits(ctx context.Context, pusher *user_model.User, repo *repo_model.Repository, opts *repository.PushUpdateOptions, commits *repository.PushCommits) {
+ ctx = withMethod(ctx, "NotifyPushCommits")
+
+ apiPusher := convert.ToUser(pusher, nil)
+ apiCommits, apiHeadCommit, err := commits.ToAPIPayloadCommits(ctx, repo.RepoPath(), repo.HTMLURL())
+ if err != nil {
+ log.Error("commits.ToAPIPayloadCommits failed: %v", err)
+ return
+ }
+
+ newNotifyInput(repo, pusher, webhook_module.HookEventPush).
+ WithRef(opts.RefFullName).
+ WithPayload(&api.PushPayload{
+ Ref: opts.RefFullName,
+ Before: opts.OldCommitID,
+ After: opts.NewCommitID,
+ CompareURL: setting.AppURL + commits.CompareURL,
+ Commits: apiCommits,
+ HeadCommit: apiHeadCommit,
+ Repo: convert.ToRepo(ctx, repo, perm_model.AccessModeOwner),
+ Pusher: apiPusher,
+ Sender: apiPusher,
+ }).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifyCreateRef(ctx context.Context, pusher *user_model.User, repo *repo_model.Repository, refType, refFullName, refID string) {
+ ctx = withMethod(ctx, "NotifyCreateRef")
+
+ apiPusher := convert.ToUser(pusher, nil)
+ apiRepo := convert.ToRepo(ctx, repo, perm_model.AccessModeNone)
+ refName := git.RefEndName(refFullName)
+
+ newNotifyInput(repo, pusher, webhook_module.HookEventCreate).
+ WithRef(refName).
+ WithPayload(&api.CreatePayload{
+ Ref: refName,
+ Sha: refID,
+ RefType: refType,
+ Repo: apiRepo,
+ Sender: apiPusher,
+ }).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifyDeleteRef(ctx context.Context, pusher *user_model.User, repo *repo_model.Repository, refType, refFullName string) {
+ ctx = withMethod(ctx, "NotifyDeleteRef")
+
+ apiPusher := convert.ToUser(pusher, nil)
+ apiRepo := convert.ToRepo(ctx, repo, perm_model.AccessModeNone)
+ refName := git.RefEndName(refFullName)
+
+ newNotifyInput(repo, pusher, webhook_module.HookEventDelete).
+ WithRef(refName).
+ WithPayload(&api.DeletePayload{
+ Ref: refName,
+ RefType: refType,
+ PusherType: api.PusherTypeUser,
+ Repo: apiRepo,
+ Sender: apiPusher,
+ }).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifySyncPushCommits(ctx context.Context, pusher *user_model.User, repo *repo_model.Repository, opts *repository.PushUpdateOptions, commits *repository.PushCommits) {
+ ctx = withMethod(ctx, "NotifySyncPushCommits")
+
+ apiPusher := convert.ToUser(pusher, nil)
+ apiCommits, apiHeadCommit, err := commits.ToAPIPayloadCommits(db.DefaultContext, repo.RepoPath(), repo.HTMLURL())
+ if err != nil {
+ log.Error("commits.ToAPIPayloadCommits failed: %v", err)
+ return
+ }
+
+ newNotifyInput(repo, pusher, webhook_module.HookEventPush).
+ WithRef(opts.RefFullName).
+ WithPayload(&api.PushPayload{
+ Ref: opts.RefFullName,
+ Before: opts.OldCommitID,
+ After: opts.NewCommitID,
+ CompareURL: setting.AppURL + commits.CompareURL,
+ Commits: apiCommits,
+ TotalCommits: commits.Len,
+ HeadCommit: apiHeadCommit,
+ Repo: convert.ToRepo(ctx, repo, perm_model.AccessModeOwner),
+ Pusher: apiPusher,
+ Sender: apiPusher,
+ }).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifySyncCreateRef(ctx context.Context, pusher *user_model.User, repo *repo_model.Repository, refType, refFullName, refID string) {
+ ctx = withMethod(ctx, "NotifySyncCreateRef")
+ n.NotifyCreateRef(ctx, pusher, repo, refType, refFullName, refID)
+}
+
+func (n *actionsNotifier) NotifySyncDeleteRef(ctx context.Context, pusher *user_model.User, repo *repo_model.Repository, refType, refFullName string) {
+ ctx = withMethod(ctx, "NotifySyncDeleteRef")
+ n.NotifyDeleteRef(ctx, pusher, repo, refType, refFullName)
+}
+
+func (n *actionsNotifier) NotifyNewRelease(ctx context.Context, rel *repo_model.Release) {
+ ctx = withMethod(ctx, "NotifyNewRelease")
+ notifyRelease(ctx, rel.Publisher, rel, rel.Sha1, api.HookReleasePublished)
+}
+
+func (n *actionsNotifier) NotifyUpdateRelease(ctx context.Context, doer *user_model.User, rel *repo_model.Release) {
+ ctx = withMethod(ctx, "NotifyUpdateRelease")
+ notifyRelease(ctx, doer, rel, rel.Sha1, api.HookReleaseUpdated)
+}
+
+func (n *actionsNotifier) NotifyDeleteRelease(ctx context.Context, doer *user_model.User, rel *repo_model.Release) {
+ ctx = withMethod(ctx, "NotifyDeleteRelease")
+ notifyRelease(ctx, doer, rel, rel.Sha1, api.HookReleaseDeleted)
+}
+
+func (n *actionsNotifier) NotifyPackageCreate(ctx context.Context, doer *user_model.User, pd *packages_model.PackageDescriptor) {
+ ctx = withMethod(ctx, "NotifyPackageCreate")
+ notifyPackage(ctx, doer, pd, api.HookPackageCreated)
+}
+
+func (n *actionsNotifier) NotifyPackageDelete(ctx context.Context, doer *user_model.User, pd *packages_model.PackageDescriptor) {
+ ctx = withMethod(ctx, "NotifyPackageDelete")
+ notifyPackage(ctx, doer, pd, api.HookPackageDeleted)
+}
+
+func (n *actionsNotifier) NotifyAutoMergePullRequest(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
+ ctx = withMethod(ctx, "NotifyAutoMergePullRequest")
+ n.NotifyMergePullRequest(ctx, doer, pr)
+}
+
+func (n *actionsNotifier) NotifyPullRequestSynchronized(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
+ ctx = withMethod(ctx, "NotifyPullRequestSynchronized")
+
+ if err := pr.LoadIssue(ctx); err != nil {
+ log.Error("LoadAttributes: %v", err)
+ return
+ }
+
+ if err := pr.Issue.LoadRepo(db.DefaultContext); err != nil {
+ log.Error("pr.Issue.LoadRepo: %v", err)
+ return
+ }
+
+ newNotifyInput(pr.Issue.Repo, doer, webhook_module.HookEventPullRequestSync).
+ WithPayload(&api.PullRequestPayload{
+ Action: api.HookIssueSynchronized,
+ Index: pr.Issue.Index,
+ PullRequest: convert.ToAPIPullRequest(ctx, pr, nil),
+ Repository: convert.ToRepo(ctx, pr.Issue.Repo, perm_model.AccessModeNone),
+ Sender: convert.ToUser(doer, nil),
+ }).
+ WithPullRequest(pr).
+ Notify(ctx)
+}
+
+func (n *actionsNotifier) NotifyPullRequestChangeTargetBranch(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest, oldBranch string) {
+ ctx = withMethod(ctx, "NotifyPullRequestChangeTargetBranch")
+
+ if err := pr.LoadIssue(ctx); err != nil {
+ log.Error("LoadAttributes: %v", err)
+ return
+ }
+
+ if err := pr.Issue.LoadRepo(db.DefaultContext); err != nil {
+ log.Error("pr.Issue.LoadRepo: %v", err)
+ return
+ }
+
+ mode, _ := access_model.AccessLevel(ctx, pr.Issue.Poster, pr.Issue.Repo)
+ newNotifyInput(pr.Issue.Repo, doer, webhook_module.HookEventPullRequest).
+ WithPayload(&api.PullRequestPayload{
+ Action: api.HookIssueEdited,
+ Index: pr.Issue.Index,
+ Changes: &api.ChangesPayload{
+ Ref: &api.ChangesFromPayload{
+ From: oldBranch,
+ },
+ },
+ PullRequest: convert.ToAPIPullRequest(ctx, pr, nil),
+ Repository: convert.ToRepo(ctx, pr.Issue.Repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ }).
+ WithPullRequest(pr).
+ Notify(ctx)
+}
diff --git a/services/actions/notifier_helper.go b/services/actions/notifier_helper.go
new file mode 100644
index 000000000..5b8f6bfdf
--- /dev/null
+++ b/services/actions/notifier_helper.go
@@ -0,0 +1,236 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package actions
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ actions_model "code.gitea.io/gitea/models/actions"
+ issues_model "code.gitea.io/gitea/models/issues"
+ packages_model "code.gitea.io/gitea/models/packages"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ unit_model "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ actions_module "code.gitea.io/gitea/modules/actions"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/log"
+ api "code.gitea.io/gitea/modules/structs"
+ webhook_module "code.gitea.io/gitea/modules/webhook"
+ "code.gitea.io/gitea/services/convert"
+
+ "github.com/nektos/act/pkg/jobparser"
+)
+
+var methodCtxKey struct{}
+
+// withMethod sets the notification method that this context currently executes.
+// Used for debugging/troubleshooting purposes.
+func withMethod(ctx context.Context, method string) context.Context {
+ // don't overwrite
+ if v := ctx.Value(methodCtxKey); v != nil {
+ if _, ok := v.(string); ok {
+ return ctx
+ }
+ }
+ return context.WithValue(ctx, methodCtxKey, method)
+}
+
+// getMethod gets the notification method that this context currently executes.
+// Default: "notify"
+// Used for debugging/troubleshooting purposes.
+func getMethod(ctx context.Context) string {
+ if v := ctx.Value(methodCtxKey); v != nil {
+ if s, ok := v.(string); ok {
+ return s
+ }
+ }
+ return "notify"
+}
+
+type notifyInput struct {
+ // required
+ Repo *repo_model.Repository
+ Doer *user_model.User
+ Event webhook_module.HookEventType
+
+ // optional
+ Ref string
+ Payload api.Payloader
+ PullRequest *issues_model.PullRequest
+}
+
+func newNotifyInput(repo *repo_model.Repository, doer *user_model.User, event webhook_module.HookEventType) *notifyInput {
+ return &notifyInput{
+ Repo: repo,
+ Doer: doer,
+ Event: event,
+ }
+}
+
+func (input *notifyInput) WithDoer(doer *user_model.User) *notifyInput {
+ input.Doer = doer
+ return input
+}
+
+func (input *notifyInput) WithRef(ref string) *notifyInput {
+ input.Ref = ref
+ return input
+}
+
+func (input *notifyInput) WithPayload(payload api.Payloader) *notifyInput {
+ input.Payload = payload
+ return input
+}
+
+func (input *notifyInput) WithPullRequest(pr *issues_model.PullRequest) *notifyInput {
+ input.PullRequest = pr
+ if input.Ref == "" {
+ input.Ref = pr.GetGitRefName()
+ }
+ return input
+}
+
+func (input *notifyInput) Notify(ctx context.Context) {
+ log.Trace("execute %v for event %v whose doer is %v", getMethod(ctx), input.Event, input.Doer.Name)
+
+ if err := notify(ctx, input); err != nil {
+ log.Error("an error occurred while executing the %s actions method: %v", getMethod(ctx), err)
+ }
+}
+
+func notify(ctx context.Context, input *notifyInput) error {
+ if input.Doer.IsActions() {
+ // avoid triggering cyclically; for example:
+ // a comment on an issue triggers the runner to add a new comment as a reply,
+ // and that new comment would trigger the runner again.
+ log.Debug("ignore executing %v for event %v whose doer is %v", getMethod(ctx), input.Event, input.Doer.Name)
+ return nil
+ }
+ if unit_model.TypeActions.UnitGlobalDisabled() {
+ return nil
+ }
+ if err := input.Repo.LoadUnits(ctx); err != nil {
+ return fmt.Errorf("repo.LoadUnits: %w", err)
+ } else if !input.Repo.UnitEnabled(ctx, unit_model.TypeActions) {
+ return nil
+ }
+
+ gitRepo, err := git.OpenRepository(context.Background(), input.Repo.RepoPath())
+ if err != nil {
+ return fmt.Errorf("git.OpenRepository: %w", err)
+ }
+ defer gitRepo.Close()
+
+ ref := input.Ref
+ if ref == "" {
+ ref = input.Repo.DefaultBranch
+ }
+
+ // Get the commit object for the ref
+ commit, err := gitRepo.GetCommit(ref)
+ if err != nil {
+ return fmt.Errorf("gitRepo.GetCommit: %w", err)
+ }
+
+ workflows, err := actions_module.DetectWorkflows(commit, input.Event, input.Payload)
+ if err != nil {
+ return fmt.Errorf("DetectWorkflows: %w", err)
+ }
+
+ if len(workflows) == 0 {
+ log.Trace("repo %s with commit %s couldn't find workflows", input.Repo.RepoPath(), commit.ID)
+ return nil
+ }
+
+ p, err := json.Marshal(input.Payload)
+ if err != nil {
+ return fmt.Errorf("json.Marshal: %w", err)
+ }
+
+ for id, content := range workflows {
+ run := actions_model.ActionRun{
+ Title: strings.SplitN(commit.CommitMessage, "\n", 2)[0],
+ RepoID: input.Repo.ID,
+ OwnerID: input.Repo.OwnerID,
+ WorkflowID: id,
+ TriggerUserID: input.Doer.ID,
+ Ref: ref,
+ CommitSHA: commit.ID.String(),
+ IsForkPullRequest: input.PullRequest != nil && input.PullRequest.IsFromFork(),
+ Event: input.Event,
+ EventPayload: string(p),
+ Status: actions_model.StatusWaiting,
+ }
+ jobs, err := jobparser.Parse(content)
+ if err != nil {
+ log.Error("jobparser.Parse: %v", err)
+ continue
+ }
+ if err := actions_model.InsertRun(ctx, &run, jobs); err != nil {
+ log.Error("InsertRun: %v", err)
+ continue
+ }
+ if jobs, _, err := actions_model.FindRunJobs(ctx, actions_model.FindRunJobOptions{RunID: run.ID}); err != nil {
+ log.Error("FindRunJobs: %v", err)
+ } else {
+ for _, job := range jobs {
+ if err := CreateCommitStatus(ctx, job); err != nil {
+ log.Error("CreateCommitStatus: %v", err)
+ }
+ }
+ }
+
+ }
+ return nil
+}
+
+func newNotifyInputFromIssue(issue *issues_model.Issue, event webhook_module.HookEventType) *notifyInput {
+ return newNotifyInput(issue.Repo, issue.Poster, event)
+}
+
+func notifyRelease(ctx context.Context, doer *user_model.User, rel *repo_model.Release, ref string, action api.HookReleaseAction) {
+ if err := rel.LoadAttributes(ctx); err != nil {
+ log.Error("LoadAttributes: %v", err)
+ return
+ }
+
+ mode, _ := access_model.AccessLevel(ctx, doer, rel.Repo)
+
+ newNotifyInput(rel.Repo, doer, webhook_module.HookEventRelease).
+ WithRef(ref).
+ WithPayload(&api.ReleasePayload{
+ Action: action,
+ Release: convert.ToRelease(rel),
+ Repository: convert.ToRepo(ctx, rel.Repo, mode),
+ Sender: convert.ToUser(doer, nil),
+ }).
+ Notify(ctx)
+}
+
+func notifyPackage(ctx context.Context, sender *user_model.User, pd *packages_model.PackageDescriptor, action api.HookPackageAction) {
+ if pd.Repository == nil {
+ // A package uploaded to an organization (rather than to a repository) can also trigger this event,
+ // so the repository may be nil; however, Actions can't support that yet.
+ // See https://github.com/go-gitea/gitea/pull/17940
+ return
+ }
+
+ apiPackage, err := convert.ToPackage(ctx, pd, sender)
+ if err != nil {
+ log.Error("Error converting package: %v", err)
+ return
+ }
+
+ newNotifyInput(pd.Repository, sender, webhook_module.HookEventPackage).
+ WithPayload(&api.PackagePayload{
+ Action: action,
+ Package: apiPackage,
+ Sender: convert.ToUser(sender, nil),
+ }).
+ Notify(ctx)
+}
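
Every notifier method in notifier.go funnels into the builder defined here. A minimal usage sketch (not part of the patch; the wrapper name is hypothetical, all other identifiers appear in this file):

// Illustrative only: dispatch a push event; Notify() then detects workflows
// at the given ref and inserts the corresponding runs and jobs.
func notifyPushSketch(ctx context.Context, repo *repo_model.Repository, doer *user_model.User, payload *api.PushPayload) {
    newNotifyInput(repo, doer, webhook_module.HookEventPush).
        WithRef(payload.Ref).
        WithPayload(payload).
        Notify(ctx)
}
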
diff --git a/services/auth/basic.go b/services/auth/basic.go
index 5fb80703a..dc0378090 100644
--- a/services/auth/basic.go
+++ b/services/auth/basic.go
@@ -8,6 +8,7 @@ import (
"net/http"
"strings"
+ actions_model "code.gitea.io/gitea/models/actions"
auth_model "code.gitea.io/gitea/models/auth"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/base"
@@ -70,6 +71,7 @@ func (b *Basic) Verify(req *http.Request, w http.ResponseWriter, store DataStore
log.Trace("Basic Authorization: Attempting login with username as token")
}
+ // check oauth2 token
uid := CheckOAuthAccessToken(authToken)
if uid != 0 {
log.Trace("Basic Authorization: Valid OAuthAccessToken for user[%d]", uid)
@@ -84,6 +86,7 @@ func (b *Basic) Verify(req *http.Request, w http.ResponseWriter, store DataStore
return u, nil
}
+ // check personal access token
token, err := auth_model.GetAccessTokenBySHA(authToken)
if err == nil {
log.Trace("Basic Authorization: Valid AccessToken for user[%d]", uid)
@@ -104,6 +107,17 @@ func (b *Basic) Verify(req *http.Request, w http.ResponseWriter, store DataStore
log.Error("GetAccessTokenBySha: %v", err)
}
+ // check task token
+ task, err := actions_model.GetRunningTaskByToken(req.Context(), authToken)
+ if err == nil && task != nil {
+ log.Trace("Basic Authorization: Valid AccessToken for task[%d]", task.ID)
+
+ store.GetData()["IsActionsToken"] = true
+ store.GetData()["ActionsTaskID"] = task.ID
+
+ return user_model.NewActionsUser(), nil
+ }
+
if !setting.Service.EnableBasicAuth {
return nil, nil
}
diff --git a/services/auth/oauth2.go b/services/auth/oauth2.go
index 1be78b85c..b70f84da9 100644
--- a/services/auth/oauth2.go
+++ b/services/auth/oauth2.go
@@ -9,6 +9,7 @@ import (
"strings"
"time"
+ actions_model "code.gitea.io/gitea/models/actions"
auth_model "code.gitea.io/gitea/models/auth"
"code.gitea.io/gitea/models/db"
user_model "code.gitea.io/gitea/models/user"
@@ -94,7 +95,18 @@ func (o *OAuth2) userIDFromToken(req *http.Request, store DataStore) int64 {
}
t, err := auth_model.GetAccessTokenBySHA(tokenSHA)
if err != nil {
- if !auth_model.IsErrAccessTokenNotExist(err) && !auth_model.IsErrAccessTokenEmpty(err) {
+ if auth_model.IsErrAccessTokenNotExist(err) {
+ // check task token
+ task, err := actions_model.GetRunningTaskByToken(db.DefaultContext, tokenSHA)
+ if err == nil && task != nil {
+ log.Trace("Basic Authorization: Valid AccessToken for task[%d]", task.ID)
+
+ store.GetData()["IsActionsToken"] = true
+ store.GetData()["ActionsTaskID"] = task.ID
+
+ return user_model.ActionsUserID
+ }
+ } else if !auth_model.IsErrAccessTokenNotExist(err) && !auth_model.IsErrAccessTokenEmpty(err) {
log.Error("GetAccessTokenBySHA: %v", err)
}
return 0
@@ -118,12 +130,13 @@ func (o *OAuth2) Verify(req *http.Request, w http.ResponseWriter, store DataStor
}
id := o.userIDFromToken(req, store)
- if id <= 0 {
+
+ if id <= 0 && id != -2 { // -2 means actions, so we need to allow it.
return nil, nil
}
log.Trace("OAuth2 Authorization: Found token for user[%d]", id)
- user, err := user_model.GetUserByID(req.Context(), id)
+ user, err := user_model.GetPossibleUserByID(req.Context(), id)
if err != nil {
if !user_model.IsErrUserNotExist(err) {
log.Error("GetUserByName: %v", err)
diff --git a/services/auth/source/ldap/source_search.go b/services/auth/source/ldap/source_search.go
index 68ebba391..16f13029f 100644
--- a/services/auth/source/ldap/source_search.go
+++ b/services/auth/source/ldap/source_search.go
@@ -196,22 +196,39 @@ func checkRestricted(l *ldap.Conn, ls *Source, userDN string) bool {
}
// List all group memberships of a user
-func (source *Source) listLdapGroupMemberships(l *ldap.Conn, uid string) []string {
+func (source *Source) listLdapGroupMemberships(l *ldap.Conn, uid string, applyGroupFilter bool) []string {
var ldapGroups []string
- groupFilter := fmt.Sprintf("(%s=%s)", source.GroupMemberUID, ldap.EscapeFilter(uid))
+ var searchFilter string
+
+ groupFilter, ok := source.sanitizedGroupFilter(source.GroupFilter)
+ if !ok {
+ return ldapGroups
+ }
+
+ groupDN, ok := source.sanitizedGroupDN(source.GroupDN)
+ if !ok {
+ return ldapGroups
+ }
+
+ if applyGroupFilter {
+ searchFilter = fmt.Sprintf("(&(%s)(%s=%s))", groupFilter, source.GroupMemberUID, ldap.EscapeFilter(uid))
+ } else {
+ searchFilter = fmt.Sprintf("(%s=%s)", source.GroupMemberUID, ldap.EscapeFilter(uid))
+ }
+
result, err := l.Search(ldap.NewSearchRequest(
- source.GroupDN,
+ groupDN,
ldap.ScopeWholeSubtree,
ldap.NeverDerefAliases,
0,
0,
false,
- groupFilter,
+ searchFilter,
[]string{},
nil,
))
if err != nil {
- log.Error("Failed group search using filter[%s]: %v", groupFilter, err)
+ log.Error("Failed group search in LDAP with filter [%s]: %v", searchFilter, err)
return ldapGroups
}
@@ -238,9 +255,7 @@ func (source *Source) mapLdapGroupsToTeams() map[string]map[string][]string {
}
// getMappedMemberships : returns the organizations and teams to modify the users membership
-func (source *Source) getMappedMemberships(l *ldap.Conn, uid string) (map[string][]string, map[string][]string) {
- // get all LDAP group memberships for user
- usersLdapGroups := source.listLdapGroupMemberships(l, uid)
+func (source *Source) getMappedMemberships(usersLdapGroups []string, uid string) (map[string][]string, map[string][]string) {
// unmarshall LDAP group team map from configs
ldapGroupsToTeams := source.mapLdapGroupsToTeams()
membershipsToAdd := map[string][]string{}
@@ -260,6 +275,14 @@ func (source *Source) getMappedMemberships(l *ldap.Conn, uid string) (map[string
return membershipsToAdd, membershipsToRemove
}
+func (source *Source) getUserAttributeListedInGroup(entry *ldap.Entry) string {
+ if strings.ToLower(source.UserUID) == "dn" {
+ return entry.DN
+ }
+
+ return entry.GetAttributeValue(source.UserUID)
+}
+
// SearchEntry : search an LDAP source if an entry (name, passwd) is valid and in the specific filter
func (source *Source) SearchEntry(name, passwd string, directBind bool) *SearchResult {
// See https://tools.ietf.org/search/rfc4513#section-5.1.2
@@ -375,58 +398,30 @@ func (source *Source) SearchEntry(name, passwd string, directBind bool) *SearchR
firstname := sr.Entries[0].GetAttributeValue(source.AttributeName)
surname := sr.Entries[0].GetAttributeValue(source.AttributeSurname)
mail := sr.Entries[0].GetAttributeValue(source.AttributeMail)
- uid := sr.Entries[0].GetAttributeValue(source.UserUID)
- if source.UserUID == "dn" || source.UserUID == "DN" {
- uid = sr.Entries[0].DN
- }
- // Check group membership
- if source.GroupsEnabled && source.GroupFilter != "" {
- groupFilter, ok := source.sanitizedGroupFilter(source.GroupFilter)
- if !ok {
- return nil
- }
- groupDN, ok := source.sanitizedGroupDN(source.GroupDN)
- if !ok {
- return nil
- }
+ teamsToAdd := make(map[string][]string)
+ teamsToRemove := make(map[string][]string)
- log.Trace("Fetching groups '%v' with filter '%s' and base '%s'", source.GroupMemberUID, groupFilter, groupDN)
- groupSearch := ldap.NewSearchRequest(
- groupDN, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, groupFilter,
- []string{source.GroupMemberUID},
- nil)
+ // Check group membership
+ if source.GroupsEnabled {
+ userAttributeListedInGroup := source.getUserAttributeListedInGroup(sr.Entries[0])
+ usersLdapGroups := source.listLdapGroupMemberships(l, userAttributeListedInGroup, true)
- srg, err := l.Search(groupSearch)
- if err != nil {
- log.Error("LDAP group search failed: %v", err)
- return nil
- } else if len(srg.Entries) < 1 {
- log.Error("LDAP group search failed: 0 entries")
+ if source.GroupFilter != "" && len(usersLdapGroups) == 0 {
return nil
}
- isMember := false
- Entries:
- for _, group := range srg.Entries {
- for _, member := range group.GetAttributeValues(source.GroupMemberUID) {
- if (source.UserUID == "dn" && member == sr.Entries[0].DN) || member == uid {
- isMember = true
- break Entries
- }
- }
- }
-
- if !isMember {
- log.Error("LDAP group membership test failed")
- return nil
+ if source.GroupTeamMap != "" || source.GroupTeamMapRemoval {
+ teamsToAdd, teamsToRemove = source.getMappedMemberships(usersLdapGroups, userAttributeListedInGroup)
}
}
if isAttributeSSHPublicKeySet {
sshPublicKey = sr.Entries[0].GetAttributeValues(source.AttributeSSHPublicKey)
}
+
isAdmin := checkAdmin(l, source, userDN)
+
var isRestricted bool
if !isAdmin {
isRestricted = checkRestricted(l, source, userDN)
@@ -436,12 +431,6 @@ func (source *Source) SearchEntry(name, passwd string, directBind bool) *SearchR
Avatar = sr.Entries[0].GetRawAttributeValue(source.AttributeAvatar)
}
- teamsToAdd := make(map[string][]string)
- teamsToRemove := make(map[string][]string)
- if source.GroupsEnabled && (source.GroupTeamMap != "" || source.GroupTeamMapRemoval) {
- teamsToAdd, teamsToRemove = source.getMappedMemberships(l, uid)
- }
-
if !directBind && source.AttributesInBind {
// binds user (checking password) after looking-up attributes in BindDN context
err = bindUser(l, userDN, passwd)
@@ -520,19 +509,29 @@ func (source *Source) SearchEntries() ([]*SearchResult, error) {
return nil, err
}
- result := make([]*SearchResult, len(sr.Entries))
+ result := make([]*SearchResult, 0, len(sr.Entries))
- for i, v := range sr.Entries {
+ for _, v := range sr.Entries {
teamsToAdd := make(map[string][]string)
teamsToRemove := make(map[string][]string)
- if source.GroupsEnabled && (source.GroupTeamMap != "" || source.GroupTeamMapRemoval) {
- userAttributeListedInGroup := v.GetAttributeValue(source.UserUID)
- if source.UserUID == "dn" || source.UserUID == "DN" {
- userAttributeListedInGroup = v.DN
+
+ if source.GroupsEnabled {
+ userAttributeListedInGroup := source.getUserAttributeListedInGroup(v)
+
+ if source.GroupFilter != "" {
+ usersLdapGroups := source.listLdapGroupMemberships(l, userAttributeListedInGroup, true)
+ if len(usersLdapGroups) == 0 {
+ continue
+ }
+ }
+
+ if source.GroupTeamMap != "" || source.GroupTeamMapRemoval {
+ usersLdapGroups := source.listLdapGroupMemberships(l, userAttributeListedInGroup, false)
+ teamsToAdd, teamsToRemove = source.getMappedMemberships(usersLdapGroups, userAttributeListedInGroup)
}
- teamsToAdd, teamsToRemove = source.getMappedMemberships(l, userAttributeListedInGroup)
}
- result[i] = &SearchResult{
+
+ user := &SearchResult{
Username: v.GetAttributeValue(source.AttributeUsername),
Name: v.GetAttributeValue(source.AttributeName),
Surname: v.GetAttributeValue(source.AttributeSurname),
@@ -541,16 +540,22 @@ func (source *Source) SearchEntries() ([]*SearchResult, error) {
LdapTeamAdd: teamsToAdd,
LdapTeamRemove: teamsToRemove,
}
- if !result[i].IsAdmin {
- result[i].IsRestricted = checkRestricted(l, source, v.DN)
+
+ if !user.IsAdmin {
+ user.IsRestricted = checkRestricted(l, source, v.DN)
}
+
if isAttributeSSHPublicKeySet {
- result[i].SSHPublicKey = v.GetAttributeValues(source.AttributeSSHPublicKey)
+ user.SSHPublicKey = v.GetAttributeValues(source.AttributeSSHPublicKey)
}
+
if isAtributeAvatarSet {
- result[i].Avatar = v.GetRawAttributeValue(source.AttributeAvatar)
+ user.Avatar = v.GetRawAttributeValue(source.AttributeAvatar)
}
- result[i].LowerName = strings.ToLower(result[i].Username)
+
+ user.LowerName = strings.ToLower(user.Username)
+
+ result = append(result, user)
}
return result, nil
diff --git a/services/automerge/automerge.go b/services/automerge/automerge.go
index 15d94e792..74cfb8da8 100644
--- a/services/automerge/automerge.go
+++ b/services/automerge/automerge.go
@@ -164,7 +164,7 @@ func getPullRequestsByHeadSHA(ctx context.Context, sha string, repo *repo_model.
func handlePull(pullID int64, sha string) {
ctx, _, finished := process.GetManager().AddContext(graceful.GetManager().HammerContext(),
- fmt.Sprintf("Handle AutoMerge of pull[%d] with sha[%s]", pullID, sha))
+ fmt.Sprintf("Handle AutoMerge of PR[%d] with sha[%s]", pullID, sha))
defer finished()
pr, err := issues_model.GetPullRequestByID(ctx, pullID)
@@ -176,7 +176,7 @@ func handlePull(pullID int64, sha string) {
// Check if there is a scheduled pr in the db
exists, scheduledPRM, err := pull_model.GetScheduledMergeByPullID(ctx, pr.ID)
if err != nil {
- log.Error("pull[%d] GetScheduledMergeByPullID: %v", pr.ID, err)
+ log.Error("%-v GetScheduledMergeByPullID: %v", pr, err)
return
}
if !exists {
@@ -188,13 +188,13 @@ func handlePull(pullID int64, sha string) {
// did not succeed or was not finished yet.
if err = pr.LoadHeadRepo(ctx); err != nil {
- log.Error("pull[%d] LoadHeadRepo: %v", pr.ID, err)
+ log.Error("%-v LoadHeadRepo: %v", pr, err)
return
}
headGitRepo, err := git.OpenRepository(ctx, pr.HeadRepo.RepoPath())
if err != nil {
- log.Error("OpenRepository: %v", err)
+ log.Error("OpenRepository %-v: %v", pr.HeadRepo, err)
return
}
defer headGitRepo.Close()
@@ -202,40 +202,40 @@ func handlePull(pullID int64, sha string) {
headBranchExist := headGitRepo.IsBranchExist(pr.HeadBranch)
if pr.HeadRepo == nil || !headBranchExist {
- log.Warn("Head branch of auto merge pr does not exist [HeadRepoID: %d, Branch: %s, PR ID: %d]", pr.HeadRepoID, pr.HeadBranch, pr.ID)
+ log.Warn("Head branch of auto merge %-v does not exist [HeadRepoID: %d, Branch: %s]", pr, pr.HeadRepoID, pr.HeadBranch)
return
}
// Check if all checks succeeded
pass, err := pull_service.IsPullCommitStatusPass(ctx, pr)
if err != nil {
- log.Error("IsPullCommitStatusPass: %v", err)
+ log.Error("%-v IsPullCommitStatusPass: %v", pr, err)
return
}
if !pass {
- log.Info("Scheduled auto merge pr has unsuccessful status checks [PullID: %d]", pr.ID)
+ log.Info("Scheduled auto merge %-v has unsuccessful status checks", pr)
return
}
// Merge if all checks succeeded
doer, err := user_model.GetUserByID(ctx, scheduledPRM.DoerID)
if err != nil {
- log.Error("GetUserByIDCtx: %v", err)
+ log.Error("Unable to get scheduled User[%d]: %v", scheduledPRM.DoerID, err)
return
}
perm, err := access_model.GetUserRepoPermission(ctx, pr.HeadRepo, doer)
if err != nil {
- log.Error("GetUserRepoPermission: %v", err)
+ log.Error("GetUserRepoPermission %-v: %v", pr.HeadRepo, err)
return
}
if err := pull_service.CheckPullMergable(ctx, doer, &perm, pr, false, false); err != nil {
if errors.Is(pull_service.ErrUserNotAllowedToMerge, err) {
- log.Info("PR %d was scheduled to automerge by an unauthorized user", pr.ID)
+ log.Info("%-v was scheduled to automerge by an unauthorized user", pr)
return
}
- log.Error("pull[%d] CheckPullMergable: %v", pr.ID, err)
+ log.Error("%-v CheckPullMergable: %v", pr, err)
return
}
@@ -244,13 +244,13 @@ func handlePull(pullID int64, sha string) {
baseGitRepo = headGitRepo
} else {
if err = pr.LoadBaseRepo(ctx); err != nil {
- log.Error("LoadBaseRepo: %v", err)
+ log.Error("%-v LoadBaseRepo: %v", pr, err)
return
}
baseGitRepo, err = git.OpenRepository(ctx, pr.BaseRepo.RepoPath())
if err != nil {
- log.Error("OpenRepository: %v", err)
+ log.Error("OpenRepository %-v: %v", pr.BaseRepo, err)
return
}
defer baseGitRepo.Close()
diff --git a/services/cron/cron.go b/services/cron/cron.go
index bda8f12f1..72deb94ce 100644
--- a/services/cron/cron.go
+++ b/services/cron/cron.go
@@ -30,6 +30,7 @@ func NewContext(original context.Context) {
_, _, finished := process.GetManager().AddTypedContext(graceful.GetManager().ShutdownContext(), "Service: Cron", process.SystemProcessType, true)
initBasicTasks()
initExtendedTasks()
+ initActionsTasks()
lock.Lock()
for _, task := range tasks {
diff --git a/services/cron/tasks_actions.go b/services/cron/tasks_actions.go
new file mode 100644
index 000000000..30e8749a5
--- /dev/null
+++ b/services/cron/tasks_actions.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package cron
+
+import (
+ "context"
+
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ actions_service "code.gitea.io/gitea/services/actions"
+)
+
+func initActionsTasks() {
+ if !setting.Actions.Enabled {
+ return
+ }
+ registerStopZombieTasks()
+ registerStopEndlessTasks()
+ registerCancelAbandonedJobs()
+}
+
+func registerStopZombieTasks() {
+ RegisterTaskFatal("stop_zombie_tasks", &BaseConfig{
+ Enabled: true,
+ RunAtStart: true,
+ Schedule: "@every 5m",
+ }, func(ctx context.Context, _ *user_model.User, cfg Config) error {
+ return actions_service.StopZombieTasks(ctx)
+ })
+}
+
+func registerStopEndlessTasks() {
+ RegisterTaskFatal("stop_endless_tasks", &BaseConfig{
+ Enabled: true,
+ RunAtStart: true,
+ Schedule: "@every 30m",
+ }, func(ctx context.Context, _ *user_model.User, cfg Config) error {
+ return actions_service.StopEndlessTasks(ctx)
+ })
+}
+
+func registerCancelAbandonedJobs() {
+ RegisterTaskFatal("cancel_abandoned_jobs", &BaseConfig{
+ Enabled: true,
+ RunAtStart: true,
+ Schedule: "@every 6h",
+ }, func(ctx context.Context, _ *user_model.User, cfg Config) error {
+ return actions_service.CancelAbandonedJobs(ctx)
+ })
+}
diff --git a/services/cron/tasks_basic.go b/services/cron/tasks_basic.go
index 05aef6623..aad0e3959 100644
--- a/services/cron/tasks_basic.go
+++ b/services/cron/tasks_basic.go
@@ -59,11 +59,7 @@ func registerRepoHealthCheck() {
}, func(ctx context.Context, _ *user_model.User, config Config) error {
rhcConfig := config.(*RepoHealthCheckConfig)
 // the git args are set by config, they are safe to be trusted
- args := make([]git.CmdArg, 0, len(rhcConfig.Args))
- for _, arg := range rhcConfig.Args {
- args = append(args, git.CmdArg(arg))
- }
- return repo_service.GitFsckRepos(ctx, rhcConfig.Timeout, args)
+ return repo_service.GitFsckRepos(ctx, rhcConfig.Timeout, git.ToTrustedCmdArgs(rhcConfig.Args))
})
}
diff --git a/services/cron/tasks_extended.go b/services/cron/tasks_extended.go
index 520d940ed..3e0dbd132 100644
--- a/services/cron/tasks_extended.go
+++ b/services/cron/tasks_extended.go
@@ -61,11 +61,7 @@ func registerGarbageCollectRepositories() {
}, func(ctx context.Context, _ *user_model.User, config Config) error {
rhcConfig := config.(*RepoHealthCheckConfig)
 // the git args are set by config, they are safe to be trusted
- args := make([]git.CmdArg, 0, len(rhcConfig.Args))
- for _, arg := range rhcConfig.Args {
- args = append(args, git.CmdArg(arg))
- }
- return repo_service.GitGcRepos(ctx, rhcConfig.Timeout, args...)
+ return repo_service.GitGcRepos(ctx, rhcConfig.Timeout, git.ToTrustedCmdArgs(rhcConfig.Args))
})
}
diff --git a/services/forms/package_form.go b/services/forms/package_form.go
index 734bb05dc..e78e64ef7 100644
--- a/services/forms/package_form.go
+++ b/services/forms/package_form.go
@@ -15,7 +15,7 @@ import (
type PackageCleanupRuleForm struct {
ID int64
Enabled bool
- Type string `binding:"Required;In(composer,conan,container,generic,helm,maven,npm,nuget,pub,pypi,rubygems,vagrant)"`
+ Type string `binding:"Required;In(composer,conan,conda,container,generic,helm,maven,npm,nuget,pub,pypi,rubygems,vagrant)"`
KeepCount int `binding:"In(0,1,5,10,25,50,100)"`
KeepPattern string `binding:"RegexPattern"`
RemoveDays int `binding:"In(0,7,14,30,60,90,180)"`
diff --git a/services/forms/repo_form.go b/services/forms/repo_form.go
index b7687af2b..436d79df6 100644
--- a/services/forms/repo_form.go
+++ b/services/forms/repo_form.go
@@ -146,8 +146,10 @@ type RepoSettingForm struct {
ExternalTrackerRegexpPattern string
EnableCloseIssuesViaCommitInAnyBranch bool
EnableProjects bool
+ EnableReleases bool
EnablePackages bool
EnablePulls bool
+ EnableActions bool
PullsIgnoreWhitespace bool
PullsAllowMerge bool
PullsAllowRebase bool
diff --git a/services/forms/runner.go b/services/forms/runner.go
new file mode 100644
index 000000000..906306034
--- /dev/null
+++ b/services/forms/runner.go
@@ -0,0 +1,25 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package forms
+
+import (
+ "net/http"
+
+ "code.gitea.io/gitea/modules/context"
+ "code.gitea.io/gitea/modules/web/middleware"
+
+ "gitea.com/go-chi/binding"
+)
+
+// EditRunnerForm form for admin to edit a runner
+type EditRunnerForm struct {
+ Description string
+ CustomLabels string // comma-separated
+}
+
+// Validate validates form fields
+func (f *EditRunnerForm) Validate(req *http.Request, errs binding.Errors) binding.Errors {
+ ctx := context.GetContext(req)
+ return middleware.Validate(errs, ctx.Data, f, ctx.Locale)
+}
diff --git a/services/gitdiff/gitdiff.go b/services/gitdiff/gitdiff.go
index d3ee93ec9..4a74c1a89 100644
--- a/services/gitdiff/gitdiff.go
+++ b/services/gitdiff/gitdiff.go
@@ -1056,7 +1056,7 @@ type DiffOptions struct {
MaxLines int
MaxLineCharacters int
MaxFiles int
- WhitespaceBehavior git.CmdArg
+ WhitespaceBehavior git.TrustedCmdArgs
DirectComparison bool
}
@@ -1071,38 +1071,22 @@ func GetDiff(gitRepo *git.Repository, opts *DiffOptions, files ...string) (*Diff
return nil, err
}
- argsLength := 6
- if len(opts.WhitespaceBehavior) > 0 {
- argsLength++
- }
- if len(opts.SkipTo) > 0 {
- argsLength++
- }
- if len(files) > 0 {
- argsLength += len(files) + 1
- }
-
- diffArgs := make([]git.CmdArg, 0, argsLength)
+ cmdDiff := git.NewCommand(gitRepo.Ctx)
if (len(opts.BeforeCommitID) == 0 || opts.BeforeCommitID == git.EmptySHA) && commit.ParentCount() == 0 {
- diffArgs = append(diffArgs, "diff", "--src-prefix=\\a/", "--dst-prefix=\\b/", "-M")
- if len(opts.WhitespaceBehavior) != 0 {
- diffArgs = append(diffArgs, opts.WhitespaceBehavior)
- }
- // append empty tree ref
- diffArgs = append(diffArgs, "4b825dc642cb6eb9a060e54bf8d69288fbee4904")
- diffArgs = append(diffArgs, git.CmdArgCheck(opts.AfterCommitID))
+ cmdDiff.AddArguments("diff", "--src-prefix=\\a/", "--dst-prefix=\\b/", "-M").
+ AddArguments(opts.WhitespaceBehavior...).
+ AddArguments("4b825dc642cb6eb9a060e54bf8d69288fbee4904"). // append empty tree ref
+ AddDynamicArguments(opts.AfterCommitID)
} else {
actualBeforeCommitID := opts.BeforeCommitID
if len(actualBeforeCommitID) == 0 {
parentCommit, _ := commit.Parent(0)
actualBeforeCommitID = parentCommit.ID.String()
}
- diffArgs = append(diffArgs, "diff", "--src-prefix=\\a/", "--dst-prefix=\\b/", "-M")
- if len(opts.WhitespaceBehavior) != 0 {
- diffArgs = append(diffArgs, opts.WhitespaceBehavior)
- }
- diffArgs = append(diffArgs, git.CmdArgCheck(actualBeforeCommitID))
- diffArgs = append(diffArgs, git.CmdArgCheck(opts.AfterCommitID))
+
+ cmdDiff.AddArguments("diff", "--src-prefix=\\a/", "--dst-prefix=\\b/", "-M").
+ AddArguments(opts.WhitespaceBehavior...).
+ AddDynamicArguments(actualBeforeCommitID, opts.AfterCommitID)
opts.BeforeCommitID = actualBeforeCommitID
}
@@ -1111,16 +1095,11 @@ func GetDiff(gitRepo *git.Repository, opts *DiffOptions, files ...string) (*Diff
// the skipping for us
parsePatchSkipToFile := opts.SkipTo
if opts.SkipTo != "" && git.CheckGitVersionAtLeast("2.31") == nil {
- diffArgs = append(diffArgs, git.CmdArg("--skip-to="+opts.SkipTo))
+ cmdDiff.AddOptionFormat("--skip-to=%s", opts.SkipTo)
parsePatchSkipToFile = ""
}
- if len(files) > 0 {
- diffArgs = append(diffArgs, "--")
- for _, file := range files {
- diffArgs = append(diffArgs, git.CmdArg(file)) // it's safe to cast it to CmdArg because there is a "--" before
- }
- }
+ cmdDiff.AddDashesAndList(files...)
reader, writer := io.Pipe()
defer func() {
@@ -1128,10 +1107,9 @@ func GetDiff(gitRepo *git.Repository, opts *DiffOptions, files ...string) (*Diff
_ = writer.Close()
}()
- go func(ctx context.Context, diffArgs []git.CmdArg, repoPath string, writer *io.PipeWriter) {
- cmd := git.NewCommand(ctx, diffArgs...)
- cmd.SetDescription(fmt.Sprintf("GetDiffRange [repo_path: %s]", repoPath))
- if err := cmd.Run(&git.RunOpts{
+ go func() {
+ cmdDiff.SetDescription(fmt.Sprintf("GetDiffRange [repo_path: %s]", repoPath))
+ if err := cmdDiff.Run(&git.RunOpts{
Timeout: time.Duration(setting.Git.Timeout.Default) * time.Second,
Dir: repoPath,
Stderr: os.Stderr,
@@ -1141,7 +1119,7 @@ func GetDiff(gitRepo *git.Repository, opts *DiffOptions, files ...string) (*Diff
}
_ = writer.Close()
- }(gitRepo.Ctx, diffArgs, repoPath, writer)
+ }()
diff, err := ParsePatch(opts.MaxLines, opts.MaxLineCharacters, opts.MaxFiles, reader, parsePatchSkipToFile)
if err != nil {
@@ -1201,16 +1179,16 @@ func GetDiff(gitRepo *git.Repository, opts *DiffOptions, files ...string) (*Diff
separator = ".."
}
- shortstatArgs := []git.CmdArg{git.CmdArgCheck(opts.BeforeCommitID + separator + opts.AfterCommitID)}
+ diffPaths := []string{opts.BeforeCommitID + separator + opts.AfterCommitID}
if len(opts.BeforeCommitID) == 0 || opts.BeforeCommitID == git.EmptySHA {
- shortstatArgs = []git.CmdArg{git.EmptyTreeSHA, git.CmdArgCheck(opts.AfterCommitID)}
+ diffPaths = []string{git.EmptyTreeSHA, opts.AfterCommitID}
}
- diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, shortstatArgs...)
+ diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, nil, diffPaths...)
if err != nil && strings.Contains(err.Error(), "no merge base") {
// git >= 2.28 now returns an error if base and head have become unrelated.
// previously it would return the results of git diff --shortstat base head so let's try that...
- shortstatArgs = []git.CmdArg{git.CmdArgCheck(opts.BeforeCommitID), git.CmdArgCheck(opts.AfterCommitID)}
- diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, shortstatArgs...)
+ diffPaths = []string{opts.BeforeCommitID, opts.AfterCommitID}
+ diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(gitRepo.Ctx, repoPath, nil, diffPaths...)
}
if err != nil {
return nil, err
@@ -1324,17 +1302,17 @@ func CommentMustAsDiff(c *issues_model.Comment) *Diff {
}
// GetWhitespaceFlag returns git diff flag for treating whitespaces
-func GetWhitespaceFlag(whitespaceBehavior string) git.CmdArg {
- whitespaceFlags := map[string]string{
- "ignore-all": "-w",
- "ignore-change": "-b",
- "ignore-eol": "--ignore-space-at-eol",
- "show-all": "",
+func GetWhitespaceFlag(whitespaceBehavior string) git.TrustedCmdArgs {
+ whitespaceFlags := map[string]git.TrustedCmdArgs{
+ "ignore-all": {"-w"},
+ "ignore-change": {"-b"},
+ "ignore-eol": {"--ignore-space-at-eol"},
+ "show-all": nil,
}
if flag, ok := whitespaceFlags[whitespaceBehavior]; ok {
- return git.CmdArg(flag)
+ return flag
}
log.Warn("unknown whitespace behavior: %q, default to 'show-all'", whitespaceBehavior)
- return ""
+ return nil
}
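A minimal usage sketch of the updated API, assuming a caller that already holds a *git.Repository; the commit IDs and behavior string are placeholders taken from elsewhere in this diff. GetWhitespaceFlag now returns a git.TrustedCmdArgs slice that GetDiff expands into the diff command, rather than a single CmdArg:

	diff, err := gitdiff.GetDiff(gitRepo, &gitdiff.DiffOptions{
		BeforeCommitID:     "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
		AfterCommitID:      "bd7063cc7c04689c4d082183d32a604ed27a24f9",
		WhitespaceBehavior: gitdiff.GetWhitespaceFlag("ignore-all"), // git.TrustedCmdArgs{"-w"}
	})
	if err != nil {
		return err
	}
	_ = diff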
diff --git a/services/gitdiff/gitdiff_test.go b/services/gitdiff/gitdiff_test.go
index 267f0e4cf..eb9ed862e 100644
--- a/services/gitdiff/gitdiff_test.go
+++ b/services/gitdiff/gitdiff_test.go
@@ -626,7 +626,7 @@ func TestGetDiffRangeWithWhitespaceBehavior(t *testing.T) {
return
}
defer gitRepo.Close()
- for _, behavior := range []git.CmdArg{"-w", "--ignore-space-at-eol", "-b", ""} {
+ for _, behavior := range []git.TrustedCmdArgs{{"-w"}, {"--ignore-space-at-eol"}, {"-b"}, nil} {
diffs, err := GetDiff(gitRepo,
&DiffOptions{
AfterCommitID: "bd7063cc7c04689c4d082183d32a604ed27a24f9",
diff --git a/services/mailer/incoming/incoming_handler.go b/services/mailer/incoming/incoming_handler.go
index 173b362a5..d89a5eab3 100644
--- a/services/mailer/incoming/incoming_handler.go
+++ b/services/mailer/incoming/incoming_handler.go
@@ -71,11 +71,17 @@ func (h *ReplyHandler) Handle(ctx context.Context, content *MailContent, doer *u
return err
}
- if !perm.CanWriteIssuesOrPulls(issue.IsPull) || issue.IsLocked && !doer.IsAdmin {
+ // Locked issues require write permissions
+ if issue.IsLocked && !perm.CanWriteIssuesOrPulls(issue.IsPull) && !doer.IsAdmin {
log.Debug("can't write issue or pull")
return nil
}
+ if !perm.CanReadIssuesOrPulls(issue.IsPull) {
+ log.Debug("can't read issue or pull")
+ return nil
+ }
+
switch r := ref.(type) {
case *issues_model.Issue:
attachmentIDs := make([]string, 0, len(content.Attachments))
diff --git a/services/mirror/mirror_pull.go b/services/mirror/mirror_pull.go
index 98e8d122a..7dee90352 100644
--- a/services/mirror/mirror_pull.go
+++ b/services/mirror/mirror_pull.go
@@ -203,11 +203,11 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo
log.Trace("SyncMirrors [repo: %-v]: running git remote update...", m.Repo)
- gitArgs := []git.CmdArg{"remote", "update"}
+ cmd := git.NewCommand(ctx, "remote", "update")
if m.EnablePrune {
- gitArgs = append(gitArgs, "--prune")
+ cmd.AddArguments("--prune")
}
- gitArgs = append(gitArgs, git.CmdArgCheck(m.GetRemoteName()))
+ cmd.AddDynamicArguments(m.GetRemoteName())
remoteURL, remoteErr := git.GetRemoteURL(ctx, repoPath, m.GetRemoteName())
if remoteErr != nil {
@@ -217,7 +217,7 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo
stdoutBuilder := strings.Builder{}
stderrBuilder := strings.Builder{}
- if err := git.NewCommand(ctx, gitArgs...).
+ if err := cmd.
SetDescription(fmt.Sprintf("Mirror.runSync: %s", m.Repo.FullName())).
Run(&git.RunOpts{
Timeout: timeout,
@@ -243,7 +243,7 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo
// Successful prune - reattempt mirror
stderrBuilder.Reset()
stdoutBuilder.Reset()
- if err = git.NewCommand(ctx, gitArgs...).
+ if err = cmd.
SetDescription(fmt.Sprintf("Mirror.runSync: %s", m.Repo.FullName())).
Run(&git.RunOpts{
Timeout: timeout,
diff --git a/services/mirror/mirror_push.go b/services/mirror/mirror_push.go
index c0c68a3f5..2c1b00b60 100644
--- a/services/mirror/mirror_push.go
+++ b/services/mirror/mirror_push.go
@@ -37,10 +37,10 @@ func AddPushMirrorRemote(ctx context.Context, m *repo_model.PushMirror, addr str
if _, _, err := cmd.RunStdString(&git.RunOpts{Dir: path}); err != nil {
return err
}
- if _, _, err := git.NewCommand(ctx, "config", "--add", git.CmdArg("remote."+m.RemoteName+".push"), "+refs/heads/*:refs/heads/*").RunStdString(&git.RunOpts{Dir: path}); err != nil {
+ if _, _, err := git.NewCommand(ctx, "config", "--add").AddDynamicArguments("remote."+m.RemoteName+".push", "+refs/heads/*:refs/heads/*").RunStdString(&git.RunOpts{Dir: path}); err != nil {
return err
}
- if _, _, err := git.NewCommand(ctx, "config", "--add", git.CmdArg("remote."+m.RemoteName+".push"), "+refs/tags/*:refs/tags/*").RunStdString(&git.RunOpts{Dir: path}); err != nil {
+ if _, _, err := git.NewCommand(ctx, "config", "--add").AddDynamicArguments("remote."+m.RemoteName+".push", "+refs/tags/*:refs/tags/*").RunStdString(&git.RunOpts{Dir: path}); err != nil {
return err
}
return nil
diff --git a/services/packages/packages.go b/services/packages/packages.go
index 410e73c04..9e52cb145 100644
--- a/services/packages/packages.go
+++ b/services/packages/packages.go
@@ -173,7 +173,7 @@ func createPackageAndVersion(ctx context.Context, pvci *PackageCreationInfo, all
}
if versionCreated {
- if err := checkCountQuotaExceeded(ctx, pvci.Creator, pvci.Owner); err != nil {
+ if err := CheckCountQuotaExceeded(ctx, pvci.Creator, pvci.Owner); err != nil {
return nil, false, err
}
@@ -240,7 +240,7 @@ func NewPackageBlob(hsr packages_module.HashedSizeReader) *packages_model.Packag
func addFileToPackageVersion(ctx context.Context, pv *packages_model.PackageVersion, pvi *PackageInfo, pfci *PackageFileCreationInfo) (*packages_model.PackageFile, *packages_model.PackageBlob, bool, error) {
log.Trace("Adding package file: %v, %s", pv.ID, pfci.Filename)
- if err := checkSizeQuotaExceeded(ctx, pfci.Creator, pvi.Owner, pvi.PackageType, pfci.Data.Size()); err != nil {
+ if err := CheckSizeQuotaExceeded(ctx, pfci.Creator, pvi.Owner, pvi.PackageType, pfci.Data.Size()); err != nil {
return nil, nil, false, err
}
@@ -302,7 +302,9 @@ func addFileToPackageVersion(ctx context.Context, pv *packages_model.PackageVers
return pf, pb, !exists, nil
}
-func checkCountQuotaExceeded(ctx context.Context, doer, owner *user_model.User) error {
+// CheckCountQuotaExceeded checks if the owner already has more packages than allowed
+// The check is skipped if the doer is an admin.
+func CheckCountQuotaExceeded(ctx context.Context, doer, owner *user_model.User) error {
if doer.IsAdmin {
return nil
}
@@ -324,7 +326,9 @@ func checkCountQuotaExceeded(ctx context.Context, doer, owner *user_model.User)
return nil
}
-func checkSizeQuotaExceeded(ctx context.Context, doer, owner *user_model.User, packageType packages_model.Type, uploadSize int64) error {
+// CheckSizeQuotaExceeded checks if the upload size exceeds the allowed size for the package type
+// The check is skipped if the doer is an admin.
+func CheckSizeQuotaExceeded(ctx context.Context, doer, owner *user_model.User, packageType packages_model.Type, uploadSize int64) error {
if doer.IsAdmin {
return nil
}
@@ -335,6 +339,8 @@ func checkSizeQuotaExceeded(ctx context.Context, doer, owner *user_model.User, p
typeSpecificSize = setting.Packages.LimitSizeComposer
case packages_model.TypeConan:
typeSpecificSize = setting.Packages.LimitSizeConan
+ case packages_model.TypeConda:
+ typeSpecificSize = setting.Packages.LimitSizeConda
case packages_model.TypeContainer:
typeSpecificSize = setting.Packages.LimitSizeContainer
case packages_model.TypeGeneric:
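A hypothetical caller illustrating why these quota checks are now exported: a package upload endpoint can reject an oversized upload before doing any further work. The doer/owner variables, size value and import alias are assumptions, not part of this change:

	if err := packages_service.CheckSizeQuotaExceeded(ctx, doer, owner, packages_model.TypeConda, uploadSize); err != nil {
		// quota exceeded (the check is skipped for admins): reject the upload
		return err
	}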
diff --git a/services/pull/check.go b/services/pull/check.go
index db8637890..481491c73 100644
--- a/services/pull/check.go
+++ b/services/pull/check.go
@@ -8,7 +8,6 @@ import (
"context"
"errors"
"fmt"
- "os"
"strconv"
"strings"
@@ -27,7 +26,6 @@ import (
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/queue"
"code.gitea.io/gitea/modules/timeutil"
- "code.gitea.io/gitea/modules/util"
asymkey_service "code.gitea.io/gitea/services/asymkey"
)
@@ -50,14 +48,14 @@ func AddToTaskQueue(pr *issues_model.PullRequest) {
pr.Status = issues_model.PullRequestStatusChecking
err := pr.UpdateColsIfNotMerged(db.DefaultContext, "status")
if err != nil {
- log.Error("AddToTaskQueue.UpdateCols[%d].(add to queue): %v", pr.ID, err)
+ log.Error("AddToTaskQueue(%-v).UpdateCols.(add to queue): %v", pr, err)
} else {
- log.Trace("Adding PR ID: %d to the test pull requests queue", pr.ID)
+ log.Trace("Adding %-v to the test pull requests queue", pr)
}
return err
})
if err != nil && err != queue.ErrAlreadyInQueue {
- log.Error("Error adding prID %d to the test pull requests queue: %v", pr.ID, err)
+ log.Error("Error adding %-v to the test pull requests queue: %v", pr, err)
}
}
@@ -69,12 +67,14 @@ func CheckPullMergable(stdCtx context.Context, doer *user_model.User, perm *acce
}
if err := pr.LoadIssue(ctx); err != nil {
+ log.Error("Unable to load issue[%d] for %-v: %v", pr.IssueID, pr, err)
return err
} else if pr.Issue.IsClosed {
return ErrIsClosed
}
if allowedMerge, err := IsUserAllowedToMerge(ctx, pr, *perm, doer); err != nil {
+ log.Error("Error whilst checking if %-v is allowed to merge %-v: %v", doer, pr, err)
return err
} else if !allowedMerge {
return ErrUserNotAllowedToMerge
@@ -98,15 +98,19 @@ func CheckPullMergable(stdCtx context.Context, doer *user_model.User, perm *acce
}
if err := CheckPullBranchProtections(ctx, pr, false); err != nil {
- if models.IsErrDisallowedToMerge(err) {
- if force {
- if isRepoAdmin, err2 := access_model.IsUserRepoAdmin(ctx, pr.BaseRepo, doer); err2 != nil {
- return err2
- } else if !isRepoAdmin {
- return err
- }
- }
- } else {
+ if !models.IsErrDisallowedToMerge(err) {
+ log.Error("Error whilst checking pull branch protection for %-v: %v", pr, err)
+ return err
+ }
+
+ if !force {
+ return err
+ }
+
+ if isRepoAdmin, err2 := access_model.IsUserRepoAdmin(ctx, pr.BaseRepo, doer); err2 != nil {
+ log.Error("Unable to check if %-v is a repo admin in %-v: %v", doer, pr.BaseRepo, err2)
+ return err2
+ } else if !isRepoAdmin {
return err
}
}
@@ -144,7 +148,7 @@ func isSignedIfRequired(ctx context.Context, pr *issues_model.PullRequest, doer
// checkAndUpdateStatus checks if the pull request can leave the checking status,
// and sets it to either conflict or mergeable.
func checkAndUpdateStatus(ctx context.Context, pr *issues_model.PullRequest) {
- // Status is not changed to conflict means mergeable.
+ // If status has not been changed to conflict by testPatch then we are mergeable
if pr.Status == issues_model.PullRequestStatusChecking {
pr.Status = issues_model.PullRequestStatusMergeable
}
@@ -152,79 +156,69 @@ func checkAndUpdateStatus(ctx context.Context, pr *issues_model.PullRequest) {
// Make sure there is no waiting test to process before leaving the checking status.
has, err := prPatchCheckerQueue.Has(strconv.FormatInt(pr.ID, 10))
if err != nil {
- log.Error("Unable to check if the queue is waiting to reprocess pr.ID %d. Error: %v", pr.ID, err)
+ log.Error("Unable to check if the queue is waiting to reprocess %-v. Error: %v", pr, err)
}
- if !has {
- if err := pr.UpdateColsIfNotMerged(ctx, "merge_base", "status", "conflicted_files", "changed_protected_files"); err != nil {
- log.Error("Update[%d]: %v", pr.ID, err)
- }
+ if has {
+ log.Trace("Not updating status for %-v as it is due to be rechecked", pr)
+ return
+ }
+
+ if err := pr.UpdateColsIfNotMerged(ctx, "merge_base", "status", "conflicted_files", "changed_protected_files"); err != nil {
+ log.Error("Update[%-v]: %v", pr, err)
}
}
-// getMergeCommit checks if a pull request got merged
+// getMergeCommit checks if a pull request has been merged
// Returns the git.Commit of the pull request if merged
func getMergeCommit(ctx context.Context, pr *issues_model.PullRequest) (*git.Commit, error) {
- if pr.BaseRepo == nil {
- var err error
- pr.BaseRepo, err = repo_model.GetRepositoryByID(ctx, pr.BaseRepoID)
- if err != nil {
- return nil, fmt.Errorf("GetRepositoryByID: %w", err)
- }
- }
-
- indexTmpPath, err := os.MkdirTemp(os.TempDir(), "gitea-"+pr.BaseRepo.Name)
- if err != nil {
- return nil, fmt.Errorf("Failed to create temp dir for repository %s: %w", pr.BaseRepo.RepoPath(), err)
+ if err := pr.LoadBaseRepo(ctx); err != nil {
+ return nil, fmt.Errorf("unable to load base repo for %s: %w", pr, err)
}
- defer func() {
- if err := util.RemoveAll(indexTmpPath); err != nil {
- log.Warn("Unable to remove temporary index path: %s: Error: %v", indexTmpPath, err)
- }
- }()
- headFile := pr.GetGitRefName()
+ prHeadRef := pr.GetGitRefName()
- // Check if a pull request is merged into BaseBranch
- _, _, err = git.NewCommand(ctx, "merge-base", "--is-ancestor").AddDynamicArguments(headFile, pr.BaseBranch).
- RunStdString(&git.RunOpts{Dir: pr.BaseRepo.RepoPath(), Env: []string{"GIT_INDEX_FILE=" + indexTmpPath, "GIT_DIR=" + pr.BaseRepo.RepoPath()}})
- if err != nil {
- // Errors are signaled by a non-zero status that is not 1
+ // Check if the pull request is merged into BaseBranch
+ if _, _, err := git.NewCommand(ctx, "merge-base", "--is-ancestor").
+ AddDynamicArguments(prHeadRef, pr.BaseBranch).
+ RunStdString(&git.RunOpts{Dir: pr.BaseRepo.RepoPath()}); err != nil {
if strings.Contains(err.Error(), "exit status 1") {
+ // prHeadRef is not an ancestor of the base branch
return nil, nil
}
- return nil, fmt.Errorf("git merge-base --is-ancestor: %w", err)
+ // Errors are signaled by a non-zero status that is not 1
+ return nil, fmt.Errorf("%-v git merge-base --is-ancestor: %w", pr, err)
}
- commitIDBytes, err := os.ReadFile(pr.BaseRepo.RepoPath() + "/" + headFile)
+ // If merge-base successfully exits then prHeadRef is an ancestor of pr.BaseBranch
+
+ // Find the head commit id
+ prHeadCommitID, err := git.GetFullCommitID(ctx, pr.BaseRepo.RepoPath(), prHeadRef)
if err != nil {
- return nil, fmt.Errorf("ReadFile(%s): %w", headFile, err)
+ return nil, fmt.Errorf("GetFullCommitID(%s) in %s: %w", prHeadRef, pr.BaseRepo.FullName(), err)
}
- commitID := string(commitIDBytes)
- if len(commitID) < git.SHAFullLength {
- return nil, fmt.Errorf(`ReadFile(%s): invalid commit-ID "%s"`, headFile, commitID)
- }
- cmd := commitID[:git.SHAFullLength] + ".." + pr.BaseBranch
// Get the commit from BaseBranch where the pull request got merged
- mergeCommit, _, err := git.NewCommand(ctx, "rev-list", "--ancestry-path", "--merges", "--reverse").AddDynamicArguments(cmd).
- RunStdString(&git.RunOpts{Dir: "", Env: []string{"GIT_INDEX_FILE=" + indexTmpPath, "GIT_DIR=" + pr.BaseRepo.RepoPath()}})
+ mergeCommit, _, err := git.NewCommand(ctx, "rev-list", "--ancestry-path", "--merges", "--reverse").
+ AddDynamicArguments(prHeadCommitID + ".." + pr.BaseBranch).
+ RunStdString(&git.RunOpts{Dir: pr.BaseRepo.RepoPath()})
if err != nil {
return nil, fmt.Errorf("git rev-list --ancestry-path --merges --reverse: %w", err)
} else if len(mergeCommit) < git.SHAFullLength {
// PR was maybe fast-forwarded, so just use last commit of PR
- mergeCommit = commitID[:git.SHAFullLength]
+ mergeCommit = prHeadCommitID
}
+ mergeCommit = strings.TrimSpace(mergeCommit)
gitRepo, err := git.OpenRepository(ctx, pr.BaseRepo.RepoPath())
if err != nil {
- return nil, fmt.Errorf("OpenRepository: %w", err)
+ return nil, fmt.Errorf("%-v OpenRepository: %w", pr.BaseRepo, err)
}
defer gitRepo.Close()
- commit, err := gitRepo.GetCommit(mergeCommit[:git.SHAFullLength])
+ commit, err := gitRepo.GetCommit(mergeCommit)
if err != nil {
- return nil, fmt.Errorf("GetMergeCommit[%v]: %w", mergeCommit[:git.SHAFullLength], err)
+ return nil, fmt.Errorf("GetMergeCommit[%s]: %w", mergeCommit, err)
}
return commit, nil
@@ -234,7 +228,7 @@ func getMergeCommit(ctx context.Context, pr *issues_model.PullRequest) (*git.Com
// When a pull request has been manually merged, mark it as merged
func manuallyMerged(ctx context.Context, pr *issues_model.PullRequest) bool {
if err := pr.LoadBaseRepo(ctx); err != nil {
- log.Error("PullRequest[%d].LoadBaseRepo: %v", pr.ID, err)
+ log.Error("%-v LoadBaseRepo: %v", pr, err)
return false
}
@@ -244,47 +238,50 @@ func manuallyMerged(ctx context.Context, pr *issues_model.PullRequest) bool {
return false
}
} else {
- log.Error("PullRequest[%d].BaseRepo.GetUnit(unit.TypePullRequests): %v", pr.ID, err)
+ log.Error("%-v BaseRepo.GetUnit(unit.TypePullRequests): %v", pr, err)
return false
}
commit, err := getMergeCommit(ctx, pr)
if err != nil {
- log.Error("PullRequest[%d].getMergeCommit: %v", pr.ID, err)
+ log.Error("%-v getMergeCommit: %v", pr, err)
+ return false
+ }
+
+ if commit == nil {
+ // no merge commit found
return false
}
- if commit != nil {
- pr.MergedCommitID = commit.ID.String()
- pr.MergedUnix = timeutil.TimeStamp(commit.Author.When.Unix())
- pr.Status = issues_model.PullRequestStatusManuallyMerged
- merger, _ := user_model.GetUserByEmail(commit.Author.Email)
-
- // When the commit author is unknown set the BaseRepo owner as merger
- if merger == nil {
- if pr.BaseRepo.Owner == nil {
- if err = pr.BaseRepo.GetOwner(ctx); err != nil {
- log.Error("BaseRepo.GetOwner[%d]: %v", pr.ID, err)
- return false
- }
+
+ pr.MergedCommitID = commit.ID.String()
+ pr.MergedUnix = timeutil.TimeStamp(commit.Author.When.Unix())
+ pr.Status = issues_model.PullRequestStatusManuallyMerged
+ merger, _ := user_model.GetUserByEmail(commit.Author.Email)
+
+ // When the commit author is unknown set the BaseRepo owner as merger
+ if merger == nil {
+ if pr.BaseRepo.Owner == nil {
+ if err = pr.BaseRepo.GetOwner(ctx); err != nil {
+ log.Error("%-v BaseRepo.GetOwner: %v", pr, err)
+ return false
}
- merger = pr.BaseRepo.Owner
}
- pr.Merger = merger
- pr.MergerID = merger.ID
+ merger = pr.BaseRepo.Owner
+ }
+ pr.Merger = merger
+ pr.MergerID = merger.ID
- if merged, err := pr.SetMerged(ctx); err != nil {
- log.Error("PullRequest[%d].setMerged : %v", pr.ID, err)
- return false
- } else if !merged {
- return false
- }
+ if merged, err := pr.SetMerged(ctx); err != nil {
+ log.Error("%-v setMerged : %v", pr, err)
+ return false
+ } else if !merged {
+ return false
+ }
- notification.NotifyMergePullRequest(ctx, merger, pr)
+ notification.NotifyMergePullRequest(ctx, merger, pr)
- log.Info("manuallyMerged[%d]: Marked as manually merged into %s/%s by commit id: %s", pr.ID, pr.BaseRepo.Name, pr.BaseBranch, commit.ID.String())
- return true
- }
- return false
+ log.Info("manuallyMerged[%-v]: Marked as manually merged into %s/%s by commit id: %s", pr, pr.BaseRepo.Name, pr.BaseBranch, commit.ID.String())
+ return true
}
// InitializePullRequests checks and tests untested patches of pull requests.
@@ -300,10 +297,10 @@ func InitializePullRequests(ctx context.Context) {
return
default:
if err := prPatchCheckerQueue.PushFunc(strconv.FormatInt(prID, 10), func() error {
- log.Trace("Adding PR ID: %d to the pull requests patch checking queue", prID)
+ log.Trace("Adding PR[%d] to the pull requests patch checking queue", prID)
return nil
}); err != nil {
- log.Error("Error adding prID: %s to the pull requests patch checking queue %v", prID, err)
+ log.Error("Error adding PR[%d] to the pull requests patch checking queue %v", prID, err)
}
}
}
@@ -327,23 +324,30 @@ func testPR(id int64) {
pr, err := issues_model.GetPullRequestByID(ctx, id)
if err != nil {
- log.Error("GetPullRequestByID[%d]: %v", id, err)
+ log.Error("Unable to GetPullRequestByID[%d] for testPR: %v", id, err)
return
}
+ log.Trace("Testing %-v", pr)
+ defer func() {
+ log.Trace("Done testing %-v (status: %s)", pr, pr.Status)
+ }()
+
if pr.HasMerged {
+ log.Trace("%-v is already merged (status: %s, merge commit: %s)", pr, pr.Status, pr.MergedCommitID)
return
}
if manuallyMerged(ctx, pr) {
+ log.Trace("%-v is manually merged (status: %s, merge commit: %s)", pr, pr.Status, pr.MergedCommitID)
return
}
if err := TestPatch(pr); err != nil {
- log.Error("testPatch[%d]: %v", pr.ID, err)
+ log.Error("testPatch[%-v]: %v", pr, err)
pr.Status = issues_model.PullRequestStatusError
if err := pr.UpdateCols("status"); err != nil {
- log.Error("update pr [%d] status to PullRequestStatusError failed: %v", pr.ID, err)
+ log.Error("update pr [%-v] status to PullRequestStatusError failed: %v", pr, err)
}
return
}
diff --git a/services/pull/merge.go b/services/pull/merge.go
index 7ffbdb78b..edd5b601d 100644
--- a/services/pull/merge.go
+++ b/services/pull/merge.go
@@ -370,16 +370,16 @@ func rawMerge(ctx context.Context, pr *issues_model.PullRequest, doer *user_mode
sig := doer.NewGitSig()
committer := sig
- // Determine if we should sign
- var signArg git.CmdArg
- sign, keyID, signer, _ := asymkey_service.SignMerge(ctx, pr, doer, tmpBasePath, "HEAD", trackingBranch)
+ // Determine if we should sign. If no signKeyID, use --no-gpg-sign to countermand the sign config (from gitconfig)
+ var signArgs git.TrustedCmdArgs
+ sign, signKeyID, signer, _ := asymkey_service.SignMerge(ctx, pr, doer, tmpBasePath, "HEAD", trackingBranch)
if sign {
- signArg = git.CmdArg("-S" + keyID)
if pr.BaseRepo.GetTrustModel() == repo_model.CommitterTrustModel || pr.BaseRepo.GetTrustModel() == repo_model.CollaboratorCommitterTrustModel {
committer = signer
}
+ signArgs = git.ToTrustedCmdArgs([]string{"-S" + signKeyID})
} else {
- signArg = git.CmdArg("--no-gpg-sign")
+ signArgs = append(signArgs, "--no-gpg-sign")
}
commitTimeStr := time.Now().Format(time.RFC3339)
@@ -403,7 +403,7 @@ func rawMerge(ctx context.Context, pr *issues_model.PullRequest, doer *user_mode
return "", err
}
- if err := commitAndSignNoAuthor(ctx, pr, message, signArg, tmpBasePath, env); err != nil {
+ if err := commitAndSignNoAuthor(ctx, pr, message, signArgs, tmpBasePath, env); err != nil {
log.Error("Unable to make final commit: %v", err)
return "", err
}
@@ -505,7 +505,7 @@ func rawMerge(ctx context.Context, pr *issues_model.PullRequest, doer *user_mode
return "", err
}
if mergeStyle == repo_model.MergeStyleRebaseMerge {
- if err := commitAndSignNoAuthor(ctx, pr, message, signArg, tmpBasePath, env); err != nil {
+ if err := commitAndSignNoAuthor(ctx, pr, message, signArgs, tmpBasePath, env); err != nil {
log.Error("Unable to make final commit: %v", err)
return "", err
}
@@ -523,35 +523,22 @@ func rawMerge(ctx context.Context, pr *issues_model.PullRequest, doer *user_mode
return "", fmt.Errorf("LoadPoster: %w", err)
}
sig := pr.Issue.Poster.NewGitSig()
- if signArg == "" {
- if err := git.NewCommand(ctx, "commit", git.CmdArg(fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email)), "-m").AddDynamicArguments(message).
- Run(&git.RunOpts{
- Env: env,
- Dir: tmpBasePath,
- Stdout: &outbuf,
- Stderr: &errbuf,
- }); err != nil {
- log.Error("git commit [%s:%s -> %s:%s]: %v\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
- return "", fmt.Errorf("git commit [%s:%s -> %s:%s]: %w\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
- }
- } else {
- if setting.Repository.PullRequest.AddCoCommitterTrailers && committer.String() != sig.String() {
- // add trailer
- message += fmt.Sprintf("\nCo-authored-by: %s\nCo-committed-by: %s\n", sig.String(), sig.String())
- }
- if err := git.NewCommand(ctx, "commit").
- AddArguments(signArg).
- AddArguments(git.CmdArg(fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email))).
- AddArguments("-m").AddDynamicArguments(message).
- Run(&git.RunOpts{
- Env: env,
- Dir: tmpBasePath,
- Stdout: &outbuf,
- Stderr: &errbuf,
- }); err != nil {
- log.Error("git commit [%s:%s -> %s:%s]: %v\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
- return "", fmt.Errorf("git commit [%s:%s -> %s:%s]: %w\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
- }
+ if setting.Repository.PullRequest.AddCoCommitterTrailers && committer.String() != sig.String() {
+ // add trailer
+ message += fmt.Sprintf("\nCo-authored-by: %s\nCo-committed-by: %s\n", sig.String(), sig.String())
+ }
+ if err := git.NewCommand(ctx, "commit").
+ AddArguments(signArgs...).
+ AddOptionFormat("--author='%s <%s>'", sig.Name, sig.Email).
+ AddOptionValues("-m", message).
+ Run(&git.RunOpts{
+ Env: env,
+ Dir: tmpBasePath,
+ Stdout: &outbuf,
+ Stderr: &errbuf,
+ }); err != nil {
+ log.Error("git commit [%s:%s -> %s:%s]: %v\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
+ return "", fmt.Errorf("git commit [%s:%s -> %s:%s]: %w\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
}
outbuf.Reset()
errbuf.Reset()
@@ -649,30 +636,17 @@ func rawMerge(ctx context.Context, pr *issues_model.PullRequest, doer *user_mode
return mergeCommitID, nil
}
-func commitAndSignNoAuthor(ctx context.Context, pr *issues_model.PullRequest, message string, signArg git.CmdArg, tmpBasePath string, env []string) error {
+func commitAndSignNoAuthor(ctx context.Context, pr *issues_model.PullRequest, message string, signArgs git.TrustedCmdArgs, tmpBasePath string, env []string) error {
var outbuf, errbuf strings.Builder
- if signArg == "" {
- if err := git.NewCommand(ctx, "commit", "-m").AddDynamicArguments(message).
- Run(&git.RunOpts{
- Env: env,
- Dir: tmpBasePath,
- Stdout: &outbuf,
- Stderr: &errbuf,
- }); err != nil {
- log.Error("git commit [%s:%s -> %s:%s]: %v\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
- return fmt.Errorf("git commit [%s:%s -> %s:%s]: %w\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
- }
- } else {
- if err := git.NewCommand(ctx, "commit").AddArguments(signArg).AddArguments("-m").AddDynamicArguments(message).
- Run(&git.RunOpts{
- Env: env,
- Dir: tmpBasePath,
- Stdout: &outbuf,
- Stderr: &errbuf,
- }); err != nil {
- log.Error("git commit [%s:%s -> %s:%s]: %v\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
- return fmt.Errorf("git commit [%s:%s -> %s:%s]: %w\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
- }
+ if err := git.NewCommand(ctx, "commit").AddArguments(signArgs...).AddOptionValues("-m", message).
+ Run(&git.RunOpts{
+ Env: env,
+ Dir: tmpBasePath,
+ Stdout: &outbuf,
+ Stderr: &errbuf,
+ }); err != nil {
+ log.Error("git commit [%s:%s -> %s:%s]: %v\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
+ return fmt.Errorf("git commit [%s:%s -> %s:%s]: %w\n%s\n%s", pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseRepo.FullName(), pr.BaseBranch, err, outbuf.String(), errbuf.String())
}
return nil
}
diff --git a/services/pull/patch.go b/services/pull/patch.go
index 26a72a737..c2ccc75bd 100644
--- a/services/pull/patch.go
+++ b/services/pull/patch.go
@@ -60,7 +60,7 @@ var patchErrorSuffices = []string{
// TestPatch will test whether a simple patch will apply
func TestPatch(pr *issues_model.PullRequest) error {
- ctx, _, finished := process.GetManager().AddContext(graceful.GetManager().HammerContext(), fmt.Sprintf("TestPatch: Repo[%d]#%d", pr.BaseRepoID, pr.Index))
+ ctx, _, finished := process.GetManager().AddContext(graceful.GetManager().HammerContext(), fmt.Sprintf("TestPatch: %s", pr))
defer finished()
// Clone base repo.
@@ -376,16 +376,16 @@ func checkConflicts(ctx context.Context, pr *issues_model.PullRequest, gitRepo *
prConfig := prUnit.PullRequestsConfig()
// 6. Prepare the arguments to apply the patch against the index
- args := []git.CmdArg{"apply", "--check", "--cached"}
+ cmdApply := git.NewCommand(gitRepo.Ctx, "apply", "--check", "--cached")
if prConfig.IgnoreWhitespaceConflicts {
- args = append(args, "--ignore-whitespace")
+ cmdApply.AddArguments("--ignore-whitespace")
}
is3way := false
if git.CheckGitVersionAtLeast("2.32.0") == nil {
- args = append(args, "--3way")
+ cmdApply.AddArguments("--3way")
is3way = true
}
- args = append(args, git.CmdArgCheck(patchPath))
+ cmdApply.AddDynamicArguments(patchPath)
// 7. Prep the pipe:
// - Here we could do the equivalent of:
@@ -407,71 +407,70 @@ func checkConflicts(ctx context.Context, pr *issues_model.PullRequest, gitRepo *
// 8. Run the check command
conflict = false
- err = git.NewCommand(gitRepo.Ctx, args...).
- Run(&git.RunOpts{
- Dir: tmpBasePath,
- Stderr: stderrWriter,
- PipelineFunc: func(ctx context.Context, cancel context.CancelFunc) error {
- // Close the writer end of the pipe to begin processing
- _ = stderrWriter.Close()
- defer func() {
- // Close the reader on return to terminate the git command if necessary
- _ = stderrReader.Close()
- }()
-
- const prefix = "error: patch failed:"
- const errorPrefix = "error: "
- const threewayFailed = "Failed to perform three-way merge..."
- const appliedPatchPrefix = "Applied patch to '"
- const withConflicts = "' with conflicts."
-
- conflicts := make(container.Set[string])
-
- // Now scan the output from the command
- scanner := bufio.NewScanner(stderrReader)
- for scanner.Scan() {
- line := scanner.Text()
- log.Trace("PullRequest[%d].testPatch: stderr: %s", pr.ID, line)
- if strings.HasPrefix(line, prefix) {
- conflict = true
- filepath := strings.TrimSpace(strings.Split(line[len(prefix):], ":")[0])
- conflicts.Add(filepath)
- } else if is3way && line == threewayFailed {
- conflict = true
- } else if strings.HasPrefix(line, errorPrefix) {
- conflict = true
- for _, suffix := range patchErrorSuffices {
- if strings.HasSuffix(line, suffix) {
- filepath := strings.TrimSpace(strings.TrimSuffix(line[len(errorPrefix):], suffix))
- if filepath != "" {
- conflicts.Add(filepath)
- }
- break
+ err = cmdApply.Run(&git.RunOpts{
+ Dir: tmpBasePath,
+ Stderr: stderrWriter,
+ PipelineFunc: func(ctx context.Context, cancel context.CancelFunc) error {
+ // Close the writer end of the pipe to begin processing
+ _ = stderrWriter.Close()
+ defer func() {
+ // Close the reader on return to terminate the git command if necessary
+ _ = stderrReader.Close()
+ }()
+
+ const prefix = "error: patch failed:"
+ const errorPrefix = "error: "
+ const threewayFailed = "Failed to perform three-way merge..."
+ const appliedPatchPrefix = "Applied patch to '"
+ const withConflicts = "' with conflicts."
+
+ conflicts := make(container.Set[string])
+
+ // Now scan the output from the command
+ scanner := bufio.NewScanner(stderrReader)
+ for scanner.Scan() {
+ line := scanner.Text()
+ log.Trace("PullRequest[%d].testPatch: stderr: %s", pr.ID, line)
+ if strings.HasPrefix(line, prefix) {
+ conflict = true
+ filepath := strings.TrimSpace(strings.Split(line[len(prefix):], ":")[0])
+ conflicts.Add(filepath)
+ } else if is3way && line == threewayFailed {
+ conflict = true
+ } else if strings.HasPrefix(line, errorPrefix) {
+ conflict = true
+ for _, suffix := range patchErrorSuffices {
+ if strings.HasSuffix(line, suffix) {
+ filepath := strings.TrimSpace(strings.TrimSuffix(line[len(errorPrefix):], suffix))
+ if filepath != "" {
+ conflicts.Add(filepath)
}
- }
- } else if is3way && strings.HasPrefix(line, appliedPatchPrefix) && strings.HasSuffix(line, withConflicts) {
- conflict = true
- filepath := strings.TrimPrefix(strings.TrimSuffix(line, withConflicts), appliedPatchPrefix)
- if filepath != "" {
- conflicts.Add(filepath)
+ break
}
}
- // only list 10 conflicted files
- if len(conflicts) >= 10 {
- break
+ } else if is3way && strings.HasPrefix(line, appliedPatchPrefix) && strings.HasSuffix(line, withConflicts) {
+ conflict = true
+ filepath := strings.TrimPrefix(strings.TrimSuffix(line, withConflicts), appliedPatchPrefix)
+ if filepath != "" {
+ conflicts.Add(filepath)
}
}
+ // only list 10 conflicted files
+ if len(conflicts) >= 10 {
+ break
+ }
+ }
- if len(conflicts) > 0 {
- pr.ConflictedFiles = make([]string, 0, len(conflicts))
- for key := range conflicts {
- pr.ConflictedFiles = append(pr.ConflictedFiles, key)
- }
+ if len(conflicts) > 0 {
+ pr.ConflictedFiles = make([]string, 0, len(conflicts))
+ for key := range conflicts {
+ pr.ConflictedFiles = append(pr.ConflictedFiles, key)
}
+ }
- return nil
- },
- })
+ return nil
+ },
+ })
// 9. Check if the number of conflicted files is non-zero; "err" could be non-nil, so we should ignore it if conflicts were found.
// Note: "err" could be non-nil because, with 3-way merge enabled, git apply does not return an error when conflicts are found.
diff --git a/services/pull/pull.go b/services/pull/pull.go
index 7f81def6d..317875d21 100644
--- a/services/pull/pull.go
+++ b/services/pull/pull.go
@@ -4,14 +4,12 @@
package pull
import (
- "bufio"
- "bytes"
"context"
"fmt"
"io"
+ "os"
"regexp"
"strings"
- "time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/models/db"
@@ -29,6 +27,7 @@ import (
repo_module "code.gitea.io/gitea/modules/repository"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/sync"
+ "code.gitea.io/gitea/modules/util"
issue_service "code.gitea.io/gitea/services/issue"
)
@@ -299,7 +298,6 @@ func AddTestPullRequestTask(doer *user_model.User, repoID int64, branch string,
}
}
- pr.Issue.PullRequest = pr
notification.NotifyPullRequestSynchronized(ctx, doer, pr)
}
}
@@ -351,69 +349,56 @@ func AddTestPullRequestTask(doer *user_model.User, repoID int64, branch string,
// checkIfPRContentChanged checks if the diff to the target branch has been changed by the push
// A commit can be considered to leave the PR untouched if the patch/diff with its merge base is unchanged
func checkIfPRContentChanged(ctx context.Context, pr *issues_model.PullRequest, oldCommitID, newCommitID string) (hasChanged bool, err error) {
- if err = pr.LoadHeadRepo(ctx); err != nil {
- return false, fmt.Errorf("LoadHeadRepo: %w", err)
- } else if pr.HeadRepo == nil {
- // corrupt data assumed changed
- return true, nil
- }
-
- if err = pr.LoadBaseRepo(ctx); err != nil {
- return false, fmt.Errorf("LoadBaseRepo: %w", err)
- }
-
- headGitRepo, err := git.OpenRepository(ctx, pr.HeadRepo.RepoPath())
+ tmpBasePath, err := createTemporaryRepo(ctx, pr)
if err != nil {
- return false, fmt.Errorf("OpenRepository: %w", err)
- }
- defer headGitRepo.Close()
-
- // Add a temporary remote.
- tmpRemote := "checkIfPRContentChanged-" + fmt.Sprint(time.Now().UnixNano())
- if err = headGitRepo.AddRemote(tmpRemote, pr.BaseRepo.RepoPath(), true); err != nil {
- return false, fmt.Errorf("AddRemote: %s/%s-%s: %w", pr.HeadRepo.OwnerName, pr.HeadRepo.Name, tmpRemote, err)
+ log.Error("CreateTemporaryRepo: %v", err)
+ return false, err
}
defer func() {
- if err := headGitRepo.RemoveRemote(tmpRemote); err != nil {
- log.Error("checkIfPRContentChanged: RemoveRemote: %s/%s-%s: %v", pr.HeadRepo.OwnerName, pr.HeadRepo.Name, tmpRemote, err)
+ if err := repo_module.RemoveTemporaryPath(tmpBasePath); err != nil {
+ log.Error("checkIfPRContentChanged: RemoveTemporaryPath: %s", err)
}
}()
- // To synchronize repo and get a base ref
- _, base, err := headGitRepo.GetMergeBase(tmpRemote, pr.BaseBranch, pr.HeadBranch)
+
+ tmpRepo, err := git.OpenRepository(ctx, tmpBasePath)
if err != nil {
- return false, fmt.Errorf("GetMergeBase: %w", err)
+ return false, fmt.Errorf("OpenRepository: %w", err)
}
+ defer tmpRepo.Close()
- diffBefore := &bytes.Buffer{}
- diffAfter := &bytes.Buffer{}
- if err := headGitRepo.GetDiffFromMergeBase(base, oldCommitID, diffBefore); err != nil {
- // If old commit not found, assume changed.
- log.Debug("GetDiffFromMergeBase: %v", err)
- return true, nil
- }
- if err := headGitRepo.GetDiffFromMergeBase(base, newCommitID, diffAfter); err != nil {
- // New commit should be found
- return false, fmt.Errorf("GetDiffFromMergeBase: %w", err)
+ // Find the merge-base
+ _, base, err := tmpRepo.GetMergeBase("", "base", "tracking")
+ if err != nil {
+ return false, fmt.Errorf("GetMergeBase: %w", err)
}
- diffBeforeLines := bufio.NewScanner(diffBefore)
- diffAfterLines := bufio.NewScanner(diffAfter)
-
- for diffBeforeLines.Scan() && diffAfterLines.Scan() {
- if strings.HasPrefix(diffBeforeLines.Text(), "index") && strings.HasPrefix(diffAfterLines.Text(), "index") {
- // file hashes can change without the diff changing
- continue
- } else if strings.HasPrefix(diffBeforeLines.Text(), "@@") && strings.HasPrefix(diffAfterLines.Text(), "@@") {
- // the location of the difference may change
- continue
- } else if !bytes.Equal(diffBeforeLines.Bytes(), diffAfterLines.Bytes()) {
+ cmd := git.NewCommand(ctx, "diff", "--name-only", "-z").AddDynamicArguments(newCommitID, oldCommitID, base)
+ stdoutReader, stdoutWriter, err := os.Pipe()
+ if err != nil {
+ return false, fmt.Errorf("unable to open pipe for to run diff: %w", err)
+ }
+
+ if err := cmd.Run(&git.RunOpts{
+ Dir: tmpBasePath,
+ Stdout: stdoutWriter,
+ PipelineFunc: func(ctx context.Context, cancel context.CancelFunc) error {
+ _ = stdoutWriter.Close()
+ defer func() {
+ _ = stdoutReader.Close()
+ }()
+ return util.IsEmptyReader(stdoutReader)
+ },
+ }); err != nil {
+ if err == util.ErrNotEmpty {
return true, nil
}
- }
- if diffBeforeLines.Scan() || diffAfterLines.Scan() {
- // Diffs not of equal length
- return true, nil
+ log.Error("Unable to run diff on %s %s %s in tempRepo for PR[%d]%s/%s...%s/%s: Error: %v",
+ newCommitID, oldCommitID, base,
+ pr.ID, pr.BaseRepo.FullName(), pr.BaseBranch, pr.HeadRepo.FullName(), pr.HeadBranch,
+ err)
+
+ return false, fmt.Errorf("Unable to run git diff --name-only -z %s %s %s: %w", newCommitID, oldCommitID, base, err)
}
return false, nil
diff --git a/services/repository/adopt.go b/services/repository/adopt.go
index 8ebf2b6a3..280c4cc03 100644
--- a/services/repository/adopt.go
+++ b/services/repository/adopt.go
@@ -67,7 +67,7 @@ func AdoptRepository(doer, u *user_model.User, opts repo_module.CreateRepoOption
}
}
- if err := repo_module.CreateRepositoryByExample(ctx, doer, u, repo, true); err != nil {
+ if err := repo_module.CreateRepositoryByExample(ctx, doer, u, repo, true, false); err != nil {
return err
}
if err := adoptRepository(ctx, repoPath, doer, repo, opts); err != nil {
diff --git a/services/repository/check.go b/services/repository/check.go
index e9d65aea4..3a1f0b7f3 100644
--- a/services/repository/check.go
+++ b/services/repository/check.go
@@ -23,7 +23,7 @@ import (
)
// GitFsckRepos calls 'git fsck' to check repository health.
-func GitFsckRepos(ctx context.Context, timeout time.Duration, args []git.CmdArg) error {
+func GitFsckRepos(ctx context.Context, timeout time.Duration, args git.TrustedCmdArgs) error {
log.Trace("Doing: GitFsck")
if err := db.Iterate(
@@ -47,10 +47,10 @@ func GitFsckRepos(ctx context.Context, timeout time.Duration, args []git.CmdArg)
}
// GitFsckRepo calls 'git fsck' to check an individual repository's health.
-func GitFsckRepo(ctx context.Context, repo *repo_model.Repository, timeout time.Duration, args []git.CmdArg) error {
+func GitFsckRepo(ctx context.Context, repo *repo_model.Repository, timeout time.Duration, args git.TrustedCmdArgs) error {
log.Trace("Running health check on repository %-v", repo)
repoPath := repo.RepoPath()
- if err := git.Fsck(ctx, repoPath, timeout, args...); err != nil {
+ if err := git.Fsck(ctx, repoPath, timeout, args); err != nil {
log.Warn("Failed to health check repository (%-v): %v", repo, err)
if err = system_model.CreateRepositoryNotice("Failed to health check repository (%s): %v", repo.FullName(), err); err != nil {
log.Error("CreateRepositoryNotice: %v", err)
@@ -60,9 +60,8 @@ func GitFsckRepo(ctx context.Context, repo *repo_model.Repository, timeout time.
}
// GitGcRepos calls 'git gc' to remove unnecessary files and optimize the local repository
-func GitGcRepos(ctx context.Context, timeout time.Duration, args ...git.CmdArg) error {
+func GitGcRepos(ctx context.Context, timeout time.Duration, args git.TrustedCmdArgs) error {
log.Trace("Doing: GitGcRepos")
- args = append([]git.CmdArg{"gc"}, args...)
if err := db.Iterate(
ctx,
@@ -86,9 +85,9 @@ func GitGcRepos(ctx context.Context, timeout time.Duration, args ...git.CmdArg)
}
// GitGcRepo calls 'git gc' to remove unnecessary files and optimize the local repository
-func GitGcRepo(ctx context.Context, repo *repo_model.Repository, timeout time.Duration, args []git.CmdArg) error {
+func GitGcRepo(ctx context.Context, repo *repo_model.Repository, timeout time.Duration, args git.TrustedCmdArgs) error {
log.Trace("Running git gc on %-v", repo)
- command := git.NewCommand(ctx, args...).
+ command := git.NewCommand(ctx, "gc").AddArguments(args...).
SetDescription(fmt.Sprintf("Repository Garbage Collection: %s", repo.FullName()))
var stdout string
var err error
diff --git a/services/repository/files/patch.go b/services/repository/files/patch.go
index 73ee0fa81..f65199cfc 100644
--- a/services/repository/files/patch.go
+++ b/services/repository/files/patch.go
@@ -141,14 +141,12 @@ func ApplyDiffPatch(ctx context.Context, repo *repo_model.Repository, doer *user
stdout := &strings.Builder{}
stderr := &strings.Builder{}
- args := []git.CmdArg{"apply", "--index", "--recount", "--cached", "--ignore-whitespace", "--whitespace=fix", "--binary"}
-
+ cmdApply := git.NewCommand(ctx, "apply", "--index", "--recount", "--cached", "--ignore-whitespace", "--whitespace=fix", "--binary")
if git.CheckGitVersionAtLeast("2.32") == nil {
- args = append(args, "-3")
+ cmdApply.AddArguments("-3")
}
- cmd := git.NewCommand(ctx, args...)
- if err := cmd.Run(&git.RunOpts{
+ if err := cmdApply.Run(&git.RunOpts{
Dir: t.basePath,
Stdout: stdout,
Stderr: stderr,
diff --git a/services/repository/files/temp_repo.go b/services/repository/files/temp_repo.go
index 1f3375cdc..a086d15a4 100644
--- a/services/repository/files/temp_repo.go
+++ b/services/repository/files/temp_repo.go
@@ -233,11 +233,9 @@ func (t *TemporaryUploadRepository) CommitTreeWithDate(parent string, author, co
_, _ = messageBytes.WriteString(message)
_, _ = messageBytes.WriteString("\n")
- var args []git.CmdArg
+ cmdCommitTree := git.NewCommand(t.ctx, "commit-tree").AddDynamicArguments(treeHash)
if parent != "" {
- args = []git.CmdArg{"commit-tree", git.CmdArgCheck(treeHash), "-p", git.CmdArgCheck(parent)}
- } else {
- args = []git.CmdArg{"commit-tree", git.CmdArgCheck(treeHash)}
+ cmdCommitTree.AddOptionValues("-p", parent)
}
var sign bool
@@ -249,7 +247,7 @@ func (t *TemporaryUploadRepository) CommitTreeWithDate(parent string, author, co
sign, keyID, signer, _ = asymkey_service.SignInitialCommit(t.ctx, t.repo.RepoPath(), author)
}
if sign {
- args = append(args, git.CmdArg("-S"+keyID))
+ cmdCommitTree.AddOptionFormat("-S%s", keyID)
if t.repo.GetTrustModel() == repo_model.CommitterTrustModel || t.repo.GetTrustModel() == repo_model.CollaboratorCommitterTrustModel {
if committerSig.Name != authorSig.Name || committerSig.Email != authorSig.Email {
// Add trailers
@@ -264,7 +262,7 @@ func (t *TemporaryUploadRepository) CommitTreeWithDate(parent string, author, co
committerSig = signer
}
} else {
- args = append(args, "--no-gpg-sign")
+ cmdCommitTree.AddArguments("--no-gpg-sign")
}
if signoff {
@@ -281,7 +279,7 @@ func (t *TemporaryUploadRepository) CommitTreeWithDate(parent string, author, co
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
- if err := git.NewCommand(t.ctx, args...).
+ if err := cmdCommitTree.
Run(&git.RunOpts{
Env: env,
Dir: t.basePath,
@@ -364,7 +362,7 @@ func (t *TemporaryUploadRepository) DiffIndex() (*gitdiff.Diff, error) {
t.repo.FullName(), err, stderr)
}
- diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(t.ctx, t.basePath, "--cached", "HEAD")
+ diff.NumFiles, diff.TotalAddition, diff.TotalDeletion, err = git.GetDiffShortStat(t.ctx, t.basePath, git.TrustedCmdArgs{"--cached"}, "HEAD")
if err != nil {
return nil, err
}
diff --git a/services/repository/files/update.go b/services/repository/files/update.go
index 58b7a5e08..45a469239 100644
--- a/services/repository/files/update.go
+++ b/services/repository/files/update.go
@@ -370,7 +370,7 @@ func CreateOrUpdateRepoFile(ctx context.Context, repo *repo_model.Repository, do
if setting.LFS.StartServer && hasOldBranch {
// Check there is no way this can return multiple infos
filename2attribute2info, err := t.gitRepo.CheckAttribute(git.CheckAttributeOpts{
- Attributes: []git.CmdArg{"filter"},
+ Attributes: []string{"filter"},
Filenames: []string{treePath},
CachedOnly: true,
})
diff --git a/services/repository/files/upload.go b/services/repository/files/upload.go
index e7289dd60..cf2f7019b 100644
--- a/services/repository/files/upload.go
+++ b/services/repository/files/upload.go
@@ -96,7 +96,7 @@ func UploadRepoFiles(ctx context.Context, repo *repo_model.Repository, doer *use
var filename2attribute2info map[string]map[string]string
if setting.LFS.StartServer {
filename2attribute2info, err = t.gitRepo.CheckAttribute(git.CheckAttributeOpts{
- Attributes: []git.CmdArg{"filter"},
+ Attributes: []string{"filter"},
Filenames: names,
CachedOnly: true,
})
diff --git a/services/repository/fork.go b/services/repository/fork.go
index ad534be88..c3ca89e02 100644
--- a/services/repository/fork.go
+++ b/services/repository/fork.go
@@ -119,7 +119,7 @@ func ForkRepository(ctx context.Context, doer, owner *user_model.User, opts Fork
}()
err = db.WithTx(ctx, func(txCtx context.Context) error {
- if err = repo_module.CreateRepositoryByExample(txCtx, doer, owner, repo, false); err != nil {
+ if err = repo_module.CreateRepositoryByExample(txCtx, doer, owner, repo, false, true); err != nil {
return err
}
diff --git a/services/repository/push.go b/services/repository/push.go
index 013524338..ef6460cef 100644
--- a/services/repository/push.go
+++ b/services/repository/push.go
@@ -110,9 +110,13 @@ func pushUpdates(optsList []*repo_module.PushUpdateOptions) error {
}
if opts.IsTag() { // If is tag reference
if pusher == nil || pusher.ID != opts.PusherID {
- var err error
- if pusher, err = user_model.GetUserByID(ctx, opts.PusherID); err != nil {
- return err
+ if opts.PusherID == user_model.ActionsUserID {
+ pusher = user_model.NewActionsUser()
+ } else {
+ var err error
+ if pusher, err = user_model.GetUserByID(ctx, opts.PusherID); err != nil {
+ return err
+ }
}
}
tagName := opts.TagName()
@@ -150,9 +154,13 @@ func pushUpdates(optsList []*repo_module.PushUpdateOptions) error {
}
} else if opts.IsBranch() { // If is branch reference
if pusher == nil || pusher.ID != opts.PusherID {
- var err error
- if pusher, err = user_model.GetUserByID(ctx, opts.PusherID); err != nil {
- return err
+ if opts.PusherID == user_model.ActionsUserID {
+ pusher = user_model.NewActionsUser()
+ } else {
+ var err error
+ if pusher, err = user_model.GetUserByID(ctx, opts.PusherID); err != nil {
+ return err
+ }
}
}