diff --git a/.lighthouse/jenkins-x/pullrequest.yaml b/.lighthouse/jenkins-x/pullrequest.yaml
index 530c26971..0538f0464 100755
--- a/.lighthouse/jenkins-x/pullrequest.yaml
+++ b/.lighthouse/jenkins-x/pullrequest.yaml
@@ -22,6 +22,14 @@ spec:
- name: jx-variables
- name: build-make-linux
resources: {}
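+ # back up the git credentials so the release-chart step can restore them later in the pipeline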
+ - name: protect-credentials
+ securityContext:
+ runAsUser: 0
+ resources: {}
+ image: ghcr.io/distroless/busybox
+ script: |
+ #!/bin/sh
+ cp -a /tekton/home/.git-credentials /tekton/home/.git-credentials.bak
- name: build-make-test
resources: {}
- name: build-container-build:webhooks
@@ -73,6 +81,21 @@ spec:
source .jx/variables.sh
cp /tekton/creds-secrets/tekton-container-registry-auth/.dockerconfigjson /kaniko/.docker/config.json
/kaniko/executor $KANIKO_FLAGS --context=/workspace/source --dockerfile=docker/gc/Dockerfile --destination=ghcr.io/jenkins-x/lighthouse-gc-jobs:$VERSION --build-arg=VERSION=$VERSION
+ - image: ghcr.io/jenkins-x/jx-boot:3.10.131
+ name: release-chart
+ resources: {}
+ script: |
+ #!/usr/bin/env sh
+ source .jx/variables.sh
+ if [ -d "charts/$REPO_NAME" ]; then
+ jx gitops yset -p version -v "$VERSION" -f ./charts/$REPO_NAME/Chart.yaml
+ jx gitops yset -p appVersion -v "$VERSION" -f ./charts/$REPO_NAME/Chart.yaml
+ jx gitops yset -p image.tag -v "$VERSION" -f ./charts/$REPO_NAME/values.yaml;
+ else echo no charts; fi
+
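+ # restore the git credentials backed up by the protect-credentials step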
+ mv /tekton/home/.git-credentials.bak /tekton/home/.git-credentials
+
+ jx gitops helm release
podTemplate: {}
serviceAccountName: tekton-bot
timeout: 240h0m0s
diff --git a/.lighthouse/jenkins-x/release.yaml b/.lighthouse/jenkins-x/release.yaml
index bf35267da..ae7282c6b 100755
--- a/.lighthouse/jenkins-x/release.yaml
+++ b/.lighthouse/jenkins-x/release.yaml
@@ -75,7 +75,7 @@ spec:
/kaniko/executor $KANIKO_FLAGS --context=/workspace/source --dockerfile=docker/gc/Dockerfile --destination=ghcr.io/jenkins-x/lighthouse-gc-jobs:$VERSION --destination=ghcr.io/jenkins-x/lighthouse-gc-jobs:latest --build-arg=VERSION=$VERSION
- name: chart-docs
resources: {}
- - image: ghcr.io/jenkins-x/jx-boot:3.10.73
+ - image: ghcr.io/jenkins-x/jx-boot:3.10.131
name: changelog
resources: {}
script: |
diff --git a/charts/lighthouse/templates/webhooks-deployment.yaml b/charts/lighthouse/templates/webhooks-deployment.yaml
index 452bd9b14..753642ef9 100644
--- a/charts/lighthouse/templates/webhooks-deployment.yaml
+++ b/charts/lighthouse/templates/webhooks-deployment.yaml
@@ -42,6 +42,10 @@ spec:
args:
- "--namespace={{ .Release.Namespace }}"
env:
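+ # expose the pod's service account name so the CronJobs created for periodic jobs run with the same service account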
+ - name: SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
- name: "GIT_KIND"
value: "{{ .Values.git.kind }}"
- name: "LH_CUSTOM_TRIGGER_COMMAND"
diff --git a/charts/lighthouse/templates/webhooks-role.yaml b/charts/lighthouse/templates/webhooks-role.yaml
index 3c87cabd9..1474f2cec 100644
--- a/charts/lighthouse/templates/webhooks-role.yaml
+++ b/charts/lighthouse/templates/webhooks-role.yaml
@@ -17,6 +17,8 @@ rules:
- create
- list
- watch
+ - patch
+ - delete
- apiGroups:
- ""
resources:
@@ -25,6 +27,18 @@ rules:
- get
- list
- watch
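+# allow managing the CronJobs that trigger periodic jobs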
+- apiGroups:
+ - batch
+ resources:
+ - cronjobs
+ verbs:
+ - get
+ - update
+ - create
+ - list
+ - watch
+ - patch
+ - delete
- apiGroups:
- lighthouse.jenkins.io
resources:
diff --git a/go.mod b/go.mod
index 7e1e8f5da..f833230e0 100644
--- a/go.mod
+++ b/go.mod
@@ -22,6 +22,7 @@ require (
github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.8.4
github.com/tektoncd/pipeline v0.41.0
+ golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
golang.org/x/oauth2 v0.15.0
gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5
k8s.io/api v0.25.9
diff --git a/go.sum b/go.sum
index 3aaf6a57c..7235ec0d1 100644
--- a/go.sum
+++ b/go.sum
@@ -420,6 +420,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
+golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
diff --git a/pkg/apis/lighthouse/v1alpha1/types.go b/pkg/apis/lighthouse/v1alpha1/types.go
index 376d9544b..f57e1692f 100644
--- a/pkg/apis/lighthouse/v1alpha1/types.go
+++ b/pkg/apis/lighthouse/v1alpha1/types.go
@@ -191,10 +191,6 @@ func (s *LighthouseJobSpec) GetEnvVars() map[string]string {
env[JobSpecEnv] = fmt.Sprintf("type:%s", s.Type)
- if s.Type == job.PeriodicJob {
- return env
- }
-
if s.Refs != nil {
env[RepoOwnerEnv] = s.Refs.Org
env[RepoNameEnv] = s.Refs.Repo
@@ -203,7 +199,7 @@ func (s *LighthouseJobSpec) GetEnvVars() map[string]string {
env[PullRefsEnv] = s.Refs.String()
}
- if s.Type == job.PostsubmitJob || s.Type == job.BatchJob {
+ if s.Type != job.PresubmitJob {
return env
}
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 9254778e7..f4536b779 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -251,6 +251,16 @@ func (c *Config) GetPostsubmits(repository scm.Repository) []job.Postsubmit {
return answer
}
+// GetDeployments returns all the deployments for the given repository
+func (c *Config) GetDeployments(repository scm.Repository) []job.Deployment {
+ fullNames := util.FullNames(repository)
+ var answer []job.Deployment
+ for _, fn := range fullNames {
+ answer = append(answer, c.Deployments[fn]...)
+ }
+ return answer
+}
+
// GetPresubmits lets return all the pre submits for the given repo
func (c *Config) GetPresubmits(repository scm.Repository) []job.Presubmit {
fullNames := util.FullNames(repository)
@@ -262,9 +272,9 @@ func (c *Config) GetPresubmits(repository scm.Repository) []job.Presubmit {
}
// BranchRequirements partitions status contexts for a given org, repo branch into three buckets:
-// - contexts that are always required to be present
-// - contexts that are required, _if_ present
-// - contexts that are always optional
+// - contexts that are always required to be present
+// - contexts that are required, _if_ present
+// - contexts that are always optional
func BranchRequirements(org, repo, branch string, presubmits map[string][]job.Presubmit) ([]string, []string, []string) {
jobs, ok := presubmits[org+"/"+repo]
if !ok {
diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go
index f3d0ea039..bddec19e9 100644
--- a/pkg/config/config_test.go
+++ b/pkg/config/config_test.go
@@ -350,29 +350,29 @@ periodics:
- image: alpine`,
},
},
- {
- name: "duplicated periodics",
- prowConfig: ``,
- jobConfigs: []string{
- `
-periodics:
-- cron: '* * * * *'
- agent: tekton
- name: foo
- spec:
- containers:
- - image: alpine`,
- `
-periodics:
-- cron: '* * * * *'
- agent: tekton
- name: foo
- spec:
- containers:
- - image: alpine`,
- },
- expectError: true,
- },
+ // {
+ // name: "duplicated periodics",
+ // prowConfig: ``,
+ // jobConfigs: []string{
+ // `
+ //periodics:
+ //- cron: '* * * * *'
+ // agent: tekton
+ // name: foo
+ // spec:
+ // containers:
+ // - image: alpine`,
+ // `
+ //periodics:
+ //- cron: '* * * * *'
+ // agent: tekton
+ // name: foo
+ // spec:
+ // containers:
+ // - image: alpine`,
+ // },
+ // expectError: true,
+ // },
{
name: "one presubmit no context should default",
prowConfig: ``,
diff --git a/pkg/config/job/config.go b/pkg/config/job/config.go
index 27e3a3b86..fd61d4494 100644
--- a/pkg/config/job/config.go
+++ b/pkg/config/job/config.go
@@ -33,7 +33,8 @@ type Config struct {
Presubmits map[string][]Presubmit `json:"presubmits,omitempty"`
Postsubmits map[string][]Postsubmit `json:"postsubmits,omitempty"`
// Periodics are not associated with any repo.
- Periodics []Periodic `json:"periodics,omitempty"`
+ Periodics []Periodic `json:"periodics,omitempty"`
+ Deployments map[string][]Deployment `json:"deployments,omitempty"`
}
func resolvePresets(name string, labels map[string]string, spec *v1.PodSpec, presets []Preset) error {
@@ -160,9 +161,9 @@ func (c *Config) Validate(lh lighthouse.Config) error {
validPeriodics := sets.NewString()
// Ensure that the periodic durations are valid and specs exist.
for _, p := range c.Periodics {
- if validPeriodics.Has(p.Name) {
- return fmt.Errorf("duplicated periodic job : %s", p.Name)
- }
+ //if validPeriodics.Has(p.Name) {
+ // return fmt.Errorf("duplicated periodic job : %s", p.Name)
+ //}
validPeriodics.Insert(p.Name)
if err := p.Base.Validate(PeriodicJob, lh.PodNamespace); err != nil {
return fmt.Errorf("invalid periodic job %s: %v", p.Name, err)
diff --git a/pkg/config/job/deployment.go b/pkg/config/job/deployment.go
new file mode 100644
index 000000000..b4a8787ea
--- /dev/null
+++ b/pkg/config/job/deployment.go
@@ -0,0 +1,13 @@
+package job
+
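+// Deployment runs on deployment status events.
+//
+// An illustrative triggers.yaml entry (field names follow this struct; the
+// name, source file and values are hypothetical):
+//
+//	deployments:
+//	- name: notify-on-deploy
+//	  source: notify.yaml
+//	  state: success
+//	  environment: production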
+type Deployment struct {
+ Base
+ Reporter
+	// The deployment state that triggers this pipeline.
+	// Can be one of: error, failure, inactive, in_progress, queued, pending, success
+	// If not set, events for all deployment states trigger this pipeline.
+ State string `json:"state,omitempty"`
+	// Only deployments to this environment trigger this pipeline.
+	// If not set, deployments to all environments trigger it.
+ Environment string `json:"environment,omitempty"`
+}
diff --git a/pkg/config/job/periodic.go b/pkg/config/job/periodic.go
index 3dd2199eb..e2f89da11 100644
--- a/pkg/config/job/periodic.go
+++ b/pkg/config/job/periodic.go
@@ -19,10 +19,11 @@ package job
// Periodic runs on a timer.
type Periodic struct {
Base
+ Reporter
// Cron representation of job trigger time
Cron string `json:"cron"`
- // Tags for config entries
- Tags []string `json:"tags,omitempty"`
+	// Branch to run the job on. If not set, the default branch of the repository is used.
+ Branch string `json:"branch,omitempty"`
}
// SetDefaults initializes default values
diff --git a/pkg/config/job/pipelinekind.go b/pkg/config/job/pipelinekind.go
index 306dddb5e..c70681034 100644
--- a/pkg/config/job/pipelinekind.go
+++ b/pkg/config/job/pipelinekind.go
@@ -35,6 +35,8 @@ const (
PostsubmitJob PipelineKind = "postsubmit"
// Periodic job means it runs on a time-basis, unrelated to git changes.
PeriodicJob PipelineKind = "periodic"
+	// Deployment job means it runs on a deployment status event.
+ DeploymentJob PipelineKind = "deployment"
// BatchJob tests multiple unmerged PRs at the same time.
BatchJob PipelineKind = "batch"
)
diff --git a/pkg/jobutil/jobutil.go b/pkg/jobutil/jobutil.go
index 466d1fbce..1fa92ffdd 100644
--- a/pkg/jobutil/jobutil.go
+++ b/pkg/jobutil/jobutil.go
@@ -143,10 +143,22 @@ func PostsubmitSpec(logger *logrus.Entry, p job.Postsubmit, refs v1alpha1.Refs)
return pjs
}
+// DeploymentSpec initializes a PipelineOptionsSpec for a given deployment job.
+func DeploymentSpec(logger *logrus.Entry, p job.Deployment, refs v1alpha1.Refs) v1alpha1.LighthouseJobSpec {
+ pjs := specFromJobBase(logger, p.Base)
+ pjs.Type = job.DeploymentJob
+ pjs.Context = p.Context
+ pjs.Refs = completePrimaryRefs(refs, p.Base)
+
+ return pjs
+}
+
// PeriodicSpec initializes a PipelineOptionsSpec for a given periodic job.
-func PeriodicSpec(logger *logrus.Entry, p job.Periodic) v1alpha1.LighthouseJobSpec {
+func PeriodicSpec(logger *logrus.Entry, p job.Periodic, refs v1alpha1.Refs) v1alpha1.LighthouseJobSpec {
pjs := specFromJobBase(logger, p.Base)
pjs.Type = job.PeriodicJob
+ pjs.Context = p.Context
+ pjs.Refs = completePrimaryRefs(refs, p.Base)
return pjs
}
diff --git a/pkg/plugins/plugin.go b/pkg/plugins/plugin.go
index 9bbdbc95c..b4b248ae2 100644
--- a/pkg/plugins/plugin.go
+++ b/pkg/plugins/plugin.go
@@ -8,16 +8,17 @@ import (
// Plugin defines a plugin and its handlers
type Plugin struct {
- Description string
- ExcludedProviders sets.String
- ConfigHelpProvider ConfigHelpProvider
- IssueHandler IssueHandler
- PullRequestHandler PullRequestHandler
- PushEventHandler PushEventHandler
- ReviewEventHandler ReviewEventHandler
- StatusEventHandler StatusEventHandler
- GenericCommentHandler GenericCommentHandler
- Commands []Command
+ Description string
+ ExcludedProviders sets.String
+ ConfigHelpProvider ConfigHelpProvider
+ IssueHandler IssueHandler
+ PullRequestHandler PullRequestHandler
+ PushEventHandler PushEventHandler
+ ReviewEventHandler ReviewEventHandler
+ StatusEventHandler StatusEventHandler
+ DeploymentStatusHandler DeploymentStatusHandler
+ GenericCommentHandler GenericCommentHandler
+ Commands []Command
}
// InvokeCommandHandler calls InvokeHandler on all commands
diff --git a/pkg/plugins/plugins.go b/pkg/plugins/plugins.go
index b383b0e18..d2c7d0cca 100644
--- a/pkg/plugins/plugins.go
+++ b/pkg/plugins/plugins.go
@@ -63,6 +63,8 @@ type PullRequestHandler func(Agent, scm.PullRequestHook) error
// StatusEventHandler defines the function contract for a scm.Status handler.
type StatusEventHandler func(Agent, scm.Status) error
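+// DeploymentStatusHandler defines the function contract for a scm.DeploymentStatusHook handler.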
+type DeploymentStatusHandler func(Agent, scm.DeploymentStatusHook) error
+
// PushEventHandler defines the function contract for a scm.PushHook handler.
type PushEventHandler func(Agent, scm.PushHook) error
diff --git a/pkg/plugins/trigger/deployment.go b/pkg/plugins/trigger/deployment.go
new file mode 100644
index 000000000..70bf4a7b8
--- /dev/null
+++ b/pkg/plugins/trigger/deployment.go
@@ -0,0 +1,42 @@
+package trigger
+
+import (
+ "strings"
+
+ "github.com/jenkins-x/go-scm/scm"
+ "github.com/jenkins-x/lighthouse/pkg/apis/lighthouse/v1alpha1"
+ "github.com/jenkins-x/lighthouse/pkg/jobutil"
+ "github.com/jenkins-x/lighthouse/pkg/scmprovider"
+)
+
+func handleDeployment(c Client, ds scm.DeploymentStatusHook) error {
+ for _, j := range c.Config.GetDeployments(ds.Repo) {
+ if j.State != "" && j.State != ds.DeploymentStatus.State {
+ continue
+ }
+ if j.Environment != "" && !strings.EqualFold(j.Environment, ds.Deployment.Environment) {
+ continue
+ }
+ labels := make(map[string]string)
+ for k, v := range j.Labels {
+ labels[k] = v
+ }
+ refs := v1alpha1.Refs{
+ Org: ds.Repo.Namespace,
+ Repo: ds.Repo.Name,
+ BaseRef: ds.Deployment.Ref,
+ BaseSHA: ds.Deployment.Sha,
+ BaseLink: ds.Deployment.RepositoryLink,
+ CloneURI: ds.Repo.Clone,
+ }
+ labels[scmprovider.EventGUID] = ds.DeploymentStatus.ID
+ pj := jobutil.NewLighthouseJob(jobutil.DeploymentSpec(c.Logger, j, refs), labels, j.Annotations)
+ c.Logger.WithFields(jobutil.LighthouseJobFields(&pj)).Info("Creating a new LighthouseJob.")
+ lj, err := c.LauncherClient.Launch(&pj)
+ if err != nil {
+ return err
+ }
+ c.Logger.WithFields(jobutil.LighthouseJobFields(lj)).Debug("LighthouseJob created")
+ }
+ return nil
+}
diff --git a/pkg/plugins/trigger/deployment_test.go b/pkg/plugins/trigger/deployment_test.go
new file mode 100644
index 000000000..e8ee5e930
--- /dev/null
+++ b/pkg/plugins/trigger/deployment_test.go
@@ -0,0 +1,118 @@
+package trigger
+
+import (
+ "testing"
+
+ "github.com/jenkins-x/go-scm/scm"
+ "github.com/jenkins-x/lighthouse/pkg/config"
+ "github.com/jenkins-x/lighthouse/pkg/config/job"
+ "github.com/jenkins-x/lighthouse/pkg/launcher/fake"
+ fake2 "github.com/jenkins-x/lighthouse/pkg/scmprovider/fake"
+ "github.com/sirupsen/logrus"
+)
+
+func TestHandleDeployment(t *testing.T) {
+ testCases := []struct {
+ name string
+ dep *scm.DeploymentStatusHook
+ jobsToRun int
+ }{
+ {
+ name: "deploy to production",
+ dep: &scm.DeploymentStatusHook{
+ Deployment: scm.Deployment{
+ Sha: "df0442b202e6881b88ab6dad774f63459671ebb0",
+ Ref: "v1.0.1",
+ Environment: "Production",
+					RepositoryLink: "https://api.github.com/repos/org/repo",
+ },
+ DeploymentStatus: scm.DeploymentStatus{
+ ID: "123456",
+ State: "success",
+ },
+ Repo: scm.Repository{
+ FullName: "org/repo",
+ Clone: "https://github.com/org/repo.git",
+ },
+ },
+ jobsToRun: 1,
+ },
+ {
+			name: "failed deployment status",
+ dep: &scm.DeploymentStatusHook{
+ Deployment: scm.Deployment{
+ Sha: "df0442b202e6881b88ab6dad774f63459671ebb0",
+ Ref: "v1.0.1",
+ Environment: "Production",
+					RepositoryLink: "https://api.github.com/repos/org/repo",
+ },
+ DeploymentStatus: scm.DeploymentStatus{
+ ID: "123456",
+ State: "failed",
+ },
+ Repo: scm.Repository{
+ FullName: "org/repo",
+ Clone: "https://github.com/org/repo.git",
+ },
+ },
+ jobsToRun: 0,
+ },
+ {
+			name: "deploy to staging",
+ dep: &scm.DeploymentStatusHook{
+ Deployment: scm.Deployment{
+ Sha: "df0442b202e6881b88ab6dad774f63459671ebb0",
+ Ref: "v1.0.1",
+ Environment: "Staging",
+					RepositoryLink: "https://api.github.com/repos/org/repo",
+ },
+ DeploymentStatus: scm.DeploymentStatus{
+ ID: "123456",
+ State: "success",
+ },
+ Repo: scm.Repository{
+ FullName: "org/repo",
+ Clone: "https://github.com/org/repo.git",
+ },
+ },
+ jobsToRun: 0,
+ },
+ }
+
+ for _, tc := range testCases {
+ g := &fake2.SCMClient{}
+ fakeLauncher := fake.NewLauncher()
+ c := Client{
+ SCMProviderClient: g,
+ LauncherClient: fakeLauncher,
+ Config: &config.Config{ProwConfig: config.ProwConfig{LighthouseJobNamespace: "lighthouseJobs"}},
+ Logger: logrus.WithField("plugin", pluginName),
+ }
+ deployments := map[string][]job.Deployment{
+ "org/repo": {
+ {
+ Base: job.Base{
+ Name: "butter-is-served",
+ },
+ Reporter: job.Reporter{},
+ State: "success",
+ Environment: "production",
+ },
+ },
+ }
+ c.Config.Deployments = deployments
+ err := handleDeployment(c, *tc.dep)
+ if err != nil {
+			t.Errorf("test %q: handleDeployment returned unexpected error %v", tc.name, err)
+ }
+ var numStarted int
+		for _, pipeline := range fakeLauncher.Pipelines {
+			t.Logf("created job with context %s", pipeline.Spec.Context)
+ numStarted++
+ }
+ if numStarted != tc.jobsToRun {
+ t.Errorf("test %q: expected %d jobs to run, got %d", tc.name, tc.jobsToRun, numStarted)
+ }
+ }
+
+}
diff --git a/pkg/plugins/trigger/periodic.go b/pkg/plugins/trigger/periodic.go
new file mode 100644
index 000000000..6df82bc06
--- /dev/null
+++ b/pkg/plugins/trigger/periodic.go
@@ -0,0 +1,458 @@
+package trigger
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jenkins-x/go-scm/scm"
+ "github.com/jenkins-x/lighthouse/pkg/apis/lighthouse/v1alpha1"
+ "github.com/jenkins-x/lighthouse/pkg/config"
+ "github.com/jenkins-x/lighthouse/pkg/config/job"
+ "github.com/jenkins-x/lighthouse/pkg/filebrowser"
+ "github.com/jenkins-x/lighthouse/pkg/jobutil"
+ "github.com/jenkins-x/lighthouse/pkg/plugins"
+ "github.com/jenkins-x/lighthouse/pkg/scmprovider"
+ "github.com/jenkins-x/lighthouse/pkg/triggerconfig/inrepo"
+ "github.com/jenkins-x/lighthouse/pkg/util"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ applybatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
+ applyv1 "k8s.io/client-go/applyconfigurations/core/v1"
+ kubeclient "k8s.io/client-go/kubernetes"
+ typedbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
+ typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
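+// PeriodicAgent manages the CronJobs and ConfigMaps used to trigger periodic jobs.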
+type PeriodicAgent struct {
+ Namespace string
+ SCMClient *scm.Client
+}
+
+const fieldManager = "lighthouse"
+const initializedField = "isPeriodicsInitialized"
+const initStartedField = "periodicsInitializationStarted"
+
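+// UpdatePeriodics updates the CronJobs and ConfigMaps backing the periodics of a repository when a push changes them.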
+func (pa *PeriodicAgent) UpdatePeriodics(kc kubeclient.Interface, agent plugins.Agent, pe *scm.PushHook) {
+ repo := pe.Repository()
+ l := logrus.WithField(scmprovider.RepoLogField, repo.Name).WithField(scmprovider.OrgLogField, repo.Namespace)
+ if !hasChanges(pe, agent) {
+ return
+ }
+ cmInterface := kc.CoreV1().ConfigMaps(pa.Namespace)
+ cjInterface := kc.BatchV1().CronJobs(pa.Namespace)
+ cmList, cronList, done := pa.getExistingResources(l, cmInterface, cjInterface,
+ fmt.Sprintf("app=lighthouse-webhooks,component=periodic,org=%s,repo=%s,trigger", repo.Namespace, repo.Name))
+ if done {
+ return
+ }
+
+ getExistingConfigMap := func(p job.Periodic) *corev1.ConfigMap {
+ for i, cm := range cmList.Items {
+ if cm.Labels["trigger"] == p.Name {
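+				// blank out the consumed entry so the cleanup below does not delete it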
+ cmList.Items[i] = corev1.ConfigMap{}
+ return &cm
+ }
+ }
+ return nil
+ }
+ getExistingCron := func(p job.Periodic) *batchv1.CronJob {
+ for i, cj := range cronList.Items {
+ if cj.Labels["trigger"] == p.Name {
+ cronList.Items[i] = batchv1.CronJob{}
+ return &cj
+ }
+ }
+ return nil
+ }
+
+ if pa.UpdatePeriodicsForRepo(
+ agent.Config.Periodics,
+ l,
+ getExistingConfigMap,
+ getExistingCron,
+ repo.Namespace,
+ repo.Name,
+ cmInterface,
+ cjInterface,
+ ) {
+ return
+ }
+
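+	// entries not consumed above no longer match any trigger, so delete them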
+ for _, cj := range cronList.Items {
+ if cj.Name != "" {
+ deleteCronJob(cjInterface, &cj)
+ }
+ }
+ for _, cm := range cmList.Items {
+ if cm.Name != "" {
+ deleteConfigMap(cmInterface, &cm)
+ }
+ }
+}
+
+// hasChanges returns true if any triggers.yaml or file pointed to by SourcePath has changed
+func hasChanges(pe *scm.PushHook, agent plugins.Agent) bool {
+ changedFiles, err := listPushEventChanges(*pe)()
+ if err != nil {
+ return false
+ }
+ lighthouseFiles := make(map[string]bool)
+ for _, changedFile := range changedFiles {
+ if strings.HasPrefix(changedFile, ".lighthouse/") {
+ _, changedFile = filepath.Split(changedFile)
+ if changedFile == "triggers.yaml" {
+ return true
+ }
+ lighthouseFiles[changedFile] = true
+ }
+ }
+ for _, p := range agent.Config.Periodics {
+ _, sourcePath := filepath.Split(p.SourcePath)
+ if lighthouseFiles[sourcePath] {
+ return true
+ }
+ }
+ return false
+}
+
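+// PeriodicsInitialized reports whether periodics are already initialized (or initialization is in progress) and otherwise marks initialization as pending.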
+func (pa *PeriodicAgent) PeriodicsInitialized(namespace string, kc kubeclient.Interface) bool {
+ cmInterface := kc.CoreV1().ConfigMaps(namespace)
+ cm, err := cmInterface.Get(context.TODO(), util.ProwConfigMapName, metav1.GetOptions{})
+ if err != nil {
+		logrus.WithError(err).Error("Can't get ConfigMap config. Can't check if periodics are initialized")
+ return true
+ }
+ isInit := cm.Data[initializedField]
+	if isInit == "true" {
+ return true
+ }
+ if isInit == "pending" {
+ initStarted, err := strconv.ParseInt(cm.Data[initStartedField], 10, 64)
+		// If initialization started less than 24 hours ago we assume it is still in progress, so return true
+		if err == nil && time.Unix(initStarted, 0).After(time.Now().Add(-24*time.Hour)) {
+ return true
+ }
+ }
+	cmApply, err := applyv1.ExtractConfigMap(cm, fieldManager)
+	if err != nil {
+		logrus.Error(errors.Wrapf(err, "failed to extract ConfigMap"))
+		return true
+	}
+	if cmApply.Data == nil {
+		cmApply.Data = map[string]string{}
+	}
+	cmApply.Data[initializedField] = "pending"
+	cmApply.Data[initStartedField] = strconv.FormatInt(time.Now().Unix(), 10)
+	_, err = cmInterface.Apply(context.TODO(), cmApply, metav1.ApplyOptions{FieldManager: fieldManager})
+ if err != nil {
+ // Somebody else has updated the configmap, so don't initialize periodics now
+ return true
+ }
+ return false
+}
+
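+// InitializePeriodics creates or updates the CronJobs and ConfigMaps for the periodics of all repositories with in-repo config enabled.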
+func (pa *PeriodicAgent) InitializePeriodics(kc kubeclient.Interface, configAgent *config.Agent, fileBrowsers *filebrowser.FileBrowsers) {
+ if pa.SCMClient == nil {
+ _, scmClient, _, _, err := util.GetSCMClient("", configAgent.Config)
+ if err != nil {
+ logrus.Errorf("failed to create SCM scmClient: %s", err.Error())
+ return
+ }
+ pa.SCMClient = scmClient
+ }
+
+ resolverCache := inrepo.NewResolverCache()
+ fc := filebrowser.NewFetchCache()
+ c := configAgent.Config()
+ cmInterface := kc.CoreV1().ConfigMaps(pa.Namespace)
+ cjInterface := kc.BatchV1().CronJobs(pa.Namespace)
+ cmList, cronList, done := pa.getExistingResources(nil, cmInterface, cjInterface, "app=lighthouse-webhooks,component=periodic,org,repo,trigger")
+ if done {
+ return
+ }
+	cmMap := make(map[string]map[string]*corev1.ConfigMap)
+	for i, cm := range cmList.Items {
+		key := cm.Labels["org"] + "/" + cm.Labels["repo"]
+		if cmMap[key] == nil {
+			cmMap[key] = map[string]*corev1.ConfigMap{}
+		}
+		cmMap[key][cm.Labels["trigger"]] = &cmList.Items[i]
+	}
+	cronMap := make(map[string]map[string]*batchv1.CronJob)
+	for i, cj := range cronList.Items {
+		key := cj.Labels["org"] + "/" + cj.Labels["repo"]
+		if cronMap[key] == nil {
+			cronMap[key] = map[string]*batchv1.CronJob{}
+		}
+		cronMap[key][cj.Labels["trigger"]] = &cronList.Items[i]
+	}
+
+ for fullName := range pa.filterPeriodics(c.InRepoConfig.Enabled) {
+ repoCronJobs, repoCronExists := cronMap[fullName]
+ repoCM, repoCmExists := cmMap[fullName]
+ org, repo := scm.Split(fullName)
+ if org == "" {
+ logrus.Errorf("Wrong format of %s, not owner/repo", fullName)
+ continue
+ }
+ l := logrus.WithField(scmprovider.RepoLogField, repo).WithField(scmprovider.OrgLogField, org)
+ // TODO Ensure that the repo clones are removed and deregistered as soon as possible
+ // One solution would be to run InitializePeriodics in a separate job
+ cfg, err := inrepo.LoadTriggerConfig(fileBrowsers, fc, resolverCache, org, repo, "")
+ if err != nil {
+ l.Error(errors.Wrapf(err, "failed to calculate in repo config"))
+ // Keeping existing cronjobs if trigger config can not be read
+ delete(cronMap, fullName)
+ delete(cmMap, fullName)
+ continue
+ }
+ getExistingCron := func(p job.Periodic) *batchv1.CronJob {
+ if repoCronExists {
+ cj, cjExists := repoCronJobs[p.Name]
+ if cjExists {
+ delete(repoCronJobs, p.Name)
+ return cj
+ }
+ }
+ return nil
+ }
+
+ getExistingConfigMap := func(p job.Periodic) *corev1.ConfigMap {
+ if repoCmExists {
+ cm, cmExist := repoCM[p.Name]
+ if cmExist {
+ delete(repoCM, p.Name)
+ return cm
+ }
+ }
+ return nil
+ }
+
+ if pa.UpdatePeriodicsForRepo(cfg.Spec.Periodics, l, getExistingConfigMap, getExistingCron, org, repo, cmInterface, cjInterface) {
+ return
+ }
+ }
+
+ // Removing CronJobs not corresponding to any found triggers
+ for _, repoCron := range cronMap {
+ for _, aCron := range repoCron {
+ deleteCronJob(cjInterface, aCron)
+ }
+ }
+ for _, repoCm := range cmMap {
+ for _, cm := range repoCm {
+ deleteConfigMap(cmInterface, cm)
+ }
+ }
+	_, err := cmInterface.Apply(context.TODO(),
+		applyv1.ConfigMap(util.ProwConfigMapName, pa.Namespace).
+			WithData(map[string]string{initializedField: "true"}),
+		metav1.ApplyOptions{Force: true, FieldManager: fieldManager})
+	if err != nil {
+		logrus.Error(errors.Wrapf(err, "failed to mark periodics as initialized"))
+	}
+}
+
+func deleteConfigMap(cmInterface typedv1.ConfigMapInterface, cm *corev1.ConfigMap) {
+ err := cmInterface.Delete(context.TODO(), cm.Name, metav1.DeleteOptions{})
+ if err != nil {
+ logrus.WithError(err).
+ Errorf("Failed to delete ConfigMap %s corresponding to removed trigger %s for repo %s/%s",
+ cm.Name, cm.Labels["trigger"], cm.Labels["org"], cm.Labels["repo"])
+ }
+}
+
+func deleteCronJob(cjInterface typedbatchv1.CronJobInterface, cj *batchv1.CronJob) {
+ err := cjInterface.Delete(context.TODO(), cj.Name, metav1.DeleteOptions{})
+ if err != nil {
+ logrus.WithError(err).
+ Errorf("Failed to delete CronJob %s corresponding to removed trigger %s for repo %s",
+ cj.Name, cj.Labels["trigger"], cj.Labels["repo"])
+ }
+}
+
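+// UpdatePeriodicsForRepo applies the ConfigMap and CronJob for each periodic of a single repository, returning true if processing should be aborted.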
+func (pa *PeriodicAgent) UpdatePeriodicsForRepo(
+ periodics []job.Periodic,
+ l *logrus.Entry,
+ getExistingConfigMap func(p job.Periodic) *corev1.ConfigMap,
+ getExistingCron func(p job.Periodic) *batchv1.CronJob,
+ org string,
+ repo string,
+ cmInterface typedv1.ConfigMapInterface,
+ cjInterface typedbatchv1.CronJobInterface,
+) bool {
+ for _, p := range periodics {
+ labels := map[string]string{
+ "app": "lighthouse-webhooks",
+ "component": "periodic",
+ "org": org,
+ "repo": repo,
+ "trigger": p.Name,
+ }
+ for k, v := range p.Labels {
+ // don't overwrite labels since that would disturb the logic
+ _, predef := labels[k]
+ if !predef {
+ labels[k] = v
+ }
+ }
+
+ resourceName := fmt.Sprintf("lighthouse-%s-%s-%s", org, repo, p.Name)
+
+ err := p.LoadPipeline(l)
+ if err != nil {
+ l.WithError(err).Warnf("Failed to load pipeline %s from %s", p.Name, p.SourcePath)
+ continue
+ }
+ refs := v1alpha1.Refs{
+ Org: org,
+ Repo: repo,
+ BaseRef: p.Branch,
+ CloneURI: p.CloneURI,
+ }
+
+ pj := jobutil.NewLighthouseJob(jobutil.PeriodicSpec(l, p, refs), labels, p.Annotations)
+		lighthouseData, err := json.Marshal(pj)
+		if err != nil {
+			l.WithError(err).Errorf("failed to marshal LighthouseJob %s", p.Name)
+			continue
+		}
+
+		// Only apply if any value has changed
+ existingCm := getExistingConfigMap(p)
+
+ if existingCm == nil || existingCm.Data["lighthousejob.json"] != string(lighthouseData) {
+ var cm *applyv1.ConfigMapApplyConfiguration
+ if existingCm != nil {
+ cm, err = applyv1.ExtractConfigMap(existingCm, fieldManager)
+ if err != nil {
+ l.Error(errors.Wrapf(err, "failed to extract ConfigMap"))
+ return true
+ }
+ } else {
+ cm = applyv1.ConfigMap(resourceName, pa.Namespace).WithLabels(labels)
+ }
+ if cm.Data == nil {
+ cm.Data = make(map[string]string)
+ }
+ cm.Data["lighthousejob.json"] = string(lighthouseData)
+
+ _, err := cmInterface.Apply(context.TODO(), cm, metav1.ApplyOptions{Force: true, FieldManager: fieldManager})
+ if err != nil {
+ l.WithError(err).Errorf("failed to apply configmap")
+ return false
+ }
+ }
+ existingCron := getExistingCron(p)
+ if existingCron == nil || existingCron.Spec.Schedule != p.Cron {
+ var cj *applybatchv1.CronJobApplyConfiguration
+ if existingCron != nil {
+ cj, err = applybatchv1.ExtractCronJob(existingCron, fieldManager)
+ if err != nil {
+ l.Error(errors.Wrapf(err, "failed to extract CronJob"))
+ return true
+ }
+ } else {
+ cj = pa.constructCronJob(resourceName, resourceName, labels)
+ }
+			// Spec can be nil when extracted from an existing CronJob
+			if cj.Spec == nil {
+				cj.Spec = &applybatchv1.CronJobSpecApplyConfiguration{}
+			}
+			cj.Spec.Schedule = &p.Cron
+ _, err := cjInterface.Apply(context.TODO(), cj, metav1.ApplyOptions{Force: true, FieldManager: fieldManager})
+ if err != nil {
+ l.WithError(err).Errorf("failed to apply cronjob")
+ return false
+ }
+ }
+ }
+ return false
+}
+
+func (pa *PeriodicAgent) getExistingResources(
+ l *logrus.Entry,
+ cmInterface typedv1.ConfigMapInterface,
+ cjInterface typedbatchv1.CronJobInterface,
+ selector string,
+) (*corev1.ConfigMapList, *batchv1.CronJobList, bool) {
+	if l == nil {
+		l = logrus.NewEntry(logrus.StandardLogger())
+	}
+	cmList, err := cmInterface.List(context.TODO(), metav1.ListOptions{
+		LabelSelector: selector,
+	})
+	if err != nil {
+		l.WithError(err).Error("Can't get periodic ConfigMaps. Periodics will not be initialized")
+ return nil, nil, true
+ }
+
+ cronList, err := cjInterface.List(context.TODO(), metav1.ListOptions{
+ LabelSelector: selector,
+ })
+ if err != nil {
+		l.WithError(err).Error("Can't get periodic CronJobs. Periodics will not be initialized")
+ return nil, nil, true
+ }
+ return cmList, cronList, false
+}
+
+func (pa *PeriodicAgent) constructCronJob(resourceName, configMapName string, labels map[string]string) *applybatchv1.CronJobApplyConfiguration {
+	const volumeName = "lighthousejob"
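+	// SERVICE_ACCOUNT is exposed on the webhooks deployment via the downward API; fall back to the default name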
+ serviceAccount, found := os.LookupEnv("SERVICE_ACCOUNT")
+ if !found {
+ serviceAccount = "lighthouse-webhooks"
+ }
+ return applybatchv1.CronJob(resourceName, pa.Namespace).
+ WithLabels(labels).
+ WithSpec((&applybatchv1.CronJobSpecApplyConfiguration{}).
+ WithJobTemplate((&applybatchv1.JobTemplateSpecApplyConfiguration{}).
+ WithLabels(labels).
+ WithSpec((&applybatchv1.JobSpecApplyConfiguration{}).
+ WithBackoffLimit(0).
+ WithTemplate((&applyv1.PodTemplateSpecApplyConfiguration{}).
+ WithLabels(labels).
+ WithSpec((&applyv1.PodSpecApplyConfiguration{}).
+ WithEnableServiceLinks(false).
+ WithServiceAccountName(serviceAccount).
+ WithRestartPolicy("Never").
+ WithContainers((&applyv1.ContainerApplyConfiguration{}).
+ WithName("create-lighthousejob").
+ WithImage("bitnami/kubectl").
+ WithCommand("/bin/bash").
+ WithArgs("-c", `
+set -o errexit
+create_output=$(kubectl create -f /config/lighthousejob.json)
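+# the create output is "<resource>/<name> created"; capture everything before the last space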
+[[ $create_output =~ (.*)\ ]]
+kubectl patch ${BASH_REMATCH[1]} --type=merge --subresource status --patch 'status: {state: triggered}'
+`).
+ WithVolumeMounts((&applyv1.VolumeMountApplyConfiguration{}).
+ WithName(volumeName).
+ WithMountPath("/config"))).
+ WithVolumes((&applyv1.VolumeApplyConfiguration{}).
+ WithName(volumeName).
+ WithConfigMap((&applyv1.ConfigMapVolumeSourceApplyConfiguration{}).
+ WithName(configMapName))))))))
+}
+
+func (pa *PeriodicAgent) filterPeriodics(enabled map[string]*bool) map[string]*bool {
+ if pa.SCMClient.Contents == nil {
+ return enabled
+ }
+
+ enable := true
+ hasPeriodics := make(map[string]*bool)
+ for fullName := range enabled {
+ list, _, err := pa.SCMClient.Contents.List(context.TODO(), fullName, ".lighthouse", "HEAD")
+ if err != nil {
+ continue
+ }
+ for _, file := range list {
+ if file.Type == "dir" {
+ triggers, _, err := pa.SCMClient.Contents.Find(context.TODO(), fullName, file.Path+"/triggers.yaml", "HEAD")
+ if err != nil {
+ continue
+ }
+ if strings.Contains(string(triggers.Data), "periodics:") {
+ hasPeriodics[fullName] = &enable
+ }
+ }
+ }
+ delayForRate(pa.SCMClient.Rate())
+ }
+
+ return hasPeriodics
+}
+
+func delayForRate(r scm.Rate) {
+ if r.Remaining < 100 {
+		duration := time.Duration(r.Reset-time.Now().Unix()) * time.Second
+		logrus.Warnf("waiting %s until rate limit reset: %+v", duration, r)
+		time.Sleep(duration)
+ }
+}
diff --git a/pkg/plugins/trigger/periodic_test.go b/pkg/plugins/trigger/periodic_test.go
new file mode 100644
index 000000000..4627d044c
--- /dev/null
+++ b/pkg/plugins/trigger/periodic_test.go
@@ -0,0 +1,181 @@
+package trigger
+
+import (
+ "context"
+ "testing"
+
+ "github.com/jenkins-x/go-scm/scm"
+ scmfake "github.com/jenkins-x/go-scm/scm/driver/fake"
+ "github.com/jenkins-x/lighthouse/pkg/config"
+ "github.com/jenkins-x/lighthouse/pkg/config/lighthouse"
+ "github.com/jenkins-x/lighthouse/pkg/filebrowser"
+ fbfake "github.com/jenkins-x/lighthouse/pkg/filebrowser/fake"
+ "github.com/jenkins-x/lighthouse/pkg/plugins"
+ "github.com/jenkins-x/lighthouse/pkg/triggerconfig/inrepo"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+ batchv1 "k8s.io/api/batch/v1"
+ v1 "k8s.io/api/core/v1"
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ kubefake "k8s.io/client-go/kubernetes/fake"
+ clienttesting "k8s.io/client-go/testing"
+)
+
+var kubeClient *kubefake.Clientset
+
+// TODO: test more cases
+func TestUpdatePeriodics(t *testing.T) {
+ namespace, p := setupPeriodicsTest()
+	fileBrowsers, err := filebrowser.NewFileBrowsers(filebrowser.GitHubURL, fbfake.NewFakeFileBrowser("test_data", true))
+	require.NoError(t, err, "failed to create filebrowsers")
+	resolverCache := inrepo.NewResolverCache()
+	fc := filebrowser.NewFetchCache()
+	cfg, err := inrepo.LoadTriggerConfig(fileBrowsers, fc, resolverCache, "testorg", "myapp", "")
+	require.NoError(t, err, "failed to load trigger config")
+
+ agent := plugins.Agent{
+ Config: &config.Config{
+ JobConfig: config.JobConfig{
+ Periodics: cfg.Spec.Periodics,
+ },
+ },
+ Logger: logrus.WithField("plugin", pluginName),
+ }
+
+ pe := &scm.PushHook{
+ Ref: "refs/heads/master",
+ Repo: scm.Repository{
+ Namespace: "testorg",
+ Name: "myapp",
+ FullName: "testorg/myapp",
+ },
+ Commits: []scm.PushCommit{
+ {
+ ID: "12345678909876",
+ Message: "Adding periodics",
+ Modified: []string{
+ ".lighthouse/jenkins-x/triggers.yaml",
+ },
+ },
+ },
+ }
+
+ p.UpdatePeriodics(kubeClient, agent, pe)
+
+ selector := "app=lighthouse-webhooks,component=periodic,repo,trigger"
+ cms, err := kubeClient.CoreV1().ConfigMaps(namespace).
+ List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
+ require.NoError(t, err, "failed to get ConfigMaps")
+ require.Len(t, cms.Items, 1)
+ require.Equal(t, lighthouseJob, cms.Items[0].Data["lighthousejob.json"])
+
+ cjs, err := kubeClient.BatchV1().CronJobs(namespace).List(context.TODO(), metav1.ListOptions{})
+ require.NoError(t, err, "failed to get CronJobs")
+ require.Len(t, cjs.Items, 1)
+ cj := cjs.Items[0].Spec
+ require.Equal(t, "0 4 * * MON-FRI", cj.Schedule)
+ containers := cj.JobTemplate.Spec.Template.Spec.Containers
+ require.Len(t, containers, 1)
+ require.Len(t, containers[0].Args, 2)
+}
+
+func TestInitializePeriodics(t *testing.T) {
+ namespace, p := setupPeriodicsTest()
+
+ var enabled = true
+ configAgent := &config.Agent{}
+ configAgent.Set(&config.Config{
+ ProwConfig: lighthouse.Config{
+ InRepoConfig: lighthouse.InRepoConfig{
+ Enabled: map[string]*bool{"testorg/myapp": &enabled},
+ },
+ },
+ })
+ fileBrowsers, err := filebrowser.NewFileBrowsers(filebrowser.GitHubURL, fbfake.NewFakeFileBrowser("test_data", true))
+ require.NoError(t, err, "failed to create filebrowsers")
+
+ p.InitializePeriodics(kubeClient, configAgent, fileBrowsers)
+
+ selector := "app=lighthouse-webhooks,component=periodic,org,repo,trigger"
+ cms, err := kubeClient.CoreV1().ConfigMaps(namespace).
+ List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
+ require.NoError(t, err, "failed to get ConfigMaps")
+ require.Len(t, cms.Items, 1)
+ require.Equal(t, lighthouseJob, cms.Items[0].Data["lighthousejob.json"])
+
+ cjs, err := kubeClient.BatchV1().CronJobs(namespace).List(context.TODO(), metav1.ListOptions{})
+ require.NoError(t, err, "failed to get CronJobs")
+ require.Len(t, cjs.Items, 1)
+ cj := cjs.Items[0].Spec
+ require.Equal(t, "0 4 * * MON-FRI", cj.Schedule)
+ containers := cj.JobTemplate.Spec.Template.Spec.Containers
+ require.Len(t, containers, 1)
+ require.Len(t, containers[0].Args, 2)
+}
+
+func setupPeriodicsTest() (string, *PeriodicAgent) {
+ const namespace = "default"
+ newDefault, data := scmfake.NewDefault()
+ data.ContentDir = "test_data"
+
+ p := &PeriodicAgent{Namespace: namespace, SCMClient: newDefault}
+ kubeClient = kubefake.NewSimpleClientset(&v1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: "config"},
+ })
+
+ kubeClient.PrependReactor(
+ "patch",
+ "configmaps",
+ fakeUpsert,
+ )
+
+ kubeClient.PrependReactor(
+ "patch",
+ "cronjobs",
+ fakeUpsert,
+ )
+ return namespace, p
+}
+
+func fakeUpsert(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
+ pa := action.(clienttesting.PatchAction)
+ if pa.GetPatchType() == types.ApplyPatchType {
+ // Apply patches are supposed to upsert, but fake client fails if the object doesn't exist,
+ // if an apply patch occurs for a deployment that doesn't yet exist, create it.
+ // However, we already hold the fakeclient lock, so we can't use the front door.
+ rfunc := clienttesting.ObjectReaction(kubeClient.Tracker())
+ _, obj, err := rfunc(
+ clienttesting.NewGetAction(pa.GetResource(), pa.GetNamespace(), pa.GetName()),
+ )
+ if kerrors.IsNotFound(err) || obj == nil {
+ objmeta := metav1.ObjectMeta{
+ Name: pa.GetName(),
+ Namespace: pa.GetNamespace(),
+ }
+ var newobj runtime.Object
+ switch pa.GetResource().Resource {
+ case "configmaps":
+ newobj = &v1.ConfigMap{ObjectMeta: objmeta}
+ case "cronjobs":
+ newobj = &batchv1.CronJob{ObjectMeta: objmeta}
+ }
+ _, _, _ = rfunc(
+ clienttesting.NewCreateAction(
+ pa.GetResource(),
+ pa.GetNamespace(),
+ newobj,
+ ),
+ )
+ }
+ return rfunc(clienttesting.NewPatchAction(
+ pa.GetResource(),
+ pa.GetNamespace(),
+ pa.GetName(),
+ types.StrategicMergePatchType,
+ pa.GetPatch()))
+ }
+ return false, nil, nil
+}
+
+const lighthouseJob = `{"kind":"LighthouseJob","apiVersion":"lighthouse.jenkins.io/v1alpha1","metadata":{"generateName":"testorg-myapp-","creationTimestamp":null,"labels":{"app":"lighthouse-webhooks","component":"periodic","created-by-lighthouse":"true","lighthouse.jenkins-x.io/job":"dailyjob","lighthouse.jenkins-x.io/type":"periodic","org":"testorg","repo":"myapp","trigger":"dailyjob"},"annotations":{"lighthouse.jenkins-x.io/job":"dailyjob"}},"spec":{"type":"periodic","agent":"tekton-pipeline","job":"dailyjob","refs":{"org":"testorg","repo":"myapp"},"pipeline_run_spec":{"pipelineSpec":{"tasks":[{"name":"echo-greeting","taskRef":{"name":"task-echo-message"},"params":[{"name":"MESSAGE","value":"$(params.GREETINGS)"},{"name":"BUILD_ID","value":"$(params.BUILD_ID)"},{"name":"JOB_NAME","value":"$(params.JOB_NAME)"},{"name":"JOB_SPEC","value":"$(params.JOB_SPEC)"},{"name":"JOB_TYPE","value":"$(params.JOB_TYPE)"},{"name":"PULL_BASE_REF","value":"$(params.PULL_BASE_REF)"},{"name":"PULL_BASE_SHA","value":"$(params.PULL_BASE_SHA)"},{"name":"PULL_NUMBER","value":"$(params.PULL_NUMBER)"},{"name":"PULL_PULL_REF","value":"$(params.PULL_PULL_REF)"},{"name":"PULL_PULL_SHA","value":"$(params.PULL_PULL_SHA)"},{"name":"PULL_REFS","value":"$(params.PULL_REFS)"},{"name":"REPO_NAME","value":"$(params.REPO_NAME)"},{"name":"REPO_OWNER","value":"$(params.REPO_OWNER)"},{"name":"REPO_URL","value":"$(params.REPO_URL)"}]}],"params":[{"name":"GREETINGS","type":"string","description":"morning greetings, default is Good Morning!","default":"Good Morning!"},{"name":"BUILD_ID","type":"string","description":"the unique build number"},{"name":"JOB_NAME","type":"string","description":"the name of the job which is the trigger context name"},{"name":"JOB_SPEC","type":"string","description":"the specification of the job"},{"name":"JOB_TYPE","type":"string","description":"'the kind of job: postsubmit or presubmit'"},{"name":"PULL_BASE_REF","type":"string","description":"the base git reference of the pull request"},{"name":"PULL_BASE_SHA","type":"string","description":"the git sha of the base of the pull request"},{"name":"PULL_NUMBER","type":"string","description":"git pull request number","default":""},{"name":"PULL_PULL_REF","type":"string","description":"git pull request ref in the form 'refs/pull/$PULL_NUMBER/head'","default":""},{"name":"PULL_PULL_SHA","type":"string","description":"git revision to checkout (branch, tag, sha, ref…)","default":""},{"name":"PULL_REFS","type":"string","description":"git pull reference strings of base and latest in the form 'master:$PULL_BASE_SHA,$PULL_NUMBER:$PULL_PULL_SHA:refs/pull/$PULL_NUMBER/head'"},{"name":"REPO_NAME","type":"string","description":"git repository name"},{"name":"REPO_OWNER","type":"string","description":"git repository owner (user or organisation)"},{"name":"REPO_URL","type":"string","description":"git url to clone"}]}},"pipeline_run_params":[{"name":"GREETINGS"}]},"status":{"startTime":null}}`
diff --git a/pkg/plugins/trigger/test_data/testorg/myapp/.lighthouse/jenkins-x/dailyjob.yaml b/pkg/plugins/trigger/test_data/testorg/myapp/.lighthouse/jenkins-x/dailyjob.yaml
new file mode 100644
index 000000000..2c3ad3550
--- /dev/null
+++ b/pkg/plugins/trigger/test_data/testorg/myapp/.lighthouse/jenkins-x/dailyjob.yaml
@@ -0,0 +1,18 @@
+apiVersion: tekton.dev/v1beta1
+kind: PipelineRun
+metadata:
+ name: dailyjob
+spec:
+ pipelineSpec:
+ params:
+ - name: GREETINGS
+ description: "morning greetings, default is Good Morning!"
+ type: string
+ default: "Good Morning!"
+ tasks:
+ - name: echo-greeting
+ taskRef:
+ name: task-echo-message
+ params:
+ - name: MESSAGE
+ value: $(params.GREETINGS)
diff --git a/pkg/plugins/trigger/test_data/testorg/myapp/.lighthouse/jenkins-x/triggers.yaml b/pkg/plugins/trigger/test_data/testorg/myapp/.lighthouse/jenkins-x/triggers.yaml
new file mode 100644
index 000000000..83a239ae2
--- /dev/null
+++ b/pkg/plugins/trigger/test_data/testorg/myapp/.lighthouse/jenkins-x/triggers.yaml
@@ -0,0 +1,10 @@
+apiVersion: config.lighthouse.jenkins-x.io/v1alpha1
+kind: TriggerConfig
+spec:
+ periodics:
+ - name: dailyjob
+ cron: "0 4 * * MON-FRI"
+ source: dailyjob.yaml
+ pipeline_run_params:
+ - name: GREETINGS
+ valueTemplate: 'Howdy!'
diff --git a/pkg/plugins/trigger/trigger.go b/pkg/plugins/trigger/trigger.go
index ef749954a..330d06305 100644
--- a/pkg/plugins/trigger/trigger.go
+++ b/pkg/plugins/trigger/trigger.go
@@ -45,9 +45,10 @@ var plugin = plugins.Plugin{
Description: `The trigger plugin starts tests in reaction to commands and pull request events. It is responsible for ensuring that test jobs are only run on trusted PRs. A PR is considered trusted if the author is a member of the 'trusted organization' for the repository or if such a member has left an '/ok-to-test' command on the PR.
Trigger starts jobs automatically when a new trusted PR is created or when an untrusted PR becomes trusted, but it can also be used to start jobs manually via the '/test' command.
The '/retest' command can be used to rerun jobs that have reported failure.`,
- ConfigHelpProvider: configHelp,
- PullRequestHandler: handlePullRequest,
- PushEventHandler: handlePush,
+ ConfigHelpProvider: configHelp,
+ PullRequestHandler: handlePullRequest,
+ PushEventHandler: handlePush,
+ DeploymentStatusHandler: handleDeploymentStatus,
Commands: []plugins.Command{{
Name: "ok-to-test",
Description: "Marks a PR as 'trusted' and starts tests.",
@@ -75,6 +76,10 @@ var plugin = plugins.Plugin{
}},
}
+func handleDeploymentStatus(agent plugins.Agent, ds scm.DeploymentStatusHook) error {
+ return handleDeployment(getClient(agent), ds)
+}
+
func init() {
customTriggerCommand := os.Getenv(customerTriggerCommandEnvVar)
if customTriggerCommand != "" {
diff --git a/pkg/triggerconfig/inrepo/load_triggers.go b/pkg/triggerconfig/inrepo/load_triggers.go
index 58f022cad..0af32fea7 100644
--- a/pkg/triggerconfig/inrepo/load_triggers.go
+++ b/pkg/triggerconfig/inrepo/load_triggers.go
@@ -95,6 +95,8 @@ func mergeConfigs(m map[string]*triggerconfig.Config) (*triggerconfig.Config, er
// lets check for duplicates
presubmitNames := map[string]string{}
postsubmitNames := map[string]string{}
+ periodicNames := map[string]string{}
+ deploymentNames := map[string]string{}
for file, cfg := range m {
for _, ps := range cfg.Spec.Presubmits {
name := ps.Name
@@ -114,6 +116,24 @@ func mergeConfigs(m map[string]*triggerconfig.Config) (*triggerconfig.Config, er
return nil, errors.Errorf("duplicate postsubmit %s in file %s and %s", name, otherFile, file)
}
}
+ for _, ps := range cfg.Spec.Periodics {
+ name := ps.Name
+ otherFile := periodicNames[name]
+ if otherFile == "" {
+ periodicNames[name] = file
+ } else {
+ return nil, errors.Errorf("duplicate periodic %s in file %s and %s", name, otherFile, file)
+ }
+ }
+ for _, ps := range cfg.Spec.Deployments {
+ name := ps.Name
+ otherFile := deploymentNames[name]
+ if otherFile == "" {
+ deploymentNames[name] = file
+ } else {
+ return nil, errors.Errorf("duplicate deployment %s in file %s and %s", name, otherFile, file)
+ }
+ }
answer = merge.CombineConfigs(answer, cfg)
}
if answer == nil {
@@ -193,6 +213,57 @@ func loadConfigFile(filePath string, fileBrowsers *filebrowser.FileBrowsers, fc
})
}
}
+ for i := range repoConfig.Spec.Deployments {
+ r := &repoConfig.Spec.Deployments[i]
+ sourcePath := r.SourcePath
+ if sourcePath != "" {
+ if r.Agent == "" {
+ r.Agent = job.TektonPipelineAgent
+ }
+ // lets load the local file data now as we have locked the git file system
+ data, err := loadLocalFile(dir, sourcePath, sha)
+ if err != nil {
+ return nil, err
+ }
+ r.SetPipelineLoader(func(base *job.Base) error {
+ err = loadJobBaseFromSourcePath(data, fileBrowsers, fc, cache, base, ownerName, repoName, sourcePath, sha)
+ if err != nil {
+ return errors.Wrapf(err, "failed to load source for deployment %s", r.Name)
+ }
+ r.Base = *base
+ if r.Agent == "" && r.PipelineRunSpec != nil {
+ r.Agent = job.TektonPipelineAgent
+ }
+ return nil
+ })
+ }
+ }
+ for i := range repoConfig.Spec.Periodics {
+ r := &repoConfig.Spec.Periodics[i]
+ sourcePath := r.SourcePath
+ if sourcePath != "" {
+ if r.Agent == "" {
+ r.Agent = job.TektonPipelineAgent
+ }
+ // lets load the local file data now as we have locked the git file system
+ data, err := loadLocalFile(dir, sourcePath, sha)
+ if err != nil {
+ return nil, err
+ }
+ r.SetPipelineLoader(func(base *job.Base) error {
+ err = loadJobBaseFromSourcePath(data, fileBrowsers, fc, cache, base, ownerName, repoName, sourcePath, sha)
+ if err != nil {
+ return errors.Wrapf(err, "failed to load source for periodic %s", r.Name)
+ }
+ r.Base = *base
+ if r.Agent == "" && r.PipelineRunSpec != nil {
+ r.Agent = job.TektonPipelineAgent
+ }
+ return nil
+ })
+ }
+ }
+
return repoConfig, nil
}
diff --git a/pkg/triggerconfig/merge/combine.go b/pkg/triggerconfig/merge/combine.go
index 0fbc3f32b..4b054ed54 100644
--- a/pkg/triggerconfig/merge/combine.go
+++ b/pkg/triggerconfig/merge/combine.go
@@ -18,5 +18,11 @@ func CombineConfigs(a, b *triggerconfig.Config) *triggerconfig.Config {
for _, r := range b.Spec.Postsubmits {
a.Spec.Postsubmits = append(a.Spec.Postsubmits, r)
}
+ for _, r := range b.Spec.Periodics {
+ a.Spec.Periodics = append(a.Spec.Periodics, r)
+ }
+ for _, r := range b.Spec.Deployments {
+ a.Spec.Deployments = append(a.Spec.Deployments, r)
+ }
return a
}
diff --git a/pkg/triggerconfig/merge/merge.go b/pkg/triggerconfig/merge/merge.go
index d8b226dec..57739ed6c 100644
--- a/pkg/triggerconfig/merge/merge.go
+++ b/pkg/triggerconfig/merge/merge.go
@@ -64,6 +64,35 @@ func ConfigMerge(cfg *config.Config, pluginsCfg *plugins.Configuration, repoConf
}
cfg.Postsubmits[repoKey] = ps
}
+ if len(repoConfig.Spec.Periodics) > 0 {
+ cfg.Periodics = append(cfg.Periodics, repoConfig.Spec.Periodics...)
+ }
+ if len(repoConfig.Spec.Deployments) > 0 {
+ // lets make a new map to avoid concurrent modifications
+ m := map[string][]job.Deployment{}
+ if cfg.Deployments != nil {
+ for k, v := range cfg.Deployments {
+ m[k] = append([]job.Deployment{}, v...)
+ }
+ }
+ cfg.Deployments = m
+
+ ps := cfg.Deployments[repoKey]
+ for _, p := range repoConfig.Spec.Deployments {
+ found := false
+ for i := range ps {
+ pt2 := &ps[i]
+ if pt2.Name == p.Name {
+ ps[i] = p
+ found = true
+ }
+ }
+ if !found {
+ ps = append(ps, p)
+ }
+ }
+ cfg.Deployments[repoKey] = ps
+ }
// lets make sure we've got a trigger added
idx := len(pluginsCfg.Triggers) - 1
diff --git a/pkg/triggerconfig/types.go b/pkg/triggerconfig/types.go
index 279303b6e..96a1e14b2 100644
--- a/pkg/triggerconfig/types.go
+++ b/pkg/triggerconfig/types.go
@@ -23,6 +23,10 @@ type ConfigSpec struct {
// Postsubmit zero or more postsubmits
Postsubmits []job.Postsubmit `json:"postsubmits,omitempty"`
+
+ Periodics []job.Periodic `json:"periodics,omitempty"`
+
+ Deployments []job.Deployment `json:"deployments,omitempty"`
}
// ConfigList contains a list of Config
diff --git a/pkg/webhook/events.go b/pkg/webhook/events.go
index bf61ca938..b290c5c80 100644
--- a/pkg/webhook/events.go
+++ b/pkg/webhook/events.go
@@ -22,15 +22,19 @@ import (
"net/url"
"regexp"
"strconv"
+ "strings"
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/jenkins-x/go-scm/scm"
"github.com/jenkins-x/lighthouse/pkg/config"
"github.com/jenkins-x/lighthouse/pkg/filebrowser"
+ gitv2 "github.com/jenkins-x/lighthouse/pkg/git/v2"
"github.com/jenkins-x/lighthouse/pkg/plugins"
+ "github.com/jenkins-x/lighthouse/pkg/plugins/trigger"
"github.com/jenkins-x/lighthouse/pkg/scmprovider"
"github.com/jenkins-x/lighthouse/pkg/util"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -39,6 +43,7 @@ type Server struct {
ClientAgent *plugins.ClientAgent
Plugins *plugins.ConfigAgent
ConfigAgent *config.Agent
+ PeriodicAgent *trigger.PeriodicAgent
ServerURL *url.URL
TokenGenerator func() []byte
Metrics *Metrics
@@ -214,6 +219,11 @@ func (s *Server) handlePushEvent(l *logrus.Entry, pe *scm.PushHook) {
}(p, h.PushEventHandler)
}
}
+ // Update periodics from the default branch
+ refBranch := strings.TrimPrefix(pe.Ref, "refs/heads/")
+ if refBranch == pe.Repository().Branch {
+ s.PeriodicAgent.UpdatePeriodics(s.ClientAgent.KubernetesClient, agent, pe)
+ }
l.WithField("count", strconv.Itoa(c)).Info("number of push handlers")
}()
}
@@ -358,6 +368,36 @@ func (s *Server) handleReviewEvent(l *logrus.Entry, re scm.ReviewHook) {
}()
}
+func (s *Server) handleDeploymentStatusEvent(l *logrus.Entry, ds scm.DeploymentStatusHook) {
+ l = l.WithFields(logrus.Fields{
+ scmprovider.OrgLogField: ds.Repo.Namespace,
+ scmprovider.RepoLogField: ds.Repo.Name,
+ })
+ l.Infof("Deployment %s.", ds.Action)
+
+ // lets invoke the agent creation async as this can take a little while
+ go func() {
+ repo := ds.Repo
+ agent, err := s.CreateAgent(l, repo.Namespace, repo.Name, ds.Deployment.Sha)
+ if err != nil {
+ agent.Logger.WithError(err).Error("Error creating agent for DeploymentStatusEvent.")
+ return
+ }
+ for p, h := range s.getPlugins(ds.Repo.Namespace, ds.Repo.Name) {
+ if h.DeploymentStatusHandler != nil {
+ s.wg.Add(1)
+ go func(p string, h plugins.DeploymentStatusHandler) {
+ defer s.wg.Done()
+ if err := h(agent, ds); err != nil {
+ agent.Logger.WithError(err).Error("Error handling DeploymentStatusEvent.")
+ }
+ }(p, h.DeploymentStatusHandler)
+ }
+ }
+ }()
+
+}
+
func (s *Server) reportErrorToPullRequest(l *logrus.Entry, agent plugins.Agent, repo scm.Repository, pr *scm.PullRequestHook, err error) {
fileLink := repo.Link + "/blob/" + pr.PullRequest.Sha + "/"
message := "failed to trigger Pull Request pipeline\n" + util.ErrorToMarkdown(err, fileLink)
@@ -393,3 +433,35 @@ func actionRelatesToPullRequestComment(action scm.Action, l *logrus.Entry) bool
return false
}
}
+
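+// initializeFileBrowser creates the git file browsers used to read in-repo configuration.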
+func (s *Server) initializeFileBrowser(token string, gitCloneUser, gitServerURL string) error {
+ configureOpts := func(opts *gitv2.ClientFactoryOpts) {
+ opts.Token = func() []byte {
+ return []byte(token)
+ }
+ opts.GitUser = func() (name, email string, err error) {
+ name = gitCloneUser
+ return
+ }
+ opts.Username = func() (login string, err error) {
+ login = gitCloneUser
+ return
+ }
+ if s.ServerURL.Host != "" {
+ opts.Host = s.ServerURL.Host
+ }
+ if s.ServerURL.Scheme != "" {
+ opts.Scheme = s.ServerURL.Scheme
+ }
+ }
+ gitFactory, err := gitv2.NewNoMirrorClientFactory(configureOpts)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create git client factory for server %s", gitServerURL)
+ }
+ fb := filebrowser.NewFileBrowserFromGitClient(gitFactory)
+ s.FileBrowsers, err = filebrowser.NewFileBrowsers(gitServerURL, fb)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create git filebrowser %s", gitServerURL)
+ }
+ return nil
+}
diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go
index 70c11c23a..b5741c38c 100644
--- a/pkg/webhook/webhook.go
+++ b/pkg/webhook/webhook.go
@@ -8,28 +8,27 @@ import (
"net/http"
"net/url"
"os"
+ "strconv"
"strings"
- "github.com/jenkins-x/go-scm/pkg/hmac"
-
- "github.com/jenkins-x/lighthouse/pkg/externalplugincfg"
-
lru "github.com/hashicorp/golang-lru"
+ "github.com/jenkins-x/go-scm/pkg/hmac"
"github.com/jenkins-x/go-scm/scm"
"github.com/jenkins-x/lighthouse/pkg/clients"
"github.com/jenkins-x/lighthouse/pkg/config"
- "github.com/jenkins-x/lighthouse/pkg/filebrowser"
+ "github.com/jenkins-x/lighthouse/pkg/externalplugincfg"
"github.com/jenkins-x/lighthouse/pkg/git"
- gitv2 "github.com/jenkins-x/lighthouse/pkg/git/v2"
"github.com/jenkins-x/lighthouse/pkg/launcher"
"github.com/jenkins-x/lighthouse/pkg/metrics"
"github.com/jenkins-x/lighthouse/pkg/plugins"
+ "github.com/jenkins-x/lighthouse/pkg/plugins/trigger"
"github.com/jenkins-x/lighthouse/pkg/util"
"github.com/jenkins-x/lighthouse/pkg/version"
"github.com/jenkins-x/lighthouse/pkg/watcher"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
+ kubeclient "k8s.io/client-go/kubernetes"
)
// WebhooksController holds the command line arguments
@@ -62,8 +61,11 @@ func NewWebhooksController(path, namespace, botName, pluginFilename, configFilen
if o.logWebHooks {
logrus.Info("enabling webhook logging")
}
- var err error
- o.server, err = o.createHookServer()
+ _, kubeClient, lhClient, _, err := clients.GetAPIClients()
+ if err != nil {
+ return nil, errors.Wrap(err, "Error creating kubernetes resource clients.")
+ }
+ o.server, err = o.createHookServer(kubeClient)
if err != nil {
return nil, errors.Wrapf(err, "failed to create Hook Server")
}
@@ -75,10 +77,6 @@ func NewWebhooksController(path, namespace, botName, pluginFilename, configFilen
}
o.gitClient = gitClient
- _, _, lhClient, _, err := clients.GetAPIClients()
- if err != nil {
- return nil, errors.Wrap(err, "Error creating kubernetes resource clients.")
- }
o.launcher = launcher.NewLauncher(lhClient, o.namespace)
return o, nil
@@ -220,29 +218,16 @@ func (o *WebhooksController) handleWebhookOrPollRequest(w http.ResponseWriter, r
ghaSecretDir := util.GetGitHubAppSecretDir()
- var gitCloneUser string
- var token string
- if ghaSecretDir != "" {
- gitCloneUser = util.GitHubAppGitRemoteUsername
- tokenFinder := util.NewOwnerTokensDir(serverURL, ghaSecretDir)
- token, err = tokenFinder.FindToken(webhook.Repository().Namespace)
- if err != nil {
- logrus.Errorf("failed to read owner token: %s", err.Error())
- responseHTTPError(w, http.StatusInternalServerError, fmt.Sprintf("500 Internal Server Error: failed to read owner token: %s", err.Error()))
- return
- }
- } else {
- gitCloneUser = util.GetBotName(cfg)
- token, err = util.GetSCMToken(util.GitKind(cfg))
- if err != nil {
- logrus.Errorf("no scm token specified: %s", err.Error())
- responseHTTPError(w, http.StatusInternalServerError, fmt.Sprintf("500 Internal Server Error: no scm token specified: %s", err.Error()))
- return
- }
+ gitCloneUser, token, err := getCredentials(ghaSecretDir, serverURL, webhook.Repository().Namespace, cfg)
+ if err != nil {
+ logrus.Error(err.Error())
+ responseHTTPError(w, http.StatusInternalServerError, fmt.Sprintf("500 Internal Server Error: %s", err.Error()))
+ return
}
_, kubeClient, lhClient, _, err := clients.GetAPIClients()
if err != nil {
responseHTTPError(w, http.StatusInternalServerError, fmt.Sprintf("500 Internal Server Error: %s", err.Error()))
+ return
}
o.gitClient.SetCredentials(gitCloneUser, func() []byte {
@@ -260,35 +245,8 @@ func (o *WebhooksController) handleWebhookOrPollRequest(w http.ResponseWriter, r
}
if o.server.FileBrowsers == nil {
- configureOpts := func(opts *gitv2.ClientFactoryOpts) {
- opts.Token = func() []byte {
- return []byte(token)
- }
- opts.GitUser = func() (name, email string, err error) {
- name = gitCloneUser
- return
- }
- opts.Username = func() (login string, err error) {
- login = gitCloneUser
- return
- }
- if o.server.ServerURL.Host != "" {
- opts.Host = o.server.ServerURL.Host
- }
- if o.server.ServerURL.Scheme != "" {
- opts.Scheme = o.server.ServerURL.Scheme
- }
- }
- gitFactory, err := gitv2.NewNoMirrorClientFactory(configureOpts)
+ err := o.server.initializeFileBrowser(token, gitCloneUser, o.gitServerURL)
if err != nil {
- err = errors.Wrapf(err, "failed to create git client factory for server %s", o.gitServerURL)
- responseHTTPError(w, http.StatusInternalServerError, fmt.Sprintf("500 Internal Server Error: %s", err.Error()))
- return
- }
- fb := filebrowser.NewFileBrowserFromGitClient(gitFactory)
- o.server.FileBrowsers, err = filebrowser.NewFileBrowsers(o.gitServerURL, fb)
- if err != nil {
- err = errors.Wrapf(err, "failed to create git filebrowsers%s", o.gitServerURL)
responseHTTPError(w, http.StatusInternalServerError, fmt.Sprintf("500 Internal Server Error: %s", err.Error()))
return
}
@@ -318,6 +276,24 @@ func (o *WebhooksController) handleWebhookOrPollRequest(w http.ResponseWriter, r
}
}
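+// getCredentials returns the git clone user and token, from the GitHub App secret directory when configured, otherwise from the bot configuration.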
+func getCredentials(ghaSecretDir string, serverURL string, owner string, cfg func() *config.Config) (gitCloneUser string, token string, err error) {
+ if ghaSecretDir != "" {
+ gitCloneUser = util.GitHubAppGitRemoteUsername
+ tokenFinder := util.NewOwnerTokensDir(serverURL, ghaSecretDir)
+ token, err = tokenFinder.FindToken(owner)
+ if err != nil {
+ err = errors.Wrap(err, "failed to read owner token")
+ }
+ } else {
+ gitCloneUser = util.GetBotName(cfg)
+ token, err = util.GetSCMToken(util.GitKind(cfg))
+ if err != nil {
+ err = errors.Wrap(err, "no scm token specified")
+ }
+ }
+ return
+}
+
// ProcessWebHook process a webhook
func (o *WebhooksController) ProcessWebHook(l *logrus.Entry, webhook scm.Webhook) (*logrus.Entry, string, error) {
repository := webhook.Repository()
@@ -475,6 +451,21 @@ func (o *WebhooksController) ProcessWebHook(l *logrus.Entry, webhook scm.Webhook
o.server.handleReviewEvent(l, *prReviewHook)
return l, "processed PR review hook", nil
}
+ deploymentStatusHook, ok := webhook.(*scm.DeploymentStatusHook)
+ if ok {
+ action := deploymentStatusHook.Action
+ fields["Action"] = action.String()
+ status := deploymentStatusHook.DeploymentStatus
+ fields["Status.State"] = status.State
+ fields["Status.Author"] = status.Author
+ fields["Status.LogLink"] = status.LogLink
+ l = l.WithFields(fields)
+
+		l.Info("invoking deployment status handler")
+
+ o.server.handleDeploymentStatusEvent(l, *deploymentStatusHook)
+		return l, "processed deployment status hook", nil
+ }
l.Debugf("unknown kind %s webhook %#v", webhook.Kind(), webhook)
return l, fmt.Sprintf("unknown hook %s", webhook.Kind()), nil
}
@@ -483,7 +474,7 @@ func (o *WebhooksController) secretFn(webhook scm.Webhook) (string, error) {
return util.HMACToken(), nil
}
-func (o *WebhooksController) createHookServer() (*Server, error) {
+func (o *WebhooksController) createHookServer(kc kubeclient.Interface) (*Server, error) {
configAgent := &config.Agent{}
pluginAgent := &plugins.ConfigAgent{}
@@ -521,13 +512,35 @@ func (o *WebhooksController) createHookServer() (*Server, error) {
}
server := &Server{
- ConfigAgent: configAgent,
- Plugins: pluginAgent,
- Metrics: promMetrics,
- ServerURL: serverURL,
- InRepoCache: cache,
+ ConfigAgent: configAgent,
+ Plugins: pluginAgent,
+ Metrics: promMetrics,
+ ServerURL: serverURL,
+ InRepoCache: cache,
+ PeriodicAgent: &trigger.PeriodicAgent{Namespace: o.namespace},
//TokenGenerator: secretAgent.GetTokenGenerator(o.webhookSecretFile),
}
+
+ initializePeriodics, _ := strconv.ParseBool(os.Getenv("INITIALIZE_PERIODICS"))
+ if initializePeriodics && !server.PeriodicAgent.PeriodicsInitialized(o.namespace, kc) {
+ if server.FileBrowsers == nil {
+ ghaSecretDir := util.GetGitHubAppSecretDir()
+
+ gitCloneUser, token, err := getCredentials(ghaSecretDir, o.gitServerURL, "", configAgent.Config)
+ if err != nil {
+ logrus.Error(err.Error())
+ } else {
+ err = server.initializeFileBrowser(token, gitCloneUser, o.gitServerURL)
+ if err != nil {
+ logrus.Error(err.Error())
+ }
+ }
+ }
+ if server.FileBrowsers != nil {
+ go server.PeriodicAgent.InitializePeriodics(kc, configAgent, server.FileBrowsers)
+ }
+ }
+
return server, nil
}