diff --git a/.github/workflows/helm_release.yaml b/.github/workflows/helm_release.yaml
new file mode 100644
index 0000000..1858f14
--- /dev/null
+++ b/.github/workflows/helm_release.yaml
@@ -0,0 +1,27 @@
+name: Helm Chart publish
+
+on:
+  push:
+    paths:
+      - 'charts/**'
+    branches:
+      - main
+jobs:
+  release:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@intelops.dev"
+
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@v1.5.0
+        env:
+          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+          CR_SKIP_EXISTING: true
diff --git a/.github/workflows/proact-scheduler-container-pr.yaml b/.github/workflows/proact-scheduler-container-pr.yaml
new file mode 100644
index 0000000..dc24ee1
--- /dev/null
+++ b/.github/workflows/proact-scheduler-container-pr.yaml
@@ -0,0 +1,49 @@
+name: Docker Image build for proact-scheduler (pull request)
+
+on:
+  pull_request:
+    branches:
+      - 'main'
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    env:
+      REGISTRY: ghcr.io
+      GH_URL: https://github.com
+      registry_username: ${{ github.actor }}
+      registry_password: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - uses: docker/setup-buildx-action@v1
+        name: Set up Docker Buildx
+
+      -
+        name: Login to ghcr registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ env.registry_username }}
+          password: ${{ env.registry_password }}
+
+      -
+        name: Build and push on PR
+        uses: docker/build-push-action@v4
+        if: github.event_name == 'pull_request'
+        with:
+          context: .
+          file: ./Dockerfile
+          push: true
+          tags: ${{ env.REGISTRY }}/${{ github.repository }}:pr-${{ github.event.pull_request.number }}
+          build-args: |
+            "GITHUB_TOKEN=${{ env.registry_password }}"
+
diff --git a/.github/workflows/proact-scheduler-container-release.yaml b/.github/workflows/proact-scheduler-container-release.yaml
new file mode 100644
index 0000000..1133e12
--- /dev/null
+++ b/.github/workflows/proact-scheduler-container-release.yaml
@@ -0,0 +1,69 @@
+name: proact-scheduler-release
+on:
+  push:
+    tags:
+      - "v*.*.*"
+
+jobs:
+  push_to_registry:
+    name: Build and push Docker image to GitHub Container Registry
+    runs-on: ubuntu-latest
+    permissions:
+      packages: write
+      id-token: write
+      contents: read
+      actions: read
+      security-events: write
+    env:
+      REGISTRY: ghcr.io
+      GH_URL: https://github.com
+      registry_username: ${{ github.actor }}
+      registry_password: ${{ secrets.GITHUB_TOKEN }}
+
+    steps:
+      - name: Set environment variable
+        run: |
+          echo "RELEASE_VERSION=${GITHUB_REF:10}" >> $GITHUB_ENV
+
+      - name: Test environment variable
+        run: echo ${{ env.RELEASE_VERSION }}
+
+      - name: Check out GitHub repo
+        uses: actions/checkout@v3
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ env.registry_username }}
+          password: ${{ env.registry_password }}
+
+      - name: Build image and push to GitHub Container Registry
+        uses: docker/build-push-action@v4
+        with:
+          push: true
+          context: .
+          file: ./Dockerfile
+          tags: ${{ env.REGISTRY }}/${{ github.repository }}:${{ env.RELEASE_VERSION }}
+
+      - name: Install cosign
+        uses: sigstore/cosign-installer@main
+      - name: Sign the images
+        run: |
+          cosign sign -y ${{ env.REGISTRY }}/${{ github.repository }}:${{ env.RELEASE_VERSION }}
+        env:
+          COSIGN_EXPERIMENTAL: 1
+
+      - name: Verify the pushed tags
+        run: cosign verify ${{ env.REGISTRY }}/${{ github.repository }}:${{ env.RELEASE_VERSION }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/proact-scheduler-container-release.yaml@refs/tags/${{ env.RELEASE_VERSION }} --certificate-oidc-issuer https://token.actions.githubusercontent.com
+        env:
+          COSIGN_EXPERIMENTAL: 1
+
+      - name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph
+        uses: aquasecurity/trivy-action@master
+        with:
+          scan-type: 'fs'
+          format: 'github'
+          output: 'dependency-results.sbom.json'
+          image-ref: '.'
+          github-pat: ${{ env.registry_password }}
diff --git a/.github/workflows/proact-scheduler-container.yaml b/.github/workflows/proact-scheduler-container.yaml
new file mode 100644
index 0000000..7e28819
--- /dev/null
+++ b/.github/workflows/proact-scheduler-container.yaml
@@ -0,0 +1,86 @@
+name: Docker Image build for proact-scheduler
+on:
+  push:
+    paths-ignore:
+      - '**.md'
+      - 'charts/**'
+    branches:
+      - 'main'
+
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    permissions:
+      packages: write
+      id-token: write
+      contents: read
+      actions: read
+      security-events: write
+    env:
+      REGISTRY: ghcr.io
+      GH_URL: https://github.com
+      registry_username: ${{ github.actor }}
+      registry_password: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      - name: Checkout GitHub Action
+        uses: actions/checkout@v3
+
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Docker metadata
+        id: metadata
+        uses: docker/metadata-action@v4
+        with:
+          images: ${{ env.REGISTRY }}/${{ github.repository }}
+          tags: |
+            type=raw,value=latest
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
+          flavor: |
+            latest=true
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ env.registry_username }}
+          password: ${{ env.registry_password }}
+
+      - name: Build image and push to GitHub Container Registry
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          file: ./Dockerfile
+          tags: |
+            ${{ env.REGISTRY }}/${{ github.repository }}:${{ github.run_id }},
+            ${{ env.REGISTRY }}/${{ github.repository }}:latest
+          labels: ${{ steps.metadata.outputs.labels }}
+
+          push: true
+
+      - name: Install cosign
+        uses: sigstore/cosign-installer@main
+
+      - name: Sign the images
+        run: |
+          cosign sign -y ${{ env.REGISTRY }}/${{ github.repository }}:${{ github.run_id }}
+        env:
+          COSIGN_EXPERIMENTAL: 1
+
+      - name: Verify the pushed tags
+        run: cosign verify ${{ env.REGISTRY }}/${{ github.repository }}:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/proact-scheduler-container.yaml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com
+        env:
+          COSIGN_EXPERIMENTAL: 1
+
+      - name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph
+        uses: aquasecurity/trivy-action@master
+        with:
+          scan-type: 'fs'
+          format: 'github'
+          output: 'dependency-results.sbom.json'
+          image-ref: '.'
+          github-pat: ${{ env.registry_password }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ab5a061
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+.env
+.venv
+__pycache__
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..724a7cd
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,28 @@
+# Dockerfile to build a Docker image for the application
+
+FROM python:3.10-slim@sha256:2bac43769ace90ebd3ad83e5392295e25dfc58e58543d3ab326c3330b505283d
+
+# Set the working directory in the container
+WORKDIR /app
+
+COPY ./requirements.txt /app/requirements.txt
+
+# Install any needed packages specified in requirements.txt
+RUN pip install -r requirements.txt
+
+
+RUN apt-get -y update; apt-get -y install curl
+RUN curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin
+
+# Install scsctl from test PyPI
+RUN pip install --no-cache-dir --index-url https://test.pypi.org/simple/ scsctl==0.0.6.2
+
+# Copy the current directory contents into the container at /app
+COPY . /app
+
+
+
+EXPOSE 5000
+
+# Run the server (src/proact_server/app.py) when the container launches
+CMD ["python", "src/proact_server/app.py"]
\ No newline at end of file
diff --git a/charts/proact-scheduler/.helmignore b/charts/proact-scheduler/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/proact-scheduler/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/proact-scheduler/Chart.yaml b/charts/proact-scheduler/Chart.yaml
new file mode 100644
index 0000000..e504f81
--- /dev/null
+++ b/charts/proact-scheduler/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: proact-scheduler
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"
diff --git a/charts/proact-scheduler/templates/NOTES.txt b/charts/proact-scheduler/templates/NOTES.txt
new file mode 100644
index 0000000..d4a070e
--- /dev/null
+++ b/charts/proact-scheduler/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "proact-scheduler.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "proact-scheduler.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "proact-scheduler.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "proact-scheduler.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
diff --git a/charts/proact-scheduler/templates/_helpers.tpl b/charts/proact-scheduler/templates/_helpers.tpl
new file mode 100644
index 0000000..6957936
--- /dev/null
+++ b/charts/proact-scheduler/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "proact-scheduler.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "proact-scheduler.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "proact-scheduler.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "proact-scheduler.labels" -}}
+helm.sh/chart: {{ include "proact-scheduler.chart" . }}
+{{ include "proact-scheduler.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "proact-scheduler.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "proact-scheduler.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "proact-scheduler.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "proact-scheduler.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/charts/proact-scheduler/templates/deployment.yaml b/charts/proact-scheduler/templates/deployment.yaml
new file mode 100644
index 0000000..37579e3
--- /dev/null
+++ b/charts/proact-scheduler/templates/deployment.yaml
@@ -0,0 +1,107 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "proact-scheduler.fullname" . }}
+  labels:
+    {{- include "proact-scheduler.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "proact-scheduler.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "proact-scheduler.selectorLabels" . | nindent 8 }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "proact-scheduler.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      initContainers:
+        - name: create-postgres-database
+          image: "{{ .Values.postgresql.image }}"
+          imagePullPolicy: Always
+          command:
+            - "/bin/sh"
+            - "-c"
+            - >
+              echo "Creating database {{ .Values.postgresql.database }} if it does not exist..." &&
+              psql -h {{ .Values.postgresql.host }} -p {{ .Values.postgresql.port }} -U {{ .Values.postgresql.username }} -lqt | cut -d \| -f 1 | grep -qw {{ .Values.postgresql.database }} || psql -h {{ .Values.postgresql.host }} -p {{ .Values.postgresql.port }} -U {{ .Values.postgresql.username }} -c "CREATE DATABASE {{ .Values.postgresql.database }};" &&
+              echo "Database {{ .Values.postgresql.database }} created or already exists. Listing all databases:" &&
+              psql -h {{ .Values.postgresql.host }} -p {{ .Values.postgresql.port }} -U {{ .Values.postgresql.username }} -l &&
+              sleep 5
+          env:
+            - name: PGPASSWORD
+              {{- if not .Values.postgresql.existingSecret }}
+              value: "{{ .Values.postgresql.password }}"
+              {{- else }}
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.postgresql.existingSecret.name }}
+                  key: {{ .Values.postgresql.existingSecret.passwordKey }}
+              {{- end }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: {{ .Values.service.port }}
+              protocol: TCP
+          env:
+            - name: PROACT_PG_HOST
+              value: "{{ .Values.postgresql.host }}"
+            - name: PROACT_PG_PORT
+              value: "{{ .Values.postgresql.port }}"
+            - name: PROACT_PG_USER
+              value: "{{ .Values.postgresql.username }}"
+            - name: PROACT_PG_PASSWORD
+              {{- if not .Values.postgresql.existingSecret }}
+              value: "{{ .Values.postgresql.password }}"
+              {{- else }}
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.postgresql.existingSecret.name }}
+                  key: {{ .Values.postgresql.existingSecret.passwordKey }}
+              {{- end }}
+            - name: PROACT_PG_DATABASE
+              value: "{{ .Values.postgresql.database }}"
+            - name: PROACT_ENVIRONMENT
+              value: "{{ .Values.env.PROACT_ENVIRONMENT }}"
+            - name: PROACT_TEMPORAL_HOST
+              value: "{{ .Values.temporal.host }}:{{ .Values.temporal.port }}"
+
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: http
+          readinessProbe:
+            httpGet:
+              path: /ready
+              port: http
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/charts/proact-scheduler/templates/hpa.yaml b/charts/proact-scheduler/templates/hpa.yaml
new file mode 100644
index 0000000..f109d0a
--- /dev/null
+++ b/charts/proact-scheduler/templates/hpa.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "proact-scheduler.fullname" . }}
+  labels:
+    {{- include "proact-scheduler.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "proact-scheduler.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
diff --git a/charts/proact-scheduler/templates/ingress.yaml b/charts/proact-scheduler/templates/ingress.yaml
new file mode 100644
index 0000000..977fee1
--- /dev/null
+++ b/charts/proact-scheduler/templates/ingress.yaml
@@ -0,0 +1,61 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "proact-scheduler.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+  {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "proact-scheduler.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+  ingressClassName: {{ .Values.ingress.className }}
+  {{- end }}
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+            pathType: {{ .pathType }}
+            {{- end }}
+            backend:
+              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
+{{- end }}
diff --git a/charts/proact-scheduler/templates/service.yaml b/charts/proact-scheduler/templates/service.yaml
new file mode 100644
index 0000000..a5d9f83
--- /dev/null
+++ b/charts/proact-scheduler/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "proact-scheduler.fullname" . }}
+  labels:
+    {{- include "proact-scheduler.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "proact-scheduler.selectorLabels" . | nindent 4 }}
diff --git a/charts/proact-scheduler/templates/serviceaccount.yaml b/charts/proact-scheduler/templates/serviceaccount.yaml
new file mode 100644
index 0000000..91620e3
--- /dev/null
+++ b/charts/proact-scheduler/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "proact-scheduler.serviceAccountName" . }}
+  labels:
+    {{- include "proact-scheduler.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/charts/proact-scheduler/templates/tests/test-connection.yaml b/charts/proact-scheduler/templates/tests/test-connection.yaml
new file mode 100644
index 0000000..5e0797c
--- /dev/null
+++ b/charts/proact-scheduler/templates/tests/test-connection.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "proact-scheduler.fullname" . }}-test-connection"
+  labels:
+    {{- include "proact-scheduler.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['{{ include "proact-scheduler.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
diff --git a/charts/proact-scheduler/values.yaml b/charts/proact-scheduler/values.yaml
new file mode 100644
index 0000000..965a10e
--- /dev/null
+++ b/charts/proact-scheduler/values.yaml
@@ -0,0 +1,102 @@
+# Default values for proact-scheduler.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+  repository: ghcr.io/intelops/proact-scheduler
+  pullPolicy: Always
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: "latest"
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+  # fsGroup: 2000
+
+securityContext: {}
+  # capabilities:
+  #   drop:
+  #   - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+service:
+  type: ClusterIP
+  port: 5000
+
+ingress:
+  enabled: false
+  className: ""
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: chart-example.local
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi

+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+postgresql:
+  image: docker.io/bitnami/postgresql:16.0.0-debian-11-r13
+  host: "postgresql"
+  port: "5432"
+  database: ""
+  username: "postgres"
+  password: ""
+  existingSecret: {}
+    # name: ""
+    # passwordKey: ""
+
+temporal:
+  host: temporal
+  port: 7233
+
+
+# Configure the environment variables for the application
+env:
+  PROACT_ENVIRONMENT: ""
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..61276d7
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,18 @@
+# Automatically generated by https://github.com/damnever/pigar.
+fastapi==0.103.1
+pydantic==2.6.1
+setuptools==59.6.0
+SQLAlchemy==2.0.24
+temporalio==1.5.0
+hvac==1.2.1
+numpy==1.25.0
+wheel==0.40.0
+click==8.1.3
+clickhouse-driver==0.2.6
+requests==2.31.0
+questionary==1.10.0
+tabulate==0.9.0
+uvicorn==0.23.2
+pydgraph==23.0.2
+psycopg2-binary==2.9.9
+kubernetes==27.2.0
diff --git a/src/proact_server/__init__.py b/src/proact_server/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/proact_server/app.py b/src/proact_server/app.py
new file mode 100644
index 0000000..d156a3c
--- /dev/null
+++ b/src/proact_server/app.py
@@ -0,0 +1,67 @@
+import uvicorn
+from fastapi import FastAPI
+from utils.database import Base, engine
+import os
+from routes.schedule import schedule
+from temporalio import workflow
+import asyncio
+from utils.constants import TEMPORAL_HOST, SERVER_PORT
+
+from temporalio.client import Client
+from temporalio.worker import Worker
+
+with workflow.unsafe.imports_passed_through():
+    from utils.temporal.activity import proact_scan_activity
+    from utils.temporal.workflow_dax import ProactWorkflow
+
+
+app = FastAPI(
+    title="SCSCTL",
+    description="SCSCTL is a tool to automate security scans for container images",
+    version="0.0.1"
+)
+
+app.include_router(schedule.router)
+
+Base.metadata.create_all(bind=engine)
+
+
+# TODO: Need to update these probes later
+@app.get("/ready", include_in_schema=False)
+async def readinessProbe():
+    return {"status": "ok"}
+
+
+@app.get("/healthz", include_in_schema=False)
+async def livenessProbe():
+    # TODO: also check that Postgres is reachable
+    return {"status": "ok"}
+
+
+
+async def main():
+    print(f"Connecting to Temporal at {TEMPORAL_HOST}")
+    client = await Client.connect(TEMPORAL_HOST)
+
+    # Create a worker
+    worker = Worker(
+        client,
+        task_queue="proact-task-queue",
+        workflows=[ProactWorkflow],
+        activities=[proact_scan_activity],
+    )
+    await worker.run()
+
+
+if __name__ == "__main__":
+    # Check the environment and run uvicorn accordingly
+    loop = asyncio.get_event_loop()
+    asyncio.set_event_loop(loop)
+    loop.create_task(main())
+    if os.getenv("PROACT_ENVIRONMENT", "dev") == "prod":
+        config = uvicorn.Config("app:app", loop=loop, host="0.0.0.0", port=int(SERVER_PORT), log_level="info", workers=2)
+    else:
+        config = uvicorn.Config("app:app", loop=loop, host="0.0.0.0", port=int(SERVER_PORT), log_level="debug", reload=True)
+    server = uvicorn.Server(config)
+    loop.run_until_complete(server.serve())
+
diff --git a/src/proact_server/routes/schedule/schedule.py b/src/proact_server/routes/schedule/schedule.py
new file mode 100644
index 0000000..890d523
--- /dev/null
+++ b/src/proact_server/routes/schedule/schedule.py
@@ -0,0 +1,73 @@
+from fastapi import APIRouter, Depends
+from fastapi import Request
+from utils.database import get_db
+from utils.model import CreateScheduleConfig, CreateDeleteUpdateScheduleResponse, ScheduleResponse, ScheduleDetailsResponseNew
+from sqlalchemy.orm import Session
+from routes.schedule.service import create_new_schedule, delete_schedule, list_schedules, get_schedule_configs, get_schedule_details, pause_schedule, resume_schedule
+from datetime import datetime
+
+router = APIRouter(prefix="/api/v1/schedule", tags=["scsctl"])
+
+
+@router.post("/")
+async def createSchedule(request: Request, config: CreateScheduleConfig, db: Session = Depends(get_db)) -> CreateDeleteUpdateScheduleResponse:
+    """
+    Create a new schedule
+    """
+
+    status, schedule_id = await create_new_schedule(config, db)
+    return CreateDeleteUpdateScheduleResponse(message=status.value, schedule_id=schedule_id)
+
+@router.delete("/{scheduleId}")
+async def deleteSchedule(request: Request, scheduleId: str, db: Session = Depends(get_db)) -> CreateDeleteUpdateScheduleResponse:
+    """
+    Delete the schedule with the given schedule_id
+    """
+    status, scheduleId = await delete_schedule(scheduleId, db)
+    return CreateDeleteUpdateScheduleResponse(message=status.value, schedule_id=scheduleId)
+
+@router.get("/")
+async def listSchedules(request: Request, db: Session = Depends(get_db)) -> list[ScheduleResponse]:
+    """
+    List all schedules
+    """
+    schedules = await list_schedules(db)
+    return schedules
+
+@router.get("/{scheduleId}", response_model_exclude_none=True)
+async def getScheduleConfigs(request: Request, scheduleId: str, db: Session = Depends(get_db)) -> CreateScheduleConfig:
+    """
+    Get the scan configs of the schedule with the given schedule_id
+    """
+    # Get schedule details from Schedules
+    configs = await get_schedule_configs(scheduleId, db)
+    return configs
+
+@router.get("/{scheduleId}/details")
+async def getScheduleDetails(request: Request, scheduleId: str, db: Session = Depends(get_db)) -> ScheduleDetailsResponseNew:
+    """
+    Get schedule details with the given schedule_id
+    """
+
+    schedule_details_response = await get_schedule_details(scheduleId, db)
+    return schedule_details_response
+
+
+@router.put("/{scheduleId}/pause")
+async def pauseSchedule(request: Request, scheduleId: str, db: Session = Depends(get_db)) -> CreateDeleteUpdateScheduleResponse:
+    """
+    Pause the schedule with the given schedule_id
+    """
+
+    status, schedule_id = await pause_schedule(scheduleId, db)
+    return CreateDeleteUpdateScheduleResponse(message=status.value, schedule_id=schedule_id)
+
+
+@router.put("/{scheduleId}/resume")
+async def resumeSchedule(request: Request, scheduleId: str, db: Session = Depends(get_db)) -> CreateDeleteUpdateScheduleResponse:
+    """
+    Resume the schedule with the given schedule_id
+    """
+
+    status, schedule_id = await resume_schedule(scheduleId, db)
+    return CreateDeleteUpdateScheduleResponse(message=status.value, schedule_id=schedule_id)
diff --git a/src/proact_server/routes/schedule/service.py b/src/proact_server/routes/schedule/service.py
new file mode 100644
index 0000000..abfcef1
--- /dev/null
+++ b/src/proact_server/routes/schedule/service.py
@@ -0,0 +1,212 @@
+
+from uuid import uuid4
+from sqlalchemy.orm import Session
+from utils.model import CreateScheduleConfig, Schedules, ScanConfigs, Executions, ExecutionJobs, ScanStatus, ScheduleResponse, ScanConfig
+from utils.model import ScheduleEnum, ScheduleDetailsResponse, ExecutionResponseNew, ScheduleDetailsResponseNew
+from temporalio.client import Client, Schedule, ScheduleActionStartWorkflow, ScheduleSpec, ScheduleState
+from temporalio import workflow
+import json
+
+from utils.constants import TEMPORAL_HOST
+
+with workflow.unsafe.imports_passed_through():
+    from utils.temporal.workflow_dax import ProactWorkflow
+
+
+async def create_new_schedule(config: CreateScheduleConfig, db: Session, schedule_id: str = None) -> tuple[ScheduleEnum, str]:
+    # TODO: Make the db commits atomic; if any of the db commits fails, roll back all of them
+    try:
+        schedules = config.model_dump()
+        scan_configs = schedules.pop("scan_configs")
+
+        # During an update, schedule_id is passed in to keep the schedule id the same even though we create a new schedule
+        if(schedule_id):
+            schedules["schedule_id"] = schedule_id
+
+        # Create a schedule
+        new_schedule = Schedules(**schedules)
+        db.add(new_schedule)
+        db.commit()
+        db.refresh(new_schedule)
+        # Get the schedule id
+        schedule_id = new_schedule.schedule_id
+
+        # Create executions
+        new_execution = Executions(schedule_id=schedule_id, scan_images_count=len(scan_configs))
+        db.add(new_execution)
+        db.commit()
+        db.refresh(new_execution)
+        execution_id = new_execution.execution_id
+
+
+        # Add scan configs
+        for scan_config in scan_configs:
+            job_id = str(uuid4())
+            scan_config["schedule_id"] = schedule_id
+            scan_config["job_id"] = job_id
+            db.add(ScanConfigs(**scan_config))
+            # db.commit()
+
+            scan_config_without_none = {k: v for k, v in scan_config.items() if v is not None}
+
+            # Add job to scheduler
+            kwargs = {
+                "job_id": job_id,
+                "is_api": True,
+                "execution_id": execution_id,
+                **scan_config_without_none
+            }
+
+            # Create schedule in temporal
+            client = await Client.connect(TEMPORAL_HOST)
+            await client.create_schedule(
+                job_id,
+                Schedule(
+                    action=ScheduleActionStartWorkflow(
+                        ProactWorkflow,
+                        kwargs,
+                        id=str(uuid4()),
+                        task_queue="proact-task-queue",
+                    ),
+                    spec=ScheduleSpec(
+                        cron_expressions=[config.cron_schedule],
+                    ),
+                    state=ScheduleState(note="Proact Schedule Created"),
+                )
+            )
+
+
+            # Add job to execution_jobs
+            db.add(ExecutionJobs(execution_id=execution_id, job_id=job_id))
+            db.commit()
+        return ScheduleEnum.SCHEDULE_CREATED, schedule_id
+    except Exception as e:
+        print(e)
+        return ScheduleEnum.SCHEDULE_CREATION_FAILED, schedule_id
+
+async def delete_schedule(schedule_id: str, db: Session) -> tuple[ScheduleEnum, str]:
+    try:
+        # Get execution_id
+        # Check if the schedule exists; if not, return an error
+        if(not db.query(Schedules).filter(Schedules.schedule_id == schedule_id).first()):
+            return ScheduleEnum.SCHEDULE_NOT_FOUND, schedule_id
+        execution_id = db.query(Executions).filter(Executions.schedule_id == schedule_id).first().execution_id
+        execution_jobs = db.query(ExecutionJobs).filter(ExecutionJobs.execution_id == execution_id).all()
+        job_ids = [job.job_id for job in execution_jobs]
+
+        # Delete schedule from temporal
+        client = await Client.connect(TEMPORAL_HOST)
+        for job_id in job_ids:
+            handle = client.get_schedule_handle(str(job_id))
+            await handle.delete()
+        db.query(ExecutionJobs).filter(ExecutionJobs.execution_id == execution_id).delete()
+
+        # Delete scan configs
+        db.query(ScanConfigs).filter(ScanConfigs.schedule_id == schedule_id).delete()
+
+
+        # Delete scan status
+        db.query(ScanStatus).filter(ScanStatus.execution_id == execution_id).delete()
+
+        # Delete execution
+        db.query(Executions).filter(Executions.schedule_id == schedule_id).delete()
+
+        # Delete schedule
+        db.query(Schedules).filter(Schedules.schedule_id == schedule_id).delete()
+        db.commit()
+
+        return ScheduleEnum.SCHEDULE_DELETED, schedule_id
+    except Exception as e:
+        print(e)
+        return ScheduleEnum.SCHEDULE_DELETE_FAILED, schedule_id
+
+async def list_schedules(db: Session) -> list[ScheduleResponse]:
+    # Get schedule name and schedule id from Schedules
+    schedules = db.query(Schedules.schedule_name, Schedules.schedule_id).all()
+    if(schedules == None):
+        return []
+    else:
+        # Convert to a list of ScheduleResponse using the ** expression
+        schedules = [ScheduleResponse(**schedule._asdict()) for schedule in schedules]
+        return schedules
+
+async def get_schedule_configs(scheduleId: str, db: Session) -> CreateScheduleConfig:
+    """
+    Get the scan configs of the schedule with the given schedule_id
+    """
+    # Get schedule details from Schedules
+    try:
+        schedule = db.query(Schedules.schedule_id,Schedules.schedule_name,Schedules.start_date,Schedules.end_date, Schedules.container_registry_id,Schedules.cron_schedule,Schedules.update_time).filter(Schedules.schedule_id == scheduleId).first()._asdict()
+
+        # Get scan configs
+        scan_configs = db.query(ScanConfigs.docker_image_name,ScanConfigs.pyroscope_url,ScanConfigs.pyroscope_app_name,ScanConfigs.falco_pod_name,ScanConfigs.falco_target_deployment_name, ScanConfigs.docker_file_folder_path, ScanConfigs.db_enabled, ScanConfigs.falco_enabled, ScanConfigs.renovate_enabled, ScanConfigs.renovate_repo_name, ScanConfigs.renovate_repo_token,ScanConfigs.dgraph_enabled, ScanConfigs.dgraph_db_host, ScanConfigs.dgraph_db_port).filter(ScanConfigs.schedule_id == scheduleId).all()
+        scan_configs = [ScanConfig(**scan_config._asdict()) for scan_config in scan_configs]
+        schedule["scan_configs"] = scan_configs
+
+        schedule_details = CreateScheduleConfig(**schedule)
+        return schedule_details.model_dump()
+    except Exception as e:
+        print(e)
+        return CreateScheduleConfig(schedule_name="", container_registry_id="", cron_schedule="", scan_configs=[])
+
+async def get_schedule_details(scheduleId: str, db: Session):
+    # Get execution details from Executions
+    try:
+        # Get schedule name from Schedules
+        schedule_row = db.query(Schedules.schedule_name).filter(Schedules.schedule_id == scheduleId).first()
+        schedule_name = schedule_row.schedule_name
+        execution = db.query(Executions).filter(Executions.schedule_id == scheduleId).first()
+        execution_id = execution.execution_id
+        # Get job ids from ExecutionJobs
+        job_ids = db.query(ExecutionJobs.job_id).filter(ExecutionJobs.execution_id == execution_id).all()
+        job_ids = [job_id.job_id for job_id in job_ids]
+
+        execution_details = []
+        for job_id in job_ids:
+            # Get only the latest scan status for each job_id, based on the datetime field
+            scan_status = db.query(ScanStatus.execution_id, ScanStatus.job_id, ScanStatus.vulnerable_packages_count, ScanStatus.vulnerablitites_count, ScanStatus.severity_critical_count, ScanStatus.severity_high_count, ScanStatus.severity_low_count, ScanStatus.severity_medium_count, ScanStatus.severity_unknown_count, ScanStatus.datetime, ScanStatus.scan_report).filter(ScanStatus.job_id == job_id).order_by(ScanStatus.datetime.desc()).first()
+            if(scan_status):
+                # Convert to dictionary
+                scan_status_dict = scan_status._asdict()
+                scan_status_dict["execution_id"] = str(scan_status_dict["execution_id"])
+                scan_status_dict["job_id"] = str(scan_status_dict["job_id"])
+                scan_status_dict["scan_report"] = json.loads(scan_status_dict["scan_report"])
+                execution_details.append(ExecutionResponseNew(**scan_status_dict))
+        return ScheduleDetailsResponseNew(schedule_id=scheduleId, schedule_name=schedule_name, total_scan_images_count=execution.scan_images_count, total_vulnerable_images_count=execution.vulnerable_images_count, total_vulnerablities_count=execution.vulnerablities_count, executions=execution_details)
+    except Exception as e:
+        print(e)
+        return ScheduleDetailsResponseNew(schedule_id="", schedule_name="", total_scan_images_count=0, total_vulnerable_images_count=0, total_vulnerablities_count=0, executions=[])
+
+async def pause_schedule(scheduleId: str, db: Session) -> tuple[ScheduleEnum, str]:
+    try:
+        # Get execution_id
+        execution_id = db.query(Executions).filter(Executions.schedule_id == scheduleId).first().execution_id
+        execution_jobs = db.query(ExecutionJobs).filter(ExecutionJobs.execution_id == execution_id).all()
+        job_ids = [job.job_id for job in execution_jobs]
+
+        # Pause schedule in temporal
+        client = await Client.connect(TEMPORAL_HOST)
+        for job_id in job_ids:
+            handle = client.get_schedule_handle(str(job_id))
+            await handle.pause()
+        return ScheduleEnum.SCHEDULE_PAUSED, scheduleId
+    except Exception as e:
+        print(e)
+        return ScheduleEnum.SCHEDULE_PAUSE_FAILED, scheduleId
+
+async def resume_schedule(scheduleId: str, db: Session) -> tuple[ScheduleEnum, str]:
+    try:
+        # Get execution_id
+        execution_id = db.query(Executions).filter(Executions.schedule_id == scheduleId).first().execution_id
+        execution_jobs = db.query(ExecutionJobs).filter(ExecutionJobs.execution_id == execution_id).all()
+        job_ids = [job.job_id for job in execution_jobs]
+
+        # Resume schedule in temporal
+        client = await Client.connect(TEMPORAL_HOST)
+        for job_id in job_ids:
+            handle = client.get_schedule_handle(str(job_id))
+            await handle.unpause()
+        return ScheduleEnum.SCHEDULE_RESUMED, scheduleId
+    except Exception as e:
+        print(e)
+        return ScheduleEnum.SCHEDULE_RESUME_FAILED, scheduleId
\ No newline at end of file
diff --git a/src/proact_server/utils/__init__.py b/src/proact_server/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/proact_server/utils/constants.py b/src/proact_server/utils/constants.py
new file mode 100644
index 0000000..5b235d8
--- /dev/null
+++ b/src/proact_server/utils/constants.py
@@ -0,0 +1,5 @@
+import os
+
+TEMPORAL_HOST = os.getenv('PROACT_TEMPORAL_HOST', 'localhost:7233')
+
+SERVER_PORT = os.getenv('PROACT_SERVER_PORT', 5000)
\ No newline at end of file
diff --git a/src/proact_server/utils/database.py b/src/proact_server/utils/database.py
new file mode 100644
index 0000000..afbdc27
--- /dev/null
+++ b/src/proact_server/utils/database.py
@@ -0,0 +1,32 @@
+from sqlalchemy import create_engine
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+import os
+
+def get_postgres_db_url():
+
+    # Try to read the environment variable
+    host = os.getenv('PROACT_PG_HOST', 'localhost')
+    port = os.getenv('PROACT_PG_PORT', '5432')
+    user = os.getenv('PROACT_PG_USER', 'postgres')
+    password = os.getenv('PROACT_PG_PASSWORD', 'password')
+    database = os.getenv('PROACT_PG_DATABASE', 'scsctl')
+
+    url = f"postgresql://{user}:{password}@{host}:{port}/{database}"
+
+    return url
+
+SQLALCHEMY_DATABASE_URL = get_postgres_db_url()
+engine = create_engine(SQLALCHEMY_DATABASE_URL)
+
+SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+
+Base = declarative_base()
+
+def get_db():
+    db = SessionLocal()
+    try:
+        yield db
+    finally:
+        db.close()
+
diff --git a/src/proact_server/utils/model.py b/src/proact_server/utils/model.py
new file mode 100644
index 0000000..17adca7
--- /dev/null
+++ b/src/proact_server/utils/model.py
@@ -0,0 +1,199 @@
+from pydantic import BaseModel, Field, validator
+from utils.database import Base
+from sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey
+from sqlalchemy.dialects.postgresql import UUID
+import uuid
+from datetime import datetime
+from enum import Enum
+from typing import Optional
+
+class ScanConfig(BaseModel):
+    @validator('*', pre=True)
+    def empty_str_to_none(cls, v):
+        if v == '':
+            return None
+        return v
+
+    docker_image_name: str
+    pyroscope_enabled: Optional[bool] = Field(default=False)
+    pyroscope_url: Optional[str] = None
+    pyroscope_app_name: Optional[str] = Field(default=None)
+    falco_pod_name: Optional[str] = Field(default=None)
+    falco_target_deployment_name: Optional[str] = Field(default=None)
+    docker_file_folder_path: Optional[str] = Field(default=None)
+    db_enabled: Optional[bool] = Field(default=False)
+    falco_enabled: Optional[bool] = Field(default=False)
+    renovate_enabled: Optional[bool] = Field(default=False)
+    renovate_repo_name: Optional[str] = Field(default=None)
+    renovate_repo_token: Optional[str] = Field(default=None)
+    dgraph_enabled: Optional[bool] = Field(default=False)
+    dgraph_db_host: Optional[str] = Field(default=None)
+    dgraph_db_port: Optional[str] = Field(default=None)
+    rebuild_image: Optional[bool] = Field(default=False)
+
+class CreateScheduleConfig(BaseModel):
+    @validator('*', pre=True)
+    def empty_str_to_none(cls, v):
+        if v == '':
+            return None
+        return v
+    schedule_name: str
+    container_registry_id: str
+    cron_schedule: str
+    start_date: Optional[datetime] = Field(default=None)
+    end_date: Optional[datetime] = Field(default=None)
+    scan_configs: list[ScanConfig]
+
+class DeleteScheduleConfig(BaseModel):
+    schedule_id: str
+
+class PauseScanConfig(BaseModel):
+    job_id: str
+
+class ResumeScanConfig(BaseModel):
+    job_id: str
+
+class Schedules(Base):
+    __tablename__ = 'schedules'
+    schedule_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
+    schedule_name = Column(String)
+    start_date = Column(DateTime, nullable=True)
+    end_date = Column(DateTime, nullable=True)
+    container_registry_id = Column(String)
+    cron_schedule = Column(String)
+    update_time = Column(DateTime, default=datetime.utcnow)
+
+class ScanConfigs(Base):
+    __tablename__ = 'scan_configs'
+    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
+    schedule_id = Column(UUID(as_uuid=True), ForeignKey('schedules.schedule_id'), nullable=False)
+    pyroscope_enabled = Column(Boolean, default=False)
+    job_id = Column(UUID(as_uuid=True))
+    docker_image_name = Column(String, nullable=False)
+    pyroscope_url = Column(String, nullable=True)
+    pyroscope_app_name = Column(String, nullable=True)
+    falco_pod_name = Column(String, nullable=True)
+    falco_target_deployment_name = Column(String, nullable=True)
+    docker_file_folder_path = Column(String, nullable=True)
+    db_enabled = Column(Boolean, default=False)
+    falco_enabled = Column(Boolean, default=False)
+    renovate_enabled = Column(Boolean, default=False)
+    renovate_repo_name = Column(String, nullable=True)
+    renovate_repo_token = Column(String, nullable=True)
+    dgraph_enabled = Column(Boolean, default=False)
+    dgraph_db_host = Column(String, nullable=True)
+    dgraph_db_port = Column(String, nullable=True)
+    is_api = Column(Boolean, default=False)
+    rebuild_image = Column(Boolean, default=False)
+
+class Executions(Base):
+    __tablename__ = 'executions'
+    execution_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
+    schedule_id = Column(UUID(as_uuid=True), ForeignKey('schedules.schedule_id'), nullable=False)
+    start_time = Column(DateTime, nullable=True)
+    end_time = Column(DateTime, nullable=True)
+    scan_images_count = Column(Integer, nullable=True)
+    vulnerable_images_count = Column(Integer, nullable=True)
+    vulnerablities_count = Column(Integer, nullable=True)
+    status = Column(String, nullable=True)
+
+class ExecutionJobs(Base):
+    __tablename__ = 'execution_jobs'
+    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
+    execution_id = Column(UUID(as_uuid=True), ForeignKey('executions.execution_id'), nullable=False)
+    job_id = Column(UUID(as_uuid=True), nullable=False)
+
+class ScanStatus(Base):
+    __tablename__ = 'scan_status'
+    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
+    job_id = Column(UUID(as_uuid=True))
+    execution_id = Column(UUID(as_uuid=True), ForeignKey('executions.execution_id'), nullable=False)
+    batch_id = Column(String, unique=True)
+    run_type = Column(String)
+    vulnerable_packages_count = Column(Integer, default=0)
+    vulnerablitites_count = Column(Integer, default=0)
+    severity_high_count = Column(Integer, default=0)
+    severity_medium_count = Column(Integer, default=0)
+    severity_low_count = Column(Integer, default=0)
+    severity_critical_count = Column(Integer, default=0)
+    severity_unknown_count = Column(Integer, default=0)
+    status = Column(Boolean, default=False)
+    scan_report = Column(String, nullable=True)
+    datetime = Column(DateTime, default=datetime.utcnow)
+
+
+class ScheduleEnum(Enum):
+    SCHEDULE_CREATED = "Schedule Created"
+    SCHEDULE_UPDATED = "Schedule Updated"
+    SCHEDULE_PAUSED = "Schedule Paused"
+    SCHEDULE_RESUMED = "Schedule Resumed"
+    SCHEDULE_DELETED = "Schedule Deleted"
+    SCHEDULE_CREATION_FAILED = "Error creating schedule"
+    SCHEDULE_UPDATE_FAILED = "Error updating schedule"
+    SCHEDULE_PAUSE_FAILED = "Error pausing schedule"
+    SCHEDULE_RESUME_FAILED = "Error resuming schedule"
+    SCHEDULE_DELETE_FAILED = "Error deleting schedule"
+    SCHEDULE_NOT_FOUND = "Schedule not found"
+
+class CreateDeleteUpdateScheduleResponse(BaseModel):
+    message: str
+    schedule_id: str
+
+    @validator('*', pre=True, always=True)
+    def convert_to_string(cls, v):
+        return str(v)
+
+class ScheduleResponse(BaseModel):
+
+    @validator('*', pre=True, always=True)
+    def convert_to_string(cls, v):
+        return str(v)
+
+    schedule_id: str
+    schedule_name: str
+
+
+class ExecutionResponse(BaseModel):
+    execution_id: str
+    start_time: datetime
+    end_time: datetime
+    scan_images_count: int
+    vulnerable_images_count: int
+    vulnerablities_count: int
+    status: str
+    scan_report: dict
+
+class ScheduleDetailsResponse(BaseModel):
+    schedule_id: str
+    schedule_name: str
+    executions: list[ExecutionResponse]
+
+class ExecutionResponseNew(BaseModel):
+    execution_id: str
+    job_id: str
+    vulnerable_packages_count: int
+    vulnerablitites_count: int
+    severity_critical_count: int
+    severity_high_count: int
+    severity_medium_count: int
+    severity_low_count: int
+    severity_unknown_count: int
+    datetime: datetime
+    scan_report: dict
+
+class ScheduleDetailsResponseNew(BaseModel):
+    schedule_id: str
+    schedule_name: str
+    total_scan_images_count: int
+    total_vulnerable_images_count: int
+    total_vulnerablities_count: int
+    executions: list[ExecutionResponseNew]
+
+class Stats(BaseModel):
+    vulnerable_packages_count: int = Field(default=0)
+    vulnerablitites_count: int = Field(default=0)
+    severity_critical_count: int = Field(default=0)
+    severity_high_count: int = Field(default=0)
+    severity_medium_count: int = Field(default=0)
+    severity_low_count: int = Field(default=0)
+    severity_unknown_count: int = Field(default=0)
\ No newline at end of file
diff --git a/src/proact_server/utils/temporal/activity.py b/src/proact_server/utils/temporal/activity.py
new file mode 100644
index 0000000..5a197d1
--- /dev/null
+++ b/src/proact_server/utils/temporal/activity.py
@@ -0,0 +1,60 @@
+from temporalio import activity
+from scsctl.helper.scan import run_scan
+from utils.database import get_db
+from utils.model import ScanStatus, ExecutionJobs, Executions
+import json
+
+@activity.defn(name="proact_scan_activity")
+async def proact_scan_activity(config: dict) -> str:
+
+    result = run_scan(**config)
+    generator = get_db()
+    db = next(generator)
+    execution_id = config.get("execution_id")
+    scan_report = {
+        "sbom_report": json.loads(result.get("sbom_report")),
+        "profiler_data": result.get("pyroscope_data"),
+        "profiler_found_extra_packages": result.get("pyroscope_found_extra_packages"),
+        "runtime_security_tool_found_extra_packages": result.get("falco_found_extra_packages"),
+        "dependency_manager_status": result.get("renovate_status"),
+        "final_report": result.get("final_report")
+    }
+    # Convert scan_report to a json string
+    scan_report = json.dumps(scan_report)
+
+    scan = ScanStatus(
+        job_id=config.get("job_id"),
+        execution_id=execution_id,
+        batch_id=result.get("batch_id"),
+        run_type="api",
+        status=result.get("scan_status"),
+        scan_report=scan_report,
+        **result.get("stats")
+    )
+    db.add(scan)
+    db.commit()
+    db.refresh(scan)
+
+    # Get the scan status of every job for this execution_id and count the jobs whose vulnerable_packages_count is greater than 0; that count becomes vulnerable_images_count in the executions table.
+    # Get job ids for execution_id
+    job_ids = db.query(ExecutionJobs.job_id).filter(ExecutionJobs.execution_id == execution_id).all()
+    # Get vulnerable_packages_count for each job_id with the latest datetime
+    vulnerable_images_count = 0
+    vulnerabilities_count = 0
+    scan_status_temp = True
+    for job_id in job_ids:
+        counts = db.query(ScanStatus.vulnerablitites_count,ScanStatus.status).filter(ScanStatus.job_id == job_id[0]).order_by(ScanStatus.datetime.desc()).first()
+        if(counts and counts[0] > 0):
+            vulnerable_images_count += 1
+        if(counts):
+            vulnerabilities_count += counts[0]
+        if(counts and counts[1] == False):
+            scan_status_temp = False
+
+    # Update the vulnerabilities_count in the executions table
+    db.query(Executions).filter(Executions.execution_id == execution_id).update({"vulnerablities_count": vulnerabilities_count, "vulnerable_images_count": vulnerable_images_count, "status": scan_status_temp})
+
+    db.commit()
+    db.close()
+
+    return "success"
\ No newline at end of file
diff --git a/src/proact_server/utils/temporal/dataobject.py b/src/proact_server/utils/temporal/dataobject.py
new file mode 100644
index 0000000..3316c6e
--- /dev/null
+++ b/src/proact_server/utils/temporal/dataobject.py
@@ -0,0 +1,22 @@
+from dataclasses import dataclass
+@dataclass
+class Config:
+    schedule_id: str = None
+    job_id: str = None
+    docker_image_name: str = None
+    pyroscope_enabled: bool = False
+    pyroscope_url: str = None
+    pyroscope_app_name: str = None
+    falco_pod_name: str = None
+    falco_target_deployment_name: str = None
+    docker_file_folder_path: str = None
+    db_enabled: bool = False
+    falco_enabled: bool = False
+    renovate_enabled: bool = False
+    renovate_repo_name: str = None
+    renovate_repo_token: str = None
+    dgraph_enabled: bool = False
+    dgraph_db_host: str = None
+    dgraph_db_port: str = None
+    is_api: bool = False
+    execution_id: str = None
\ No newline at end of file
diff --git a/src/proact_server/utils/temporal/workflow_dax.py b/src/proact_server/utils/temporal/workflow_dax.py
new file mode 100644
index 0000000..85b3a7d
--- /dev/null
+++ b/src/proact_server/utils/temporal/workflow_dax.py
@@ -0,0 +1,17 @@
+from temporalio import workflow
+from datetime import timedelta
+from utils.temporal.dataobject import Config
+
+with workflow.unsafe.imports_passed_through():
+    from utils.temporal.activity import proact_scan_activity
+
+
+@workflow.defn(name="ProactWorkflow", sandboxed=False)
+class ProactWorkflow:
+    @workflow.run
+    async def run(self, config: Config) -> str:
+        return await workflow.execute_activity(
+            proact_scan_activity,
+            config,
+            start_to_close_timeout=timedelta(seconds=10)
+        )
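
For reference, a minimal sketch of exercising the schedule API defined in src/proact_server/routes/schedule/schedule.py, assuming the server is reachable at localhost:5000 (the PROACT_SERVER_PORT default) and using the requests package already pinned in requirements.txt. The payload values (schedule name, registry id, image name) are hypothetical examples; the field names come from CreateScheduleConfig and ScanConfig in utils/model.py.

import requests

BASE_URL = "http://localhost:5000/api/v1/schedule"  # assumed deployment address

payload = {
    "schedule_name": "nightly-scan",           # hypothetical
    "container_registry_id": "my-registry",    # hypothetical
    "cron_schedule": "0 2 * * *",              # run every day at 02:00
    "scan_configs": [
        {"docker_image_name": "ghcr.io/example/app:latest"}  # hypothetical image
    ],
}

# Create a schedule; the response carries the status message and schedule_id.
resp = requests.post(f"{BASE_URL}/", json=payload)
resp.raise_for_status()
schedule_id = resp.json()["schedule_id"]
print("created schedule", schedule_id)

# List all schedules, then fetch configs and latest scan details for this one.
print(requests.get(f"{BASE_URL}/").json())
print(requests.get(f"{BASE_URL}/{schedule_id}").json())
print(requests.get(f"{BASE_URL}/{schedule_id}/details").json())

# Pause and resume map to the PUT endpoints in the router.
requests.put(f"{BASE_URL}/{schedule_id}/pause").raise_for_status()
requests.put(f"{BASE_URL}/{schedule_id}/resume").raise_for_status()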
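
Similarly, a small diagnostic sketch against the Temporal side, assuming a Temporal server at localhost:7233 (the PROACT_TEMPORAL_HOST default) and the pinned temporalio 1.5.0, whose Client exposes list_schedules and execute_workflow. The scan config passed below is a hypothetical example; the ad-hoc run targets the same proact-task-queue the worker in app.py polls.

import asyncio
from uuid import uuid4

from temporalio.client import Client


async def main() -> None:
    client = await Client.connect("localhost:7233")  # assumed address

    # Each schedule id created by create_new_schedule corresponds to a
    # job_id row in the execution_jobs table.
    async for schedule in await client.list_schedules():
        print("schedule:", schedule.id)

    # Fire one ad-hoc run of the workflow registered by the worker in app.py.
    result = await client.execute_workflow(
        "ProactWorkflow",
        {"job_id": str(uuid4()), "is_api": True,
         "docker_image_name": "ghcr.io/example/app:latest"},  # hypothetical
        id=f"manual-{uuid4()}",
        task_queue="proact-task-queue",
    )
    print("workflow result:", result)


if __name__ == "__main__":
    asyncio.run(main())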