diff --git a/.github/jsonnet/GIT_VERSION b/.github/jsonnet/GIT_VERSION index c94d00f2..ec3ee9e7 100644 --- a/.github/jsonnet/GIT_VERSION +++ b/.github/jsonnet/GIT_VERSION @@ -1 +1 @@ -5d9c576c6fbbcd81178560862e084035c0e72ceb +ba69e229877c795d0b03d039219b44958f553f97 diff --git a/.github/jsonnet/base.jsonnet b/.github/jsonnet/base.jsonnet index 0b85fa81..66486e2d 100644 --- a/.github/jsonnet/base.jsonnet +++ b/.github/jsonnet/base.jsonnet @@ -2,6 +2,16 @@ local images = import 'images.jsonnet'; local misc = import 'misc.jsonnet'; { + /** + * Creates a complete GitHub Actions workflow pipeline with multiple jobs. + * + * @param {string} name - The name of the workflow (becomes the .yml filename) + * @param {array of jobs} jobs - Array of job objects (created with ghJob, ghExternalJob, etc.) + * @param {array} [event=['pull_request']] - GitHub events that trigger this workflow + * @param {object} [permissions=null] - Permissions for the workflow (e.g., {contents: 'read'}) + * @param {object} [concurrency=null] - Concurrency settings to limit parallel runs + * @returns {workflows} - GitHub Actions YAML manifest + */ pipeline(name, jobs, event=['pull_request'], permissions=null, concurrency=null):: { [name + '.yml']: std.manifestYamlDoc( @@ -13,6 +23,25 @@ local misc = import 'misc.jsonnet'; ), }, + /** + * Creates a GitHub Actions job that runs on a containerized runner. + * + * @param {string} name - The name of the job (used as the job key) + * @param {number} [timeoutMinutes=30] - Maximum time in minutes before job is cancelled. Max value is 55, after which the runner is killed. 
+ * @param {string} [runsOn=null] - Runner type (defaults to 'arc-runner-2') + * @param {string} [image=images.default_job_image] - Docker image to run the job in + * @param {steps} [steps=[]] - Array of step objects (created with step() or action()) + * @param {string} [ifClause=null] - Conditional expression to determine if job should run + * @param {array} [needs=null] - Array of job names this job depends on + * @param {object} [outputs=null] - Job outputs available to dependent jobs + * @param {boolean} [useCredentials=true] - Whether to use Docker registry credentials. Must be set to false for public images. + * @param {object} [services=null] - Service containers to run alongside the job + * @param {object} [permissions=null] - Job-level permissions (overrides workflow permissions) + * @param {object} [concurrency=null] - Job-level concurrency settings + * @param {boolean} [continueOnError=null] - Whether to continue workflow if job fails + * @param {object} [env=null] - Environment variables for all steps in the job + * @returns {jobs} - GitHub Actions job definition + */ ghJob( name, timeoutMinutes=30, @@ -55,6 +84,14 @@ local misc = import 'misc.jsonnet'; (if env == null then {} else { env: env }), }, + /** + * Creates a GitHub Actions job that uses a reusable workflow from another repository. + * + * @param {string} name - The name of the job (used as the job key) + * @param {string} uses - The reusable workflow reference (e.g., 'owner/repo/.github/workflows/workflow.yml@ref') + * @param {object} [with=null] - Input parameters to pass to the reusable workflow + * @returns {jobs} - GitHub Actions external job definition + */ ghExternalJob( name, uses, @@ -68,6 +105,38 @@ local misc = import 'misc.jsonnet'; } else {}), }, + /** + * Creates a GitHub Actions step that runs shell commands. 
+ * + * @docs https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#jobsjob_idsteps + * + * @param {string} name - Display name for the step in the GitHub UI + * @param {string} run - Shell command(s) to execute + * @param {object} [env=null] - Environment variables for this step + * @param {string} [workingDirectory=null] - Directory to run the command in + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {string} [id=null] - Unique identifier for this step (used to reference outputs) + * @param {boolean} [continueOnError=null] - Whether to continue job if this step fails, defaults to false + * @param {string} [shell=null] - Shell to use for running commands (e.g., 'bash', 'python', 'powershell', defaults to 'bash') + * @returns {steps} - Array containing a single step object + + * @example + * base.step( + * name='Run tests', + * run='pytest tests/', + * env={ 'ENV_VAR': 'value' }, + * workingDirectory='backend', + * ) + * + * base.step( + * name='Set up Python', + * run=||| + * python -m venv venv + * source venv/bin/activate + * pip install -r requirements.txt + * |||, + * ) + */ step(name, run, env=null, workingDirectory=null, ifClause=null, id=null, continueOnError=null, shell=null):: [ { @@ -81,6 +150,22 @@ local misc = import 'misc.jsonnet'; + (if shell == null then {} else { 'shell': shell }), ], + /** + * Creates a GitHub Actions step that uses a predefined action from the marketplace or repository. + * Security: Prefer pinning action references to a full commit SHA (e.g., actions/checkout@) instead of a mutable tag/version, + * especially for lesser-known or smaller third-party actions to reduce supply chain attack risk. 
+ * + * @docs https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#jobsjob_idsteps + * + * @param {string} name - Display name for the step in the GitHub UI + * @param {string} uses - The action to use (e.g., 'actions/checkout@v4', './path/to/action') + * @param {object} [env=null] - Environment variables for this step + * @param {object} [with=null] - Input parameters to pass to the action + * @param {string} [id=null] - Unique identifier for this step (used to reference outputs) + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {boolean} [continueOnError=null] - Whether to continue job if this step fails + * @returns {steps} - Array containing a single step object + */ action(name, uses, env=null, with=null, id=null, ifClause=null, continueOnError=null):: [ { diff --git a/.github/jsonnet/buckets.jsonnet b/.github/jsonnet/buckets.jsonnet index b2298d91..cbb1ddea 100644 --- a/.github/jsonnet/buckets.jsonnet +++ b/.github/jsonnet/buckets.jsonnet @@ -1,33 +1,29 @@ { - // Uploads all files in the source folder to the destination bucket, including compression and TTL headers. - // - // Warnings: - // - remote/destination files not included in the source will be DELETED recursively if pruneRemote is true! - // - the files in the source directory will be modified. Do not attempt to use this directory after running this command. - // - must be run with bash shell. - // - // Parameters: - // sourcePath: The source directory to upload. Can be a local folder of a path in a bucket, depending on sourceBucket. Required. - // sourceBucket: The source bucket. If null, the sourcePath is a local directory. Defaults to null. - // destinationBucket: The destination bucket. Required. - // destinationPath: The destination directory in the bucket. Required. - // - // pruneRemote: If true, all files in the destination bucket that are not in the source will be deleted. 
Can only be used with destinationPath containing 'pr-'. - // - // compressFileExtentions: A list of file extentions that will be compressed. Set to an empty list to disable compression. - // compressJobs: The number of parallel gzip compression jobs. Use 4 for arc-runner-2 and 16 for arc-runner-16. Defaults to 4. - // - // lowTTLfiles: A list of files, or a single regex, that will be uploaded with a low TTL. Use this for files that are not fingerprinted. - // - // lowTTL: The TTL for lowTTLfiles. Defaults to 60 seconds. - // lowTTLStaleWhileRevalidate: The stale-while-revalidate value for lowTTLfiles. Defaults to 60 seconds. - // lowTTLHeader: The Cache-Control header for lowTTLfiles. This is generated from lowTTL and lowTTLStaleWhileRevalidate. - // - // highTTL: The TTL for all other files. Defaults to 1 week. - // highTTLStaleWhileRevalidate: The stale-while-revalidate value for all other files. Defaults to 1 day. - // highTTLHeader: The Cache-Control header for all other files. This is generated from highTTL and highTTLStaleWhileRevalidate. - // - // additionalHeaders: Additional headers to add to all uploaded files. This should be an array of strings. + /** + * Uploads all files in the source folder to the destination bucket, including compression and TTL headers. + * + * WARNINGS: + * - Remote/destination files not included in the source will be DELETED recursively if pruneRemote is true! + * - The files in the source directory will be modified. Do not attempt to use this directory after running this command. + * - Must be run with bash shell. + * + * @param {string} sourcePath - The source directory to upload. Can be a local folder or a path in a bucket, depending on sourceBucket. Required. + * @param {string} [sourceBucket=null] - The source bucket. If null, the sourcePath is a local directory. + * @param {string} destinationBucket - The destination bucket. Required. + * @param {string} destinationPath - The destination directory in the bucket. Required. 
+ * @param {boolean} [pruneRemote=false] - If true, all files in the destination bucket that are not in the source will be deleted. Can only be used with destinationPath containing 'pr-'. + * @param {array} [compressFileExtentions=['css', 'svg', 'html', 'json', 'js', 'xml', 'txt', 'map']] - A list of file extensions that will be compressed. Set to an empty list to disable compression. + * @param {number} [compressJobs=4] - The number of parallel gzip compression jobs. Use 4 for arc-runner-2 and 16 for arc-runner-16. + * @param {array|string} [lowTTLfiles=[]] - A list of files, or a single regex, that will be uploaded with a low TTL. Use this for files that are not fingerprinted. + * @param {number} [lowTTL=60] - The TTL for lowTTLfiles in seconds. + * @param {number} [lowTTLStaleWhileRevalidate=60] - The stale-while-revalidate value for lowTTLfiles in seconds. + * @param {string} [lowTTLHeader] - The Cache-Control header for lowTTLfiles. This is generated from lowTTL and lowTTLStaleWhileRevalidate. + * @param {number} [highTTL=604800] - The TTL for all other files in seconds (defaults to 1 week). + * @param {number} [highTTLStaleWhileRevalidate=86400] - The stale-while-revalidate value for all other files in seconds (defaults to 1 day). + * @param {string} [highTTLHeader] - The Cache-Control header for all other files. This is generated from highTTL and highTTLStaleWhileRevalidate. + * @param {array} [additionalHeaders=[]] - Additional headers to add to all uploaded files. This should be an array of strings. + * @returns {string} - Complete bash command for uploading files to Google Cloud Storage with compression and caching + */ uploadFilesToBucketCommand( sourcePath, sourceBucket=null, @@ -53,7 +49,7 @@ local highTTLfilesRegex = '(?!' 
+ lowTTLfilesRegex + ').*'; local hasCompressedFiles = (std.isArray(compressFileExtentions) && std.length(compressFileExtentions) > 0) || (std.isString(compressFileExtentions) && compressFileExtentions != ''); - local compressedFilesRegex = '(' + std.join('|', std.map(function(ext) '((.*(\\.|/))?' + ext + ')', compressFileExtentions)) + ')'; + local compressedFilesRegex = '(' + std.join('|', std.map(function(ext) '(.*\\.' + ext + ')', compressFileExtentions)) + ')'; local uncompressedFilesRegex = '(?!' + compressedFilesRegex + ').*'; local compressionHeader = 'Content-Encoding: gzip'; diff --git a/.github/jsonnet/cache.jsonnet b/.github/jsonnet/cache.jsonnet index a6aeb70e..ecde2f35 100644 --- a/.github/jsonnet/cache.jsonnet +++ b/.github/jsonnet/cache.jsonnet @@ -1,21 +1,26 @@ local base = import 'base.jsonnet'; { - // Fetch a cache from the cache server. - // This is a generic function that can be used to fetch any cache. It is advised to wrap this function - // in a more specific function that fetches a specific cache, setting the cacheName and folders parameters. - // - // To be paired with the uploadCache function. - // - // Parameters: - // cacheName: The name of the cache to fetch. The name of the repository is usually a good option. Required. - // backupCacheName: The name of a backup cache to fetch if the main cache fails. Default is null. - // folders: A list of folders that are in the cache. These will be deleted if the download fails. Can be an empty list if additionalCleanupCommands are used. - // additionalCleanupCommands: A list of additional commands to run if the download fails. Default is an empty list. - // ifClause: An optional if clause to conditionally run this step. Default is null. - // workingDirectory: The working directory for this step. Default is null. - // retry: Whether to retry the download if it fails. Default is true. - // continueWithoutCache: Whether to continue if the cache is not found. Default is true. 
+ /** + * Fetch a cache from the cache server. + * + * This is a generic function that can be used to fetch any cache. It is advised to wrap this function + * in a more specific function that fetches a specific cache, setting the cacheName and folders parameters. + * + * To be paired with the uploadCache function. + * + * @param {string} cacheName - The name of the cache to fetch. The name of the repository is usually a good option. + * @param {string} [backupCacheName=null] - The name of a backup cache to fetch if the main cache fails. + * @param {array} [folders=[]] - A list of folders that are in the cache. These will be deleted if the download fails. Can be an empty list if additionalCleanupCommands are used. + * @param {string} [version='v1'] - The version of the cache to fetch. + * @param {string} [backupCacheVersion=version] - The version of the backup cache to fetch. + * @param {array} [additionalCleanupCommands=[]] - A list of additional commands to run if the download fails. + * @param {string} [ifClause=null] - An optional if clause to conditionally run this step. + * @param {string} [workingDirectory=null] - The working directory for this step. + * @param {boolean} [retry=true] - Whether to retry the download if it fails. + * @param {boolean} [continueWithoutCache=true] - Whether to continue if the cache is not found. + * @returns {steps} - GitHub Actions step to download cache from Google Cloud Storage + */ fetchCache( cacheName, backupCacheName=null, @@ -75,17 +80,21 @@ local base = import 'base.jsonnet'; workingDirectory=workingDirectory, ), - // Uploads a cache to the cache server. - // This is a generic function that can be used to upload any cache. It is advised to wrap this function - // in a more specific function that uploads a specific cache, setting the cacheName and folders parameters. - // - // To be paired with the fetchCache function. - // - // Parameters: - // cacheName: The name of the cache to upload. 
The name of the repository is usually a good option. Required. - // folders: A list of folders to include in the cache. Required unless tarCommand is given. - // compressionLevel: The compression level to use for zstd. Default is 10. - // tarCommand: The command to run to create the tar file. Default is 'tar -c ' + std.join(' ', folders). + /** + * Uploads a cache to the cache server. + * + * This is a generic function that can be used to upload any cache. It is advised to wrap this function + * in a more specific function that uploads a specific cache, setting the cacheName and folders parameters. + * + * To be paired with the fetchCache function. + * + * @param {string} cacheName - The name of the cache to upload. The name of the repository is usually a good option. + * @param {array} [folders=null] - A list of folders to include in the cache. Required unless tarCommand is given. + * @param {string} [version='v1'] - The version of the cache to upload. + * @param {number} [compressionLevel=10] - The compression level to use for zstd. + * @param {string} [tarCommand='tar -c ' + std.join(' ', folders)] - The command to run to create the tar file. + * @returns {steps} - GitHub Actions step to upload cache to Google Cloud Storage with zstd compression + */ uploadCache( cacheName, folders=null, @@ -110,13 +119,16 @@ local base = import 'base.jsonnet'; 'echo "Upload finished"\n' ), - // Removes a cache from the cache server. - // This is a generic function that can be used to remove any cache. It is advised to wrap this function - // in a more specific function that removes a specific cache, setting the cacheName parameter. - // - // Parameters: - // cacheName: The name of the cache to remove. The name of the repository is usually a good option. Required. - // version: The version of the cache to remove. Default is 'v1'. + /** + * Removes a cache from the cache server. + * + * This is a generic function that can be used to remove any cache. 
It is advised to wrap this function + * in a more specific function that removes a specific cache, setting the cacheName parameter. + * + * @param {string} cacheName - The name of the cache to remove. The name of the repository is usually a good option. + * @param {string} [version='v1'] - The version of the cache to remove. + * @returns {steps} - GitHub Actions step to remove cache from Google Cloud Storage + */ removeCache(cacheName, version='v1'):: base.step( 'remove ' + cacheName + ' cache', diff --git a/.github/jsonnet/clusters.jsonnet b/.github/jsonnet/clusters.jsonnet index c1483e39..f266f61f 100644 --- a/.github/jsonnet/clusters.jsonnet +++ b/.github/jsonnet/clusters.jsonnet @@ -1,5 +1,12 @@ local misc = import 'misc.jsonnet'; +/** + * Kubernetes Cluster Configuration + * + * This module defines configuration for different Kubernetes clusters used for deployments. + * Each cluster configuration includes project information, authentication secrets, and + * node selector settings for job scheduling. + */ { test: { project: 'gynzy-test-project', diff --git a/.github/jsonnet/complete-workflows.jsonnet b/.github/jsonnet/complete-workflows.jsonnet index 096278ee..a9b18ad5 100644 --- a/.github/jsonnet/complete-workflows.jsonnet +++ b/.github/jsonnet/complete-workflows.jsonnet @@ -3,13 +3,21 @@ local misc = import 'misc.jsonnet'; local yarn = import 'yarn.jsonnet'; { - /* - @param {string[]} repositories - The repositories to publish to - @param {boolean} isPublicFork - Whether the repository is a public fork - @param {boolean} checkVersionBump - Whether to assert if the version was bumped (recommended) - @param {ghJob} testJob - a job to be ran during PR to assert tests. can be an array of jobs - @param {string} branch - the branch to run the publish-prod job on - */ + /** + * Creates a complete set of workflows for JavaScript package publishing and testing. + * + * Generates three pipelines: + * 1. 'misc' - Jsonnet validation workflow + * 2. 
'publish-prod' - Production package publishing on branch push + * 3. 'pr' - Pull request preview publishing and testing + * + * @param {array} [repositories=['gynzy']] - The repositories to publish to + * @param {boolean} [isPublicFork=true] - Whether the repository is a public fork (affects runner selection) + * @param {boolean} [checkVersionBump=true] - Whether to assert if the version was bumped (recommended) + * @param {jobs} [testJob=null] - A job to be run during PR to assert tests. Can be an array of jobs + * @param {string} [branch='main'] - The branch to run the publish-prod job on + * @returns {workflows} - Complete set of GitHub Actions workflows for JavaScript package lifecycle + */ workflowJavascriptPackage(repositories=['gynzy'], isPublicFork=true, checkVersionBump=true, testJob=null, branch='main'):: local runsOn = (if isPublicFork then 'ubuntu-latest' else null); diff --git a/.github/jsonnet/databases.jsonnet b/.github/jsonnet/databases.jsonnet index 1fc81178..f0f6169f 100644 --- a/.github/jsonnet/databases.jsonnet +++ b/.github/jsonnet/databases.jsonnet @@ -2,17 +2,27 @@ local base = import 'base.jsonnet'; local images = import 'images.jsonnet'; { + /** + * Configuration for available database servers across different environments and services. 
+ * + * Each database server entry contains: + * - type: Database type (currently 'mysql') + * - server: Cloud SQL instance name + * - region: GCP region where the instance is located + * - project: GCP project ID containing the instance + * - lifecycle: Environment tier (test/production) + */ database_servers: { - test: { + 'test-ams-8': { type: 'mysql', - server: 'test-ams', + server: 'test-ams-8', region: 'europe-west4', project: 'unicorn-985', lifecycle: 'test', }, - 'test-ams-8': { + 'eu-w4-test': { type: 'mysql', - server: 'test-ams-8', + server: 'eu-w4-test', region: 'europe-west4', project: 'unicorn-985', lifecycle: 'test', @@ -23,19 +33,6 @@ local images = import 'images.jsonnet'; region: 'europe-west4', project: 'unicorn-985', }, - 'gynzy-test': { - type: 'mysql', - server: 'gynzy-test', - region: 'europe-west4', - project: 'gynzy-1090', - lifecycle: 'test', - }, - 'gynzy-production': { - type: 'mysql', - server: 'gynzy-production', - region: 'europe-west4', - project: 'gynzy-1090', - }, 'eu-w4-licenses-v8': { type: 'mysql', server: 'eu-w4-licenses-v8', @@ -78,8 +75,26 @@ local images = import 'images.jsonnet'; region: 'europe-west4', project: 'unicorn-985', }, + 'eu-w4-edu-v': { + type: 'mysql', + server: 'eu-w4-edu-v', + region: 'europe-west4', + project: 'unicorn-985', + }, }, + /** + * Creates a GitHub Actions step to copy a MySQL database for PR testing. + * + * This function creates a step that clones a source database to a PR-specific database + * for isolated testing. The target database name must contain '_pr_' for safety. 
+ * + * @param {object} mysqlActionOptions - MySQL action configuration object + * @param {string} mysqlActionOptions.database_name_target - Target database name (must contain '_pr_') + * @param {string} mysqlActionOptions.database_name_source - Source database to copy from + * @param {object} mysqlActionOptions.database_server - Database server configuration + * @returns {steps} - GitHub Actions step that copies the database + */ copyDatabase(mysqlActionOptions):: assert std.length(std.findSubstr('_pr_', mysqlActionOptions.database_name_target)) > 0; // target db gets deleted. must contain _pr_ @@ -93,6 +108,18 @@ local images = import 'images.jsonnet'; with=pluginOptions ), + /** + * Creates a GitHub Actions step to delete a MySQL PR database. + * + * This function creates a step that removes a PR-specific database after testing is complete. + * The target database name must contain '_pr_' for safety to prevent accidental deletion + * of production databases. + * + * @param {object} mysqlActionOptions - MySQL action configuration object + * @param {string} mysqlActionOptions.database_name_target - Target database name to delete (must contain '_pr_') + * @param {object} mysqlActionOptions.database_server - Database server configuration + * @returns {steps} - GitHub Actions step that deletes the database + */ deleteDatabase(mysqlActionOptions):: assert std.length(std.findSubstr('_pr_', mysqlActionOptions.database_name_target)) > 0; // this fn deletes the database. destination db must contain _pr_ diff --git a/.github/jsonnet/deployment.jsonnet b/.github/jsonnet/deployment.jsonnet index 8307171b..1d3c0b0c 100644 --- a/.github/jsonnet/deployment.jsonnet +++ b/.github/jsonnet/deployment.jsonnet @@ -4,51 +4,65 @@ local misc = import 'misc.jsonnet'; local notifications = import 'notifications.jsonnet'; { + /** + * Internal function to assert that a merge SHA is the latest commit on a branch. 
+ * + * Prevents creating a deployment event for a closed PR whose code is considered merged, but not the latest commit. + * For a more detailed explanation see `masterMergeDeploymentEventHook()`. + * + * @param {string} branch - The target branch to check + * @param {string} [sha='${{ github.sha }}'] - The SHA to verify + * @param {string} [repository='${{ github.repository }}'] - The repository to check + * @returns {steps} - GitHub Actions steps to verify merge SHA is latest + * @private + */ _assertMergeShaIsLatestCommit(branch, sha='${{ github.sha }}', repository='${{ github.repository }}'):: - [ - base.step('install jq curl', 'apk add --no-cache jq curl'), - base.step( - 'assert merge sha is latest commit', - ||| - HEAD_SHA=$(curl -L -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${GITHUB_TOKEN}" -H "X-GitHub-Api-Version: 2022-11-28" https://api.github.com/repos/${GITHUB_REPOSITORY}/branches/${TARGET_BRANCH} | jq -r .commit.sha); - if [ ${HEAD_SHA} == ${PR_MERGE_COMMIT_SHA} ]; then - echo "Merge sha is latest commit on branch ${TARGET_BRANCH}! HEAD_SHA: ${HEAD_SHA} PR_MERGE_COMMIT_SHA: ${PR_MERGE_COMMIT_SHA}"; - echo "CREATE_DEPLOY_EVENT=true" >> $GITHUB_OUTPUT - else - echo "Merge sha is not latest commit on branch ${TARGET_BRANCH}! 
HEAD_SHA: ${HEAD_SHA} PR_MERGE_COMMIT_SHA: ${PR_MERGE_COMMIT_SHA}"; - echo "CREATE_DEPLOY_EVENT=false" >> $GITHUB_OUTPUT - fi - |||, - env={ - PR_MERGE_COMMIT_SHA: sha, - GITHUB_REPOSITORY: repository, - TARGET_BRANCH: branch, - GITHUB_TOKEN: '${{ github.token }}', - }, - id='assert-merge-sha-is-latest-commit', - ), - ], + base.step('install jq curl', 'apk add --no-cache jq curl') + + base.step( + 'assert merge sha is latest commit', + ||| + HEAD_SHA=$(curl -L -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${GITHUB_TOKEN}" -H "X-GitHub-Api-Version: 2022-11-28" https://api.github.com/repos/${GITHUB_REPOSITORY}/branches/${TARGET_BRANCH} | jq -r .commit.sha); + if [ ${HEAD_SHA} == ${PR_MERGE_COMMIT_SHA} ]; then + echo "Merge sha is latest commit on branch ${TARGET_BRANCH}! HEAD_SHA: ${HEAD_SHA} PR_MERGE_COMMIT_SHA: ${PR_MERGE_COMMIT_SHA}"; + echo "CREATE_DEPLOY_EVENT=true" >> $GITHUB_OUTPUT + else + echo "Merge sha is not latest commit on branch ${TARGET_BRANCH}! HEAD_SHA: ${HEAD_SHA} PR_MERGE_COMMIT_SHA: ${PR_MERGE_COMMIT_SHA}"; + echo "CREATE_DEPLOY_EVENT=false" >> $GITHUB_OUTPUT + fi + |||, + env={ + PR_MERGE_COMMIT_SHA: sha, + GITHUB_REPOSITORY: repository, + TARGET_BRANCH: branch, + GITHUB_TOKEN: '${{ github.token }}', + }, + id='assert-merge-sha-is-latest-commit', + ), + - /* - * Creates a production deployment event on pr-close if all of the following conditions are met: - * - the pr is merged - * - the pr is merged into the default branch - * - the merge sha is the latest commit on the default branch. - * this prevents a deployment from beeing created in a specific edge case: - * - PR A is merged into PR B - * - PR B is merged into the default branch - * - now github closes both PRs and without this additional sanity check, both would create a deploy event + /** + * Creates a production deployment event on PR close if all conditions are met. 
+ * + * Conditions: + * - The PR is merged + * - The PR is merged into the default branch + * - The merge SHA is the latest commit on the default branch * - * For more complex deployment scenarios, use the branchMergeDeploymentEventHook instead + * This prevents deployments from being created in edge cases where: + * - PR A is merged into PR B + * - PR B is merged into the default branch + * - Both PRs would create deploy events without this sanity check * - * params: - * deployToTest {boolean} - if true, a deployment event is also created for the test environment - * prodBranch {string|null} - the branch to deploy to production. defaults to the default branch of the repository. but can be set to a differring release branch - * testBranch {string|null} - the branch to deploy to test. defaults to the default branch of the repository. but can be set to a differring test branch - * extraDeployTargets {string[]} - deploy targets to create deployment events for. defaults to ['production']. these targets will triger based on the configured prodBranch - * runsOn {string|null} - the name of the runner to run this job on. defaults to null, which will later on means the default self hosted runner will be used - * notifyOnTestDeploy {boolean} - if true, a slack message is sent when a test deployment is created - */ + * For more complex deployment scenarios, use the branchMergeDeploymentEventHook instead. + * + * @param {boolean} [deployToTest=false] - If true, a deployment event is also created for the test environment + * @param {string} [prodBranch=null] - The branch to deploy to production. Defaults to the default branch of the repository, but can be set to a different release branch + * @param {string} [testBranch=null] - The branch to deploy to test. Defaults to the default branch of the repository, but can be set to a different test branch + * @param {array} [deployTargets=['production']] - Deploy targets to create deployment events for. 
These targets will trigger based on the configured prodBranch + * @param {string} [runsOn=null] - The name of the runner to run this job on. Defaults to null, which means the default self-hosted runner will be used + * @param {boolean} [notifyOnTestDeploy=false] - If true, a Slack message is sent when a test deployment is created + * @returns {workflows} - GitHub Actions pipeline for deployment event creation on PR merge + */ masterMergeDeploymentEventHook(deployToTest=false, prodBranch=null, testBranch=null, deployTargets=['production'], runsOn=null, notifyOnTestDeploy=false):: local branches = [ { @@ -66,24 +80,25 @@ local notifications = import 'notifications.jsonnet'; self.branchMergeDeploymentEventHook(branches, runsOn=runsOn), - - /* - * Creates a production deployment event on pr-close if all of the following conditions are met: - * - the pr is merged - * - the pr is merged into the default branch - * - the merge sha is the latest commit on the default branch. - * this prevents a deployment from beeing created in a specific edge case: - * - PR A is merged into PR B - * - PR B is merged into the default branch - * - now github closes both PRs and without this additional sanity check, both would create a deploy event + /** + * Creates deployment events on PR close for multiple branches with different deployment targets. + * + * Conditions: + * - The PR is merged + * - The PR is merged into one of the configured branches + * - The merge SHA is the latest commit on the target branch + * + * This prevents deployments from being created in edge cases where: + * - PR A is merged into PR B + * - PR B is merged into the target branch + * - Both PRs would create deploy events without this sanity check * - * params: - * branches {{branch: string, deployments: string[], notifyOnDeploy: boolean}[]} - an array of the branches to create deployment events for. 
- * Each branch object has the following properties: - * branch {string} - the branch to which the PR has to be merged into. If '_default_' is used, the default branch of the repository is used. - * deployments {string[]} - the environments to deploy to. e.g. ['production', 'test'] - * notifyOnDeploy {boolean} - if true, a slack message is sent when a deployment is created - * runsOn {string|null} - the name of the runner to run this job on. defaults to null, which will later on means the default self hosted runner will be used + * @param {array} branches - Array of branch objects to create deployment events for + * @param {string} branches[].branch - The branch to which the PR has to be merged. If '_default_' is used, the default branch of the repository is used + * @param {array} branches[].deployments - The environments to deploy to (e.g., ['production', 'test']) + * @param {boolean} branches[].notifyOnDeploy - If true, a Slack message is sent when a deployment is created + * @param {string} [runsOn=null] - The name of the runner to run this job on. 
Defaults to null, which means the default self-hosted runner will be used + * @returns {workflows} - GitHub Actions pipeline for deployment event creation on PR merge to multiple branches */ branchMergeDeploymentEventHook(branches, runsOn=null):: base.pipeline( @@ -101,8 +116,8 @@ local notifications = import 'notifications.jsonnet'; useCredentials=false, runsOn=runsOn, permissions={ deployments: 'write', contents: 'read' }, - ifClause="${{ github.event.pull_request.merged == true}}", - steps=self._assertMergeShaIsLatestCommit(branch=branchName) + + ifClause='${{ github.event.pull_request.merged == true}}', + steps=[self._assertMergeShaIsLatestCommit(branch=branchName)] + std.map( function(deploymentTarget) base.action( @@ -141,9 +156,29 @@ local notifications = import 'notifications.jsonnet'; }, ), - /* - * Generate a github ifClause for the provided deployment targets: + /** + * Generate a GitHub ifClause for the provided deployment targets. + * + * @param {array} targets - Array of deployment target environment names + * @returns {string} - GitHub Actions conditional expression that matches any of the provided targets */ deploymentTargets(targets):: '${{ ' + std.join(' || ', std.map(function(target) "github.event.deployment.environment == '" + target + "'", targets)) + ' }}', + + /** + * Creates a step to update deployment status (success/failure) based on the result from the current job + * + * @returns {jobs} - GitHub Actions step that updates deployment status + */ + updateDeploymentStatus(status='${{ job.status }}'):: + base.action( + 'Update deployment status', + 'chrnorm/deployment-status@v2', + with={ + state: status, + ['deployment-id']: '${{ github.event.deployment.id }}', + token: '${{ secrets.GITHUB_TOKEN }}', + }, + ifClause='${{ always() }}', + ), } diff --git a/.github/jsonnet/docker.jsonnet b/.github/jsonnet/docker.jsonnet index 2fe4ba4e..f227296d 100644 --- a/.github/jsonnet/docker.jsonnet +++ b/.github/jsonnet/docker.jsonnet @@ -15,7 +15,7 @@ 
local misc = import 'misc.jsonnet'; * @param {object} [env] - Environment variables to be passed to the Docker build. * @param {object} [build_args] - Build arguments to be passed to the Docker build. * @param {string} [project] - The GCP project where the image will be store/pushed to. - * @returns {[object]} - a github actions step to build the docker image + * @returns {steps} - a github actions step to build the docker image */ buildDocker( imageName, diff --git a/.github/jsonnet/helm.jsonnet b/.github/jsonnet/helm.jsonnet index 6ee96dc8..47651d39 100644 --- a/.github/jsonnet/helm.jsonnet +++ b/.github/jsonnet/helm.jsonnet @@ -6,6 +6,29 @@ local misc = import 'misc.jsonnet'; local services = import 'services.jsonnet'; { + /** + * Creates a GitHub Actions step to deploy or delete a Helm chart to a Kubernetes cluster. + * + * @param {object} cluster - Target Kubernetes cluster configuration + * @param {string} cluster.project - GCP project containing the cluster + * @param {string} cluster.zone - GCP zone where the cluster is located + * @param {string} cluster.name - Name of the Kubernetes cluster + * @param {string} cluster.secret - Secret containing cluster service account JSON + * @param {string} release - Helm release name + * @param {object|string} values - Helm values (object will be JSON-encoded) + * @param {string} chartPath - Path to the Helm chart directory + * @param {boolean} [delete=false] - Whether to delete the release instead of deploying + * @param {boolean} [useHelm3=true] - Whether to use Helm 3 (recommended) + * @param {string} [title=null] - Custom step name (defaults to 'deploy-helm' or 'delete-helm') + * @param {string} [ifClause=null] - Conditional expression for step execution + * @param {string} [ttl=null] - Time-to-live for the release (e.g., '7 days'), the release is deleted after this period + * @param {string} [namespace='default'] - Kubernetes namespace for the release + * @param {string} [version='${{ 
github.event.pull_request.head.sha }}'] - Version/tag for the deployment + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies before deployment + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {steps} - GitHub Actions step for Helm deployment + */ deployHelm( cluster, release, @@ -17,7 +40,10 @@ local services = import 'services.jsonnet'; ifClause=null, ttl=null, namespace='default', - version='${{ github.event.pull_request.head.sha }}' + version='${{ github.event.pull_request.head.sha }}', + fetchDependencies=false, + wait=false, + timeout=null, ):: base.action( (if title == null then if delete then 'delete-helm' else 'deploy-helm' else title), @@ -33,13 +59,32 @@ local services = import 'services.jsonnet'; atomic: 'false', token: '${{ github.token }}', version: version, + 'fetch-dependencies': (if fetchDependencies then 'true' else 'false'), + wait: (if wait then 'true' else 'false'), values: if std.isString(values) then values else std.manifestJsonMinified(values), // Accepts a string and an object due to legacy reasons. } + (if delete then { task: 'remove' } else {}) + (if useHelm3 then { helm: 'helm3' } else { helm: 'helm' }) - + (if ttl != null then { ttl: ttl } else {}), + + (if ttl != null then { ttl: ttl } else {}) + + (if timeout != null then { timeout: timeout } else {}), ifClause=ifClause, ), + /** + * Creates a Helm deployment step for production environment. 
+ * + * @param {string} serviceName - Name of the service being deployed + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-prod'] - Helm release name + * @param {string} [ifClause=null] - Conditional expression for step execution + * @param {object} [cluster=clusters.prod] - Target cluster (defaults to production) + * @param {string} [namespace='default'] - Kubernetes namespace + * @param {string} [version='${{ github.event.pull_request.head.sha }}'] - Deployment version + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {steps} - GitHub Actions step for production deployment + */ helmDeployProd( serviceName, options={}, @@ -49,6 +94,9 @@ local services = import 'services.jsonnet'; cluster=clusters.prod, namespace='default', version='${{ github.event.pull_request.head.sha }}', + fetchDependencies=false, + wait=false, + timeout=null, ):: self.deployHelm( cluster, @@ -66,8 +114,26 @@ local services = import 'services.jsonnet'; ifClause=ifClause, namespace=namespace, version=version, + fetchDependencies=fetchDependencies, + wait=wait, + timeout=timeout ), + /** + * Creates a complete GitHub Actions job for production deployment. 
+ * + * @param {string} serviceName - Name of the service being deployed + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-prod'] - Helm release name + * @param {string} [image=images.default_job_image] - Container image for the job + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @param {string} [environment='production'] - GitHub environment for deployment + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {jobs} - Complete GitHub Actions job for production deployment + */ helmDeployProdJob( serviceName, options={}, @@ -76,6 +142,9 @@ local services = import 'services.jsonnet'; image=images.default_job_image, useCredentials=false, environment='production', + fetchDependencies=false, + wait=false, + timeout=null, ):: base.ghJob( 'deploy-prod', @@ -84,10 +153,24 @@ local services = import 'services.jsonnet'; useCredentials=useCredentials, steps=[ misc.checkout(), - self.helmDeployProd(serviceName, options, helmPath, deploymentName), + self.helmDeployProd(serviceName, options, helmPath, deploymentName, fetchDependencies=fetchDependencies, wait=wait, timeout=timeout), ], ), + /** + * Creates a Helm deployment step for test/master environment. 
+ * + * @param {string} serviceName - Name of the service being deployed + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-master'] - Helm release name + * @param {object} [cluster=clusters.test] - Target cluster (defaults to test) + * @param {string} [namespace='default'] - Kubernetes namespace + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {steps} - GitHub Actions step for test environment deployment + */ helmDeployTest( serviceName, options={}, @@ -95,6 +178,9 @@ local services = import 'services.jsonnet'; deploymentName=serviceName + '-master', cluster=clusters.test, namespace='default', + fetchDependencies=false, + wait=false, + timeout=null, ):: self.deployHelm( cluster, @@ -110,8 +196,25 @@ local services = import 'services.jsonnet'; useHelm3=true, title='deploy-test', namespace=namespace, + fetchDependencies=fetchDependencies, + wait=wait, + timeout=timeout ), + /** + * Creates a complete GitHub Actions job for test environment deployment. 
+ * + * @param {string} serviceName - Name of the service being deployed + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-master'] - Helm release name + * @param {string} [image=images.default_job_image] - Container image for the job + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {jobs} - Complete GitHub Actions job for test deployment + */ helmDeployTestJob( serviceName, options={}, @@ -119,6 +222,9 @@ local services = import 'services.jsonnet'; deploymentName=serviceName + '-master', image=images.default_job_image, useCredentials=false, + fetchDependencies=false, + wait=false, + timeout=null, ):: base.ghJob( 'deploy-test', @@ -127,10 +233,26 @@ local services = import 'services.jsonnet'; useCredentials=useCredentials, steps=[ misc.checkout(), - self.helmDeployTest(serviceName, options, helmPath, deploymentName), + self.helmDeployTest(serviceName, options, helmPath, deploymentName, fetchDependencies=fetchDependencies, wait=wait, timeout=timeout), ], ), + /** + * Creates a Helm deployment step for Pull Request environment. + * + * Deploys a PR-specific instance with a 7-day TTL for testing purposes. 
+ * + * @param {string} serviceName - Name of the service being deployed + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-pr-${{ github.event.number }}'] - PR-specific release name + * @param {object} [cluster=clusters.test] - Target cluster (defaults to test) + * @param {string} [namespace='default'] - Kubernetes namespace + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {steps} - GitHub Actions step for PR deployment with TTL + */ helmDeployPR( serviceName, options={}, @@ -138,6 +260,9 @@ local services = import 'services.jsonnet'; deploymentName=serviceName + '-pr-${{ github.event.number }}', cluster=clusters.test, namespace='default', + fetchDependencies=false, + wait=false, + timeout=null, ):: self.deployHelm( cluster, @@ -154,7 +279,25 @@ local services = import 'services.jsonnet'; title='deploy-pr', ttl='7 days', namespace=namespace, + fetchDependencies=fetchDependencies, + wait=wait, + timeout=timeout ), + + /** + * Creates a complete GitHub Actions job for PR deployment. 
+ * + * @param {string} serviceName - Name of the service being deployed + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-pr-${{ github.event.number }}'] - PR-specific release name + * @param {string} [image=images.default_job_image] - Container image for the job + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {jobs} - Complete GitHub Actions job for PR deployment + */ helmDeployPRJob( serviceName, options={}, @@ -162,6 +305,9 @@ local services = import 'services.jsonnet'; deploymentName=serviceName + '-pr-${{ github.event.number }}', image=images.default_job_image, useCredentials=false, + fetchDependencies=false, + wait=false, + timeout=null, ):: base.ghJob( 'deploy-pr', @@ -169,10 +315,24 @@ local services = import 'services.jsonnet'; useCredentials=useCredentials, steps=[ misc.checkout(), - self.helmDeployPR(serviceName, options, helmPath, deploymentName), + self.helmDeployPR(serviceName, options, helmPath, deploymentName, fetchDependencies=fetchDependencies, wait=wait, timeout=timeout), ], ), + /** + * Creates a Helm step to delete a PR deployment. 
+ * + * @param {string} serviceName - Name of the service being deleted + * @param {object} [options={}] - Helm values (usually not needed for deletion) + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-pr-${{ github.event.number }}'] - PR-specific release name to delete + * @param {object} [cluster=clusters.test] - Target cluster (defaults to test) + * @param {string} [namespace='default'] - Kubernetes namespace + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {steps} - GitHub Actions step for PR deletion + */ helmDeletePr( serviceName, options={}, @@ -180,6 +340,9 @@ local services = import 'services.jsonnet'; deploymentName=serviceName + '-pr-${{ github.event.number }}', cluster=clusters.test, namespace='default', + fetchDependencies=false, + wait=false, + timeout=null, ):: self.deployHelm( cluster, @@ -190,13 +353,33 @@ local services = import 'services.jsonnet'; delete=true, title='delete-pr', namespace=namespace, + wait=wait, + timeout=timeout ), + + /** + * Creates a complete GitHub Actions job for PR cleanup, including database deletion. 
+ * + * @param {string} serviceName - Name of the service being cleaned up + * @param {object} [options={}] - Helm values (usually not needed for deletion) + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-pr-${{ github.event.number }}'] - PR-specific release name to delete + * @param {object} [mysqlDeleteOptions={ enabled: false }] - MySQL database cleanup options + * @param {boolean} mysqlDeleteOptions.enabled - Whether to delete associated PR database + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {jobs} - Complete GitHub Actions job for PR cleanup + */ helmDeletePRJob( serviceName, options={}, helmPath='./helm/' + serviceName, deploymentName=serviceName + '-pr-${{ github.event.number }}', mysqlDeleteOptions={ enabled: false }, + fetchDependencies=false, + wait=false, + timeout=null, ):: base.ghJob( 'helm-delete-pr', @@ -204,22 +387,37 @@ local services = import 'services.jsonnet'; useCredentials=false, steps=[ misc.checkout(), - self.helmDeletePr(serviceName, options, helmPath, deploymentName), + self.helmDeletePr(serviceName, options, helmPath, deploymentName, fetchDependencies=fetchDependencies, wait=wait, timeout=timeout), ] + (if mysqlDeleteOptions.enabled then [databases.deleteDatabase(mysqlDeleteOptions)] else []), services=(if mysqlDeleteOptions.enabled then { 'cloudsql-proxy': services.cloudsql_proxy_service(mysqlDeleteOptions.database) } else null), ), + /** + * Creates a complete pipeline that automatically cleans up PR deployments when PRs are closed. 
+ * + * @param {string} serviceName - Name of the service being cleaned up + * @param {object} [options={}] - Helm values (usually not needed for deletion) + * @param {string} [helmPath='./helm/' + serviceName] - Path to the Helm chart + * @param {string} [deploymentName=serviceName + '-pr-${{ github.event.number }}'] - PR-specific release name to delete + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {workflows} - Complete GitHub Actions pipeline for automatic PR cleanup + */ helmDeletePRPipeline( serviceName, options={}, helmPath='./helm/' + serviceName, deploymentName=serviceName + '-pr-${{ github.event.number }}', + fetchDependencies=false, + wait=false, + timeout=null, ):: base.pipeline( 'close-pr', [ - self.helmDeletePRJob(serviceName, options, helmPath, deploymentName), + self.helmDeletePRJob(serviceName, options, helmPath, deploymentName, fetchDependencies=fetchDependencies, wait=wait, timeout=timeout), ], event={ pull_request: { @@ -228,12 +426,30 @@ local services = import 'services.jsonnet'; } ), + /** + * Creates a Helm deployment step for canary releases in production. + * + * Deploys a single replica canary instance for gradual rollout testing. 
+ * + * @param {string} serviceName - Name of the service being deployed + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName + '-canary'] - Path to the canary Helm chart + * @param {string} [deploymentName=serviceName + '-canary'] - Canary release name + * @param {string} [ifClause=null] - Conditional expression for step execution + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {steps} - GitHub Actions step for canary deployment + */ helmDeployCanary( serviceName, options={}, helmPath='./helm/' + serviceName + '-canary', deploymentName=serviceName + '-canary', ifClause=null, + fetchDependencies=false, + wait=false, + timeout=null, ):: self.deployHelm( clusters.prod, @@ -250,7 +466,25 @@ local services = import 'services.jsonnet'; useHelm3=true, title='deploy-canary', ifClause=ifClause, + fetchDependencies=fetchDependencies, + wait=wait, + timeout=timeout ), + + /** + * Creates a complete GitHub Actions job for canary deployment. 
+ * + * @param {string} serviceName - Name of the service being deployed + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName + '-canary'] - Path to the canary Helm chart + * @param {string} [deploymentName=serviceName + '-canary'] - Canary release name + * @param {string} [image=images.default_job_image] - Container image for the job + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {jobs} - Complete GitHub Actions job for canary deployment + */ helmDeployCanaryJob( serviceName, options={}, @@ -258,6 +492,9 @@ local services = import 'services.jsonnet'; deploymentName=serviceName + '-canary', image=images.default_job_image, useCredentials=false, + fetchDependencies=false, + wait=false, + timeout=null, ):: base.ghJob( 'deploy-canary', @@ -266,16 +503,36 @@ local services = import 'services.jsonnet'; ifClause="${{ github.event.deployment.environment == 'canary' }}", steps=[ misc.checkout(), - self.helmDeployCanary(serviceName, options, helmPath, deploymentName), + self.helmDeployCanary( + serviceName, options, helmPath, deploymentName, fetchDependencies=fetchDependencies, wait=wait, timeout=timeout, + ), ], ), + /** + * Creates a Helm step to scale down (kill) a canary deployment. + * + * Sets replica count to 0 to stop the canary without removing the release. 
+ * + * @param {string} serviceName - Name of the service canary to scale down + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName + '-canary'] - Path to the canary Helm chart + * @param {string} [deploymentName=serviceName + '-canary'] - Canary release name + * @param {string} [ifClause=null] - Conditional expression for step execution + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {steps} - GitHub Actions step to scale down canary deployment + */ helmKillCanary( serviceName, options={}, helmPath='./helm/' + serviceName + '-canary', deploymentName=serviceName + '-canary', ifClause=null, + fetchDependencies=false, + wait=false, + timeout=null, ):: self.deployHelm( clusters.prod, @@ -292,12 +549,33 @@ local services = import 'services.jsonnet'; useHelm3=true, title='kill-canary', ifClause=ifClause, + fetchDependencies=fetchDependencies, + wait=wait, + timeout=timeout ), + + /** + * Creates a complete GitHub Actions job to scale down canary deployments. + * + * Triggers when 'kill-canary' or 'production' deployment environments are used. 
+ * + * @param {string} serviceName - Name of the service canary to scale down + * @param {object} [options={}] - Additional Helm values to merge with defaults + * @param {string} [helmPath='./helm/' + serviceName + '-canary'] - Path to the canary Helm chart + * @param {string} [deploymentName=serviceName + '-canary'] - Canary release name + * @param {boolean} [fetchDependencies=false] - Whether to fetch Helm dependencies + * @param {boolean} [wait=false] - Whether to wait for resources to be ready before marking the release as successful + * @param {string} [timeout=null] - Time to wait for resources (pods) to become ready (e.g., '5m') + * @returns {jobs} - Complete GitHub Actions job to kill canary deployment + */ helmKillCanaryJob( serviceName, options={}, helmPath='./helm/' + serviceName + '-canary', deploymentName=serviceName + '-canary', + fetchDependencies=false, + wait=false, + timeout=null, ):: base.ghJob( 'kill-canary', @@ -306,7 +584,7 @@ local services = import 'services.jsonnet'; useCredentials=false, steps=[ misc.checkout(), - self.helmKillCanary(serviceName, options, helmPath, deploymentName), + self.helmKillCanary(serviceName, options, helmPath, deploymentName, fetchDependencies=fetchDependencies, wait=wait, timeout=timeout), ], ), } diff --git a/.github/jsonnet/images.jsonnet b/.github/jsonnet/images.jsonnet index c0d4e024..ddef17e0 100644 --- a/.github/jsonnet/images.jsonnet +++ b/.github/jsonnet/images.jsonnet @@ -1,17 +1,25 @@ +/** + * Docker Image Configuration + * + * This module defines standardized Docker images used across GitHub Actions workflows. + * It centralizes image references to ensure consistency and simplify updates. + * Images are primarily hosted on Google Cloud registries (GCR and Artifact Registry). 
+ */ { jsonnet_bin_image: 'europe-docker.pkg.dev/unicorn-985/private-images/docker-images_jsonnet:v1', - helm_action_image: 'docker://europe-docker.pkg.dev/gynzy-test-project/public-images/helm-action:v2', - mysql_action_image: 'docker://europe-docker.pkg.dev/unicorn-985/public-images/docker-images_mysql-cloner-action:v1', - docker_action_image: 'docker://europe-docker.pkg.dev/gynzy-test-project/public-images/push-to-gcr-github-action:v1', + helm_action_image: 'docker://europe-docker.pkg.dev/unicorn-985/public-images/helm-action:v4', + mysql_action_image: 'docker://europe-docker.pkg.dev/unicorn-985/public-images/docker-images_mysql-cloner-action:v2', + docker_action_image: 'docker://europe-docker.pkg.dev/unicorn-985/public-images/push-to-gcr-github-action:v1', default_job_image: 'mirror.gcr.io/alpine:3.20.0', default_mysql8_image: 'europe-docker.pkg.dev/unicorn-985/private-images/docker-images_mysql8_utf8mb4:v1', + default_mysql84_image: 'europe-docker.pkg.dev/unicorn-985/private-images/docker-images_mysql84_utf8mb4:v1', default_cloudsql_image: 'europe-docker.pkg.dev/unicorn-985/private-images/docker-images_cloudsql-sidecar:v1', default_redis_image: 'mirror.gcr.io/redis:5.0.6', - default_unicorns_image: 'mirror.gcr.io/node:18.15', + default_unicorns_image: 'mirror.gcr.io/node:22.16', default_pubsub_image: 'mirror.gcr.io/messagebird/gcloud-pubsub-emulator:latest', - default_mongodb_image: 'europe-docker.pkg.dev/unicorn-985/private-images/docker-images_mongo6-replicated:v1', + default_mongodb_image: 'europe-docker.pkg.dev/unicorn-985/private-images/docker-images_mongo8-replicated:v1', mongo_job_image: 'europe-docker.pkg.dev/unicorn-985/public-images/docker-images_mongo-cloner-job:v1', default_python_image: 'mirror.gcr.io/python:3.12.1', - default_pulumi_node_image: 'mirror.gcr.io/node:18', + default_pulumi_node_image: 'mirror.gcr.io/node:22', job_poster_image: 'europe-docker.pkg.dev/unicorn-985/public-images/docker-images_job-poster:v2', } diff --git 
a/.github/jsonnet/index.jsonnet b/.github/jsonnet/index.jsonnet index 2a171541..b9b901fc 100644 --- a/.github/jsonnet/index.jsonnet +++ b/.github/jsonnet/index.jsonnet @@ -17,4 +17,5 @@ (import 'complete-workflows.jsonnet') + { pnpm: import 'pnpm.jsonnet' } + { cache: import 'cache.jsonnet' } + -{ buckets: import 'buckets.jsonnet' } +{ buckets: import 'buckets.jsonnet' } + +{ onePassword: import 'onepassword.jsonnet' } diff --git a/.github/jsonnet/misc.jsonnet b/.github/jsonnet/misc.jsonnet index c00512c8..308cae06 100644 --- a/.github/jsonnet/misc.jsonnet +++ b/.github/jsonnet/misc.jsonnet @@ -2,6 +2,18 @@ local base = import 'base.jsonnet'; local images = import 'images.jsonnet'; { + /** + * Creates steps to check out repository code with intelligent SSH/HTTPS fallback. + * + * First attempts SSH checkout (if enabled), then falls back to HTTPS if SSH fails. + * Automatically installs git/ssh binaries if needed using system (apt/apk) package manager. + * + * @param {string} [ifClause=null] - Conditional expression for step execution + * @param {boolean} [fullClone=false] - Whether to perform full git clone (fetch-depth: 0) + * @param {string} [ref=null] - Specific git ref/branch/tag to checkout + * @param {boolean} [preferSshClone=true] - Whether to attempt SSH clone first + * @returns {steps} - GitHub Actions steps for repository checkout + */ checkout(ifClause=null, fullClone=false, ref=null, preferSshClone=true):: local with = (if fullClone then { 'fetch-depth': 0 } else {}) + (if ref != null then { ref: ref } else {}); local sshSteps = (if (preferSshClone) then @@ -70,6 +82,14 @@ local images = import 'images.jsonnet'; else self.checkoutWithoutSshMagic(ifClause, fullClone, ref), + /** + * Creates a simple repository checkout without SSH fallback logic. 
+ * + * @param {string} [ifClause=null] - Conditional expression for step execution + * @param {boolean} [fullClone=false] - Whether to perform full git clone (fetch-depth: 0) + * @param {string} [ref=null] - Specific git ref/branch/tag to checkout + * @returns {steps} - GitHub Actions steps for basic repository checkout + */ checkoutWithoutSshMagic(ifClause=null, fullClone=false, ref=null):: local with = (if fullClone then { 'fetch-depth': 0 } else {}) + (if ref != null then { ref: ref } else {}); base.action( @@ -80,19 +100,45 @@ local images = import 'images.jsonnet'; ) + base.step('git safe directory', "command -v git && git config --global --add safe.directory '*' || true"), + /** + * Creates a linting step for a specific service using ESLint. + * + * @param {string} service - Name of the service to lint + * @returns {steps} - GitHub Actions step to run ESLint on service files + */ lint(service):: base.step('lint-' + service, './node_modules/.bin/eslint "./packages/' + service + '/{app,lib,tests,config,addon}/**/*.js" --quiet'), + /** + * Creates a step to lint all code using yarn lint command. + * + * @returns {steps} - GitHub Actions step to run yarn lint + */ lintAll():: base.step('lint', 'yarn lint'), + /** + * Creates a step to verify good-fences architectural boundaries. + * + * @returns {steps} - GitHub Actions step to run good-fences verification + */ verifyGoodFences():: base.step('verify-good-fences', 'yarn run gf'), + /** + * Creates a step to run improved npm audit for security vulnerabilities. + * + * @returns {steps} - GitHub Actions step to run yarn improved-audit + */ improvedAudit():: base.step('audit', 'yarn improved-audit'), + /** + * Creates a complete pipeline to verify jsonnet workflow generation. 
+ * + * @returns {workflows} - GitHub Actions pipeline that validates jsonnet workflows on pull requests + */ verifyJsonnetWorkflow():: base.pipeline( 'misc', @@ -102,7 +148,14 @@ local images = import 'images.jsonnet'; event='pull_request', ), - verifyJsonnet(fetch_upstream=true, runsOn=null):: + /** + * Creates a GitHub Actions job to verify that jsonnet files generate correct workflows. + * + * @param {boolean} [fetch_upstream=false] - Whether to fetch the latest lib-jsonnet from upstream (deprecated) + * @param {string} [runsOn=null] - Runner type to use for the job + * @returns {jobs} - GitHub Actions job that validates jsonnet workflow generation + */ + verifyJsonnet(fetch_upstream=false, runsOn=null):: base.ghJob( 'verify-jsonnet-gh-actions', runsOn=runsOn, @@ -118,19 +171,34 @@ local images = import 'images.jsonnet'; + [ base.step('generate-workflows', 'jsonnet -m .github/workflows/ -S .github.jsonnet;'), base.step('git workaround', 'git config --global --add safe.directory $PWD'), - base.step('check-jsonnet-diff', 'git diff --exit-code'), base.step( - 'possible-causes-for-error', - 'echo "Possible causes: \n' + - '1. You updated jsonnet files, but did not regenerate the workflows. \n' + - "To fix, run 'yarn github:generate' locally and commit the changes. If this helps, check if your pre-commit hooks work.\n" + - '2. You used the wrong jsonnet binary. In this case, the newlines at the end of the files differ.\n' + - 'To fix, install the go binary. 
On mac, run \'brew uninstall jsonnet && brew install jsonnet-go\'"', - ifClause='failure()', + 'check-jsonnet-diff', ||| + echo "If this step fails, look at the end of the logs for possible causes"; + git diff --exit-code && exit 0; + echo "Error: mismatch between jsonnet <-> github workflows"; + echo "Possible reasons:"; + echo " - You updated jsonnet files, but did not regenerate the workflows."; + echo " To regenerate jsonnet run: 'rm .github/workflows/*; jsonnet -m .github/workflows/ -S .github.jsonnet'"; + echo " - You used the wrong jsonnet binary. In this case, the newlines at the end of the files differ."; + echo " To fix, install the go binary. On mac, run 'brew uninstall jsonnet && brew install go-jsonnet'"; + exit 1; + ||| ), ], ), + /** + * Creates a pipeline to automatically update PR descriptions and titles based on templates. + * + * @param {string} bodyTemplate - Template for the PR body content + * @param {string} [titleTemplate=''] - Template for the PR title + * @param {string} [baseBranchRegex=null] - Regex to match base branch names + * @param {string} [headBranchRegex=null] - Regex to match head branch names + * @param {string} [bodyUpdateAction='suffix'] - How to update the body ('suffix', 'prefix', 'replace') + * @param {string} [titleUpdateAction='prefix'] - How to update the title ('suffix', 'prefix', 'replace') + * @param {object} [otherOptions={}] - Additional options to pass to the action + * @returns {workflows} - GitHub Actions pipeline for automatic PR description updates + */ updatePRDescriptionPipeline( bodyTemplate, titleTemplate='', @@ -171,16 +239,15 @@ local images = import 'images.jsonnet'; }, ), - // Create a markdown table. - // - // The headers array and each row array must have the same length. 
- // - // Parameters: - // headers: a list of headers for the table - // rows: a list of rows, where each row is a list of values - // - // Returns: - // a markdown table as a string + /** + * Generates a markdown table from headers and rows data. + * + * The headers array and each row array must have the same length. + * + * @param {array} headers - Array of column header strings + * @param {array} rows - Array of row data (each row is an array of cell values) + * @returns {string} - Formatted markdown table + */ markdownTable(headers, rows):: local renderLine = function(line) '| ' + std.join(' | ', line) + ' |\n'; local renderedHeader = renderLine(headers) + renderLine(std.map(function(x) '---', headers)); @@ -193,58 +260,54 @@ local images = import 'images.jsonnet'; ); renderedHeader + std.join('', renderedRows), - // Create a collapsable markdown section. - // - // Parameters: - // title: the title of the section - // content: the content of the section - // - // Returns: - // a collapsable markdown section as a string + /** + * Creates a collapsible markdown section using HTML details/summary tags. + * + * @param {string} title - Title text for the collapsible section + * @param {string} content - Content to display when expanded + * @returns {string} - HTML details element with markdown content + */ markdownCollapsable(title, content):: '
\n' +
    '<summary>' + title + '</summary>\n\n' +
    content + '\n' +
    '</details>
\n', - // Create a markdown table with preview links. - // - // Parameters: - // environments: a list of environment names - // apps: a list of apps, where each app is an object with the following fields: - // - name: the name of the app - // - linkToLinear: a list of environment names for which to create a preview link in Linear - // - environment names: the environment links - // - the key is the environment name - // - the value is the link, or an object with the link name as the key and the link as the value. This is useful for multiple links per environment. - // - // Returns: - // a markdown table with preview links as a string - // - // Example: - // misc.previewLinksTable( - // ['pr', 'acceptance', 'test', 'prod'], - // [ - // { - // name: 'app1', - // pr: 'https://pr-link', - // acceptance: 'https://acceptance-link', - // test: 'https://test-link', - // prod: 'https://prod-link', - // }, - // { - // name: 'app2', - // linkToLinear: ['pr', 'acceptance'], - // pr: 'https://pr-link', - // acceptance: 'https://acceptance-link', - // test: 'https://test-link', - // prod: { - // prod-nl: 'https://prod-link/nl', - // prod-en: 'https://prod-link/en', - // }, - // }, - // ], - // ) + /** + * Creates a markdown table with preview links for different environments. 
+ * + * @param {array} environments - Array of environment names + * @param {array} apps - Array of app objects with the following fields: + * - name: The name of the app + * - linkToLinear: Array of environment names for which to create preview links in Linear + * - [environment]: The environment links (key is environment name, value is link or object with multiple links) + * @returns {string} - Markdown table with preview links and collapsible Linear links section + * + * @example + * misc.previewLinksTable( + * ['pr', 'acceptance', 'test', 'prod'], + * [ + * { + * name: 'app1', + * pr: 'https://pr-link', + * acceptance: 'https://acceptance-link', + * test: 'https://test-link', + * prod: 'https://prod-link', + * }, + * { + * name: 'app2', + * linkToLinear: ['pr', 'acceptance'], + * pr: 'https://pr-link', + * acceptance: 'https://acceptance-link', + * test: 'https://test-link', + * prod: { + * 'prod-nl': 'https://prod-link/nl', + * 'prod-en': 'https://prod-link/en', + * }, + * }, + * ], + * ) + */ previewLinksTable(environments, apps):: local headers = ['Application'] + environments; local rows = std.map( @@ -283,13 +346,38 @@ local images = import 'images.jsonnet'; ); self.markdownTable(headers, rows) + self.markdownCollapsable('Linear links', std.join('\n', linearLinks)), + /** + * Creates a shortened service name by removing common prefixes. + * + * @param {string} name - Full service name + * @returns {string} - Shortened service name without 'service-' prefix + */ shortServiceName(name):: assert name != null; std.strReplace(std.strReplace(name, 'gynzy-', ''), 'unicorn-', ''), + /** + * Creates a reference to a GitHub repository secret. + * + * @param {string} secretName - Name of the secret in GitHub repository settings + * @returns {string} - GitHub Actions expression to access the secret + */ secret(secretName):: '${{ secrets.' + secretName + ' }}', + /** + * Creates a step to poll a URL until it returns expected content. 
+ * + * Useful for verifying that deployments are healthy and serving correct content. + * + * @param {string} url - URL to poll for content verification + * @param {string} expectedContent - Content expected to be found in the response + * @param {string} [name='verify-deploy'] - Name of the verification step + * @param {string} [attempts='100'] - Maximum number of polling attempts + * @param {string} [interval='2000'] - Interval between attempts in milliseconds + * @param {string} [ifClause=null] - Conditional expression for step execution + * @returns {steps} - GitHub Actions step that polls URL until content matches + */ pollUrlForContent(url, expectedContent, name='verify-deploy', attempts='100', interval='2000', ifClause=null):: base.action( name, @@ -303,6 +391,14 @@ local images = import 'images.jsonnet'; ifClause=ifClause, ), + /** + * Creates a scheduled pipeline to automatically clean up old branches. + * + * Runs weekly on Monday at 12:00 UTC to remove branches older than 3 months. 
+ * + * @param {string} [protectedBranchRegex='^(main|master|gynzy|upstream)$'] - Regex pattern for branches to protect from deletion + * @returns {workflows} - GitHub Actions pipeline scheduled to clean up old branches + */ cleanupOldBranchesPipelineCron(protectedBranchRegex='^(main|master|gynzy|upstream)$'):: base.pipeline( 'purge-old-branches', @@ -310,12 +406,13 @@ local images = import 'images.jsonnet'; base.ghJob( 'purge-old-branches', useCredentials=false, + image=null, + runsOn='ubuntu-latest', steps=[ - base.step('setup', 'apk add git bash'), - self.checkout(), + base.action('checkout', 'actions/checkout@v4'), base.action( 'Run delete-old-branches-action', - 'beatlabs/delete-old-branches-action@6e94df089372a619c01ae2c2f666bf474f890911', + 'beatlabs/delete-old-branches-action@4eeeb8740ff8b3cb310296ddd6b43c3387734588', with={ repo_token: '${{ github.token }}', date: '3 months ago', @@ -337,34 +434,31 @@ local images = import 'images.jsonnet'; }, ), - // Test if the changed files match the given glob patterns. - // Can test for multiple pattern groups, and sets multiple outputs. - // - // Parameters: - // changedFiles: a map of grouped glob patterns to test against. - // The map key is the name of the group. - // The map value is a list of glob patterns (as string, can use * and **) to test against. 
- // - // Outputs: - // steps.changes.outputs.: true if the group matched, false otherwise - // - // Permissions: - // Requires the 'pull-requests': 'read' permission - // - // Example: - // misc.testForChangedFiles({ - // 'app': ['packages/*/app/**/*', 'package.json'], - // 'lib': ['packages/*/lib/**/*'], - // }) - // - // This will set the following outputs: - // - steps.changes.outputs.app: true if any of the changed files match the patterns in the 'app' group - // - steps.changes.outputs.lib: true if any of the changed files match the patterns in the 'lib' group - // - // These can be tested as in an if clause as follows: - // if: steps.changes.outputs.app == 'true' - // - // See https://github.com/dorny/paths-filter for more information. + /** + * Creates a step to test if changed files match the given glob patterns. + * + * Can test for multiple pattern groups and sets multiple outputs. + * Requires the 'pull-requests': 'read' permission. + * + * @param {object} changedFiles - Map of grouped glob patterns to test against + * - Key: Name of the group + * - Value: Array of glob patterns (can use * and **) to test against + * @param {string} [headRef=null] - Head commit reference (defaults to current) + * @param {string} [baseRef=null] - Base commit reference (defaults to target branch) + * @returns {steps} - GitHub Actions step that sets outputs: steps.changes.outputs. + * + * @example + * misc.testForChangedFiles({ + * 'app': ['packages/star/app/doublestar/star', 'package.json'], + * 'lib': ['packages/star/lib/doublestar/star'], + * }) + * + * // This sets outputs that can be tested in if clauses: + * // if: steps.changes.outputs.app == 'true' + * + * // Note: Replace 'star' with '*' and 'doublestar' with '**' in actual usage + * // See https://github.com/dorny/paths-filter for more information. 
+ */ testForChangedFiles(changedFiles, headRef=null, baseRef=null):: [ base.step('git safe directory', 'git config --global --add safe.directory $PWD'), @@ -383,15 +477,15 @@ local images = import 'images.jsonnet'; ), ], - // Wait for the given jobs to finish. - // Exits successfully if all jobs are successful, otherwise exits with an error. - // - // Parameters: - // name: the name of the github job - // jobs: a list of job names to wait for - // - // Returns: - // a job that waits for the given jobs to finish + /** + * Creates a job that waits for given jobs to finish. + * + * Exits successfully if all jobs are successful, otherwise exits with an error. + * + * @param {string} name - The name of the GitHub job + * @param {array} jobs - Array of job objects to wait for + * @returns {jobs} - GitHub Actions job that waits for the given jobs to finish + */ awaitJob(name, jobs):: local dependingJobs = std.flatMap( function(job) @@ -420,20 +514,22 @@ local images = import 'images.jsonnet'; ), ], - // Post a job to a kubernetes cluster - // - // Parameters: - // name: the name of the github job - // jobName: the name of the job to be posted - // cluster: the cluster to post the job to. This should be an object from the clusters module - // image: the image to use for the job - // environment: a map of environment variables to pass to the job - // command: the command to run in the job (optional) - // ifClause: the condition under which to run the job (optional) - // memory: the memory requested for the job (optional) - // memoryLimit: the memory limit for the job (optional) - // cpu: the cpu requested for the job (optional) - // cpuLimit: the cpu limit for the job (optional) + /** + * Creates a Kubernetes job that runs a container with specified resources. 
+ * + * @param {string} name - Display name for the GitHub Actions step + * @param {string} jobName - Kubernetes job name (must be unique) + * @param {object} cluster - Target Kubernetes cluster configuration + * @param {string} image - Docker image to run in the job + * @param {object} environment - Environment variables for the container + * @param {string} [command=''] - Command to run in the container + * @param {string} [ifClause=null] - Conditional expression for step execution + * @param {string} [memory='100Mi'] - Memory request for the container + * @param {string} [memoryLimit='100Mi'] - Memory limit for the container + * @param {string} [cpu='100m'] - CPU request for the container + * @param {string} [cpuLimit='100m'] - CPU limit for the container + * @returns {steps} - GitHub Actions step that creates and monitors Kubernetes job + */ postJob(name, jobName, cluster, image, environment, command='', ifClause=null, memory='100Mi', memoryLimit='100Mi', cpu='100m', cpuLimit='100m'):: base.action( name, @@ -457,10 +553,14 @@ local images = import 'images.jsonnet'; } + environment, ), - // Auto approve PRs made by specific users. Usefull for renovate PRs. - // - // Parameters: - // users: a list of users to auto approve PRs for. Defaults to gynzy-virko. + /** + * Creates a pipeline to auto-approve PRs made by specific users. + * + * Useful for automatically approving renovate PRs or other trusted automation. + * + * @param {array} [users=['gynzy-virko']] - Array of usernames to auto-approve PRs for + * @returns {workflows} - GitHub Actions pipeline that auto-approves PRs from specified users + */ autoApprovePRs(users=['gynzy-virko']):: base.pipeline( 'auto-approve-prs', @@ -484,4 +584,69 @@ local images = import 'images.jsonnet'; pull_request: { types: ['opened'] }, }, ), + + /** + * Creates a step to obtain a mutex lock for mutual exclusion within a repository. 
+ * + * Most commonly used to gate Pulumi since it does its own locking but does not wait for the lock. + * + * @param {string} [lockName='lock'] - The name of the lock (branch used for locking) + * @param {string} [lockTimeout='1200'] - How long to wait for the lock in seconds (defaults to 20 minutes) + * @returns {steps} - GitHub Actions step that acquires a mutex lock + */ + getLockStep( + lockName='lock', + lockTimeout="1200", // seconds + ):: + base.action( + 'get mutex lock', + 'gynzy/gh-action-mutex@main', + with={ + branch: lockName, + timeout: lockTimeout, + }, + ), + + /** + * Creates a step to install the 1Password CLI tool. + * + * @param {string} [version='v2.31.1'] - Version of the 1Password CLI to install + * @returns {steps} - GitHub Actions step that installs 1Password CLI + */ + install1Password( + version='v2.31.1', + ):: + base.step( + 'Install 1Password CLI', + ||| + OP_INSTALL_DIR="$(mktemp -d)" + curl -sSfLo op.zip "https://cache.agilebits.com/dist/1P/op2/pkg/${OP_CLI_VERSION}/op_linux_${ARCH}_${OP_CLI_VERSION}.zip" + unzip -od "$OP_INSTALL_DIR" op.zip && rm op.zip + echo "$OP_INSTALL_DIR" >> "$GITHUB_PATH" + |||, + env = { + OP_CLI_VERSION: version, + ARCH: 'amd64' + } + ), + + /** + * Creates a step to configure Google Cloud authentication. + * Also configures Docker registry access. 
+ * + * @param {string} secret - Google Cloud service account JSON secret + * @returns {steps} - Array containing a single step object + */ + configureGoogleAuth(secret):: + base.step( + 'activate google service account', + run= + ||| + printf '%s' "${SERVICE_JSON}" > gce.json; + gcloud auth activate-service-account --key-file=gce.json; + gcloud --quiet auth configure-docker; + rm gce.json + |||, + env={ SERVICE_JSON: secret }, + ), } diff --git a/.github/jsonnet/mongo.jsonnet b/.github/jsonnet/mongo.jsonnet index 862ef2c5..4b83a1e6 100644 --- a/.github/jsonnet/mongo.jsonnet +++ b/.github/jsonnet/mongo.jsonnet @@ -27,7 +27,12 @@ local prodProjectSettings = { }; { - // List of available MongoDB clusters. + /** + * List of available MongoDB clusters. + * + * Each cluster contains configuration for connecting to MongoDB Atlas instances + * across different environments (test, production) and services. + */ mongo_clusters: { test: testProjectSettings { name: 'test', @@ -70,34 +75,34 @@ local prodProjectSettings = { // TODO: remove mongo_servers: self.mongo_clusters, - // Generate a deeplink to the Atlas UI for a given cluster and database. - // - // If the database is null, the link will point to the cluster overview. - // Otherwise, it will point to the database explorer. - // - // Parameters: - // cluster: The MongoDB cluster. One of the objects from the mongo_servers list. - // database: The name of the database (optional). - // - // Returns: - // string The deeplink to the Atlas UI. + /** + * Generate a deeplink to the Atlas UI for a given cluster and database. + * + * If the database is null, the link will point to the cluster overview. + * Otherwise, it will point to the database explorer. + * + * @param {object} mongoCluster - The MongoDB cluster. 
One of the objects from the mongo_clusters list + * @param {string} [database=null] - The name of the database (optional) + * @returns {string} - The deeplink to the Atlas UI + */ atlasDeeplink(mongoCluster, database=null):: if database == null || mongoCluster.clusterId == null then 'https://cloud.mongodb.com/v2/' + mongoCluster.projectId + '#clusters/detail/' + mongoCluster.name else 'https://cloud.mongodb.com/v2/' + mongoCluster.projectId + '#/metrics/replicaSet/' + mongoCluster.clusterId + '/explorer/' + database, - // Copy a MongoDB database to a new database. - // It does this by posting a job that runs the mongo-action image with the clone task. - // - // Parameters: - // service: The name of the service. - // mongoCluster: The MongoDB cluster to connect to. One of the objects from the mongo_servers list. - // testDatabase: The name of the source database. - // prDatabase: The name of the PR database. - // - // Returns: - // The job definition. + /** + * Copy a MongoDB database to a new database. + * + * It does this by posting a job that runs the mongo-action image with the clone task. + * The target database name must contain '_pr_' for safety. + * + * @param {string} service - The name of the service + * @param {object} mongoCluster - The MongoDB cluster to connect to. One of the objects from the mongo_clusters list + * @param {string} testDatabase - The name of the source database + * @param {string} prDatabase - The name of the PR database (must contain '_pr_') + * @returns {steps} - The job definition for copying the database + */ copyMongoDatabase(service, mongoCluster, testDatabase, prDatabase):: assert std.length(std.findSubstr('_pr_', prDatabase)) > 0; // target db gets deleted. must contain _pr_ @@ -124,16 +129,17 @@ local prodProjectSettings = { cpuLimit='1', ), - // Delete a MongoDB PR database. - // It does this by posting a job that runs the mongo-action image with the delete task. - // - // Parameters: - // service: The name of the service. 
- // mongoCluster: The MongoDB cluster to connect to. One of the objects from the mongo_servers list. - // prDatabase: The name of the PR database. - // - // Returns: - // The job definition. + /** + * Delete a MongoDB PR database. + * + * It does this by posting a job that runs the mongo-action image with the delete task. + * The target database name must contain '_pr_' for safety. + * + * @param {string} service - The name of the service + * @param {object} mongoCluster - The MongoDB cluster to connect to. One of the objects from the mongo_clusters list + * @param {string} prDatabase - The name of the PR database to delete (must contain '_pr_') + * @returns {steps} - The job definition for deleting the database + */ deleteMongoPrDatabase(service, mongoCluster, prDatabase):: assert std.length(std.findSubstr('_pr_', prDatabase)) > 0; // target db gets deleted. must contain _pr_ @@ -152,17 +158,17 @@ local prodProjectSettings = { JOB_REQUEST_MEM_LIMIT: '200Mi', }, ), - // Sync the indexes of a MongoDB database with the current codebase. - // - // Parameters: - // service: The name of the service. - // image: The name of the Docker image to use. - // mongoCluster: The MongoDB cluster to connect to. One of the objects from the mongo_servers list. - // database: The name of the database. - // ifClause: The condition to run the job. - // - // Returns: - // The job definition. + + /** + * Sync the indexes of a MongoDB database with the current codebase. + * + * @param {string} service - The name of the service + * @param {string} image - The name of the Docker image to use + * @param {object} mongoCluster - The MongoDB cluster to connect to. 
One of the objects from the mongo_clusters list + * @param {string} database - The name of the database + * @param {string} [ifClause=null] - The condition to run the job + * @returns {steps} - The job definition for syncing database indexes + */ mongoSyncIndexes(service, image, mongoCluster, database, ifClause=null):: misc.postJob( name='sync-mongo-indexes-' + mongoCluster.name + '-' + database, @@ -185,17 +191,17 @@ local prodProjectSettings = { command='docker/mongo-sync-indexes.sh', ), - // Generate a diff of the indexes of a MongoDB database and the currect codebase. - // The diff is posted as a comment on the pull request. - // - // Parameters: - // service: The name of the service. - // image: The name of the Docker image to use. - // mongoCluster: The MongoDB cluster to connect to. One of the objects from the mongo_servers list. - // database: The name of the database. - // - // Returns: - // The job definition. + /** + * Generate a diff of the indexes of a MongoDB database and the current codebase. + * + * The diff is posted as a comment on the pull request. + * + * @param {string} service - The name of the service + * @param {string} image - The name of the Docker image to use + * @param {object} mongoCluster - The MongoDB cluster to connect to. One of the objects from the mongo_clusters list + * @param {string} database - The name of the database + * @returns {steps} - The job definition for generating database index diffs + */ mongoDiffIndexes(service, image, mongoCluster, database):: local mongoDBLink = self.atlasDeeplink(mongoCluster, database); diff --git a/.github/jsonnet/newrelic.jsonnet b/.github/jsonnet/newrelic.jsonnet index cae2c366..6e7bb4ff 100644 --- a/.github/jsonnet/newrelic.jsonnet +++ b/.github/jsonnet/newrelic.jsonnet @@ -4,15 +4,27 @@ local misc = import 'misc.jsonnet'; local yarn = import 'yarn.jsonnet'; { + /** + * Creates a GitHub Actions job to post deployment information to New Relic. 
+ * + * @param {array} apps - Array of application objects containing deployment information + * @param {string} [cacheName=null] - Name of the cache to use for yarn dependencies + * @param {string} [source='gynzy'] - Registry source ('gynzy' or 'github') for npm packages + * @param {string} [image='mirror.gcr.io/node:20.17'] - Docker image to use for the job + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @returns {jobs} - GitHub Actions job definition for New Relic deployment notification + */ postReleaseToNewRelicJob( apps, cacheName=null, source='gynzy', + image='mirror.gcr.io/node:20.17', + useCredentials=false, ):: base.ghJob( 'post-newrelic-release', - image='mirror.gcr.io/node:20.17', - useCredentials=false, + image=image, + useCredentials=useCredentials, ifClause="${{ github.event.deployment.environment == 'production' }}", steps=[ yarn.checkoutAndYarn(ref='${{ github.sha }}', cacheName=cacheName, source=source), diff --git a/.github/jsonnet/notifications.jsonnet b/.github/jsonnet/notifications.jsonnet index 72cf2efb..1bec39a7 100644 --- a/.github/jsonnet/notifications.jsonnet +++ b/.github/jsonnet/notifications.jsonnet @@ -1,6 +1,14 @@ local base = import 'base.jsonnet'; { + /** + * Creates a Slack notification step that triggers on deployment failure. + * + * @param {string} [channel='#dev-deployments'] - Slack channel to send the notification to + * @param {string} [name='notify-failure'] - Name of the notification step + * @param {string} [environment='production'] - Environment name to include in the failure message + * @returns {steps} - GitHub Actions step that sends Slack notification on job failure + */ notifiyDeployFailure(channel='#dev-deployments', name='notify-failure', environment='production'):: base.action( name, @@ -14,6 +22,15 @@ local base = import 'base.jsonnet'; ifClause='failure()', ), + /** + * Creates a Slack notification step with a custom message. 
+ * + * @param {string} [channel='#dev-deployments'] - Slack channel to send the message to + * @param {string} [stepName='sendSlackMessage'] - Name of the notification step + * @param {string} [message=null] - Custom message to send to Slack + * @param {string} [ifClause=null] - Conditional expression to determine when to send the message + * @returns {steps} - GitHub Actions step that sends a Slack message + */ sendSlackMessage(channel='#dev-deployments', stepName='sendSlackMessage', message=null, ifClause=null):: base.action( stepName, @@ -27,9 +44,19 @@ local base = import 'base.jsonnet'; ifClause=ifClause, ), - // This action is used to create a deployment marker in New Relic. - // GUID is the entity guid of the application in New Relic. It can be found by All Entities > (select service) > Metadata > Entity GUID - newrelicCreateDeploymentMarker(stepName='newrelic-deployment', entityGuid):: + /** + * Creates a New Relic deployment marker to track deployments in APM. + * + * This action creates a deployment marker in New Relic to help correlate performance + * changes with deployments. 
The GUID can be found by navigating to: + * All Entities > (select service) > Metadata > Entity GUID in New Relic + * + * @param {string} [stepName='newrelic-deployment'] - Name of the deployment marker step + * @param {string} entityGuid - New Relic entity GUID for the application + * @param {string} [ifClause=null] - Conditional expression to determine when to set the deployment marker + * @returns {steps} - GitHub Actions step that creates a New Relic deployment marker + */ + newrelicCreateDeploymentMarker(entityGuid, stepName='newrelic-deployment', ifClause=null):: base.action( stepName, 'newrelic/deployment-marker-action@v2.5.0', @@ -39,5 +66,6 @@ local base = import 'base.jsonnet'; commit: '${{ github.sha }}', version: '${{ github.sha }}', }, + ifClause=ifClause, ), } diff --git a/.github/jsonnet/onepassword.jsonnet b/.github/jsonnet/onepassword.jsonnet new file mode 100644 index 00000000..f17073d7 --- /dev/null +++ b/.github/jsonnet/onepassword.jsonnet @@ -0,0 +1,103 @@ +local base = import 'base.jsonnet'; +local misc = import 'misc.jsonnet'; + +{ + /** + * Load secrets from 1Password PRODUCTION vault into the GitHub workflow. + * + * Requirements: + * - Cannot be run on Alpine image + * - Must have the following CLI tools available: curl, bash, unzip + * + * WARNING: The integration is flaky, there are no retries for transient failures on the 1Password side. 
+ * See: https://github.com/1Password/load-secrets-action/issues/102 + * + * The exported secrets can then be referenced in subsequent steps: + * ${{ steps.load-1password-secrets.outputs.SECRET_NAME }} + * Or as environment variables: + * env: onepassword.env('load-1password-secrets', ['SECRET_NAME']) + * + * @param {string} [stepName='load-1password-secrets'] - The name of the step + * @param {object} [secrets={}] - Dictionary of secrets to load, e.g.: + * - {SECRET_NAME: 'vaultItem/keyName'} + * - {SECRET_NAME: 'OAuth client id/notesPlain'} // for a secure note + * - {SECRET_NAME: 'somePassword/password'} // for a username/password combination + * The function will automatically prefix with the vault: 'op://Pulumi Prod/' + * @returns {steps} - GitHub Actions step that loads secrets from 1Password Production vault + */ + loadSecretsProd( + stepName='load-1password-secrets', + secrets={}, + ):: + local prefixedSecrets = std.mapWithKey(function(key, value) 'op://Pulumi Prod/' + value, secrets); + base.action( + stepName, + '1password/load-secrets-action@v2.0.0', + id=stepName, + with={ + 'export-env': false, + }, + env= + prefixedSecrets + { OP_SERVICE_ACCOUNT_TOKEN: misc.secret('PULUMI_1PASSWORD_PROD') }, + ), + + /** + * Load secrets from 1Password TEST vault into the GitHub workflow. + * + * Requirements: + * - Cannot be run on Alpine image + * - Must have the following CLI tools available: curl, bash, unzip + * + * WARNING: The integration is flaky, there are no retries for transient failures on the 1Password side. 
+ * See: https://github.com/1Password/load-secrets-action/issues/102 + * + * The exported secrets can then be referenced in subsequent steps: + * ${{ steps.load-1password-secrets.outputs.SECRET_NAME }} + * Or as environment variables: + * env: onepassword.env('load-1password-secrets', ['SECRET_NAME']) + * + * @param {string} [stepName='load-1password-secrets'] - The name of the step + * @param {object} [secrets={}] - Dictionary of secrets to load, e.g.: + * - {SECRET_NAME: 'vaultItem/keyName'} + * - {SECRET_NAME: 'OAuth client id/notesPlain'} // for a secure note + * - {SECRET_NAME: 'somePassword/password'} // for a username/password combination + * The function will automatically prefix with the vault: 'op://Pulumi Test/' + * @returns {steps} - GitHub Actions step that loads secrets from 1Password Test vault + */ + loadSecretsTest( + stepName='load-1password-secrets', + secrets={}, + ):: + local prefixedSecrets = std.mapWithKey(function(key, value) 'op://Pulumi Test/' + value, secrets); + base.action( + stepName, + '1password/load-secrets-action@v2.0.0', + id=stepName, + with={ + 'export-env': false, + }, + env= + prefixedSecrets + { OP_SERVICE_ACCOUNT_TOKEN: misc.secret('PULUMI_1PASSWORD_TEST') }, + ), + + /** + * Pass earlier loaded secrets as environment variables to the next step. + * + * Helper function to generate environment variable mappings for secrets loaded by 1Password steps. + * + * @param {string} stepName - The name of the step that loaded the secrets (e.g., 'load-1password-secrets') + * @param {array} [secrets=[]] - Array of secret names to map (e.g., ['SECRET_A', 'SECRET_B']) + * @returns {object} - Object mapping secret names to their GitHub Actions output references: { SECRET_A: '${{ steps.load-1password-secrets.outputs.SECRET_A }}' } + * + * @example + * onepassword.env('load-1password-secrets', ['SECRET_A', 'SECRET_B']) + */ + env(stepName, secrets=[]):: + std.foldl( + function(acc, secretName) acc + { [secretName]: '${{ steps.' 
+ stepName + '.outputs.' + secretName + ' }}' }, + secrets, + {} + ), +} diff --git a/.github/jsonnet/pnpm.jsonnet b/.github/jsonnet/pnpm.jsonnet index 8d8bc06b..f68fe4cf 100644 --- a/.github/jsonnet/pnpm.jsonnet +++ b/.github/jsonnet/pnpm.jsonnet @@ -1,15 +1,161 @@ local base = import 'base.jsonnet'; +local cache = import 'cache.jsonnet'; +local misc = import 'misc.jsonnet'; +local yarn = import 'yarn.jsonnet'; { - install(args=[], with={}, version='9.5.0'):: + /** + * Creates an action to install pnpm itself and then to run pnpm install + * + * @param {array} [args=[]] - Additional command line arguments for pnpm install + * @param {object} [with={}] - Additional configuration options + * @param {string} [version='10'] - PNPM version to use + * @param {boolean} [prod=false] - Whether to install only production dependencies + * @param {string} [storeDir=null] - Directory for pnpm store + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {string} [workingDirectory=null] - Directory to run pnpm in + * @returns {steps} - Array containing a single step object + */ + install(args=[], with={}, version='10', prod=false, storeDir=null, ifClause=null, workingDirectory=null):: base.action( - 'Install application code', + 'Install pnpm tool', 'pnpm/action-setup@v4', - with={ - version: version, - run_install: ||| - - args: %(args)s - ||| % { args: args }, - } + with= + { version: version } + + with, + ifClause=ifClause, + ) + + self.installPackages( + args=args, + prod=prod, + ifClause=ifClause, + workingDirectory=workingDirectory, + storeDir=storeDir, + ), + + /** + * Creates a step to run pnpm install with configurable options. 
+ * + * @param {array} [args=[]] - Additional command line arguments for pnpm install + * @param {boolean} [prod=false] - Whether to install only production dependencies + * @param {string} [storeDir=null] - Directory for pnpm store + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {string} [workingDirectory=null] - Directory to run pnpm in + * @returns {array} - Array containing a single step object + */ + installPackages(args=[], prod=false, storeDir=null, ifClause=null, workingDirectory=null):: + local installArgs = (if prod then args + ['--prod'] else args); + base.step( + 'Run pnpm install', + (if storeDir != null then 'pnpm config set store-dir ' + storeDir + ' && ' else '') + + 'pnpm install' + (if (std.length(installArgs) > 0) then ' ' + (std.join(' ', installArgs)) else ''), + ifClause=ifClause, + workingDirectory=workingDirectory + ), + + /** + * Creates a complete workflow combining checkout, npm token setup, cache fetching, and pnpm install. 
+ * + * @param {string} [cacheName=null] - Name of the cache to fetch/store pnpm dependencies + * @param {string} [ifClause=null] - Conditional expression to determine if steps should run + * @param {boolean} [fullClone=false] - Whether to perform a full git clone or shallow clone + * @param {string} [ref=null] - Git ref to checkout (branch, tag, or commit) + * @param {boolean} [prod=false] - Whether to install only production dependencies + * @param {string} [workingDirectory=null] - Directory to run operations in + * @param {string} [source='gynzy'] - Registry source ('gynzy' or 'github') + * @param {array} [pnpmInstallArgs=[]] - Additional arguments for pnpm install command + * @param {boolean} [setupPnpm=true] - Whether to set up and install pnpm itself before installing all packages + * @returns {steps} - Array of step objects for the complete workflow + */ + checkoutAndPnpm( + cacheName=null, + ifClause=null, + fullClone=false, + ref=null, + prod=false, + workingDirectory=null, + source='gynzy', + pnpmInstallArgs=[], + setupPnpm=true, + ):: + misc.checkout(ifClause=ifClause, fullClone=fullClone, ref=ref) + + (if source == 'gynzy' then yarn.setGynzyNpmToken(ifClause=ifClause, workingDirectory=workingDirectory) else []) + + (if source == 'github' then yarn.setGithubNpmToken(ifClause=ifClause, workingDirectory=workingDirectory) else []) + + (if cacheName == null then [] else self.fetchPnpmCache(cacheName, ifClause=ifClause, workingDirectory=workingDirectory)) + + (if setupPnpm then self.install( + ifClause=ifClause, + prod=prod, + args=pnpmInstallArgs, + workingDirectory=workingDirectory, + storeDir=(if cacheName != null then '.pnpm-store' else null), + ) else + self.installPackages( + ifClause=ifClause, + prod=prod, + args=pnpmInstallArgs, + workingDirectory=workingDirectory, + storeDir='.pnpm-store', + )), + + /** + * Creates steps to fetch pnpm cache from cloud storage. 
+ * + * @param {string} cacheName - Name of the cache to fetch + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {string} [workingDirectory=null] - Directory to extract cache to + * @returns {steps} - Array of step objects for cache fetching + */ + fetchPnpmCache(cacheName, ifClause=null, workingDirectory=null):: + cache.fetchCache( + cacheName=cacheName, + folders=['.pnpm-store'], + additionalCleanupCommands=["find . -type d -name 'node_modules' | xargs rm -rf"], + ifClause=ifClause, + workingDirectory=workingDirectory + ), + + /** + * Creates a complete pipeline to update pnpm cache on production deployments. + * + * @param {string} cacheName - Name of the cache to update + * @param {string} [appsDir='packages'] - Directory containing applications (currently unused) + * @param {string} [image=null] - Docker image to use for the job + * @param {boolean} [useCredentials=null] - Whether to use Docker registry credentials + * @param {boolean} [setupPnpm=true] - Whether to set up and install pnpm itself before installing all packages + * @param {string} [source=null] - Registry source ('gynzy' or 'github') + * @returns {workflows} - Complete GitHub Actions pipeline configuration + */ + updatePnpmCachePipeline(cacheName, appsDir='packages', image=null, useCredentials=null, setupPnpm=true, source=null):: + base.pipeline( + 'update-pnpm-cache', + [ + base.ghJob( + 'update-pnpm-cache', + image=image, + useCredentials=useCredentials, + ifClause="${{ github.event.deployment.environment == 'production' || github.event.deployment.environment == 'prod' }}", + steps=[ + self.checkoutAndPnpm( + cacheName=null, // to populate cache we want a clean install + setupPnpm=setupPnpm, + source=source, + ), + base.action( + 'setup auth', + 'google-github-actions/auth@v2', + with={ + credentials_json: misc.secret('SERVICE_JSON'), + }, + id='auth', + ), + base.action('setup-gcloud', 'google-github-actions/setup-gcloud@v2'), + 
cache.uploadCache( + cacheName=cacheName, + tarCommand='tar -c .pnpm-store', + ), + ], + ), + ], + event='deployment', ), } diff --git a/.github/jsonnet/pre-commit.sh b/.github/jsonnet/pre-commit.sh index 54202b03..28885946 100644 --- a/.github/jsonnet/pre-commit.sh +++ b/.github/jsonnet/pre-commit.sh @@ -5,7 +5,7 @@ set -e if git diff --staged --name-only --quiet -- '*.jsonnet'; then - echo "No changes detected, not regenrating gh actions yaml"; + echo "No changes detected, not regenerating gh actions yaml"; exit 0; else diff --git a/.github/jsonnet/pubsub.jsonnet b/.github/jsonnet/pubsub.jsonnet index 9a6b445f..5ede50b5 100644 --- a/.github/jsonnet/pubsub.jsonnet +++ b/.github/jsonnet/pubsub.jsonnet @@ -1,15 +1,20 @@ local base = import 'base.jsonnet'; local misc = import 'misc.jsonnet'; -local yarn = import 'yarn.jsonnet'; { + /** + * Creates a GitHub Actions job to delete PR-specific Google Cloud Pub/Sub subscriptions. + * + * @param {array|string} [needs=null] - Job dependencies that must complete before this job runs + * @returns {jobs} - GitHub Actions job definition for cleaning up PR-specific Pub/Sub subscriptions + */ deletePrPubsubSubscribersJob(needs=null):: base.ghJob( 'delete-pubsub-pr-subscribers', useCredentials=false, image='google/cloud-sdk:alpine', steps=[ - yarn.configureGoogleAuth(misc.secret('GCE_NEW_TEST_JSON')), + misc.configureGoogleAuth(misc.secret('GCE_NEW_TEST_JSON')), base.step('install jq', 'apk add jq'), base.step('show auth', 'gcloud auth list'), base.step('wait for pod termination', 'sleep 60'), @@ -19,5 +24,4 @@ local yarn = import 'yarn.jsonnet'; ], needs=needs, ), - } diff --git a/.github/jsonnet/pulumi.jsonnet b/.github/jsonnet/pulumi.jsonnet index 76c1c057..d12070ff 100644 --- a/.github/jsonnet/pulumi.jsonnet +++ b/.github/jsonnet/pulumi.jsonnet @@ -2,8 +2,11 @@ local base = import 'base.jsonnet'; local images = import 'images.jsonnet'; local misc = import 'misc.jsonnet'; local notifications = import 'notifications.jsonnet'; 
+local pnpm = import 'pnpm.jsonnet'; local yarn = import 'yarn.jsonnet'; +// Standard setup steps required for all Pulumi operations +// Includes authentication, cloud setup, and tool installation local pulumiSetupSteps = base.action( 'auth', @@ -15,9 +18,44 @@ local pulumiSetupSteps = ) + base.action('setup-gcloud', uses='google-github-actions/setup-gcloud@v2') + base.action('pulumi-cli-setup', 'pulumi/actions@v5') + - base.action('jsonnet-setup', 'kobtea/setup-jsonnet-action@v1'); + base.action('jsonnet-setup', 'kobtea/setup-jsonnet-action@v1') + + misc.install1Password() + + misc.getLockStep(lockName='lock-pulumi', lockTimeout='1200'); + +// Default environment variables for Pulumi operations +// Automatically configures different credentials based on stack (prod vs test) +local pulumiDefaultEnvironment(stack) = { + GITHUB_TOKEN: '${{ github.token }}', + PULUMI_CONFIG_PASSPHRASE: '${{ secrets.PULUMI_CONFIG_PASSPHRASE }}', + STATUSCAKE_API_TOKEN: '${{ secrets.STATUSCAKE_API_TOKEN }}', + STATUSCAKE_MIN_BACKOFF: '5', // seconds + STATUSCAKE_MAX_BACKOFF: '30', // seconds + STATUSCAKE_RETRIES: '10', + STATUSCAKE_RPS: '1', // requests per second. https://developers.statuscake.com/guides/api/ratelimiting/ +} + ( + if (stack == 'prod' || stack == 'production') then { + ACCOUNTS_API_CLIENT_ADMIN_USERNAME: '${{ secrets.ACCOUNTS_API_CLIENT_ADMIN_USERNAME_PROD }}', + ACCOUNTS_API_CLIENT_ADMIN_PASSWORD: '${{ secrets.ACCOUNTS_API_CLIENT_ADMIN_PASSWORD_PROD }}', + OP_SERVICE_ACCOUNT_TOKEN: '${{ secrets.PULUMI_1PASSWORD_PROD }}', + } else { + ACCOUNTS_API_CLIENT_ADMIN_USERNAME: '${{ secrets.ACCOUNTS_API_CLIENT_ADMIN_USERNAME_TEST }}', + ACCOUNTS_API_CLIENT_ADMIN_PASSWORD: '${{ secrets.ACCOUNTS_API_CLIENT_ADMIN_PASSWORD_TEST }}', + OP_SERVICE_ACCOUNT_TOKEN: '${{ secrets.PULUMI_1PASSWORD_TEST }}', + } +); { + /** + * Creates a GitHub Actions step to preview Pulumi infrastructure changes. 
+ * + * Shows a preview of infrastructure changes without applying them, useful for PR reviews. + * + * @param {string} stack - Pulumi stack name (e.g., 'test', 'prod') + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [stepName='pulumi-preview-' + stack] - Name for the GitHub Actions step + * @param {object} [environmentVariables={}] - Additional environment variables + * @returns {steps} - GitHub Actions step for Pulumi preview with PR comments + */ pulumiPreview( stack, pulumiDir=null, @@ -36,11 +74,18 @@ local pulumiSetupSteps = upsert: true, refresh: true, }, - env={ - PULUMI_CONFIG_PASSPHRASE: '${{ secrets.PULUMI_CONFIG_PASSPHRASE }}', - } + environmentVariables, + env=pulumiDefaultEnvironment(stack) + environmentVariables, ), + /** + * Creates a GitHub Actions step to deploy Pulumi infrastructure changes. + * + * @param {string} stack - Pulumi stack name (e.g., 'test', 'prod') + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [stepName='pulumi-deploy-' + stack] - Name for the GitHub Actions step + * @param {object} [environmentVariables={}] - Additional environment variables + * @returns {steps} - GitHub Actions step for Pulumi deployment + */ pulumiDeploy( stack, pulumiDir=null, @@ -57,11 +102,20 @@ local pulumiSetupSteps = upsert: true, refresh: true, }, - env={ - PULUMI_CONFIG_PASSPHRASE: '${{ secrets.PULUMI_CONFIG_PASSPHRASE }}', - } + environmentVariables, + env=pulumiDefaultEnvironment(stack) + environmentVariables, ), + /** + * Creates a GitHub Actions step to destroy Pulumi infrastructure. + * + * SAFETY: Only works on stacks containing 'pr-' to prevent accidental production destruction. 
+ * + * @param {string} stack - Pulumi stack name (must contain 'pr-' for safety) + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [stepName='pulumi-destroy-' + stack] - Name for the GitHub Actions step + * @param {object} [environmentVariables={}] - Additional environment variables + * @returns {steps} - GitHub Actions step for Pulumi stack destruction + */ pulumiDestroy( stack, pulumiDir=null, @@ -81,11 +135,24 @@ local pulumiSetupSteps = 'work-dir': pulumiDir, refresh: true, }, - env={ - PULUMI_CONFIG_PASSPHRASE: '${{ secrets.PULUMI_CONFIG_PASSPHRASE }}', - } + environmentVariables, + env=pulumiDefaultEnvironment(stack) + environmentVariables, ), + /** + * Creates a complete GitHub Actions job to preview Pulumi changes with Node.js setup. + * + * @param {string} stack - Pulumi stack name + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json for dependencies + * @param {string} [gitCloneRef='${{ github.event.pull_request.head.sha }}'] - Git reference to checkout + * @param {string} [cacheName=null] - Cache key for dependency caching + * @param {string} [image=images.default_pulumi_node_image] - Container image for the job + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps before Pulumi preview + * @param {boolean} [ignoreEngines=false] - Whether to ignore Node.js engine requirements + * @returns {jobs} - Complete GitHub Actions job for Pulumi preview + */ pulumiPreviewJob( stack, pulumiDir=null, @@ -96,19 +163,34 @@ local pulumiSetupSteps = yarnNpmSource=null, environmentVariables={}, additionalSetupSteps=[], + ignoreEngines=false, ):: base.ghJob( 'pulumi-preview-' + stack, image=image, useCredentials=false, steps=[ - 
yarn.checkoutAndYarn(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource), + yarn.checkoutAndYarn(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource, ignoreEngines=ignoreEngines), pulumiSetupSteps, additionalSetupSteps, self.pulumiPreview(stack, pulumiDir=pulumiDir, environmentVariables=environmentVariables), ], ), + /** + * Creates a GitHub Actions job to preview Pulumi changes for test environment. + * + * @param {string} [stack='test'] - Test stack name + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {string} [gitCloneRef='${{ github.event.pull_request.head.sha }}'] - Git reference + * @param {string} [cacheName=null] - Cache key for dependencies + * @param {string} [image=images.default_pulumi_node_image] - Container image + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps + * @returns {jobs} - GitHub Actions job for test environment Pulumi preview + */ pulumiPreviewTestJob( stack='test', pulumiDir=null, @@ -132,6 +214,20 @@ local pulumiSetupSteps = additionalSetupSteps=additionalSetupSteps, ), + /** + * Creates a GitHub Actions job to preview Pulumi changes for production environment. 
+ * + * @param {string} [stack='prod'] - Production stack name + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {string} [gitCloneRef='${{ github.event.pull_request.head.sha }}'] - Git reference + * @param {string} [cacheName=null] - Cache key for dependencies + * @param {string} [image=images.default_pulumi_node_image] - Container image + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps + * @returns {jobs} - GitHub Actions job for production Pulumi preview + */ pulumiPreviewProdJob( stack='prod', pulumiDir=null, @@ -155,6 +251,24 @@ local pulumiSetupSteps = additionalSetupSteps=additionalSetupSteps, ), + /** + * Creates a GitHub Actions job to preview Pulumi changes for both test and production stacks. + * + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json for dependencies + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {string} [gitCloneRef='${{ github.event.pull_request.head.sha }}'] - Git reference to checkout + * @param {string} [cacheName=null] - Cache key for dependency caching + * @param {string} [image=images.default_pulumi_node_image] - Container image for the job + * @param {string} [productionStack='prod'] - Production stack name + * @param {string} [testStack='test'] - Test stack name + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps before Pulumi preview + * @param {boolean} [ignoreEngines=false] - Whether to ignore Node.js engine requirements + * @param {string} [packageManager='yarn'] - Package manager to use ('yarn' or 'pnpm') + * @param {array} 
[pnpmInstallArgs=[]] - Additional arguments for pnpm install + * @returns {jobs} - GitHub Actions job that previews both test and production stacks + */ pulumiPreviewTestAndProdJob( pulumiDir=null, yarnDir=null, @@ -166,13 +280,19 @@ local pulumiSetupSteps = testStack='test', environmentVariables={}, additionalSetupSteps=[], + ignoreEngines=false, + packageManager='yarn', + pnpmInstallArgs=[], ):: base.ghJob( 'pulumi-preview', image=image, useCredentials=false, steps=[ - yarn.checkoutAndYarn(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource), + ( + if packageManager == 'yarn' then yarn.checkoutAndYarn(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource, ignoreEngines=ignoreEngines) + else if packageManager == 'pnpm' then pnpm.checkoutAndPnpm(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource, pnpmInstallArgs=pnpmInstallArgs) + ), pulumiSetupSteps, additionalSetupSteps, self.pulumiPreview(testStack, pulumiDir=pulumiDir, environmentVariables=environmentVariables), @@ -180,6 +300,26 @@ local pulumiSetupSteps = ], ), + /** + * Creates a GitHub Actions job to deploy Pulumi infrastructure changes. 
+ * + * @param {string} stack - Pulumi stack name to deploy + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json for dependencies + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {string} [gitCloneRef='${{ github.sha }}'] - Git reference to checkout + * @param {string} [cacheName=null] - Cache key for dependency caching + * @param {string} [ifClause=null] - Conditional expression for job execution + * @param {string} [image=images.default_pulumi_node_image] - Container image for the job + * @param {string} [jobName='pulumi-deploy-' + stack] - Name for the GitHub Actions job + * @param {boolean} [notifyOnFailure=true] - Whether to send Slack notifications on failure + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps before deployment + * @param {boolean} [ignoreEngines=false] - Whether to ignore Node.js engine requirements + * @param {string} [packageManager='yarn'] - Package manager to use ('yarn' or 'pnpm') + * @param {array} [pnpmInstallArgs=[]] - Additional arguments for pnpm install + * @returns {jobs} - GitHub Actions job for Pulumi deployment with failure notifications + */ pulumiDeployJob( stack, pulumiDir=null, @@ -193,6 +333,9 @@ local pulumiSetupSteps = notifyOnFailure=true, environmentVariables={}, additionalSetupSteps=[], + ignoreEngines=false, + packageManager='yarn', + pnpmInstallArgs=[], ):: base.ghJob( name=jobName, @@ -200,7 +343,10 @@ local pulumiSetupSteps = image=image, useCredentials=false, steps=[ - yarn.checkoutAndYarn(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource), + ( + if packageManager == 'yarn' then yarn.checkoutAndYarn(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource, ignoreEngines=ignoreEngines) + else if 
packageManager == 'pnpm' then pnpm.checkoutAndPnpm(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource, pnpmInstallArgs=pnpmInstallArgs) + ), pulumiSetupSteps, additionalSetupSteps, self.pulumiDeploy(stack, pulumiDir=pulumiDir, stepName=jobName, environmentVariables=environmentVariables), @@ -208,6 +354,23 @@ local pulumiSetupSteps = ] ), + /** + * Creates a GitHub Actions job to deploy Pulumi infrastructure to test environment. + * + * @param {string} [stack='test'] - Test stack name + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {string} [gitCloneRef='${{ github.sha }}'] - Git reference to checkout + * @param {string} [cacheName=null] - Cache key for dependency caching + * @param {string} [image=images.default_pulumi_node_image] - Container image + * @param {string} [ifClause="${{ github.event.deployment.environment == 'test' }}"] - Conditional for test deployments + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps + * @param {boolean} [ignoreEngines=false] - Whether to ignore Node.js engine requirements + * @param {string} [packageManager='yarn'] - Package manager to use + * @returns {jobs} - GitHub Actions job for test environment deployment + */ pulumiDeployTestJob( stack='test', pulumiDir=null, @@ -219,6 +382,8 @@ local pulumiSetupSteps = ifClause="${{ github.event.deployment.environment == 'test' }}", environmentVariables={}, additionalSetupSteps=[], + ignoreEngines=false, + packageManager='yarn', ):: self.pulumiDeployJob( stack, @@ -231,8 +396,27 @@ local pulumiSetupSteps = image=image, environmentVariables=environmentVariables, additionalSetupSteps=additionalSetupSteps, + ignoreEngines=ignoreEngines, + 
packageManager=packageManager, ), + /** + * Creates a GitHub Actions job to deploy Pulumi infrastructure to production environment. + * + * @param {string} [stack='prod'] - Production stack name + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {string} [gitCloneRef='${{ github.sha }}'] - Git reference to checkout + * @param {string} [cacheName=null] - Cache key for dependency caching + * @param {string} [image=images.default_pulumi_node_image] - Container image + * @param {string} [ifClause="${{ github.event.deployment.environment == 'prod' || github.event.deployment.environment == 'production' }}"] - Conditional for production deployments + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps + * @param {boolean} [ignoreEngines=false] - Whether to ignore Node.js engine requirements + * @param {string} [packageManager='yarn'] - Package manager to use + * @returns {jobs} - GitHub Actions job for production deployment + */ pulumiDeployProdJob( stack='prod', pulumiDir=null, @@ -244,6 +428,8 @@ local pulumiSetupSteps = ifClause="${{ github.event.deployment.environment == 'prod' || github.event.deployment.environment == 'production' }}", environmentVariables={}, additionalSetupSteps=[], + ignoreEngines=false, + packageManager='yarn', ):: self.pulumiDeployJob( stack, @@ -256,8 +442,32 @@ local pulumiSetupSteps = image=image, environmentVariables=environmentVariables, additionalSetupSteps=additionalSetupSteps, + ignoreEngines=ignoreEngines, + packageManager=packageManager, ), + /** + * Creates a GitHub Actions job to destroy Pulumi infrastructure. + * + * SAFETY: Only works on stacks containing 'pr-' to prevent accidental production destruction. 
+ * + * @param {string} stack - Pulumi stack name to destroy (must contain 'pr-' for safety) + * @param {string} [pulumiDir=null] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json for dependencies + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {string} [gitCloneRef='${{ github.sha }}'] - Git reference to checkout + * @param {string} [cacheName=null] - Cache key for dependency caching + * @param {string} [ifClause=null] - Conditional expression for job execution + * @param {string} [image=images.default_pulumi_node_image] - Container image for the job + * @param {string} [jobName='pulumi-destroy-' + stack] - Name for the GitHub Actions job + * @param {boolean} [notifyOnFailure=true] - Whether to send Slack notifications on failure + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps before destruction + * @param {boolean} [ignoreEngines=false] - Whether to ignore Node.js engine requirements + * @param {string} [packageManager='yarn'] - Package manager to use ('yarn' or 'pnpm') + * @param {array} [pnpmInstallArgs=[]] - Additional arguments for pnpm install + * @returns {jobs} - GitHub Actions job for Pulumi infrastructure destruction + */ pulumiDestroyJob( stack, pulumiDir=null, @@ -271,6 +481,9 @@ local pulumiSetupSteps = notifyOnFailure=true, environmentVariables={}, additionalSetupSteps=[], + ignoreEngines=false, + packageManager='yarn', + pnpmInstallArgs=[], ):: base.ghJob( name=jobName, @@ -278,7 +491,10 @@ local pulumiSetupSteps = image=image, useCredentials=false, steps=[ - yarn.checkoutAndYarn(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource), + ( + if packageManager == 'yarn' then yarn.checkoutAndYarn(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource, 
ignoreEngines=ignoreEngines) + else if packageManager == 'pnpm' then pnpm.checkoutAndPnpm(ref=gitCloneRef, cacheName=cacheName, fullClone=false, workingDirectory=yarnDir, source=yarnNpmSource, pnpmInstallArgs=pnpmInstallArgs) + ), pulumiSetupSteps, additionalSetupSteps, self.pulumiDestroy(stack, pulumiDir=pulumiDir, stepName=jobName, environmentVariables=environmentVariables), @@ -286,9 +502,29 @@ local pulumiSetupSteps = ], ), - + /** + * Creates a complete set of Pulumi pipelines for preview and deployment workflows. + * + * Generates two pipelines: + * 1. 'pulumi-preview' - Triggered on pull requests to preview changes + * 2. 'pulumi-deploy' - Triggered on deployment events to deploy infrastructure + * + * @param {string} [pulumiDir='.'] - Directory containing Pulumi project files + * @param {string} [yarnDir=null] - Directory containing package.json for dependencies + * @param {string} [yarnNpmSource=null] - Custom npm registry source + * @param {string} [cacheName=null] - Cache key for dependency caching + * @param {boolean} [deployTestWithProd=false] - Whether test deployments should also trigger on prod events + * @param {string} [image=images.default_pulumi_node_image] - Container image for jobs + * @param {string} [testStack='test'] - Test stack name + * @param {string} [productionStack='prod'] - Production stack name + * @param {object} [environmentVariables={}] - Additional environment variables + * @param {array} [additionalSetupSteps=[]] - Extra setup steps for all jobs + * @param {boolean} [ignoreEngines=false] - Whether to ignore Node.js engine requirements + * @returns {workflows} - Complete set of Pulumi preview and deployment pipelines + */ pulumiDefaultPipeline( pulumiDir='.', + packageManager='yarn', yarnDir=null, yarnNpmSource=null, cacheName=null, @@ -298,12 +534,14 @@ local pulumiSetupSteps = productionStack='prod', environmentVariables={}, additionalSetupSteps=[], + ignoreEngines=false, ):: base.pipeline( 'pulumi-preview', [ 
self.pulumiPreviewTestAndProdJob( pulumiDir=pulumiDir, + packageManager=packageManager, yarnDir=yarnDir, yarnNpmSource=yarnNpmSource, cacheName=cacheName, @@ -312,6 +550,7 @@ local pulumiSetupSteps = testStack=testStack, environmentVariables=environmentVariables, additionalSetupSteps=additionalSetupSteps, + ignoreEngines=ignoreEngines, ), ], ) + @@ -320,16 +559,19 @@ local pulumiSetupSteps = [ self.pulumiDeployTestJob( pulumiDir=pulumiDir, + packageManager=packageManager, yarnDir=yarnDir, yarnNpmSource=yarnNpmSource, cacheName=cacheName, image=image, environmentVariables=environmentVariables, additionalSetupSteps=additionalSetupSteps, - ifClause=if deployTestWithProd then "${{ github.event.deployment.environment == 'test' || github.event.deployment.environment == 'prod' || github.event.deployment.environment == 'production' }}" else "${{ github.event.deployment.environment == 'test' }}" + ifClause=if deployTestWithProd then "${{ github.event.deployment.environment == 'test' || github.event.deployment.environment == 'prod' || github.event.deployment.environment == 'production' }}" else "${{ github.event.deployment.environment == 'test' }}", + ignoreEngines=ignoreEngines ), self.pulumiDeployProdJob( pulumiDir=pulumiDir, + packageManager=packageManager, yarnDir=yarnDir, yarnNpmSource=yarnNpmSource, cacheName=cacheName, @@ -337,6 +579,7 @@ local pulumiSetupSteps = stack=productionStack, environmentVariables=environmentVariables, additionalSetupSteps=additionalSetupSteps, + ignoreEngines=ignoreEngines ), ], event='deployment', diff --git a/.github/jsonnet/ruby.jsonnet b/.github/jsonnet/ruby.jsonnet index 82819c2a..aae8da1c 100644 --- a/.github/jsonnet/ruby.jsonnet +++ b/.github/jsonnet/ruby.jsonnet @@ -7,6 +7,27 @@ local notifications = import 'notifications.jsonnet'; local servicesImport = import 'services.jsonnet'; { + /** + * Creates a complete PR deployment pipeline for Ruby/Rails applications. 
+ * + * Handles Docker image building, database cloning, migrations, and Helm deployment + * for pull request environments with automatic cleanup. + * + * @param {string} serviceName - Name of the Ruby service + * @param {string} [dockerImageName='backend-' + serviceName] - Docker image name to build + * @param {object} [helmDeployOptions] - Helm deployment configuration + * @param {object} helmDeployOptions.ingress - Ingress configuration + * @param {object} helmDeployOptions.cronjob - Cronjob configuration + * @param {object} [mysqlCloneOptions={}] - Database cloning options for PR isolation + * @param {boolean} mysqlCloneOptions.enabled - Whether to clone database for PR + * @param {string} mysqlCloneOptions.database_name_target - Target PR database name + * @param {string} mysqlCloneOptions.database_name_source - Source database to clone + * @param {object} [migrateOptions={}] - Rails migration options + * @param {boolean} migrateOptions.enabled - Whether to run migrations + * @param {string} migrateOptions.RAILS_ENV - Rails environment + * @param {string} rubyImageName - Ruby base image for the job (required) + * @returns {workflows} - Complete GitHub Actions pipeline for Ruby PR deployment + */ rubyDeployPRPipeline( serviceName, dockerImageName='backend-' + serviceName, @@ -67,6 +88,17 @@ local servicesImport = import 'services.jsonnet'; event='pull_request', ), + /** + * Creates steps to run Rails database migrations and seeding. 
+ * + * @param {object} migrateOptions - Rails migration configuration + * @param {string} migrateOptions.RAILS_DB_HOST - Database host + * @param {string} migrateOptions.RAILS_DB_NAME - Database name + * @param {string} migrateOptions.RAILS_DB_PASSWORD - Database password + * @param {string} migrateOptions.RAILS_DB_USER - Database user + * @param {string} migrateOptions.SECRET_KEY_BASE - Rails secret key + * @returns {steps} - GitHub Actions steps for bundle install, migrate, and seed + */ rubyMigrate(migrateOptions):: local env = { BUNDLE_GITHUB__COM: misc.secret('BUNDLE_GITHUB__COM'), @@ -86,12 +118,25 @@ local servicesImport = import 'services.jsonnet'; ] , + /** + * Creates a job to generate and deploy API documentation for Ruby applications. + * + * Generates Rails API docs and uploads them to Google Cloud Storage for hosting. + * + * @param {string} serviceName - Name of the service for documentation + * @param {boolean} [enableDatabase=false] - Whether to enable database service + * @param {string} [generateCommands=null] - Custom commands for doc generation + * @param {object} [extra_env={}] - Additional environment variables + * @param {object} [services] - Database services configuration + * @param {string} rubyImageName - Ruby base image for the job (required) + * @returns {jobs} - GitHub Actions job for API documentation deployment + */ deployApiDocs( serviceName, enableDatabase=false, generateCommands=null, extra_env={}, - services={ db: servicesImport.mysql57service(database='ci', password='ci', root_password='1234test', username='ci') }, + services={ db: servicesImport.mysql8service(database='ci', password='ci', root_password='1234test', username='ci', version='8.4') }, rubyImageName=null, ):: assert rubyImageName != null; @@ -132,12 +177,30 @@ local servicesImport = import 'services.jsonnet'; services=(if enableDatabase then services else null), ), + /** + * Creates a step to set version information in a file. 
+ * + * @param {string} [version='${{ github.event.pull_request.head.sha }}'] - Version string to write + * @param {string} [file='VERSION'] - Target file for version information + * @returns {steps} - GitHub Actions step that writes version to file + */ setVerionFile(version='${{ github.event.pull_request.head.sha }}', file='VERSION'):: base.step( 'set-version', 'echo "' + version + '" > ' + file + ';\n echo "Generated version number:";\n cat ' + file + ';\n ' ), + /** + * Creates a pipeline to automatically clean up PR deployments when PRs are closed. + * + * @param {string} serviceName - Name of the Ruby service to clean up + * @param {object} [options={}] - Helm cleanup options + * @param {string} [helmPath='./helm/' + serviceName] - Path to Helm chart + * @param {string} [deploymentName=serviceName + '-pr-${{ github.event.number }}'] - PR deployment name + * @param {object} [mysqlDeleteOptions={}] - Database cleanup options + * @param {boolean} mysqlDeleteOptions.enabled - Whether to delete PR database + * @returns {workflows} - GitHub Actions pipeline for automatic PR cleanup + */ rubyDeletePRPipeline( serviceName, options={}, @@ -165,6 +228,18 @@ local servicesImport = import 'services.jsonnet'; }, ), + /** + * Creates a GitHub Actions job for Ruby application deployment to test environment. 
+ * + * @param {string} serviceName - Name of the Ruby service + * @param {object} [options={}] - Helm deployment options + * @param {string} [helmPath='./helm/' + serviceName] - Path to Helm chart + * @param {string} [deploymentName=serviceName + '-master'] - Test deployment name + * @param {string} image - Container image for the job (required) + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @param {object} [migrateOptions={}] - Rails migration options + * @returns {jobs} - GitHub Actions job for test environment deployment + */ rubyDeployTestJob( serviceName, options={}, @@ -198,6 +273,20 @@ local servicesImport = import 'services.jsonnet'; (if migrateOptionsWithDefaults.enabled then { 'cloudsql-proxy': servicesImport.cloudsql_proxy_service(migrateOptionsWithDefaults.database) } else {}) ), + /** + * Creates a GitHub Actions job for Ruby application deployment to production. + * + * Includes automatic failure notifications via Slack on deployment errors. 
+ * + * @param {string} serviceName - Name of the Ruby service + * @param {object} [options={}] - Helm deployment options + * @param {string} [helmPath='./helm/' + serviceName] - Path to Helm chart + * @param {string} [deploymentName=serviceName + '-prod'] - Production deployment name + * @param {string} image - Container image for the job (required) + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @param {object} [migrateOptions={}] - Rails migration options + * @returns {jobs} - GitHub Actions job for production deployment with failure notifications + */ rubyDeployProdJob( serviceName, options={}, diff --git a/.github/jsonnet/services.jsonnet b/.github/jsonnet/services.jsonnet index 2f3c879a..aaa1b7de 100644 --- a/.github/jsonnet/services.jsonnet +++ b/.github/jsonnet/services.jsonnet @@ -2,9 +2,20 @@ local images = import 'images.jsonnet'; local misc = import 'misc.jsonnet'; { - mysql8service(database=null, password=null, root_password=null, username=null, port='3306'):: + /** + * Creates a MySQL 8.4 or 8.0 service container for GitHub Actions workflows. 
+ * + * @param {string} [database=null] - Name of the database to create + * @param {string} [password=null] - Password for the MySQL user + * @param {string} [root_password=null] - Password for the MySQL root user + * @param {string} [username=null] - MySQL username to create + * @param {string} [port='3306'] - Port to expose the MySQL service on + * @param {string} [version='8.4'] - MySQL version to use ('8.0' or '8.4') + * @returns {object} - MySQL service configuration for GitHub Actions + */ + mysql8service(database=null, password=null, root_password=null, username=null, port='3306', version="8.4"):: { - image: images.default_mysql8_image, + image: (if version == "8.0" then images.default_mysql8_image else images.default_mysql84_image), credentials: { username: '_json_key', password: misc.secret('docker_gcr_io'), @@ -20,6 +31,15 @@ local misc = import 'misc.jsonnet'; ports: [port + ':' + port], }, + /** + * Creates a Cloud SQL Proxy service for connecting to Google Cloud SQL instances. + * + * @param {object} database - Database configuration object containing project, region, and server + * @param {string} database.project - GCP project ID containing the Cloud SQL instance + * @param {string} database.region - GCP region/zone where the Cloud SQL instance is located + * @param {string} database.server - Cloud SQL instance name + * @returns {object} - Cloud SQL Proxy service configuration for GitHub Actions + */ cloudsql_proxy_service(database):: { image: images.default_cloudsql_image, @@ -36,21 +56,46 @@ local misc = import 'misc.jsonnet'; ports: ['3306:3306'], }, + /** + * Creates a Redis service container for GitHub Actions workflows. + * + * @returns {object} - Redis service configuration for GitHub Actions (uses default Redis image) + */ redis_service():: { image: images.default_redis_image, ports: ['6379:6379'], }, + /** + * Creates a Redis 7 service container for GitHub Actions workflows. 
+ * + * @param {string} [port='6379'] - Port to expose the Redis service on + * @returns {object} - Redis 7 service configuration for GitHub Actions + */ redis_service_v7(port='6379'):: { - image: 'redis:7.0.15', + image: 'mirror.gcr.io/redis:7.0.15', ports: [port + ':' + port], }, + /** + * Creates a Google Cloud Pub/Sub emulator service container for GitHub Actions workflows. + * + * @returns {object} - Pub/Sub emulator service configuration for GitHub Actions + */ pubsub_service():: { image: images.default_pubsub_image, ports: ['8681:8681'], }, + /** + * Creates a MongoDB service container configured with replica set for GitHub Actions workflows. + * + * @param {string} service - Name of the service (used for naming the MongoDB service) + * @param {string} [name='mongodb-' + service] - Custom name for the MongoDB service + * @param {string} [username='root'] - MongoDB root username + * @param {string} [password='therootpass'] - MongoDB root password + * @returns {object} - MongoDB service configuration with replica set enabled and health checks + */ serviceMongodb( service, name='mongodb-' + service, @@ -69,6 +114,8 @@ local misc = import 'misc.jsonnet'; MONGO_INITDB_ROOT_PASSWORD: password, MONGO_REPLICA_SET_NAME: 'rs0', }, + options: + '--health-cmd "bash -c \'echo \\\"rs.status().ok\\\" | /usr/bin/mongosh \\\"mongodb://' + username + ':' + password + '@localhost\\\" --quiet\'" --health-interval 1s --health-timeout 1s --health-retries 10', }, }, } diff --git a/.github/jsonnet/yarn.jsonnet b/.github/jsonnet/yarn.jsonnet index 68b51cd8..e2cac9d8 100644 --- a/.github/jsonnet/yarn.jsonnet +++ b/.github/jsonnet/yarn.jsonnet @@ -4,24 +4,48 @@ local images = import 'images.jsonnet'; local misc = import 'misc.jsonnet'; { - yarn(ifClause=null, prod=false, workingDirectory=null):: + /** + * Creates a step to run yarn install with caching and retry logic. 
+ * + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {boolean} [prod=false] - Whether to install only production dependencies + * @param {string} [workingDirectory=null] - Directory to run yarn in + * @param {boolean} [ignoreEngines=false] - Whether to ignore engine version checks + * @param {object} [env={}] - Additional environment variables for the step + * @returns {steps} - Array containing a single step object + */ + yarn(ifClause=null, prod=false, workingDirectory=null, ignoreEngines=false, env={}):: base.step( 'yarn' + (if prod then '-prod' else ''), - run='yarn --cache-folder .yarncache --frozen-lockfile --prefer-offline' + (if prod then ' --prod' else '') + ' || yarn --cache-folder .yarncache --frozen-lockfile --prefer-offline' + (if prod then ' --prod' else ''), + run='yarn --cache-folder .yarncache --frozen-lockfile --prefer-offline' + (if ignoreEngines then ' --ignore-engines' else '') + (if prod then ' --prod' else '') + ' || yarn --cache-folder .yarncache --frozen-lockfile --prefer-offline' + (if ignoreEngines then ' --ignore-engines' else '') + (if prod then ' --prod' else ''), ifClause=ifClause, - workingDirectory=workingDirectory + workingDirectory=workingDirectory, + env=env, ), + /** + * Creates a step to configure npm token for Gynzy registry (alias for setGynzyNpmToken). + * + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {string} [workingDirectory=null] - Directory to create .npmrc file in + * @returns {steps} - Array containing a single step object + */ setNpmToken(ifClause=null, workingDirectory=null):: self.setGynzyNpmToken(ifClause=ifClause, workingDirectory=workingDirectory), + /** + * Creates a step to configure npm token for Gynzy's private registry. 
+ * + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {string} [workingDirectory=null] - Directory to create .npmrc file in + * @returns {steps} - Array containing a single step object + */ setGynzyNpmToken(ifClause=null, workingDirectory=null):: base.step( 'set gynzy npm_token', run= ||| cat <<EOF > .npmrc - registry=https://npm.gynzy.net/ - always-auth="true" + @gynzy:registry=https://npm.gynzy.net/ "//npm.gynzy.net/:_authToken"="${NPM_TOKEN}" EOF |||, @@ -32,6 +56,13 @@ local misc = import 'misc.jsonnet'; workingDirectory=workingDirectory, ), + /** + * Creates a step to configure npm token for GitHub Package Registry. + * + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {string} [workingDirectory=null] - Directory to create .npmrc file in + * @returns {steps} - Array containing a single step object + */ setGithubNpmToken(ifClause=null, workingDirectory=null):: base.step( 'set github npm_token', @@ -39,7 +70,6 @@ local misc = import 'misc.jsonnet'; ||| cat <<EOF > .npmrc @gynzy:registry=https://npm.pkg.github.com - always-auth=true //npm.pkg.github.com/:_authToken=${NODE_AUTH_TOKEN} EOF |||, @@ -50,13 +80,34 @@ local misc = import 'misc.jsonnet'; workingDirectory=workingDirectory, ), - checkoutAndYarn(cacheName=null, ifClause=null, fullClone=false, ref=null, prod=false, workingDirectory=null, source='gynzy'):: + /** + * Creates a complete workflow combining checkout, npm token setup, cache fetching, and yarn install. 
+ * + * @param {string} [cacheName=null] - Name of the cache to fetch/store yarn dependencies + * @param {string} [ifClause=null] - Conditional expression to determine if steps should run + * @param {boolean} [fullClone=false] - Whether to perform a full git clone or shallow clone + * @param {string} [ref=null] - Git ref to checkout (branch, tag, or commit) + * @param {boolean} [prod=false] - Whether to install only production dependencies + * @param {string} [workingDirectory=null] - Directory to run operations in + * @param {string} [source='gynzy'] - Registry source ('gynzy' or 'github') + * @param {boolean} [ignoreEngines=false] - Whether to ignore engine version checks + * @returns {steps} - Array of step objects for the complete workflow + */ + checkoutAndYarn(cacheName=null, ifClause=null, fullClone=false, ref=null, prod=false, workingDirectory=null, source='gynzy', ignoreEngines=false):: misc.checkout(ifClause=ifClause, fullClone=fullClone, ref=ref) + (if source == 'gynzy' then self.setGynzyNpmToken(ifClause=ifClause, workingDirectory=workingDirectory) else []) + (if source == 'github' then self.setGithubNpmToken(ifClause=ifClause, workingDirectory=workingDirectory) else []) + (if cacheName == null then [] else self.fetchYarnCache(cacheName, ifClause=ifClause, workingDirectory=workingDirectory)) + - self.yarn(ifClause=ifClause, prod=prod, workingDirectory=workingDirectory), + self.yarn(ifClause=ifClause, prod=prod, workingDirectory=workingDirectory, ignoreEngines=ignoreEngines), + /** + * Creates steps to fetch yarn cache from cloud storage. 
+ * + * @param {string} cacheName - Name of the cache to fetch + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @param {string} [workingDirectory=null] - Directory to extract cache to + * @returns {steps} - Array of step objects for cache fetching + */ fetchYarnCache(cacheName, ifClause=null, workingDirectory=null):: cache.fetchCache( cacheName=cacheName, @@ -66,7 +117,17 @@ local misc = import 'misc.jsonnet'; workingDirectory=workingDirectory ), - updateYarnCachePipeline(cacheName, appsDir='packages', image=null, useCredentials=null):: + /** + * Creates a complete pipeline to update yarn cache on production deployments. + * + * @param {string} cacheName - Name of the cache to update + * @param {string} [appsDir='packages'] - Directory containing applications with node_modules + * @param {string} [image=null] - Docker image to use for the job + * @param {boolean} [useCredentials=null] - Whether to use Docker registry credentials + * @param {boolean} [ignoreEngines=false] - Whether to ignore engine version checks + * @returns {workflows} - Complete GitHub Actions pipeline configuration + */ + updateYarnCachePipeline(cacheName, appsDir='packages', image=null, useCredentials=null, ignoreEngines=false):: base.pipeline( 'update-yarn-cache', [ @@ -78,7 +139,7 @@ local misc = import 'misc.jsonnet'; steps=[ misc.checkout() + self.setGynzyNpmToken() + - self.yarn(), + self.yarn(ignoreEngines=ignoreEngines), base.action( 'setup auth', 'google-github-actions/auth@v2', @@ -98,18 +159,13 @@ local misc = import 'misc.jsonnet'; event='deployment', ), - configureGoogleAuth(secret):: base.step( - 'activate google service account', - run= - ||| - printf '%s' "${SERVICE_JSON}" > gce.json; - gcloud auth activate-service-account --key-file=gce.json; - gcloud --quiet auth configure-docker; - rm gce.json - |||, - env={ SERVICE_JSON: secret }, - ), - + /** + * Creates a step to publish a package to npm registry with version handling. 
+ * + * @param {boolean} [isPr=true] - Whether this is a PR build (affects versioning) + * @param {string} [ifClause=null] - Conditional expression to determine if step should run + * @returns {steps} - Array containing a single step object + */ yarnPublish(isPr=true, ifClause=null):: base.step( 'publish', @@ -148,6 +204,14 @@ local misc = import 'misc.jsonnet'; ifClause=ifClause, ), + /** + * Creates steps to publish a package to multiple repositories. + * + * @param {boolean} isPr - Whether this is a PR build (affects versioning) + * @param {array} repositories - List of repository types ('gynzy' or 'github') + * @param {string} [ifClause=null] - Conditional expression to determine if steps should run + * @returns {steps} - Array of step objects for publishing to all repositories + */ yarnPublishToRepositories(isPr, repositories, ifClause=null):: (std.flatMap(function(repository) if repository == 'gynzy' then [self.setGynzyNpmToken(ifClause=ifClause), self.yarnPublish(isPr=isPr, ifClause=ifClause)] @@ -156,8 +220,23 @@ local misc = import 'misc.jsonnet'; repositories)), + /** + * Creates a GitHub Actions job for publishing preview packages from PRs. 
+ * + * @param {string} [image='mirror.gcr.io/node:22'] - Docker image to use for the job + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @param {string} [gitCloneRef='${{ github.event.pull_request.head.sha }}'] - Git reference to checkout + * @param {array} [buildSteps=[base.step('build', 'yarn build')]] - Build steps to run before publishing + * @param {boolean} [checkVersionBump=true] - Whether to check if package version was bumped + * @param {array} [repositories=['gynzy']] - List of repositories to publish to + * @param {boolean|string} [onChangedFiles=false] - Whether to only run on changed files (or glob pattern) + * @param {string} [changedFilesHeadRef=null] - Head reference for changed files comparison + * @param {string} [changedFilesBaseRef=null] - Base reference for changed files comparison + * @param {string} [runsOn=null] - Runner type to use + * @returns {jobs} - GitHub Actions job definition + */ yarnPublishPreviewJob( - image='mirror.gcr.io/node:18', + image='mirror.gcr.io/node:22', useCredentials=false, gitCloneRef='${{ github.event.pull_request.head.sha }}', buildSteps=[base.step('build', 'yarn build')], @@ -172,7 +251,7 @@ local misc = import 'misc.jsonnet'; base.ghJob( 'yarn-publish-preview', runsOn=runsOn, - image='mirror.gcr.io/node:18', + image='mirror.gcr.io/node:22', useCredentials=false, steps= [self.checkoutAndYarn(ref=gitCloneRef, fullClone=false)] + @@ -187,8 +266,23 @@ local misc = import 'misc.jsonnet'; permissions={ packages: 'write', contents: 'read', 'pull-requests': 'read' }, ), + /** + * Creates a GitHub Actions job for publishing packages from main branch or releases. 
+ * + * @param {string} [image='mirror.gcr.io/node:22'] - Docker image to use for the job + * @param {boolean} [useCredentials=false] - Whether to use Docker registry credentials + * @param {string} [gitCloneRef='${{ github.sha }}'] - Git reference to checkout + * @param {array} [buildSteps=[base.step('build', 'yarn build')]] - Build steps to run before publishing + * @param {array} [repositories=['gynzy']] - List of repositories to publish to + * @param {boolean|string} [onChangedFiles=false] - Whether to only run on changed files (or glob pattern) + * @param {string} [changedFilesHeadRef=null] - Head reference for changed files comparison + * @param {string} [changedFilesBaseRef=null] - Base reference for changed files comparison + * @param {string} [ifClause=null] - Conditional expression to determine if job should run + * @param {string} [runsOn=null] - Runner type to use + * @returns {jobs} - GitHub Actions job definition + */ yarnPublishJob( - image='mirror.gcr.io/node:18', + image='mirror.gcr.io/node:22', useCredentials=false, gitCloneRef='${{ github.sha }}', buildSteps=[base.step('build', 'yarn build')], @@ -202,7 +296,7 @@ local misc = import 'misc.jsonnet'; local stepIfClause = (if onChangedFiles != false then "steps.changes.outputs.package == 'true'" else null); base.ghJob( 'yarn-publish', - image='mirror.gcr.io/node:18', + image='mirror.gcr.io/node:22', runsOn=runsOn, useCredentials=false, steps= diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index a09b81fa..7cb207e0 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -67,10 +67,16 @@ - "name": "git workaround" "run": "git config --global --add safe.directory $PWD" - "name": "check-jsonnet-diff" - "run": "git diff --exit-code" - - "if": "failure()" - "name": "possible-causes-for-error" - "run": "echo \"Possible causes: \n1. You updated jsonnet files, but did not regenerate the workflows. \nTo fix, run 'yarn github:generate' locally and commit the changes. 
If this helps, check if your pre-commit hooks work.\n2. You used the wrong jsonnet binary. In this case, the newlines at the end of the files differ.\nTo fix, install the go binary. On mac, run 'brew uninstall jsonnet && brew install jsonnet-go'\"" + "run": | echo "If this step fails, look at the end of the logs for possible causes"; git diff --exit-code && exit 0; echo "Error: mismatch between jsonnet <-> github workflows"; echo "Possible reasons:"; echo " - You updated jsonnet files, but did not regenerate the workflows."; echo " To regenerate jsonnet run: 'rm .github/workflows/*; jsonnet -m .github/workflows/ -S .github.jsonnet'"; echo " - You used the wrong jsonnet binary. In this case, the newlines at the end of the files differ."; echo " To fix, install the go binary. On mac, run 'brew uninstall jsonnet && brew install go-jsonnet'"; exit 1; "timeout-minutes": 30 "name": "misc" "on": diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index fe61408e..ee1b0ce6 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -1,7 +1,7 @@ "jobs": "yarn-publish-preview": "container": - "image": "mirror.gcr.io/node:18" + "image": "mirror.gcr.io/node:22" "permissions": "contents": "read" "packages": "write" @@ -66,11 +66,11 @@ "name": "set gynzy npm_token" "run": | cat <<EOF > .npmrc - registry=https://npm.gynzy.net/ - always-auth="true" + @gynzy:registry=https://npm.gynzy.net/ "//npm.gynzy.net/:_authToken"="${NPM_TOKEN}" EOF - - "name": "yarn" + - "env": {} + "name": "yarn" "run": "yarn --cache-folder .yarncache --frozen-lockfile --prefer-offline || yarn --cache-folder .yarncache --frozen-lockfile --prefer-offline" - "name": "check-version-bump" "uses": "del-systems/check-if-version-bumped@v1" @@ -83,8 +83,7 @@ "name": "set gynzy npm_token" "run": | cat <<EOF > .npmrc - registry=https://npm.gynzy.net/ - always-auth="true" + @gynzy:registry=https://npm.gynzy.net/ "//npm.gynzy.net/:_authToken"="${NPM_TOKEN}" EOF - "env": diff --git 
a/.github/workflows/publish-prod.yml b/.github/workflows/publish-prod.yml index 9423e60c..1151ea80 100644 --- a/.github/workflows/publish-prod.yml +++ b/.github/workflows/publish-prod.yml @@ -1,7 +1,7 @@ "jobs": "yarn-publish": "container": - "image": "mirror.gcr.io/node:18" + "image": "mirror.gcr.io/node:22" "permissions": "contents": "read" "packages": "write" @@ -66,11 +66,11 @@ "name": "set gynzy npm_token" "run": | cat <<EOF > .npmrc - registry=https://npm.gynzy.net/ - always-auth="true" + @gynzy:registry=https://npm.gynzy.net/ "//npm.gynzy.net/:_authToken"="${NPM_TOKEN}" EOF - - "name": "yarn" + - "env": {} + "name": "yarn" "run": "yarn --cache-folder .yarncache --frozen-lockfile --prefer-offline || yarn --cache-folder .yarncache --frozen-lockfile --prefer-offline" - "name": "build" "run": "yarn build" @@ -79,8 +79,7 @@ "name": "set gynzy npm_token" "run": | cat <<EOF > .npmrc - registry=https://npm.gynzy.net/ - always-auth="true" + @gynzy:registry=https://npm.gynzy.net/ "//npm.gynzy.net/:_authToken"="${NPM_TOKEN}" EOF - "env": {}