diff --git a/README.md b/README.md index 6e5994a305..ed4c622882 100644 --- a/README.md +++ b/README.md @@ -127,6 +127,7 @@ Join our discord community via [this invite link](https://discord.gg/bxgXW8jJGh) | [disable\_runner\_autoupdate](#input\_disable\_runner\_autoupdate) | Disable the auto update of the github runner agent. Be aware there is a grace period of 30 days, see also the [GitHub article](https://github.blog/changelog/2022-02-01-github-actions-self-hosted-runners-can-now-disable-automatic-updates/) | `bool` | `false` | no | | [enable\_ami\_housekeeper](#input\_enable\_ami\_housekeeper) | Option to disable the lambda to clean up old AMIs. | `bool` | `false` | no | | [enable\_cloudwatch\_agent](#input\_enable\_cloudwatch\_agent) | Enables the cloudwatch agent on the ec2 runner instances. The runner uses a default config that can be overridden via `cloudwatch_config`. | `bool` | `true` | no | +| [enable\_enterprise\_runners](#input\_enable\_enterprise\_runners) | Enable enterprise-level runners; when true, authentication must use enterprise\_pat. | `bool` | `false` | no | | [enable\_ephemeral\_runners](#input\_enable\_ephemeral\_runners) | Enable ephemeral runners, runners will only be used once. | `bool` | `false` | no | | [enable\_jit\_config](#input\_enable\_jit\_config) | Overwrite the default behavior for JIT configuration. By default JIT configuration is enabled for ephemeral runners and disabled for non-ephemeral runners. In case of GHES check first if the JIT config API is avaialbe. In case you upgradeing from 3.x to 4.x you can set `enable_jit_config` to `false` to avoid a breaking change when having your own AMI. | `bool` | `null` | no | | [enable\_job\_queued\_check](#input\_enable\_job\_queued\_check) | Only scale if the job event received by the scale up lambda is in the queued state. By default enabled for non ephemeral runners and disabled for ephemeral. Set this variable to overwrite the default behavior. 
| `bool` | `null` | no | @@ -139,10 +140,12 @@ Join our discord community via [this invite link](https://discord.gg/bxgXW8jJGh) | [enable\_ssm\_on\_runners](#input\_enable\_ssm\_on\_runners) | Enable to allow access to the runner instances for debugging purposes via SSM. Note that this adds additional permissions to the runner instances. | `bool` | `false` | no | | [enable\_user\_data\_debug\_logging\_runner](#input\_enable\_user\_data\_debug\_logging\_runner) | Option to enable debug logging for user-data, this logs all secrets as well. | `bool` | `false` | no | | [enable\_userdata](#input\_enable\_userdata) | Should the userdata script be enabled for the runner. Set this to false if you are using your own prebuilt AMI. | `bool` | `true` | no | +| [enterprise\_pat](#input\_enterprise\_pat) | GitHub enterprise PAT. Used only when enable\_enterprise\_runners is true. | `string` | `null` | no | +| [enterprise\_slug](#input\_enterprise\_slug) | Enterprise slug | `string` | `""` | no | | [eventbridge](#input\_eventbridge) | Enable the use of EventBridge by the module. By enabling this feature events will be put on the EventBridge by the webhook instead of directly dispatching to queues for scaling.

`enable`: Enable the EventBridge feature.
`accept_events`: List can be used to only allow specific events to be putted on the EventBridge. By default all events, empty list will be be interpreted as all events. |
object({
enable = optional(bool, true)
accept_events = optional(list(string), null)
})
| `{}` | no | | [ghes\_ssl\_verify](#input\_ghes\_ssl\_verify) | GitHub Enterprise SSL verification. Set to 'false' when custom certificate (chains) is used for GitHub Enterprise Server (insecure). | `bool` | `true` | no | | [ghes\_url](#input\_ghes\_url) | GitHub Enterprise Server URL. Example: https://github.internal.co - DO NOT SET IF USING PUBLIC GITHUB. However if you are using Github Enterprise Cloud with data-residency (ghe.com), set the endpoint here. Example - https://companyname.ghe.com | `string` | `null` | no | -| [github\_app](#input\_github\_app) | GitHub app parameters, see your github app.
You can optionally create the SSM parameters yourself and provide the ARN and name here, through the `*_ssm` attributes.
If you chose to provide the configuration values directly here,
please ensure the key is the base64-encoded `.pem` file (the output of `base64 app.private-key.pem`, not the content of `private-key.pem`).
Note: the provided SSM parameters arn and name have a precedence over the actual value (i.e `key_base64_ssm` has a precedence over `key_base64` etc). |
object({
key_base64 = optional(string)
key_base64_ssm = optional(object({
arn = string
name = string
}))
id = optional(string)
id_ssm = optional(object({
arn = string
name = string
}))
webhook_secret = optional(string)
webhook_secret_ssm = optional(object({
arn = string
name = string
}))
})
| n/a | yes | +| [github\_app](#input\_github\_app) | GitHub app parameters. |
object({
key_base64 = optional(string, null)
key_base64_ssm = optional(object({ arn = string, name = string }))
id = optional(string, null)
id_ssm = optional(object({ arn = string, name = string }))
webhook_secret = optional(string)
webhook_secret_ssm = optional(object({ arn = string, name = string }))
})
| n/a | yes | | [idle\_config](#input\_idle\_config) | List of time periods, defined as a cron expression, to keep a minimum amount of runners active instead of scaling down to 0. By defining this list you can ensure that in time periods that match the cron expression within 5 seconds a runner is kept idle. |
list(object({
cron = string
timeZone = string
idleCount = number
evictionStrategy = optional(string, "oldest_first")
}))
| `[]` | no | | [instance\_allocation\_strategy](#input\_instance\_allocation\_strategy) | The allocation strategy for spot instances. AWS recommends using `price-capacity-optimized` however the AWS default is `lowest-price`. | `string` | `"lowest-price"` | no | | [instance\_max\_spot\_price](#input\_instance\_max\_spot\_price) | Max price price for spot instances per hour. This variable will be passed to the create fleet as max spot price for the fleet. | `string` | `null` | no | diff --git a/lambdas/functions/control-plane/src/aws/runners.d.ts b/lambdas/functions/control-plane/src/aws/runners.d.ts index 72ff9e3e1a..074cc0ee25 100644 --- a/lambdas/functions/control-plane/src/aws/runners.d.ts +++ b/lambdas/functions/control-plane/src/aws/runners.d.ts @@ -1,6 +1,6 @@ import { DefaultTargetCapacityType, SpotAllocationStrategy } from '@aws-sdk/client-ec2'; -export type RunnerType = 'Org' | 'Repo'; +export type RunnerType = 'Enterprise' | 'Org' | 'Repo'; export interface RunnerList { instanceId: string; diff --git a/lambdas/functions/control-plane/src/aws/runners.test.ts b/lambdas/functions/control-plane/src/aws/runners.test.ts index c4ec328c9b..b40280b3db 100644 --- a/lambdas/functions/control-plane/src/aws/runners.test.ts +++ b/lambdas/functions/control-plane/src/aws/runners.test.ts @@ -661,7 +661,7 @@ function createRunnerConfig(runnerConfig: RunnerConfig): RunnerInputParameters { } interface ExpectedFleetRequestValues { - type: 'Repo' | 'Org'; + type: 'Enterprise' | 'Repo' | 'Org'; capacityType: DefaultTargetCapacityType; allocationStrategy: SpotAllocationStrategy; maxSpotPrice?: string; diff --git a/lambdas/functions/control-plane/src/github/octokit.test.ts b/lambdas/functions/control-plane/src/github/octokit.test.ts index a715a15476..2dcfed5104 100644 --- a/lambdas/functions/control-plane/src/github/octokit.test.ts +++ b/lambdas/functions/control-plane/src/github/octokit.test.ts @@ -10,6 +10,10 @@ const mockOctokit = { }, }; +function setDefaults() { + 
process.env.PARAMETER_ENTERPRISE_PAT_NAME = 'github-pat-id'; +} + vi.mock('../github/auth', async () => ({ createGithubInstallationAuth: vi.fn().mockImplementation(async (installationId) => { return { token: 'token', type: 'installation', installationId: installationId }; @@ -22,29 +26,46 @@ vi.mock('@octokit/rest', async () => ({ Octokit: vi.fn().mockImplementation(() => mockOctokit), })); +vi.mock('@aws-github-runner/aws-ssm-util', async () => { + const actual = (await vi.importActual( + '@aws-github-runner/aws-ssm-util', + )) as typeof import('@aws-github-runner/aws-ssm-util'); + + return { + ...actual, + getParameter: vi.fn(), + }; +}); + // We've already mocked '../github/auth' above describe('Test getOctokit', () => { const data = [ { description: 'Should look-up org installation if installationId is 0.', - input: { orgLevelRunner: false, installationId: 0 }, - output: { callReposInstallation: true, callOrgInstallation: false }, + input: { enableEnterpriseLevel: false, orgLevelRunner: false, installationId: 0 }, + output: { callEnterpriseToken: false, callReposInstallation: true, callOrgInstallation: false }, }, { description: 'Should look-up org installation if installationId is 0.', - input: { orgLevelRunner: true, installationId: 0 }, - output: { callReposInstallation: false, callOrgInstallation: true }, + input: { enableEnterpriseLevel: false, orgLevelRunner: true, installationId: 0 }, + output: { callEnterpriseToken: false, callReposInstallation: false, callOrgInstallation: true }, }, { description: 'Should not look-up org installation if provided in payload.', - input: { orgLevelRunner: true, installationId: 1 }, - output: { callReposInstallation: false, callOrgInstallation: false }, + input: { enableEnterpriseLevel: false, orgLevelRunner: true, installationId: 1 }, + output: { callEnterpriseToken: false, callReposInstallation: false, callOrgInstallation: false }, + }, + { + description: 'Should not look-up org installation if enterprise is enabled.', + 
input: { enableEnterpriseLevel: true, orgLevelRunner: false, installationId: 1 }, + output: { callEnterpriseToken: true, callReposInstallation: false, callOrgInstallation: false }, }, ]; beforeEach(() => { vi.clearAllMocks(); + setDefaults(); }); it.each(data)(`$description`, async ({ input, output }) => { @@ -64,9 +85,12 @@ describe('Test getOctokit', () => { mockOctokit.apps.getOrgInstallation.mockRejectedValue(new Error('Error')); } - await expect(getOctokit('', input.orgLevelRunner, payload)).resolves.toBeDefined(); + await expect(getOctokit('', input.enableEnterpriseLevel, input.orgLevelRunner, payload)).resolves.toBeDefined(); - if (output.callOrgInstallation) { + if (output.callEnterpriseToken) { + expect(mockOctokit.apps.getOrgInstallation).not.toHaveBeenCalled(); + expect(mockOctokit.apps.getRepoInstallation).not.toHaveBeenCalled(); + } else if (output.callOrgInstallation) { expect(mockOctokit.apps.getOrgInstallation).toHaveBeenCalled(); expect(mockOctokit.apps.getRepoInstallation).not.toHaveBeenCalled(); } else if (output.callReposInstallation) { diff --git a/lambdas/functions/control-plane/src/github/octokit.ts b/lambdas/functions/control-plane/src/github/octokit.ts index a2cce5f55d..32b87ca8dc 100644 --- a/lambdas/functions/control-plane/src/github/octokit.ts +++ b/lambdas/functions/control-plane/src/github/octokit.ts @@ -1,6 +1,7 @@ import { Octokit } from '@octokit/rest'; import { ActionRequestMessage } from '../scale-runners/scale-up'; import { createGithubAppAuth, createGithubInstallationAuth, createOctokitClient } from './auth'; +import { getParameter } from '@aws-github-runner/aws-ssm-util'; export async function getInstallationId( ghesApiUrl: string, @@ -37,10 +38,17 @@ export async function getInstallationId( */ export async function getOctokit( ghesApiUrl: string, + enableEnterpriseLevel: boolean, enableOrgLevel: boolean, payload: ActionRequestMessage, ): Promise { - const installationId = await getInstallationId(ghesApiUrl, enableOrgLevel, 
payload); - const ghAuth = await createGithubInstallationAuth(installationId, ghesApiUrl); - return await createOctokitClient(ghAuth.token, ghesApiUrl); + let ghToken; + if (enableEnterpriseLevel) { + ghToken = await getParameter(process.env.PARAMETER_ENTERPRISE_PAT_NAME); + } else { + const installationId = await getInstallationId(ghesApiUrl, enableOrgLevel, payload); + ghToken = (await createGithubInstallationAuth(installationId, ghesApiUrl)).token; + } + + return await createOctokitClient(ghToken, ghesApiUrl); } diff --git a/lambdas/functions/control-plane/src/modules.d.ts b/lambdas/functions/control-plane/src/modules.d.ts index 7570f29035..54bdce02ba 100644 --- a/lambdas/functions/control-plane/src/modules.d.ts +++ b/lambdas/functions/control-plane/src/modules.d.ts @@ -14,6 +14,7 @@ declare namespace NodeJS { PARAMETER_GITHUB_APP_CLIENT_SECRET_NAME: string; PARAMETER_GITHUB_APP_ID_NAME: string; PARAMETER_GITHUB_APP_KEY_BASE64_NAME: string; + PARAMETER_ENTERPRISE_PAT_NAME: string; RUNNER_OWNER: string; SCALE_DOWN_CONFIG: string; SSM_TOKEN_PATH: string; diff --git a/lambdas/functions/control-plane/src/pool/pool.test.ts b/lambdas/functions/control-plane/src/pool/pool.test.ts index 3a7ba3ab1c..40b6771291 100644 --- a/lambdas/functions/control-plane/src/pool/pool.test.ts +++ b/lambdas/functions/control-plane/src/pool/pool.test.ts @@ -1,5 +1,6 @@ import { Octokit } from '@octokit/rest'; import moment from 'moment-timezone'; +import * as aws_ssm from '@aws-github-runner/aws-ssm-util'; import * as nock from 'nock'; import { listEC2Runners } from '../aws/runners'; @@ -34,6 +35,10 @@ vi.mock('./../github/auth', async () => ({ createOctokitClient: vi.fn(), })); +vi.mock('@aws-github-runner/aws-ssm-util', () => ({ + getParameter: vi.fn(), +})); + vi.mock('../scale-runners/scale-up', async () => ({ scaleUp: vi.fn(), createRunners: vi.fn(), @@ -47,6 +52,7 @@ vi.mock('../scale-runners/scale-up', async () => ({ const mocktokit = Octokit as MockedClass; const mockedAppAuth = 
vi.mocked(ghAuth.createGithubAppAuth); const mockedInstallationAuth = vi.mocked(ghAuth.createGithubInstallationAuth); +const mockedGetParameter = vi.mocked(aws_ssm.getParameter); const mockCreateClient = vi.mocked(ghAuth.createOctokitClient); const mockListRunners = vi.mocked(listEC2Runners); @@ -174,6 +180,7 @@ beforeEach(() => { installationId: 0, }); + mockedGetParameter.mockResolvedValue('dummy-enterprise-pat'); mockCreateClient.mockResolvedValue(new mocktokit()); }); @@ -274,6 +281,24 @@ describe('Test simple pool.', () => { expect.anything(), ); }); + + it('Uses Enterprise PAT from SSM when ENABLE_ENTERPRISE_RUNNERS=true', async () => { + process.env.ENABLE_ENTERPRISE_RUNNERS = 'true'; + process.env.PARAMETER_ENTERPRISE_PAT_NAME = '/ssm/enterprise/pat'; + mockedGetParameter.mockResolvedValue('enterprise-pat-123'); + + await adjust({ poolSize: 5 }); + + // createGithubInstallationAuth must NOT be called in enterprise mode + expect(ghAuth.createGithubInstallationAuth).not.toHaveBeenCalled(); + + // client is created with PAT + expect(ghAuth.createOctokitClient).toHaveBeenCalledWith( + 'enterprise-pat-123', + 'https://api.github.enterprise.something', + ); + expect(createRunners).toHaveBeenCalled(); // still reaches scale-up if needed + }); }); describe('With Github Data Residency', () => { diff --git a/lambdas/functions/control-plane/src/pool/pool.ts b/lambdas/functions/control-plane/src/pool/pool.ts index 162a7d0f6d..7703da729e 100644 --- a/lambdas/functions/control-plane/src/pool/pool.ts +++ b/lambdas/functions/control-plane/src/pool/pool.ts @@ -1,11 +1,13 @@ import { Octokit } from '@octokit/rest'; import { createChildLogger } from '@aws-github-runner/aws-powertools-util'; +import { getParameter } from '@aws-github-runner/aws-ssm-util'; import yn from 'yn'; import { bootTimeExceeded, listEC2Runners } from '../aws/runners'; import { RunnerList } from '../aws/runners.d'; import { createGithubAppAuth, createGithubInstallationAuth, createOctokitClient } from 
'../github/auth'; import { createRunners, getGitHubEnterpriseApiUrl } from '../scale-runners/scale-up'; +import { GhRunners } from '../scale-runners/cache'; const logger = createChildLogger('pool'); @@ -20,6 +22,7 @@ interface RunnerStatus { export async function adjust(event: PoolEvent): Promise { logger.info(`Checking current pool size against pool of size: ${event.poolSize}`); + const enableEnterpriseLevel = yn(process.env.ENABLE_ENTERPRISE_RUNNERS, { default: false }); const runnerLabels = process.env.RUNNER_LABELS || ''; const runnerGroup = process.env.RUNNER_GROUP_NAME || ''; const runnerNamePrefix = process.env.RUNNER_NAME_PREFIX || ''; @@ -44,22 +47,27 @@ export async function adjust(event: PoolEvent): Promise { const { ghesApiUrl, ghesBaseUrl } = getGitHubEnterpriseApiUrl(); - const installationId = await getInstallationId(ghesApiUrl, runnerOwner); - const ghAuth = await createGithubInstallationAuth(installationId, ghesApiUrl); - const githubInstallationClient = await createOctokitClient(ghAuth.token, ghesApiUrl); + const installationId = enableEnterpriseLevel ? undefined : await getInstallationId(ghesApiUrl, runnerOwner); + const ghToken = enableEnterpriseLevel + ? await getParameter(process.env.PARAMETER_ENTERPRISE_PAT_NAME) + : (await createGithubInstallationAuth(installationId, ghesApiUrl)).token; + const githubInstallationClient = await createOctokitClient(ghToken, ghesApiUrl); // Get statusses of runners registed in GitHub const runnerStatusses = await getGitHubRegisteredRunnnerStatusses( + enableEnterpriseLevel, githubInstallationClient, runnerOwner, runnerNamePrefix, ); + const ec2RunnerType = enableEnterpriseLevel ? 
'Enterprise' : 'Org'; + // Look up the managed ec2 runners in AWS, but running does not mean idle const ec2runners = await listEC2Runners({ environment, runnerOwner, - runnerType: 'Org', + runnerType: ec2RunnerType, statuses: ['running'], }); @@ -77,7 +85,7 @@ export async function adjust(event: PoolEvent): Promise { runnerGroup, runnerOwner, runnerNamePrefix, - runnerType: 'Org', + runnerType: ec2RunnerType, disableAutoUpdate: disableAutoUpdate, ssmTokenPath, ssmConfigPath, @@ -140,14 +148,20 @@ function calculatePooSize(ec2runners: RunnerList[], runnerStatus: Map> { - const runners = await ghClient.paginate(ghClient.actions.listSelfHostedRunnersForOrg, { - org: runnerOwner, - per_page: 100, - }); + const runners: GhRunners = enableEnterpriseLevel + ? await ghClient.paginate('GET /enterprises/{enterprise}/actions/runners', { + enterprise: runnerOwner, + per_page: 100, + }) + : await ghClient.paginate(ghClient.actions.listSelfHostedRunnersForOrg, { + org: runnerOwner, + per_page: 100, + }); const runnerStatus = new Map(); for (const runner of runners) { runner.name = runnerNamePrefix ? 
runner.name.replace(runnerNamePrefix, '') : runner.name; diff --git a/lambdas/functions/control-plane/src/scale-runners/job-retry.test.ts b/lambdas/functions/control-plane/src/scale-runners/job-retry.test.ts index 1edfefb69b..b078a39473 100644 --- a/lambdas/functions/control-plane/src/scale-runners/job-retry.test.ts +++ b/lambdas/functions/control-plane/src/scale-runners/job-retry.test.ts @@ -170,6 +170,42 @@ describe(`Test job retry check`, () => { expect(createSingleMetric).not.toHaveBeenCalled(); }); + it(`should publish a message for retry if retry is enabled and enterprise level is enabled.`, async () => { + // setup + mockOctokit.actions.getJobForWorkflowRun.mockImplementation(() => ({ + data: { + status: 'queued', + }, + })); + + const message: ActionRequestMessageRetry = { + eventType: 'workflow_job', + id: 0, + installationId: 0, + repositoryName: 'test', + repositoryOwner: 'github-aws-runners', + repoOwnerType: 'Enterprise', + retryCounter: 0, + }; + process.env.ENABLE_ENTERPRISE_RUNNERS = 'true'; + process.env.RUNNER_NAME_PREFIX = 'test'; + process.env.JOB_QUEUE_SCALE_UP_URL = + 'https://sqs.eu-west-1.amazonaws.com/123456789/webhook_events_workflow_job_queue'; + + // act + await checkAndRetryJob(message); + + // assert + expect(publishMessage).toHaveBeenCalledWith( + JSON.stringify({ + ...message, + }), + 'https://sqs.eu-west-1.amazonaws.com/123456789/webhook_events_workflow_job_queue', + ); + expect(createSingleMetric).not.toHaveBeenCalled(); + }); + + it(`should publish a message for retry if retry is enabled and counter is below max attempts.`, async () => { // setup mockOctokit.actions.getJobForWorkflowRun.mockImplementation(() => ({ diff --git a/lambdas/functions/control-plane/src/scale-runners/job-retry.ts b/lambdas/functions/control-plane/src/scale-runners/job-retry.ts index bd9ebbd3b9..99dd7f1592 100644 --- a/lambdas/functions/control-plane/src/scale-runners/job-retry.ts +++ b/lambdas/functions/control-plane/src/scale-runners/job-retry.ts @@ 
-38,8 +38,14 @@ export async function publishRetryMessage(payload: ActionRequestMessage): Promis export async function checkAndRetryJob(payload: ActionRequestMessageRetry): Promise { const enableOrgLevel = yn(process.env.ENABLE_ORGANIZATION_RUNNERS, { default: true }); - const runnerType = enableOrgLevel ? 'Org' : 'Repo'; - const runnerOwner = enableOrgLevel ? payload.repositoryOwner : `${payload.repositoryOwner}/${payload.repositoryName}`; + const enableEnterpriseLevel = yn(process.env.ENABLE_ENTERPRISE_RUNNERS, { default: false }); + const enterpriseSlug = process.env.ENTERPRISE_SLUG ?? ''; + const runnerType = enableEnterpriseLevel ? 'Enterprise' : enableOrgLevel ? 'Org' : 'Repo'; + const runnerOwner = enableEnterpriseLevel + ? enterpriseSlug + : enableOrgLevel + ? payload.repositoryOwner + : `${payload.repositoryOwner}/${payload.repositoryName}`; const runnerNamePrefix = process.env.RUNNER_NAME_PREFIX ?? ''; const jobQueueUrl = process.env.JOB_QUEUE_SCALE_UP_URL ?? ''; const enableMetrics = yn(process.env.ENABLE_METRIC_JOB_RETRY, { default: false }); @@ -60,7 +66,7 @@ export async function checkAndRetryJob(payload: ActionRequestMessageRetry): Prom logger.info(`Received event`); const { ghesApiUrl } = getGitHubEnterpriseApiUrl(); - const ghClient = await getOctokit(ghesApiUrl, enableOrgLevel, payload); + const ghClient = await getOctokit(ghesApiUrl, enableEnterpriseLevel, enableOrgLevel, payload); // check job is still queued if (await isJobQueued(ghClient, payload)) { diff --git a/lambdas/functions/control-plane/src/scale-runners/scale-down.test.ts b/lambdas/functions/control-plane/src/scale-runners/scale-down.test.ts index 87b719a4f1..44193525ba 100644 --- a/lambdas/functions/control-plane/src/scale-runners/scale-down.test.ts +++ b/lambdas/functions/control-plane/src/scale-runners/scale-down.test.ts @@ -1,5 +1,6 @@ import { Octokit } from '@octokit/rest'; import { RequestError } from '@octokit/request-error'; +import * as aws_ssm from 
'@aws-github-runner/aws-ssm-util'; import moment from 'moment'; import nock from 'nock'; @@ -10,6 +11,8 @@ import { githubCache } from './cache'; import { newestFirstStrategy, oldestFirstStrategy, scaleDown } from './scale-down'; import { describe, it, expect, beforeEach, vi } from 'vitest'; +const routeHandlers = new Map(); + const mockOctokit = { apps: { getOrgInstallation: vi.fn(), @@ -23,6 +26,11 @@ const mockOctokit = { getSelfHostedRunnerForOrg: vi.fn(), getSelfHostedRunnerForRepo: vi.fn(), }, + request: vi.fn((route, params) => { + const handler = routeHandlers.get(route); + if (!handler) throw new Error(`Unmocked route: ${route}`); + return handler(params); + }), paginate: vi.fn(), }; vi.mock('@octokit/rest', () => ({ @@ -39,6 +47,11 @@ vi.mock('./../aws/runners', async (importOriginal) => { listEC2Runners: vi.fn(), }; }); + +vi.mock('@aws-github-runner/aws-ssm-util', () => ({ + getParameter: vi.fn(), +})); + vi.mock('./../github/auth', async () => ({ createGithubAppAuth: vi.fn(), createGithubInstallationAuth: vi.fn(), @@ -66,6 +79,7 @@ const mockListRunners = vi.mocked(listEC2Runners); const mockTagRunners = vi.mocked(tag); const mockUntagRunners = vi.mocked(untag); const mockTerminateRunners = vi.mocked(terminateRunner); +const mockedGetParameter = vi.mocked(aws_ssm.getParameter); export interface TestData { repositoryName: string; @@ -104,6 +118,7 @@ describe('Scale down runners', () => { nock.disableNetConnect(); vi.clearAllMocks(); vi.resetModules(); + routeHandlers.clear(); githubCache.clients.clear(); githubCache.runners.clear(); mockOctokit.apps.getOrgInstallation.mockImplementation(() => ({ @@ -159,6 +174,8 @@ describe('Scale down runners', () => { } }); + mockedGetParameter.mockResolvedValue('dummy-enterprise-pat'); + mockTerminateRunners.mockImplementation(async () => { return; }); @@ -190,8 +207,8 @@ describe('Scale down runners', () => { } }); - type RunnerType = 'Repo' | 'Org'; - const runnerTypes: RunnerType[] = ['Org', 'Repo']; + type 
RunnerType = 'Repo' | 'Org' | 'Enterprise'; + const runnerTypes: RunnerType[] = ['Org', 'Repo', 'Enterprise']; describe.each(runnerTypes)('For %s runners.', (type) => { it('Should not call terminate when no runners online.', async () => { // setup @@ -222,6 +239,24 @@ describe('Scale down runners', () => { mockListRunners.mockResolvedValue(runners); mockAwsRunners(runners); + if (type === 'Enterprise') { + routeHandlers.set( + 'GET /enterprises/{enterprise}/actions/runners/{runner_id}', + ({ enterprise, runner_id }: { enterprise: string; runner_id: string }) => + Promise.resolve({ + data: { + id: runner_id, + owner: enterprise, + busy: runner_id.includes('busy') ? true : false, + status: 'online', + }, + }), + ); + routeHandlers.set('DELETE /enterprises/{enterprise}/actions/runners/{runner_id}', () => + Promise.resolve({ status: 204 }), + ); + } + await scaleDown(); // assert @@ -231,7 +266,7 @@ describe('Scale down runners', () => { if (type === 'Repo') { expect(mockOctokit.apps.getRepoInstallation).toHaveBeenCalled(); - } else { + } else if (type === 'Org') { expect(mockOctokit.apps.getOrgInstallation).toHaveBeenCalled(); } @@ -373,12 +408,20 @@ describe('Scale down runners', () => { mockOctokit.actions.getSelfHostedRunnerForRepo.mockResolvedValueOnce({ data: { id: 1234567890, name: orphanRunner.instanceId, busy: true, status: 'online' }, }); - } else { + } else if (type === 'Org') { mockOctokit.actions.getSelfHostedRunnerForOrg.mockResolvedValueOnce({ data: { id: 1234567890, name: orphanRunner.instanceId, busy: true, status: 'online' }, }); } + if (type === 'Enterprise') { + routeHandlers.set('GET /enterprises/{enterprise}/actions/runners/{runner_id}', () => + Promise.resolve({ + data: { id: 1234567890, name: orphanRunner.instanceId, busy: true, status: 'online' }, + }), + ); + } + // act await scaleDown(); @@ -391,12 +434,23 @@ describe('Scale down runners', () => { mockOctokit.actions.getSelfHostedRunnerForRepo.mockResolvedValueOnce({ data: { runnerId: 
1234567890, name: orphanRunner.instanceId, busy: true, status: 'offline' }, }); - } else { + } else if (type === 'Org') { mockOctokit.actions.getSelfHostedRunnerForOrg.mockResolvedValueOnce({ data: { runnerId: 1234567890, name: orphanRunner.instanceId, busy: true, status: 'offline' }, }); } + if (type === 'Enterprise') { + routeHandlers.set('GET /enterprises/{enterprise}/actions/runners/{runner_id}', () => + Promise.resolve({ + data: { id: 1234567890, name: orphanRunner.instanceId, busy: true, status: 'offline' }, + }), + ); + routeHandlers.set('DELETE /enterprises/{enterprise}/actions/runners/{runner_id}', () => + Promise.resolve({ status: 204 }), + ); + } + // act await scaleDown(); @@ -436,6 +490,15 @@ describe('Scale down runners', () => { mockOctokit.actions.getSelfHostedRunnerForOrg.mockRejectedValueOnce(error404); } + if (type === 'Enterprise') { + routeHandlers.set('GET /enterprises/{enterprise}/actions/runners/{runner_id}', () => + Promise.resolve({ data: null }), + ); + routeHandlers.set('DELETE /enterprises/{enterprise}/actions/runners/{runner_id}', () => + Promise.resolve({ status: 204 }), + ); + } + // act await scaleDown(); @@ -469,10 +532,21 @@ describe('Scale down runners', () => { if (type === 'Repo') { mockOctokit.actions.getSelfHostedRunnerForRepo.mockRejectedValueOnce(error404); - } else { + } else if (type === 'Org') { mockOctokit.actions.getSelfHostedRunnerForOrg.mockRejectedValueOnce(error404); } + if (type === 'Enterprise') { + routeHandlers.set( + 'GET /enterprises/{enterprise}/actions/runners/{runner_id}', + ({ enterprise, runner_id }: { enterprise: string; runner_id: number }) => + Promise.resolve({ data: { id: runner_id, owner: enterprise, busy: false, status: 'online' } }), + ); + routeHandlers.set('DELETE /enterprises/{enterprise}/actions/runners/{runner_id}', () => + Promise.resolve({ status: 204 }), + ); + } + // act await scaleDown(); @@ -508,7 +582,7 @@ describe('Scale down runners', () => { if (type === 'Repo') { 
mockOctokit.actions.getSelfHostedRunnerForRepo.mockRejectedValueOnce(error500); - } else { + } else if (type === 'Org') { mockOctokit.actions.getSelfHostedRunnerForOrg.mockRejectedValueOnce(error500); } @@ -620,6 +694,7 @@ describe('Scale down runners', () => { }; beforeEach(() => { + mockOctokit.request.mockClear(); process.env.SCALE_DOWN_CONFIG = JSON.stringify([defaultConfig]); }); @@ -628,7 +703,7 @@ describe('Scale down runners', () => { const runnerToTerminateTime = evictionStrategy === 'oldest_first' ? MINIMUM_TIME_RUNNING_IN_MINUTES + 5 - : MINIMUM_TIME_RUNNING_IN_MINUTES + 1; + : MINIMUM_TIME_RUNNING_IN_MINUTES + 2; const runners = [ createRunnerTestData('idle-1', type, MINIMUM_TIME_RUNNING_IN_MINUTES + 4, true, false, false), createRunnerTestData('idle-to-terminate', type, runnerToTerminateTime, true, false, true), @@ -637,6 +712,18 @@ describe('Scale down runners', () => { mockGitHubRunners(runners); mockAwsRunners(runners); + if (type === 'Enterprise') { + routeHandlers.set( + 'GET /enterprises/{enterprise}/actions/runners/{runner_id}', + ({ enterprise, runner_id }: { enterprise: string; runner_id: number }) => + Promise.resolve({ data: { id: runner_id, owner: enterprise, busy: false, status: 'online' } }), + ); + + routeHandlers.set('DELETE /enterprises/{enterprise}/actions/runners/{runner_id}', () => + Promise.resolve({ status: 204 }), + ); + } + // act await scaleDown(); @@ -790,7 +877,7 @@ function mockGitHubRunners(runners: RunnerTestItem[]) { function createRunnerTestData( name: string, - type: 'Org' | 'Repo', + type: 'Enterprise' | 'Org' | 'Repo', minutesLaunchedAgo: number, registered: boolean, orphan: boolean, diff --git a/lambdas/functions/control-plane/src/scale-runners/scale-down.ts b/lambdas/functions/control-plane/src/scale-runners/scale-down.ts index 1e5e712a24..6c3f33c982 100644 --- a/lambdas/functions/control-plane/src/scale-runners/scale-down.ts +++ b/lambdas/functions/control-plane/src/scale-runners/scale-down.ts @@ -2,6 +2,7 @@ 
import { Octokit } from '@octokit/rest'; import { Endpoints } from '@octokit/types'; import { RequestError } from '@octokit/request-error'; import { createChildLogger } from '@aws-github-runner/aws-powertools-util'; +import { getParameter } from '@aws-github-runner/aws-ssm-util'; import moment from 'moment'; import { createGithubAppAuth, createGithubInstallationAuth, createOctokitClient } from '../github/auth'; @@ -29,24 +30,31 @@ async function getOrCreateOctokit(runner: RunnerInfo): Promise { logger.debug(`[createGitHubClientForRunner] Cache miss for ${key}`); const { ghesApiUrl } = getGitHubEnterpriseApiUrl(); - const ghAuthPre = await createGithubAppAuth(undefined, ghesApiUrl); - const githubClientPre = await createOctokitClient(ghAuthPre.token, ghesApiUrl); + const ghAuthPre = + runner.type === 'Enterprise' + ? await getParameter(process.env.PARAMETER_ENTERPRISE_PAT_NAME) + : (await createGithubAppAuth(undefined, ghesApiUrl)).token; + const githubClientPre = await createOctokitClient(ghAuthPre, ghesApiUrl); const installationId = - runner.type === 'Org' - ? ( - await githubClientPre.apps.getOrgInstallation({ - org: runner.owner, - }) - ).data.id - : ( - await githubClientPre.apps.getRepoInstallation({ - owner: runner.owner.split('/')[0], - repo: runner.owner.split('/')[1], - }) - ).data.id; - const ghAuth = await createGithubInstallationAuth(installationId, ghesApiUrl); - const octokit = await createOctokitClient(ghAuth.token, ghesApiUrl); + runner.type === 'Enterprise' + ? undefined + : runner.type === 'Org' + ? ( + await githubClientPre.apps.getOrgInstallation({ + org: runner.owner, + }) + ).data.id + : ( + await githubClientPre.apps.getRepoInstallation({ + owner: runner.owner.split('/')[0], + repo: runner.owner.split('/')[1], + }) + ).data.id; + + const ghToken = + runner.type === 'Enterprise' ? 
ghAuthPre : (await createGithubInstallationAuth(installationId, ghesApiUrl)).token; + const octokit = await createOctokitClient(ghToken, ghesApiUrl); githubCache.clients.set(key, octokit); return octokit; @@ -59,16 +67,21 @@ async function getGitHubSelfHostedRunnerState( ): Promise { try { const state = - ec2runner.type === 'Org' - ? await client.actions.getSelfHostedRunnerForOrg({ + ec2runner.type === 'Enterprise' + ? await client.request('GET /enterprises/{enterprise}/actions/runners/{runner_id}', { + enterprise: ec2runner.owner, runner_id: runnerId, - org: ec2runner.owner, }) - : await client.actions.getSelfHostedRunnerForRepo({ - runner_id: runnerId, - owner: ec2runner.owner.split('/')[0], - repo: ec2runner.owner.split('/')[1], - }); + : ec2runner.type === 'Org' + ? await client.actions.getSelfHostedRunnerForOrg({ + runner_id: runnerId, + org: ec2runner.owner, + }) + : await client.actions.getSelfHostedRunnerForRepo({ + runner_id: runnerId, + owner: ec2runner.owner.split('/')[0], + repo: ec2runner.owner.split('/')[1], + }); metricGitHubAppRateLimit(state.headers); return state.data; @@ -103,17 +116,24 @@ async function listGitHubRunners(runner: RunnerInfo): Promise { logger.debug(`[listGithubRunners] Cache miss for ${key}`); const client = await getOrCreateOctokit(runner); - const runners = - runner.type === 'Org' - ? await client.paginate(client.actions.listSelfHostedRunnersForOrg, { - org: runner.owner, + + const runners: GhRunners = + runner.type === 'Enterprise' + ? await client.paginate('GET /enterprises/{enterprise}/actions/runners', { + enterprise: runner.owner, per_page: 100, }) - : await client.paginate(client.actions.listSelfHostedRunnersForRepo, { - owner: runner.owner.split('/')[0], - repo: runner.owner.split('/')[1], - per_page: 100, - }); + : runner.type === 'Org' + ? 
await client.paginate(client.actions.listSelfHostedRunnersForOrg, { + org: runner.owner, + per_page: 100, + }) + : await client.paginate(client.actions.listSelfHostedRunnersForRepo, { + owner: runner.owner.split('/')[0], + repo: runner.owner.split('/')[1], + per_page: 100, + }); + githubCache.runners.set(key, runners); logger.debug(`[listGithubRunners] Cache set for ${key}`); logger.debug(`[listGithubRunners] Runners: ${JSON.stringify(runners)}`); @@ -141,16 +161,21 @@ async function removeRunner(ec2runner: RunnerInfo, ghRunnerIds: number[]): Promi const statuses = await Promise.all( ghRunnerIds.map(async (ghRunnerId) => { return ( - ec2runner.type === 'Org' - ? await githubAppClient.actions.deleteSelfHostedRunnerFromOrg({ - runner_id: ghRunnerId, - org: ec2runner.owner, - }) - : await githubAppClient.actions.deleteSelfHostedRunnerFromRepo({ + ec2runner.type === 'Enterprise' + ? await githubAppClient.request('DELETE /enterprises/{enterprise}/actions/runners/{runner_id}', { + enterprise: ec2runner.owner, runner_id: ghRunnerId, - owner: ec2runner.owner.split('/')[0], - repo: ec2runner.owner.split('/')[1], }) + : ec2runner.type === 'Org' + ? 
await githubAppClient.actions.deleteSelfHostedRunnerFromOrg({ + runner_id: ghRunnerId, + org: ec2runner.owner, + }) + : await githubAppClient.actions.deleteSelfHostedRunnerFromRepo({ + runner_id: ghRunnerId, + owner: ec2runner.owner.split('/')[0], + repo: ec2runner.owner.split('/')[1], + }) ).status; }), ); diff --git a/lambdas/functions/control-plane/src/scale-runners/scale-up.test.ts b/lambdas/functions/control-plane/src/scale-runners/scale-up.test.ts index 14c0a0422e..90a4aad321 100644 --- a/lambdas/functions/control-plane/src/scale-runners/scale-up.test.ts +++ b/lambdas/functions/control-plane/src/scale-runners/scale-up.test.ts @@ -14,6 +14,8 @@ import * as scaleUpModule from './scale-up'; import { getParameter } from '@aws-github-runner/aws-ssm-util'; import { describe, it, expect, beforeEach, vi } from 'vitest'; +const routeHandlers = new Map(); + const mockOctokit = { paginate: vi.fn(), checks: { get: vi.fn() }, @@ -28,6 +30,11 @@ const mockOctokit = { getOrgInstallation: vi.fn(), getRepoInstallation: vi.fn(), }, + request: vi.fn((route, params) => { + const handler = routeHandlers.get(route); + if (!handler) throw new Error(`Unmocked route: ${route}`); + return handler(params); + }), }; const mockCreateRunner = vi.mocked(createRunner); const mockListRunners = vi.mocked(listEC2Runners); @@ -97,15 +104,33 @@ const EXPECTED_RUNNER_PARAMS: RunnerInputParameters = { tracingEnabled: false, onDemandFailoverOnError: [], }; + +const EXPECTED_ENTERPRISE_RUNNER_PARAMS: RunnerInputParameters = { + environment: 'unit-test-environment', + runnerType: 'Enterprise', + runnerOwner: '', + numberOfRunners: 1, + launchTemplateName: 'lt-1', + ec2instanceCriteria: { + instanceTypes: ['m5.large'], + targetCapacityType: 'spot', + instanceAllocationStrategy: 'lowest-price', + }, + subnets: ['subnet-123'], + tracingEnabled: false, + onDemandFailoverOnError: [], +}; let expectedRunnerParams: RunnerInputParameters; function setDefaults() { process.env = { ...cleanEnv }; 
process.env.PARAMETER_GITHUB_APP_ID_NAME = 'github-app-id'; + process.env.PARAMETER_ENTERPRISE_PAT_NAME = 'github-enterprise-pat'; process.env.GITHUB_APP_KEY_BASE64 = 'TEST_CERTIFICATE_DATA'; process.env.GITHUB_APP_ID = '1337'; process.env.GITHUB_APP_CLIENT_ID = 'TEST_CLIENT_ID'; process.env.GITHUB_APP_CLIENT_SECRET = 'TEST_CLIENT_SECRET'; + process.env.GITHUB_ENTERPRISE_PAT = 'TEST_PAT'; process.env.RUNNERS_MAXIMUM_COUNT = '3'; process.env.ENVIRONMENT = EXPECTED_RUNNER_PARAMS.environment; process.env.LAUNCH_TEMPLATE_NAME = 'lt-1'; @@ -120,6 +145,7 @@ beforeEach(() => { vi.resetModules(); vi.clearAllMocks(); setDefaults(); + routeHandlers.clear(); defaultSSMGetParameterMockImpl(); defaultOctokitMockImpl(); @@ -182,10 +208,10 @@ describe('scaleUp with GHES', () => { await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); expect(listEC2Runners).not.toBeCalled(); }); - - describe('on org level', () => { + describe('on enterprise level', () => { beforeEach(() => { - process.env.ENABLE_ORGANIZATION_RUNNERS = 'true'; + process.env.ENABLE_ENTERPRISE_RUNNERS = 'true'; + process.env.ENABLE_ORGANIZATION_RUNNERS = 'false'; process.env.ENABLE_EPHEMERAL_RUNNERS = 'true'; process.env.RUNNER_NAME_PREFIX = 'unit-test-'; process.env.RUNNER_GROUP_NAME = 'Default'; @@ -193,7 +219,7 @@ describe('scaleUp with GHES', () => { process.env.SSM_TOKEN_PATH = '/github-action-runners/default/runners/config'; process.env.RUNNER_LABELS = 'label1,label2'; - expectedRunnerParams = { ...EXPECTED_RUNNER_PARAMS }; + expectedRunnerParams = { ...EXPECTED_ENTERPRISE_RUNNER_PARAMS }; mockSSMClient.reset(); }); @@ -201,8 +227,8 @@ describe('scaleUp with GHES', () => { await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); expect(listEC2Runners).toBeCalledWith({ environment: 'unit-test-environment', - runnerType: 'Org', - runnerOwner: TEST_DATA.repositoryOwner, + runnerType: 'Enterprise', + runnerOwner: '', }); }); @@ -225,9 +251,7 @@ describe('scaleUp with GHES', () => { it('creates a token when maximum runners 
has not been reached', async () => { process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); - expect(mockOctokit.actions.createRegistrationTokenForOrg).toBeCalledWith({ - org: TEST_DATA.repositoryOwner, - }); + expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); }); @@ -249,53 +273,11 @@ describe('scaleUp with GHES', () => { expect(createRunner).toBeCalledWith({ ...expectedRunnerParams, amiIdSsmParameterName: 'my-ami-id-param' }); }); - it('Throws an error if runner group doesnt exist for ephemeral runners', async () => { - process.env.RUNNER_GROUP_NAME = 'test-runner-group'; - mockSSMgetParameter.mockImplementation(async () => { - throw new Error('ParameterNotFound'); - }); - await expect(scaleUpModule.scaleUp('aws:sqs', TEST_DATA)).rejects.toBeInstanceOf(Error); - expect(mockOctokit.paginate).toHaveBeenCalledTimes(1); - }); - - it('Discards event if it is a User repo and org level runners is enabled', async () => { - process.env.ENABLE_ORGANIZATION_RUNNERS = 'true'; - const USER_REPO_TEST_DATA = { ...TEST_DATA }; - USER_REPO_TEST_DATA.repoOwnerType = 'User'; - await scaleUpModule.scaleUp('aws:sqs', USER_REPO_TEST_DATA); - expect(createRunner).not.toHaveBeenCalled(); - }); - - it('create SSM parameter for runner group id if it doesnt exist', async () => { - mockSSMgetParameter.mockImplementation(async () => { - throw new Error('ParameterNotFound'); - }); - await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); - expect(mockOctokit.paginate).toHaveBeenCalledTimes(1); - expect(mockSSMClient).toHaveReceivedCommandTimes(PutParameterCommand, 2); - expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { - Name: `${process.env.SSM_CONFIG_PATH}/runner-group/${process.env.RUNNER_GROUP_NAME}`, - Value: '1', - Type: 'String', - }); - }); - - it('Does not create SSM parameter for runner group id if it 
exists', async () => { - await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); - expect(mockOctokit.paginate).toHaveBeenCalledTimes(0); - expect(mockSSMClient).toHaveReceivedCommandTimes(PutParameterCommand, 1); - }); - it('create start runner config for ephemeral runners ', async () => { process.env.RUNNERS_MAXIMUM_COUNT = '2'; await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); - expect(mockOctokit.actions.generateRunnerJitconfigForOrg).toBeCalledWith({ - org: TEST_DATA.repositoryOwner, - name: 'unit-test-i-12345', - runner_group_id: 1, - labels: ['label1', 'label2'], - }); + expect(mockOctokit.actions.generateRunnerJitconfigForOrg).not.toBeCalled(); expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { Name: '/github-action-runners/default/runners/config/i-12345', Value: 'TEST_JIT_CONFIG_ORG', @@ -314,11 +296,11 @@ describe('scaleUp with GHES', () => { process.env.RUNNERS_MAXIMUM_COUNT = '2'; await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); expect(mockOctokit.actions.generateRunnerJitconfigForOrg).not.toBeCalled(); - expect(mockOctokit.actions.createRegistrationTokenForOrg).toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { Name: '/github-action-runners/default/runners/config/i-12345', Value: - '--url https://github.enterprise.something/Codertocat --token 1234abcd ' + + '--url https://github.enterprise.something/enterprise/ --token 1234abcd ' + '--labels label1,label2 --runnergroup Default', Type: 'SecureString', Tags: [ @@ -926,87 +908,846 @@ describe('scaleUp with Github Data Residency', () => { 10000, ); }); - describe('on repo level', () => { + + describe('on org level', () => { beforeEach(() => { - process.env.ENABLE_ORGANIZATION_RUNNERS = 'false'; - process.env.RUNNER_NAME_PREFIX = 'unit-test'; + process.env.ENABLE_ORGANIZATION_RUNNERS = 'true'; + process.env.ENABLE_EPHEMERAL_RUNNERS = 'true'; + 
process.env.RUNNER_NAME_PREFIX = 'unit-test-'; + process.env.RUNNER_GROUP_NAME = 'Default'; + process.env.SSM_CONFIG_PATH = '/github-action-runners/default/runners/config'; + process.env.SSM_TOKEN_PATH = '/github-action-runners/default/runners/config'; + process.env.RUNNER_LABELS = 'label1,label2'; + expectedRunnerParams = { ...EXPECTED_RUNNER_PARAMS }; - expectedRunnerParams.runnerType = 'Repo'; - expectedRunnerParams.runnerOwner = `${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`; - // `--url https://companyname.ghe.com${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`, - // `--token 1234abcd`, - // ]; + mockSSMClient.reset(); }); - it('gets the current repo level runners', async () => { + it('gets the current org level runners', async () => { await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); expect(listEC2Runners).toBeCalledWith({ environment: 'unit-test-environment', - runnerType: 'Repo', - runnerOwner: `${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`, + runnerType: 'Org', + runnerOwner: TEST_DATA.repositoryOwner, }); }); it('does not create a token when maximum runners has been reached', async () => { process.env.RUNNERS_MAXIMUM_COUNT = '1'; + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); }); - it('creates a token when maximum runners has not been reached', async () => { + it('does create a runner if maximum is set to -1', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '-1'; process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); - expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); - expect(mockOctokit.actions.createRegistrationTokenForRepo).toBeCalledWith({ - owner: TEST_DATA.repositoryOwner, - repo: TEST_DATA.repositoryName, - }); + 
expect(listEC2Runners).not.toHaveBeenCalled(); + expect(createRunner).toHaveBeenCalled(); }); - it('uses the default runner max count', async () => { - process.env.RUNNERS_MAXIMUM_COUNT = undefined; + it('creates a token when maximum runners has not been reached', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); - expect(mockOctokit.actions.createRegistrationTokenForRepo).toBeCalledWith({ - owner: TEST_DATA.repositoryOwner, - repo: TEST_DATA.repositoryName, + expect(mockOctokit.actions.createRegistrationTokenForOrg).toBeCalledWith({ + org: TEST_DATA.repositoryOwner, }); + expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); }); - it('creates a runner with correct config and labels', async () => { - process.env.RUNNER_LABELS = 'label1,label2'; + it('creates a runner with correct config', async () => { await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); expect(createRunner).toBeCalledWith(expectedRunnerParams); }); - it('creates a runner and ensure the group argument is ignored', async () => { + it('creates a runner with labels in a specific group', async () => { process.env.RUNNER_LABELS = 'label1,label2'; - process.env.RUNNER_GROUP_NAME = 'TEST_GROUP_IGNORED'; + process.env.RUNNER_GROUP_NAME = 'TEST_GROUP'; await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); expect(createRunner).toBeCalledWith(expectedRunnerParams); }); - it('Check error is thrown', async () => { - const mockCreateRunners = vi.mocked(createRunner); - mockCreateRunners.mockRejectedValue(new Error('no retry')); - await expect(scaleUpModule.scaleUp('aws:sqs', TEST_DATA)).rejects.toThrow('no retry'); - mockCreateRunners.mockReset(); + it('creates a runner with ami id override from ssm parameter', async () => { + process.env.AMI_ID_SSM_PARAMETER_NAME = 'my-ami-id-param'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith({ ...expectedRunnerParams, amiIdSsmParameterName: 
'my-ami-id-param' }); }); - }); -}); -function defaultOctokitMockImpl() { - mockOctokit.actions.getJobForWorkflowRun.mockImplementation(() => ({ - data: { - status: 'queued', - }, - })); - mockOctokit.paginate.mockImplementation(() => [ - { - id: 1, - name: 'Default', - }, - ]); + it('Throws an error if runner group doesnt exist for ephemeral runners', async () => { + process.env.RUNNER_GROUP_NAME = 'test-runner-group'; + mockSSMgetParameter.mockImplementation(async () => { + throw new Error('ParameterNotFound'); + }); + await expect(scaleUpModule.scaleUp('aws:sqs', TEST_DATA)).rejects.toBeInstanceOf(Error); + expect(mockOctokit.paginate).toHaveBeenCalledTimes(1); + }); + + it('Discards event if it is a User repo and org level runners is enabled', async () => { + process.env.ENABLE_ORGANIZATION_RUNNERS = 'true'; + const USER_REPO_TEST_DATA = { ...TEST_DATA }; + USER_REPO_TEST_DATA.repoOwnerType = 'User'; + await scaleUpModule.scaleUp('aws:sqs', USER_REPO_TEST_DATA); + expect(createRunner).not.toHaveBeenCalled(); + }); + + it('create SSM parameter for runner group id if it doesnt exist', async () => { + mockSSMgetParameter.mockImplementation(async () => { + throw new Error('ParameterNotFound'); + }); + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.paginate).toHaveBeenCalledTimes(1); + expect(mockSSMClient).toHaveReceivedCommandTimes(PutParameterCommand, 2); + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: `${process.env.SSM_CONFIG_PATH}/runner-group/${process.env.RUNNER_GROUP_NAME}`, + Value: '1', + Type: 'String', + }); + }); + + it('Does not create SSM parameter for runner group id if it exists', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.paginate).toHaveBeenCalledTimes(0); + expect(mockSSMClient).toHaveReceivedCommandTimes(PutParameterCommand, 1); + }); + + it('create start runner config for ephemeral runners ', async () => { + 
process.env.RUNNERS_MAXIMUM_COUNT = '2'; + + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.generateRunnerJitconfigForOrg).toBeCalledWith({ + org: TEST_DATA.repositoryOwner, + name: 'unit-test-i-12345', + runner_group_id: 1, + labels: ['label1', 'label2'], + }); + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: '/github-action-runners/default/runners/config/i-12345', + Value: 'TEST_JIT_CONFIG_ORG', + Type: 'SecureString', + Tags: [ + { + Key: 'InstanceId', + Value: 'i-12345', + }, + ], + }); + }); + + it('create start runner config for non-ephemeral runners ', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; + process.env.RUNNERS_MAXIMUM_COUNT = '2'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.generateRunnerJitconfigForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForOrg).toBeCalled(); + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: '/github-action-runners/default/runners/config/i-12345', + Value: + '--url https://companyname.ghe.com/Codertocat --token 1234abcd ' + + '--labels label1,label2 --runnergroup Default', + Type: 'SecureString', + Tags: [ + { + Key: 'InstanceId', + Value: 'i-12345', + }, + ], + }); + }); + it.each(RUNNER_TYPES)( + 'calls create start runner config of 40' + ' instances (ssm rate limit condition) to test time delay ', + async (type: RunnerType) => { + process.env.ENABLE_EPHEMERAL_RUNNERS = type === 'ephemeral' ? 
'true' : 'false'; + process.env.RUNNERS_MAXIMUM_COUNT = '40'; + mockCreateRunner.mockImplementation(async () => { + return instances; + }); + mockListRunners.mockImplementation(async () => { + return []; + }); + const startTime = performance.now(); + const instances = [ + 'i-1234', + 'i-5678', + 'i-5567', + 'i-5569', + 'i-5561', + 'i-5560', + 'i-5566', + 'i-5536', + 'i-5526', + 'i-5516', + 'i-122', + 'i-123', + 'i-124', + 'i-125', + 'i-126', + 'i-127', + 'i-128', + 'i-129', + 'i-130', + 'i-131', + 'i-132', + 'i-133', + 'i-134', + 'i-135', + 'i-136', + 'i-137', + 'i-138', + 'i-139', + 'i-140', + 'i-141', + 'i-142', + 'i-143', + 'i-144', + 'i-145', + 'i-146', + 'i-147', + 'i-148', + 'i-149', + 'i-150', + 'i-151', + ]; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + const endTime = performance.now(); + expect(endTime - startTime).toBeGreaterThan(1000); + expect(mockSSMClient).toHaveReceivedCommandTimes(PutParameterCommand, 40); + }, + 10000, + ); + }); + describe('on repo level', () => { + beforeEach(() => { + process.env.ENABLE_ORGANIZATION_RUNNERS = 'false'; + process.env.RUNNER_NAME_PREFIX = 'unit-test'; + expectedRunnerParams = { ...EXPECTED_RUNNER_PARAMS }; + expectedRunnerParams.runnerType = 'Repo'; + expectedRunnerParams.runnerOwner = `${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`; + // `--url https://github.enterprise.something/${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`, + // `--token 1234abcd`, + // ]; + }); + + it('gets the current repo level runners', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(listEC2Runners).toBeCalledWith({ + environment: 'unit-test-environment', + runnerType: 'Repo', + runnerOwner: `${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`, + }); + }); + + it('does not create a token when maximum runners has been reached', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '1'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + 
expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); + }); + + it('creates a token when maximum runners has not been reached', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForRepo).toBeCalledWith({ + owner: TEST_DATA.repositoryOwner, + repo: TEST_DATA.repositoryName, + }); + }); + + it('uses the default runner max count', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = undefined; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForRepo).toBeCalledWith({ + owner: TEST_DATA.repositoryOwner, + repo: TEST_DATA.repositoryName, + }); + }); + + it('creates a runner with correct config and labels', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('creates a runner and ensure the group argument is ignored', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + process.env.RUNNER_GROUP_NAME = 'TEST_GROUP_IGNORED'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('Check error is thrown', async () => { + const mockCreateRunners = vi.mocked(createRunner); + mockCreateRunners.mockRejectedValue(new Error('no retry')); + await expect(scaleUpModule.scaleUp('aws:sqs', TEST_DATA)).rejects.toThrow('no retry'); + mockCreateRunners.mockReset(); + }); + }); +}); + +describe('scaleUp with public GH', () => { + it('ignores non-sqs events', async () => { + expect.assertions(1); + await expect(scaleUpModule.scaleUp('aws:s3', TEST_DATA)).rejects.toEqual(Error('Cannot handle non-SQS events!')); + }); + + it('checks 
queued workflows', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.getJobForWorkflowRun).toBeCalledWith({ + job_id: TEST_DATA.id, + owner: TEST_DATA.repositoryOwner, + repo: TEST_DATA.repositoryName, + }); + }); + + it('not checking queued workflows', async () => { + process.env.ENABLE_JOB_QUEUED_CHECK = 'false'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.getJobForWorkflowRun).not.toBeCalled(); + }); + + it('does not list runners when no workflows are queued', async () => { + mockOctokit.actions.getJobForWorkflowRun.mockImplementation(() => ({ + data: { status: 'completed' }, + })); + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(listEC2Runners).not.toBeCalled(); + }); + + describe('on org level', () => { + beforeEach(() => { + process.env.ENABLE_ORGANIZATION_RUNNERS = 'true'; + process.env.RUNNER_NAME_PREFIX = 'unit-test'; + expectedRunnerParams = { ...EXPECTED_RUNNER_PARAMS }; + }); + + it('gets the current org level runners', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(listEC2Runners).toBeCalledWith({ + environment: 'unit-test-environment', + runnerType: 'Org', + runnerOwner: TEST_DATA.repositoryOwner, + }); + }); + + it('does not create a token when maximum runners has been reached', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '1'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); + }); + + it('creates a token when maximum runners has not been reached', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForOrg).toBeCalledWith({ + org: TEST_DATA.repositoryOwner, + }); + expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); + }); + + it('creates a runner with correct config', 
async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('creates a runner with labels in s specific group', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + process.env.RUNNER_GROUP_NAME = 'TEST_GROUP'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + }); + + describe('on repo level', () => { + beforeEach(() => { + mockSSMClient.reset(); + + process.env.ENABLE_ORGANIZATION_RUNNERS = 'false'; + process.env.RUNNER_NAME_PREFIX = 'unit-test'; + expectedRunnerParams = { ...EXPECTED_RUNNER_PARAMS }; + expectedRunnerParams.runnerType = 'Repo'; + expectedRunnerParams.runnerOwner = `${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`; + }); + + it('gets the current repo level runners', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(listEC2Runners).toBeCalledWith({ + environment: 'unit-test-environment', + runnerType: 'Repo', + runnerOwner: `${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`, + }); + }); + + it('does not create a token when maximum runners has been reached', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '1'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); + }); + + it('creates a token when maximum runners has not been reached', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForRepo).toBeCalledWith({ + owner: TEST_DATA.repositoryOwner, + repo: TEST_DATA.repositoryName, + }); + }); + + it('creates a runner with correct config and labels', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + await scaleUpModule.scaleUp('aws:sqs', 
TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('creates a runner with correct config and labels and on demand failover enabled.', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + process.env.ENABLE_ON_DEMAND_FAILOVER_FOR_ERRORS = JSON.stringify(['InsufficientInstanceCapacity']); + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith({ + ...expectedRunnerParams, + onDemandFailoverOnError: ['InsufficientInstanceCapacity'], + }); + }); + + it('creates a runner and ensure the group argument is ignored', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + process.env.RUNNER_GROUP_NAME = 'TEST_GROUP_IGNORED'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('ephemeral runners only run with workflow_job event, others should fail.', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'true'; + process.env.ENABLE_JOB_QUEUED_CHECK = 'false'; + await expect( + scaleUpModule.scaleUp('aws:sqs', { + ...TEST_DATA, + eventType: 'check_run', + }), + ).rejects.toBeInstanceOf(Error); + }); + + it('creates a ephemeral runner with JIT config.', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'true'; + process.env.ENABLE_JOB_QUEUED_CHECK = 'false'; + process.env.SSM_TOKEN_PATH = '/github-action-runners/default/runners/config'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.getJobForWorkflowRun).not.toBeCalled(); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: '/github-action-runners/default/runners/config/i-12345', + Value: 'TEST_JIT_CONFIG_REPO', + Type: 'SecureString', + Tags: [ + { + Key: 'InstanceId', + Value: 'i-12345', + }, + ], + }); + }); + + it('creates a ephemeral runner with registration token.', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS 
= 'true'; + process.env.ENABLE_JIT_CONFIG = 'false'; + process.env.ENABLE_JOB_QUEUED_CHECK = 'false'; + process.env.SSM_TOKEN_PATH = '/github-action-runners/default/runners/config'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.getJobForWorkflowRun).not.toBeCalled(); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: '/github-action-runners/default/runners/config/i-12345', + Value: '--url https://github.com/Codertocat/hello-world --token 1234abcd --ephemeral', + Type: 'SecureString', + Tags: [ + { + Key: 'InstanceId', + Value: 'i-12345', + }, + ], + }); + }); + + it('JIT config is ignored for non-ephemeral runners.', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; + process.env.ENABLE_JIT_CONFIG = 'true'; + process.env.ENABLE_JOB_QUEUED_CHECK = 'false'; + process.env.RUNNER_LABELS = 'jit'; + process.env.SSM_TOKEN_PATH = '/github-action-runners/default/runners/config'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.getJobForWorkflowRun).not.toBeCalled(); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: '/github-action-runners/default/runners/config/i-12345', + Value: '--url https://github.com/Codertocat/hello-world --token 1234abcd --labels jit', + Type: 'SecureString', + Tags: [ + { + Key: 'InstanceId', + Value: 'i-12345', + }, + ], + }); + }); + + it('creates a ephemeral runner after checking job is queued.', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'true'; + process.env.ENABLE_JOB_QUEUED_CHECK = 'true'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.getJobForWorkflowRun).toBeCalled(); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('disable auto update on the runner.', async () => { + 
process.env.DISABLE_RUNNER_AUTOUPDATE = 'true'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('Scaling error should cause reject so retry can be triggered.', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '1'; + process.env.ENABLE_EPHEMERAL_RUNNERS = 'true'; + await expect(scaleUpModule.scaleUp('aws:sqs', TEST_DATA)).rejects.toBeInstanceOf(ScaleError); + }); + }); +}); + +describe('scaleUp with Github Data Residency', () => { + beforeEach(() => { + process.env.GHES_URL = 'https://companyname.ghe.com'; + }); + + it('ignores non-sqs events', async () => { + expect.assertions(1); + await expect(scaleUpModule.scaleUp('aws:s3', TEST_DATA)).rejects.toEqual(Error('Cannot handle non-SQS events!')); + }); + + it('checks queued workflows', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.getJobForWorkflowRun).toBeCalledWith({ + job_id: TEST_DATA.id, + owner: TEST_DATA.repositoryOwner, + repo: TEST_DATA.repositoryName, + }); + }); + + it('does not list runners when no workflows are queued', async () => { + mockOctokit.actions.getJobForWorkflowRun.mockImplementation(() => ({ + data: { total_count: 0 }, + })); + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(listEC2Runners).not.toBeCalled(); + }); + + describe('on org level', () => { + beforeEach(() => { + process.env.ENABLE_ORGANIZATION_RUNNERS = 'true'; + process.env.ENABLE_EPHEMERAL_RUNNERS = 'true'; + process.env.RUNNER_NAME_PREFIX = 'unit-test-'; + process.env.RUNNER_GROUP_NAME = 'Default'; + process.env.SSM_CONFIG_PATH = '/github-action-runners/default/runners/config'; + process.env.SSM_TOKEN_PATH = '/github-action-runners/default/runners/config'; + process.env.RUNNER_LABELS = 'label1,label2'; + + expectedRunnerParams = { ...EXPECTED_RUNNER_PARAMS }; + mockSSMClient.reset(); + }); + + it('gets the current org level runners', async () => { + await scaleUpModule.scaleUp('aws:sqs', 
TEST_DATA); + expect(listEC2Runners).toBeCalledWith({ + environment: 'unit-test-environment', + runnerType: 'Org', + runnerOwner: TEST_DATA.repositoryOwner, + }); + }); + + it('does not create a token when maximum runners has been reached', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '1'; + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); + }); + + it('does create a runner if maximum is set to -1', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '-1'; + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(listEC2Runners).not.toHaveBeenCalled(); + expect(createRunner).toHaveBeenCalled(); + }); + + it('creates a token when maximum runners has not been reached', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForOrg).toBeCalledWith({ + org: TEST_DATA.repositoryOwner, + }); + expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); + }); + + it('creates a runner with correct config', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('creates a runner with labels in a specific group', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + process.env.RUNNER_GROUP_NAME = 'TEST_GROUP'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('creates a runner with ami id override from ssm parameter', async () => { + process.env.AMI_ID_SSM_PARAMETER_NAME = 'my-ami-id-param'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith({ ...expectedRunnerParams, 
amiIdSsmParameterName: 'my-ami-id-param' }); + }); + + it('Throws an error if runner group doesnt exist for ephemeral runners', async () => { + process.env.RUNNER_GROUP_NAME = 'test-runner-group'; + mockSSMgetParameter.mockImplementation(async () => { + throw new Error('ParameterNotFound'); + }); + await expect(scaleUpModule.scaleUp('aws:sqs', TEST_DATA)).rejects.toBeInstanceOf(Error); + expect(mockOctokit.paginate).toHaveBeenCalledTimes(1); + }); + + it('Discards event if it is a User repo and org level runners is enabled', async () => { + process.env.ENABLE_ORGANIZATION_RUNNERS = 'true'; + const USER_REPO_TEST_DATA = { ...TEST_DATA }; + USER_REPO_TEST_DATA.repoOwnerType = 'User'; + await scaleUpModule.scaleUp('aws:sqs', USER_REPO_TEST_DATA); + expect(createRunner).not.toHaveBeenCalled(); + }); + + it('create SSM parameter for runner group id if it doesnt exist', async () => { + mockSSMgetParameter.mockImplementation(async () => { + throw new Error('ParameterNotFound'); + }); + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.paginate).toHaveBeenCalledTimes(1); + expect(mockSSMClient).toHaveReceivedCommandTimes(PutParameterCommand, 2); + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: `${process.env.SSM_CONFIG_PATH}/runner-group/${process.env.RUNNER_GROUP_NAME}`, + Value: '1', + Type: 'String', + }); + }); + + it('Does not create SSM parameter for runner group id if it exists', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.paginate).toHaveBeenCalledTimes(0); + expect(mockSSMClient).toHaveReceivedCommandTimes(PutParameterCommand, 1); + }); + + it('create start runner config for ephemeral runners ', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '2'; + + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.generateRunnerJitconfigForOrg).toBeCalledWith({ + org: TEST_DATA.repositoryOwner, + name: 'unit-test-i-12345', + 
runner_group_id: 1, + labels: ['label1', 'label2'], + }); + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: '/github-action-runners/default/runners/config/i-12345', + Value: 'TEST_JIT_CONFIG_ORG', + Type: 'SecureString', + Tags: [ + { + Key: 'InstanceId', + Value: 'i-12345', + }, + ], + }); + }); + + it('create start runner config for non-ephemeral runners ', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; + process.env.RUNNERS_MAXIMUM_COUNT = '2'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.generateRunnerJitconfigForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForOrg).toBeCalled(); + expect(mockSSMClient).toHaveReceivedNthSpecificCommandWith(1, PutParameterCommand, { + Name: '/github-action-runners/default/runners/config/i-12345', + Value: + '--url https://companyname.ghe.com/Codertocat --token 1234abcd ' + + '--labels label1,label2 --runnergroup Default', + Type: 'SecureString', + Tags: [ + { + Key: 'InstanceId', + Value: 'i-12345', + }, + ], + }); + }); + it.each(RUNNER_TYPES)( + 'calls create start runner config of 40' + ' instances (ssm rate limit condition) to test time delay ', + async (type: RunnerType) => { + process.env.ENABLE_EPHEMERAL_RUNNERS = type === 'ephemeral' ? 
'true' : 'false'; + process.env.RUNNERS_MAXIMUM_COUNT = '40'; + mockCreateRunner.mockImplementation(async () => { + return instances; + }); + mockListRunners.mockImplementation(async () => { + return []; + }); + const startTime = performance.now(); + const instances = [ + 'i-1234', + 'i-5678', + 'i-5567', + 'i-5569', + 'i-5561', + 'i-5560', + 'i-5566', + 'i-5536', + 'i-5526', + 'i-5516', + 'i-122', + 'i-123', + 'i-124', + 'i-125', + 'i-126', + 'i-127', + 'i-128', + 'i-129', + 'i-130', + 'i-131', + 'i-132', + 'i-133', + 'i-134', + 'i-135', + 'i-136', + 'i-137', + 'i-138', + 'i-139', + 'i-140', + 'i-141', + 'i-142', + 'i-143', + 'i-144', + 'i-145', + 'i-146', + 'i-147', + 'i-148', + 'i-149', + 'i-150', + 'i-151', + ]; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + const endTime = performance.now(); + expect(endTime - startTime).toBeGreaterThan(1000); + expect(mockSSMClient).toHaveReceivedCommandTimes(PutParameterCommand, 40); + }, + 10000, + ); + }); + describe('on repo level', () => { + beforeEach(() => { + process.env.ENABLE_ORGANIZATION_RUNNERS = 'false'; + process.env.RUNNER_NAME_PREFIX = 'unit-test'; + expectedRunnerParams = { ...EXPECTED_RUNNER_PARAMS }; + expectedRunnerParams.runnerType = 'Repo'; + expectedRunnerParams.runnerOwner = `${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`; + // `--url https://companyname.ghe.com${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`, + // `--token 1234abcd`, + // ]; + }); + + it('gets the current repo level runners', async () => { + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(listEC2Runners).toBeCalledWith({ + environment: 'unit-test-environment', + runnerType: 'Repo', + runnerOwner: `${TEST_DATA.repositoryOwner}/${TEST_DATA.repositoryName}`, + }); + }); + + it('does not create a token when maximum runners has been reached', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = '1'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + 
expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForRepo).not.toBeCalled(); + }); + + it('creates a token when maximum runners has not been reached', async () => { + process.env.ENABLE_EPHEMERAL_RUNNERS = 'false'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForOrg).not.toBeCalled(); + expect(mockOctokit.actions.createRegistrationTokenForRepo).toBeCalledWith({ + owner: TEST_DATA.repositoryOwner, + repo: TEST_DATA.repositoryName, + }); + }); + + it('uses the default runner max count', async () => { + process.env.RUNNERS_MAXIMUM_COUNT = undefined; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(mockOctokit.actions.createRegistrationTokenForRepo).toBeCalledWith({ + owner: TEST_DATA.repositoryOwner, + repo: TEST_DATA.repositoryName, + }); + }); + + it('creates a runner with correct config and labels', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('creates a runner and ensure the group argument is ignored', async () => { + process.env.RUNNER_LABELS = 'label1,label2'; + process.env.RUNNER_GROUP_NAME = 'TEST_GROUP_IGNORED'; + await scaleUpModule.scaleUp('aws:sqs', TEST_DATA); + expect(createRunner).toBeCalledWith(expectedRunnerParams); + }); + + it('Check error is thrown', async () => { + const mockCreateRunners = vi.mocked(createRunner); + mockCreateRunners.mockRejectedValue(new Error('no retry')); + await expect(scaleUpModule.scaleUp('aws:sqs', TEST_DATA)).rejects.toThrow('no retry'); + mockCreateRunners.mockReset(); + }); + }); +}); + +function defaultOctokitMockImpl() { + mockOctokit.actions.getJobForWorkflowRun.mockImplementation(() => ({ + data: { + status: 'queued', + }, + })); + mockOctokit.paginate.mockImplementation(() => [ + { + id: 1, + name: 'Default', + }, + ]); + 
routeHandlers.set('POST /enterprises/{enterprise}/actions/runners/generate-jitconfig', () => + Promise.resolve({ + data: { + runner: { id: 9876543210 }, + encoded_jit_config: 'TEST_JIT_CONFIG_ORG', + }, + }), + ); + routeHandlers.set('POST /enterprises/{enterprise}/actions/runners/registration-token', () => + Promise.resolve({ + data: { + token: '1234abcd', + }, + }), + ); mockOctokit.actions.generateRunnerJitconfigForOrg.mockImplementation(() => ({ data: { runner: { id: 9876543210 }, @@ -1053,6 +1794,8 @@ function defaultSSMGetParameterMockImpl() { return '1'; } else if (name === `${process.env.PARAMETER_GITHUB_APP_ID_NAME}`) { return `${process.env.GITHUB_APP_ID}`; + } else if (name === `${process.env.PARAMETER_ENTERPRISE_PAT_NAME}`) { + return `${process.env.GITHUB_ENTERPRISE_PAT}`; } else { throw new Error(`ParameterNotFound: ${name}`); } diff --git a/lambdas/functions/control-plane/src/scale-runners/scale-up.ts b/lambdas/functions/control-plane/src/scale-runners/scale-up.ts index 638edd3232..97fa5dd9cb 100644 --- a/lambdas/functions/control-plane/src/scale-runners/scale-up.ts +++ b/lambdas/functions/control-plane/src/scale-runners/scale-up.ts @@ -45,7 +45,7 @@ interface CreateGitHubRunnerConfig { runnerGroup: string; runnerNamePrefix: string; runnerOwner: string; - runnerType: 'Org' | 'Repo'; + runnerType: 'Enterprise' | 'Org' | 'Repo'; disableAutoUpdate: boolean; ssmTokenPath: string; ssmConfigPath: string; @@ -63,8 +63,9 @@ interface CreateEC2RunnerConfig { } function generateRunnerServiceConfig(githubRunnerConfig: CreateGitHubRunnerConfig, token: string) { + const enterpriseContextPath = githubRunnerConfig.runnerType === 'Enterprise' ? 'enterprise/' : ''; const config = [ - `--url ${githubRunnerConfig.ghesBaseUrl ?? 'https://github.com'}/${githubRunnerConfig.runnerOwner}`, + `--url ${githubRunnerConfig.ghesBaseUrl ?? 
'https://github.com'}/${enterpriseContextPath}${githubRunnerConfig.runnerOwner}`, `--token ${token}`, ]; @@ -76,7 +77,10 @@ function generateRunnerServiceConfig(githubRunnerConfig: CreateGitHubRunnerConfi config.push('--disableupdate'); } - if (githubRunnerConfig.runnerType === 'Org' && githubRunnerConfig.runnerGroup !== undefined) { + if ( + (githubRunnerConfig.runnerType === 'Org' || githubRunnerConfig.runnerType === 'Enterprise') && + githubRunnerConfig.runnerGroup !== undefined + ) { config.push(`--runnergroup ${githubRunnerConfig.runnerGroup}`); } @@ -89,12 +93,16 @@ function generateRunnerServiceConfig(githubRunnerConfig: CreateGitHubRunnerConfi async function getGithubRunnerRegistrationToken(githubRunnerConfig: CreateGitHubRunnerConfig, ghClient: Octokit) { const registrationToken = - githubRunnerConfig.runnerType === 'Org' - ? await ghClient.actions.createRegistrationTokenForOrg({ org: githubRunnerConfig.runnerOwner }) - : await ghClient.actions.createRegistrationTokenForRepo({ - owner: githubRunnerConfig.runnerOwner.split('/')[0], - repo: githubRunnerConfig.runnerOwner.split('/')[1], - }); + githubRunnerConfig.runnerType === 'Enterprise' + ? await ghClient.request('POST /enterprises/{enterprise}/actions/runners/registration-token', { + enterprise: githubRunnerConfig.runnerOwner, + }) + : githubRunnerConfig.runnerType === 'Org' + ? 
await ghClient.actions.createRegistrationTokenForOrg({ org: githubRunnerConfig.runnerOwner }) + : await ghClient.actions.createRegistrationTokenForRepo({ + owner: githubRunnerConfig.runnerOwner.split('/')[0], + repo: githubRunnerConfig.runnerOwner.split('/')[1], + }); const appId = parseInt(await getParameter(process.env.PARAMETER_GITHUB_APP_ID_NAME)); logger.info('App id from SSM', { appId: appId }); @@ -158,7 +166,10 @@ export async function isJobQueued(githubInstallationClient: Octokit, payload: Ac async function getRunnerGroupId(githubRunnerConfig: CreateGitHubRunnerConfig, ghClient: Octokit): Promise { // if the runnerType is Repo, then runnerGroupId is default to 1 let runnerGroupId: number | undefined = 1; - if (githubRunnerConfig.runnerType === 'Org' && githubRunnerConfig.runnerGroup !== undefined) { + if ( + (githubRunnerConfig.runnerType === 'Enterprise' || githubRunnerConfig.runnerType === 'Org') && + githubRunnerConfig.runnerGroup !== undefined + ) { let runnerGroup: string | undefined; // check if runner group id is already stored in SSM Parameter Store and // use it if it exists to avoid API call to GitHub @@ -195,10 +206,16 @@ async function getRunnerGroupId(githubRunnerConfig: CreateGitHubRunnerConfig, gh } async function getRunnerGroupByName(ghClient: Octokit, githubRunnerConfig: CreateGitHubRunnerConfig): Promise { - const runnerGroups: RunnerGroup[] = await ghClient.paginate(`GET /orgs/{org}/actions/runner-groups`, { - org: githubRunnerConfig.runnerOwner, - per_page: 100, - }); + const runnerGroups: RunnerGroup[] = + githubRunnerConfig.runnerType === 'Enterprise' + ? 
await ghClient.paginate(`GET /enterprises/{enterprise}/actions/runner-groups`, { + enterprise: githubRunnerConfig.runnerOwner, + per_page: 100, + }) + : await ghClient.paginate(`GET /orgs/{org}/actions/runner-groups`, { + org: githubRunnerConfig.runnerOwner, + per_page: 100, + }); const runnerGroupId = runnerGroups.find((runnerGroup) => runnerGroup.name === githubRunnerConfig.runnerGroup)?.id; if (runnerGroupId === undefined) { @@ -228,7 +245,9 @@ export async function scaleUp(eventSource: string, payload: ActionRequestMessage logger.info(`Received ${payload.eventType} from ${payload.repositoryOwner}/${payload.repositoryName}`); if (eventSource !== 'aws:sqs') throw Error('Cannot handle non-SQS events!'); - const enableOrgLevel = yn(process.env.ENABLE_ORGANIZATION_RUNNERS, { default: true }); + const enableEnterpriseLevel = yn(process.env.ENABLE_ENTERPRISE_RUNNERS, { default: false }); + const enableOrgLevel = enableEnterpriseLevel ? false : yn(process.env.ENABLE_ORGANIZATION_RUNNERS, { default: true }); // Enterprise level takes precedence + const enterpriseSlug = process.env.ENTERPRISE_SLUG ?? ''; const maximumRunners = parseInt(process.env.RUNNERS_MAXIMUM_COUNT || '3'); const runnerLabels = process.env.RUNNER_LABELS || ''; const runnerGroup = process.env.RUNNER_GROUP_NAME || 'Default'; @@ -270,8 +289,12 @@ export async function scaleUp(eventSource: string, payload: ActionRequestMessage } const ephemeral = ephemeralEnabled && payload.eventType === 'workflow_job'; - const runnerType = enableOrgLevel ? 'Org' : 'Repo'; - const runnerOwner = enableOrgLevel ? payload.repositoryOwner : `${payload.repositoryOwner}/${payload.repositoryName}`; + const runnerType = enableEnterpriseLevel ? 'Enterprise' : enableOrgLevel ? 'Org' : 'Repo'; + const runnerOwner = enableEnterpriseLevel + ? enterpriseSlug + : enableOrgLevel + ? 
payload.repositoryOwner + : `${payload.repositoryOwner}/${payload.repositoryName}`; addPersistentContextToChildLogger({ runner: { @@ -289,11 +312,17 @@ export async function scaleUp(eventSource: string, payload: ActionRequestMessage const { ghesApiUrl, ghesBaseUrl } = getGitHubEnterpriseApiUrl(); - const installationId = await getInstallationId(ghesApiUrl, enableOrgLevel, payload); - const ghAuth = await createGithubInstallationAuth(installationId, ghesApiUrl); - const githubInstallationClient = await createOctokitClient(ghAuth.token, ghesApiUrl); + const installationId = enableEnterpriseLevel + ? undefined // Enterprise level does not use installationId + : await getInstallationId(ghesApiUrl, enableOrgLevel, payload); - if (!enableJobQueuedCheck || (await isJobQueued(githubInstallationClient, payload))) { + const ghToken = enableEnterpriseLevel + ? await getParameter(process.env.PARAMETER_ENTERPRISE_PAT_NAME) + : (await createGithubInstallationAuth(installationId, ghesApiUrl)).token; + + const githubClient = await createOctokitClient(ghToken, ghesApiUrl); + + if (!enableJobQueuedCheck || (await isJobQueued(githubClient, payload))) { let scaleUp = true; if (maximumRunners !== -1) { const currentRunners = await listEC2Runners({ @@ -336,7 +365,7 @@ export async function scaleUp(eventSource: string, payload: ActionRequestMessage tracingEnabled, onDemandFailoverOnError, }, - githubInstallationClient, + githubClient, ); await publishRetryMessage(payload); @@ -440,20 +469,27 @@ async function createJitConfig(githubRunnerConfig: CreateGitHubRunnerConfig, ins }; logger.debug(`Runner name: ${ephemeralRunnerConfig.runnerName}`); const runnerConfig = - githubRunnerConfig.runnerType === 'Org' - ? await ghClient.actions.generateRunnerJitconfigForOrg({ - org: githubRunnerConfig.runnerOwner, + githubRunnerConfig.runnerType === 'Enterprise' + ? 
await ghClient.request('POST /enterprises/{enterprise}/actions/runners/generate-jitconfig', { + enterprise: githubRunnerConfig.runnerOwner, name: ephemeralRunnerConfig.runnerName, runner_group_id: ephemeralRunnerConfig.runnerGroupId, labels: ephemeralRunnerConfig.runnerLabels, }) - : await ghClient.actions.generateRunnerJitconfigForRepo({ - owner: githubRunnerConfig.runnerOwner.split('/')[0], - repo: githubRunnerConfig.runnerOwner.split('/')[1], - name: ephemeralRunnerConfig.runnerName, - runner_group_id: ephemeralRunnerConfig.runnerGroupId, - labels: ephemeralRunnerConfig.runnerLabels, - }); + : githubRunnerConfig.runnerType === 'Org' + ? await ghClient.actions.generateRunnerJitconfigForOrg({ + org: githubRunnerConfig.runnerOwner, + name: ephemeralRunnerConfig.runnerName, + runner_group_id: ephemeralRunnerConfig.runnerGroupId, + labels: ephemeralRunnerConfig.runnerLabels, + }) + : await ghClient.actions.generateRunnerJitconfigForRepo({ + owner: githubRunnerConfig.runnerOwner.split('/')[0], + repo: githubRunnerConfig.runnerOwner.split('/')[1], + name: ephemeralRunnerConfig.runnerName, + runner_group_id: ephemeralRunnerConfig.runnerGroupId, + labels: ephemeralRunnerConfig.runnerLabels, + }); metricGitHubAppRateLimit(runnerConfig.headers); diff --git a/main.tf b/main.tf index 9c72614808..137f89ee51 100644 --- a/main.tf +++ b/main.tf @@ -4,11 +4,13 @@ locals { }) github_app_parameters = { - id = coalesce(var.github_app.id_ssm, module.ssm.parameters.github_app_id) - key_base64 = coalesce(var.github_app.key_base64_ssm, module.ssm.parameters.github_app_key_base64) + id = var.enterprise_pat == null ? coalesce(var.github_app.id_ssm, module.ssm.parameters.github_app_id) : null + key_base64 = var.enterprise_pat == null ? coalesce(var.github_app.key_base64_ssm, module.ssm.parameters.github_app_key_base64) : null webhook_secret = coalesce(var.github_app.webhook_secret_ssm, module.ssm.parameters.github_app_webhook_secret) } + enterprise_pat = var.enterprise_pat != null ? 
module.ssm.parameters.enterprise_pat : null + default_runner_labels = distinct(concat(["self-hosted", var.runner_os, var.runner_architecture])) runner_labels = (var.runner_disable_default_labels == false) ? sort(concat(local.default_runner_labels, var.runner_extra_labels)) : var.runner_extra_labels @@ -88,11 +90,12 @@ resource "aws_sqs_queue" "queued_builds_dlq" { } module "ssm" { - source = "./modules/ssm" - kms_key_arn = var.kms_key_arn - path_prefix = "${local.ssm_root_path}/${var.ssm_paths.app}" - github_app = var.github_app - tags = local.tags + source = "./modules/ssm" + kms_key_arn = var.kms_key_arn + path_prefix = "${local.ssm_root_path}/${var.ssm_paths.app}" + github_app = var.github_app + enterprise_pat = var.enterprise_pat + tags = local.tags } module "webhook" { @@ -186,7 +189,10 @@ module "runners" { sqs_build_queue = aws_sqs_queue.queued_builds github_app_parameters = local.github_app_parameters + enterprise_pat = local.enterprise_pat enable_organization_runners = var.enable_organization_runners + enable_enterprise_runners = var.enable_enterprise_runners + enterprise_slug = var.enterprise_slug enable_ephemeral_runners = var.enable_ephemeral_runners enable_jit_config = var.enable_jit_config enable_job_queued_check = var.enable_job_queued_check diff --git a/modules/multi-runner/README.md b/modules/multi-runner/README.md index 759cb61832..d096dd1a72 100644 --- a/modules/multi-runner/README.md +++ b/modules/multi-runner/README.md @@ -128,10 +128,11 @@ module "multi-runner" { | [cloudwatch\_config](#input\_cloudwatch\_config) | (optional) Replaces the module default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details. | `string` | `null` | no | | [enable\_ami\_housekeeper](#input\_enable\_ami\_housekeeper) | Option to disable the lambda to clean up old AMIs. 
| `bool` | `false` | no | | [enable\_managed\_runner\_security\_group](#input\_enable\_managed\_runner\_security\_group) | Enabling the default managed security group creation. Unmanaged security groups can be specified via `runner_additional_security_group_ids`. | `bool` | `true` | no | +| [enterprise\_pat](#input\_enterprise\_pat) | GitHub enterprise PAT. Used when not authenticating via GitHub App. | `string` | `null` | no | | [eventbridge](#input\_eventbridge) | Enable the use of EventBridge by the module. By enabling this feature events will be put on the EventBridge by the webhook instead of directly dispatching to queues for scaling. |
object({
enable = optional(bool, true)
accept_events = optional(list(string), [])
})
| `{}` | no | | [ghes\_ssl\_verify](#input\_ghes\_ssl\_verify) | GitHub Enterprise SSL verification. Set to 'false' when custom certificate (chains) is used for GitHub Enterprise Server (insecure). | `bool` | `true` | no | | [ghes\_url](#input\_ghes\_url) | GitHub Enterprise Server URL. Example: https://github.internal.co - DO NOT SET IF USING PUBLIC GITHUB. .However if you are using Github Enterprise Cloud with data-residency (ghe.com), set the endpoint here. Example - https://companyname.ghe.com\| | `string` | `null` | no | -| [github\_app](#input\_github\_app) | GitHub app parameters, see your github app.
You can optionally create the SSM parameters yourself and provide the ARN and name here, through the `*_ssm` attributes.
If you chose to provide the configuration values directly here,
please ensure the key is the base64-encoded `.pem` file (the output of `base64 app.private-key.pem`, not the content of `private-key.pem`).
Note: the provided SSM parameters arn and name have a precedence over the actual value (i.e `key_base64_ssm` has a precedence over `key_base64` etc). |
object({
key_base64 = optional(string)
key_base64_ssm = optional(object({
arn = string
name = string
}))
id = optional(string)
id_ssm = optional(object({
arn = string
name = string
}))
webhook_secret = optional(string)
webhook_secret_ssm = optional(object({
arn = string
name = string
}))
})
| n/a | yes | +| [github\_app](#input\_github\_app) | GitHub app parameters. |
object({
key_base64 = optional(string, null)
key_base64_ssm = optional(object({ arn = string, name = string }))
id = optional(string, null)
id_ssm = optional(object({ arn = string, name = string }))
webhook_secret = optional(string)
webhook_secret_ssm = optional(object({ arn = string, name = string }))
})
| n/a | yes | | [instance\_profile\_path](#input\_instance\_profile\_path) | The path that will be added to the instance\_profile, if not set the environment name will be used. | `string` | `null` | no | | [instance\_termination\_watcher](#input\_instance\_termination\_watcher) | Configuration for the spot termination watcher lambda function. This feature is Beta, changes will not trigger a major release as long in beta.

`enable`: Enable or disable the spot termination watcher.
`memory_size`: Memory size linit in MB of the lambda.
`s3_key`: S3 key for syncer lambda function. Required if using S3 bucket to specify lambdas.
`s3_object_version`: S3 object version for syncer lambda function. Useful if S3 versioning is enabled on source bucket.
`timeout`: Time out of the lambda in seconds.
`zip`: File location of the lambda zip file. |
object({
enable = optional(bool, false)
features = optional(object({
enable_spot_termination_handler = optional(bool, true)
enable_spot_termination_notification_watcher = optional(bool, true)
}), {})
memory_size = optional(number, null)
s3_key = optional(string, null)
s3_object_version = optional(string, null)
timeout = optional(number, null)
zip = optional(string, null)
})
| `{}` | no | | [key\_name](#input\_key\_name) | Key pair name | `string` | `null` | no | @@ -148,7 +149,7 @@ module "multi-runner" { | [logging\_retention\_in\_days](#input\_logging\_retention\_in\_days) | Specifies the number of days you want to retain log events for the lambda log group. Possible values are: 0, 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653. | `number` | `180` | no | | [matcher\_config\_parameter\_store\_tier](#input\_matcher\_config\_parameter\_store\_tier) | The tier of the parameter store for the matcher configuration. Valid values are `Standard`, and `Advanced`. | `string` | `"Standard"` | no | | [metrics](#input\_metrics) | Configuration for metrics created by the module, by default metrics are disabled to avoid additional costs. When metrics are enable all metrics are created unless explicit configured otherwise. |
object({
enable = optional(bool, false)
namespace = optional(string, "GitHub Runners")
metric = optional(object({
enable_github_app_rate_limit = optional(bool, true)
enable_job_retry = optional(bool, true)
enable_spot_termination_warning = optional(bool, true)
}), {})
})
| `{}` | no | -| [multi\_runner\_config](#input\_multi\_runner\_config) | multi\_runner\_config = {
runner\_config: {
runner\_os: "The EC2 Operating System type to use for action runner instances (linux,windows)."
runner\_architecture: "The platform architecture of the runner instance\_type."
runner\_metadata\_options: "(Optional) Metadata options for the ec2 runner instances."
ami: "(Optional) AMI configuration for the action runner instances. This object allows you to specify all AMI-related settings in one place."
ami\_filter: "(Optional) List of maps used to create the AMI filter for the action runner AMI. By default amazon linux 2 is used."
ami\_owners: "(Optional) The list of owners used to select the AMI of action runner instances."
create\_service\_linked\_role\_spot: (Optional) create the serviced linked role for spot instances that is required by the scale-up lambda.
credit\_specification: "(Optional) The credit specification of the runner instance\_type. Can be unset, `standard` or `unlimited`.
delay\_webhook\_event: "The number of seconds the event accepted by the webhook is invisible on the queue before the scale up lambda will receive the event."
disable\_runner\_autoupdate: "Disable the auto update of the github runner agent. Be aware there is a grace period of 30 days, see also the [GitHub article](https://github.blog/changelog/2022-02-01-github-actions-self-hosted-runners-can-now-disable-automatic-updates/)"
ebs\_optimized: "The EC2 EBS optimized configuration."
enable\_ephemeral\_runners: "Enable ephemeral runners, runners will only be used once."
enable\_job\_queued\_check: "Enables JIT configuration for creating runners instead of registration token based registraton. JIT configuration will only be applied for ephemeral runners. By default JIT confiugration is enabled for ephemeral runners an can be disabled via this override. When running on GHES without support for JIT configuration this variable should be set to true for ephemeral runners."
enable\_on\_demand\_failover\_for\_errors: "Enable on-demand failover. For example to fall back to on demand when no spot capacity is available the variable can be set to `InsufficientInstanceCapacity`. When not defined the default behavior is to retry later."
enable\_organization\_runners: "Register runners to organization, instead of repo level"
enable\_runner\_binaries\_syncer: "Option to disable the lambda to sync GitHub runner distribution, useful when using a pre-build AMI."
enable\_ssm\_on\_runners: "Enable to allow access the runner instances for debugging purposes via SSM. Note that this adds additional permissions to the runner instances."
enable\_userdata: "Should the userdata script be enabled for the runner. Set this to false if you are using your own prebuilt AMI."
instance\_allocation\_strategy: "The allocation strategy for spot instances. AWS recommends to use `capacity-optimized` however the AWS default is `lowest-price`."
instance\_max\_spot\_price: "Max price price for spot intances per hour. This variable will be passed to the create fleet as max spot price for the fleet."
instance\_target\_capacity\_type: "Default lifecycle used for runner instances, can be either `spot` or `on-demand`."
instance\_types: "List of instance types for the action runner. Defaults are based on runner\_os (al2023 for linux and Windows Server Core for win)."
job\_queue\_retention\_in\_seconds: "The number of seconds the job is held in the queue before it is purged"
minimum\_running\_time\_in\_minutes: "The time an ec2 action runner should be running at minimum before terminated if not busy."
pool\_runner\_owner: "The pool will deploy runners to the GitHub org ID, set this value to the org to which you want the runners deployed. Repo level is not supported."
runner\_additional\_security\_group\_ids: "List of additional security groups IDs to apply to the runner. If added outside the multi\_runner\_config block, the additional security group(s) will be applied to all runner configs. If added inside the multi\_runner\_config, the additional security group(s) will be applied to the individual runner."
runner\_as\_root: "Run the action runner under the root user. Variable `runner_run_as` will be ignored."
runner\_boot\_time\_in\_minutes: "The minimum time for an EC2 runner to boot and register as a runner."
runner\_disable\_default\_labels: "Disable default labels for the runners (os, architecture and `self-hosted`). If enabled, the runner will only have the extra labels provided in `runner_extra_labels`. In case you on own start script is used, this configuration parameter needs to be parsed via SSM."
runner\_extra\_labels: "Extra (custom) labels for the runners (GitHub). Separate each label by a comma. Labels checks on the webhook can be enforced by setting `multi_runner_config.matcherConfig.exactMatch`. GitHub read-only labels should not be provided."
runner\_group\_name: "Name of the runner group."
runner\_name\_prefix: "Prefix for the GitHub runner name."
runner\_run\_as: "Run the GitHub actions agent as user."
runners\_maximum\_count: "The maximum number of runners that will be created. Setting the variable to `-1` desiables the maximum check."
scale\_down\_schedule\_expression: "Scheduler expression to check every x for scale down."
scale\_up\_reserved\_concurrent\_executions: "Amount of reserved concurrent executions for the scale-up lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations."
userdata\_template: "Alternative user-data template, replacing the default template. By providing your own user\_data you have to take care of installing all required software, including the action runner. Variables userdata\_pre/post\_install are ignored."
enable\_jit\_config "Overwrite the default behavior for JIT configuration. By default JIT configuration is enabled for ephemeral runners and disabled for non-ephemeral runners. In case of GHES check first if the JIT config API is avaialbe. In case you upgradeing from 3.x to 4.x you can set `enable_jit_config` to `false` to avoid a breaking change when having your own AMI."
enable\_runner\_detailed\_monitoring: "Should detailed monitoring be enabled for the runner. Set this to true if you want to use detailed monitoring. See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html for details."
enable\_cloudwatch\_agent: "Enabling the cloudwatch agent on the ec2 runner instances, the runner contains default config. Configuration can be overridden via `cloudwatch_config`."
cloudwatch\_config: "(optional) Replaces the module default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details."
userdata\_pre\_install: "Script to be ran before the GitHub Actions runner is installed on the EC2 instances"
userdata\_post\_install: "Script to be ran after the GitHub Actions runner is installed on the EC2 instances"
runner\_hook\_job\_started: "Script to be ran in the runner environment at the beginning of every job"
runner\_hook\_job\_completed: "Script to be ran in the runner environment at the end of every job"
runner\_ec2\_tags: "Map of tags that will be added to the launch template instance tag specifications."
runner\_iam\_role\_managed\_policy\_arns: "Attach AWS or customer-managed IAM policies (by ARN) to the runner IAM role"
vpc\_id: "The VPC for security groups of the action runners. If not set uses the value of `var.vpc_id`."
subnet\_ids: "List of subnets in which the action runners will be launched, the subnets needs to be subnets in the `vpc_id`. If not set, uses the value of `var.subnet_ids`."
idle\_config: "List of time period that can be defined as cron expression to keep a minimum amount of runners active instead of scaling down to 0. By defining this list you can ensure that in time periods that match the cron expression within 5 seconds a runner is kept idle."
runner\_log\_files: "(optional) Replaces the module default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details."
block\_device\_mappings: "The EC2 instance block device configuration. Takes the following keys: `device_name`, `delete_on_termination`, `volume_type`, `volume_size`, `encrypted`, `iops`, `throughput`, `kms_key_id`, `snapshot_id`."
job\_retry: "Experimental! Can be removed / changed without trigger a major release. Configure job retries. The configuration enables job retries (for ephemeral runners). After creating the insances a message will be published to a job retry queue. The job retry check lambda is checking after a delay if the job is queued. If not the message will be published again on the scale-up (build queue). Using this feature can impact the reate limit of the GitHub app."
pool\_config: "The configuration for updating the pool. The `pool_size` to adjust to by the events triggered by the `schedule_expression`. For example you can configure a cron expression for week days to adjust the pool to 10 and another expression for the weekend to adjust the pool to 1. Use `schedule_expression_timezone` to override the schedule time zone (defaults to UTC)."
}
matcherConfig: {
labelMatchers: "The list of list of labels supported by the runner configuration. `[[self-hosted, linux, x64, example]]`"
exactMatch: "If set to true all labels in the workflow job must match the GitHub labels (os, architecture and `self-hosted`). When false if __any__ workflow label matches it will trigger the webhook."
priority: "If set it defines the priority of the matcher, the matcher with the lowest priority will be evaluated first. Default is 999, allowed values 0-999."
}
redrive\_build\_queue: "Set options to attach (optional) a dead letter queue to the build queue, the queue between the webhook and the scale up lambda. You have the following options. 1. Disable by setting `enabled` to false. 2. Enable by setting `enabled` to `true`, `maxReceiveCount` to a number of max retries."
} |
map(object({
runner_config = object({
runner_os = string
runner_architecture = string
runner_metadata_options = optional(map(any), {
instance_metadata_tags = "enabled"
http_endpoint = "enabled"
http_tokens = "required"
http_put_response_hop_limit = 1
})
ami = optional(object({
filter = optional(map(list(string)), { state = ["available"] })
owners = optional(list(string), ["amazon"])
id_ssm_parameter_arn = optional(string, null)
kms_key_arn = optional(string, null)
}), null) # Defaults to null, in which case the module falls back to individual AMI variables (deprecated)
# Deprecated: Use ami object instead
ami_filter = optional(map(list(string)), { state = ["available"] })
ami_owners = optional(list(string), ["amazon"])
ami_id_ssm_parameter_name = optional(string, null)
ami_kms_key_arn = optional(string, "")
create_service_linked_role_spot = optional(bool, false)
credit_specification = optional(string, null)
delay_webhook_event = optional(number, 30)
disable_runner_autoupdate = optional(bool, false)
ebs_optimized = optional(bool, false)
enable_ephemeral_runners = optional(bool, false)
enable_job_queued_check = optional(bool, null)
enable_on_demand_failover_for_errors = optional(list(string), [])
enable_organization_runners = optional(bool, false)
enable_runner_binaries_syncer = optional(bool, true)
enable_ssm_on_runners = optional(bool, false)
enable_userdata = optional(bool, true)
instance_allocation_strategy = optional(string, "lowest-price")
instance_max_spot_price = optional(string, null)
instance_target_capacity_type = optional(string, "spot")
instance_types = list(string)
job_queue_retention_in_seconds = optional(number, 86400)
minimum_running_time_in_minutes = optional(number, null)
pool_runner_owner = optional(string, null)
runner_as_root = optional(bool, false)
runner_boot_time_in_minutes = optional(number, 5)
runner_disable_default_labels = optional(bool, false)
runner_extra_labels = optional(list(string), [])
runner_group_name = optional(string, "Default")
runner_name_prefix = optional(string, "")
runner_run_as = optional(string, "ec2-user")
runners_maximum_count = number
runner_additional_security_group_ids = optional(list(string), [])
scale_down_schedule_expression = optional(string, "cron(*/5 * * * ? *)")
scale_up_reserved_concurrent_executions = optional(number, 1)
userdata_template = optional(string, null)
userdata_content = optional(string, null)
enable_jit_config = optional(bool, null)
enable_runner_detailed_monitoring = optional(bool, false)
enable_cloudwatch_agent = optional(bool, true)
cloudwatch_config = optional(string, null)
userdata_pre_install = optional(string, "")
userdata_post_install = optional(string, "")
runner_hook_job_started = optional(string, "")
runner_hook_job_completed = optional(string, "")
runner_ec2_tags = optional(map(string), {})
runner_iam_role_managed_policy_arns = optional(list(string), [])
vpc_id = optional(string, null)
subnet_ids = optional(list(string), null)
idle_config = optional(list(object({
cron = string
timeZone = string
idleCount = number
evictionStrategy = optional(string, "oldest_first")
})), [])
cpu_options = optional(object({
core_count = number
threads_per_core = number
}), null)
runner_log_files = optional(list(object({
log_group_name = string
prefix_log_group = bool
file_path = string
log_stream_name = string
})), null)
block_device_mappings = optional(list(object({
delete_on_termination = optional(bool, true)
device_name = optional(string, "/dev/xvda")
encrypted = optional(bool, true)
iops = optional(number)
kms_key_id = optional(string)
snapshot_id = optional(string)
throughput = optional(number)
volume_size = number
volume_type = optional(string, "gp3")
})), [{
volume_size = 30
}])
pool_config = optional(list(object({
schedule_expression = string
schedule_expression_timezone = optional(string)
size = number
})), [])
job_retry = optional(object({
enable = optional(bool, false)
delay_in_seconds = optional(number, 300)
delay_backoff = optional(number, 2)
lambda_memory_size = optional(number, 256)
lambda_timeout = optional(number, 30)
max_attempts = optional(number, 1)
}), {})
})
matcherConfig = object({
labelMatchers = list(list(string))
exactMatch = optional(bool, false)
priority = optional(number, 999)
})
redrive_build_queue = optional(object({
enabled = bool
maxReceiveCount = number
}), {
enabled = false
maxReceiveCount = null
})
}))
| n/a | yes | +| [multi\_runner\_config](#input\_multi\_runner\_config) | multi\_runner\_config = {
runner\_config: {
runner\_os: "The EC2 Operating System type to use for action runner instances (linux,windows)."
runner\_architecture: "The platform architecture of the runner instance\_type."
runner\_metadata\_options: "(Optional) Metadata options for the ec2 runner instances."
ami: "(Optional) AMI configuration for the action runner instances. This object allows you to specify all AMI-related settings in one place."
ami\_filter: "(Optional) List of maps used to create the AMI filter for the action runner AMI. By default amazon linux 2 is used."
ami\_owners: "(Optional) The list of owners used to select the AMI of action runner instances."
create\_service\_linked\_role\_spot: "(Optional) Create the service linked role for spot instances that is required by the scale-up lambda."
credit\_specification: "(Optional) The credit specification of the runner instance\_type. Can be unset, `standard` or `unlimited`."
delay\_webhook\_event: "The number of seconds the event accepted by the webhook is invisible on the queue before the scale up lambda will receive the event."
disable\_runner\_autoupdate: "Disable the auto update of the github runner agent. Be aware there is a grace period of 30 days, see also the [GitHub article](https://github.blog/changelog/2022-02-01-github-actions-self-hosted-runners-can-now-disable-automatic-updates/)"
ebs\_optimized: "The EC2 EBS optimized configuration."
enable\_ephemeral\_runners: "Enable ephemeral runners, runners will only be used once."
enable\_job\_queued\_check: "Only scale if the job event received by the scale up lambda is in the queued state. By default enabled for non ephemeral runners and disabled for ephemeral. Set this variable to overwrite the default behavior."
enable\_on\_demand\_failover\_for\_errors: "Enable on-demand failover. For example to fall back to on demand when no spot capacity is available the variable can be set to `InsufficientInstanceCapacity`. When not defined the default behavior is to retry later."
enable\_organization\_runners: "Register runners to organization, instead of repo level"
enable\_enterprise\_runners: "Register runners to enterprise, instead of repo or organization level"
enterprise\_slug: "Enterprise slug"
enable\_runner\_binaries\_syncer: "Option to disable the lambda to sync GitHub runner distribution, useful when using a pre-build AMI."
enable\_ssm\_on\_runners: "Enable to allow access the runner instances for debugging purposes via SSM. Note that this adds additional permissions to the runner instances."
enable\_userdata: "Should the userdata script be enabled for the runner. Set this to false if you are using your own prebuilt AMI."
instance\_allocation\_strategy: "The allocation strategy for spot instances. AWS recommends to use `capacity-optimized` however the AWS default is `lowest-price`."
instance\_max\_spot\_price: "Maximum price for spot instances per hour. This variable will be passed to the create fleet as max spot price for the fleet."
instance\_target\_capacity\_type: "Default lifecycle used for runner instances, can be either `spot` or `on-demand`."
instance\_types: "List of instance types for the action runner. Defaults are based on runner\_os (al2023 for linux and Windows Server Core for win)."
job\_queue\_retention\_in\_seconds: "The number of seconds the job is held in the queue before it is purged"
minimum\_running\_time\_in\_minutes: "The time an ec2 action runner should be running at minimum before terminated if not busy."
pool\_runner\_owner: "The pool will deploy runners to the GitHub org ID, set this value to the org to which you want the runners deployed. Repo level is not supported."
runner\_additional\_security\_group\_ids: "List of additional security groups IDs to apply to the runner. If added outside the multi\_runner\_config block, the additional security group(s) will be applied to all runner configs. If added inside the multi\_runner\_config, the additional security group(s) will be applied to the individual runner."
runner\_as\_root: "Run the action runner under the root user. Variable `runner_run_as` will be ignored."
runner\_boot\_time\_in\_minutes: "The minimum time for an EC2 runner to boot and register as a runner."
runner\_disable\_default\_labels: "Disable default labels for the runners (os, architecture and `self-hosted`). If enabled, the runner will only have the extra labels provided in `runner_extra_labels`. In case your own start script is used, this configuration parameter needs to be parsed via SSM."
runner\_extra\_labels: "Extra (custom) labels for the runners (GitHub). Separate each label by a comma. Labels checks on the webhook can be enforced by setting `multi_runner_config.matcherConfig.exactMatch`. GitHub read-only labels should not be provided."
runner\_group\_name: "Name of the runner group."
runner\_name\_prefix: "Prefix for the GitHub runner name."
runner\_run\_as: "Run the GitHub actions agent as user."
runners\_maximum\_count: "The maximum number of runners that will be created. Setting the variable to `-1` disables the maximum check."
scale\_down\_schedule\_expression: "Scheduler expression to check every x for scale down."
scale\_up\_reserved\_concurrent\_executions: "Amount of reserved concurrent executions for the scale-up lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations."
userdata\_template: "Alternative user-data template, replacing the default template. By providing your own user\_data you have to take care of installing all required software, including the action runner. Variables userdata\_pre/post\_install are ignored."
enable\_jit\_config: "Overwrite the default behavior for JIT configuration. By default JIT configuration is enabled for ephemeral runners and disabled for non-ephemeral runners. In case of GHES check first if the JIT config API is available. In case you are upgrading from 3.x to 4.x you can set `enable_jit_config` to `false` to avoid a breaking change when having your own AMI."
enable\_runner\_detailed\_monitoring: "Should detailed monitoring be enabled for the runner. Set this to true if you want to use detailed monitoring. See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html for details."
enable\_cloudwatch\_agent: "Enabling the cloudwatch agent on the ec2 runner instances, the runner contains default config. Configuration can be overridden via `cloudwatch_config`."
cloudwatch\_config: "(optional) Replaces the module default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details."
userdata\_pre\_install: "Script to be ran before the GitHub Actions runner is installed on the EC2 instances"
userdata\_post\_install: "Script to be ran after the GitHub Actions runner is installed on the EC2 instances"
runner\_hook\_job\_started: "Script to be ran in the runner environment at the beginning of every job"
runner\_hook\_job\_completed: "Script to be ran in the runner environment at the end of every job"
runner\_ec2\_tags: "Map of tags that will be added to the launch template instance tag specifications."
runner\_iam\_role\_managed\_policy\_arns: "Attach AWS or customer-managed IAM policies (by ARN) to the runner IAM role"
vpc\_id: "The VPC for security groups of the action runners. If not set uses the value of `var.vpc_id`."
subnet\_ids: "List of subnets in which the action runners will be launched, the subnets needs to be subnets in the `vpc_id`. If not set, uses the value of `var.subnet_ids`."
idle\_config: "List of time period that can be defined as cron expression to keep a minimum amount of runners active instead of scaling down to 0. By defining this list you can ensure that in time periods that match the cron expression within 5 seconds a runner is kept idle."
runner\_log\_files: "(optional) Replaces the module default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details."
block\_device\_mappings: "The EC2 instance block device configuration. Takes the following keys: `device_name`, `delete_on_termination`, `volume_type`, `volume_size`, `encrypted`, `iops`, `throughput`, `kms_key_id`, `snapshot_id`."
job\_retry: "Experimental! Can be removed / changed without triggering a major release. Configure job retries. The configuration enables job retries (for ephemeral runners). After creating the instances a message will be published to a job retry queue. The job retry check lambda is checking after a delay if the job is queued. If not the message will be published again on the scale-up (build queue). Using this feature can impact the rate limit of the GitHub app."
pool\_config: "The configuration for updating the pool. The `pool_size` to adjust to by the events triggered by the `schedule_expression`. For example you can configure a cron expression for week days to adjust the pool to 10 and another expression for the weekend to adjust the pool to 1. Use `schedule_expression_timezone` to override the schedule time zone (defaults to UTC)."
}
matcherConfig: {
labelMatchers: "The list of list of labels supported by the runner configuration. `[[self-hosted, linux, x64, example]]`"
exactMatch: "If set to true all labels in the workflow job must match the GitHub labels (os, architecture and `self-hosted`). When false if __any__ workflow label matches it will trigger the webhook."
priority: "If set it defines the priority of the matcher, the matcher with the lowest priority will be evaluated first. Default is 999, allowed values 0-999."
}
redrive\_build\_queue: "Set options to attach (optional) a dead letter queue to the build queue, the queue between the webhook and the scale up lambda. You have the following options. 1. Disable by setting `enabled` to false. 2. Enable by setting `enabled` to `true`, `maxReceiveCount` to a number of max retries."
} |
map(object({
runner_config = object({
runner_os = string
runner_architecture = string
runner_metadata_options = optional(map(any), {
instance_metadata_tags = "enabled"
http_endpoint = "enabled"
http_tokens = "required"
http_put_response_hop_limit = 1
})
ami = optional(object({
filter = optional(map(list(string)), { state = ["available"] })
owners = optional(list(string), ["amazon"])
id_ssm_parameter_arn = optional(string, null)
kms_key_arn = optional(string, null)
}), null) # Defaults to null, in which case the module falls back to individual AMI variables (deprecated)
# Deprecated: Use ami object instead
ami_filter = optional(map(list(string)), { state = ["available"] })
ami_owners = optional(list(string), ["amazon"])
ami_id_ssm_parameter_name = optional(string, null)
ami_kms_key_arn = optional(string, "")
create_service_linked_role_spot = optional(bool, false)
credit_specification = optional(string, null)
delay_webhook_event = optional(number, 30)
disable_runner_autoupdate = optional(bool, false)
ebs_optimized = optional(bool, false)
enable_ephemeral_runners = optional(bool, false)
enable_job_queued_check = optional(bool, null)
enable_on_demand_failover_for_errors = optional(list(string), [])
enable_organization_runners = optional(bool, false)
enable_enterprise_runners = optional(bool, false)
enterprise_slug = optional(string, "")
enterprise_pat = optional(string, "")
enable_runner_binaries_syncer = optional(bool, true)
enable_ssm_on_runners = optional(bool, false)
enable_userdata = optional(bool, true)
instance_allocation_strategy = optional(string, "lowest-price")
instance_max_spot_price = optional(string, null)
instance_target_capacity_type = optional(string, "spot")
instance_types = list(string)
job_queue_retention_in_seconds = optional(number, 86400)
minimum_running_time_in_minutes = optional(number, null)
pool_runner_owner = optional(string, null)
runner_as_root = optional(bool, false)
runner_boot_time_in_minutes = optional(number, 5)
runner_disable_default_labels = optional(bool, false)
runner_extra_labels = optional(list(string), [])
runner_group_name = optional(string, "Default")
runner_name_prefix = optional(string, "")
runner_run_as = optional(string, "ec2-user")
runners_maximum_count = number
runner_additional_security_group_ids = optional(list(string), [])
scale_down_schedule_expression = optional(string, "cron(*/5 * * * ? *)")
scale_up_reserved_concurrent_executions = optional(number, 1)
userdata_template = optional(string, null)
userdata_content = optional(string, null)
enable_jit_config = optional(bool, null)
enable_runner_detailed_monitoring = optional(bool, false)
enable_cloudwatch_agent = optional(bool, true)
cloudwatch_config = optional(string, null)
userdata_pre_install = optional(string, "")
userdata_post_install = optional(string, "")
runner_hook_job_started = optional(string, "")
runner_hook_job_completed = optional(string, "")
runner_ec2_tags = optional(map(string), {})
runner_iam_role_managed_policy_arns = optional(list(string), [])
vpc_id = optional(string, null)
subnet_ids = optional(list(string), null)
idle_config = optional(list(object({
cron = string
timeZone = string
idleCount = number
evictionStrategy = optional(string, "oldest_first")
})), [])
cpu_options = optional(object({
core_count = number
threads_per_core = number
}), null)
runner_log_files = optional(list(object({
log_group_name = string
prefix_log_group = bool
file_path = string
log_stream_name = string
})), null)
block_device_mappings = optional(list(object({
delete_on_termination = optional(bool, true)
device_name = optional(string, "/dev/xvda")
encrypted = optional(bool, true)
iops = optional(number)
kms_key_id = optional(string)
snapshot_id = optional(string)
throughput = optional(number)
volume_size = number
volume_type = optional(string, "gp3")
})), [{
volume_size = 30
}])
pool_config = optional(list(object({
schedule_expression = string
schedule_expression_timezone = optional(string)
size = number
})), [])
job_retry = optional(object({
enable = optional(bool, false)
delay_in_seconds = optional(number, 300)
delay_backoff = optional(number, 2)
lambda_memory_size = optional(number, 256)
lambda_timeout = optional(number, 30)
max_attempts = optional(number, 1)
}), {})
})
matcherConfig = object({
labelMatchers = list(list(string))
exactMatch = optional(bool, false)
priority = optional(number, 999)
})
redrive_build_queue = optional(object({
enabled = bool
maxReceiveCount = number
}), {
enabled = false
maxReceiveCount = null
})
}))
| n/a | yes | | [pool\_lambda\_reserved\_concurrent\_executions](#input\_pool\_lambda\_reserved\_concurrent\_executions) | Amount of reserved concurrent executions for the scale-up lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations. | `number` | `1` | no | | [pool\_lambda\_timeout](#input\_pool\_lambda\_timeout) | Time out for the pool lambda in seconds. | `number` | `60` | no | | [prefix](#input\_prefix) | The prefix used for naming resources | `string` | `"github-actions"` | no | diff --git a/modules/multi-runner/main.tf b/modules/multi-runner/main.tf index 905cc7f793..6543d49e2f 100644 --- a/modules/multi-runner/main.tf +++ b/modules/multi-runner/main.tf @@ -4,11 +4,13 @@ locals { }) github_app_parameters = { - id = coalesce(var.github_app.id_ssm, module.ssm.parameters.github_app_id) - key_base64 = coalesce(var.github_app.key_base64_ssm, module.ssm.parameters.github_app_key_base64) + id = var.enterprise_pat == null ? coalesce(var.github_app.id_ssm, module.ssm.parameters.github_app_id) : null + key_base64 = var.enterprise_pat == null ? coalesce(var.github_app.key_base64_ssm, module.ssm.parameters.github_app_key_base64) : null webhook_secret = coalesce(var.github_app.webhook_secret_ssm, module.ssm.parameters.github_app_webhook_secret) } + enterprise_pat = var.enterprise_pat != null ? 
module.ssm.parameters.enterprise_pat : null + runner_extra_labels = { for k, v in var.multi_runner_config : k => sort(setunion(flatten(v.matcherConfig.labelMatchers), compact(v.runner_config.runner_extra_labels))) } runner_config = { for k, v in var.multi_runner_config : k => merge({ id = aws_sqs_queue.queued_builds[k].id, arn = aws_sqs_queue.queued_builds[k].arn, url = aws_sqs_queue.queued_builds[k].url }, merge(v, { runner_config = merge(v.runner_config, { runner_extra_labels = local.runner_extra_labels[k] }) })) } diff --git a/modules/multi-runner/outputs.tf b/modules/multi-runner/outputs.tf index 2f2b1d3458..795a1f4a96 100644 --- a/modules/multi-runner/outputs.tf +++ b/modules/multi-runner/outputs.tf @@ -45,10 +45,13 @@ output "webhook" { } output "ssm_parameters" { - value = { for k, v in local.github_app_parameters : k => { - name = v.name - arn = v.arn + value = { + for k, v in local.github_app_parameters : + k => { + name = v.name + arn = v.arn } + if v != null } } diff --git a/modules/multi-runner/runners.tf b/modules/multi-runner/runners.tf index 811ab36260..323bb26cd7 100644 --- a/modules/multi-runner/runners.tf +++ b/modules/multi-runner/runners.tf @@ -33,9 +33,12 @@ module "runners" { sqs_build_queue = { "arn" : each.value.arn, "url" : each.value.url } github_app_parameters = local.github_app_parameters + enterprise_pat = local.enterprise_pat ebs_optimized = each.value.runner_config.ebs_optimized enable_on_demand_failover_for_errors = each.value.runner_config.enable_on_demand_failover_for_errors enable_organization_runners = each.value.runner_config.enable_organization_runners + enable_enterprise_runners = each.value.runner_config.enable_enterprise_runners + enterprise_slug = each.value.runner_config.enterprise_slug enable_ephemeral_runners = each.value.runner_config.enable_ephemeral_runners enable_jit_config = each.value.runner_config.enable_jit_config enable_job_queued_check = each.value.runner_config.enable_job_queued_check diff --git 
a/modules/multi-runner/ssm.tf b/modules/multi-runner/ssm.tf index 6a3a234e6f..e59fe65a77 100644 --- a/modules/multi-runner/ssm.tf +++ b/modules/multi-runner/ssm.tf @@ -1,7 +1,8 @@ module "ssm" { - source = "../ssm" - kms_key_arn = var.kms_key_arn - path_prefix = "${local.ssm_root_path}/${var.ssm_paths.app}" - github_app = var.github_app - tags = local.tags + source = "../ssm" + kms_key_arn = var.kms_key_arn + path_prefix = "${local.ssm_root_path}/${var.ssm_paths.app}" + enterprise_pat = var.enterprise_pat + github_app = var.github_app + tags = local.tags } diff --git a/modules/multi-runner/variables.tf b/modules/multi-runner/variables.tf index edbdb33059..d28af7c8da 100644 --- a/modules/multi-runner/variables.tf +++ b/modules/multi-runner/variables.tf @@ -1,37 +1,48 @@ +variable "enterprise_pat" { + description = "GitHub enterprise PAT. Used when not authenticating via GitHub App." + type = string + default = null +} + variable "github_app" { description = < [ebs\_optimized](#input\_ebs\_optimized) | The EC2 EBS optimized configuration. | `bool` | `false` | no | | [egress\_rules](#input\_egress\_rules) | List of egress rules for the GitHub runner instances. |
list(object({
cidr_blocks = list(string)
ipv6_cidr_blocks = list(string)
prefix_list_ids = list(string)
from_port = number
protocol = string
security_groups = list(string)
self = bool
to_port = number
description = string
}))
|
[
{
"cidr_blocks": [
"0.0.0.0/0"
],
"description": null,
"from_port": 0,
"ipv6_cidr_blocks": [
"::/0"
],
"prefix_list_ids": null,
"protocol": "-1",
"security_groups": null,
"self": null,
"to_port": 0
}
]
| no | | [enable\_cloudwatch\_agent](#input\_enable\_cloudwatch\_agent) | Enabling the cloudwatch agent on the ec2 runner instances, the runner contains default config. Configuration can be overridden via `cloudwatch_config`. | `bool` | `true` | no | +| [enable\_enterprise\_runners](#input\_enable\_enterprise\_runners) | Register runners at the enterprise level, instead of at the repo or organization level | `bool` | n/a | yes | | [enable\_ephemeral\_runners](#input\_enable\_ephemeral\_runners) | Enable ephemeral runners, runners will only be used once. | `bool` | `false` | no | | [enable\_jit\_config](#input\_enable\_jit\_config) | Overwrite the default behavior for JIT configuration. By default JIT configuration is enabled for ephemeral runners and disabled for non-ephemeral runners. In case of GHES check first if the JIT config API is avaialbe. In case you upgradeing from 3.x to 4.x you can set `enable_jit_config` to `false` to avoid a breaking change when having your own AMI. | `bool` | `null` | no | | [enable\_job\_queued\_check](#input\_enable\_job\_queued\_check) | Only scale if the job event received by the scale up lambda is is in the state queued. By default enabled for non ephemeral runners and disabled for ephemeral. Set this variable to overwrite the default behavior. | `bool` | `null` | no | @@ -164,6 +165,8 @@ yarn run dist | [enable\_ssm\_on\_runners](#input\_enable\_ssm\_on\_runners) | Enable to allow access to the runner instances for debugging purposes via SSM. Note that this adds additional permissions to the runner instances. | `bool` | n/a | yes | | [enable\_user\_data\_debug\_logging](#input\_enable\_user\_data\_debug\_logging) | Option to enable debug logging for user-data, this logs all secrets as well. | `bool` | `false` | no | | [enable\_userdata](#input\_enable\_userdata) | Should the userdata script be enabled for the runner.
Set this to false if you are using your own prebuilt AMI | `bool` | `true` | no | +| [enterprise\_pat](#input\_enterprise\_pat) | GitHub Enterprise PAT to use for registering runners to the enterprise. This is only required when `enable_enterprise_runners` is set to true. | `map(string)` | n/a | yes | +| [enterprise\_slug](#input\_enterprise\_slug) | Enterprise slug | `string` | n/a | yes | | [ghes\_ssl\_verify](#input\_ghes\_ssl\_verify) | GitHub Enterprise SSL verification. Set to 'false' when custom certificate (chains) is used for GitHub Enterprise Server (insecure). | `bool` | `true` | no | | [ghes\_url](#input\_ghes\_url) | GitHub Enterprise Server URL. DO NOT SET IF USING PUBLIC GITHUB..However if you are using Github Enterprise Cloud with data-residency (ghe.com), set the endpoint here. Example - https://companyname.ghe.com\| | `string` | `null` | no | | [github\_app\_parameters](#input\_github\_app\_parameters) | Parameter Store for GitHub App Parameters. |
object({
key_base64 = map(string)
id = map(string)
})
| n/a | yes | diff --git a/modules/runners/job-retry.tf b/modules/runners/job-retry.tf index e51c3903d4..3f7971f628 100644 --- a/modules/runners/job-retry.tf +++ b/modules/runners/job-retry.tf @@ -24,7 +24,10 @@ locals { zip = var.lambda_zip tracing_config = var.tracing_config github_app_parameters = var.github_app_parameters + enterprise_pat = var.enterprise_pat enable_organization_runners = var.enable_organization_runners + enable_enterprise_runners = var.enable_enterprise_runners + enterprise_slug = var.enterprise_slug sqs_build_queue = var.sqs_build_queue ghes_url = var.ghes_url } diff --git a/modules/runners/job-retry/README.md b/modules/runners/job-retry/README.md index 91089a213b..50cfaf4a47 100644 --- a/modules/runners/job-retry/README.md +++ b/modules/runners/job-retry/README.md @@ -42,7 +42,7 @@ The module is an inner module and used by the runner module when the opt-in feat | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [config](#input\_config) | Configuration for the spot termination watcher lambda function.

`aws_partition`: Partition for the base arn if not 'aws'
`architecture`: AWS Lambda architecture. Lambda functions using Graviton processors ('arm64') tend to have better price/performance than 'x86\_64' functions.
`environment_variables`: Environment variables for the lambda.
`enable_organization_runners`: Enable organization runners.
`enable_metric`: Enable metric for the lambda. If `spot_warning` is set to true, the lambda will emit a metric when it detects a spot termination warning.
'ghes\_url': Optional GitHub Enterprise Server URL.
'user\_agent': Optional User-Agent header for GitHub API requests.
'github\_app\_parameters': Parameter Store for GitHub App Parameters.
'kms\_key\_arn': Optional CMK Key ARN instead of using the default AWS managed key.
`lambda_principals`: Add extra principals to the role created for execution of the lambda, e.g. for local testing.
`lambda_tags`: Map of tags that will be added to created resources. By default resources will be tagged with name and environment.
`log_level`: Logging level for lambda logging. Valid values are 'silly', 'trace', 'debug', 'info', 'warn', 'error', 'fatal'.
`logging_kms_key_id`: Specifies the kms key id to encrypt the logs with
`logging_retention_in_days`: Specifies the number of days you want to retain log events for the lambda log group. Possible values are: 0, 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653.
`memory_size`: Memory size linit in MB of the lambda.
`metrics`: Configuration to enable metrics creation by the lambda.
`prefix`: The prefix used for naming resources.
`role_path`: The path that will be added to the role, if not set the environment name will be used.
`role_permissions_boundary`: Permissions boundary that will be added to the created role for the lambda.
`runtime`: AWS Lambda runtime.
`s3_bucket`: S3 bucket from which to specify lambda functions. This is an alternative to providing local files directly.
`s3_key`: S3 key for syncer lambda function. Required if using S3 bucket to specify lambdas.
`s3_object_version`: S3 object version for syncer lambda function. Useful if S3 versioning is enabled on source bucket.
`security_group_ids`: List of security group IDs associated with the Lambda function.
'sqs\_build\_queue': SQS queue for build events to re-publish job request.
`subnet_ids`: List of subnets in which the action runners will be launched, the subnets needs to be subnets in the `vpc_id`.
`tag_filters`: Map of tags that will be used to filter the resources to be tracked. Only for which all tags are present and starting with the same value as the value in the map will be tracked.
`tags`: Map of tags that will be added to created resources. By default resources will be tagged with name and environment.
`timeout`: Time out of the lambda in seconds.
`tracing_config`: Configuration for lambda tracing.
`zip`: File location of the lambda zip file. |
object({
aws_partition = optional(string, null)
architecture = optional(string, null)
enable_organization_runners = bool
environment_variables = optional(map(string), {})
ghes_url = optional(string, null)
user_agent = optional(string, null)
github_app_parameters = object({
key_base64 = map(string)
id = map(string)
})
kms_key_arn = optional(string, null)
lambda_tags = optional(map(string), {})
log_level = optional(string, null)
logging_kms_key_id = optional(string, null)
logging_retention_in_days = optional(number, null)
memory_size = optional(number, null)
metrics = optional(object({
enable = optional(bool, false)
namespace = optional(string, null)
metric = optional(object({
enable_github_app_rate_limit = optional(bool, true)
enable_job_retry = optional(bool, true)
}), {})
}), {})
prefix = optional(string, null)
principals = optional(list(object({
type = string
identifiers = list(string)
})), [])
queue_encryption = optional(object({
kms_data_key_reuse_period_seconds = optional(number, null)
kms_master_key_id = optional(string, null)
sqs_managed_sse_enabled = optional(bool, true)
}), {})
role_path = optional(string, null)
role_permissions_boundary = optional(string, null)
runtime = optional(string, null)
security_group_ids = optional(list(string), [])
subnet_ids = optional(list(string), [])
s3_bucket = optional(string, null)
s3_key = optional(string, null)
s3_object_version = optional(string, null)
sqs_build_queue = object({
url = string
arn = string
})
tags = optional(map(string), {})
timeout = optional(number, 30)
tracing_config = optional(object({
mode = optional(string, null)
capture_http_requests = optional(bool, false)
capture_error = optional(bool, false)
}), {})
zip = optional(string, null)
})
| n/a | yes | +| [config](#input\_config) | Configuration for the spot termination watcher lambda function.

`aws_partition`: Partition for the base arn if not 'aws'
`architecture`: AWS Lambda architecture. Lambda functions using Graviton processors ('arm64') tend to have better price/performance than 'x86\_64' functions.
`environment_variables`: Environment variables for the lambda.
`enable_organization_runners`: Enable organization runners.
`enable_enterprise_runners`: Enable enterprise runners.
`enterprise_slug`: GitHub enterprise slug.
`enable_metric`: Enable metric for the lambda. If `spot_warning` is set to true, the lambda will emit a metric when it detects a spot termination warning.
'ghes\_url': Optional GitHub Enterprise Server URL.
'user\_agent': Optional User-Agent header for GitHub API requests.
'github\_app\_parameters': Parameter Store for GitHub App Parameters.
'enterprise\_pat': Personal Access Token for GitHub Enterprise. If set, the lambda will use this PAT to authenticate with the GitHub API.
'kms\_key\_arn': Optional CMK Key ARN instead of using the default AWS managed key.
`lambda_principals`: Add extra principals to the role created for execution of the lambda, e.g. for local testing.
`lambda_tags`: Map of tags that will be added to created resources. By default resources will be tagged with name and environment.
`log_level`: Logging level for lambda logging. Valid values are 'silly', 'trace', 'debug', 'info', 'warn', 'error', 'fatal'.
`logging_kms_key_id`: Specifies the kms key id to encrypt the logs with
`logging_retention_in_days`: Specifies the number of days you want to retain log events for the lambda log group. Possible values are: 0, 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653.
`memory_size`: Memory size limit in MB of the lambda.
`metrics`: Configuration to enable metrics creation by the lambda.
`prefix`: The prefix used for naming resources.
`role_path`: The path that will be added to the role, if not set the environment name will be used.
`role_permissions_boundary`: Permissions boundary that will be added to the created role for the lambda.
`runtime`: AWS Lambda runtime.
`s3_bucket`: S3 bucket from which to specify lambda functions. This is an alternative to providing local files directly.
`s3_key`: S3 key for syncer lambda function. Required if using S3 bucket to specify lambdas.
`s3_object_version`: S3 object version for syncer lambda function. Useful if S3 versioning is enabled on source bucket.
`security_group_ids`: List of security group IDs associated with the Lambda function.
'sqs\_build\_queue': SQS queue for build events to re-publish job request.
`subnet_ids`: List of subnets in which the action runners will be launched, the subnets needs to be subnets in the `vpc_id`.
`tag_filters`: Map of tags that will be used to filter the resources to be tracked. Only for which all tags are present and starting with the same value as the value in the map will be tracked.
`tags`: Map of tags that will be added to created resources. By default resources will be tagged with name and environment.
`timeout`: Time out of the lambda in seconds.
`tracing_config`: Configuration for lambda tracing.
`zip`: File location of the lambda zip file. |
object({
aws_partition = optional(string, null)
architecture = optional(string, null)
enable_organization_runners = bool
enable_enterprise_runners = bool
enterprise_slug = string
environment_variables = optional(map(string), {})
ghes_url = optional(string, null)
user_agent = optional(string, null)
github_app_parameters = object({
key_base64 = optional(map(string))
id = optional(map(string))
})
enterprise_pat = optional(map(string))
kms_key_arn = optional(string, null)
lambda_tags = optional(map(string), {})
log_level = optional(string, null)
logging_kms_key_id = optional(string, null)
logging_retention_in_days = optional(number, null)
memory_size = optional(number, null)
metrics = optional(object({
enable = optional(bool, false)
namespace = optional(string, null)
metric = optional(object({
enable_github_app_rate_limit = optional(bool, true)
enable_job_retry = optional(bool, true)
}), {})
}), {})
prefix = optional(string, null)
principals = optional(list(object({
type = string
identifiers = list(string)
})), [])
queue_encryption = optional(object({
kms_data_key_reuse_period_seconds = optional(number, null)
kms_master_key_id = optional(string, null)
sqs_managed_sse_enabled = optional(bool, true)
}), {})
role_path = optional(string, null)
role_permissions_boundary = optional(string, null)
runtime = optional(string, null)
security_group_ids = optional(list(string), [])
subnet_ids = optional(list(string), [])
s3_bucket = optional(string, null)
s3_key = optional(string, null)
s3_object_version = optional(string, null)
sqs_build_queue = object({
url = string
arn = string
})
tags = optional(map(string), {})
timeout = optional(number, 30)
tracing_config = optional(object({
mode = optional(string, null)
capture_http_requests = optional(bool, false)
capture_error = optional(bool, false)
}), {})
zip = optional(string, null)
})
| n/a | yes | ## Outputs diff --git a/modules/runners/job-retry/main.tf b/modules/runners/job-retry/main.tf index 9561c7db71..10e9300a3d 100644 --- a/modules/runners/job-retry/main.tf +++ b/modules/runners/job-retry/main.tf @@ -4,13 +4,16 @@ locals { environment_variables = { ENABLE_ORGANIZATION_RUNNERS = var.config.enable_organization_runners + ENABLE_ENTERPRISE_RUNNERS = var.config.enable_enterprise_runners + ENTERPRISE_SLUG = var.config.enterprise_slug ENABLE_METRIC_JOB_RETRY = var.config.metrics.enable && var.config.metrics.metric.enable_job_retry ENABLE_METRIC_GITHUB_APP_RATE_LIMIT = var.config.metrics.enable && var.config.metrics.metric.enable_github_app_rate_limit GHES_URL = var.config.ghes_url USER_AGENT = var.config.user_agent JOB_QUEUE_SCALE_UP_URL = var.config.sqs_build_queue.url - PARAMETER_GITHUB_APP_ID_NAME = var.config.github_app_parameters.id.name - PARAMETER_GITHUB_APP_KEY_BASE64_NAME = var.config.github_app_parameters.key_base64.name + PARAMETER_GITHUB_APP_ID_NAME = var.config.enterprise_pat == null ? var.config.github_app_parameters.id.name : null + PARAMETER_GITHUB_APP_KEY_BASE64_NAME = var.config.enterprise_pat == null ? var.config.github_app_parameters.key_base64.name : null + PARAMETER_ENTERPRISE_PAT_NAME = var.config.enterprise_pat != null ? 
var.config.enterprise_pat.name : null } config = merge(var.config, { @@ -66,6 +69,7 @@ resource "aws_iam_role_policy" "job_retry" { sqs_job_retry_queue_arn = aws_sqs_queue.job_retry_check_queue.arn github_app_id_arn = var.config.github_app_parameters.id.arn github_app_key_base64_arn = var.config.github_app_parameters.key_base64.arn + enterprise_pat_arn = var.config.enterprise_pat != null ? var.config.enterprise_pat.arn : null }) } diff --git a/modules/runners/job-retry/policies/lambda.json index 591ec04790..24e02cd4c3 100644 --- a/modules/runners/job-retry/policies/lambda.json +++ b/modules/runners/job-retry/policies/lambda.json @@ -7,8 +7,12 @@ "ssm:GetParameter" ], "Resource": [ - "${github_app_key_base64_arn}", - "${github_app_id_arn}" +%{ if enterprise_pat_arn != null ~} + "${enterprise_pat_arn}" +%{ else ~} + "${github_app_key_base64_arn}", + "${github_app_id_arn}" +%{ endif ~} ] }, { diff --git a/modules/runners/job-retry/variables.tf index 4a8fe19fbf..927686ca67 100644 --- a/modules/runners/job-retry/variables.tf +++ b/modules/runners/job-retry/variables.tf @@ -6,10 +6,13 @@ variable "config" { `architecture`: AWS Lambda architecture. Lambda functions using Graviton processors ('arm64') tend to have better price/performance than 'x86_64' functions. `environment_variables`: Environment variables for the lambda. `enable_organization_runners`: Enable organization runners. + `enable_enterprise_runners`: Enable enterprise runners. + `enterprise_slug`: GitHub enterprise slug. `enable_metric`: Enable metric for the lambda. If `spot_warning` is set to true, the lambda will emit a metric when it detects a spot termination warning. 'ghes_url': Optional GitHub Enterprise Server URL. 'user_agent': Optional User-Agent header for GitHub API requests. 'github_app_parameters': Parameter Store for GitHub App Parameters. 
+ 'enterprise_pat': Personal Access Token for GitHub Enterprise. If set, the lambda will use this PAT to authenticate with the GitHub API. 'kms_key_arn': Optional CMK Key ARN instead of using the default AWS managed key. `lambda_principals`: Add extra principals to the role created for execution of the lambda, e.g. for local testing. `lambda_tags`: Map of tags that will be added to created resources. By default resources will be tagged with name and environment. @@ -38,13 +41,16 @@ variable "config" { aws_partition = optional(string, null) architecture = optional(string, null) enable_organization_runners = bool + enable_enterprise_runners = bool + enterprise_slug = string environment_variables = optional(map(string), {}) ghes_url = optional(string, null) user_agent = optional(string, null) github_app_parameters = object({ - key_base64 = map(string) - id = map(string) + key_base64 = optional(map(string)) + id = optional(map(string)) }) + enterprise_pat = optional(map(string)) kms_key_arn = optional(string, null) lambda_tags = optional(map(string), {}) log_level = optional(string, null) diff --git a/modules/runners/policies/lambda-scale-down.json index d35be746b7..4b4fa3c3d1 100644 --- a/modules/runners/policies/lambda-scale-down.json +++ b/modules/runners/policies/lambda-scale-down.json @@ -49,8 +49,12 @@ "ssm:GetParameter" ], "Resource": [ +%{ if enterprise_pat_arn != null ~} + "${enterprise_pat_arn}" +%{ else ~} "${github_app_key_base64_arn}", "${github_app_id_arn}" +%{ endif ~} ] %{ if kms_key_arn != "" ~} }, diff --git a/modules/runners/policies/lambda-scale-up.json index 1c6946b945..2444238d84 100644 --- a/modules/runners/policies/lambda-scale-up.json +++ b/modules/runners/policies/lambda-scale-up.json @@ -33,8 +33,15 @@ 
"ssm:GetParameter" ], "Resource": [ +%{ if github_app_key_base64_arn != null ~} "${github_app_key_base64_arn}", +%{ endif ~} +%{ if github_app_id_arn != null ~} "${github_app_id_arn}", +%{ endif ~} +%{ if enterprise_pat_arn != null ~} + "${enterprise_pat_arn}", +%{ endif ~} "${ssm_config_path}/*" ] }, diff --git a/modules/runners/pool.tf b/modules/runners/pool.tf index 2762008ebf..8d70ef4aab 100644 --- a/modules/runners/pool.tf +++ b/modules/runners/pool.tf @@ -11,6 +11,8 @@ module "pool" { } user_agent = var.user_agent github_app_parameters = var.github_app_parameters + enterprise_pat = var.enterprise_pat + enable_enterprise_runners = var.enable_enterprise_runners instance_allocation_strategy = var.instance_allocation_strategy instance_max_spot_price = var.instance_max_spot_price instance_target_capacity_type = var.instance_target_capacity_type @@ -61,5 +63,6 @@ module "pool" { } aws_partition = var.aws_partition + aws_region = var.aws_region tracing_config = var.tracing_config } diff --git a/modules/runners/pool/README.md b/modules/runners/pool/README.md index 052a8be60c..12113bf9fa 100644 --- a/modules/runners/pool/README.md +++ b/modules/runners/pool/README.md @@ -39,6 +39,7 @@ No modules. 
| [aws_lambda_function.pool](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource | | [aws_scheduler_schedule.pool](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/scheduler_schedule) | resource | | [aws_scheduler_schedule_group.pool](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/scheduler_schedule_group) | resource | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_iam_policy_document.lambda_assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.lambda_xray](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.scheduler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | @@ -49,7 +50,8 @@ No modules. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [aws\_partition](#input\_aws\_partition) | (optional) partition for the arn if not 'aws' | `string` | `"aws"` | no | -| [config](#input\_config) | Lookup details in parent module. |
object({
lambda = object({
log_level = string
logging_retention_in_days = number
logging_kms_key_id = string
reserved_concurrent_executions = number
s3_bucket = string
s3_key = string
s3_object_version = string
security_group_ids = list(string)
runtime = string
architecture = string
memory_size = number
timeout = number
zip = string
subnet_ids = list(string)
})
tags = map(string)
ghes = object({
url = string
ssl_verify = string
})
github_app_parameters = object({
key_base64 = map(string)
id = map(string)
})
subnet_ids = list(string)
runner = object({
disable_runner_autoupdate = bool
ephemeral = bool
enable_jit_config = bool
enable_on_demand_failover_for_errors = list(string)
boot_time_in_minutes = number
labels = list(string)
launch_template = object({
name = string
})
group_name = string
name_prefix = string
pool_owner = string
role = object({
arn = string
})
})
instance_types = list(string)
instance_target_capacity_type = string
instance_allocation_strategy = string
instance_max_spot_price = string
prefix = string
pool = list(object({
schedule_expression = string
schedule_expression_timezone = string
size = number
}))
role_permissions_boundary = string
kms_key_arn = string
ami_kms_key_arn = string
ami_id_ssm_parameter_arn = string
role_path = string
ssm_token_path = string
ssm_config_path = string
ami_id_ssm_parameter_name = string
ami_id_ssm_parameter_read_policy_arn = string
arn_ssm_parameters_path_config = string
lambda_tags = map(string)
user_agent = string
})
| n/a | yes | +| [aws\_region](#input\_aws\_region) | AWS region. | `string` | n/a | yes | +| [config](#input\_config) | Lookup details in parent module. |
object({
enable_enterprise_runners = optional(bool)
lambda = object({
log_level = string
logging_retention_in_days = number
logging_kms_key_id = string
reserved_concurrent_executions = number
s3_bucket = string
s3_key = string
s3_object_version = string
security_group_ids = list(string)
runtime = string
architecture = string
memory_size = number
timeout = number
zip = string
subnet_ids = list(string)
})
tags = map(string)
ghes = object({
url = string
ssl_verify = string
})
github_app_parameters = object({
key_base64 = optional(map(string))
id = optional(map(string))
})
enterprise_pat = optional(map(string))
subnet_ids = list(string)
runner = object({
disable_runner_autoupdate = bool
ephemeral = bool
enable_jit_config = bool
enable_on_demand_failover_for_errors = list(string)
boot_time_in_minutes = number
labels = list(string)
launch_template = object({
name = string
})
group_name = string
name_prefix = string
pool_owner = string
role = object({
arn = string
})
})
instance_types = list(string)
instance_target_capacity_type = string
instance_allocation_strategy = string
instance_max_spot_price = string
prefix = string
pool = list(object({
schedule_expression = string
schedule_expression_timezone = string
size = number
}))
role_permissions_boundary = string
kms_key_arn = string
ami_kms_key_arn = string
ami_id_ssm_parameter_arn = string
role_path = string
ssm_token_path = string
ssm_config_path = string
ami_id_ssm_parameter_name = string
ami_id_ssm_parameter_read_policy_arn = string
arn_ssm_parameters_path_config = string
lambda_tags = map(string)
user_agent = string
})
| n/a | yes | | [tracing\_config](#input\_tracing\_config) | Configuration for lambda tracing. |
object({
mode = optional(string, null)
capture_http_requests = optional(bool, false)
capture_error = optional(bool, false)
})
| `{}` | no | ## Outputs diff --git a/modules/runners/pool/main.tf b/modules/runners/pool/main.tf index e141b22d25..db719c8d56 100644 --- a/modules/runners/pool/main.tf +++ b/modules/runners/pool/main.tf @@ -1,3 +1,5 @@ +data "aws_caller_identity" "current" {} + resource "aws_lambda_function" "pool" { s3_bucket = var.config.lambda.s3_bucket != null ? var.config.lambda.s3_bucket : null @@ -17,22 +19,25 @@ resource "aws_lambda_function" "pool" { environment { variables = { - AMI_ID_SSM_PARAMETER_NAME = var.config.ami_id_ssm_parameter_name - DISABLE_RUNNER_AUTOUPDATE = var.config.runner.disable_runner_autoupdate - ENABLE_EPHEMERAL_RUNNERS = var.config.runner.ephemeral - ENABLE_JIT_CONFIG = var.config.runner.enable_jit_config - ENVIRONMENT = var.config.prefix - GHES_URL = var.config.ghes.url - USER_AGENT = var.config.user_agent - INSTANCE_ALLOCATION_STRATEGY = var.config.instance_allocation_strategy - INSTANCE_MAX_SPOT_PRICE = var.config.instance_max_spot_price - INSTANCE_TARGET_CAPACITY_TYPE = var.config.instance_target_capacity_type - INSTANCE_TYPES = join(",", var.config.instance_types) - LAUNCH_TEMPLATE_NAME = var.config.runner.launch_template.name - LOG_LEVEL = var.config.lambda.log_level - NODE_TLS_REJECT_UNAUTHORIZED = var.config.ghes.url != null && !var.config.ghes.ssl_verify ? 
0 : 1 - PARAMETER_GITHUB_APP_ID_NAME = var.config.github_app_parameters.id.name - PARAMETER_GITHUB_APP_KEY_BASE64_NAME = var.config.github_app_parameters.key_base64.name + AMI_ID_SSM_PARAMETER_NAME = var.config.ami_id_ssm_parameter_name + DISABLE_RUNNER_AUTOUPDATE = var.config.runner.disable_runner_autoupdate + ENABLE_EPHEMERAL_RUNNERS = var.config.runner.ephemeral + ENABLE_JIT_CONFIG = var.config.runner.enable_jit_config + ENVIRONMENT = var.config.prefix + GHES_URL = var.config.ghes.url + USER_AGENT = var.config.user_agent + INSTANCE_ALLOCATION_STRATEGY = var.config.instance_allocation_strategy + INSTANCE_MAX_SPOT_PRICE = var.config.instance_max_spot_price + INSTANCE_TARGET_CAPACITY_TYPE = var.config.instance_target_capacity_type + INSTANCE_TYPES = join(",", var.config.instance_types) + LAUNCH_TEMPLATE_NAME = var.config.runner.launch_template.name + LOG_LEVEL = var.config.lambda.log_level + NODE_TLS_REJECT_UNAUTHORIZED = var.config.ghes.url != null && !var.config.ghes.ssl_verify ? 0 : 1 + PARAMETER_GITHUB_APP_ID_NAME = var.config.enterprise_pat == null ? var.config.github_app_parameters.id.name : null + PARAMETER_GITHUB_APP_KEY_BASE64_NAME = var.config.enterprise_pat == null ? var.config.github_app_parameters.key_base64.name : null + PARAMETER_ENTERPRISE_PAT_NAME = var.config.enterprise_pat != null ? var.config.enterprise_pat.name : null + ENABLE_ENTERPRISE_RUNNERS = var.config.enable_enterprise_runners + POWERTOOLS_LOGGER_LOG_EVENT = var.config.lambda.log_level == "debug" ? 
"true" : "false" RUNNER_BOOT_TIME_IN_MINUTES = var.config.runner.boot_time_in_minutes RUNNER_LABELS = lower(join(",", var.config.runner.labels)) @@ -87,11 +92,14 @@ resource "aws_iam_role_policy" "pool" { policy = templatefile("${path.module}/policies/lambda-pool.json", { arn_ssm_parameters_path_config = var.config.arn_ssm_parameters_path_config arn_runner_instance_role = var.config.runner.role.arn - github_app_id_arn = var.config.github_app_parameters.id.arn - github_app_key_base64_arn = var.config.github_app_parameters.key_base64.arn + github_app_id_arn = var.config.enterprise_pat == null ? var.config.github_app_parameters.id.arn : null + github_app_key_base64_arn = var.config.enterprise_pat == null ? var.config.github_app_parameters.key_base64.arn : null + enterprise_pat_arn = var.config.enterprise_pat != null ? var.config.enterprise_pat.arn : null kms_key_arn = var.config.kms_key_arn ami_kms_key_arn = var.config.ami_kms_key_arn - ssm_ami_id_parameter_arn = var.config.ami_id_ssm_parameter_arn + ssm_config_path = "arn:${var.aws_partition}:ssm:${var.aws_region}:${data.aws_caller_identity.current.account_id}:parameter${var.config.ssm_config_path}" + + ssm_ami_id_parameter_arn = var.config.ami_id_ssm_parameter_arn }) } diff --git a/modules/runners/pool/policies/lambda-pool.json b/modules/runners/pool/policies/lambda-pool.json index b0360a825c..f6c827fe8b 100644 --- a/modules/runners/pool/policies/lambda-pool.json +++ b/modules/runners/pool/policies/lambda-pool.json @@ -54,8 +54,16 @@ "ssm:GetParameter" ], "Resource": [ +%{ if github_app_key_base64_arn != null ~} "${github_app_key_base64_arn}", - "${github_app_id_arn}" +%{ endif ~} +%{ if github_app_id_arn != null ~} + "${github_app_id_arn}", +%{ endif ~} +%{ if enterprise_pat_arn != null ~} + "${enterprise_pat_arn}", +%{ endif ~} + "${ssm_config_path}/*" ] %{ if kms_key_arn != "" ~} }, diff --git a/modules/runners/pool/variables.tf b/modules/runners/pool/variables.tf index f1e841cde6..b593169833 100644 --- 
a/modules/runners/pool/variables.tf +++ b/modules/runners/pool/variables.tf @@ -1,6 +1,7 @@ variable "config" { description = "Lookup details in parent module." type = object({ + enable_enterprise_runners = optional(bool) lambda = object({ log_level = string logging_retention_in_days = number @@ -23,10 +24,11 @@ variable "config" { ssl_verify = string }) github_app_parameters = object({ - key_base64 = map(string) - id = map(string) + key_base64 = optional(map(string)) + id = optional(map(string)) }) - subnet_ids = list(string) + enterprise_pat = optional(map(string)) + subnet_ids = list(string) runner = object({ disable_runner_autoupdate = bool ephemeral = bool @@ -85,4 +87,9 @@ variable "tracing_config" { default = {} } +variable "aws_region" { + description = "AWS region." + type = string +} + diff --git a/modules/runners/scale-down.tf b/modules/runners/scale-down.tf index d274e3d4f1..dde8a1c079 100644 --- a/modules/runners/scale-down.tf +++ b/modules/runners/scale-down.tf @@ -29,8 +29,9 @@ resource "aws_lambda_function" "scale_down" { LOG_LEVEL = var.log_level MINIMUM_RUNNING_TIME_IN_MINUTES = coalesce(var.minimum_running_time_in_minutes, local.min_runtime_defaults[var.runner_os]) NODE_TLS_REJECT_UNAUTHORIZED = var.ghes_url != null && !var.ghes_ssl_verify ? 0 : 1 - PARAMETER_GITHUB_APP_ID_NAME = var.github_app_parameters.id.name - PARAMETER_GITHUB_APP_KEY_BASE64_NAME = var.github_app_parameters.key_base64.name + PARAMETER_GITHUB_APP_ID_NAME = var.enterprise_pat == null ? var.github_app_parameters.id.name : null + PARAMETER_GITHUB_APP_KEY_BASE64_NAME = var.enterprise_pat == null ? var.github_app_parameters.key_base64.name : null + PARAMETER_ENTERPRISE_PAT_NAME = var.enterprise_pat != null ? var.enterprise_pat.name : null POWERTOOLS_LOGGER_LOG_EVENT = var.log_level == "debug" ? 
"true" : "false" RUNNER_BOOT_TIME_IN_MINUTES = var.runner_boot_time_in_minutes SCALE_DOWN_CONFIG = jsonencode(var.idle_config) @@ -97,8 +98,9 @@ resource "aws_iam_role_policy" "scale_down" { role = aws_iam_role.scale_down.name policy = templatefile("${path.module}/policies/lambda-scale-down.json", { environment = var.prefix - github_app_id_arn = var.github_app_parameters.id.arn - github_app_key_base64_arn = var.github_app_parameters.key_base64.arn + github_app_id_arn = var.enterprise_pat == null ? var.github_app_parameters.id.arn : null + github_app_key_base64_arn = var.enterprise_pat == null ? var.github_app_parameters.key_base64.arn : null + enterprise_pat_arn = var.enterprise_pat != null ? var.enterprise_pat.arn : null kms_key_arn = local.kms_key_arn }) } diff --git a/modules/runners/scale-up.tf b/modules/runners/scale-up.tf index 9230267c07..8ba9da8617 100644 --- a/modules/runners/scale-up.tf +++ b/modules/runners/scale-up.tf @@ -32,6 +32,8 @@ resource "aws_lambda_function" "scale_up" { ENABLE_JOB_QUEUED_CHECK = local.enable_job_queued_check ENABLE_METRIC_GITHUB_APP_RATE_LIMIT = var.metrics.enable && var.metrics.metric.enable_github_app_rate_limit ENABLE_ORGANIZATION_RUNNERS = var.enable_organization_runners + ENABLE_ENTERPRISE_RUNNERS = var.enable_enterprise_runners + ENTERPRISE_SLUG = var.enterprise_slug ENVIRONMENT = var.prefix GHES_URL = var.ghes_url USER_AGENT = var.user_agent @@ -43,8 +45,9 @@ resource "aws_lambda_function" "scale_up" { LOG_LEVEL = var.log_level MINIMUM_RUNNING_TIME_IN_MINUTES = coalesce(var.minimum_running_time_in_minutes, local.min_runtime_defaults[var.runner_os]) NODE_TLS_REJECT_UNAUTHORIZED = var.ghes_url != null && !var.ghes_ssl_verify ? 0 : 1 - PARAMETER_GITHUB_APP_ID_NAME = var.github_app_parameters.id.name - PARAMETER_GITHUB_APP_KEY_BASE64_NAME = var.github_app_parameters.key_base64.name + PARAMETER_GITHUB_APP_ID_NAME = var.enterprise_pat == null ? 
var.github_app_parameters.id.name : null + PARAMETER_GITHUB_APP_KEY_BASE64_NAME = var.enterprise_pat == null ? var.github_app_parameters.key_base64.name : null + PARAMETER_ENTERPRISE_PAT_NAME = var.enterprise_pat != null ? var.enterprise_pat.name : null POWERTOOLS_LOGGER_LOG_EVENT = var.log_level == "debug" ? "true" : "false" POWERTOOLS_METRICS_NAMESPACE = var.metrics.namespace POWERTOOLS_TRACE_ENABLED = var.tracing_config.mode != null ? true : false @@ -114,8 +117,9 @@ resource "aws_iam_role_policy" "scale_up" { policy = templatefile("${path.module}/policies/lambda-scale-up.json", { arn_runner_instance_role = aws_iam_role.runner.arn sqs_arn = var.sqs_build_queue.arn - github_app_id_arn = var.github_app_parameters.id.arn - github_app_key_base64_arn = var.github_app_parameters.key_base64.arn + github_app_id_arn = var.enterprise_pat == null ? var.github_app_parameters.id.arn : null + github_app_key_base64_arn = var.enterprise_pat == null ? var.github_app_parameters.key_base64.arn : null + enterprise_pat_arn = var.enterprise_pat != null ? var.enterprise_pat.arn : null ssm_config_path = "arn:${var.aws_partition}:ssm:${var.aws_region}:${data.aws_caller_identity.current.account_id}:parameter${var.ssm_paths.root}/${var.ssm_paths.config}" kms_key_arn = local.kms_key_arn ami_kms_key_arn = local.ami_kms_key_arn diff --git a/modules/runners/variables.tf b/modules/runners/variables.tf index a78231e7da..e527f14f42 100644 --- a/modules/runners/variables.tf +++ b/modules/runners/variables.tf @@ -219,6 +219,16 @@ variable "enable_organization_runners" { type = bool } +variable "enable_enterprise_runners" { + description = "Register runners to the enterprise, instead of repo or organization level" + type = bool +} + +variable "enterprise_slug" { + description = "Enterprise slug" + type = string +} + variable "github_app_parameters" { description = "Parameter Store for GitHub App Parameters." 
type = object({ @@ -227,6 +237,11 @@ variable "github_app_parameters" { }) } +variable "enterprise_pat" { + description = "GitHub Enterprise PAT to use for registering runners to the enterprise. This is only required when `enable_enterprise_runners` is set to true." + type = map(string) +} + variable "lambda_scale_down_memory_size" { description = "Memory size limit in MB for scale down lambda." type = number diff --git a/modules/ssm/README.md b/modules/ssm/README.md index cb23d3aa87..f0c8eb95e8 100644 --- a/modules/ssm/README.md +++ b/modules/ssm/README.md @@ -29,12 +29,14 @@ No modules. | [aws_ssm_parameter.github_app_id](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | | [aws_ssm_parameter.github_app_key_base64](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | | [aws_ssm_parameter.github_app_webhook_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | +| [aws_ssm_parameter.github_enterprise_pat](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | resource | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [github\_app](#input\_github\_app) | GitHub app parameters, see your github app.
You can optionally create the SSM parameters yourself and provide the ARN and name here, through the `*_ssm` attributes.
If you chose to provide the configuration values directly here,
please ensure the key is the base64-encoded `.pem` file (the output of `base64 app.private-key.pem`, not the content of `private-key.pem`).
Note: the provided SSM parameters arn and name have a precedence over the actual value (i.e `key_base64_ssm` has a precedence over `key_base64` etc). |
object({
key_base64 = optional(string)
key_base64_ssm = optional(object({
arn = string
name = string
}))
id = optional(string)
id_ssm = optional(object({
arn = string
name = string
}))
webhook_secret = optional(string)
webhook_secret_ssm = optional(object({
arn = string
name = string
}))
})
| n/a | yes | +| [enterprise\_pat](#input\_enterprise\_pat) | GitHub enterprise PAT. Used only when enable\_enterprise\_runners is true. | `string` | `null` | no | +| [github\_app](#input\_github\_app) | GitHub app parameters. |
object({
key_base64 = optional(string)
key_base64_ssm = optional(object({
arn = string
name = string
}))
id = optional(string)
id_ssm = optional(object({
arn = string
name = string
}))
webhook_secret = optional(string)
webhook_secret_ssm = optional(object({
arn = string
name = string
}))
})
| n/a | yes | | [kms\_key\_arn](#input\_kms\_key\_arn) | Optional CMK Key ARN to be used for Parameter Store. | `string` | `null` | no | | [path\_prefix](#input\_path\_prefix) | The path prefix used for naming resources | `string` | n/a | yes | | [tags](#input\_tags) | Map of tags that will be added to created resources. By default resources will be tagged with name and environment. | `map(string)` | `{}` | no | diff --git a/modules/ssm/outputs.tf b/modules/ssm/outputs.tf index 4017f6ab3d..62b1f5ff11 100644 --- a/modules/ssm/outputs.tf +++ b/modules/ssm/outputs.tf @@ -1,16 +1,20 @@ output "parameters" { value = { github_app_id = { - name = var.github_app.id_ssm != null ? var.github_app.id_ssm.name : aws_ssm_parameter.github_app_id[0].name - arn = var.github_app.id_ssm != null ? var.github_app.id_ssm.arn : aws_ssm_parameter.github_app_id[0].arn + name = var.github_app.id_ssm != null ? var.github_app.id_ssm.name : var.enterprise_pat != null ? null : aws_ssm_parameter.github_app_id[0].name + arn = var.github_app.id_ssm != null ? var.github_app.id_ssm.arn : var.enterprise_pat != null ? null : aws_ssm_parameter.github_app_id[0].arn } github_app_key_base64 = { - name = var.github_app.key_base64_ssm != null ? var.github_app.key_base64_ssm.name : aws_ssm_parameter.github_app_key_base64[0].name - arn = var.github_app.key_base64_ssm != null ? var.github_app.key_base64_ssm.arn : aws_ssm_parameter.github_app_key_base64[0].arn + name = var.github_app.key_base64_ssm != null ? var.github_app.key_base64_ssm.name : var.enterprise_pat != null ? null : aws_ssm_parameter.github_app_key_base64[0].name + arn = var.github_app.key_base64_ssm != null ? var.github_app.key_base64_ssm.arn : var.enterprise_pat != null ? null : aws_ssm_parameter.github_app_key_base64[0].arn } github_app_webhook_secret = { name = var.github_app.webhook_secret_ssm != null ? 
var.github_app.webhook_secret_ssm.name : aws_ssm_parameter.github_app_webhook_secret[0].name arn = var.github_app.webhook_secret_ssm != null ? var.github_app.webhook_secret_ssm.arn : aws_ssm_parameter.github_app_webhook_secret[0].arn } + enterprise_pat = { + name = var.enterprise_pat != null ? aws_ssm_parameter.github_enterprise_pat[0].name : null + arn = var.enterprise_pat != null ? aws_ssm_parameter.github_enterprise_pat[0].arn : null + } } } diff --git a/modules/ssm/ssm.tf b/modules/ssm/ssm.tf index 3f13333e68..4b4f2969c1 100644 --- a/modules/ssm/ssm.tf +++ b/modules/ssm/ssm.tf @@ -1,5 +1,5 @@ resource "aws_ssm_parameter" "github_app_id" { - count = var.github_app.id_ssm != null ? 0 : 1 + count = var.github_app.id_ssm != null || var.enterprise_pat != null ? 0 : 1 name = "${var.path_prefix}/github_app_id" type = "SecureString" value = var.github_app.id @@ -8,7 +8,7 @@ resource "aws_ssm_parameter" "github_app_id" { } resource "aws_ssm_parameter" "github_app_key_base64" { - count = var.github_app.key_base64_ssm != null ? 0 : 1 + count = var.github_app.key_base64_ssm != null || var.enterprise_pat != null ? 0 : 1 name = "${var.path_prefix}/github_app_key_base64" type = "SecureString" value = var.github_app.key_base64 @@ -24,3 +24,12 @@ resource "aws_ssm_parameter" "github_app_webhook_secret" { key_id = local.kms_key_arn tags = var.tags } + +resource "aws_ssm_parameter" "github_enterprise_pat" { + count = var.enterprise_pat != null ? 1 : 0 + name = "${var.path_prefix}/github_enterprise_pat" + type = "SecureString" + value = var.enterprise_pat + key_id = local.kms_key_arn + tags = var.tags +} diff --git a/modules/ssm/variables.tf b/modules/ssm/variables.tf index 1eb796aea7..58956ba235 100644 --- a/modules/ssm/variables.tf +++ b/modules/ssm/variables.tf @@ -1,11 +1,14 @@ +variable "enterprise_pat" { + description = "GitHub enterprise PAT. Used only when enable_enterprise_runners is true." + type = string + default = null +} + variable "github_app" { description = <