From 6913c8737780a309b5eecd4c77e90ba00194f34f Mon Sep 17 00:00:00 2001 From: Ian Dominno <104934992+iadomi@users.noreply.github.com> Date: Fri, 18 Apr 2025 11:19:54 -0400 Subject: [PATCH 1/8] docs: (PSKD-1500) update CONTRIBUTING.md with new expectations (#626) * docs: (PSKD-1500) update CONTRIBUTING.md with new expectations Signed-off-by: Ian Dominno * docs: (PSKD-1500) update CONTRIBUTING.md with new expectations Signed-off-by: Ian Dominno --------- Signed-off-by: Ian Dominno --- CONTRIBUTING.md | 53 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 9 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index aa8bd768..ce4711c1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,14 +6,49 @@ We just ask that you follow our contribution guidelines when you do. Contributions to this project must be accompanied by a signed [Contributor Agreement](ContributorAgreement.txt). You (or your employer) retain the copyright to your contribution; this simply grants us permission to use and redistribute your contributions as part of the project. -## Code reviews -All submissions to this project—including submissions from project members—require review. -Our review process typically involves performing unit tests, development tests, integration tests, and security scans using internal SAS infrastructure. -For this reason, we don’t often merge pull requests directly from GitHub. +## Code Reviews +All submissions to this project—including submissions from project members—require +review. Our review process typically involves performing unit tests, development +tests, integration tests, and security scans. -Instead, we work with submissions internally first, vetting them to ensure they meet our security and quality standards. -We’ll do our best to work with contributors in public issues and pull requests; however, to ensure our code meets our internal compliance standards, we may need to incorporate your submission into a solution we push ourselves. +## Pull Request Requirement -This does not mean we don’t value or appreciate your contribution. -We simply need to review your code internally before merging it. -We work to ensure all contributors receive appropriate recognition for their contributions, at least by acknowledging them in our release notes. +### Conventional Commits +All pull requests must follow the [Conventional Commit](https://www.conventionalcommits.org/en/v1.0.0/) +standard for commit messages. This helps maintain a consistent and meaningful +commit history. Pull requests with commits that do not follow the Conventional +Commit format will not be merged. + +### Developer Certificate of Origin Sign-Off +This project requires all commits to be signed off in accordance with the [Developer Certificate of Origin (DCO)](https://developercertificate.org/). +By signing off your commits, you certify that you have the right to submit the +contribution under the open source license used by this project. + +To sign off your commits, use the --signoff flag with git commit: + +```bash +git commit --signoff -m "Your commit message" +``` + +This will add a Signed-off-by line to your commit message, e.g.: + +```bash +Signed-off-by: You Name +``` + +For more information, please refer to https://probot.github.io/apps/dco/ + +### Linter Analysis Checks +All pull requests must pass our automated analysis checks before they can be +merged. 
These checks include: + +- **Hadolint** – for Dockerfile best practices +- **ShellCheck** – for shell script issues +- **Ansible-lint** – for Ansible playbook and role validation + +## Security Scans +To ensure that all submissions meet our security and quality standards, we perform +security scans using internal SAS infrastructure. Contributions might be subjected +to security scans before they can be accepted. Reporting of any Common Vulnerabilities +and Exposures (CVEs) that are detected is not available in this project at this +time. From b96be2d336fa4a8a6ed27fb642f9309d9fd0e31f Mon Sep 17 00:00:00 2001 From: chjmil Date: Wed, 23 Apr 2025 14:23:58 -0400 Subject: [PATCH 2/8] docs: add required cadence variables to examples Signed-off-by: chjmil --- examples/ansible-vars-iac.yaml | 2 ++ examples/ansible-vars.yaml | 2 ++ examples/multi-tenancy/ansible-vars-multi-tenancy.yaml | 2 ++ 3 files changed, 6 insertions(+) diff --git a/examples/ansible-vars-iac.yaml b/examples/ansible-vars-iac.yaml index d7420d9c..48e7e37f 100644 --- a/examples/ansible-vars-iac.yaml +++ b/examples/ansible-vars-iac.yaml @@ -12,6 +12,8 @@ V4_CFG_MANAGE_STORAGE: true V4_CFG_SAS_API_KEY: V4_CFG_SAS_API_SECRET: V4_CFG_ORDER_NUMBER: +V4_CFG_CADENCE_NAME: # [lts|stable] +V4_CFG_CADENCE_VERSION: ## CR Access V4_CFG_CR_USER: diff --git a/examples/ansible-vars.yaml b/examples/ansible-vars.yaml index 2ae163ba..d6d85968 100644 --- a/examples/ansible-vars.yaml +++ b/examples/ansible-vars.yaml @@ -23,6 +23,8 @@ V4_CFG_MANAGE_STORAGE: true V4_CFG_SAS_API_KEY: V4_CFG_SAS_API_SECRET: V4_CFG_ORDER_NUMBER: +V4_CFG_CADENCE_NAME: # [lts|stable] +V4_CFG_CADENCE_VERSION: ## CR Access V4_CFG_CR_USER: diff --git a/examples/multi-tenancy/ansible-vars-multi-tenancy.yaml b/examples/multi-tenancy/ansible-vars-multi-tenancy.yaml index a0eed153..1876497d 100644 --- a/examples/multi-tenancy/ansible-vars-multi-tenancy.yaml +++ b/examples/multi-tenancy/ansible-vars-multi-tenancy.yaml @@ -15,6 +15,8 @@ V4_CFG_MANAGE_STORAGE: true V4_CFG_SAS_API_KEY: V4_CFG_SAS_API_SECRET: V4_CFG_ORDER_NUMBER: +V4_CFG_CADENCE_NAME: # [lts|stable] +V4_CFG_CADENCE_VERSION: ## CR Access V4_CFG_CR_USER: From 2019f50082d59a17230a8c2d76e22d09a6414781 Mon Sep 17 00:00:00 2001 From: chjmil Date: Thu, 1 May 2025 14:38:29 -0400 Subject: [PATCH 3/8] feat: init work on adding V4M Signed-off-by: chjmil --- README.md | 3 +- docs/CONFIG-VARS.md | 60 +++++++++++++++++++++++++++++++-- docs/user/AnsibleUsage.md | 19 ++++++----- docs/user/DockerVolumeMounts.md | 31 +++++++++++++++++ examples/ansible-vars-iac.yaml | 4 +++ 5 files changed, 106 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 89623449..bd2e9e53 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,7 @@ This project contains Ansible code that creates a baseline cluster in an existin - Create affinity rules such that processes are targeted to appropriately labeled nodes - Create pod disruption budgets for each service such that cluster maintenance will not let the last instance of a service go down (during a node maintenance operation, for example) - Use kustomize to mount user private (home) directories and data directories on CAS nodes and on compute server instances + - Deploy [SAS Viya Monitoring for Kubernetes](https://github.com/sassoftware/viya4-monitoring-kubernetes) - Deploy MPP or SMP CAS servers - Manage SAS Viya Platform Deployments @@ -311,7 +312,7 @@ Create and manage deployments using one of the following methods: ### DNS -During the installation, an ingress load balancer can be 
installed for the SAS Viya platform. The host name for these services must be registered with your DNS provider in order to resolve to the LoadBalancer endpoint. This can be done by creating a record for each unique ingress controller host. +During the installation, an ingress load balancer can be installed for the SAS Viya platform and for the monitoring and logging stack. The host name for these services must be registered with your DNS provider in order to resolve to the LoadBalancer endpoint. This can be done by creating a record for each unique ingress controller host. However, when you are managing multiple SAS Viya platform deployments, creating these records can be time-consuming. In such a case, SAS recommends creating a DNS record that points to the ingress controller's endpoint. The endpoint might be an IP address or FQDN, depending on the cloud provider. Take these steps: diff --git a/docs/CONFIG-VARS.md b/docs/CONFIG-VARS.md index 6994dcab..624ebc65 100644 --- a/docs/CONFIG-VARS.md +++ b/docs/CONFIG-VARS.md @@ -20,6 +20,9 @@ Supported configuration variables are listed in the table below. All variables - [Container Registry Access](#container-registry-access) - [Ingress](#ingress) - [Load Balancer](#load-balancer) + - [Monitoring and Logging](#monitoring-and-logging) + - [Monitoring](#monitoring) + - [Logging](#logging) - [TLS](#tls) - [PostgreSQL](#postgresql) - [CAS](#cas) @@ -66,7 +69,7 @@ Supported configuration variables are listed in the table below. All variables | :--- | ---: | ---: | ---: | ---: | ---: | ---: | | PROVIDER | Cloud provider | string | | true | [aws,azure,gcp,custom] | baseline, viya | | CLUSTER_NAME | Name of the Kubernetes cluster | string | | true | | baseline, viya | -| NAMESPACE | Kubernetes namespace in which to deploy | string | | true | | baseline, viya | +| NAMESPACE | Kubernetes namespace in which to deploy | string | | true | | baseline, viya, viya-monitoring | ### Authentication @@ -178,13 +181,66 @@ When V4_CFG_MANAGE_STORAGE is set to `true`, the `sas` and `pg-storage` storage | :--- | ---: | ---: | ---: | ---: | ---: | ---: | | V4_CFG_AWS_LB_SUBNETS | The AWS subnets and by association the AWS availability zones to deploy the load balancing service to. This variable sets an ingress-nginx annotation which interacts with the [Cloud Controller Manager](https://kubernetes.io/docs/tasks/administer-cluster/developing-cloud-controller-manager/) to set the subnets used by the AWS load balancer. Specifying a subnet value or values for this variable takes precedence over the Subnet Discovery method described in [AWS docs](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html) that relies on the tags applied to AWS subnets documented in scenario 2 of this [table.](https://github.com/sassoftware/viya4-iac-aws/blob/main/docs/user/BYOnetwork.md#supported-scenarios-and-requirements-for-using-existing-network-resources) This variable can be set with [BYO network scenarios 0-3](https://github.com/sassoftware/viya4-iac-aws/blob/main/docs/user/BYOnetwork.md#supported-scenarios-and-requirements-for-using-existing-network-resources). | string | | false | The value is either a comma separated list of subnet IDs, or a comma separated list of subnet names. Does not affect the subnets used for load balancers enabled with `V4_CFG_CAS_ENABLE_LOADBALANCER`, `V4_CFG_CONNECT_ENABLE_LOADBALANCER`, or `V4_CFG_CONSUL_ENABLE_LOADBALANCER`. 
| baseline | +## Monitoring and Logging + +| Name | Description | Type | Default | Required | Notes | Tasks | +| :--- | ---: | ---: | ---: | ---: | ---: | ---: | +| V4M_VERSION | Branch or tag of [viya4-monitoring-kubernetes](https://github.com/sassoftware/viya4-monitoring-kubernetes) | string | stable | false | | cluster-logging, cluster-monitoring, viya-monitoring | +| V4M_BASE_DOMAIN | Base domain in which subdomains for search, dashboards, Grafana, Prometheus, and Alertmanager are created | string | | false | This parameter or the per-service FQDNs must be set. | cluster-logging, cluster-monitoring, viya-monitoring | +| V4M_CERT | Path to TLS certificate to use for all monitoring/logging services | string | | false | As an alternative, you can set the per-service certificate. | cluster-logging, cluster-monitoring, viya-monitoring | +| V4M_KEY | Path to TLS key to use for all monitoring/logging services | string | | false | As an alternative, you can set the per-service certificate. | cluster-logging, cluster-monitoring, viya-monitoring | +| V4M_NODE_PLACEMENT_ENABLE | Whether to enable workload node placement for viya4-monitoring-kubernetes stack | bool | false | false | | cluster-logging, cluster-monitoring, viya-monitoring | +| V4M_STORAGECLASS | StorageClass name | string | v4m | false | When V4_CFG_MANAGE_STORAGE is false, set to the name of your pre-existing StorageClass that supports ReadWriteOnce. | cluster-logging, cluster-monitoring, viya-monitoring | +| V4M_ROUTING | Which routing type to use for viya4-monitoring-kubernetes applications | string | host-based | false | Supported values: [`host-based`, `path-based`] For host-based routing, the application name is part of the host name itself `https://dashboards.host.cluster.example.com/` For path-based routing, the host name is fixed and the application name is appended as a path on the URL `https://host.cluster.example.com/dashboards` | cluster-logging, cluster-monitoring | +| V4M_CUSTOM_CONFIG_USER_DIR | Path to the viya4-monitoring-kubernetes top-level `USER_DIR` folder on the local file system. The `USER_DIR` folder can contain a top-level `user.env` file and `logging` and `monitoring` folders where your logging and monitoring `user.env` and customization yaml files are located. **NOTE**: viya4-monitoring does not validate `user.env` or yaml file content pointed to by this variable. It is recommended to use file content that has been verified ahead of time. | string | null | false | The following V4M configuration variables are ignored by viya4-monitoring when `V4M_CUSTOM_CONFIG_USER_DIR` is set: [`V4M_ROUTING`, `V4M_BASE_DOMAIN`, all `V4M_*_FQDN` variables, all `V4M_*_PASSWORD` variables] [Additional documentation](https://documentation.sas.com/?cdcId=obsrvcdc&cdcVersion=v_001&docsetId=obsrvdply&docsetTarget=n0wgd3ju667sa9n1adnxs7hnsqt6.htm) describing the `USER_DIR` folder is available.| cluster-logging, cluster-monitoring | + +#### Open Source Kubernetes + +When deploying `cluster-logging` or `cluster-monitoring` applications to kubernetes cluster infrastructure provisioned with the [Open Source Kubernetes viya4-iac-k8s](https://github.com/sassoftware/viya4-iac-k8s) project, you must explicitly set the value for `V4M_STORAGECLASS` to a pre-existing Storage Class (for example: `local-storage`) regardless of the value set for `V4_CFG_MANAGE_STORAGE`. While other storage classes can be used, the `local-storage` class is **recommended** for the Viya Monitoring and Loggging tools. 
+ +### Monitoring + +| Name | Description | Type | Default | Required | Notes | Tasks | +| :--- | ---: | ---: | ---: | ---: | ---: | ---: | +| V4M_MONITORING_NAMESPACE | Namespace for the monitoring resources | string | monitoring | false | | cluster-monitoring | +| V4M_PROMETHEUS_FQDN | FQDN to use for Prometheus ingress | string | prometheus. | false | | cluster-monitoring | +| V4M_PROMETHEUS_CERT | Path to TLS certificate to use for Prometheus ingress | string | | false | If neither this variable nor V4M_CERT is set, a self-signed certificate is used. | cluster-monitoring | +| V4M_PROMETHEUS_KEY | Path to TLS key to use for Prometheus ingress | string | | false | If neither this variable nor V4M_KEY is set, a self-signed certificate is used. | cluster-monitoring | +| | | | | | | | +| V4M_GRAFANA_FQDN | FQDN to use for Grafana ingress | string | grafana. | false | | cluster-monitoring | +| V4M_GRAFANA_CERT | Path to TLS certificate to use for Grafana ingress | string | | false | If neither this variable nor V4M_CERT is set, a self-signed certificate is used. | cluster-monitoring | +| V4M_GRAFANA_KEY | Path to TLS key to use for Grafana ingress | string | | false | If neither this variable nor V4M_KEY is set, a self-signed certificate is used. | cluster-monitoring | +| V4M_GRAFANA_PASSWORD | Grafana administrator password | string | randomly generated | false | If not provided, a random password is generated and written to the log output. | cluster-monitoring | +| | | | | | | | +| V4M_ALERTMANAGER_FQDN | FQDN to use for Alertmanager ingress | string | alertmanager. | false | | cluster-monitoring | +| V4M_ALERTMANAGER_CERT | Path to TLS certificate to use for Alertmanager ingress | string | | false | If neither this variable nor V4M_CERT is set, a self-signed certificate is used. | cluster-monitoring | +| V4M_ALERTMANAGER_KEY | Path to TLS key to use for Alertmanager ingress | string | | false | If neither this variable nor V4M_KEY is set, a self-signed certificate is used. | cluster-monitoring | + +### Logging + +| Name | Description | Type | Default | Required | Notes | Tasks | +| :--- | ---: | ---: | ---: | ---: | ---: | ---: | +| V4M_LOGGING_NAMESPACE | Namespace for the logging resources | string | logging | false | | cluster-logging | +| V4M_KIBANA_FQDN | FQDN to use for OpenSearch Dashboards ingress | string | dashboards. | false | | cluster-logging | +| V4M_KIBANA_CERT | Path to TLS certificate to use for OpenSearch Dashboards ingress | string | | false | If neither this variable nor V4M_CERT is set, a self-signed certificate is used. | cluster-logging | +| V4M_KIBANA_KEY | Path to TLS key to use for OpenSearch Dashboards ingress | string | | false | If neither this variable nor V4M_KEY is set, a self-signed certificate is used. | cluster-logging | +| V4M_KIBANA_PASSWORD | OpenSearch Dashboards administrator password | string | randomly generated | false | If not provided, a random password is generated and written to the log output. | cluster-logging | +| V4M_KIBANA_LOGADM_PASSWORD | OpenSearch Dashboards logadm user's password | string | randomly generated | false | If not provided, and if V4M_KIBANA_PASSWORD is not set, a random password is generated and written to the log output. 
| cluster-logging | +| V4M_KIBANASERVER_PASSWORD | OpenSearch Dashboards server password | string | randomly generated | false | If not provided, a random password is generated and written to the log output | cluster-logging | +| V4M_LOGCOLLECTOR_PASSWORD | Logcollector password | string | randomly generated | false | If not provided, a random password is generated and written to the log output | cluster-logging | +| V4M_METRICGETTER_PASSWORD | Metricgetter password | string | randomly generated | false | If not provided, a random password is generated and written to the log output | cluster-logging | +| | | | | | | | +| V4M_ELASTICSEARCH_FQDN | FQDN to use for OpenSearch ingress | string | search. | false | | cluster-logging | +| V4M_ELASTICSEARCH_CERT | Path to TLS certificate to use for OpenSearch ingress | string | | false | If both this and V4M_CERT are not set a self-signed certificate is used. | cluster-logging | +| V4M_ELASTICSEARCH_KEY | Path to TLS key to use for OpenSearch ingress | string | | false | If neither this variable nor V4M_KEY is set, a self-signed certificate is used. | cluster-logging | + ## TLS The SAS Viya platform supports two certificate generators: cert-manager and openssl. | Name | Description | Type | Default | Required | Notes | Tasks | | :--- | ---: | ---: | ---: | ---: | ---: | ---: | -| V4_CFG_TLS_GENERATOR | Which SAS-provided tool to use for certificate generation | string | openssl | false | Supported values: [`cert-manager`,`openssl`]. If set to `cert-manager`, `cert-manager` will be installed during baselining. | baseline, viya | +| V4_CFG_TLS_GENERATOR | Which SAS-provided tool to use for certificate generation | string | openssl | false | Supported values: [`cert-manager`,`openssl`]. If set to `cert-manager`, `cert-manager` will be installed during baselining. | baseline, viya, cluster-logging, cluster-monitoring | | V4_CFG_TLS_MODE | Which TLS mode to configure | string | front-door | false | Supported values: [`full-stack`,`front-door`,`disabled.`] When deploying full-stack you must set V4_CFG_TLS_TRUSTED_CA_CERTS to trust external postgres server ca. | all | | V4_CFG_TLS_CERT | Path to ingress certificate file | string | | false | If specified, used instead of cert-manager issued certificates | viya | | V4_CFG_TLS_KEY | Path to ingress key file | string | | false | Required when V4_CFG_TLS_CERT is specified | viya | diff --git a/docs/user/AnsibleUsage.md b/docs/user/AnsibleUsage.md index 32577b7d..c6fd39ed 100644 --- a/docs/user/AnsibleUsage.md +++ b/docs/user/AnsibleUsage.md @@ -49,10 +49,13 @@ More than one task can be run at the same time. An action can run against a sing | :--- | :--- | | baseline | Installs cluster level tooling needed for all SAS Viya platform deployments. These may include, cert-manager, ingress-nginx, nfs-client-provisioners and more. | | viya | Deploys the SAS Viya platform | +| cluster-logging | Installs cluster-wide logging using the [viya4-monitoring-kubernetes](https://github.com/sassoftware/viya4-monitoring-kubernetes) project. | +| cluster-monitoring | Installs cluster-wide monitoring using the [viya4-monitoring-kubernetes](https://github.com/sassoftware/viya4-monitoring-kubernetes) project. | +| viya-monitoring | Installs viya namespace level monitoring using the [viya4-monitoring-kubernetes](https://github.com/sassoftware/viya4-monitoring-kubernetes) project. 
| ### Examples -- I have a new cluster, deployed using one of the Viya4 IAC projects, and want to install everything +- I have a new cluster, deployed using one of the Viya4 IAC projects, and want to install everything. ```bash ansible-playbook \ @@ -60,10 +63,10 @@ More than one task can be run at the same time. An action can run against a sing -e CONFIG=$HOME/deployments/dev-cluster/dev-namespace/ansible-vars.yaml \ -e TFSTATE=$HOME/deployments/dev-cluster/terraform.tfstate \ -e JUMP_SVR_PRIVATE_KEY=$HOME/.ssh/id_rsa \ - playbooks/playbook.yaml --tags "baseline,viya,install" + playbooks/playbook.yaml --tags "baseline,viya,cluster-logging,cluster-monitoring,viya-monitoring,install" ``` -- I have a custom built cluster and want to install baseline dependencies only +- I have a custom built cluster and want to install baseline dependencies only. ```bash ansible-playbook \ @@ -85,7 +88,7 @@ More than one task can be run at the same time. An action can run against a sing playbooks/playbook.yaml --tags "viya,install" ``` -- I have an existing cluster with viya installed and want to install another viya instance in a different namespace +- I have an existing cluster with viya installed and want to install another viya instance in a different namespace with monitoring. ```bash ansible-playbook \ @@ -93,10 +96,10 @@ More than one task can be run at the same time. An action can run against a sing -e CONFIG=$HOME/deployments/dev-cluster/test-namespace/ansible-vars.yaml \ -e TFSTATE=$HOME/deployments/dev-cluster/terraform.tfstate \ -e JUMP_SVR_PRIVATE_KEY=$HOME/.ssh/id_rsa \ - playbooks/playbook.yaml --tags "viya,install" + playbooks/playbook.yaml --tags "viya,viya-monitoring,install" ``` -- I have a cluster with a single viya install. I want to uninstall everything +- I have a cluster with a single viya install as well as the monitoring and logging stack. I want to uninstall everything. ```bash ansible-playbook \ @@ -104,7 +107,7 @@ More than one task can be run at the same time. An action can run against a sing -e CONFIG=$HOME/deployments/dev-cluster/test-namespace/ansible-vars.yaml \ -e TFSTATE=$HOME/deployments/dev-cluster/terraform.tfstate \ -e JUMP_SVR_PRIVATE_KEY=$HOME/.ssh/id_rsa \ - playbooks/playbook.yaml --tags "baseline,viya,uninstall" + playbooks/playbook.yaml --tags "baseline,viya,cluster-logging,cluster-monitoring,viya-monitoring,uninstall" ``` ### Ansible Config @@ -117,5 +120,5 @@ export ANSIBLE_CONFIG=${WORKSPACE}/viya4-deployment/ansible.cfg ### Monitoring and Logging -To install SAS Viya Monitoring for Kubernetes, see the GitHub project https://github.com/sassoftware/viya4-monitoring-kubernetes for scripts and customization options +To get the full power of SAS Viya Monitoring for Kubernetes, it is recommended to deploy V4M independent of this project. See the GitHub project https://github.com/sassoftware/viya4-monitoring-kubernetes for scripts and customization options to deploy metric monitoring, alerts and log-message aggregation for SAS Viya. diff --git a/docs/user/DockerVolumeMounts.md b/docs/user/DockerVolumeMounts.md index 0b1bc223..b1aba9ad 100644 --- a/docs/user/DockerVolumeMounts.md +++ b/docs/user/DockerVolumeMounts.md @@ -25,6 +25,37 @@ Ansible vars to docker volume mounts mappings. 
For full listing of config vars s | V4_CFG_LICENSE | `--volume :/config/v4_cfg_license `| | V4_CFG_CERTS | `--volume :/config/v4_cfg_certs `| +## Monitoring and Logging + +| Ansible Var | Docker Mount | +| :--- | ---: | +| V4M_CERT | `--volume :/config/v4m_cert `| +| V4M_KEY | `--volume :/config/v4m_key `| +| V4M_CUSTOM_CONFIG_USER_DIR | `--volume :/config/v4m_custom_config_user_dir `| + +### Monitoring + +| Ansible Var | Docker Mount | +| :--- | ---: | +| V4M_PROMETHEUS_CERT | `--volume :/config/v4m_prometheus_cert `| +| V4M_PROMETHEUS_KEY | `--volume :/config/v4m_prometheus_key `| +| | | | | | | | +| V4M_GRAFANA_CERT | `--volume :/config/v4m_grafana_cert `| +| V4M_GRAFANA_KEY | `--volume :/config/v4m_grafana_key `| +| | | | | | | | +| V4M_ALERTMANAGER_CERT | `--volume :/config/v4m_alertmanager_cert `| +| V4M_ALERTMANAGER_KEY | `--volume :/config/v4m_alertmanager_key `| + +### Logging + +| Ansible Var | Docker Mount | +| :--- | ---: | +| V4M_KIBANA_CERT | `--volume :/config/v4m_kibana_cert `| +| V4M_KIBANA_KEY | `--volume :/config/v4m_kibana_key `| +| | | | | | | | +| V4M_ELASTICSEARCH_CERT | `--volume :/config/v4m_elasticsearch_cert `| +| V4M_ELASTICSEARCH_KEY | `--volume :/config/v4m_elasticsearch_key `| + ## TLS | Ansible Var | Docker Mount | diff --git a/examples/ansible-vars-iac.yaml b/examples/ansible-vars-iac.yaml index 48e7e37f..33378b03 100644 --- a/examples/ansible-vars-iac.yaml +++ b/examples/ansible-vars-iac.yaml @@ -35,6 +35,10 @@ V4_CFG_CONSUL_ENABLE_LOADBALANCER: false ## SAS/CONNECT V4_CFG_CONNECT_ENABLE_LOADBALANCER: false +## Monitoring and Logging +## uncomment and update the below values when deploying the viya4-monitoring-kubernetes stack +# V4M_BASE_DOMAIN: + ## Viya Start and Stop Schedule ## uncomment and update the values below with CronJob schedule expressions if you would ## like to start and stop your Viya Deployment on a schedule From 637d640bf7f94b2026175047f2276d9a5322e42a Mon Sep 17 00:00:00 2001 From: chjmil Date: Thu, 1 May 2025 14:50:01 -0400 Subject: [PATCH 4/8] feat: more reverted files Signed-off-by: chjmil --- playbooks/playbook.yaml | 12 ++ roles/common/tasks/task-validations.yaml | 20 +-- roles/monitoring/defaults/main.yaml | 46 +++++++ .../defaults/main.yaml:Zone.Identifier | 0 roles/monitoring/files/aws-storageclass.yaml | 20 +++ .../aws-storageclass.yaml:Zone.Identifier | 0 .../monitoring/files/azure-storageclass.yaml | 19 +++ .../azure-storageclass.yaml:Zone.Identifier | 0 roles/monitoring/files/gcp-storageclass.yaml | 19 +++ .../gcp-storageclass.yaml:Zone.Identifier | 0 .../tasks/cluster-logging-common.yaml | 95 ++++++++++++++ ...luster-logging-common.yaml:Zone.Identifier | 0 .../tasks/cluster-logging-custom-config.yaml | 68 ++++++++++ ...logging-custom-config.yaml:Zone.Identifier | 0 roles/monitoring/tasks/cluster-logging.yaml | 100 +++++++++++++++ .../cluster-logging.yaml:Zone.Identifier | 0 .../tasks/cluster-monitoring-common.yaml | 118 ++++++++++++++++++ ...ter-monitoring-common.yaml:Zone.Identifier | 0 .../cluster-monitoring-custom-config.yaml | 68 ++++++++++ ...itoring-custom-config.yaml:Zone.Identifier | 0 .../monitoring/tasks/cluster-monitoring.yaml | 76 +++++++++++ .../cluster-monitoring.yaml:Zone.Identifier | 0 roles/monitoring/tasks/main.yaml | 89 +++++++++++++ .../tasks/main.yaml:Zone.Identifier | 0 roles/monitoring/tasks/viya-monitoring.yaml | 31 +++++ .../viya-monitoring.yaml:Zone.Identifier | 0 .../host-based/user-values-opensearch.yaml | 14 +++ ...ser-values-opensearch.yaml:Zone.Identifier | 0 
.../templates/host-based/user-values-osd.yaml | 16 +++ .../user-values-osd.yaml:Zone.Identifier | 0 .../host-based/user-values-prom-operator.yaml | 81 ++++++++++++ ...-values-prom-operator.yaml:Zone.Identifier | 0 .../path-based/user-values-opensearch.yaml | 18 +++ ...ser-values-opensearch.yaml:Zone.Identifier | 0 .../templates/path-based/user-values-osd.yaml | 45 +++++++ .../user-values-osd.yaml:Zone.Identifier | 0 .../path-based/user-values-prom-operator.yaml | 81 ++++++++++++ ...-values-prom-operator.yaml:Zone.Identifier | 0 .../monitoring/templates/path-based/user.env | 1 + .../path-based/user.env:Zone.Identifier | 0 roles/monitoring/vars/main.yaml | 36 ++++++ .../monitoring/vars/main.yaml:Zone.Identifier | 0 42 files changed, 1059 insertions(+), 14 deletions(-) create mode 100644 roles/monitoring/defaults/main.yaml create mode 100644 roles/monitoring/defaults/main.yaml:Zone.Identifier create mode 100644 roles/monitoring/files/aws-storageclass.yaml create mode 100644 roles/monitoring/files/aws-storageclass.yaml:Zone.Identifier create mode 100644 roles/monitoring/files/azure-storageclass.yaml create mode 100644 roles/monitoring/files/azure-storageclass.yaml:Zone.Identifier create mode 100644 roles/monitoring/files/gcp-storageclass.yaml create mode 100644 roles/monitoring/files/gcp-storageclass.yaml:Zone.Identifier create mode 100644 roles/monitoring/tasks/cluster-logging-common.yaml create mode 100644 roles/monitoring/tasks/cluster-logging-common.yaml:Zone.Identifier create mode 100644 roles/monitoring/tasks/cluster-logging-custom-config.yaml create mode 100644 roles/monitoring/tasks/cluster-logging-custom-config.yaml:Zone.Identifier create mode 100644 roles/monitoring/tasks/cluster-logging.yaml create mode 100644 roles/monitoring/tasks/cluster-logging.yaml:Zone.Identifier create mode 100644 roles/monitoring/tasks/cluster-monitoring-common.yaml create mode 100644 roles/monitoring/tasks/cluster-monitoring-common.yaml:Zone.Identifier create mode 100644 roles/monitoring/tasks/cluster-monitoring-custom-config.yaml create mode 100644 roles/monitoring/tasks/cluster-monitoring-custom-config.yaml:Zone.Identifier create mode 100644 roles/monitoring/tasks/cluster-monitoring.yaml create mode 100644 roles/monitoring/tasks/cluster-monitoring.yaml:Zone.Identifier create mode 100644 roles/monitoring/tasks/main.yaml create mode 100644 roles/monitoring/tasks/main.yaml:Zone.Identifier create mode 100644 roles/monitoring/tasks/viya-monitoring.yaml create mode 100644 roles/monitoring/tasks/viya-monitoring.yaml:Zone.Identifier create mode 100644 roles/monitoring/templates/host-based/user-values-opensearch.yaml create mode 100644 roles/monitoring/templates/host-based/user-values-opensearch.yaml:Zone.Identifier create mode 100644 roles/monitoring/templates/host-based/user-values-osd.yaml create mode 100644 roles/monitoring/templates/host-based/user-values-osd.yaml:Zone.Identifier create mode 100644 roles/monitoring/templates/host-based/user-values-prom-operator.yaml create mode 100644 roles/monitoring/templates/host-based/user-values-prom-operator.yaml:Zone.Identifier create mode 100644 roles/monitoring/templates/path-based/user-values-opensearch.yaml create mode 100644 roles/monitoring/templates/path-based/user-values-opensearch.yaml:Zone.Identifier create mode 100644 roles/monitoring/templates/path-based/user-values-osd.yaml create mode 100644 roles/monitoring/templates/path-based/user-values-osd.yaml:Zone.Identifier create mode 100644 roles/monitoring/templates/path-based/user-values-prom-operator.yaml 
create mode 100644 roles/monitoring/templates/path-based/user-values-prom-operator.yaml:Zone.Identifier create mode 100644 roles/monitoring/templates/path-based/user.env create mode 100644 roles/monitoring/templates/path-based/user.env:Zone.Identifier create mode 100644 roles/monitoring/vars/main.yaml create mode 100644 roles/monitoring/vars/main.yaml:Zone.Identifier diff --git a/playbooks/playbook.yaml b/playbooks/playbook.yaml index 6baf9206..9d7b01c0 100644 --- a/playbooks/playbook.yaml +++ b/playbooks/playbook.yaml @@ -50,6 +50,12 @@ when: ('baseline' in ansible_run_tags) and ('install' in ansible_run_tags) tags: - baseline + - name: Monitoring role - cluster + include_role: + name: monitoring + tags: + - cluster-monitoring + - cluster-logging - name: Multi-tenancy role include_role: name: multi-tenancy @@ -62,6 +68,12 @@ tags: - viya - multi-tenancy + - name: Monitoring role - namespace + include_role: + name: monitoring + tasks_from: viya-monitoring + tags: + - viya-monitoring - name: baseline role uninstall # noqa: name[casing] include_role: name: baseline diff --git a/roles/common/tasks/task-validations.yaml b/roles/common/tasks/task-validations.yaml index 999b60d0..1d13431e 100644 --- a/roles/common/tasks/task-validations.yaml +++ b/roles/common/tasks/task-validations.yaml @@ -6,24 +6,16 @@ tags: - always block: - - name: Task validation - Unsupported tasks - ansible.builtin.assert: - that: '{{ ["cluster-logging", "cluster-monitoring", "viya-monitoring"] | intersect(ansible_run_tags) | count == 0 }}' - msg: > - Invalid tags: {{ ansible_run_tags | join(',') }} - - The 'cluster-logging', 'cluster-monitoring', and 'viya-monitoring' task tags are not supported. - See usage documentation at docs/user/AnsibleUsage.md - - name: Task validation - ensure at least one action tag is used ansible.builtin.assert: that: '{{ ["install", "uninstall", "onboard", "cas-onboard", "offboard"] | intersect(ansible_run_tags) | count > 0 }}' msg: > Invalid tags: {{ ansible_run_tags | join(',') }} - You must define at least one action tag during execution. For the 'baseline', or 'viya', tasks - either 'install' or 'uninstall'. - See usage documentation at docs/user/AnsibleUsage.md + You must define at least one action tag during execution. For the 'baseline', 'viya', 'cluster-logging', + 'cluster-monitoring', or 'viya-monitoring' tasks either 'install' or 'uninstall'. For the 'multi-tenancy' task + either 'onboard', 'cas-onboard', or 'offboard'. See usage documentation at docs/user/AnsibleUsage.md and + docs/user/Multi-Tenancy.md - name: Task validation - ensure the viya and multi-tenancy tasks are not run simultaneously ansible.builtin.assert: @@ -60,9 +52,9 @@ msg: > Invalid tags: {{ ansible_run_tags | join(',') }} - The 'baseline' task requires either the + The 'baseline', 'cluster-logging', 'cluster-monitoring', and 'viya-monitoring' tasks require either the 'install' or 'uninstall' actions. 
See usage documentation at docs/user/AnsibleUsage.md - when: '["baseline"] | intersect(ansible_run_tags) | count > 0' + when: '["baseline", "cluster-logging", "cluster-monitoring", "viya-monitoring"] | intersect(ansible_run_tags) | count > 0' - name: Task validation - ensure the install and uninstall tasks are not run simultaneously ansible.builtin.assert: diff --git a/roles/monitoring/defaults/main.yaml b/roles/monitoring/defaults/main.yaml new file mode 100644 index 00000000..49f9c990 --- /dev/null +++ b/roles/monitoring/defaults/main.yaml @@ -0,0 +1,46 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +KUBECONFIG: ~/.kube/config +NAMESPACE: null +V4_CFG_MANAGE_STORAGE: true + +V4M_STORAGECLASS: v4m + +V4M_VERSION: stable +V4M_NODE_PLACEMENT_ENABLE: false +V4M_BASE_DOMAIN: "{{ V4_CFG_BASE_DOMAIN }}" +V4M_CERT: null +V4M_KEY: null +V4M_ROUTING: host-based +V4M_CUSTOM_CONFIG_USER_DIR: null + +V4M_LOGGING_NAMESPACE: logging +V4M_MONITORING_NAMESPACE: monitoring + +V4M_KIBANA_FQDN: dashboards.{{ V4M_BASE_DOMAIN }} +V4M_KIBANA_CERT: "{{ V4M_CERT }}" +V4M_KIBANA_KEY: "{{ V4M_KEY }}" +V4M_KIBANA_LOGADM_PASSWORD: "{{ V4M_KIBANA_PASSWORD if V4M_KIBANA_PASSWORD else lookup('password', '/dev/null chars=ascii_letters,digits') }}" +V4M_KIBANA_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" +V4M_KIBANASERVER_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" +V4M_LOGCOLLECTOR_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" +V4M_METRICGETTER_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" + +V4M_ELASTICSEARCH_FQDN: search.{{ V4M_BASE_DOMAIN }} +V4M_ELASTICSEARCH_CERT: "{{ V4M_CERT }}" +V4M_ELASTICSEARCH_KEY: "{{ V4M_KEY }}" + +V4M_PROMETHEUS_FQDN: prometheus.{{ V4M_BASE_DOMAIN }} +V4M_PROMETHEUS_CERT: "{{ V4M_CERT }}" +V4M_PROMETHEUS_KEY: "{{ V4M_KEY }}" + +V4M_ALERTMANAGER_FQDN: alertmanager.{{ V4M_BASE_DOMAIN }} +V4M_ALERTMANAGER_CERT: "{{ V4M_CERT }}" +V4M_ALERTMANAGER_KEY: "{{ V4M_KEY }}" + +V4M_GRAFANA_FQDN: grafana.{{ V4M_BASE_DOMAIN }} +V4M_GRAFANA_CERT: "{{ V4M_CERT }}" +V4M_GRAFANA_KEY: "{{ V4M_KEY }}" +V4M_GRAFANA_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" diff --git a/roles/monitoring/defaults/main.yaml:Zone.Identifier b/roles/monitoring/defaults/main.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/files/aws-storageclass.yaml b/roles/monitoring/files/aws-storageclass.yaml new file mode 100644 index 00000000..f6723a5d --- /dev/null +++ b/roles/monitoring/files/aws-storageclass.yaml @@ -0,0 +1,20 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + kubernetes.io/cluster-service: "true" + name: v4m +provisioner: kubernetes.io/aws-ebs +parameters: + fsType: ext4 + type: gp2 +reclaimPolicy: Delete +# Set binding mode to WaitForFirstConsumer to avoid +# volume node affinity issues +volumeBindingMode: WaitForFirstConsumer diff --git a/roles/monitoring/files/aws-storageclass.yaml:Zone.Identifier b/roles/monitoring/files/aws-storageclass.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/files/azure-storageclass.yaml b/roles/monitoring/files/azure-storageclass.yaml new file mode 100644 index 00000000..f275573f --- /dev/null +++ b/roles/monitoring/files/azure-storageclass.yaml @@ -0,0 +1,19 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + kubernetes.io/cluster-service: "true" + name: v4m +parameters: + skuName: Standard_LRS +provisioner: disk.csi.azure.com +reclaimPolicy: Delete +# Set binding mode to WaitForFirstConsumer to avoid +# volume node affinity issues +volumeBindingMode: WaitForFirstConsumer diff --git a/roles/monitoring/files/azure-storageclass.yaml:Zone.Identifier b/roles/monitoring/files/azure-storageclass.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/files/gcp-storageclass.yaml b/roles/monitoring/files/gcp-storageclass.yaml new file mode 100644 index 00000000..26fc0733 --- /dev/null +++ b/roles/monitoring/files/gcp-storageclass.yaml @@ -0,0 +1,19 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + kubernetes.io/cluster-service: "true" + name: v4m +provisioner: kubernetes.io/gce-pd +parameters: + type: pd-standard +reclaimPolicy: Delete +# Set binding mode to WaitForFirstConsumer to avoid +# volume node affinity issues +volumeBindingMode: WaitForFirstConsumer diff --git a/roles/monitoring/files/gcp-storageclass.yaml:Zone.Identifier b/roles/monitoring/files/gcp-storageclass.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/tasks/cluster-logging-common.yaml b/roles/monitoring/tasks/cluster-logging-common.yaml new file mode 100644 index 00000000..71bd64f9 --- /dev/null +++ b/roles/monitoring/tasks/cluster-logging-common.yaml @@ -0,0 +1,95 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +- name: cluster-logging - create userdir # noqa: name[casing] + file: + path: "{{ tmpdir.path }}/logging/" + state: directory + mode: "0770" + tags: + - install + - uninstall + - update + +- name: V4M - cluster logging config vars + include_tasks: cluster-logging.yaml + when: + - V4M_CUSTOM_CONFIG_USER_DIR is none + tags: + - install + - uninstall + - update + +- name: V4M - cluster logging custom config user dir + include_tasks: cluster-logging-custom-config.yaml + when: + - V4M_CUSTOM_CONFIG_USER_DIR is not none + tags: + - install + - update + +- name: cluster-logging - elasticsearch cert # noqa: name[casing] + kubernetes.core.k8s: + kubeconfig: "{{ KUBECONFIG }}" + state: present + definition: + kind: Secret + apiVersion: v1 + metadata: + name: elasticsearch-ingress-tls-secret + namespace: "{{ V4M_LOGGING_NAMESPACE }}" + data: + tls.crt: >- + {{ lookup('file', V4M_ELASTICSEARCH_CERT) | b64encode }} + tls.key: >- + {{ lookup('file', V4M_ELASTICSEARCH_KEY) | b64encode }} + type: kubernetes.io/tls + when: + - V4M_ELASTICSEARCH_CERT is not none + - V4M_ELASTICSEARCH_KEY is not none + tags: + - install + - update + +- name: cluster-logging - kibana cert # noqa: name[casing] + kubernetes.core.k8s: + kubeconfig: "{{ KUBECONFIG }}" + state: present + definition: + kind: Secret + apiVersion: v1 + metadata: + name: kibana-ingress-tls-secret + namespace: "{{ V4M_LOGGING_NAMESPACE }}" + data: + tls.crt: >- + {{ lookup('file', V4M_KIBANA_CERT) | b64encode }} + tls.key: >- + {{ lookup('file', V4M_KIBANA_KEY) | b64encode }} + type: kubernetes.io/tls + when: + - V4M_KIBANA_CERT is not none + - V4M_KIBANA_KEY is not none + tags: + - install + - update + +- name: cluster-logging - uninstall # noqa: name[casing] + command: + cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/logging/bin/remove_logging.sh" + environment: "{{ logging_map['env'] }}" + tags: + - uninstall + +- name: cluster-logging - delete namespace # noqa: name[casing] + kubernetes.core.k8s: + api_version: v1 + kind: Namespace + name: "{{ V4M_LOGGING_NAMESPACE }}" + wait: true + wait_timeout: 600 + kubeconfig: "{{ KUBECONFIG }}" + state: absent + tags: + - uninstall diff --git a/roles/monitoring/tasks/cluster-logging-common.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-logging-common.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/tasks/cluster-logging-custom-config.yaml b/roles/monitoring/tasks/cluster-logging-custom-config.yaml new file mode 100644 index 00000000..9937ac53 --- /dev/null +++ b/roles/monitoring/tasks/cluster-logging-custom-config.yaml @@ -0,0 +1,68 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +- name: V4M - custom config is valid path + stat: + path: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" + register: folder + tags: + - install + - uninstall + - update + +- name: V4M - custom config user dir check + fail: + msg: "{{ V4M_CUSTOM_CONFIG_USER_DIR }} does not exist." 
+ when: + - not folder.stat.exists + tags: + - install + - uninstall + - update + +- name: cluster-logging - user dir find files # noqa: name[casing] + find: + paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" + file_type: file + register: user_dir_folder + tags: + - install + - update + +- name: cluster-logging - copy user dir files # noqa: name[casing] + copy: + src: "{{ item.path }}" + dest: "{{ tmpdir.path }}" + mode: "0660" + loop: "{{ user_dir_folder.files }}" + tags: + - install + - update + +- name: cluster-logging - find files # noqa: name[casing] + find: + paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}/logging/" + file_type: file + register: logging_folder + tags: + - install + - update + +- name: cluster-logging - copy user dir logging files # noqa: name[casing] + copy: + src: "{{ item.path }}" + dest: "{{ tmpdir.path }}/logging/" + mode: "0660" + loop: "{{ logging_folder.files }}" + tags: + - install + - update + +- name: cluster-logging - deploy # noqa: name[casing] + command: + cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/logging/bin/deploy_logging.sh" + environment: "{{ logging_map['env'] }}" + tags: + - install + - update diff --git a/roles/monitoring/tasks/cluster-logging-custom-config.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-logging-custom-config.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/tasks/cluster-logging.yaml b/roles/monitoring/tasks/cluster-logging.yaml new file mode 100644 index 00000000..3d476206 --- /dev/null +++ b/roles/monitoring/tasks/cluster-logging.yaml @@ -0,0 +1,100 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +- name: cluster-logging - lookup existing credentials # noqa: name[casing] + kubernetes.core.k8s_info: + api_version: v1 + kind: Secret + namespace: "{{ V4M_LOGGING_NAMESPACE }}" + kubeconfig: "{{ KUBECONFIG }}" + label_selectors: + - managed-by = v4m-es-script + register: logging_secrets + tags: + - install + +- name: Set password facts + set_fact: + V4M_KIBANA_PASSWORD: "{{ V4M_KIBANA_PASSWORD }}" + V4M_KIBANA_LOGADM_PASSWORD: "{{ V4M_KIBANA_LOGADM_PASSWORD }}" + V4M_KIBANASERVER_PASSWORD: "{{ V4M_KIBANASERVER_PASSWORD }}" + V4M_LOGCOLLECTOR_PASSWORD: "{{ V4M_LOGCOLLECTOR_PASSWORD }}" + V4M_METRICGETTER_PASSWORD: "{{ V4M_METRICGETTER_PASSWORD }}" + tags: + - install + +- name: cluster-logging - save credentials # noqa: name[casing] + set_fact: + "{{ logging_map['secret'][item.metadata.name] }}": "{{ item.data.password | b64decode }}" + with_items: "{{ logging_secrets.resources }}" + when: + - item.metadata is defined + - item.metadata.name is defined + - item.metadata.name in ("internal-user-admin", "internal-user-logadm", "internal-user-kibanaserver", "internal-user-logcollector", "internal-user-metricgetter") + tags: + - install + +- name: cluster-logging - output credentials # noqa: name[casing] + debug: + msg: + - "OpenSearch admin - username: admin, password: {{ V4M_KIBANA_PASSWORD }}" + - "OpenSearch admin - username: logadm, password: {{ V4M_KIBANA_LOGADM_PASSWORD }}" + tags: + - install + +- name: cluster-logging - host-based opensearch user values # noqa: name[casing] + template: + src: host-based/user-values-opensearch.yaml + dest: "{{ tmpdir.path }}/logging/user-values-opensearch.yaml" + mode: "0660" + when: + - V4M_ROUTING|lower == 'host-based' + tags: + - install + - update + - uninstall + +- name: cluster-logging - host-based osd user values # noqa: name[casing] + template: + src: 
host-based/user-values-osd.yaml + dest: "{{ tmpdir.path }}/logging/user-values-osd.yaml" + mode: "0660" + when: + - V4M_ROUTING|lower == 'host-based' + tags: + - install + - update + - uninstall + +- name: cluster-logging - path-based opensearch user values # noqa: name[casing] + template: + src: path-based/user-values-opensearch.yaml + dest: "{{ tmpdir.path }}/logging/user-values-opensearch.yaml" + mode: "0660" + when: + - V4M_ROUTING|lower == 'path-based' + tags: + - install + - update + - uninstall + +- name: cluster-logging - path-based osd user values # noqa: name[casing] + template: + src: path-based/user-values-osd.yaml + dest: "{{ tmpdir.path }}/logging/user-values-osd.yaml" + mode: "0660" + when: + - V4M_ROUTING|lower == 'path-based' + tags: + - install + - update + - uninstall + +- name: cluster-logging - deploy # noqa: name[casing] + command: + cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/logging/bin/deploy_logging.sh" + environment: "{{ logging_map['env'] }}" + tags: + - install + - update diff --git a/roles/monitoring/tasks/cluster-logging.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-logging.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/tasks/cluster-monitoring-common.yaml b/roles/monitoring/tasks/cluster-monitoring-common.yaml new file mode 100644 index 00000000..adcd19eb --- /dev/null +++ b/roles/monitoring/tasks/cluster-monitoring-common.yaml @@ -0,0 +1,118 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +- name: cluster-monitoring - create userdir # noqa: name[casing] + file: + path: "{{ tmpdir.path }}/monitoring/" + state: directory + mode: "0770" + tags: + - install + - uninstall + - update + +- name: V4M - cluster monitoring config vars + include_tasks: cluster-monitoring.yaml + when: + - V4M_CUSTOM_CONFIG_USER_DIR is none + tags: + - install + - uninstall + - update + +- name: V4M - cluster monitoring custom config user dir + include_tasks: cluster-monitoring-custom-config.yaml + when: + - V4M_CUSTOM_CONFIG_USER_DIR is not none + tags: + - install + - update + +- name: cluster-monitoring - grafana cert # noqa: name[casing] + kubernetes.core.k8s: + kubeconfig: "{{ KUBECONFIG }}" + state: present + definition: + kind: Secret + apiVersion: v1 + metadata: + name: grafana-ingress-tls-secret + namespace: "{{ V4M_MONITORING_NAMESPACE }}" + data: + tls.crt: >- + {{ lookup('file', V4M_GRAFANA_CERT) | b64encode }} + tls.key: >- + {{ lookup('file', V4M_GRAFANA_KEY) | b64encode }} + type: kubernetes.io/tls + when: + - V4M_GRAFANA_CERT is not none + - V4M_GRAFANA_KEY is not none + tags: + - install + - update + +- name: cluster-monitoring - prometheus cert # noqa: name[casing] + kubernetes.core.k8s: + kubeconfig: "{{ KUBECONFIG }}" + state: present + definition: + kind: Secret + apiVersion: v1 + metadata: + name: prometheus-ingress-tls-secret + namespace: "{{ V4M_MONITORING_NAMESPACE }}" + data: + tls.crt: >- + {{ lookup('file', V4M_PROMETHEUS_CERT) | b64encode }} + tls.key: >- + {{ lookup('file', V4M_PROMETHEUS_KEY) | b64encode }} + type: kubernetes.io/tls + when: + - V4M_PROMETHEUS_CERT is not none + - V4M_PROMETHEUS_KEY is not none + tags: + - install + - update + +- name: cluster-monitoring - alertmanager cert # noqa: name[casing] + kubernetes.core.k8s: + kubeconfig: "{{ KUBECONFIG }}" + state: present + definition: + kind: Secret + apiVersion: v1 + metadata: + name: alertmanager-ingress-tls-secret + namespace: "{{ 
V4M_MONITORING_NAMESPACE }}" + data: + tls.crt: >- + {{ lookup('file', V4M_ALERTMANAGER_CERT) | b64encode }} + tls.key: >- + {{ lookup('file', V4M_ALERTMANAGER_KEY) | b64encode }} + type: kubernetes.io/tls + when: + - V4M_ALERTMANAGER_CERT is not none + - V4M_ALERTMANAGER_KEY is not none + tags: + - install + - update + +- name: cluster-monitoring - uninstall # noqa: name[casing] + command: + cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/monitoring/bin/remove_monitoring_cluster.sh" + environment: "{{ monitoring_env }}" + tags: + - uninstall + +- name: cluster-monitoring - delete namespace # noqa: name[casing] + kubernetes.core.k8s: + api_version: v1 + kind: Namespace + name: "{{ V4M_MONITORING_NAMESPACE }}" + wait: true + wait_timeout: 600 + kubeconfig: "{{ KUBECONFIG }}" + state: absent + tags: + - uninstall diff --git a/roles/monitoring/tasks/cluster-monitoring-common.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-monitoring-common.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml b/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml new file mode 100644 index 00000000..4a9134b6 --- /dev/null +++ b/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml @@ -0,0 +1,68 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +- name: V4M - custom config is valid path + stat: + path: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" + register: folder + tags: + - install + - uninstall + - update + +- name: V4M - custom config user dir check + fail: + msg: "{{ V4M_CUSTOM_CONFIG_USER_DIR }} does not exist." + when: + - not folder.stat.exists + tags: + - install + - uninstall + - update + +- name: cluster-monitoring - user dir find files # noqa: name[casing] + find: + paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" + file_type: file + register: user_dir_folder + tags: + - install + - update + +- name: cluster-monitoring - copy user dir files # noqa: name[casing] + copy: + src: "{{ item.path }}" + dest: "{{ tmpdir.path }}" + mode: "0660" + loop: "{{ user_dir_folder.files }}" + tags: + - install + - update + +- name: cluster-monitoring - find files # noqa: name[casing] + find: + paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}/monitoring/" + file_type: file + register: monitoring_folder + tags: + - install + - update + +- name: cluster-monitoring - copy user dir monitoring files # noqa: name[casing] + copy: + src: "{{ item.path }}" + dest: "{{ tmpdir.path }}/monitoring/" + mode: "0660" + loop: "{{ monitoring_folder.files }}" + tags: + - install + - update + +- name: cluster-monitoring - deploy # noqa: name[casing] + command: + cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/monitoring/bin/deploy_monitoring_cluster.sh" + environment: "{{ monitoring_env }}" + tags: + - install + - update diff --git a/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/tasks/cluster-monitoring.yaml b/roles/monitoring/tasks/cluster-monitoring.yaml new file mode 100644 index 00000000..c9ceffb1 --- /dev/null +++ b/roles/monitoring/tasks/cluster-monitoring.yaml @@ -0,0 +1,76 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +- name: cluster-monitoring - lookup existing credentials # noqa: name[casing] + kubernetes.core.k8s_info: + api_version: v1 + kind: Secret + name: v4m-grafana + namespace: "{{ V4M_MONITORING_NAMESPACE }}" + kubeconfig: "{{ KUBECONFIG }}" + register: monitoring_creds + tags: + - install + - update + +- name: Set password fact + set_fact: + V4M_GRAFANA_PASSWORD: "{{ V4M_GRAFANA_PASSWORD }}" + tags: + - install + +- name: cluster-monitoring - save credentials # noqa: name[casing] + set_fact: + V4M_GRAFANA_PASSWORD: "{{ monitoring_creds.resources[0].data['admin-password'] | b64decode }}" + tags: + - install + - update + when: + - (monitoring_creds.resources | length) == 1 + +- name: cluster-monitoring - output credentials # noqa: name[casing] + debug: + msg: + - "Grafana - username: admin, password: {{ V4M_GRAFANA_PASSWORD }}" + tags: + - install + +- name: cluster-monitoring - host-based user values # noqa: name[casing] + template: + src: host-based/user-values-prom-operator.yaml + dest: "{{ tmpdir.path }}/monitoring/user-values-prom-operator.yaml" + mode: "0660" + when: + - V4M_ROUTING|lower == 'host-based' + tags: + - install + - update + - uninstall + +- name: cluster-monitoring - path-based user values # noqa: name[casing] + when: + - V4M_ROUTING|lower == 'path-based' + tags: + - install + - update + - uninstall + block: + - name: Copy operator yaml + template: + src: path-based/user-values-prom-operator.yaml + dest: "{{ tmpdir.path }}/monitoring/user-values-prom-operator.yaml" + mode: "0660" + - name: Copy user env + template: + src: path-based/user.env + dest: "{{ tmpdir.path }}/monitoring/user.env" + mode: "0660" + +- name: cluster-monitoring - deploy # noqa: name[casing] + command: + cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/monitoring/bin/deploy_monitoring_cluster.sh" + environment: "{{ monitoring_env }}" + tags: + - install + - update diff --git a/roles/monitoring/tasks/cluster-monitoring.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-monitoring.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/tasks/main.yaml b/roles/monitoring/tasks/main.yaml new file mode 100644 index 00000000..9b9d5054 --- /dev/null +++ b/roles/monitoring/tasks/main.yaml @@ -0,0 +1,89 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +- name: V4M - ensure supported value for V4M_ROUTING + ansible.builtin.assert: + that: '{{ ["host-based", "path-based"] | intersect([V4M_ROUTING]) | count == 1 }}' + msg: > + Invalid V4M_ROUTING value: {{ V4M_ROUTING }} + + Supported values for V4M_ROUTING are `host-based` or `path-based` + when: + - V4M_CUSTOM_CONFIG_USER_DIR is none + tags: + - install + - uninstall + - update + +- name: V4M - download + git: + repo: https://github.com/sassoftware/viya4-monitoring-kubernetes.git + dest: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/" + version: "{{ V4M_VERSION }}" + tags: + - install + - uninstall + - update + +- name: V4M - add storageclass + kubernetes.core.k8s: + kubeconfig: "{{ KUBECONFIG }}" + state: present + src: "{{ role_path }}/files/{{ PROVIDER }}-storageclass.yaml" + when: + - PROVIDER is not none + - PROVIDER in ["azure","aws","gcp"] + - V4_CFG_MANAGE_STORAGE is not none + - V4_CFG_MANAGE_STORAGE|bool + tags: + - install + - update + +- name: V4M - cluster monitoring + include_tasks: cluster-monitoring-common.yaml + tags: + - cluster-monitoring + +- name: V4M - cluster logging + include_tasks: cluster-logging-common.yaml + tags: + - cluster-logging + +- name: V4M - check if storage class is being used + ansible.builtin.shell: | + kubectl --kubeconfig {{ KUBECONFIG }} get pv --output=custom-columns='PORT:.spec.storageClassName' | grep -o v4m | wc -l + register: sc_users + when: + - PROVIDER is not none + - PROVIDER in ["azure","aws","gcp"] + - V4_CFG_MANAGE_STORAGE is not none + - V4_CFG_MANAGE_STORAGE|bool + tags: + - uninstall + +- name: V4M - storageclass uninstall status + ansible.builtin.debug: + msg: "Persistent Volumes still referring to the v4m Storage Class, skipping deletion" + when: + - PROVIDER is not none + - PROVIDER in ["azure","aws","gcp"] + - V4_CFG_MANAGE_STORAGE is not none + - V4_CFG_MANAGE_STORAGE|bool + - sc_users.stdout | int > 0 + tags: + - uninstall + +- name: V4M - remove storageclass + kubernetes.core.k8s: + kubeconfig: "{{ KUBECONFIG }}" + state: absent + src: "{{ role_path }}/files/{{ PROVIDER }}-storageclass.yaml" + when: + - PROVIDER is not none + - PROVIDER in ["azure","aws","gcp"] + - V4_CFG_MANAGE_STORAGE is not none + - V4_CFG_MANAGE_STORAGE|bool + - sc_users.stdout | int == 0 + tags: + - uninstall diff --git a/roles/monitoring/tasks/main.yaml:Zone.Identifier b/roles/monitoring/tasks/main.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/tasks/viya-monitoring.yaml b/roles/monitoring/tasks/viya-monitoring.yaml new file mode 100644 index 00000000..4ee2006b --- /dev/null +++ b/roles/monitoring/tasks/viya-monitoring.yaml @@ -0,0 +1,31 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +--- +- name: viya-monitoring - download viya4-monitoring-kubernetes # noqa: name[casing] + git: + repo: https://github.com/sassoftware/viya4-monitoring-kubernetes.git + dest: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/" + version: "{{ V4M_VERSION }}" + tags: + - install + - uninstall + - update + when: + - "'cluster-logging' not in ansible_run_tags" + - "'cluster-monitoring' not in ansible_run_tags" + +- name: viya-monitoring - deploy # noqa: name[casing] + command: + cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/monitoring/bin/deploy_monitoring_viya.sh" + environment: "{{ monitoring_env }}" + tags: + - install + - update + +- name: viya-monitoring - uninstall # noqa: name[casing] + command: + cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/monitoring/bin/remove_monitoring_viya.sh" + environment: "{{ monitoring_env }}" + tags: + - uninstall diff --git a/roles/monitoring/tasks/viya-monitoring.yaml:Zone.Identifier b/roles/monitoring/tasks/viya-monitoring.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/templates/host-based/user-values-opensearch.yaml b/roles/monitoring/templates/host-based/user-values-opensearch.yaml new file mode 100644 index 00000000..5c19a839 --- /dev/null +++ b/roles/monitoring/templates/host-based/user-values-opensearch.yaml @@ -0,0 +1,14 @@ +persistence: + storageClass: {{ V4M_STORAGECLASS }} +ingress: + ingressClassName: nginx + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + path: / + hosts: + - {{ V4M_ELASTICSEARCH_FQDN }} + tls: + - secretName: elasticsearch-ingress-tls-secret + hosts: + - {{ V4M_ELASTICSEARCH_FQDN }} diff --git a/roles/monitoring/templates/host-based/user-values-opensearch.yaml:Zone.Identifier b/roles/monitoring/templates/host-based/user-values-opensearch.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/templates/host-based/user-values-osd.yaml b/roles/monitoring/templates/host-based/user-values-osd.yaml new file mode 100644 index 00000000..3f7f66a0 --- /dev/null +++ b/roles/monitoring/templates/host-based/user-values-osd.yaml @@ -0,0 +1,16 @@ +ingress: + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + ingressClassName: nginx + hosts: + - host: {{ V4M_KIBANA_FQDN }} + paths: + - path: / + backend: + serviceName: v4m-osd + servicePort: 5601 + tls: + - secretName: kibana-ingress-tls-secret + hosts: + - {{ V4M_KIBANA_FQDN }} diff --git a/roles/monitoring/templates/host-based/user-values-osd.yaml:Zone.Identifier b/roles/monitoring/templates/host-based/user-values-osd.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/templates/host-based/user-values-prom-operator.yaml b/roles/monitoring/templates/host-based/user-values-prom-operator.yaml new file mode 100644 index 00000000..30297faf --- /dev/null +++ b/roles/monitoring/templates/host-based/user-values-prom-operator.yaml @@ -0,0 +1,81 @@ +prometheus: + # Disable default configuration of NodePort + service: + type: ClusterIP + nodePort: null + # Define host-based ingress + ingress: + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + ingressClassName: nginx + tls: + - hosts: + - {{ V4M_PROMETHEUS_FQDN }} + secretName: prometheus-ingress-tls-secret + hosts: + - {{ V4M_PROMETHEUS_FQDN }} + prometheusSpec: + externalUrl: "https://{{ V4M_PROMETHEUS_FQDN }}" + alertingEndpoints: + - name: v4m-alertmanager + 
port: http-web + scheme: https + tlsConfig: + insecureSkipVerify: true + storageSpec: + volumeClaimTemplate: + spec: + storageClassName: {{ V4M_STORAGECLASS }} + +alertmanager: + # Disable default configuration of NodePort + service: + type: ClusterIP + nodePort: null + # Define host-based ingress + ingress: + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + ingressClassName: nginx + enabled: true + tls: + - hosts: + - {{ V4M_ALERTMANAGER_FQDN }} + secretName: alertmanager-ingress-tls-secret + hosts: + - {{ V4M_ALERTMANAGER_FQDN }} + alertmanagerSpec: + externalUrl: "https://{{ V4M_ALERTMANAGER_FQDN }}" + storage: + volumeClaimTemplate: + spec: + storageClassName: {{ V4M_STORAGECLASS }} + +grafana: + # Disable default configuration of NodePort + service: + type: ClusterIP + nodePort: null + # Define host-based ingress + ingress: + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + ingressClassName: nginx + tls: + - hosts: + - {{ V4M_GRAFANA_FQDN }} + secretName: grafana-ingress-tls-secret + hosts: + - {{ V4M_GRAFANA_FQDN }} + path: / + "grafana.ini": + server: + domain: {{ V4M_BASE_DOMAIN }} + root_url: "https://{{ V4M_GRAFANA_FQDN }}" + serve_from_sub_path: false + testFramework: + enabled: false + persistence: + storageClassName: {{ V4M_STORAGECLASS }} diff --git a/roles/monitoring/templates/host-based/user-values-prom-operator.yaml:Zone.Identifier b/roles/monitoring/templates/host-based/user-values-prom-operator.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/templates/path-based/user-values-opensearch.yaml b/roles/monitoring/templates/path-based/user-values-opensearch.yaml new file mode 100644 index 00000000..8a174956 --- /dev/null +++ b/roles/monitoring/templates/path-based/user-values-opensearch.yaml @@ -0,0 +1,18 @@ +# OpenSearch ingress is optional +ingress: + ingressClassName: nginx + annotations: + nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/search/(.*) /$1 break; + rewrite (?i)/search$ / break; + nginx.ingress.kubernetes.io/rewrite-target: /search + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + path: /search + pathType: Prefix + hosts: + - {{ V4M_BASE_DOMAIN }} + tls: + - secretName: elasticsearch-ingress-tls-secret + hosts: + - {{ V4M_BASE_DOMAIN }} diff --git a/roles/monitoring/templates/path-based/user-values-opensearch.yaml:Zone.Identifier b/roles/monitoring/templates/path-based/user-values-opensearch.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/templates/path-based/user-values-osd.yaml b/roles/monitoring/templates/path-based/user-values-osd.yaml new file mode 100644 index 00000000..1ebf4c25 --- /dev/null +++ b/roles/monitoring/templates/path-based/user-values-osd.yaml @@ -0,0 +1,45 @@ +extraEnvs: +# SERVER_BASEPATH needed for path-based ingress +- name: SERVER_BASEPATH + value: /dashboards +# OSD_ENABLE_TLS, Username & password need to be set here since helm *replaces* array values +- name: OSD_ENABLE_TLS + valueFrom: + secretKeyRef: + name: v4m-osd-tls-enabled + key: enable_tls +- name: OPENSEARCH_USERNAME + valueFrom: + secretKeyRef: + name: internal-user-kibanaserver + key: username +- name: OPENSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: internal-user-kibanaserver + key: password +service: + type: ClusterIP + nodePort: null +ingress: + ingressClassName: nginx + annotations: + nginx.ingress.kubernetes.io/affinity: "cookie" + 
nginx.ingress.kubernetes.io/configuration-snippet: | + rewrite (?i)/dashboards/(.*) /$1 break; + rewrite (?i)/dashboards$ / break; + nginx.ingress.kubernetes.io/rewrite-target: /dashboards + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + hosts: + - host: {{ V4M_BASE_DOMAIN }} + paths: + - path: /dashboards + backend: + serviceName: v4m-osd + servicePort: 5601 + + tls: + - secretName: kibana-ingress-tls-secret + hosts: + - {{ V4M_BASE_DOMAIN }} diff --git a/roles/monitoring/templates/path-based/user-values-osd.yaml:Zone.Identifier b/roles/monitoring/templates/path-based/user-values-osd.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/templates/path-based/user-values-prom-operator.yaml b/roles/monitoring/templates/path-based/user-values-prom-operator.yaml new file mode 100644 index 00000000..1b40f137 --- /dev/null +++ b/roles/monitoring/templates/path-based/user-values-prom-operator.yaml @@ -0,0 +1,81 @@ +grafana: + # Disable default configuration of NodePort + service: + type: ClusterIP + nodePort: null + ingress: + ingressClassName: nginx + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + tls: + - hosts: + - {{ V4M_BASE_DOMAIN }} + secretName: grafana-ingress-tls-secret + hosts: + - {{ V4M_BASE_DOMAIN }} + path: /grafana + pathType: Prefix + "grafana.ini": + server: + domain: {{ V4M_BASE_DOMAIN }} + root_url: "https://{{ V4M_BASE_DOMAIN }}/grafana" + serve_from_sub_path: true + +# Note that Prometheus and Alertmanager do not have any +# authentication configured by default, exposing an +# unauthenticated application without other restrictions +# in place is insecure. + +prometheus: + # Disable default configuration of NodePort + service: + type: ClusterIP + nodePort: null + # Define path-based ingress + ingress: + ingressClassName: nginx + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + tls: + - hosts: + - {{ V4M_BASE_DOMAIN }} + secretName: prometheus-ingress-tls-secret + hosts: + - {{ V4M_BASE_DOMAIN }} + path: /prometheus + pathType: Prefix + prometheusSpec: + routePrefix: /prometheus + externalUrl: "https://{{ V4M_BASE_DOMAIN }}/prometheus" + alertingEndpoints: + - name: v4m-alertmanager + port: http-web + pathPrefix: "/alertmanager" + scheme: https + tlsConfig: + insecureSkipVerify: true + +alertmanager: + # Disable default configuration of NodePort + service: + type: ClusterIP + nodePort: null + # Define path-based ingress + ingress: + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + ingressClassName: nginx + enabled: true + tls: + - hosts: + - {{ V4M_BASE_DOMAIN }} + secretName: alertmanager-ingress-tls-secret + hosts: + - {{ V4M_BASE_DOMAIN }} + path: /alertmanager + pathType: Prefix + alertmanagerSpec: + routePrefix: /alertmanager + externalUrl: "https://{{ V4M_BASE_DOMAIN }}/alertmanager" diff --git a/roles/monitoring/templates/path-based/user-values-prom-operator.yaml:Zone.Identifier b/roles/monitoring/templates/path-based/user-values-prom-operator.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/templates/path-based/user.env b/roles/monitoring/templates/path-based/user.env new file mode 100644 index 00000000..b66d0bbf --- /dev/null +++ b/roles/monitoring/templates/path-based/user.env @@ -0,0 +1 @@ +MON_TLS_PATH_INGRESS=true diff --git a/roles/monitoring/templates/path-based/user.env:Zone.Identifier b/roles/monitoring/templates/path-based/user.env:Zone.Identifier new 
file mode 100644 index 00000000..e69de29b diff --git a/roles/monitoring/vars/main.yaml b/roles/monitoring/vars/main.yaml new file mode 100644 index 00000000..df9e6af3 --- /dev/null +++ b/roles/monitoring/vars/main.yaml @@ -0,0 +1,36 @@ +# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +--- +logging_map: + secret: + internal-user-admin: V4M_KIBANA_PASSWORD + internal-user-logadm: V4M_KIBANA_LOGADM_PASSWORD + internal-user-kibanaserver: V4M_KIBANASERVER_PASSWORD + internal-user-logcollector: V4M_LOGCOLLECTOR_PASSWORD + internal-user-metricgetter: V4M_METRICGETTER_PASSWORD + env: + USER_DIR: "{{ tmpdir.path }}" + TLS_ENABLE: "true" + LOG_KB_TLS_ENABLE: "true" + KUBECONFIG: "{{ KUBECONFIG }}" + LOG_COLOR_ENABLE: false + NODE_PLACEMENT_ENABLE: "{{ V4M_NODE_PLACEMENT_ENABLE }}" + ES_ADMIN_PASSWD: "{{ V4M_KIBANA_PASSWORD }}" + LOG_LOGADM_PASSWD: "{{ V4M_KIBANA_LOGADM_PASSWORD }}" + ES_KIBANASERVER_PASSWD: "{{ V4M_KIBANASERVER_PASSWORD }}" + ES_LOGCOLLECTOR_PASSWD: "{{ V4M_LOGCOLLECTOR_PASSWORD }}" + ES_METRICGETTER_PASSWD: "{{ V4M_METRICGETTER_PASSWORD }}" + LOG_NS: "{{ V4M_LOGGING_NAMESPACE }}" + CERT_GENERATOR: "{{ 'openssl' if (V4_CFG_TLS_GENERATOR is not defined or V4_CFG_TLS_GENERATOR == None) else V4_CFG_TLS_GENERATOR }}" + +monitoring_env: + USER_DIR: "{{ tmpdir.path }}" + TLS_ENABLE: "true" + KUBECONFIG: "{{ KUBECONFIG }}" + LOG_COLOR_ENABLE: false + NODE_PLACEMENT_ENABLE: "{{ V4M_NODE_PLACEMENT_ENABLE }}" + GRAFANA_ADMIN_PASSWORD: "{{ V4M_GRAFANA_PASSWORD }}" + VIYA_NS: "{{ NAMESPACE }}" + MON_NS: "{{ V4M_MONITORING_NAMESPACE }}" + CERT_GENERATOR: "{{ 'openssl' if (V4_CFG_TLS_GENERATOR is not defined or V4_CFG_TLS_GENERATOR == None) else V4_CFG_TLS_GENERATOR }}" diff --git a/roles/monitoring/vars/main.yaml:Zone.Identifier b/roles/monitoring/vars/main.yaml:Zone.Identifier new file mode 100644 index 00000000..e69de29b From 2a579a511682542670d49d89938de18b2aed426e Mon Sep 17 00:00:00 2001 From: chjmil Date: Thu, 1 May 2025 14:52:13 -0400 Subject: [PATCH 5/8] chore: remove zone.identifiers Signed-off-by: chjmil --- roles/monitoring/defaults/main.yaml:Zone.Identifier | 0 roles/monitoring/files/aws-storageclass.yaml:Zone.Identifier | 0 roles/monitoring/files/azure-storageclass.yaml:Zone.Identifier | 0 roles/monitoring/files/gcp-storageclass.yaml:Zone.Identifier | 0 .../monitoring/tasks/cluster-logging-common.yaml:Zone.Identifier | 0 .../tasks/cluster-logging-custom-config.yaml:Zone.Identifier | 0 roles/monitoring/tasks/cluster-logging.yaml:Zone.Identifier | 0 .../tasks/cluster-monitoring-common.yaml:Zone.Identifier | 0 .../tasks/cluster-monitoring-custom-config.yaml:Zone.Identifier | 0 roles/monitoring/tasks/cluster-monitoring.yaml:Zone.Identifier | 0 roles/monitoring/tasks/main.yaml:Zone.Identifier | 0 roles/monitoring/tasks/viya-monitoring.yaml:Zone.Identifier | 0 roles/monitoring/vars/main.yaml:Zone.Identifier | 0 13 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 roles/monitoring/defaults/main.yaml:Zone.Identifier delete mode 100644 roles/monitoring/files/aws-storageclass.yaml:Zone.Identifier delete mode 100644 roles/monitoring/files/azure-storageclass.yaml:Zone.Identifier delete mode 100644 roles/monitoring/files/gcp-storageclass.yaml:Zone.Identifier delete mode 100644 roles/monitoring/tasks/cluster-logging-common.yaml:Zone.Identifier delete mode 100644 roles/monitoring/tasks/cluster-logging-custom-config.yaml:Zone.Identifier delete mode 100644 
roles/monitoring/tasks/cluster-logging.yaml:Zone.Identifier delete mode 100644 roles/monitoring/tasks/cluster-monitoring-common.yaml:Zone.Identifier delete mode 100644 roles/monitoring/tasks/cluster-monitoring-custom-config.yaml:Zone.Identifier delete mode 100644 roles/monitoring/tasks/cluster-monitoring.yaml:Zone.Identifier delete mode 100644 roles/monitoring/tasks/main.yaml:Zone.Identifier delete mode 100644 roles/monitoring/tasks/viya-monitoring.yaml:Zone.Identifier delete mode 100644 roles/monitoring/vars/main.yaml:Zone.Identifier diff --git a/roles/monitoring/defaults/main.yaml:Zone.Identifier b/roles/monitoring/defaults/main.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/files/aws-storageclass.yaml:Zone.Identifier b/roles/monitoring/files/aws-storageclass.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/files/azure-storageclass.yaml:Zone.Identifier b/roles/monitoring/files/azure-storageclass.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/files/gcp-storageclass.yaml:Zone.Identifier b/roles/monitoring/files/gcp-storageclass.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/tasks/cluster-logging-common.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-logging-common.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/tasks/cluster-logging-custom-config.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-logging-custom-config.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/tasks/cluster-logging.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-logging.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/tasks/cluster-monitoring-common.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-monitoring-common.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/tasks/cluster-monitoring.yaml:Zone.Identifier b/roles/monitoring/tasks/cluster-monitoring.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/tasks/main.yaml:Zone.Identifier b/roles/monitoring/tasks/main.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/tasks/viya-monitoring.yaml:Zone.Identifier b/roles/monitoring/tasks/viya-monitoring.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/vars/main.yaml:Zone.Identifier b/roles/monitoring/vars/main.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 From 5410c0a7870292e26be1dee79436b0e233839708 Mon Sep 17 00:00:00 2001 From: chjmil Date: Thu, 1 May 2025 14:57:19 -0400 Subject: [PATCH 6/8] chore: remove more zone identifier Signed-off-by: chjmil --- .../host-based/user-values-opensearch.yaml:Zone.Identifier | 0 .../templates/host-based/user-values-osd.yaml:Zone.Identifier | 0 .../host-based/user-values-prom-operator.yaml:Zone.Identifier | 0 .../path-based/user-values-opensearch.yaml:Zone.Identifier | 0 .../templates/path-based/user-values-osd.yaml:Zone.Identifier | 0 
.../path-based/user-values-prom-operator.yaml:Zone.Identifier | 0 roles/monitoring/templates/path-based/user.env:Zone.Identifier | 0 7 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 roles/monitoring/templates/host-based/user-values-opensearch.yaml:Zone.Identifier delete mode 100644 roles/monitoring/templates/host-based/user-values-osd.yaml:Zone.Identifier delete mode 100644 roles/monitoring/templates/host-based/user-values-prom-operator.yaml:Zone.Identifier delete mode 100644 roles/monitoring/templates/path-based/user-values-opensearch.yaml:Zone.Identifier delete mode 100644 roles/monitoring/templates/path-based/user-values-osd.yaml:Zone.Identifier delete mode 100644 roles/monitoring/templates/path-based/user-values-prom-operator.yaml:Zone.Identifier delete mode 100644 roles/monitoring/templates/path-based/user.env:Zone.Identifier diff --git a/roles/monitoring/templates/host-based/user-values-opensearch.yaml:Zone.Identifier b/roles/monitoring/templates/host-based/user-values-opensearch.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/templates/host-based/user-values-osd.yaml:Zone.Identifier b/roles/monitoring/templates/host-based/user-values-osd.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/templates/host-based/user-values-prom-operator.yaml:Zone.Identifier b/roles/monitoring/templates/host-based/user-values-prom-operator.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/templates/path-based/user-values-opensearch.yaml:Zone.Identifier b/roles/monitoring/templates/path-based/user-values-opensearch.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/templates/path-based/user-values-osd.yaml:Zone.Identifier b/roles/monitoring/templates/path-based/user-values-osd.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/templates/path-based/user-values-prom-operator.yaml:Zone.Identifier b/roles/monitoring/templates/path-based/user-values-prom-operator.yaml:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 diff --git a/roles/monitoring/templates/path-based/user.env:Zone.Identifier b/roles/monitoring/templates/path-based/user.env:Zone.Identifier deleted file mode 100644 index e69de29b..00000000 From f527cb429f030f26a35d67fed5201c3b3efb7f31 Mon Sep 17 00:00:00 2001 From: chjmil Date: Thu, 1 May 2025 15:50:26 -0400 Subject: [PATCH 7/8] feat: attempt at restoring V4M functionality Signed-off-by: chjmil --- roles/monitoring/defaults/main.yaml | 22 +-- roles/monitoring/files/aws-storageclass.yaml | 2 +- .../monitoring/files/azure-storageclass.yaml | 2 +- roles/monitoring/files/gcp-storageclass.yaml | 2 +- .../tasks/cluster-logging-common.yaml | 38 +++--- .../tasks/cluster-logging-custom-config.yaml | 122 ++++++++--------- roles/monitoring/tasks/cluster-logging.yaml | 20 +-- .../tasks/cluster-monitoring-common.yaml | 128 +++++++++--------- .../cluster-monitoring-custom-config.yaml | 118 ++++++++-------- roles/monitoring/tasks/main.yaml | 7 +- roles/monitoring/tasks/viya-monitoring.yaml | 2 +- roles/monitoring/vars/main.yaml | 14 +- 12 files changed, 242 insertions(+), 235 deletions(-) diff --git a/roles/monitoring/defaults/main.yaml b/roles/monitoring/defaults/main.yaml index 49f9c990..f7238a01 100644 --- a/roles/monitoring/defaults/main.yaml +++ b/roles/monitoring/defaults/main.yaml @@ -1,4 +1,4 @@ -# Copyright © 
2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 --- @@ -14,7 +14,7 @@ V4M_BASE_DOMAIN: "{{ V4_CFG_BASE_DOMAIN }}" V4M_CERT: null V4M_KEY: null V4M_ROUTING: host-based -V4M_CUSTOM_CONFIG_USER_DIR: null +# V4M_CUSTOM_CONFIG_USER_DIR: null V4M_LOGGING_NAMESPACE: logging V4M_MONITORING_NAMESPACE: monitoring @@ -24,21 +24,21 @@ V4M_KIBANA_CERT: "{{ V4M_CERT }}" V4M_KIBANA_KEY: "{{ V4M_KEY }}" V4M_KIBANA_LOGADM_PASSWORD: "{{ V4M_KIBANA_PASSWORD if V4M_KIBANA_PASSWORD else lookup('password', '/dev/null chars=ascii_letters,digits') }}" V4M_KIBANA_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" -V4M_KIBANASERVER_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" -V4M_LOGCOLLECTOR_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" -V4M_METRICGETTER_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" +# V4M_KIBANASERVER_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" +# V4M_LOGCOLLECTOR_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" +# V4M_METRICGETTER_PASSWORD: "{{ lookup('password', '/dev/null chars=ascii_letters,digits') }}" V4M_ELASTICSEARCH_FQDN: search.{{ V4M_BASE_DOMAIN }} V4M_ELASTICSEARCH_CERT: "{{ V4M_CERT }}" V4M_ELASTICSEARCH_KEY: "{{ V4M_KEY }}" -V4M_PROMETHEUS_FQDN: prometheus.{{ V4M_BASE_DOMAIN }} -V4M_PROMETHEUS_CERT: "{{ V4M_CERT }}" -V4M_PROMETHEUS_KEY: "{{ V4M_KEY }}" +# V4M_PROMETHEUS_FQDN: prometheus.{{ V4M_BASE_DOMAIN }} +# V4M_PROMETHEUS_CERT: "{{ V4M_CERT }}" +# V4M_PROMETHEUS_KEY: "{{ V4M_KEY }}" -V4M_ALERTMANAGER_FQDN: alertmanager.{{ V4M_BASE_DOMAIN }} -V4M_ALERTMANAGER_CERT: "{{ V4M_CERT }}" -V4M_ALERTMANAGER_KEY: "{{ V4M_KEY }}" +# V4M_ALERTMANAGER_FQDN: alertmanager.{{ V4M_BASE_DOMAIN }} +# V4M_ALERTMANAGER_CERT: "{{ V4M_CERT }}" +# V4M_ALERTMANAGER_KEY: "{{ V4M_KEY }}" V4M_GRAFANA_FQDN: grafana.{{ V4M_BASE_DOMAIN }} V4M_GRAFANA_CERT: "{{ V4M_CERT }}" diff --git a/roles/monitoring/files/aws-storageclass.yaml b/roles/monitoring/files/aws-storageclass.yaml index f6723a5d..b117c899 100644 --- a/roles/monitoring/files/aws-storageclass.yaml +++ b/roles/monitoring/files/aws-storageclass.yaml @@ -1,4 +1,4 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 --- diff --git a/roles/monitoring/files/azure-storageclass.yaml b/roles/monitoring/files/azure-storageclass.yaml index f275573f..81f14961 100644 --- a/roles/monitoring/files/azure-storageclass.yaml +++ b/roles/monitoring/files/azure-storageclass.yaml @@ -1,4 +1,4 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 --- diff --git a/roles/monitoring/files/gcp-storageclass.yaml b/roles/monitoring/files/gcp-storageclass.yaml index 26fc0733..7d21fe7c 100644 --- a/roles/monitoring/files/gcp-storageclass.yaml +++ b/roles/monitoring/files/gcp-storageclass.yaml @@ -1,4 +1,4 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 --- diff --git a/roles/monitoring/tasks/cluster-logging-common.yaml b/roles/monitoring/tasks/cluster-logging-common.yaml index 71bd64f9..a26c4252 100644 --- a/roles/monitoring/tasks/cluster-logging-common.yaml +++ b/roles/monitoring/tasks/cluster-logging-common.yaml @@ -1,33 +1,33 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 --- -- name: cluster-logging - create userdir # noqa: name[casing] - file: - path: "{{ tmpdir.path }}/logging/" - state: directory - mode: "0770" - tags: - - install - - uninstall - - update +# - name: cluster-logging - create userdir # noqa: name[casing] +# file: +# path: "{{ tmpdir.path }}/logging/" +# state: directory +# mode: "0770" +# tags: +# - install +# - uninstall +# - update - name: V4M - cluster logging config vars include_tasks: cluster-logging.yaml - when: - - V4M_CUSTOM_CONFIG_USER_DIR is none + # when: + # - V4M_CUSTOM_CONFIG_USER_DIR is none tags: - install - uninstall - update -- name: V4M - cluster logging custom config user dir - include_tasks: cluster-logging-custom-config.yaml - when: - - V4M_CUSTOM_CONFIG_USER_DIR is not none - tags: - - install - - update +# - name: V4M - cluster logging custom config user dir +# include_tasks: cluster-logging-custom-config.yaml +# when: +# - V4M_CUSTOM_CONFIG_USER_DIR is not none +# tags: +# - install +# - update - name: cluster-logging - elasticsearch cert # noqa: name[casing] kubernetes.core.k8s: diff --git a/roles/monitoring/tasks/cluster-logging-custom-config.yaml b/roles/monitoring/tasks/cluster-logging-custom-config.yaml index 9937ac53..b0d888b0 100644 --- a/roles/monitoring/tasks/cluster-logging-custom-config.yaml +++ b/roles/monitoring/tasks/cluster-logging-custom-config.yaml @@ -1,68 +1,68 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# # Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# # SPDX-License-Identifier: Apache-2.0 ---- -- name: V4M - custom config is valid path - stat: - path: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" - register: folder - tags: - - install - - uninstall - - update +# --- +# - name: V4M - custom config is valid path +# stat: +# path: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" +# register: folder +# tags: +# - install +# - uninstall +# - update -- name: V4M - custom config user dir check - fail: - msg: "{{ V4M_CUSTOM_CONFIG_USER_DIR }} does not exist." - when: - - not folder.stat.exists - tags: - - install - - uninstall - - update +# - name: V4M - custom config user dir check +# fail: +# msg: "{{ V4M_CUSTOM_CONFIG_USER_DIR }} does not exist." 
+# when: +# - not folder.stat.exists +# tags: +# - install +# - uninstall +# - update -- name: cluster-logging - user dir find files # noqa: name[casing] - find: - paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" - file_type: file - register: user_dir_folder - tags: - - install - - update +# - name: cluster-logging - user dir find files # noqa: name[casing] +# find: +# paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" +# file_type: file +# register: user_dir_folder +# tags: +# - install +# - update -- name: cluster-logging - copy user dir files # noqa: name[casing] - copy: - src: "{{ item.path }}" - dest: "{{ tmpdir.path }}" - mode: "0660" - loop: "{{ user_dir_folder.files }}" - tags: - - install - - update +# - name: cluster-logging - copy user dir files # noqa: name[casing] +# copy: +# src: "{{ item.path }}" +# dest: "{{ tmpdir.path }}" +# mode: "0660" +# loop: "{{ user_dir_folder.files }}" +# tags: +# - install +# - update -- name: cluster-logging - find files # noqa: name[casing] - find: - paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}/logging/" - file_type: file - register: logging_folder - tags: - - install - - update +# - name: cluster-logging - find files # noqa: name[casing] +# find: +# paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}/logging/" +# file_type: file +# register: logging_folder +# tags: +# - install +# - update -- name: cluster-logging - copy user dir logging files # noqa: name[casing] - copy: - src: "{{ item.path }}" - dest: "{{ tmpdir.path }}/logging/" - mode: "0660" - loop: "{{ logging_folder.files }}" - tags: - - install - - update +# - name: cluster-logging - copy user dir logging files # noqa: name[casing] +# copy: +# src: "{{ item.path }}" +# dest: "{{ tmpdir.path }}/logging/" +# mode: "0660" +# loop: "{{ logging_folder.files }}" +# tags: +# - install +# - update -- name: cluster-logging - deploy # noqa: name[casing] - command: - cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/logging/bin/deploy_logging.sh" - environment: "{{ logging_map['env'] }}" - tags: - - install - - update +# - name: cluster-logging - deploy # noqa: name[casing] +# command: +# cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/logging/bin/deploy_logging.sh" +# environment: "{{ logging_map['env'] }}" +# tags: +# - install +# - update diff --git a/roles/monitoring/tasks/cluster-logging.yaml b/roles/monitoring/tasks/cluster-logging.yaml index 3d476206..af183a0c 100644 --- a/roles/monitoring/tasks/cluster-logging.yaml +++ b/roles/monitoring/tasks/cluster-logging.yaml @@ -1,4 +1,4 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 --- @@ -18,9 +18,9 @@ set_fact: V4M_KIBANA_PASSWORD: "{{ V4M_KIBANA_PASSWORD }}" V4M_KIBANA_LOGADM_PASSWORD: "{{ V4M_KIBANA_LOGADM_PASSWORD }}" - V4M_KIBANASERVER_PASSWORD: "{{ V4M_KIBANASERVER_PASSWORD }}" - V4M_LOGCOLLECTOR_PASSWORD: "{{ V4M_LOGCOLLECTOR_PASSWORD }}" - V4M_METRICGETTER_PASSWORD: "{{ V4M_METRICGETTER_PASSWORD }}" + # V4M_KIBANASERVER_PASSWORD: "{{ V4M_KIBANASERVER_PASSWORD }}" + # V4M_LOGCOLLECTOR_PASSWORD: "{{ V4M_LOGCOLLECTOR_PASSWORD }}" + # V4M_METRICGETTER_PASSWORD: "{{ V4M_METRICGETTER_PASSWORD }}" tags: - install @@ -49,7 +49,8 @@ dest: "{{ tmpdir.path }}/logging/user-values-opensearch.yaml" mode: "0660" when: - - V4M_ROUTING|lower == 'host-based' + # - V4M_ROUTING|lower == 'host-based' + - V4M_ROUTING|lower == 'host-based' or V4M_ROUTING|lower == 'host' tags: - install - update @@ -61,7 +62,8 @@ dest: "{{ tmpdir.path }}/logging/user-values-osd.yaml" mode: "0660" when: - - V4M_ROUTING|lower == 'host-based' + # - V4M_ROUTING|lower == 'host-based' + - V4M_ROUTING|lower == 'host-based' or V4M_ROUTING|lower == 'host' tags: - install - update @@ -73,7 +75,8 @@ dest: "{{ tmpdir.path }}/logging/user-values-opensearch.yaml" mode: "0660" when: - - V4M_ROUTING|lower == 'path-based' + - V4M_ROUTING|lower == 'path-based' or V4M_ROUTING|lower == 'path' + # - V4M_ROUTING|lower == 'path-based' tags: - install - update @@ -85,7 +88,8 @@ dest: "{{ tmpdir.path }}/logging/user-values-osd.yaml" mode: "0660" when: - - V4M_ROUTING|lower == 'path-based' + - V4M_ROUTING|lower == 'path-based' or V4M_ROUTING|lower == 'path' + # - V4M_ROUTING|lower == 'path-based' tags: - install - update diff --git a/roles/monitoring/tasks/cluster-monitoring-common.yaml b/roles/monitoring/tasks/cluster-monitoring-common.yaml index adcd19eb..fcb46b88 100644 --- a/roles/monitoring/tasks/cluster-monitoring-common.yaml +++ b/roles/monitoring/tasks/cluster-monitoring-common.yaml @@ -1,33 +1,35 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 --- -- name: cluster-monitoring - create userdir # noqa: name[casing] - file: - path: "{{ tmpdir.path }}/monitoring/" - state: directory - mode: "0770" - tags: - - install - - uninstall - - update +# remove because we are only using env vars to handle it +# - name: cluster-monitoring - create userdir # noqa: name[casing] +# file: +# path: "{{ tmpdir.path }}/monitoring/" +# state: directory +# mode: "0770" +# tags: +# - install +# - uninstall +# - update - name: V4M - cluster monitoring config vars include_tasks: cluster-monitoring.yaml - when: - - V4M_CUSTOM_CONFIG_USER_DIR is none + # when: + # - V4M_CUSTOM_CONFIG_USER_DIR is none tags: - install - uninstall - update -- name: V4M - cluster monitoring custom config user dir - include_tasks: cluster-monitoring-custom-config.yaml - when: - - V4M_CUSTOM_CONFIG_USER_DIR is not none - tags: - - install - - update +# remove because we are not supporting custom configs +# - name: V4M - cluster monitoring custom config user dir +# include_tasks: cluster-monitoring-custom-config.yaml +# when: +# - V4M_CUSTOM_CONFIG_USER_DIR is not none +# tags: +# - install +# - update - name: cluster-monitoring - grafana cert # noqa: name[casing] kubernetes.core.k8s: @@ -52,51 +54,51 @@ - install - update -- name: cluster-monitoring - prometheus cert # noqa: name[casing] - kubernetes.core.k8s: - kubeconfig: "{{ KUBECONFIG }}" - state: present - definition: - kind: Secret - apiVersion: v1 - metadata: - name: prometheus-ingress-tls-secret - namespace: "{{ V4M_MONITORING_NAMESPACE }}" - data: - tls.crt: >- - {{ lookup('file', V4M_PROMETHEUS_CERT) | b64encode }} - tls.key: >- - {{ lookup('file', V4M_PROMETHEUS_KEY) | b64encode }} - type: kubernetes.io/tls - when: - - V4M_PROMETHEUS_CERT is not none - - V4M_PROMETHEUS_KEY is not none - tags: - - install - - update +# - name: cluster-monitoring - prometheus cert # noqa: name[casing] +# kubernetes.core.k8s: +# kubeconfig: "{{ KUBECONFIG }}" +# state: present +# definition: +# kind: Secret +# apiVersion: v1 +# metadata: +# name: prometheus-ingress-tls-secret +# namespace: "{{ V4M_MONITORING_NAMESPACE }}" +# data: +# tls.crt: >- +# {{ lookup('file', V4M_PROMETHEUS_CERT) | b64encode }} +# tls.key: >- +# {{ lookup('file', V4M_PROMETHEUS_KEY) | b64encode }} +# type: kubernetes.io/tls +# when: +# - V4M_PROMETHEUS_CERT is not none +# - V4M_PROMETHEUS_KEY is not none +# tags: +# - install +# - update -- name: cluster-monitoring - alertmanager cert # noqa: name[casing] - kubernetes.core.k8s: - kubeconfig: "{{ KUBECONFIG }}" - state: present - definition: - kind: Secret - apiVersion: v1 - metadata: - name: alertmanager-ingress-tls-secret - namespace: "{{ V4M_MONITORING_NAMESPACE }}" - data: - tls.crt: >- - {{ lookup('file', V4M_ALERTMANAGER_CERT) | b64encode }} - tls.key: >- - {{ lookup('file', V4M_ALERTMANAGER_KEY) | b64encode }} - type: kubernetes.io/tls - when: - - V4M_ALERTMANAGER_CERT is not none - - V4M_ALERTMANAGER_KEY is not none - tags: - - install - - update +# - name: cluster-monitoring - alertmanager cert # noqa: name[casing] +# kubernetes.core.k8s: +# kubeconfig: "{{ KUBECONFIG }}" +# state: present +# definition: +# kind: Secret +# apiVersion: v1 +# metadata: +# name: alertmanager-ingress-tls-secret +# namespace: "{{ V4M_MONITORING_NAMESPACE }}" +# data: +# tls.crt: >- +# {{ lookup('file', V4M_ALERTMANAGER_CERT) | b64encode }} +# tls.key: >- +# {{ lookup('file', V4M_ALERTMANAGER_KEY) | b64encode }} +# type: kubernetes.io/tls +# when: +# - V4M_ALERTMANAGER_CERT is not none +# - 
V4M_ALERTMANAGER_KEY is not none +# tags: +# - install +# - update - name: cluster-monitoring - uninstall # noqa: name[casing] command: diff --git a/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml b/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml index 4a9134b6..7beac3fa 100644 --- a/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml +++ b/roles/monitoring/tasks/cluster-monitoring-custom-config.yaml @@ -1,68 +1,68 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 --- -- name: V4M - custom config is valid path - stat: - path: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" - register: folder - tags: - - install - - uninstall - - update +# - name: V4M - custom config is valid path +# stat: +# path: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" +# register: folder +# tags: +# - install +# - uninstall +# - update -- name: V4M - custom config user dir check - fail: - msg: "{{ V4M_CUSTOM_CONFIG_USER_DIR }} does not exist." - when: - - not folder.stat.exists - tags: - - install - - uninstall - - update +# - name: V4M - custom config user dir check +# fail: +# msg: "{{ V4M_CUSTOM_CONFIG_USER_DIR }} does not exist." +# when: +# - not folder.stat.exists +# tags: +# - install +# - uninstall +# - update -- name: cluster-monitoring - user dir find files # noqa: name[casing] - find: - paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" - file_type: file - register: user_dir_folder - tags: - - install - - update +# - name: cluster-monitoring - user dir find files # noqa: name[casing] +# find: +# paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}" +# file_type: file +# register: user_dir_folder +# tags: +# - install +# - update -- name: cluster-monitoring - copy user dir files # noqa: name[casing] - copy: - src: "{{ item.path }}" - dest: "{{ tmpdir.path }}" - mode: "0660" - loop: "{{ user_dir_folder.files }}" - tags: - - install - - update +# - name: cluster-monitoring - copy user dir files # noqa: name[casing] +# copy: +# src: "{{ item.path }}" +# dest: "{{ tmpdir.path }}" +# mode: "0660" +# loop: "{{ user_dir_folder.files }}" +# tags: +# - install +# - update -- name: cluster-monitoring - find files # noqa: name[casing] - find: - paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}/monitoring/" - file_type: file - register: monitoring_folder - tags: - - install - - update +# - name: cluster-monitoring - find files # noqa: name[casing] +# find: +# paths: "{{ V4M_CUSTOM_CONFIG_USER_DIR }}/monitoring/" +# file_type: file +# register: monitoring_folder +# tags: +# - install +# - update -- name: cluster-monitoring - copy user dir monitoring files # noqa: name[casing] - copy: - src: "{{ item.path }}" - dest: "{{ tmpdir.path }}/monitoring/" - mode: "0660" - loop: "{{ monitoring_folder.files }}" - tags: - - install - - update +# - name: cluster-monitoring - copy user dir monitoring files # noqa: name[casing] +# copy: +# src: "{{ item.path }}" +# dest: "{{ tmpdir.path }}/monitoring/" +# mode: "0660" +# loop: "{{ monitoring_folder.files }}" +# tags: +# - install +# - update -- name: cluster-monitoring - deploy # noqa: name[casing] - command: - cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/monitoring/bin/deploy_monitoring_cluster.sh" - environment: "{{ monitoring_env }}" - tags: - - install - - update +# - name: cluster-monitoring - deploy # noqa: name[casing] +# command: +# cmd: "{{ tmpdir.path }}/viya4-monitoring-kubernetes/monitoring/bin/deploy_monitoring_cluster.sh" +# 
environment: "{{ monitoring_env }}" +# tags: +# - install +# - update diff --git a/roles/monitoring/tasks/main.yaml b/roles/monitoring/tasks/main.yaml index 9b9d5054..6dae6e65 100644 --- a/roles/monitoring/tasks/main.yaml +++ b/roles/monitoring/tasks/main.yaml @@ -1,14 +1,15 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 --- +# TODO check with Greg on this. Is it ok to support both host-based and host? - name: V4M - ensure supported value for V4M_ROUTING ansible.builtin.assert: - that: '{{ ["host-based", "path-based"] | intersect([V4M_ROUTING]) | count == 1 }}' + that: '{{ ["host-based", "path-based", "host", "path"] | intersect([V4M_ROUTING]) | count == 1 }}' msg: > Invalid V4M_ROUTING value: {{ V4M_ROUTING }} - Supported values for V4M_ROUTING are `host-based` or `path-based` + Supported values for V4M_ROUTING are `host` or `path` when: - V4M_CUSTOM_CONFIG_USER_DIR is none tags: diff --git a/roles/monitoring/tasks/viya-monitoring.yaml b/roles/monitoring/tasks/viya-monitoring.yaml index 4ee2006b..9fd8b1e9 100644 --- a/roles/monitoring/tasks/viya-monitoring.yaml +++ b/roles/monitoring/tasks/viya-monitoring.yaml @@ -1,4 +1,4 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 --- diff --git a/roles/monitoring/vars/main.yaml b/roles/monitoring/vars/main.yaml index df9e6af3..8ae1ea5d 100644 --- a/roles/monitoring/vars/main.yaml +++ b/roles/monitoring/vars/main.yaml @@ -1,4 +1,4 @@ -# Copyright © 2020-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. +# Copyright © 2020-2025, SAS Institute Inc., Cary, NC, USA. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 --- @@ -6,9 +6,9 @@ logging_map: secret: internal-user-admin: V4M_KIBANA_PASSWORD internal-user-logadm: V4M_KIBANA_LOGADM_PASSWORD - internal-user-kibanaserver: V4M_KIBANASERVER_PASSWORD - internal-user-logcollector: V4M_LOGCOLLECTOR_PASSWORD - internal-user-metricgetter: V4M_METRICGETTER_PASSWORD + # internal-user-kibanaserver: V4M_KIBANASERVER_PASSWORD + # internal-user-logcollector: V4M_LOGCOLLECTOR_PASSWORD + # internal-user-metricgetter: V4M_METRICGETTER_PASSWORD env: USER_DIR: "{{ tmpdir.path }}" TLS_ENABLE: "true" @@ -18,9 +18,9 @@ logging_map: NODE_PLACEMENT_ENABLE: "{{ V4M_NODE_PLACEMENT_ENABLE }}" ES_ADMIN_PASSWD: "{{ V4M_KIBANA_PASSWORD }}" LOG_LOGADM_PASSWD: "{{ V4M_KIBANA_LOGADM_PASSWORD }}" - ES_KIBANASERVER_PASSWD: "{{ V4M_KIBANASERVER_PASSWORD }}" - ES_LOGCOLLECTOR_PASSWD: "{{ V4M_LOGCOLLECTOR_PASSWORD }}" - ES_METRICGETTER_PASSWD: "{{ V4M_METRICGETTER_PASSWORD }}" + # ES_KIBANASERVER_PASSWD: "{{ V4M_KIBANASERVER_PASSWORD }}" + # ES_LOGCOLLECTOR_PASSWD: "{{ V4M_LOGCOLLECTOR_PASSWORD }}" + # ES_METRICGETTER_PASSWD: "{{ V4M_METRICGETTER_PASSWORD }}" LOG_NS: "{{ V4M_LOGGING_NAMESPACE }}" CERT_GENERATOR: "{{ 'openssl' if (V4_CFG_TLS_GENERATOR is not defined or V4_CFG_TLS_GENERATOR == None) else V4_CFG_TLS_GENERATOR }}" From d01bda8798450e2cb29244e6e0cdad3276bb0600 Mon Sep 17 00:00:00 2001 From: chjmil Date: Thu, 1 May 2025 17:09:49 -0400 Subject: [PATCH 8/8] chore: remove unnecessary yaml files Signed-off-by: chjmil --- roles/monitoring/tasks/main.yaml | 7 +- .../host-based/user-values-opensearch.yaml | 14 ---- .../templates/host-based/user-values-osd.yaml | 16 ---- .../host-based/user-values-prom-operator.yaml | 81 ------------------- .../path-based/user-values-opensearch.yaml | 18 ----- .../templates/path-based/user-values-osd.yaml | 45 ----------- .../path-based/user-values-prom-operator.yaml | 81 ------------------- .../monitoring/templates/path-based/user.env | 1 - roles/monitoring/vars/main.yaml | 3 + 9 files changed, 6 insertions(+), 260 deletions(-) delete mode 100644 roles/monitoring/templates/host-based/user-values-opensearch.yaml delete mode 100644 roles/monitoring/templates/host-based/user-values-osd.yaml delete mode 100644 roles/monitoring/templates/host-based/user-values-prom-operator.yaml delete mode 100644 roles/monitoring/templates/path-based/user-values-opensearch.yaml delete mode 100644 roles/monitoring/templates/path-based/user-values-osd.yaml delete mode 100644 roles/monitoring/templates/path-based/user-values-prom-operator.yaml delete mode 100644 roles/monitoring/templates/path-based/user.env diff --git a/roles/monitoring/tasks/main.yaml b/roles/monitoring/tasks/main.yaml index 6dae6e65..4ef2cfb2 100644 --- a/roles/monitoring/tasks/main.yaml +++ b/roles/monitoring/tasks/main.yaml @@ -2,16 +2,15 @@ # SPDX-License-Identifier: Apache-2.0 --- -# TODO check with Greg on this. Is it ok to support both host-based and host? 
- name: V4M - ensure supported value for V4M_ROUTING ansible.builtin.assert: that: '{{ ["host-based", "path-based", "host", "path"] | intersect([V4M_ROUTING]) | count == 1 }}' msg: > Invalid V4M_ROUTING value: {{ V4M_ROUTING }} - Supported values for V4M_ROUTING are `host` or `path` - when: - - V4M_CUSTOM_CONFIG_USER_DIR is none + Supported values for V4M_ROUTING are `host-based` or `path-based` + # when: + # - V4M_CUSTOM_CONFIG_USER_DIR is none tags: - install - uninstall diff --git a/roles/monitoring/templates/host-based/user-values-opensearch.yaml b/roles/monitoring/templates/host-based/user-values-opensearch.yaml deleted file mode 100644 index 5c19a839..00000000 --- a/roles/monitoring/templates/host-based/user-values-opensearch.yaml +++ /dev/null @@ -1,14 +0,0 @@ -persistence: - storageClass: {{ V4M_STORAGECLASS }} -ingress: - ingressClassName: nginx - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: true - path: / - hosts: - - {{ V4M_ELASTICSEARCH_FQDN }} - tls: - - secretName: elasticsearch-ingress-tls-secret - hosts: - - {{ V4M_ELASTICSEARCH_FQDN }} diff --git a/roles/monitoring/templates/host-based/user-values-osd.yaml b/roles/monitoring/templates/host-based/user-values-osd.yaml deleted file mode 100644 index 3f7f66a0..00000000 --- a/roles/monitoring/templates/host-based/user-values-osd.yaml +++ /dev/null @@ -1,16 +0,0 @@ -ingress: - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: true - ingressClassName: nginx - hosts: - - host: {{ V4M_KIBANA_FQDN }} - paths: - - path: / - backend: - serviceName: v4m-osd - servicePort: 5601 - tls: - - secretName: kibana-ingress-tls-secret - hosts: - - {{ V4M_KIBANA_FQDN }} diff --git a/roles/monitoring/templates/host-based/user-values-prom-operator.yaml b/roles/monitoring/templates/host-based/user-values-prom-operator.yaml deleted file mode 100644 index 30297faf..00000000 --- a/roles/monitoring/templates/host-based/user-values-prom-operator.yaml +++ /dev/null @@ -1,81 +0,0 @@ -prometheus: - # Disable default configuration of NodePort - service: - type: ClusterIP - nodePort: null - # Define host-based ingress - ingress: - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: true - ingressClassName: nginx - tls: - - hosts: - - {{ V4M_PROMETHEUS_FQDN }} - secretName: prometheus-ingress-tls-secret - hosts: - - {{ V4M_PROMETHEUS_FQDN }} - prometheusSpec: - externalUrl: "https://{{ V4M_PROMETHEUS_FQDN }}" - alertingEndpoints: - - name: v4m-alertmanager - port: http-web - scheme: https - tlsConfig: - insecureSkipVerify: true - storageSpec: - volumeClaimTemplate: - spec: - storageClassName: {{ V4M_STORAGECLASS }} - -alertmanager: - # Disable default configuration of NodePort - service: - type: ClusterIP - nodePort: null - # Define host-based ingress - ingress: - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - ingressClassName: nginx - enabled: true - tls: - - hosts: - - {{ V4M_ALERTMANAGER_FQDN }} - secretName: alertmanager-ingress-tls-secret - hosts: - - {{ V4M_ALERTMANAGER_FQDN }} - alertmanagerSpec: - externalUrl: "https://{{ V4M_ALERTMANAGER_FQDN }}" - storage: - volumeClaimTemplate: - spec: - storageClassName: {{ V4M_STORAGECLASS }} - -grafana: - # Disable default configuration of NodePort - service: - type: ClusterIP - nodePort: null - # Define host-based ingress - ingress: - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: true - ingressClassName: nginx - tls: - - hosts: - - {{ V4M_GRAFANA_FQDN }} - secretName: 
grafana-ingress-tls-secret - hosts: - - {{ V4M_GRAFANA_FQDN }} - path: / - "grafana.ini": - server: - domain: {{ V4M_BASE_DOMAIN }} - root_url: "https://{{ V4M_GRAFANA_FQDN }}" - serve_from_sub_path: false - testFramework: - enabled: false - persistence: - storageClassName: {{ V4M_STORAGECLASS }} diff --git a/roles/monitoring/templates/path-based/user-values-opensearch.yaml b/roles/monitoring/templates/path-based/user-values-opensearch.yaml deleted file mode 100644 index 8a174956..00000000 --- a/roles/monitoring/templates/path-based/user-values-opensearch.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# OpenSearch ingress is optional -ingress: - ingressClassName: nginx - annotations: - nginx.ingress.kubernetes.io/configuration-snippet: | - rewrite (?i)/search/(.*) /$1 break; - rewrite (?i)/search$ / break; - nginx.ingress.kubernetes.io/rewrite-target: /search - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: true - path: /search - pathType: Prefix - hosts: - - {{ V4M_BASE_DOMAIN }} - tls: - - secretName: elasticsearch-ingress-tls-secret - hosts: - - {{ V4M_BASE_DOMAIN }} diff --git a/roles/monitoring/templates/path-based/user-values-osd.yaml b/roles/monitoring/templates/path-based/user-values-osd.yaml deleted file mode 100644 index 1ebf4c25..00000000 --- a/roles/monitoring/templates/path-based/user-values-osd.yaml +++ /dev/null @@ -1,45 +0,0 @@ -extraEnvs: -# SERVER_BASEPATH needed for path-based ingress -- name: SERVER_BASEPATH - value: /dashboards -# OSD_ENABLE_TLS, Username & password need to be set here since helm *replaces* array values -- name: OSD_ENABLE_TLS - valueFrom: - secretKeyRef: - name: v4m-osd-tls-enabled - key: enable_tls -- name: OPENSEARCH_USERNAME - valueFrom: - secretKeyRef: - name: internal-user-kibanaserver - key: username -- name: OPENSEARCH_PASSWORD - valueFrom: - secretKeyRef: - name: internal-user-kibanaserver - key: password -service: - type: ClusterIP - nodePort: null -ingress: - ingressClassName: nginx - annotations: - nginx.ingress.kubernetes.io/affinity: "cookie" - nginx.ingress.kubernetes.io/configuration-snippet: | - rewrite (?i)/dashboards/(.*) /$1 break; - rewrite (?i)/dashboards$ / break; - nginx.ingress.kubernetes.io/rewrite-target: /dashboards - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: true - hosts: - - host: {{ V4M_BASE_DOMAIN }} - paths: - - path: /dashboards - backend: - serviceName: v4m-osd - servicePort: 5601 - - tls: - - secretName: kibana-ingress-tls-secret - hosts: - - {{ V4M_BASE_DOMAIN }} diff --git a/roles/monitoring/templates/path-based/user-values-prom-operator.yaml b/roles/monitoring/templates/path-based/user-values-prom-operator.yaml deleted file mode 100644 index 1b40f137..00000000 --- a/roles/monitoring/templates/path-based/user-values-prom-operator.yaml +++ /dev/null @@ -1,81 +0,0 @@ -grafana: - # Disable default configuration of NodePort - service: - type: ClusterIP - nodePort: null - ingress: - ingressClassName: nginx - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: true - tls: - - hosts: - - {{ V4M_BASE_DOMAIN }} - secretName: grafana-ingress-tls-secret - hosts: - - {{ V4M_BASE_DOMAIN }} - path: /grafana - pathType: Prefix - "grafana.ini": - server: - domain: {{ V4M_BASE_DOMAIN }} - root_url: "https://{{ V4M_BASE_DOMAIN }}/grafana" - serve_from_sub_path: true - -# Note that Prometheus and Alertmanager do not have any -# authentication configured by default, exposing an -# unauthenticated application without other restrictions -# in place is insecure. 
- -prometheus: - # Disable default configuration of NodePort - service: - type: ClusterIP - nodePort: null - # Define path-based ingress - ingress: - ingressClassName: nginx - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: true - tls: - - hosts: - - {{ V4M_BASE_DOMAIN }} - secretName: prometheus-ingress-tls-secret - hosts: - - {{ V4M_BASE_DOMAIN }} - path: /prometheus - pathType: Prefix - prometheusSpec: - routePrefix: /prometheus - externalUrl: "https://{{ V4M_BASE_DOMAIN }}/prometheus" - alertingEndpoints: - - name: v4m-alertmanager - port: http-web - pathPrefix: "/alertmanager" - scheme: https - tlsConfig: - insecureSkipVerify: true - -alertmanager: - # Disable default configuration of NodePort - service: - type: ClusterIP - nodePort: null - # Define path-based ingress - ingress: - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - ingressClassName: nginx - enabled: true - tls: - - hosts: - - {{ V4M_BASE_DOMAIN }} - secretName: alertmanager-ingress-tls-secret - hosts: - - {{ V4M_BASE_DOMAIN }} - path: /alertmanager - pathType: Prefix - alertmanagerSpec: - routePrefix: /alertmanager - externalUrl: "https://{{ V4M_BASE_DOMAIN }}/alertmanager" diff --git a/roles/monitoring/templates/path-based/user.env b/roles/monitoring/templates/path-based/user.env deleted file mode 100644 index b66d0bbf..00000000 --- a/roles/monitoring/templates/path-based/user.env +++ /dev/null @@ -1 +0,0 @@ -MON_TLS_PATH_INGRESS=true diff --git a/roles/monitoring/vars/main.yaml b/roles/monitoring/vars/main.yaml index 8ae1ea5d..c144dce4 100644 --- a/roles/monitoring/vars/main.yaml +++ b/roles/monitoring/vars/main.yaml @@ -34,3 +34,6 @@ monitoring_env: VIYA_NS: "{{ NAMESPACE }}" MON_NS: "{{ V4M_MONITORING_NAMESPACE }}" CERT_GENERATOR: "{{ 'openssl' if (V4_CFG_TLS_GENERATOR is not defined or V4_CFG_TLS_GENERATOR == None) else V4_CFG_TLS_GENERATOR }}" + # TODO Make sure to only set this in path based + MON_TLS_PATH_INGRESS: true + ROUTING: "{{ V4M_ROUTING }}"