From 83f3b0a00c6181f86417cf4ddce26f956a960878 Mon Sep 17 00:00:00 2001
From: Dhruv Shah
Date: Fri, 10 Oct 2025 12:44:29 +0530
Subject: [PATCH] chore(deps): Migrate routing processor to routing connector
 for metrics collection

Use of the routing connector is gated behind a feature flag that is kept off
(false) by default. After sufficient notice and release notes, the feature
flag that enables the routing connector for metrics collection will be turned
on. The routing processor has already been removed from upstream
otelcol-contrib, but to make the migration smoother, Sumo Logic still ships
the routing processor in its own build of the OpenTelemetry Collector.

Signed-off-by: Dhruv Shah
---
 .changelog/4025.added.txt                     |   1 +
 ci/check_configuration_keys.py                |   1 +
 deploy/helm/sumologic/README.md               |   1 +
 .../conf/metrics/otelcol/config.yaml          |  66 +++
 .../conf/metrics/otelcol/connectors.yaml      |  82 ++++
 .../conf/metrics/otelcol/pipeline.yaml        |   7 +-
 .../conf/metrics/otelcol/processors.yaml      |   2 +-
 deploy/helm/sumologic/values.yaml             |  15 +-
 tests/helm/metrics_test.go                    |  76 ++++
 .../custom_routing_connector.input.yaml       |  13 +
 .../custom_routing_connector.output.yaml      | 378 ++++++++++++++++
 ...gic_mock_http_routing_connector.input.yaml |  12 +
 ...ic_mock_http_routing_connector.output.yaml | 420 ++++++++++++++++++
 13 files changed, 1067 insertions(+), 7 deletions(-)
 create mode 100644 .changelog/4025.added.txt
 create mode 100644 deploy/helm/sumologic/conf/metrics/otelcol/connectors.yaml
 create mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/custom_routing_connector.input.yaml
 create mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/custom_routing_connector.output.yaml
 create mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http_routing_connector.input.yaml
 create mode 100644 tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http_routing_connector.output.yaml

diff --git a/.changelog/4025.added.txt b/.changelog/4025.added.txt
new file mode 100644
index 0000000000..514cf5f5ce
--- /dev/null
+++ b/.changelog/4025.added.txt
@@ -0,0 +1 @@
+chore(deps): Migrate routing processor to routing connector for metrics collection
\ No newline at end of file
diff --git a/ci/check_configuration_keys.py b/ci/check_configuration_keys.py
index b29f35cc6c..ba30b9323d 100755
--- a/ci/check_configuration_keys.py
+++ b/ci/check_configuration_keys.py
@@ -47,6 +47,7 @@
     'sumologic.setup.debug',
     'metrics-server.image.pullSecrets',
     'sumologic.events.sourceCategory',
+    'sumologic.metrics.useRoutingConnectors',
 }

 def main(values_path: str, readme_path: str, full_diff=False) -> None:
diff --git a/deploy/helm/sumologic/README.md b/deploy/helm/sumologic/README.md
index 60a136401a..f36d39a344 100644
--- a/deploy/helm/sumologic/README.md
+++ b/deploy/helm/sumologic/README.md
@@ -151,6 +151,7 @@ The following table lists the configurable parameters of the Sumo Logic chart an
 | `sumologic.metrics.dropHistogramBuckets` | Drop buckets from histogram and summary metrics, leaving only the sum and count components. | `true` |
 | `sumologic.metrics.allowHistogramRegex` | Allowlist for Histogram metrics, including the buckets | `"^$"` |
 | `sumologic.metrics.sourceType` | The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`. | `otlp` |
+| `sumologic.metrics.useRoutingConnectors` | Use the routing connector instead of the routing processor for metrics collection. Takes effect only when `sumologic.metrics.sourceType` is `http`. | `false` |
 | `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first.
Please contact your Sumo representative for activation details_ | `true` | | `sumologic.traces.spans_per_request` | Maximum number of spans sent in single batch | `100` | | `sumologic.traces.sourceType` | The type of the Sumo Logic source being used for traces ingestion. Can be `http` for [HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/traces/) or `otlp` for [OTLP/HTTP Source](https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/otlp/). | `otlp` | diff --git a/deploy/helm/sumologic/conf/metrics/otelcol/config.yaml b/deploy/helm/sumologic/conf/metrics/otelcol/config.yaml index 2669922d31..22c357ab32 100644 --- a/deploy/helm/sumologic/conf/metrics/otelcol/config.yaml +++ b/deploy/helm/sumologic/conf/metrics/otelcol/config.yaml @@ -1,3 +1,8 @@ +{{- if and (.Values.sumologic.metrics.useRoutingConnectors) (eq .Values.sumologic.metrics.sourceType "http") }} +connectors: +{{ tpl (.Files.Get "conf/metrics/otelcol/connectors.yaml") . | indent 2 }} +{{- end}} + exporters: {{ tpl (.Files.Get "conf/metrics/otelcol/exporters.yaml") . | indent 2 }} @@ -53,6 +58,67 @@ service: pipelines: metrics: {{ tpl (.Files.Get "conf/metrics/otelcol/pipeline.yaml") . | indent 6 }} +{{- if and (.Values.sumologic.metrics.useRoutingConnectors) (eq .Values.sumologic.metrics.sourceType "http") }} + metrics/sumologic/default: + receivers: + - routing/default + exporters: + - sumologic/default +{{- if eq .Values.debug.metrics.metadata.print true }} + metrics/debug: + receivers: + - routing/default + exporters: + - debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) "true" }} + metrics/sumologic/sumologic-mock-default: + receivers: + - routing/default + exporters: + - sumologic/sumologic-mock-default + metrics/sumologic/sumologic-mock-http: + receivers: + - routing/default + exporters: + - sumologic/sumologic-mock-http +{{- end }} + metrics/apiserver: + receivers: + - routing/default + exporters: + - sumologic/apiserver + metrics/control_plane: + receivers: + - routing/default + exporters: + - sumologic/control_plane + metrics/controller: + receivers: + - routing/default + exporters: + - sumologic/controller + metrics/kubelet: + receivers: + - routing/default + exporters: + - sumologic/kubelet + metrics/node: + receivers: + - routing/default + exporters: + - sumologic/node + metrics/scheduler: + receivers: + - routing/default + exporters: + - sumologic/scheduler + metrics/state: + receivers: + - routing/default + exporters: + - sumologic/state +{{- end }} telemetry: logs: level: {{ .Values.metadata.metrics.logLevel }} diff --git a/deploy/helm/sumologic/conf/metrics/otelcol/connectors.yaml b/deploy/helm/sumologic/conf/metrics/otelcol/connectors.yaml new file mode 100644 index 0000000000..f9e59f2b05 --- /dev/null +++ b/deploy/helm/sumologic/conf/metrics/otelcol/connectors.yaml @@ -0,0 +1,82 @@ +routing/default: + default_pipelines: + - metrics/sumologic/default +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) "true" }} + - metrics/sumologic/sumologic-mock-default +{{- end }} + table: + - statement: route() where resource.attributes["job"] == "apiserver" + pipelines: + - metrics/apiserver +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) 
"true" }} + - metrics/sumologic/sumologic-mock-http +{{- end }} + - statement: route() where resource.attributes["job"] == "coredns" + pipelines: + - metrics/control_plane +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) "true" }} + - metrics/sumologic/sumologic-mock-http +{{- end }} + - statement: route() where resource.attributes["job"] == "kube-etcd" + pipelines: + - metrics/control_plane +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) "true" }} + - metrics/sumologic/sumologic-mock-http +{{- end }} + - statement: route() where resource.attributes["job"] == "kube-controller-manager" + pipelines: + - metrics/controller +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) "true" }} + - metrics/sumologic/sumologic-mock-http +{{- end }} + - statement: route() where resource.attributes["job"] == "kubelet" + pipelines: + - metrics/kubelet +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) "true" }} + - metrics/sumologic/sumologic-mock-http +{{- end }} + - statement: route() where resource.attributes["job"] == "node-exporter" + pipelines: + - metrics/node +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) "true" }} + - metrics/sumologic/sumologic-mock-http +{{- end }} + - statement: route() where resource.attributes["job"] == "kube-scheduler" + pipelines: + - metrics/scheduler +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) "true" }} + - metrics/sumologic/sumologic-mock-http +{{- end }} + - statement: route() where resource.attributes["job"] == "kube-state-metrics" + pipelines: + - metrics/state +{{- if eq .Values.debug.metrics.metadata.print true }} + - metrics/debug +{{- end }} +{{- if eq (include "sumologic-mock.forward-metrics-metadata" .) 
"true" }} + - metrics/sumologic/sumologic-mock-http +{{- end }} \ No newline at end of file diff --git a/deploy/helm/sumologic/conf/metrics/otelcol/pipeline.yaml b/deploy/helm/sumologic/conf/metrics/otelcol/pipeline.yaml index 8916effaf9..8ee1f1f5b3 100644 --- a/deploy/helm/sumologic/conf/metrics/otelcol/pipeline.yaml +++ b/deploy/helm/sumologic/conf/metrics/otelcol/pipeline.yaml @@ -1,4 +1,7 @@ exporters: +{{- if and (.Values.sumologic.metrics.useRoutingConnectors) (eq .Values.sumologic.metrics.sourceType "http") }} + - routing/default +{{- else }} - sumologic/default {{- if eq .Values.debug.metrics.metadata.print true }} - debug @@ -18,6 +21,8 @@ exporters: - sumologic/sumologic-mock-http {{- end }} {{- end }} +{{- end }} + processors: - memory_limiter - metricstransform @@ -45,7 +50,7 @@ processors: - filter/app_metrics {{- end }} - batch -{{- if eq .Values.sumologic.metrics.sourceType "http" }} +{{- if and (not (.Values.sumologic.metrics.useRoutingConnectors | default false)) (eq .Values.sumologic.metrics.sourceType "http") }} - routing {{- end }} receivers: diff --git a/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml b/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml index 5cb6d29fb2..324e2fc45c 100644 --- a/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml +++ b/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml @@ -165,7 +165,7 @@ resource/remove_k8s_pod_pod_name: - action: delete key: k8s.pod.pod_name -{{- if eq .Values.sumologic.metrics.sourceType "http" }} +{{- if and (not (.Values.sumologic.metrics.useRoutingConnectors | default false)) (eq .Values.sumologic.metrics.sourceType "http") }} routing: default_exporters: - sumologic/default diff --git a/deploy/helm/sumologic/values.yaml b/deploy/helm/sumologic/values.yaml index c5cad9d1dc..774da2582f 100644 --- a/deploy/helm/sumologic/values.yaml +++ b/deploy/helm/sumologic/values.yaml @@ -462,11 +462,14 @@ sumologic: fallbackExporters: [] table: [] ## - - ## ## exporter is name of the exporter - ## exporter: sumologic/otlp - ## ## statement is an OTTL condition which defines what data should be send to the exporter - ## ## see routing processor documentation for more details: - ## ## https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/aee4b75100530bce7edbf736fbcf76ac4f6ced6d/processor/routingprocessor/README.md#tech-preview-opentelemetry-transformation-language-statements-as-routing-conditions + ## ## exporters is an array of the exporter + ## exporters: + ## - sumologic/otlp + ## ## statement is an OTTL condition which defines what data should be sent to the exporters + ## ## see routing connector documentation for more details: + ## ## https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/aee4b75100530bce7edbf736fbcf76ac4f6ced6d/connector/routingconnector + ## routing connector expects you to define a pipeline, but here the user can just mention the exporter. + ## The helm deployment would convert exporters into correct pipelines and configure the connectors. ## statement: "" ## Fields to be created at Sumo Logic to ensure logs are tagged with @@ -764,6 +767,8 @@ sumologic: ## The type of source we send to in Sumo. The possible values are http and otlp. ## Consult the documentation for more information. sourceType: otlp + ## In order to use routing connectors, please enable this flag. + # useRoutingConnectors: true ### Traces configuration ## Set the enabled flag to false to disable traces from instrumentation ingestion. 
diff --git a/tests/helm/metrics_test.go b/tests/helm/metrics_test.go index 6555550ab2..3067d2dfd6 100644 --- a/tests/helm/metrics_test.go +++ b/tests/helm/metrics_test.go @@ -155,6 +155,7 @@ func TestMetadataSourceTypeOTLP(t *testing.T) { Rest map[string]interface{} `yaml:",inline"` } Processors map[string]interface{} + Connectors map[string]interface{} Service struct { Pipelines struct { Metrics struct { @@ -179,6 +180,7 @@ sumologic: assert.Equal(t, otelConfig.Exporters.Default.Endpoint, "${SUMO_ENDPOINT_DEFAULT_OTLP_METRICS_SOURCE}") assert.Len(t, otelConfig.Exporters.Rest, 0) assert.NotContains(t, otelConfig.Processors, "routing") + assert.NotContains(t, otelConfig.Connectors, "routing") assert.NotContains(t, otelConfig.Service.Pipelines.Metrics.Processors, "routing") assert.Equal(t, otelConfig.Service.Pipelines.Metrics.Exporters, []string{"sumologic/default"}) } @@ -187,18 +189,32 @@ func TestMetadataSourceTypeHTTP(t *testing.T) { t.Parallel() templatePath := "templates/metrics/otelcol/configmap.yaml" + type MetricsPipeline struct { + Receivers []string `yaml:"receivers,omitempty"` + Exporters []string `yaml:"exporters,omitempty"` + } + type OtelConfig struct { Exporters map[string]struct { MetricFormat string `yaml:"metric_format"` Endpoint string } `yaml:"exporters"` Processors map[string]interface{} + Connectors map[string]interface{} Service struct { Pipelines struct { Metrics struct { Processors []string `yaml:"processors"` Exporters []string `yaml:"exporters"` } + MetricsApiserver MetricsPipeline `yaml:"metrics/apiserver"` + MetricsControlPlane MetricsPipeline `yaml:"metrics/control_plane"` + MetricsController MetricsPipeline `yaml:"metrics/controller"` + MetricsKubelet MetricsPipeline `yaml:"metrics/kubelet"` + MetricsNode MetricsPipeline `yaml:"metrics/node"` + MetricsScheduler MetricsPipeline `yaml:"metrics/scheduler"` + MetricsState MetricsPipeline `yaml:"metrics/state"` + MetricsSumologicDefault MetricsPipeline `yaml:"metrics/sumologic/default"` } } } @@ -208,6 +224,7 @@ func TestMetadataSourceTypeHTTP(t *testing.T) { sumologic: metrics: sourceType: http + useRoutingConnectors: true ` otelConfigYaml := GetOtelConfigYaml(t, valuesYaml, templatePath) err := yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) @@ -217,6 +234,65 @@ sumologic: defaultExporter := otelConfig.Exporters["sumologic/default"] assert.Equal(t, "prometheus", defaultExporter.MetricFormat) assert.Equal(t, "${SUMO_ENDPOINT_DEFAULT_METRICS_SOURCE}", defaultExporter.Endpoint) + assert.Contains(t, otelConfig.Connectors, "routing/default") + assert.NotContains(t, otelConfig.Service.Pipelines.Metrics.Processors, "routing") + assert.Equal(t, otelConfig.Service.Pipelines.Metrics.Exporters, []string{"routing/default"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsApiserver.Receivers, []string{"routing/default"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsControlPlane.Receivers, []string{"routing/default"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsController.Receivers, []string{"routing/default"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsKubelet.Receivers, []string{"routing/default"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsNode.Receivers, []string{"routing/default"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsScheduler.Receivers, []string{"routing/default"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsState.Receivers, []string{"routing/default"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsSumologicDefault.Receivers, 
[]string{"routing/default"}) + + assert.Equal(t, otelConfig.Service.Pipelines.MetricsApiserver.Exporters, []string{"sumologic/apiserver"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsControlPlane.Exporters, []string{"sumologic/control_plane"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsController.Exporters, []string{"sumologic/controller"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsKubelet.Exporters, []string{"sumologic/kubelet"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsNode.Exporters, []string{"sumologic/node"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsScheduler.Exporters, []string{"sumologic/scheduler"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsState.Exporters, []string{"sumologic/state"}) + assert.Equal(t, otelConfig.Service.Pipelines.MetricsSumologicDefault.Exporters, []string{"sumologic/default"}) + + // useRoutingConnectors: false which means routing processor should be used. + valuesYaml = ` +sumologic: + metrics: + sourceType: http + useRoutingConnectors: false +` + otelConfigYaml = GetOtelConfigYaml(t, valuesYaml, templatePath) + err = yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) + require.NoError(t, err) + + assert.Contains(t, otelConfig.Processors, "routing") + assert.Contains(t, otelConfig.Service.Pipelines.Metrics.Processors, "routing") + assert.Equal( + t, + []string{ + "sumologic/default", + "sumologic/apiserver", + "sumologic/control_plane", + "sumologic/controller", + "sumologic/kubelet", + "sumologic/node", + "sumologic/scheduler", + "sumologic/state", + }, + otelConfig.Service.Pipelines.Metrics.Exporters, + ) + + // useRoutingConnectors flag not present, which means routing processor should be used. + valuesYaml = ` +sumologic: + metrics: + sourceType: http +` + otelConfigYaml = GetOtelConfigYaml(t, valuesYaml, templatePath) + err = yaml.Unmarshal([]byte(otelConfigYaml), &otelConfig) + require.NoError(t, err) + assert.Contains(t, otelConfig.Processors, "routing") assert.Contains(t, otelConfig.Service.Pipelines.Metrics.Processors, "routing") assert.Equal( diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom_routing_connector.input.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom_routing_connector.input.yaml new file mode 100644 index 0000000000..b7db033410 --- /dev/null +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom_routing_connector.input.yaml @@ -0,0 +1,13 @@ +sumologic: + ipv6mode: true + metrics: + sourceType: http + useRoutingConnectors: true +metadata: + metrics: + autoscaling: + enabled: false +debug: + metrics: + metadata: + print: true diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom_routing_connector.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom_routing_connector.output.yaml new file mode 100644 index 0000000000..7b8b44378e --- /dev/null +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom_routing_connector.output.yaml @@ -0,0 +1,378 @@ +--- +# Source: sumologic/templates/metrics/otelcol/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-otelcol-metrics + namespace: sumologic + labels: + app: RELEASE-NAME-sumologic-otelcol-metrics + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + config.yaml: | + connectors: + routing/default: + default_pipelines: + - metrics/sumologic/default + - metrics/debug + table: + - pipelines: + - metrics/apiserver + - metrics/debug + statement: route() where 
resource.attributes["job"] == "apiserver" + - pipelines: + - metrics/control_plane + - metrics/debug + statement: route() where resource.attributes["job"] == "coredns" + - pipelines: + - metrics/control_plane + - metrics/debug + statement: route() where resource.attributes["job"] == "kube-etcd" + - pipelines: + - metrics/controller + - metrics/debug + statement: route() where resource.attributes["job"] == "kube-controller-manager" + - pipelines: + - metrics/kubelet + - metrics/debug + statement: route() where resource.attributes["job"] == "kubelet" + - pipelines: + - metrics/node + - metrics/debug + statement: route() where resource.attributes["job"] == "node-exporter" + - pipelines: + - metrics/scheduler + - metrics/debug + statement: route() where resource.attributes["job"] == "kube-scheduler" + - pipelines: + - metrics/state + - metrics/debug + statement: route() where resource.attributes["job"] == "kube-state-metrics" + exporters: + debug: + verbosity: detailed + sumologic/apiserver: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_APISERVER_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/control_plane: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_CONTROL_PLANE_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/controller: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_CONTROLLER_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/default: + client: k8s_%CURRENT_CHART_VERSION% + decompose_otlp_histograms: true + endpoint: ${SUMO_ENDPOINT_DEFAULT_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/kubelet: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_KUBELET_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/node: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_NODE_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/scheduler: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_SCHEDULER_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/state: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_STATE_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + extensions: + file_storage: + compaction: + directory: /tmp + on_rebound: true + directory: /var/lib/storage/otc + timeout: 10s + health_check: + endpoint: '[${env:MY_POD_IP}]:13133' + pprof: {} + processors: + batch: + send_batch_max_size: 
2048 + send_batch_size: 1024 + timeout: 1s + filter/drop_unnecessary_metrics: + error_mode: ignore + metrics: + metric: + - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") + - (not IsMatch(name, "^$")) and (type == METRIC_DATA_TYPE_HISTOGRAM or type + == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY + or IsMatch(name, ".*_bucket")) + groupbyattrs: + keys: + - container + - namespace + - pod + - service + groupbyattrs/group_by_name: + keys: + - __name__ + - job + k8s_tagger: + extract: + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - daemonSetName + - deploymentName + - nodeName + - replicaSetName + - serviceName + - statefulSetName + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 90 + spike_limit_percentage: 20 + metricstransform: + transforms: + - action: update + include: ^prometheus_remote_write_(.*)$$ + match_type: regexp + new_name: $$1 + resource: + attributes: + - action: upsert + from_attribute: namespace + key: k8s.namespace.name + - action: delete + key: namespace + - action: upsert + from_attribute: pod + key: k8s.pod.name + - action: delete + key: pod + - action: upsert + from_attribute: container + key: k8s.container.name + - action: delete + key: container + - action: upsert + from_attribute: node + key: k8s.node.name + - action: delete + key: node + - action: upsert + from_attribute: service + key: prometheus_service + - action: delete + key: service + - action: upsert + from_attribute: service.name + key: job + - action: delete + key: service.name + - action: upsert + key: _origin + value: kubernetes + - action: upsert + key: cluster + value: kubernetes + resource/delete_source_metadata: + attributes: + - action: delete + key: _sourceCategory + - action: delete + key: _sourceHost + - action: delete + key: _sourceName + resource/remove_k8s_pod_pod_name: + attributes: + - action: delete + key: k8s.pod.pod_name + source: + collector: kubernetes + exclude: + k8s.namespace.name: "" + sumologic: + add_cloud_namespace: false + transform/remove_name: + error_mode: ignore + metric_statements: + - context: resource + statements: + - delete_key(attributes, "__name__") + transform/set_name: + error_mode: ignore + metric_statements: + - context: datapoint + statements: + - set(attributes["__name__"], metric.name) where IsMatch(metric.name, "^cloudprovider_.*") + receivers: + otlp: + protocols: + http: + endpoint: '[${env:MY_POD_IP}]:4318' + telegraf: + agent_config: | + [agent] + interval = "30s" + flush_interval = "30s" + omit_hostname = true + [[inputs.http_listener_v2]] + # wait longer than prometheus + read_timeout = "30s" + write_timeout = "30s" + service_address = ":9888" + data_format = "prometheusremotewrite" + paths = [ + "/prometheus.metrics" + ] + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + metrics: + exporters: + - routing/default + processors: + - memory_limiter + - metricstransform + - groupbyattrs + - resource + - k8s_tagger + - source + - sumologic + - resource/remove_k8s_pod_pod_name + - resource/delete_source_metadata + - transform/set_name + - groupbyattrs/group_by_name + - transform/remove_name + - filter/drop_unnecessary_metrics + - batch + receivers: + - telegraf + - otlp + metrics/apiserver: + exporters: + - sumologic/apiserver + receivers: + - routing/default + metrics/control_plane: + exporters: + - sumologic/control_plane + 
receivers: + - routing/default + metrics/controller: + exporters: + - sumologic/controller + receivers: + - routing/default + metrics/debug: + exporters: + - debug + receivers: + - routing/default + metrics/kubelet: + exporters: + - sumologic/kubelet + receivers: + - routing/default + metrics/node: + exporters: + - sumologic/node + receivers: + - routing/default + metrics/scheduler: + exporters: + - sumologic/scheduler + receivers: + - routing/default + metrics/state: + exporters: + - sumologic/state + receivers: + - routing/default + metrics/sumologic/default: + exporters: + - sumologic/default + receivers: + - routing/default + telemetry: + logs: + level: info + metrics: + level: normal + readers: + - pull: + exporter: + prometheus: + host: ${env:MY_POD_IP} + port: 8888 + without_scope_info: true + without_type_suffix: true + without_units: true diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http_routing_connector.input.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http_routing_connector.input.yaml new file mode 100644 index 0000000000..539de6d4c3 --- /dev/null +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http_routing_connector.input.yaml @@ -0,0 +1,12 @@ +debug: + sumologicMock: + enabled: true + metrics: + metadata: + print: true + forwardToSumologicMock: true + stopLogsIngestion: true +sumologic: + metrics: + sourceType: http + useRoutingConnectors: true diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http_routing_connector.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http_routing_connector.output.yaml new file mode 100644 index 0000000000..d91e7467af --- /dev/null +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http_routing_connector.output.yaml @@ -0,0 +1,420 @@ +--- +# Source: sumologic/templates/metrics/otelcol/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-otelcol-metrics + namespace: sumologic + labels: + app: RELEASE-NAME-sumologic-otelcol-metrics + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + config.yaml: | + connectors: + routing/default: + default_pipelines: + - metrics/sumologic/default + - metrics/debug + - metrics/sumologic/sumologic-mock-default + table: + - pipelines: + - metrics/apiserver + - metrics/debug + - metrics/sumologic/sumologic-mock-http + statement: route() where resource.attributes["job"] == "apiserver" + - pipelines: + - metrics/control_plane + - metrics/debug + - metrics/sumologic/sumologic-mock-http + statement: route() where resource.attributes["job"] == "coredns" + - pipelines: + - metrics/control_plane + - metrics/debug + - metrics/sumologic/sumologic-mock-http + statement: route() where resource.attributes["job"] == "kube-etcd" + - pipelines: + - metrics/controller + - metrics/debug + - metrics/sumologic/sumologic-mock-http + statement: route() where resource.attributes["job"] == "kube-controller-manager" + - pipelines: + - metrics/kubelet + - metrics/debug + - metrics/sumologic/sumologic-mock-http + statement: route() where resource.attributes["job"] == "kubelet" + - pipelines: + - metrics/node + - metrics/debug + - metrics/sumologic/sumologic-mock-http + statement: route() where resource.attributes["job"] == "node-exporter" + - pipelines: + - metrics/scheduler + - metrics/debug + - 
metrics/sumologic/sumologic-mock-http + statement: route() where resource.attributes["job"] == "kube-scheduler" + - pipelines: + - metrics/state + - metrics/debug + - metrics/sumologic/sumologic-mock-http + statement: route() where resource.attributes["job"] == "kube-state-metrics" + exporters: + debug: + verbosity: detailed + sumologic/apiserver: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_APISERVER_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/control_plane: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_CONTROL_PLANE_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/controller: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_CONTROLLER_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/default: + client: k8s_%CURRENT_CHART_VERSION% + decompose_otlp_histograms: true + endpoint: ${SUMO_ENDPOINT_DEFAULT_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/kubelet: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_KUBELET_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/node: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_NODE_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/scheduler: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_SCHEDULER_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/state: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: ${SUMO_ENDPOINT_STATE_METRICS_SOURCE} + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/sumologic-mock-default: + client: k8s_%CURRENT_CHART_VERSION% + decompose_otlp_histograms: true + endpoint: http://RELEASE-NAME-sumologic-mock.sumologic.svc.cluster.local.:3000/receiver + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + sumologic/sumologic-mock-http: + client: k8s_%CURRENT_CHART_VERSION% + endpoint: http://RELEASE-NAME-sumologic-mock.sumologic.svc.cluster.local.:3000/receiver + max_request_body_size: 16777216 + metric_format: prometheus + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 10000 + storage: file_storage + timeout: 30s + extensions: + file_storage: + compaction: + directory: /tmp + on_rebound: true + directory: /var/lib/storage/otc + timeout: 10s + health_check: + endpoint: ${env:MY_POD_IP}:13133 + pprof: 
{} + processors: + batch: + send_batch_max_size: 2048 + send_batch_size: 1024 + timeout: 1s + filter/drop_unnecessary_metrics: + error_mode: ignore + metrics: + metric: + - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") + - (not IsMatch(name, "^$")) and (type == METRIC_DATA_TYPE_HISTOGRAM or type + == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY + or IsMatch(name, ".*_bucket")) + groupbyattrs: + keys: + - container + - namespace + - pod + - service + groupbyattrs/group_by_name: + keys: + - __name__ + - job + k8s_tagger: + extract: + delimiter: _ + labels: + - key: '*' + tag_name: pod_labels_%s + metadata: + - daemonSetName + - deploymentName + - nodeName + - replicaSetName + - serviceName + - statefulSetName + owner_lookup_enabled: true + passthrough: false + pod_association: + - from: build_hostname + memory_limiter: + check_interval: 5s + limit_percentage: 90 + spike_limit_percentage: 20 + metricstransform: + transforms: + - action: update + include: ^prometheus_remote_write_(.*)$$ + match_type: regexp + new_name: $$1 + resource: + attributes: + - action: upsert + from_attribute: namespace + key: k8s.namespace.name + - action: delete + key: namespace + - action: upsert + from_attribute: pod + key: k8s.pod.name + - action: delete + key: pod + - action: upsert + from_attribute: container + key: k8s.container.name + - action: delete + key: container + - action: upsert + from_attribute: node + key: k8s.node.name + - action: delete + key: node + - action: upsert + from_attribute: service + key: prometheus_service + - action: delete + key: service + - action: upsert + from_attribute: service.name + key: job + - action: delete + key: service.name + - action: upsert + key: _origin + value: kubernetes + - action: upsert + key: cluster + value: kubernetes + resource/delete_source_metadata: + attributes: + - action: delete + key: _sourceCategory + - action: delete + key: _sourceHost + - action: delete + key: _sourceName + resource/remove_k8s_pod_pod_name: + attributes: + - action: delete + key: k8s.pod.pod_name + source: + collector: kubernetes + exclude: + k8s.namespace.name: "" + sumologic: + add_cloud_namespace: false + transform/remove_name: + error_mode: ignore + metric_statements: + - context: resource + statements: + - delete_key(attributes, "__name__") + transform/set_name: + error_mode: ignore + metric_statements: + - context: datapoint + statements: + - set(attributes["__name__"], metric.name) where IsMatch(metric.name, "^cloudprovider_.*") + receivers: + otlp: + protocols: + http: + endpoint: ${env:MY_POD_IP}:4318 + telegraf: + agent_config: | + [agent] + interval = "30s" + flush_interval = "30s" + omit_hostname = true + [[inputs.http_listener_v2]] + # wait longer than prometheus + read_timeout = "30s" + write_timeout = "30s" + service_address = ":9888" + data_format = "prometheusremotewrite" + paths = [ + "/prometheus.metrics" + ] + service: + extensions: + - health_check + - file_storage + - pprof + pipelines: + metrics: + exporters: + - routing/default + processors: + - memory_limiter + - metricstransform + - groupbyattrs + - resource + - k8s_tagger + - source + - sumologic + - resource/remove_k8s_pod_pod_name + - resource/delete_source_metadata + - transform/set_name + - groupbyattrs/group_by_name + - transform/remove_name + - filter/drop_unnecessary_metrics + - batch + receivers: + - telegraf + - otlp + metrics/apiserver: + exporters: + - sumologic/apiserver + receivers: + - routing/default + metrics/control_plane: + 
exporters: + - sumologic/control_plane + receivers: + - routing/default + metrics/controller: + exporters: + - sumologic/controller + receivers: + - routing/default + metrics/debug: + exporters: + - debug + receivers: + - routing/default + metrics/kubelet: + exporters: + - sumologic/kubelet + receivers: + - routing/default + metrics/node: + exporters: + - sumologic/node + receivers: + - routing/default + metrics/scheduler: + exporters: + - sumologic/scheduler + receivers: + - routing/default + metrics/state: + exporters: + - sumologic/state + receivers: + - routing/default + metrics/sumologic/default: + exporters: + - sumologic/default + receivers: + - routing/default + metrics/sumologic/sumologic-mock-default: + exporters: + - sumologic/sumologic-mock-default + receivers: + - routing/default + metrics/sumologic/sumologic-mock-http: + exporters: + - sumologic/sumologic-mock-http + receivers: + - routing/default + telemetry: + logs: + level: info + metrics: + level: normal + readers: + - pull: + exporter: + prometheus: + host: ${env:MY_POD_IP} + port: 8888 + without_scope_info: true + without_type_suffix: true + without_units: true
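
For reference, the smallest values override that opts a cluster into the new behaviour mirrors the custom_routing_connector.input.yaml fixture added above (minus its ipv6 and debug settings). The sketch below is illustrative only: useRoutingConnectors has an effect only together with sourceType: http, since both the connector template and the per-source pipelines are gated on that pair of conditions, and with sourceType: otlp neither the routing processor nor the routing connector is rendered.

# Illustrative opt-in values (a sketch based on the goldenfile inputs above, not a complete values.yaml)
sumologic:
  metrics:
    sourceType: http            # the routing connector is only rendered for the http source type
    useRoutingConnectors: true  # feature flag added by this change; defaults to false

With these values the rendered collector config contains the routing/default connector plus the metrics/apiserver, metrics/control_plane, metrics/controller, metrics/kubelet, metrics/node, metrics/scheduler, metrics/state and metrics/sumologic/default pipelines shown in the golden output files, in place of the routing processor entry in the single metrics pipeline.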