From d90ef9b00697c9c55f5f44acbf53229df56e625b Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 2 Aug 2025 00:01:06 +0000 Subject: [PATCH 1/5] Initial plan From 4180b79e5e4e0794db4c25fe4d70ac8ae0fbb7f0 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 2 Aug 2025 00:17:21 +0000 Subject: [PATCH 2/5] feat: Add support for Resource attributes in ETW logs exporter Co-authored-by: cijothomas <5232798+cijothomas@users.noreply.github.com> --- opentelemetry-etw-logs/CHANGELOG.md | 9 ++ opentelemetry-etw-logs/src/exporter/mod.rs | 90 +++++++++++++-- .../src/exporter/options.rs | 18 +++ opentelemetry-etw-logs/src/exporter/part_c.rs | 11 +- opentelemetry-etw-logs/src/lib.rs | 59 ++++++++++ opentelemetry-etw-logs/src/processor.rs | 105 ++++++++++++++++++ 6 files changed, 283 insertions(+), 9 deletions(-) diff --git a/opentelemetry-etw-logs/CHANGELOG.md b/opentelemetry-etw-logs/CHANGELOG.md index 01b99c1e1..c6b00b56c 100644 --- a/opentelemetry-etw-logs/CHANGELOG.md +++ b/opentelemetry-etw-logs/CHANGELOG.md @@ -2,6 +2,15 @@ ## vNext +- Added a `with_resource_attributes` method to the processor builder, allowing + users to specify which resource attribute keys are exported with each log + record. + - By default, the Resource attributes `"service.name"` and + `"service.instance.id"` continue to be exported as `cloud.roleName` and + `cloud.roleInstance`. + - This feature enables exporting additional resource attributes beyond the + defaults. + ## v0.9.1 - Added `Processor::builder_etw_compat_only()` method that builds a processor using a provider name that is fully compatible with ETW requirements (dropping UserEvents provider name compatibility) by allowing hyphens (`-`). diff --git a/opentelemetry-etw-logs/src/exporter/mod.rs b/opentelemetry-etw-logs/src/exporter/mod.rs index 92b82b609..4bc490bb5 100644 --- a/opentelemetry-etw-logs/src/exporter/mod.rs +++ b/opentelemetry-etw-logs/src/exporter/mod.rs @@ -1,12 +1,14 @@ use std::cell::RefCell; +use std::collections::HashSet; use std::fmt::Debug; use std::pin::Pin; use std::sync::Arc; +use std::borrow::Cow; use tracelogging_dynamic as tld; use opentelemetry::logs::Severity; -use opentelemetry::Key; +use opentelemetry::{Key, Value, logs::AnyValue}; use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult}; pub(crate) mod common; @@ -26,12 +28,14 @@ thread_local! 
{
 struct Resource {
     pub cloud_role: Option<String>,
     pub cloud_role_instance: Option<String>,
+    pub attributes_from_resource: Vec<(Key, AnyValue)>,
 }

 pub(crate) struct ETWExporter {
     provider: Pin<Arc<tld::Provider>>,
     resource: Resource,
     options: Options,
+    resource_attribute_keys: HashSet<Cow<'static, str>>,
 }

 fn enabled_callback_noop(
@@ -65,9 +69,12 @@ impl ETWExporter {
             provider.as_ref().register();
         }

+        let resource_attribute_keys = options.resource_attribute_keys().clone();
+
         ETWExporter {
             provider,
             resource: Default::default(),
+            resource_attribute_keys,
             options,
         }
     }
@@ -110,7 +117,7 @@ impl ETWExporter {

         part_a::populate_part_a(event, &self.resource, log_record, field_tag);

-        let event_id = part_c::populate_part_c(event, log_record, field_tag);
+        let event_id = part_c::populate_part_c(event, log_record, &self.resource, field_tag);

         part_b::populate_part_b(event, log_record, otel_level, event_id);
@@ -150,12 +157,23 @@ impl opentelemetry_sdk::logs::LogExporter for ETWExporter {
     }

     fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {
-        self.resource.cloud_role = resource
-            .get(&Key::from_static_str("service.name"))
-            .map(|v| v.to_string());
-        self.resource.cloud_role_instance = resource
-            .get(&Key::from_static_str("service.instance.id"))
-            .map(|v| v.to_string());
+        // Clear previous resource attributes
+        self.resource.attributes_from_resource.clear();
+
+        // Process resource attributes
+        for (key, value) in resource.iter() {
+            // Special handling for cloud role and instance
+            // as they are used in PartA of the Common Schema format.
+            if key.as_str() == "service.name" {
+                self.resource.cloud_role = Some(value.to_string());
+            } else if key.as_str() == "service.instance.id" {
+                self.resource.cloud_role_instance = Some(value.to_string());
+            } else if self.resource_attribute_keys.contains(key.as_str()) {
+                self.resource.attributes_from_resource
+                    .push((key.clone(), val_to_any_value(value)));
+            }
+            // Other attributes are ignored
+        }
     }

     fn shutdown(&self) -> OTelSdkResult {
@@ -169,6 +187,16 @@ impl opentelemetry_sdk::logs::LogExporter for ETWExporter {
     }
 }

+fn val_to_any_value(val: &Value) -> AnyValue {
+    match val {
+        Value::Bool(b) => AnyValue::Boolean(*b),
+        Value::I64(i) => AnyValue::Int(*i),
+        Value::F64(f) => AnyValue::Double(*f),
+        Value::String(s) => AnyValue::String(s.clone()),
+        _ => AnyValue::String("".into()),
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use opentelemetry_sdk::logs::LogExporter;
@@ -224,6 +252,52 @@ mod tests {
         assert!(result.is_ok());
     }

+    #[test]
+    fn test_event_resources_with_custom_attributes() {
+        use opentelemetry::logs::LogRecord;
+        use opentelemetry::KeyValue;
+
+        let mut log_record = common::test_utils::new_sdk_log_record();
+        log_record.set_event_name("event-name");
+
+        // Create exporter with custom resource attributes
+        let options = Options::new("test_provider")
+            .with_resource_attributes(vec!["custom_attribute1", "custom_attribute2"]);
+
+        let mut exporter = ETWExporter::new(options);
+
+        exporter.set_resource(
+            &opentelemetry_sdk::Resource::builder()
+                .with_attributes([
+                    KeyValue::new("service.name", "test-service"),
+                    KeyValue::new("service.instance.id", "test-instance"),
+                    KeyValue::new("custom_attribute1", "value1"),
+                    KeyValue::new("custom_attribute2", "value2"),
+                    KeyValue::new("custom_attribute3", "value3"), // This should be ignored
+                ])
+                .build(),
+        );

+        // Verify that only the configured attributes are stored
+        assert_eq!(exporter.resource.cloud_role, Some("test-service".to_string()));
+        assert_eq!(exporter.resource.cloud_role_instance, Some("test-instance".to_string()));
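+        // custom_attribute3 was never opted in via with_resource_attributes, so
+        // only the two configured keys should have been copied over: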
+        assert_eq!(exporter.resource.attributes_from_resource.len(), 2);
+
+        // Check that the correct attributes are stored
+        let attrs: std::collections::HashMap<String, String> = exporter.resource.attributes_from_resource
+            .iter()
+            .map(|(k, v)| (k.as_str().to_string(), format!("{:?}", v)))
+            .collect();
+        assert!(attrs.contains_key("custom_attribute1"));
+        assert!(attrs.contains_key("custom_attribute2"));
+        assert!(!attrs.contains_key("custom_attribute3"));
+
+        let instrumentation = common::test_utils::new_instrumentation_scope();
+        let result = exporter.export_log_data(&log_record, &instrumentation);
+
+        assert!(result.is_ok());
+    }
+
     #[test]
     fn test_debug() {
         let exporter = common::test_utils::new_etw_exporter();
diff --git a/opentelemetry-etw-logs/src/exporter/options.rs b/opentelemetry-etw-logs/src/exporter/options.rs
index 55d5f0d9f..dad00ffea 100644
--- a/opentelemetry-etw-logs/src/exporter/options.rs
+++ b/opentelemetry-etw-logs/src/exporter/options.rs
@@ -1,5 +1,6 @@
 use opentelemetry_sdk::logs::SdkLogRecord;
 use std::borrow::Cow;
+use std::collections::HashSet;
 use std::error::Error;

 type BoxedEventNameCallback = Box;
@@ -8,6 +9,7 @@ type BoxedEventNameCallback = Box;
 pub(crate) struct Options {
     provider_name: Cow<'static, str>,
     event_name_callback: Option<BoxedEventNameCallback>,
+    resource_attribute_keys: HashSet<Cow<'static, str>>,
 }

 impl Options {
@@ -15,6 +17,7 @@
         Options {
             provider_name: provider_name.into(),
             event_name_callback: None,
+            resource_attribute_keys: HashSet::new(),
         }
     }
@@ -23,6 +26,21 @@
         &self.provider_name
     }

+    /// Returns the resource attribute keys that will be exported with each log record.
+    pub(crate) fn resource_attribute_keys(&self) -> &HashSet<Cow<'static, str>> {
+        &self.resource_attribute_keys
+    }
+
+    /// Sets the resource attributes for the exporter.
+    pub(crate) fn with_resource_attributes<I, S>(mut self, attributes: I) -> Self
+    where
+        I: IntoIterator<Item = S>,
+        S: Into<Cow<'static, str>>,
+    {
+        self.resource_attribute_keys = attributes.into_iter().map(|s| s.into()).collect();
+        self
+    }
+
     /// Returns the default event name that will be used for the ETW events.
     pub(crate) fn default_event_name(&self) -> &str {
         "Log"
diff --git a/opentelemetry-etw-logs/src/exporter/part_c.rs b/opentelemetry-etw-logs/src/exporter/part_c.rs
index ccbdb5c4b..d82cd88eb 100644
--- a/opentelemetry-etw-logs/src/exporter/part_c.rs
+++ b/opentelemetry-etw-logs/src/exporter/part_c.rs
@@ -6,6 +6,7 @@ pub(crate) const EVENT_ID: &str = "event_id";
 pub(crate) fn populate_part_c(
     event: &mut tld::EventBuilder,
     log_record: &opentelemetry_sdk::logs::SdkLogRecord,
+    resource: &super::Resource,
     field_tag: u32,
 ) -> Option {
     //populate CS PartC
@@ -25,9 +26,17 @@
         }
     }

+    // Count resource attributes
+    cs_c_count += resource.attributes_from_resource.len();
+
     // If there are additional PartC attributes, add them to the event
     if cs_c_count > 0 {
-        event.add_struct("PartC", cs_c_count, field_tag);
+        event.add_struct("PartC", cs_c_count.try_into().unwrap_or(u8::MAX), field_tag);
+
+        // Add resource attributes first
+        for (key, value) in &resource.attributes_from_resource {
+            super::common::add_attribute_to_event(event, key, value);
+        }

         // TODO: This 2nd iteration is not optimal, and can be optimized
         for (key, value) in log_record.attributes_iter() {
diff --git a/opentelemetry-etw-logs/src/lib.rs b/opentelemetry-etw-logs/src/lib.rs
index 2fc6c3c0f..87a6d0269 100644
--- a/opentelemetry-etw-logs/src/lib.rs
+++ b/opentelemetry-etw-logs/src/lib.rs
@@ -1,5 +1,64 @@
 //! The ETW exporter will enable applications to use OpenTelemetry API
 //!
to capture the telemetry events, and write them to the ETW subsystem. +//! +//! ## Resource Attribute Handling +//! +//! **Important**: By default, resource attributes are NOT exported with log records. +//! The ETW exporter only automatically exports these specific resource attributes: +//! +//! - **`service.name`** → Exported as `cloud.roleName` in PartA of Common Schema +//! - **`service.instance.id`** → Exported as `cloud.roleInstance` in PartA of Common Schema +//! +//! All other resource attributes are ignored unless explicitly specified. +//! +//! ### Opting in to Additional Resource Attributes +//! +//! To export additional resource attributes, use the `with_resource_attributes()` method: +//! +//! ```rust +//! use opentelemetry_sdk::logs::SdkLoggerProvider; +//! use opentelemetry_sdk::Resource; +//! use opentelemetry_etw_logs::Processor; +//! use opentelemetry::KeyValue; +//! +//! let etw_processor = Processor::builder("myprovider") +//! // Only export specific resource attributes +//! .with_resource_attributes(["custom_attribute1", "custom_attribute2"]) +//! .build() +//! .unwrap(); +//! +//! let provider = SdkLoggerProvider::builder() +//! .with_resource( +//! Resource::builder_empty() +//! .with_service_name("example") +//! .with_attribute(KeyValue::new("custom_attribute1", "value1")) +//! .with_attribute(KeyValue::new("custom_attribute2", "value2")) +//! .with_attribute(KeyValue::new("custom_attribute3", "value3")) // This won't be exported +//! .build(), +//! ) +//! .with_log_processor(etw_processor) +//! .build(); +//! ``` +//! +//! ### Performance Considerations for ETW +//! +//! **Warning**: Each specified resource attribute will be serialized and sent +//! with EVERY log record. This is different from OTLP exporters where resource +//! attributes are serialized once per batch. Consider the performance impact +//! when selecting which attributes to export. +//! +//! **Recommendation**: Be selective about which resource attributes to export. +//! Since ETW writes to a local kernel buffer and requires a local +//! listener/agent, the agent can often deduce many resource attributes without +//! requiring them to be sent with each log: +//! +//! - **Infrastructure attributes** (datacenter, region, availability zone) can +//! be determined by the local agent. +//! - **Host attributes** (hostname, IP address, OS version) are available locally. +//! - **Deployment attributes** (environment, cluster) may be known to the agent. +//! +//! Focus on attributes that are truly specific to your application instance +//! and cannot be easily determined by the local agent. #![warn(missing_debug_implementations, missing_docs)] diff --git a/opentelemetry-etw-logs/src/processor.rs b/opentelemetry-etw-logs/src/processor.rs index 1311ef351..444b225fa 100644 --- a/opentelemetry-etw-logs/src/processor.rs +++ b/opentelemetry-etw-logs/src/processor.rs @@ -2,6 +2,7 @@ use opentelemetry::InstrumentationScope; use opentelemetry_sdk::error::OTelSdkResult; use opentelemetry_sdk::logs::{LogBatch, LogExporter, SdkLogRecord}; use opentelemetry_sdk::Resource; +use std::borrow::Cow; use std::error::Error; use std::fmt::Debug; @@ -153,6 +154,44 @@ impl ProcessorBuilder { self } + /// Sets the resource attributes for the processor. + /// + /// This specifies which resource attributes should be exported with each log record. + /// + /// # Performance Considerations + /// + /// **Warning**: Each specified resource attribute will be serialized and sent + /// with EVERY log record. 
This is different from OTLP exporters where resource
+    /// attributes are serialized once per batch. Consider the performance impact
+    /// when selecting which attributes to export.
+    ///
+    /// # Best Practices for ETW
+    ///
+    /// **Recommendation**: Be selective about which resource attributes to export.
+    /// Since ETW writes to a local kernel buffer and requires a local
+    /// listener/agent, the agent can often deduce many resource attributes without
+    /// requiring them to be sent with each log:
+    ///
+    /// - **Infrastructure attributes** (datacenter, region, availability zone) can
+    ///   be determined by the local agent.
+    /// - **Host attributes** (hostname, IP address, OS version) are available locally.
+    /// - **Deployment attributes** (environment, cluster) may be known to the agent.
+    ///
+    /// Focus on attributes that are truly specific to your application instance
+    /// and cannot be easily determined by the local agent.
+    ///
+    /// Nevertheless, if there are attributes that are fixed and must be emitted
+    /// with every log, modeling them as Resource attributes and using this method
+    /// is much more efficient than emitting them explicitly with every log.
+    pub fn with_resource_attributes<I, S>(mut self, attributes: I) -> Self
+    where
+        I: IntoIterator<Item = S>,
+        S: Into<Cow<'static, str>>,
+    {
+        self.options = self.options.with_resource_attributes(attributes);
+        self
+    }
+
     /// Builds the processor with given options, returning `Error` if it fails.
     pub fn build(self) -> Result<Processor, Box<dyn Error>> {
         self.validate()?;
@@ -310,6 +349,42 @@ mod tests {
         });
     }

+    #[test]
+    fn tracing_integration_test_with_resource_attributes() {
+        use opentelemetry::KeyValue;
+        use opentelemetry_appender_tracing::layer;
+        use opentelemetry_sdk::Resource;
+        use tracing::error;
+        use tracing_subscriber::prelude::*;
+
+        let processor = Processor::builder("provider_name")
+            .with_resource_attributes(["custom_attribute1", "custom_attribute2"])
+            .build()
+            .unwrap();
+
+        let logger_provider = SdkLoggerProvider::builder()
+            .with_resource(
+                Resource::builder()
+                    .with_service_name("test-service")
+                    .with_attribute(KeyValue::new("custom_attribute1", "value1"))
+                    .with_attribute(KeyValue::new("custom_attribute2", "value2"))
+                    .with_attribute(KeyValue::new("custom_attribute3", "value3")) // Should be ignored
+                    .build(),
+            )
+            .with_log_processor(processor)
+            .build();
+
+        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
+        let _guard = tracing_subscriber::registry().with(layer).set_default();
+
+        error!(
+            name: "event-name",
+            event_id = 20,
+            user_name = "otel user",
+            user_email = "otel@opentelemetry.io"
+        );
+    }
+
     #[test]
     fn test_validate_empty_name() {
         assert_eq!(
@@ -427,4 +502,34 @@
         );
         assert!(result.is_ok());
     }
+
+    #[test]
+    fn test_with_resource_attributes() {
+        use opentelemetry::KeyValue;
+        use opentelemetry_sdk::logs::LogProcessor;
+        use opentelemetry_sdk::Resource;
+
+        let processor = Processor::builder("test_provider")
+            .with_resource_attributes(vec!["custom_attribute1", "custom_attribute2"])
+            .build()
+            .unwrap();
+
+        let mut processor = processor; // Make mutable for set_resource
+
+        let resource = Resource::builder()
+            .with_attributes([
+                KeyValue::new("service.name", "test-service"),
+                KeyValue::new("service.instance.id", "test-instance"),
+                KeyValue::new("custom_attribute1", "value1"),
+                KeyValue::new("custom_attribute2", "value2"),
+                KeyValue::new("custom_attribute3", "value3"), // This should be ignored
+            ])
+            .build();
+
+        processor.set_resource(&resource);
+
+        // Test that the processor was created successfully
+        // The actual resource attributes will be tested in the exporter tests
+        assert!(processor.force_flush().is_ok());
+    }
 }

From 0a01dad76201eee60f8c71ed0f751d4fb390fdbd Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 6 Aug 2025 16:15:37 +0000
Subject: [PATCH 3/5] fix: Format code using cargo fmt to resolve lint failures

Co-authored-by: cijothomas <5232798+cijothomas@users.noreply.github.com>
---
 opentelemetry-etw-logs/src/exporter/mod.rs | 27 ++++++++++++++--------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/opentelemetry-etw-logs/src/exporter/mod.rs b/opentelemetry-etw-logs/src/exporter/mod.rs
index 4bc490bb5..c34828a95 100644
--- a/opentelemetry-etw-logs/src/exporter/mod.rs
+++ b/opentelemetry-etw-logs/src/exporter/mod.rs
@@ -1,14 +1,14 @@
+use std::borrow::Cow;
 use std::cell::RefCell;
 use std::collections::HashSet;
 use std::fmt::Debug;
 use std::pin::Pin;
 use std::sync::Arc;
-use std::borrow::Cow;

 use tracelogging_dynamic as tld;

 use opentelemetry::logs::Severity;
-use opentelemetry::{Key, Value, logs::AnyValue};
+use opentelemetry::{logs::AnyValue, Key, Value};
 use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};

 pub(crate) mod common;
@@ -159,7 +159,7 @@ impl opentelemetry_sdk::logs::LogExporter for ETWExporter {
     fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {
         // Clear previous resource attributes
         self.resource.attributes_from_resource.clear();
-
+
         // Process resource attributes
         for (key, value) in resource.iter() {
             // Special handling for cloud role and instance
@@ -169,7 +169,8 @@
             } else if key.as_str() == "service.instance.id" {
                 self.resource.cloud_role_instance = Some(value.to_string());
             } else if self.resource_attribute_keys.contains(key.as_str()) {
-                self.resource.attributes_from_resource
+                self.resource
+                    .attributes_from_resource
                     .push((key.clone(), val_to_any_value(value)));
             }
             // Other attributes are ignored
@@ -263,9 +264,9 @@ mod tests {
         // Create exporter with custom resource attributes
         let options = Options::new("test_provider")
             .with_resource_attributes(vec!["custom_attribute1", "custom_attribute2"]);
-
+
         let mut exporter = ETWExporter::new(options);
-
+
         exporter.set_resource(
             &opentelemetry_sdk::Resource::builder()
                 .with_attributes([
@@ -279,12 +280,20 @@
         );

         // Verify that only the configured attributes are stored
-        assert_eq!(exporter.resource.cloud_role, Some("test-service".to_string()));
-        assert_eq!(exporter.resource.cloud_role_instance, Some("test-instance".to_string()));
+        assert_eq!(
+            exporter.resource.cloud_role,
+            Some("test-service".to_string())
+        );
+        assert_eq!(
+            exporter.resource.cloud_role_instance,
+            Some("test-instance".to_string())
+        );
         assert_eq!(exporter.resource.attributes_from_resource.len(), 2);

         // Check that the correct attributes are stored
-        let attrs: std::collections::HashMap<String, String> = exporter.resource.attributes_from_resource
+        let attrs: std::collections::HashMap<String, String> = exporter
+            .resource
+            .attributes_from_resource
             .iter()
             .map(|(k, v)| (k.as_str().to_string(), format!("{:?}", v)))
             .collect();

From 2d0899e3ee0853bcc0b1ba6e60ac3999594c253a Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 5 Nov 2025 18:01:14 +0000
Subject: [PATCH 4/5] Merge branch 'main' into copilot/fix-409 to resolve
 conflicts

Co-authored-by: cijothomas <5232798+cijothomas@users.noreply.github.com>
---
 .github/workflows/ci.yml |
70 +- .github/workflows/codeql-analysis.yml | 10 +- .github/workflows/fossa.yml | 4 +- .github/workflows/ossf-scorecard.yml | 10 +- .github/workflows/pr_naming.yml | 2 +- Cargo.toml | 14 +- opentelemetry-aws/CHANGELOG.md | 6 + opentelemetry-aws/Cargo.toml | 2 +- opentelemetry-contrib/CHANGELOG.md | 4 + opentelemetry-contrib/Cargo.toml | 2 +- .../trace/propagator/binary/base64_format.rs | 8 +- .../propagator/binary/binary_propagator.rs | 14 +- .../propagator/trace_context_response.rs | 22 +- opentelemetry-datadog/CHANGELOG.md | 5 + opentelemetry-datadog/Cargo.toml | 4 +- .../benches/datadog_exporter.rs | 7 +- .../src/exporter/model/mod.rs | 7 +- opentelemetry-datadog/src/lib.rs | 28 +- opentelemetry-etw-logs/CHANGELOG.md | 4 + opentelemetry-etw-logs/Cargo.toml | 2 +- opentelemetry-etw-metrics/CHANGELOG.md | 6 + opentelemetry-etw-metrics/Cargo.toml | 12 +- opentelemetry-etw-metrics/README.md | 2 +- .../geneva-uploader-ffi/CHANGELOG.md | 20 + .../geneva-uploader-ffi/Cargo.toml | 24 +- .../geneva-uploader-ffi/README.md | 5 + .../geneva-uploader-ffi/examples/Makefile | 84 + .../examples/logs_example.c | 164 ++ .../examples/otlp_builder/Cargo.toml | 21 + .../examples/otlp_builder/src/builder.rs | 70 + .../examples/otlp_builder/src/lib.rs | 230 +++ .../examples/spans_example.c | 164 ++ .../include/geneva_errors.h | 49 + .../geneva-uploader-ffi/include/geneva_ffi.h | 204 +++ .../geneva-uploader-ffi/src/lib.rs | 1430 +++++++++++++++++ .../geneva-uploader/CHANGELOG.md | 26 + .../geneva-uploader/Cargo.toml | 22 +- .../geneva-uploader/README.md | 5 + .../geneva-uploader/src/bench.rs | 13 +- .../geneva-uploader/src/client.rs | 228 ++- .../src/config_service/client.rs | 500 +++++- .../geneva-uploader/src/config_service/mod.rs | 16 +- .../src/ingestion_service/mod.rs | 8 +- .../src/ingestion_service/uploader.rs | 75 +- .../geneva-uploader/src/lib.rs | 5 +- .../src/payload_encoder/central_blob.rs | 11 - .../src/payload_encoder/otlp_encoder.rs | 745 ++++++++- .../CHANGELOG.md | 20 + .../opentelemetry-exporter-geneva/Cargo.toml | 18 +- .../opentelemetry-exporter-geneva/README.md | 5 + .../examples/Dockerfile | 45 + .../examples/README.md | 360 +++++ .../examples/basic.rs | 78 +- .../examples/basic_msi_test.rs | 180 +++ .../examples/basic_workload_identity_test.rs | 159 ++ .../examples/trace_basic.rs | 207 +++ .../opentelemetry-exporter-geneva/src/lib.rs | 2 + .../src/logs/exporter.rs | 49 +- .../src/trace/exporter.rs | 91 ++ .../src/trace/mod.rs | 2 + .../CHANGELOG.md | 4 +- .../Cargo.toml | 2 +- .../CHANGELOG.md | 42 + .../Cargo.toml | 8 +- .../examples/axum-http-service/Cargo.toml | 2 +- .../examples/hyper-http-service/Cargo.toml | 2 +- .../src/lib.rs | 275 +++- opentelemetry-resource-detectors/CHANGELOG.md | 5 + opentelemetry-resource-detectors/Cargo.toml | 2 +- opentelemetry-stackdriver/CHANGELOG.md | 4 + opentelemetry-stackdriver/Cargo.toml | 2 +- opentelemetry-user-events-logs/CHANGELOG.md | 4 + opentelemetry-user-events-logs/Cargo.toml | 10 +- .../src/logs/processor.rs | 2 +- .../CHANGELOG.md | 5 + opentelemetry-user-events-metrics/Cargo.toml | 10 +- opentelemetry-user-events-trace/CHANGELOG.md | 4 + opentelemetry-user-events-trace/Cargo.toml | 6 +- stress/Cargo.toml | 12 +- stress/src/geneva_exporter.rs | 79 +- 80 files changed, 5669 insertions(+), 391 deletions(-) create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/CHANGELOG.md create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/README.md create mode 100644 
opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/Makefile create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/logs_example.c create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/Cargo.toml create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/src/builder.rs create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/src/lib.rs create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/spans_example.c create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/include/geneva_errors.h create mode 100644 opentelemetry-exporter-geneva/geneva-uploader-ffi/include/geneva_ffi.h create mode 100644 opentelemetry-exporter-geneva/geneva-uploader/CHANGELOG.md create mode 100644 opentelemetry-exporter-geneva/geneva-uploader/README.md create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/CHANGELOG.md create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/README.md create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/Dockerfile create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/README.md create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic_msi_test.rs create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic_workload_identity_test.rs create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/trace_basic.rs create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/trace/exporter.rs create mode 100644 opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/trace/mod.rs create mode 100644 opentelemetry-instrumentation-tower/CHANGELOG.md diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 97f170953..cb871d1e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ jobs: # test only stable version of Rust on Windows and MacOS include: - rust: stable - os: windows-2025 + os: windows-latest - rust: stable os: macos-latest - rust: stable @@ -34,7 +34,7 @@ jobs: continue-on-error: ${{ matrix.rust == 'beta' }} steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit @@ -45,10 +45,10 @@ jobs: sudo rm -rf /usr/local/lib/android sudo rm -rf /usr/share/dotnet df -h - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true - - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b + - uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 with: toolchain: ${{ matrix.rust }} components: rustfmt @@ -58,28 +58,28 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Test (Windows) - if: ${{ matrix.os == 'windows-2025'}} + if: ${{ matrix.os == 'windows-latest'}} run: ./scripts/test.ps1 shell: pwsh - name: Test (Unix) - if: ${{ matrix.os != 'windows-2025'}} + if: ${{ matrix.os != 'windows-latest'}} run: bash ./scripts/test.sh lint: strategy: matrix: # clippy must be run in every OS to lint platform-specific code - os: [ubuntu-latest, windows-2025, macos-latest, ubuntu-22.04-arm] + os: 
[ubuntu-latest, windows-latest, macos-latest, ubuntu-22.04-arm] runs-on: ${{ matrix.os }} steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true - - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b + - uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 with: toolchain: stable components: rustfmt,clippy @@ -89,11 +89,11 @@ jobs: - name: Format run: cargo fmt --all -- --check - name: Lint (Windows) - if: ${{ matrix.os == 'windows-2025'}} + if: ${{ matrix.os == 'windows-latest'}} run: ./scripts/lint.ps1 shell: pwsh - name: Lint (Unix) - if: ${{ matrix.os != 'windows-2025'}} + if: ${{ matrix.os != 'windows-latest'}} run: ./scripts/lint.sh msrv: strategy: @@ -103,17 +103,17 @@ jobs: continue-on-error: true steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true - - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b + - uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 with: toolchain: stable - - uses: taiki-e/install-action@d31232495ad76f47aad66e3501e47780b49f0f3e # v2.57.5 + - uses: taiki-e/install-action@47be02f2de8a32619316956f6117e150bdc6763f # v2.62.44 with: tool: cargo-msrv - name: Check MSRV for all crates @@ -123,12 +123,12 @@ jobs: continue-on-error: true # Prevent sudden announcement of a new advisory from failing ci steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: EmbarkStudios/cargo-deny-action@30f817c6f72275c6d54dc744fbca09ebc958599f # v2.0.12 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: EmbarkStudios/cargo-deny-action@f2ba7abc2abebaf185c833c3961145a3c275caad # v2.0.13 with: command: check advisories docs: @@ -136,12 +136,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 with: toolchain: stable - uses: arduino/setup-protoc@c65c819552d16ad3c9b72d9dfd5ba5237b9c906b # v3.0.0 @@ -157,14 +157,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner (Audit all outbound calls) - 
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true - - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b + - uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 with: toolchain: stable components: llvm-tools-preview @@ -172,7 +172,7 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: cargo install cargo-llvm-cov - uses: taiki-e/install-action@d31232495ad76f47aad66e3501e47780b49f0f3e # v2.57.5 + uses: taiki-e/install-action@47be02f2de8a32619316956f6117e150bdc6763f # v2.62.44 with: tool: cargo-llvm-cov - name: cargo generate-lockfile @@ -181,7 +181,7 @@ jobs: - name: cargo llvm-cov run: cargo llvm-cov --locked --all-features --workspace --lcov --output-path lcov.info - name: Upload to codecov.io - uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 + uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1 with: fail_ci_if_error: true cargo-machete: @@ -189,17 +189,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true - - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b + - uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 with: toolchain: stable - - uses: taiki-e/install-action@d31232495ad76f47aad66e3501e47780b49f0f3e # v2.57.5 + - uses: taiki-e/install-action@47be02f2de8a32619316956f6117e150bdc6763f # v2.62.44 with: tool: cargo-machete - name: cargo machete @@ -209,17 +209,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true - - uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b + - uses: dtolnay/rust-toolchain@6d653acede28d24f02e3cd41383119e8b1b35921 with: toolchain: stable - - uses: taiki-e/install-action@d31232495ad76f47aad66e3501e47780b49f0f3e # v2.57.5 + - uses: taiki-e/install-action@47be02f2de8a32619316956f6117e150bdc6763f # v2.62.44 with: tool: cargo-workspace-lints - name: cargo workspace-lints diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ee26b81a5..0c97b0f2c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -24,22 +24,22 @@ jobs: steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit 
- name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: submodules: true - name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 with: languages: rust - name: Autobuild - uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml index 61ff03fcf..a532772ad 100644 --- a/.github/workflows/fossa.yml +++ b/.github/workflows/fossa.yml @@ -13,11 +13,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0 with: diff --git a/.github/workflows/ossf-scorecard.yml b/.github/workflows/ossf-scorecard.yml index c59afd0e3..ce93c765b 100644 --- a/.github/workflows/ossf-scorecard.yml +++ b/.github/workflows/ossf-scorecard.yml @@ -21,15 +21,15 @@ jobs: id-token: write steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false - - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 + - uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif @@ -39,7 +39,7 @@ jobs: # uploads of run results in SARIF format to the repository Actions tab. # https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts - name: "Upload artifact" - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: SARIF file path: results.sarif @@ -48,6 +48,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 with: sarif_file: results.sarif diff --git a/.github/workflows/pr_naming.yml b/.github/workflows/pr_naming.yml index addfa94f7..9ecc2406d 100644 --- a/.github/workflows/pr_naming.yml +++ b/.github/workflows/pr_naming.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0 + uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 with: egress-policy: audit diff --git a/Cargo.toml b/Cargo.toml index 0813f8f9b..de89943b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,13 +29,13 @@ resolver = "2" debug = 1 [workspace.dependencies] -opentelemetry = "0.30" -opentelemetry-appender-tracing = "0.30" -opentelemetry-http = "0.30" -opentelemetry-proto = { version = "0.30", default-features = false } -opentelemetry_sdk = { version = "0.30", default-features = false } -opentelemetry-stdout = "0.30" -opentelemetry-semantic-conventions = { version = "0.30", features = [ +opentelemetry = "0.31" +opentelemetry-appender-tracing = "0.31" +opentelemetry-http = "0.31" +opentelemetry-proto = { version = "0.31", default-features = false } +opentelemetry_sdk = { version = "0.31", default-features = false } +opentelemetry-stdout = "0.31" +opentelemetry-semantic-conventions = { version = "0.31", features = [ "semconv_experimental", ] } criterion = "0.7" diff --git a/opentelemetry-aws/CHANGELOG.md b/opentelemetry-aws/CHANGELOG.md index aeec7337e..fea499a22 100644 --- a/opentelemetry-aws/CHANGELOG.md +++ b/opentelemetry-aws/CHANGELOG.md @@ -2,6 +2,12 @@ ## vNext +## v0.19.0 + +### Changed + +- Bump opentelemetry and opentelemetry_sdk versions to 0.31.0 + ## v0.18.0 ### Changed diff --git a/opentelemetry-aws/Cargo.toml b/opentelemetry-aws/Cargo.toml index da608ecf3..a69401ea3 100644 --- a/opentelemetry-aws/Cargo.toml +++ b/opentelemetry-aws/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-aws" -version = "0.18.0" +version = "0.19.0" description = "AWS exporters and propagators for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-aws" repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-aws" diff --git a/opentelemetry-contrib/CHANGELOG.md b/opentelemetry-contrib/CHANGELOG.md index 7e18e781e..1c8f06431 100644 --- a/opentelemetry-contrib/CHANGELOG.md +++ b/opentelemetry-contrib/CHANGELOG.md @@ -2,6 +2,10 @@ ## vNext +## v0.23.0 + +- Bump opentelemetry and opentelemetry_sdk versions to 0.31 + ## v0.22.0 - Bump opentelemetry and opentelemetry_sdk versions to 0.30 diff --git a/opentelemetry-contrib/Cargo.toml b/opentelemetry-contrib/Cargo.toml index 434bcbade..ca66a33b9 100644 --- a/opentelemetry-contrib/Cargo.toml +++ b/opentelemetry-contrib/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-contrib" -version = "0.22.0" +version = "0.23.0" description = "Rust contrib repo for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-contrib" repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-contrib" diff --git 
a/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs b/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs index 4a5fa0153..861ad59ac 100644 --- a/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs +++ b/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs @@ -49,13 +49,13 @@ mod tests { fn to_base64_data() -> Vec<(SpanContext, String)> { vec![ (SpanContext::new( - TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), - SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), + TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), + SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), "AABL+S81d7NNpqPOkp0ODkc2AQDwZ6oLqQK3AgE=".to_string() ), (SpanContext::new( - TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), - SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), + TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), + SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), "AABL+S81d7NNpqPOkp0ODkc2AQDwZ6oLqQK3AgA=".to_string() ), ] diff --git a/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs b/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs index a56df6d65..29871bf4e 100644 --- a/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs +++ b/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs @@ -95,16 +95,16 @@ mod tests { vec![ // Context with sampled (SpanContext::new( - TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), - SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), [ + TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), + SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), [ 0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36, 0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7, 0x02, 0x01, ]), // Context without sampled (SpanContext::new( - TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), - SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), [ + TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), + SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), [ 0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36, 0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7, 0x02, 0x00, @@ -118,19 +118,19 @@ mod tests { fn from_bytes_data() -> Vec<(SpanContext, Vec)> { vec![ // Future version of the proto - (SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), vec![ + (SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), vec![ 0x02, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36, 0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7, 0x02, 0x01, ]), // current version with sampled - (SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), vec![ + 
(SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), vec![ 0x02, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36, 0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7, 0x02, 0x01, ]), // valid context without option - (SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), vec![ + (SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), vec![ 0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36, 0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7, ]), diff --git a/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs b/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs index 51a2ec8dc..9429edf2d 100644 --- a/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs +++ b/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs @@ -146,14 +146,14 @@ mod tests { #[rustfmt::skip] fn extract_data() -> Vec<(&'static str, SpanContext)> { vec![ - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default())), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-08", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default())), - ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-XYZxsf09", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01-", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), - ("01-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", 
SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-08", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default())), + ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-XYZxsf09", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01-", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("01-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), ] } @@ -182,9 +182,9 @@ mod tests { #[rustfmt::skip] fn inject_data() -> Vec<(&'static str, SpanContext)> { vec![ - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::from_str("foo=bar").unwrap())), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::from_str("foo=bar").unwrap())), - ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::new(0xff), true, TraceState::from_str("foo=bar").unwrap())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::from_str("foo=bar").unwrap())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::from_str("foo=bar").unwrap())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from(0x00f0_67aa_0ba9_02b7), TraceFlags::new(0xff), true, TraceState::from_str("foo=bar").unwrap())), ("", SpanContext::empty_context()), ] } diff --git a/opentelemetry-datadog/CHANGELOG.md b/opentelemetry-datadog/CHANGELOG.md index ceafc107b..4aa54ab80 100644 --- a/opentelemetry-datadog/CHANGELOG.md +++ b/opentelemetry-datadog/CHANGELOG.md @@ -2,6 +2,11 @@ ## vNext +## v0.19.0 + +- Bump opentelemetry and opentelemetry_sdk versions to 0.31 +- Bump opentelemetry-http and 
opentelemetry-semantic-conventions versions to 0.31 + ## v0.18.0 - Bump opentelemetry and opentelemetry_sdk versions to 0.30 diff --git a/opentelemetry-datadog/Cargo.toml b/opentelemetry-datadog/Cargo.toml index 902f8bfbe..0d1a23073 100644 --- a/opentelemetry-datadog/Cargo.toml +++ b/opentelemetry-datadog/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-datadog" -version = "0.18.0" +version = "0.19.0" description = "Datadog exporters and propagators for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-datadog" repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-datadog" @@ -18,7 +18,7 @@ rustdoc-args = ["--cfg", "docsrs"] [features] default = ["intern-ahash"] agent-sampling = [] -reqwest-blocking-client = ["reqwest/blocking", "opentelemetry-http/reqwest"] +reqwest-blocking-client = ["reqwest/blocking", "opentelemetry-http/reqwest-blocking"] reqwest-client = ["reqwest", "opentelemetry-http/reqwest"] surf-client = ["dep:surf"] intern-ahash = ["ahash"] diff --git a/opentelemetry-datadog/benches/datadog_exporter.rs b/opentelemetry-datadog/benches/datadog_exporter.rs index 005b0b06d..bffac62bd 100644 --- a/opentelemetry-datadog/benches/datadog_exporter.rs +++ b/opentelemetry-datadog/benches/datadog_exporter.rs @@ -126,8 +126,8 @@ fn get_array_of_booleans(rng: &mut ThreadRng) -> Value { fn get_span(trace_id: u128, parent_span_id: u64, span_id: u64, rng: &mut ThreadRng) -> SpanData { let span_context = SpanContext::new( - TraceId::from_u128(trace_id), - SpanId::from_u64(span_id), + TraceId::from(trace_id), + SpanId::from(span_id), TraceFlags::default(), false, TraceState::default(), @@ -165,7 +165,8 @@ fn get_span(trace_id: u128, parent_span_id: u64, span_id: u64, rng: &mut ThreadR SpanData { span_context, - parent_span_id: SpanId::from_u64(parent_span_id), + parent_span_id: SpanId::from(parent_span_id), + parent_span_is_remote: false, span_kind: SpanKind::Client, name: "resource".into(), start_time, diff --git a/opentelemetry-datadog/src/exporter/model/mod.rs b/opentelemetry-datadog/src/exporter/model/mod.rs index 9b0ae66ed..e45dd34aa 100644 --- a/opentelemetry-datadog/src/exporter/model/mod.rs +++ b/opentelemetry-datadog/src/exporter/model/mod.rs @@ -215,8 +215,8 @@ pub(crate) mod tests { pub(crate) fn get_span(trace_id: u128, parent_span_id: u64, span_id: u64) -> trace::SpanData { let span_context = SpanContext::new( - TraceId::from_u128(trace_id), - SpanId::from_u64(span_id), + TraceId::from(trace_id), + SpanId::from(span_id), TraceFlags::default(), false, TraceState::default(), @@ -232,7 +232,8 @@ pub(crate) mod tests { trace::SpanData { span_context, - parent_span_id: SpanId::from_u64(parent_span_id), + parent_span_id: SpanId::from(parent_span_id), + parent_span_is_remote: false, span_kind: SpanKind::Client, name: "resource".into(), start_time, diff --git a/opentelemetry-datadog/src/lib.rs b/opentelemetry-datadog/src/lib.rs index 82bc3c5c8..aceb0d032 100644 --- a/opentelemetry-datadog/src/lib.rs +++ b/opentelemetry-datadog/src/lib.rs @@ -485,20 +485,20 @@ mod propagator { (vec![], SpanContext::empty_context()), (vec![(DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::empty_context()), (vec![(DATADOG_TRACE_ID_HEADER, "garbage")], SpanContext::empty_context()), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "garbage")], SpanContext::new(TraceId::from_u128(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), - 
(vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(false).build())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(true).build())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "garbage")], SpanContext::new(TraceId::from(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(false).build())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(true).build())), ]; #[cfg(not(feature = "agent-sampling"))] return vec![ (vec![], SpanContext::empty_context()), (vec![(DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::empty_context()), (vec![(DATADOG_TRACE_ID_HEADER, "garbage")], SpanContext::empty_context()), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "garbage")], SpanContext::new(TraceId::from_u128(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "garbage")], SpanContext::new(TraceId::from(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TraceFlags::default(), true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], 
SpanContext::new(TraceId::from(1234), SpanId::from(12), TraceFlags::SAMPLED, true, TraceState::default())), ]; } @@ -510,9 +510,9 @@ mod propagator { (vec![], SpanContext::new(TraceId::INVALID, SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(false).build())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(true).build())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(false).build())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(true).build())), ]; #[cfg(not(feature = "agent-sampling"))] return vec![ @@ -520,9 +520,9 @@ mod propagator { (vec![], SpanContext::new(TraceId::INVALID, SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from(1234), SpanId::from(12), 
TraceFlags::default(), true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from(1234), SpanId::from(12), TraceFlags::SAMPLED, true, TraceState::default())), ]; } diff --git a/opentelemetry-etw-logs/CHANGELOG.md b/opentelemetry-etw-logs/CHANGELOG.md index c6b00b56c..5bf38a514 100644 --- a/opentelemetry-etw-logs/CHANGELOG.md +++ b/opentelemetry-etw-logs/CHANGELOG.md @@ -11,6 +11,10 @@ - This feature enables exporting additional resource attributes beyond the defaults. +## v0.10.0 + +- Bump opentelemetry and opentelemetry_sdk versions to 0.31 + ## v0.9.1 - Added `Processor::builder_etw_compat_only()` method that builds a processor using a provider name that is fully compatible with ETW requirements (dropping UserEvents provider name compatibility) by allowing hyphens (`-`). diff --git a/opentelemetry-etw-logs/Cargo.toml b/opentelemetry-etw-logs/Cargo.toml index a343b2c72..408214d03 100644 --- a/opentelemetry-etw-logs/Cargo.toml +++ b/opentelemetry-etw-logs/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "opentelemetry-etw-logs" description = "OpenTelemetry logs exporter to ETW (Event Tracing for Windows)" -version = "0.9.1" +version = "0.10.0" edition = "2021" homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-etw-logs" repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-etw-logs" diff --git a/opentelemetry-etw-metrics/CHANGELOG.md b/opentelemetry-etw-metrics/CHANGELOG.md index d6bad9afe..9be9e3a15 100644 --- a/opentelemetry-etw-metrics/CHANGELOG.md +++ b/opentelemetry-etw-metrics/CHANGELOG.md @@ -2,6 +2,12 @@ ## vNext +## v0.10.0 + +- Bump opentelemetry and opentelemetry_sdk versions to 0.31 +- Bump opentelemetry-proto version to 0.31 +- Bump prost version to 0.14 + ## v0.9.0 Released 2025-Jun-19 diff --git a/opentelemetry-etw-metrics/Cargo.toml b/opentelemetry-etw-metrics/Cargo.toml index e8ddaa676..7b9ad91c7 100644 --- a/opentelemetry-etw-metrics/Cargo.toml +++ b/opentelemetry-etw-metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-etw-metrics" -version = "0.9.0" +version = "0.10.0" edition = "2021" description = "OpenTelemetry metrics exporter to ETW (Event Tracing for Windows)" homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-etw-metrics" @@ -11,17 +11,17 @@ license = "Apache-2.0" rust-version = "1.75.0" [dependencies] -opentelemetry = { version = "0.30", features = ["metrics"] } -opentelemetry_sdk = { version = "0.30", features = ["metrics"] } -opentelemetry-proto = { version = "0.30", features = ["gen-tonic", "metrics"] } -prost = "0.13" +opentelemetry = { version = "0.31", features = ["metrics"] } +opentelemetry_sdk = { version = "0.31", features = ["metrics"] } +opentelemetry-proto = { version = "0.31", features = ["gen-tonic", "metrics"], default-features = false } +prost = "0.14" tracelogging = "1.2.4" tracing = { version = "0.1", optional = true } [dev-dependencies] tokio = { version = "1.0", features = ["full"] } criterion = { workspace = true, features = ["html_reports"] } tracing-subscriber = { version = "0.3", features = ["env-filter","registry", "std", "fmt"] } -opentelemetry-proto = { version = "0.30", features = ["gen-tonic", "metrics", "gen-tonic-messages"] } +opentelemetry-proto = { version = "0.31", features = ["gen-tonic", "metrics", "gen-tonic-messages"], default-features = false } [features] 
internal-logs = ["tracing", "opentelemetry/internal-logs", "opentelemetry_sdk/internal-logs", "opentelemetry-proto/internal-logs"] diff --git a/opentelemetry-etw-metrics/README.md b/opentelemetry-etw-metrics/README.md index 0a8b41ead..d1dcc6e1f 100644 --- a/opentelemetry-etw-metrics/README.md +++ b/opentelemetry-etw-metrics/README.md @@ -6,7 +6,7 @@ | Status | | | ------------- |-----------| -| Stability | beta | +| Stability | stable | | Owners | [Cijo Thomas](https://github.com/cijothomas), [Lalit Kumar Bhasin](https://github.com/lalitb), [Matthew Boddewyn](https://github.com/mattbodd), [Pato Sandaña](https://github.com/psandana)| This crate contains OpenTelemetry Metrics exporter to diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/CHANGELOG.md b/opentelemetry-exporter-geneva/geneva-uploader-ffi/CHANGELOG.md new file mode 100644 index 000000000..14d0819e5 --- /dev/null +++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/CHANGELOG.md @@ -0,0 +1,20 @@ +# Changelog + +## [0.3.0] - 2025-10-17 + +### Added +- Error message parameters to FFI functions for better diagnostics + +## [0.2.0] - 2025-09-24 + +### Added +- FFI bindings for spans upload functionality + +### Changed +- Bump opentelemetry-proto version to 0.31 +- Bump prost version to 0.14 + +## [0.1.0] - 2025-08-18 + +### Added +- Initial release of geneva-uploader-ffi diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/Cargo.toml b/opentelemetry-exporter-geneva/geneva-uploader-ffi/Cargo.toml index 164f7ac11..9523d3309 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader-ffi/Cargo.toml +++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/Cargo.toml @@ -1,11 +1,31 @@ [package] name = "geneva-uploader-ffi" -version = "0.1.0" +description = "FFI bindings for Geneva uploader" +version = "0.3.0" edition = "2021" +homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-exporter-geneva/geneva-uploader-ffi" +repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-exporter-geneva/geneva-uploader-ffi" +rust-version = "1.85.0" +keywords = ["opentelemetry", "geneva", "ffi", "uploader"] license = "Apache-2.0" -rust-version = "1.75.0" + +[lib] +crate-type = ["cdylib", "staticlib", "rlib"] [dependencies] +geneva-uploader = { path = "../geneva-uploader", version = "0.3.0" } +opentelemetry-proto = { workspace = true, default-features = false, features = ["logs", "trace", "gen-tonic-messages"] } +tokio = { version = "1.0", features = ["rt-multi-thread"] } +prost = "0.14" + +[features] +mock_auth = ["geneva-uploader/mock_auth"] [lints] workspace = true + +[dev-dependencies] +otlp_builder = { path = "examples/otlp_builder" } +wiremock = "=0.5.22" +base64 = "0.22" +chrono = "0.4" diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/README.md b/opentelemetry-exporter-geneva/geneva-uploader-ffi/README.md new file mode 100644 index 000000000..e902f9986 --- /dev/null +++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/README.md @@ -0,0 +1,5 @@ +# geneva-uploader-ffi + +The geneva-uploader-ffi is designed for Microsoft products to send data to public-facing end-points which route to Microsoft's internal data pipeline. It is not meant to be used outside of Microsoft products and is open sourced to demonstrate best practices and to be transparent about what is being collected. + +geneva-uploader-ffi: FFI (Foreign Function Interface) layer for integrating with other languages. 
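To make the flow concrete before the full examples below, here is a minimal sketch of the call sequence a C consumer drives through this FFI layer. It uses only the functions and constants declared in include/geneva_ffi.h later in this patch; the configuration values are placeholders (the real examples read them from the environment), and the OTLP payload bytes are assumed to come from the caller:

    #include <stdio.h>
    #include "geneva_ffi.h"

    /* Sketch only: placeholder config values; system-managed identity is used
       so the auth union can stay zero-initialized (see the header notes). */
    static int upload_logs(const uint8_t* otlp_bytes, size_t otlp_len) {
        GenevaConfig cfg = {
            .endpoint = "https://example.invalid",   /* placeholder */
            .environment = "Test",
            .account = "myaccount",
            .namespace_name = "myns",
            .region = "eastus",
            .config_major_version = 1,
            .auth_method = GENEVA_AUTH_SYSTEM_MANAGED_IDENTITY,
            .tenant = "tenant",
            .role_name = "role",
            .role_instance = "instance",
            .msi_resource = "https://monitor.azure.com",
        };
        GenevaClientHandle* client = NULL;
        EncodedBatchesHandle* batches = NULL;
        char err[256];

        if (geneva_client_new(&cfg, &client, err, sizeof(err)) != GENEVA_SUCCESS) {
            fprintf(stderr, "client: %s\n", err);
            return 1;
        }
        if (geneva_encode_and_compress_logs(client, otlp_bytes, otlp_len,
                                            &batches, err, sizeof(err)) != GENEVA_SUCCESS) {
            fprintf(stderr, "encode: %s\n", err);
            geneva_client_free(client);
            return 1;
        }
        for (size_t i = 0; i < geneva_batches_len(batches); i++) {
            if (geneva_upload_batch_sync(client, batches, i, err, sizeof(err)) != GENEVA_SUCCESS) {
                fprintf(stderr, "batch %zu: %s\n", i, err);
                break;
            }
        }
        geneva_batches_free(batches);
        geneva_client_free(client);
        return 0;
    }

The spans path is identical except that the payload is a protobuf-encoded ExportTraceServiceRequest and the encode step is geneva_encode_and_compress_spans.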
diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/Makefile b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/Makefile
new file mode 100644
index 000000000..6ea2041ec
--- /dev/null
+++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/Makefile
@@ -0,0 +1,84 @@
+# Makefile for building and running the C FFI examples (local to examples/)
+# This focuses only on compiling the C examples against the built Rust FFI libs.
+
+.PHONY: all run-logs run-spans build-rust build-otlp logs-example spans-example clean verify-header help
+
+# Paths relative to this examples/ directory
+RUST_CRATE_DIR := ..
+INCLUDE_DIR := ../include
+LIB_DIR := ../../../target/release
+LIB_DIR_DEPS := $(LIB_DIR)/deps
+LOGS_BINARY := logs_example_test
+SPANS_BINARY := spans_example_test
+
+# Defaults: build only (do not run). Defined after the variables above, since prerequisite lists are expanded as soon as the rule is read.
+all: build-rust build-otlp $(LOGS_BINARY) $(SPANS_BINARY)
+
+# Build the Rust FFI library (release)
+build-rust:
+	@echo "Building Rust FFI library..."
+	@cd $(RUST_CRATE_DIR) && cargo build --release
+
+# Build the example-only otlp_builder (release)
+build-otlp:
+	@echo "Building otlp_builder (cdylib + rlib)..."
+	@cd otlp_builder && cargo build --release
+
+# Build the logs example binary
+$(LOGS_BINARY): logs_example.c build-rust build-otlp
+	@echo "Building logs example..."
+	@gcc -std=c11 -o $(LOGS_BINARY) logs_example.c \
+	  -I$(INCLUDE_DIR) \
+	  -L$(LIB_DIR) -L$(LIB_DIR_DEPS) \
+	  -lgeneva_uploader_ffi -lotlp_builder \
+	  -Wl,-rpath,@loader_path/../../../target/release \
+	  -Wl,-rpath,@loader_path/../../../target/release/deps \
+	  -lpthread -ldl -lm
+
+# Build the spans example binary
+$(SPANS_BINARY): spans_example.c build-rust build-otlp
+	@echo "Building spans example..."
+	@gcc -std=c11 -o $(SPANS_BINARY) spans_example.c \
+	  -I$(INCLUDE_DIR) \
+	  -L$(LIB_DIR) -L$(LIB_DIR_DEPS) \
+	  -lgeneva_uploader_ffi -lotlp_builder \
+	  -Wl,-rpath,@loader_path/../../../target/release \
+	  -Wl,-rpath,@loader_path/../../../target/release/deps \
+	  -lpthread -ldl -lm
+
+# Run the logs example with proper dynamic library path for macOS/Linux
+run-logs: $(LOGS_BINARY)
+	@echo "Running logs example..."
+	@DYLD_LIBRARY_PATH=$(LIB_DIR):$(LIB_DIR_DEPS) LD_LIBRARY_PATH=$(LIB_DIR):$(LIB_DIR_DEPS) ./$(LOGS_BINARY)
+
+# Run the spans example with proper dynamic library path for macOS/Linux
+run-spans: $(SPANS_BINARY)
+	@echo "Running spans example..."
+	@DYLD_LIBRARY_PATH=$(LIB_DIR):$(LIB_DIR_DEPS) LD_LIBRARY_PATH=$(LIB_DIR):$(LIB_DIR_DEPS) ./$(SPANS_BINARY)
+
+# Aliases
+logs-example: run-logs
+spans-example: run-spans
+
+# Quick header verification (compile-only)
+verify-header:
+	@echo "Verifying C header compatibility..."
+	@gcc -c logs_example.c -I$(INCLUDE_DIR) -o /tmp/test_logs_header.o && echo "✓ Logs example C header is valid" || echo "✗ Logs example C header has issues"
+	@gcc -c spans_example.c -I$(INCLUDE_DIR) -o /tmp/test_spans_header.o && echo "✓ Spans example C header is valid" || echo "✗ Spans example C header has issues"
+	@rm -f /tmp/test_logs_header.o /tmp/test_spans_header.o
+
+# Clean example build artifacts
+clean:
+	@rm -f $(LOGS_BINARY) $(SPANS_BINARY)
+
+help:
+	@echo "Targets:"
+	@echo "  all           - Build Rust lib + otlp_builder + both example binaries (default)"
+	@echo "  run-logs      - Build and run the logs example (requires env vars)"
+	@echo "  run-spans     - Build and run the spans example (requires env vars)"
+	@echo "  logs-example  - Same as 'run-logs'"
+	@echo "  spans-example - Same as 'run-spans'"
+	@echo "  build-rust    - Build the Rust FFI library in release mode"
+	@echo "  build-otlp    - Build the otlp_builder cdylib/rlib (release)"
+	@echo "  verify-header - Compile-check the headers with both examples"
+	@echo "  clean         - Remove both example binaries"
diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/logs_example.c b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/logs_example.c
new file mode 100644
index 000000000..b0b54dff1
--- /dev/null
+++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/logs_example.c
@@ -0,0 +1,164 @@
+/*
+ * Geneva FFI C Example (synchronous only)
+ *
+ * This example demonstrates:
+ * - Reading configuration from environment
+ * - Creating a Geneva client via geneva_client_new (out-param)
+ * - Encoding/compressing ResourceLogs
+ * - Uploading batches synchronously with geneva_upload_batch_sync
+ *
+ * Note: The non-blocking callback-based mechanism has been removed.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <stddef.h>
+#include "../include/geneva_ffi.h"
+
+/* Prototypes from the example-only builder dylib (otlp_builder) */
+extern int geneva_build_otlp_logs_minimal(const char* body_utf8,
+                                          const char* resource_key,
+                                          const char* resource_value,
+                                          uint8_t** out_ptr,
+                                          size_t* out_len);
+extern void geneva_free_buffer(uint8_t* ptr, size_t len);
+
+/* Helper to read env or default */
+static const char* get_env_or_default(const char* name, const char* defval) {
+    const char* v = getenv(name);
+    return v ?
v : defval; +} + + +int main(void) { + printf("Geneva FFI Example (synchronous API)\n"); + printf("====================================\n\n"); + + /* Required env */ + const char* endpoint = getenv("GENEVA_ENDPOINT"); + const char* environment = getenv("GENEVA_ENVIRONMENT"); + const char* account = getenv("GENEVA_ACCOUNT"); + const char* namespaceName = getenv("GENEVA_NAMESPACE"); + const char* region = getenv("GENEVA_REGION"); + const char* cfg_ver_str = getenv("GENEVA_CONFIG_MAJOR_VERSION"); + + if (!endpoint || !environment || !account || !namespaceName || !region || !cfg_ver_str) { + printf("Missing required environment variables!\n"); + printf(" GENEVA_ENDPOINT\n"); + printf(" GENEVA_ENVIRONMENT\n"); + printf(" GENEVA_ACCOUNT\n"); + printf(" GENEVA_NAMESPACE\n"); + printf(" GENEVA_REGION\n"); + printf(" GENEVA_CONFIG_MAJOR_VERSION\n"); + return 1; + } + + int cfg_ver = atoi(cfg_ver_str); + if (cfg_ver <= 0) { + printf("Invalid GENEVA_CONFIG_MAJOR_VERSION: %s\n", cfg_ver_str); + return 1; + } + + /* Optional env with defaults */ + const char* tenant = get_env_or_default("GENEVA_TENANT", "default-tenant"); + const char* role_name = get_env_or_default("GENEVA_ROLE_NAME", "default-role"); + const char* role_instance= get_env_or_default("GENEVA_ROLE_INSTANCE", "default-instance"); + + /* Certificate auth if both provided; otherwise system managed identity */ + const char* cert_path = getenv("GENEVA_CERT_PATH"); + const char* cert_password = getenv("GENEVA_CERT_PASSWORD"); + uint32_t auth_method = (cert_path && cert_password) ? GENEVA_AUTH_CERTIFICATE : GENEVA_AUTH_SYSTEM_MANAGED_IDENTITY; + + printf("Configuration:\n"); + printf(" Endpoint: %s\n", endpoint); + printf(" Environment: %s\n", environment); + printf(" Account: %s\n", account); + printf(" Namespace: %s\n", namespaceName); + printf(" Region: %s\n", region); + printf(" Config Major Version: %d\n", cfg_ver); + printf(" Tenant: %s\n", tenant); + printf(" Role Name: %s\n", role_name); + printf(" Role Instance: %s\n", role_instance); + printf(" Auth Method: %s\n", auth_method == GENEVA_AUTH_CERTIFICATE ? 
"Certificate" : "System Managed Identity"); + if (auth_method == GENEVA_AUTH_CERTIFICATE) { + printf(" Cert Path: %s\n", cert_path); + } + printf("\n"); + + /* Build config */ + GenevaConfig cfg = { + .endpoint = endpoint, + .environment = environment, + .account = account, + .namespace_name = namespaceName, + .region = region, + .config_major_version = (uint32_t)cfg_ver, + .auth_method = auth_method, + .tenant = tenant, + .role_name = role_name, + .role_instance = role_instance, + .msi_resource = NULL, /* Optional MSI resource - can be set via environment if needed */ + }; + if (auth_method == GENEVA_AUTH_CERTIFICATE) { + cfg.auth.cert.cert_path = cert_path; + cfg.auth.cert.cert_password = cert_password; + } + + /* Create client */ + GenevaClientHandle* client = NULL; + char err_buf[512]; + GenevaError rc = geneva_client_new(&cfg, &client, err_buf, sizeof(err_buf)); + if (rc != GENEVA_SUCCESS || client == NULL) { + printf("Failed to create Geneva client (code=%d): %s\n", rc, err_buf); + return 1; + } + printf("Geneva client created.\n"); + + /* Create ExportLogsServiceRequest bytes via FFI builder */ + size_t data_len = 0; + uint8_t* data = NULL; + GenevaError brc = geneva_build_otlp_logs_minimal("hello from c ffi", "service.name", "c-ffi-example", &data, &data_len); + if (brc != GENEVA_SUCCESS || data == NULL || data_len == 0) { + printf("Failed to build OTLP payload (code=%d)\n", brc); + geneva_client_free(client); + return 1; + } + + /* Encode and compress to batches */ + EncodedBatchesHandle* batches = NULL; + GenevaError enc_rc = geneva_encode_and_compress_logs(client, data, data_len, &batches, err_buf, sizeof(err_buf)); + if (enc_rc != GENEVA_SUCCESS || batches == NULL) { + printf("Encode/compress failed (code=%d): %s\n", enc_rc, err_buf); + geneva_free_buffer(data, data_len); + geneva_client_free(client); + return 1; + } + + size_t n = geneva_batches_len(batches); + printf("Encoded %zu batch(es)\n", n); + + /* Upload synchronously, batch by batch */ + GenevaError first_err = GENEVA_SUCCESS; + for (size_t i = 0; i < n; i++) { + GenevaError r = geneva_upload_batch_sync(client, batches, i, err_buf, sizeof(err_buf)); + if (r != GENEVA_SUCCESS) { + first_err = r; + printf("Batch %zu upload failed with error %d: %s\n", i, r, err_buf); + break; + } + } + + /* Cleanup */ + geneva_batches_free(batches); + geneva_free_buffer(data, data_len); + geneva_client_free(client); + + if (first_err == GENEVA_SUCCESS) { + printf("All batches uploaded successfully.\n"); + return 0; + } + printf("Upload finished with error code: %d\n", first_err); + return 1; +} diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/Cargo.toml b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/Cargo.toml new file mode 100644 index 000000000..1600cdbe0 --- /dev/null +++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "otlp_builder" +description = "OTLP builder utility for Geneva uploader FFI examples" +version = "0.1.0" +edition = "2021" +homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder" +repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder" +rust-version = "1.85.0" +keywords = ["opentelemetry", "geneva", "otlp", "builder"] +license = "Apache-2.0" + +[lib] +crate-type = ["rlib", "cdylib"] + 
+[lints]
+workspace = true
+
+[dependencies]
+# Use the repo's opentelemetry-proto crate directly
+opentelemetry-proto = { version = "0.31", default-features = false, features = ["logs", "trace", "gen-tonic-messages"] }
+prost = "0.14"
diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/src/builder.rs b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/src/builder.rs
new file mode 100644
index 000000000..1f7e3f929
--- /dev/null
+++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/src/builder.rs
@@ -0,0 +1,70 @@
+use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
+use opentelemetry_proto::tonic::common::v1::any_value::Value as AnyValueValue;
+use opentelemetry_proto::tonic::common::v1::{AnyValue, KeyValue};
+use opentelemetry_proto::tonic::logs::v1::{LogRecord, ResourceLogs, ScopeLogs};
+use opentelemetry_proto::tonic::resource::v1::Resource;
+use prost::Message;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+/// Pure Rust helper to build a minimal OTLP ExportLogsServiceRequest as bytes.
+/// This is shared by the C example dylib and test-only usage via include! from lib.rs tests.
+///
+/// **Note**: This function is only intended for examples and unit tests, not for external use.
+pub fn build_otlp_logs_minimal(
+    event_name: &str,
+    body: &str,
+    resource_kv: Option<(&str, &str)>,
+) -> Vec<u8> {
+    let mut resource_attrs: Vec<KeyValue> = Vec::new();
+    if let Some((k, v)) = resource_kv {
+        resource_attrs.push(KeyValue {
+            key: k.to_string(),
+            value: Some(AnyValue {
+                value: Some(AnyValueValue::StringValue(v.to_string())),
+            }),
+        });
+    }
+
+    let now_nanos: u64 = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap_or_default()
+        .as_nanos() as u64;
+
+    let log_record = LogRecord {
+        time_unix_nano: now_nanos,
+        observed_time_unix_nano: 0,
+        severity_number: 0,
+        severity_text: String::new(),
+        event_name: event_name.to_string(),
+        body: Some(AnyValue {
+            value: Some(AnyValueValue::StringValue(body.to_string())),
+        }),
+        attributes: Vec::new(),
+        dropped_attributes_count: 0,
+        flags: 0,
+        trace_id: Vec::new(),
+        span_id: Vec::new(),
+    };
+
+    let scope_logs = ScopeLogs {
+        scope: None,
+        log_records: vec![log_record],
+        schema_url: String::new(),
+    };
+
+    let resource_logs = ResourceLogs {
+        resource: Some(Resource {
+            attributes: resource_attrs,
+            dropped_attributes_count: 0,
+            entity_refs: Vec::new(),
+        }),
+        scope_logs: vec![scope_logs],
+        schema_url: String::new(),
+    };
+
+    let req = ExportLogsServiceRequest {
+        resource_logs: vec![resource_logs],
+    };
+
+    req.encode_to_vec()
+}
diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/src/lib.rs b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/src/lib.rs
new file mode 100644
index 000000000..0ba64e6aa
--- /dev/null
+++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/otlp_builder/src/lib.rs
@@ -0,0 +1,230 @@
+#![allow(unsafe_op_in_unsafe_fn)]
+#![allow(unknown_lints)]
+#![allow(unsafe_attr_outside_unsafe)]
+
+use std::ffi::CStr;
+use std::os::raw::c_char;
+
+use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
+use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
+use opentelemetry_proto::tonic::common::v1::any_value::Value as AnyValueValue;
+use opentelemetry_proto::tonic::common::v1::{AnyValue, KeyValue};
+use opentelemetry_proto::tonic::logs::v1::{LogRecord, ResourceLogs, ScopeLogs};
+use opentelemetry_proto::tonic::resource::v1::Resource;
+use opentelemetry_proto::tonic::trace::v1::{ResourceSpans, ScopeSpans, Span, Status};
+use prost::Message;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+pub mod builder;
+
+#[no_mangle]
+unsafe extern "C" fn geneva_build_otlp_logs_minimal(
+    body_utf8: *const c_char,
+    resource_key: *const c_char,
+    resource_value: *const c_char,
+    out_ptr: *mut *mut u8,
+    out_len: *mut usize,
+) -> i32 {
+    // Return codes aligned with GenevaError:
+    //   0 = GENEVA_SUCCESS
+    //   4 = GENEVA_INVALID_DATA
+    // 100 = GENEVA_ERR_NULL_POINTER
+
+    if out_ptr.is_null() || out_len.is_null() {
+        return 100;
+    }
+    *out_ptr = std::ptr::null_mut();
+    *out_len = 0;
+
+    if body_utf8.is_null() {
+        return 100;
+    }
+
+    let body = match CStr::from_ptr(body_utf8).to_str() {
+        Ok(s) => s.to_string(),
+        Err(_) => return 4,
+    };
+
+    let mut resource_attrs: Vec<KeyValue> = Vec::new();
+    if !resource_key.is_null() && !resource_value.is_null() {
+        let key = match CStr::from_ptr(resource_key).to_str() {
+            Ok(s) => s.to_string(),
+            Err(_) => return 4,
+        };
+        let val = match CStr::from_ptr(resource_value).to_str() {
+            Ok(s) => s.to_string(),
+            Err(_) => return 4,
+        };
+        resource_attrs.push(KeyValue {
+            key,
+            value: Some(AnyValue {
+                value: Some(AnyValueValue::StringValue(val)),
+            }),
+        });
+    }
+
+    let now_nanos: u64 = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap_or_default()
+        .as_nanos() as u64;
+
+    let log_record = LogRecord {
+        time_unix_nano: now_nanos,
+        observed_time_unix_nano: 0,
+        severity_number: 0,
+        severity_text: String::new(),
+        event_name: "Log".to_string(),
+        body: Some(AnyValue {
+            value: Some(AnyValueValue::StringValue(body)),
+        }),
+        attributes: Vec::new(),
+        dropped_attributes_count: 0,
+        flags: 0,
+        trace_id: Vec::new(),
+        span_id: Vec::new(),
+    };
+
+    let scope_logs = ScopeLogs {
+        scope: None,
+        log_records: vec![log_record],
+        schema_url: String::new(),
+    };
+
+    let resource_logs = ResourceLogs {
+        resource: Some(Resource {
+            attributes: resource_attrs,
+            dropped_attributes_count: 0,
+            entity_refs: Vec::new(),
+        }),
+        scope_logs: vec![scope_logs],
+        schema_url: String::new(),
+    };
+
+    let req = ExportLogsServiceRequest {
+        resource_logs: vec![resource_logs],
+    };
+
+    let mut bytes = req.encode_to_vec();
+    let len = bytes.len();
+    let ptr = bytes.as_mut_ptr();
+    std::mem::forget(bytes);
+
+    *out_ptr = ptr;
+    *out_len = len;
+    0
+}
+
+#[no_mangle]
+unsafe extern "C" fn geneva_build_otlp_spans_minimal(
+    span_name: *const c_char,
+    resource_key: *const c_char,
+    resource_value: *const c_char,
+    out_ptr: *mut *mut u8,
+    out_len: *mut usize,
+) -> i32 {
+    // Return codes aligned with GenevaError:
+    //   0 = GENEVA_SUCCESS
+    //   4 = GENEVA_INVALID_DATA
+    // 100 = GENEVA_ERR_NULL_POINTER
+
+    if out_ptr.is_null() || out_len.is_null() {
+        return 100;
+    }
+    *out_ptr = std::ptr::null_mut();
+    *out_len = 0;
+
+    if span_name.is_null() {
+        return 100;
+    }
+
+    let name = match CStr::from_ptr(span_name).to_str() {
+        Ok(s) => s.to_string(),
+        Err(_) => return 4,
+    };
+
+    let mut resource_attrs: Vec<KeyValue> = Vec::new();
+    if !resource_key.is_null() && !resource_value.is_null() {
+        let key = match CStr::from_ptr(resource_key).to_str() {
+            Ok(s) => s.to_string(),
+            Err(_) => return 4,
+        };
+        let val = match CStr::from_ptr(resource_value).to_str() {
+            Ok(s) => s.to_string(),
+            Err(_) => return 4,
+        };
+        resource_attrs.push(KeyValue {
+            key,
+            value: Some(AnyValue {
+                value: Some(AnyValueValue::StringValue(val)),
+            }),
+        });
+    }
+
+    let now_nanos: u64 = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap_or_default()
+        .as_nanos() as u64;
+
+    // Generate a simple trace ID (16 bytes)
+    let trace_id: Vec<u8> = (0..16).map(|i| (now_nanos >> (i * 4)) as u8).collect();
+    // Generate a simple span ID (8 bytes)
+    let span_id: Vec<u8> = (0..8).map(|i| (now_nanos >> (i * 8)) as u8).collect();
+
+    let span = Span {
+        trace_id,
+        span_id,
+        trace_state: String::new(),
+        parent_span_id: Vec::new(),
+        flags: 0,
+        name,
+        kind: 0, // SPAN_KIND_UNSPECIFIED
+        start_time_unix_nano: now_nanos,
+        end_time_unix_nano: now_nanos + 1000000, // 1ms duration
+        attributes: Vec::new(),
+        dropped_attributes_count: 0,
+        events: Vec::new(),
+        dropped_events_count: 0,
+        links: Vec::new(),
+        dropped_links_count: 0,
+        status: Some(Status {
+            code: 1, // STATUS_CODE_OK
+            message: String::new(),
+        }),
+    };
+
+    let scope_spans = ScopeSpans {
+        scope: None,
+        spans: vec![span],
+        schema_url: String::new(),
+    };
+
+    let resource_spans = ResourceSpans {
+        resource: Some(Resource {
+            attributes: resource_attrs,
+            dropped_attributes_count: 0,
+            entity_refs: Vec::new(),
+        }),
+        scope_spans: vec![scope_spans],
+        schema_url: String::new(),
+    };
+
+    let req = ExportTraceServiceRequest {
+        resource_spans: vec![resource_spans],
+    };
+
+    let mut bytes = req.encode_to_vec();
+    let len = bytes.len();
+    let ptr = bytes.as_mut_ptr();
+    std::mem::forget(bytes);
+
+    *out_ptr = ptr;
+    *out_len = len;
+    0
+}
+
+#[no_mangle]
+unsafe extern "C" fn geneva_free_buffer(ptr: *mut u8, len: usize) {
+    if !ptr.is_null() && len > 0 {
+        let _ = Vec::from_raw_parts(ptr, len, len);
+    }
+}
diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/spans_example.c b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/spans_example.c
new file mode 100644
index 000000000..6e29a5d4f
--- /dev/null
+++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/examples/spans_example.c
@@ -0,0 +1,164 @@
+/*
+ * Geneva FFI C Spans Example (synchronous only)
+ *
+ * This example demonstrates:
+ * - Reading configuration from environment
+ * - Creating a Geneva client via geneva_client_new (out-param)
+ * - Encoding/compressing ResourceSpans
+ * - Uploading batches synchronously with geneva_upload_batch_sync
+ *
+ * Note: The non-blocking callback-based mechanism has been removed.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <stddef.h>
+#include "../include/geneva_ffi.h"
+
+/* Prototypes from the example-only builder dylib (otlp_builder) */
+extern int geneva_build_otlp_spans_minimal(const char* span_name,
+                                           const char* resource_key,
+                                           const char* resource_value,
+                                           uint8_t** out_ptr,
+                                           size_t* out_len);
+extern void geneva_free_buffer(uint8_t* ptr, size_t len);
+
+/* Helper to read env or default */
+static const char* get_env_or_default(const char* name, const char* defval) {
+    const char* v = getenv(name);
+    return v ?
v : defval; +} + + +int main(void) { + printf("Geneva FFI Spans Example (synchronous API)\n"); + printf("==========================================\n\n"); + + /* Required env */ + const char* endpoint = getenv("GENEVA_ENDPOINT"); + const char* environment = getenv("GENEVA_ENVIRONMENT"); + const char* account = getenv("GENEVA_ACCOUNT"); + const char* namespaceName = getenv("GENEVA_NAMESPACE"); + const char* region = getenv("GENEVA_REGION"); + const char* cfg_ver_str = getenv("GENEVA_CONFIG_MAJOR_VERSION"); + + if (!endpoint || !environment || !account || !namespaceName || !region || !cfg_ver_str) { + printf("Missing required environment variables!\n"); + printf(" GENEVA_ENDPOINT\n"); + printf(" GENEVA_ENVIRONMENT\n"); + printf(" GENEVA_ACCOUNT\n"); + printf(" GENEVA_NAMESPACE\n"); + printf(" GENEVA_REGION\n"); + printf(" GENEVA_CONFIG_MAJOR_VERSION\n"); + return 1; + } + + int cfg_ver = atoi(cfg_ver_str); + if (cfg_ver <= 0) { + printf("Invalid GENEVA_CONFIG_MAJOR_VERSION: %s\n", cfg_ver_str); + return 1; + } + + /* Optional env with defaults */ + const char* tenant = get_env_or_default("GENEVA_TENANT", "default-tenant"); + const char* role_name = get_env_or_default("GENEVA_ROLE_NAME", "default-role"); + const char* role_instance= get_env_or_default("GENEVA_ROLE_INSTANCE", "default-instance"); + + /* Certificate auth if both provided; otherwise system managed identity */ + const char* cert_path = getenv("GENEVA_CERT_PATH"); + const char* cert_password = getenv("GENEVA_CERT_PASSWORD"); + uint32_t auth_method = (cert_path && cert_password) ? GENEVA_AUTH_CERTIFICATE : GENEVA_AUTH_SYSTEM_MANAGED_IDENTITY; + + printf("Configuration:\n"); + printf(" Endpoint: %s\n", endpoint); + printf(" Environment: %s\n", environment); + printf(" Account: %s\n", account); + printf(" Namespace: %s\n", namespaceName); + printf(" Region: %s\n", region); + printf(" Config Major Version: %d\n", cfg_ver); + printf(" Tenant: %s\n", tenant); + printf(" Role Name: %s\n", role_name); + printf(" Role Instance: %s\n", role_instance); + printf(" Auth Method: %s\n", auth_method == GENEVA_AUTH_CERTIFICATE ? 
"Certificate" : "System Managed Identity"); + if (auth_method == GENEVA_AUTH_CERTIFICATE) { + printf(" Cert Path: %s\n", cert_path); + } + printf("\n"); + + /* Build config */ + GenevaConfig cfg = { + .endpoint = endpoint, + .environment = environment, + .account = account, + .namespace_name = namespaceName, + .region = region, + .config_major_version = (uint32_t)cfg_ver, + .auth_method = auth_method, + .tenant = tenant, + .role_name = role_name, + .role_instance = role_instance, + .msi_resource = NULL, /* Optional MSI resource - can be set via environment if needed */ + }; + if (auth_method == GENEVA_AUTH_CERTIFICATE) { + cfg.auth.cert.cert_path = cert_path; + cfg.auth.cert.cert_password = cert_password; + } + + /* Create client */ + GenevaClientHandle* client = NULL; + char err_buf[512]; + GenevaError rc = geneva_client_new(&cfg, &client, err_buf, sizeof(err_buf)); + if (rc != GENEVA_SUCCESS || client == NULL) { + printf("Failed to create Geneva client (code=%d): %s\n", rc, err_buf); + return 1; + } + printf("Geneva client created.\n"); + + /* Create ExportSpansServiceRequest bytes via FFI builder */ + size_t data_len = 0; + uint8_t* data = NULL; + GenevaError brc = geneva_build_otlp_spans_minimal("test-span", "service.name", "c-ffi-spans-example", &data, &data_len); + if (brc != GENEVA_SUCCESS || data == NULL || data_len == 0) { + printf("Failed to build OTLP spans payload (code=%d)\n", brc); + geneva_client_free(client); + return 1; + } + + /* Encode and compress spans to batches */ + EncodedBatchesHandle* batches = NULL; + GenevaError enc_rc = geneva_encode_and_compress_spans(client, data, data_len, &batches, err_buf, sizeof(err_buf)); + if (enc_rc != GENEVA_SUCCESS || batches == NULL) { + printf("Spans encode/compress failed (code=%d): %s\n", enc_rc, err_buf); + geneva_free_buffer(data, data_len); + geneva_client_free(client); + return 1; + } + + size_t n = geneva_batches_len(batches); + printf("Encoded %zu span batch(es)\n", n); + + /* Upload spans synchronously, batch by batch */ + GenevaError first_err = GENEVA_SUCCESS; + for (size_t i = 0; i < n; i++) { + GenevaError r = geneva_upload_batch_sync(client, batches, i, err_buf, sizeof(err_buf)); + if (r != GENEVA_SUCCESS) { + first_err = r; + printf("Span batch %zu upload failed with error %d: %s\n", i, r, err_buf); + break; + } + } + + /* Cleanup */ + geneva_batches_free(batches); + geneva_free_buffer(data, data_len); + geneva_client_free(client); + + if (first_err == GENEVA_SUCCESS) { + printf("All span batches uploaded successfully.\n"); + return 0; + } + printf("Span upload finished with error code: %d\n", first_err); + return 1; +} \ No newline at end of file diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/include/geneva_errors.h b/opentelemetry-exporter-geneva/geneva-uploader-ffi/include/geneva_errors.h new file mode 100644 index 000000000..f574d706b --- /dev/null +++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/include/geneva_errors.h @@ -0,0 +1,49 @@ +#ifndef GENEVA_ERRORS_H +#define GENEVA_ERRORS_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Error codes returned by FFI functions (blocking API only). + NOTE: Values must remain stable for ABI compatibility. 
*/
+typedef enum {
+    /* Base codes (stable) */
+    GENEVA_SUCCESS = 0,
+    GENEVA_INVALID_CONFIG = 1,
+    GENEVA_INITIALIZATION_FAILED = 2,
+    GENEVA_UPLOAD_FAILED = 3,
+    GENEVA_INVALID_DATA = 4,
+    GENEVA_INTERNAL_ERROR = 5,
+
+    /* Granular argument/data errors (only those currently used) */
+    GENEVA_ERR_NULL_POINTER = 100,
+    GENEVA_ERR_EMPTY_INPUT = 101,
+    GENEVA_ERR_DECODE_FAILED = 102,
+    GENEVA_ERR_INDEX_OUT_OF_RANGE = 103,
+    GENEVA_ERR_INVALID_HANDLE = 104,
+
+    /* Granular config/auth errors (only those currently used) */
+    GENEVA_ERR_INVALID_AUTH_METHOD = 110,
+    GENEVA_ERR_INVALID_CERT_CONFIG = 111,
+    GENEVA_ERR_INVALID_WORKLOAD_IDENTITY_CONFIG = 112,
+    GENEVA_ERR_INVALID_USER_MSI_CONFIG = 113,
+    GENEVA_ERR_INVALID_USER_MSI_BY_OBJECT_ID_CONFIG = 114,
+    GENEVA_ERR_INVALID_USER_MSI_BY_RESOURCE_ID_CONFIG = 115,
+
+    /* Missing required config fields (granular INVALID_CONFIG) */
+    GENEVA_ERR_MISSING_ENDPOINT = 130,
+    GENEVA_ERR_MISSING_ENVIRONMENT = 131,
+    GENEVA_ERR_MISSING_ACCOUNT = 132,
+    GENEVA_ERR_MISSING_NAMESPACE = 133,
+    GENEVA_ERR_MISSING_REGION = 134,
+    GENEVA_ERR_MISSING_TENANT = 135,
+    GENEVA_ERR_MISSING_ROLE_NAME = 136,
+    GENEVA_ERR_MISSING_ROLE_INSTANCE = 137
+} GenevaError;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GENEVA_ERRORS_H */
diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/include/geneva_ffi.h b/opentelemetry-exporter-geneva/geneva-uploader-ffi/include/geneva_ffi.h
new file mode 100644
index 000000000..2a68c4126
--- /dev/null
+++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/include/geneva_ffi.h
@@ -0,0 +1,204 @@
+#ifndef GENEVA_FFI_H
+#define GENEVA_FFI_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include "geneva_errors.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Opaque handles
+typedef struct GenevaClientHandle GenevaClientHandle;
+typedef struct EncodedBatchesHandle EncodedBatchesHandle;
+
+// Authentication method constants
+#define GENEVA_AUTH_SYSTEM_MANAGED_IDENTITY 0
+#define GENEVA_AUTH_CERTIFICATE 1
+#define GENEVA_AUTH_WORKLOAD_IDENTITY 2
+#define GENEVA_AUTH_USER_MANAGED_IDENTITY 3
+#define GENEVA_AUTH_USER_MANAGED_IDENTITY_BY_OBJECT_ID 4
+#define GENEVA_AUTH_USER_MANAGED_IDENTITY_BY_RESOURCE_ID 5
+
+/* Configuration for certificate auth (valid only when auth_method == GENEVA_AUTH_CERTIFICATE) */
+typedef struct {
+    const char* cert_path;     /* Path to certificate file */
+    const char* cert_password; /* Certificate password */
+} GenevaCertAuthConfig;
+
+/* Configuration for Workload Identity auth (valid only when auth_method == GENEVA_AUTH_WORKLOAD_IDENTITY) */
+typedef struct {
+    const char* resource; /* Azure AD resource URI (e.g., "https://monitor.azure.com") */
+} GenevaWorkloadIdentityAuthConfig;
+
+/* Configuration for User-assigned Managed Identity by client ID (valid only when auth_method == GENEVA_AUTH_USER_MANAGED_IDENTITY) */
+typedef struct {
+    const char* client_id; /* Azure AD client ID */
+} GenevaUserManagedIdentityAuthConfig;
+
+/* Configuration for User-assigned Managed Identity by object ID (valid only when auth_method == GENEVA_AUTH_USER_MANAGED_IDENTITY_BY_OBJECT_ID) */
+typedef struct {
+    const char* object_id; /* Azure AD object ID */
+} GenevaUserManagedIdentityByObjectIdAuthConfig;
+
+/* Configuration for User-assigned Managed Identity by resource ID (valid only when auth_method == GENEVA_AUTH_USER_MANAGED_IDENTITY_BY_RESOURCE_ID) */
+typedef struct {
+    const char* resource_id; /* Azure resource ID */
+} GenevaUserManagedIdentityByResourceIdAuthConfig;
+
+/* Tagged union for auth-specific configuration.
+ The active member is determined by 'auth_method' in GenevaConfig. + + NOTE: When auth_method is GENEVA_AUTH_SYSTEM_MANAGED_IDENTITY (0), + the union is not accessed and can be zero-initialized. */ +typedef union { + GenevaCertAuthConfig cert; /* Valid when auth_method == GENEVA_AUTH_CERTIFICATE */ + GenevaWorkloadIdentityAuthConfig workload_identity; /* Valid when auth_method == GENEVA_AUTH_WORKLOAD_IDENTITY */ + GenevaUserManagedIdentityAuthConfig user_msi; /* Valid when auth_method == GENEVA_AUTH_USER_MANAGED_IDENTITY */ + GenevaUserManagedIdentityByObjectIdAuthConfig user_msi_objid; /* Valid when auth_method == GENEVA_AUTH_USER_MANAGED_IDENTITY_BY_OBJECT_ID */ + GenevaUserManagedIdentityByResourceIdAuthConfig user_msi_resid; /* Valid when auth_method == GENEVA_AUTH_USER_MANAGED_IDENTITY_BY_RESOURCE_ID */ +} GenevaAuthConfig; + +/* Configuration structure for Geneva client (C-compatible, tagged union) + * + * IMPORTANT - Resource/Scope Configuration: + * Different auth methods require different resource configuration: + * + * - SystemManagedIdentity (0): Requires msi_resource field + * - Certificate (1): No resource needed (uses mTLS) + * - WorkloadIdentity (2): Requires auth.workload_identity.resource field + * - UserManagedIdentity by client ID (3): Requires msi_resource field + * - UserManagedIdentity by object ID (4): Requires msi_resource field + * - UserManagedIdentity by resource ID (5): Requires msi_resource field + * + * The msi_resource field specifies the Azure AD resource URI for token acquisition + * (e.g., "https://monitor.azure.com" for Azure Monitor in Public Cloud). + * + * Note: For user-assigned identities (3, 4, 5), the auth struct specifies WHICH + * identity to use (client_id/object_id/resource_id), while msi_resource specifies + * WHAT Azure resource to request tokens FOR. These are separate concerns. + */ +typedef struct { + const char* endpoint; + const char* environment; + const char* account; + const char* namespace_name; + const char* region; + uint32_t config_major_version; + uint32_t auth_method; /* 0 = System MSI, 1 = Certificate, 2 = Workload Identity, 3 = User MSI by client ID, 4 = User MSI by object ID, 5 = User MSI by resource ID */ + const char* tenant; + const char* role_name; + const char* role_instance; + GenevaAuthConfig auth; /* Active member selected by auth_method */ + const char* msi_resource; /* Azure AD resource URI for MSI auth (auth methods 0, 3, 4, 5). Not used for auth methods 1, 2. Nullable. */ +} GenevaConfig; + +/* Create a new Geneva client. + - On success returns GENEVA_SUCCESS and writes *out_handle. + - On failure returns an error code and optionally writes diagnostic message to err_msg_out. + + Parameters: + - config: Configuration structure (required) + - out_handle: Receives the client handle on success (required) + - err_msg_out: Optional buffer to receive error message (can be NULL). + Message will be NUL-terminated and truncated if buffer too small. + Recommended size: >= 256 bytes for full diagnostics. + - err_msg_len: Size of err_msg_out buffer in bytes (ignored if err_msg_out is NULL) + + IMPORTANT: Caller must call geneva_client_free() on the returned handle + to avoid memory leaks. All strings in config are copied; caller retains + ownership of config strings and may free them after this call returns. */ +GenevaError geneva_client_new(const GenevaConfig* config, + GenevaClientHandle** out_handle, + char* err_msg_out, + size_t err_msg_len); + + +/* 1) Encode and compress logs into batches (synchronous). 
+ `data` is a protobuf-encoded ExportLogsServiceRequest. + - On success returns GENEVA_SUCCESS and writes *out_batches. + - On failure returns an error code and optionally writes diagnostic message to err_msg_out. + + Parameters: + - handle: Client handle from geneva_client_new (required) + - data: Protobuf-encoded ExportLogsServiceRequest (required) + - data_len: Length of data buffer (required) + - out_batches: Receives the batches handle on success (required) + - err_msg_out: Optional buffer to receive error message (can be NULL). + Message will be NUL-terminated and truncated if buffer too small. + Recommended size: >= 256 bytes. + - err_msg_len: Size of err_msg_out buffer in bytes (ignored if err_msg_out is NULL) + + Caller must free *out_batches with geneva_batches_free. */ +GenevaError geneva_encode_and_compress_logs(GenevaClientHandle* handle, + const uint8_t* data, + size_t data_len, + EncodedBatchesHandle** out_batches, + char* err_msg_out, + size_t err_msg_len); + +/* 1.1) Encode and compress spans into batches (synchronous). + `data` is a protobuf-encoded ExportTraceServiceRequest. + - On success returns GENEVA_SUCCESS and writes *out_batches. + - On failure returns an error code and optionally writes diagnostic message to err_msg_out. + + Parameters: + - handle: Client handle from geneva_client_new (required) + - data: Protobuf-encoded ExportTraceServiceRequest (required) + - data_len: Length of data buffer (required) + - out_batches: Receives the batches handle on success (required) + - err_msg_out: Optional buffer to receive error message (can be NULL). + Message will be NUL-terminated and truncated if buffer too small. + Recommended size: >= 256 bytes. + - err_msg_len: Size of err_msg_out buffer in bytes (ignored if err_msg_out is NULL) + + Caller must free *out_batches with geneva_batches_free. */ +GenevaError geneva_encode_and_compress_spans(GenevaClientHandle* handle, + const uint8_t* data, + size_t data_len, + EncodedBatchesHandle** out_batches, + char* err_msg_out, + size_t err_msg_len); + +// 2) Query number of batches. +size_t geneva_batches_len(const EncodedBatchesHandle* batches); + +/* 3) Upload a single batch by index (synchronous). + - On success returns GENEVA_SUCCESS. + - On failure returns an error code and optionally writes diagnostic message to err_msg_out. + + Parameters: + - handle: Client handle from geneva_client_new (required) + - batches: Batches handle from encode/compress function (required) + - index: Index of batch to upload (must be < geneva_batches_len(batches)) + - err_msg_out: Optional buffer to receive error message (can be NULL). + Message will be NUL-terminated and truncated if buffer too small. + Recommended size: >= 256 bytes. + - err_msg_len: Size of err_msg_out buffer in bytes (ignored if err_msg_out is NULL) */ +GenevaError geneva_upload_batch_sync(GenevaClientHandle* handle, + const EncodedBatchesHandle* batches, + size_t index, + char* err_msg_out, + size_t err_msg_len); + + +/* 5) Free the batches handle. */ +void geneva_batches_free(EncodedBatchesHandle* batches); + + + +/* Frees a Geneva client handle and all associated resources. + + IMPORTANT: This must be called for every handle returned by geneva_client_new() + to avoid memory leaks. After calling this function, the handle must not be used. + + Safe to call with NULL (no-op). 
*/
+void geneva_client_free(GenevaClientHandle* handle);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // GENEVA_FFI_H
diff --git a/opentelemetry-exporter-geneva/geneva-uploader-ffi/src/lib.rs b/opentelemetry-exporter-geneva/geneva-uploader-ffi/src/lib.rs
index 8b1378917..58be44d32 100644
--- a/opentelemetry-exporter-geneva/geneva-uploader-ffi/src/lib.rs
+++ b/opentelemetry-exporter-geneva/geneva-uploader-ffi/src/lib.rs
@@ -1 +1,1431 @@
+//! C-compatible FFI bindings for geneva-uploader
+// Allow #[repr(C)] and other FFI attributes without wrapping in unsafe blocks (standard FFI practice)
+#![allow(unsafe_attr_outside_unsafe)]
+
+use std::ffi::CStr;
+use std::os::raw::{c_char, c_uint};
+use std::ptr;
+use std::sync::OnceLock;
+use tokio::runtime::Runtime;
+
+use geneva_uploader::client::{EncodedBatch, GenevaClient, GenevaClientConfig};
+use geneva_uploader::AuthMethod;
+use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
+use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
+use prost::Message;
+use std::path::PathBuf;
+
+/// Magic number for handle validation
+const GENEVA_HANDLE_MAGIC: u64 = 0xFEED_BEEF;
+
+/// Shared Tokio runtime for async operations
+/// TODO: Consider making runtime configurable via FFI in the future:
+/// - Thread count configuration (currently uses available_parallelism())
+/// - Runtime type selection (multi_thread vs current_thread)
+/// - Per-client runtimes vs shared global runtime
+/// - External runtime integration (accept user-provided runtime handle)
+/// - Runtime lifecycle management for FFI (shutdown, cleanup)
+static RUNTIME: OnceLock<Runtime> = OnceLock::new(); // TODO - Consider using LazyLock once msrv is 1.80.
+
+fn runtime() -> &'static Runtime {
+    RUNTIME.get_or_init(|| {
+        tokio::runtime::Builder::new_multi_thread()
+            .worker_threads(
+                std::thread::available_parallelism()
+                    .map(|n| n.get())
+                    .unwrap_or(4),
+            )
+            .thread_name("geneva-ffi-worker")
+            .enable_time()
+            .enable_io() // Only enable time + I/O for Geneva's needs
+            .build()
+            .expect("Failed to create Tokio runtime for Geneva FFI")
+    })
+}
+
+/// Trait for handles that support validation
+trait ValidatedHandle {
+    fn magic(&self) -> u64;
+    fn set_magic(&mut self, magic: u64);
+}
+
+/// Generic validation function that works for any ValidatedHandle
+unsafe fn validate_handle<T: ValidatedHandle>(handle: *const T) -> GenevaError {
+    if handle.is_null() {
+        return GenevaError::NullPointer;
+    }
+
+    let handle_ref = unsafe { handle.as_ref().unwrap() };
+
+    if handle_ref.magic() != GENEVA_HANDLE_MAGIC {
+        return GenevaError::InvalidHandle;
+    }
+
+    GenevaError::Success
+}
+
+/// Generic function to clear magic number on free
+unsafe fn clear_handle_magic<T: ValidatedHandle>(handle: *mut T) {
+    if !handle.is_null() {
+        if let Some(h) = unsafe { handle.as_mut() } {
+            h.set_magic(0);
+        }
+    }
+}
+
+/// Opaque handle for GenevaClient
+pub struct GenevaClientHandle {
+    magic: u64, // Magic number for handle validation
+    client: GenevaClient,
+}
+
+impl ValidatedHandle for GenevaClientHandle {
+    fn magic(&self) -> u64 {
+        self.magic
+    }
+
+    fn set_magic(&mut self, magic: u64) {
+        self.magic = magic;
+    }
+}
+
+/// Opaque handle holding encoded batches
+pub struct EncodedBatchesHandle {
+    magic: u64,
+    batches: Vec<EncodedBatch>,
+}
+
+impl ValidatedHandle for EncodedBatchesHandle {
+    fn magic(&self) -> u64 {
+        self.magic
+    }
+
+    fn set_magic(&mut self, magic: u64) {
+        self.magic = magic;
+    }
+}
+
+/// Configuration for certificate auth (valid only when auth_method == 1)
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GenevaCertAuthConfig {
+    pub cert_path: *const c_char,     // Path to certificate file
+    pub cert_password: *const c_char, // Certificate password
+}
+
+/// Configuration for Workload Identity auth (valid only when auth_method == 2)
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GenevaWorkloadIdentityAuthConfig {
+    pub resource: *const c_char, // Azure AD resource URI (e.g., "https://monitor.azure.com")
+}
+
+/// Configuration for User-assigned Managed Identity by client ID (valid only when auth_method == 3)
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GenevaUserManagedIdentityAuthConfig {
+    pub client_id: *const c_char, // Azure AD client ID
+}
+
+/// Configuration for User-assigned Managed Identity by object ID (valid only when auth_method == 4)
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GenevaUserManagedIdentityByObjectIdAuthConfig {
+    pub object_id: *const c_char, // Azure AD object ID
+}
+
+/// Configuration for User-assigned Managed Identity by resource ID (valid only when auth_method == 5)
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GenevaUserManagedIdentityByResourceIdAuthConfig {
+    pub resource_id: *const c_char, // Azure resource ID
+}
+
+#[repr(C)]
+pub union GenevaAuthConfig {
+    pub cert: GenevaCertAuthConfig,                          // Valid when auth_method == 1
+    pub workload_identity: GenevaWorkloadIdentityAuthConfig, // Valid when auth_method == 2
+    pub user_msi: GenevaUserManagedIdentityAuthConfig,       // Valid when auth_method == 3
+    pub user_msi_objid: GenevaUserManagedIdentityByObjectIdAuthConfig, // Valid when auth_method == 4
+    pub user_msi_resid: GenevaUserManagedIdentityByResourceIdAuthConfig, // Valid when auth_method == 5
+}
+
+/// Configuration structure for Geneva client (C-compatible, tagged union)
+///
+/// # Auth Methods
+/// - 0 = SystemManagedIdentity (auto-detected VM/AKS system-assigned identity)
+/// - 1 = Certificate (mTLS with PKCS#12 certificate)
+/// - 2 = WorkloadIdentity (explicit Azure Workload Identity for AKS)
+/// - 3 = UserManagedIdentity (by client ID)
+/// - 4 = UserManagedIdentityByObjectId (by object ID)
+/// - 5 = UserManagedIdentityByResourceId (by resource ID)
+///
+/// # Resource Configuration
+/// Different auth methods require different resource configuration:
+/// - **Auth methods 0, 3, 4, 5 (MSI variants)**: Use the `msi_resource` field to specify the Azure AD resource URI
+/// - **Auth method 2 (WorkloadIdentity)**: Use `auth.workload_identity.resource` field
+/// - **Auth method 1 (Certificate)**: No resource needed
+///
+/// The `msi_resource` field specifies the Azure AD resource URI for token acquisition
+/// (e.g., <https://monitor.azure.com>). For user-assigned identities (3, 4, 5), the
+/// auth union specifies WHICH identity to use, while `msi_resource` specifies WHAT
+/// Azure resource to request tokens FOR. These are orthogonal concerns.
+#[repr(C)]
+pub struct GenevaConfig {
+    pub endpoint: *const c_char,
+    pub environment: *const c_char,
+    pub account: *const c_char,
+    pub namespace_name: *const c_char,
+    pub region: *const c_char,
+    pub config_major_version: c_uint,
+    pub auth_method: c_uint,
+    pub tenant: *const c_char,
+    pub role_name: *const c_char,
+    pub role_instance: *const c_char,
+    pub auth: GenevaAuthConfig,      // Active member selected by auth_method
+    pub msi_resource: *const c_char, // Azure AD resource URI for MSI auth (auth methods 0, 3, 4, 5). Not used for auth methods 1, 2. Nullable.
+}
+
+/// Error codes returned by FFI functions
+/// TODO: Use cbindgen to auto-generate geneva_errors.h from this enum to eliminate duplication
+#[repr(C)]
+#[derive(PartialEq)]
+pub enum GenevaError {
+    // Base codes (stable)
+    Success = 0,
+    InvalidConfig = 1,
+    InitializationFailed = 2,
+    UploadFailed = 3,
+    InvalidData = 4,
+    InternalError = 5,
+
+    // Granular argument/data errors (used)
+    NullPointer = 100,
+    EmptyInput = 101,
+    DecodeFailed = 102,
+    IndexOutOfRange = 103,
+    InvalidHandle = 104,
+
+    // Granular config/auth errors (used)
+    InvalidAuthMethod = 110,
+    InvalidCertConfig = 111,
+    InvalidWorkloadIdentityConfig = 112,
+    InvalidUserMsiConfig = 113,
+    InvalidUserMsiByObjectIdConfig = 114,
+    InvalidUserMsiByResourceIdConfig = 115,
+
+    // Missing required config (granular INVALID_CONFIG)
+    MissingEndpoint = 130,
+    MissingEnvironment = 131,
+    MissingAccount = 132,
+    MissingNamespace = 133,
+    MissingRegion = 134,
+    MissingTenant = 135,
+    MissingRoleName = 136,
+    MissingRoleInstance = 137,
+}
+
+/// Safely converts a C string to Rust String
+unsafe fn c_str_to_string(ptr: *const c_char, field_name: &str) -> Result<String, String> {
+    if ptr.is_null() {
+        return Err(format!("Field '{field_name}' is null"));
+    }
+
+    match unsafe { CStr::from_ptr(ptr) }.to_str() {
+        Ok(s) => Ok(s.to_string()),
+        Err(_) => Err(format!("Invalid UTF-8 in field '{field_name}'")),
+    }
+}
+
+/// Writes error message to caller-provided buffer if available
+///
+/// This function has zero allocation cost when err_msg_out is NULL or err_msg_len is 0.
+/// Only allocates (via Display::to_string) when caller requests error details.
+unsafe fn write_error_if_provided(
+    err_msg_out: *mut c_char,
+    err_msg_len: usize,
+    error: &impl std::fmt::Display,
+) {
+    if !err_msg_out.is_null() && err_msg_len > 0 {
+        let error_string = error.to_string();
+        let bytes_to_copy = error_string.len().min(err_msg_len - 1);
+        if bytes_to_copy > 0 {
+            unsafe {
+                std::ptr::copy_nonoverlapping(
+                    error_string.as_ptr() as *const c_char,
+                    err_msg_out,
+                    bytes_to_copy,
+                );
+            }
+        }
+        // Always null-terminate if we have space
+        unsafe {
+            *err_msg_out.add(bytes_to_copy) = 0;
+        }
+    }
+}
+
+/// Creates a new Geneva client with explicit result semantics (no TLS needed).
+///
+/// On success: returns GenevaError::Success and writes a non-null handle into *out_handle.
+/// On failure: returns an error code and writes a diagnostic message into err_msg_out if provided.
+/// +/// # Safety +/// - config must be a valid pointer to a GenevaConfig struct +/// - out_handle must be a valid pointer to receive the client handle +/// - err_msg_out: optional buffer to receive error message (can be NULL) +/// - err_msg_len: size of err_msg_out buffer +/// - caller must eventually call geneva_client_free on the returned handle +#[no_mangle] +pub unsafe extern "C" fn geneva_client_new( + config: *const GenevaConfig, + out_handle: *mut *mut GenevaClientHandle, + err_msg_out: *mut c_char, + err_msg_len: usize, +) -> GenevaError { + // Validate pointers + if config.is_null() || out_handle.is_null() { + return GenevaError::NullPointer; + } + unsafe { *out_handle = ptr::null_mut() }; + + let config = unsafe { config.as_ref().unwrap() }; + + // Validate required fields with granular error codes + if config.endpoint.is_null() { + return GenevaError::MissingEndpoint; + } + if config.environment.is_null() { + return GenevaError::MissingEnvironment; + } + if config.account.is_null() { + return GenevaError::MissingAccount; + } + if config.namespace_name.is_null() { + return GenevaError::MissingNamespace; + } + if config.region.is_null() { + return GenevaError::MissingRegion; + } + if config.tenant.is_null() { + return GenevaError::MissingTenant; + } + if config.role_name.is_null() { + return GenevaError::MissingRoleName; + } + if config.role_instance.is_null() { + return GenevaError::MissingRoleInstance; + } + + // Convert C strings to Rust strings + let endpoint = match unsafe { c_str_to_string(config.endpoint, "endpoint") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + let environment = match unsafe { c_str_to_string(config.environment, "environment") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + let account = match unsafe { c_str_to_string(config.account, "account") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + let namespace = match unsafe { c_str_to_string(config.namespace_name, "namespace_name") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + let region = match unsafe { c_str_to_string(config.region, "region") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + let tenant = match unsafe { c_str_to_string(config.tenant, "tenant") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + let role_name = match unsafe { c_str_to_string(config.role_name, "role_name") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + let role_instance = match unsafe { c_str_to_string(config.role_instance, "role_instance") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + + // Auth method conversion + let auth_method = match config.auth_method { + 0 => { + // System-assigned Managed Identity + AuthMethod::SystemManagedIdentity + } + + 1 => { + // Certificate authentication: read fields from tagged union + let cert = unsafe { 
config.auth.cert }; + if cert.cert_path.is_null() { + return GenevaError::InvalidCertConfig; + } + if cert.cert_password.is_null() { + return GenevaError::InvalidCertConfig; + } + let cert_path = match unsafe { c_str_to_string(cert.cert_path, "cert_path") } { + Ok(s) => PathBuf::from(s), + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + let cert_password = + match unsafe { c_str_to_string(cert.cert_password, "cert_password") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + AuthMethod::Certificate { + path: cert_path, + password: cert_password, + } + } + + 2 => { + // Workload Identity authentication + let workload_identity = unsafe { config.auth.workload_identity }; + if workload_identity.resource.is_null() { + return GenevaError::InvalidWorkloadIdentityConfig; + } + let resource = match unsafe { c_str_to_string(workload_identity.resource, "resource") } + { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + AuthMethod::WorkloadIdentity { resource } + } + + 3 => { + // User-assigned Managed Identity by client ID + let user_msi = unsafe { config.auth.user_msi }; + if user_msi.client_id.is_null() { + return GenevaError::InvalidUserMsiConfig; + } + let client_id = match unsafe { c_str_to_string(user_msi.client_id, "client_id") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + AuthMethod::UserManagedIdentity { client_id } + } + + 4 => { + // User-assigned Managed Identity by object ID + let user_msi_objid = unsafe { config.auth.user_msi_objid }; + if user_msi_objid.object_id.is_null() { + return GenevaError::InvalidUserMsiByObjectIdConfig; + } + let object_id = match unsafe { c_str_to_string(user_msi_objid.object_id, "object_id") } + { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + AuthMethod::UserManagedIdentityByObjectId { object_id } + } + + 5 => { + // User-assigned Managed Identity by resource ID + let user_msi_resid = unsafe { config.auth.user_msi_resid }; + if user_msi_resid.resource_id.is_null() { + return GenevaError::InvalidUserMsiByResourceIdConfig; + } + let resource_id = + match unsafe { c_str_to_string(user_msi_resid.resource_id, "resource_id") } { + Ok(s) => s, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + }; + AuthMethod::UserManagedIdentityByResourceId { resource_id } + } + + _ => { + return GenevaError::InvalidAuthMethod; + } + }; + + // Parse optional MSI resource + let msi_resource = if !config.msi_resource.is_null() { + match unsafe { c_str_to_string(config.msi_resource, "msi_resource") } { + Ok(s) => Some(s), + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InvalidConfig; + } + } + } else { + None + }; + + // Build client config + let geneva_config = GenevaClientConfig { + endpoint, + environment, + account, + namespace, + region, + config_major_version: config.config_major_version, + auth_method, + tenant, + role_name, + role_instance, + msi_resource, + }; + + // Create client + let client = match GenevaClient::new(geneva_config) { + Ok(client) => client, + Err(e) => { + unsafe { 
write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::InitializationFailed; + } + }; + + let handle = GenevaClientHandle { + magic: GENEVA_HANDLE_MAGIC, + client, + }; + unsafe { *out_handle = Box::into_raw(Box::new(handle)) }; + GenevaError::Success +} + +/// Encode and compress logs into batches (synchronous) +/// +/// # Safety +/// - handle must be a valid pointer returned by geneva_client_new +/// - data must be a valid pointer to protobuf-encoded ExportLogsServiceRequest +/// - data_len must be the correct length of the data +/// - out_batches must be non-null; on success it receives a non-null pointer the caller must free with geneva_batches_free +/// - err_msg_out: optional buffer to receive error message (can be NULL) +/// - err_msg_len: size of err_msg_out buffer +#[no_mangle] +pub unsafe extern "C" fn geneva_encode_and_compress_logs( + handle: *mut GenevaClientHandle, + data: *const u8, + data_len: usize, + out_batches: *mut *mut EncodedBatchesHandle, + err_msg_out: *mut c_char, + err_msg_len: usize, +) -> GenevaError { + if out_batches.is_null() { + return GenevaError::NullPointer; + } + unsafe { *out_batches = ptr::null_mut() }; + + if handle.is_null() { + return GenevaError::NullPointer; + } + if data.is_null() { + return GenevaError::NullPointer; + } + if data_len == 0 { + return GenevaError::EmptyInput; + } + + // Validate handle first + let validation_result = unsafe { validate_handle(handle) }; + if validation_result != GenevaError::Success { + return validation_result; + } + + let handle_ref = unsafe { handle.as_ref().unwrap() }; + let data_slice = unsafe { std::slice::from_raw_parts(data, data_len) }; + + let logs_data: ExportLogsServiceRequest = match Message::decode(data_slice) { + Ok(data) => data, + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + return GenevaError::DecodeFailed; + } + }; + + let resource_logs = logs_data.resource_logs; + match handle_ref.client.encode_and_compress_logs(&resource_logs) { + Ok(batches) => { + let h = EncodedBatchesHandle { + magic: GENEVA_HANDLE_MAGIC, + batches, + }; + unsafe { *out_batches = Box::into_raw(Box::new(h)) }; + GenevaError::Success + } + Err(e) => { + unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) }; + GenevaError::InternalError + } + } +} + +/// Encode and compress spans into batches (synchronous) +/// +/// # Safety +/// - handle must be a valid pointer returned by geneva_client_new +/// - data must be a valid pointer to protobuf-encoded ExportTraceServiceRequest +/// - data_len must be the correct length of the data +/// - out_batches must be non-null; on success it receives a non-null pointer the caller must free with geneva_batches_free +/// - err_msg_out: optional buffer to receive error message (can be NULL) +/// - err_msg_len: size of err_msg_out buffer +#[no_mangle] +pub unsafe extern "C" fn geneva_encode_and_compress_spans( + handle: *mut GenevaClientHandle, + data: *const u8, + data_len: usize, + out_batches: *mut *mut EncodedBatchesHandle, + err_msg_out: *mut c_char, + err_msg_len: usize, +) -> GenevaError { + if out_batches.is_null() { + return GenevaError::NullPointer; + } + unsafe { *out_batches = ptr::null_mut() }; + + if handle.is_null() { + return GenevaError::NullPointer; + } + if data.is_null() { + return GenevaError::NullPointer; + } + if data_len == 0 { + return GenevaError::EmptyInput; + } + + // Validate handle first + let validation_result = unsafe { validate_handle(handle) }; + if validation_result != GenevaError::Success 
{
+        return validation_result;
+    }
+
+    let handle_ref = unsafe { handle.as_ref().unwrap() };
+    let data_slice = unsafe { std::slice::from_raw_parts(data, data_len) };
+
+    let spans_data: ExportTraceServiceRequest = match Message::decode(data_slice) {
+        Ok(data) => data,
+        Err(e) => {
+            unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) };
+            return GenevaError::DecodeFailed;
+        }
+    };
+
+    let resource_spans = spans_data.resource_spans;
+    match handle_ref.client.encode_and_compress_spans(&resource_spans) {
+        Ok(batches) => {
+            let h = EncodedBatchesHandle {
+                magic: GENEVA_HANDLE_MAGIC,
+                batches,
+            };
+            unsafe { *out_batches = Box::into_raw(Box::new(h)) };
+            GenevaError::Success
+        }
+        Err(e) => {
+            unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) };
+            GenevaError::InternalError
+        }
+    }
+}
+
+/// Returns the number of batches in the encoded batches handle
+///
+/// # Safety
+/// - batches must be a valid pointer returned by geneva_encode_and_compress_logs or geneva_encode_and_compress_spans, or null
+#[no_mangle]
+pub unsafe extern "C" fn geneva_batches_len(batches: *const EncodedBatchesHandle) -> usize {
+    // Validate batches
+    match unsafe { validate_handle(batches) } {
+        GenevaError::Success => {
+            // Safe to dereference after validation
+            let batches_ref = unsafe { batches.as_ref().unwrap() };
+            batches_ref.batches.len()
+        }
+        _ => 0, // Return 0 for invalid handles
+    }
+}
+
+/// Uploads a specific batch synchronously
+///
+/// # Safety
+/// - handle must be a valid pointer returned by geneva_client_new
+/// - batches must be a valid pointer returned by geneva_encode_and_compress_logs or geneva_encode_and_compress_spans
+/// - index must be less than the value returned by geneva_batches_len
+/// - err_msg_out: optional buffer to receive error message (can be NULL)
+/// - err_msg_len: size of err_msg_out buffer
+#[no_mangle]
+pub unsafe extern "C" fn geneva_upload_batch_sync(
+    handle: *mut GenevaClientHandle,
+    batches: *const EncodedBatchesHandle,
+    index: usize,
+    err_msg_out: *mut c_char,
+    err_msg_len: usize,
+) -> GenevaError {
+    // Validate client handle
+    match unsafe { validate_handle(handle) } {
+        GenevaError::Success => {}
+        error => return error,
+    }
+    // Validate batches
+    match unsafe { validate_handle(batches) } {
+        GenevaError::Success => {}
+        error => return error,
+    }
+
+    // Now we know both handles are valid, safe to dereference
+    let handle_ref = unsafe { handle.as_ref().unwrap() };
+    let batches_ref = unsafe { batches.as_ref().unwrap() };
+
+    if index >= batches_ref.batches.len() {
+        return GenevaError::IndexOutOfRange;
+    }
+
+    let batch = &batches_ref.batches[index];
+    let client = &handle_ref.client;
+    let res = runtime().block_on(async move { client.upload_batch(batch).await });
+    match res {
+        Ok(_) => GenevaError::Success,
+        Err(e) => {
+            unsafe { write_error_if_provided(err_msg_out, err_msg_len, &e) };
+            GenevaError::UploadFailed
+        }
+    }
+}
+
+/// Frees encoded batches handle
+///
+/// # Safety
+/// - batches must be a valid pointer returned by geneva_encode_and_compress_logs or geneva_encode_and_compress_spans, or null
+/// - batches must not be used after calling this function
+#[no_mangle]
+pub unsafe extern "C" fn geneva_batches_free(batches: *mut EncodedBatchesHandle) {
+    if !batches.is_null() {
+        unsafe { clear_handle_magic(batches) };
+        let _ = unsafe { Box::from_raw(batches) };
+    }
+}
+
+/// Frees a Geneva client handle
+///
+/// # Safety
+/// - client handle must be a valid pointer returned by geneva_client_new
+/// - client handle must not be used after calling this function
+#[no_mangle]
+pub unsafe extern "C" fn 
geneva_client_free(handle: *mut GenevaClientHandle) { + if !handle.is_null() { + unsafe { clear_handle_magic(handle) }; + let _ = unsafe { Box::from_raw(handle) }; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::ffi::CString; + + // Build a minimal unsigned JWT with the Endpoint claim and an exp. Matches what extract_endpoint_from_token expects. + #[allow(dead_code)] + fn generate_mock_jwt_and_expiry(endpoint: &str, ttl_secs: i64) -> (String, String) { + use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; + use chrono::{Duration, Utc}; + + let header = r#"{"alg":"none","typ":"JWT"}"#; + let exp = Utc::now() + Duration::seconds(ttl_secs); + let payload = format!(r#"{{"Endpoint":"{endpoint}","exp":{}}}"#, exp.timestamp()); + + let header_b64 = URL_SAFE_NO_PAD.encode(header.as_bytes()); + let payload_b64 = URL_SAFE_NO_PAD.encode(payload.as_bytes()); + let token = format!("{}.{}.{sig}", header_b64, payload_b64, sig = "dummy"); + + (token, exp.to_rfc3339()) + } + + #[test] + fn test_geneva_client_new_with_null_config() { + unsafe { + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(std::ptr::null(), &mut out, ptr::null_mut(), 0); + assert_eq!(rc as u32, GenevaError::NullPointer as u32); + assert!(out.is_null()); + } + } + + #[test] + fn test_upload_batch_sync_with_nulls() { + unsafe { + let result = + geneva_upload_batch_sync(ptr::null_mut(), ptr::null(), 0, ptr::null_mut(), 0); + assert_eq!(result as u32, GenevaError::NullPointer as u32); + } + } + + #[test] + fn test_encode_with_nulls() { + unsafe { + let mut out: *mut EncodedBatchesHandle = std::ptr::null_mut(); + let rc = geneva_encode_and_compress_logs( + ptr::null_mut(), + ptr::null(), + 0, + &mut out, + ptr::null_mut(), + 0, + ); + assert_eq!(rc as u32, GenevaError::NullPointer as u32); + assert!(out.is_null()); + } + } + + #[test] + fn test_geneva_client_free_with_null() { + unsafe { + // Should not crash + geneva_client_free(ptr::null_mut()); + } + } + + #[test] + fn test_null_field_validation() { + unsafe { + // Test with missing endpoint + let environment = CString::new("test").unwrap(); + let account = CString::new("testaccount").unwrap(); + let namespace = CString::new("testns").unwrap(); + let region = CString::new("testregion").unwrap(); + let tenant = CString::new("testtenant").unwrap(); + let role_name = CString::new("testrole").unwrap(); + let role_instance = CString::new("testinstance").unwrap(); + + let config = GenevaConfig { + endpoint: ptr::null(), // Missing endpoint should cause failure + environment: environment.as_ptr(), + account: account.as_ptr(), + namespace_name: namespace.as_ptr(), + region: region.as_ptr(), + config_major_version: 1, + auth_method: 0, // SystemManagedIdentity - union not used + tenant: tenant.as_ptr(), + role_name: role_name.as_ptr(), + role_instance: role_instance.as_ptr(), + // SAFETY: GenevaAuthConfig only contains raw pointers (*const c_char). + // Zero-initializing raw pointers creates null pointers, which is valid. + // The union is never accessed for SystemManagedIdentity (auth_method 0). 
+ auth: std::mem::zeroed(), + msi_resource: ptr::null(), + }; + + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(&config, &mut out, ptr::null_mut(), 0); + assert_eq!(rc as u32, GenevaError::MissingEndpoint as u32); + assert!(out.is_null()); + } + } + + #[test] + fn test_invalid_auth_method() { + unsafe { + let endpoint = CString::new("https://test.geneva.com").unwrap(); + let environment = CString::new("test").unwrap(); + let account = CString::new("testaccount").unwrap(); + let namespace = CString::new("testns").unwrap(); + let region = CString::new("testregion").unwrap(); + let tenant = CString::new("testtenant").unwrap(); + let role_name = CString::new("testrole").unwrap(); + let role_instance = CString::new("testinstance").unwrap(); + + let config = GenevaConfig { + endpoint: endpoint.as_ptr(), + environment: environment.as_ptr(), + account: account.as_ptr(), + namespace_name: namespace.as_ptr(), + region: region.as_ptr(), + config_major_version: 1, + auth_method: 99, // Invalid auth method - union not used + tenant: tenant.as_ptr(), + role_name: role_name.as_ptr(), + role_instance: role_instance.as_ptr(), + auth: std::mem::zeroed(), // Union not accessed for invalid auth method + msi_resource: ptr::null(), + }; + + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(&config, &mut out, ptr::null_mut(), 0); + assert_eq!(rc as u32, GenevaError::InvalidAuthMethod as u32); + assert!(out.is_null()); + } + } + + #[test] + fn test_certificate_auth_missing_cert_path() { + unsafe { + let endpoint = CString::new("https://test.geneva.com").unwrap(); + let environment = CString::new("test").unwrap(); + let account = CString::new("testaccount").unwrap(); + let namespace = CString::new("testns").unwrap(); + let region = CString::new("testregion").unwrap(); + let tenant = CString::new("testtenant").unwrap(); + let role_name = CString::new("testrole").unwrap(); + let role_instance = CString::new("testinstance").unwrap(); + + let config = GenevaConfig { + endpoint: endpoint.as_ptr(), + environment: environment.as_ptr(), + account: account.as_ptr(), + namespace_name: namespace.as_ptr(), + region: region.as_ptr(), + config_major_version: 1, + auth_method: 1, // Certificate auth + tenant: tenant.as_ptr(), + role_name: role_name.as_ptr(), + role_instance: role_instance.as_ptr(), + auth: GenevaAuthConfig { + cert: GenevaCertAuthConfig { + cert_path: ptr::null(), + cert_password: ptr::null(), + }, + }, + msi_resource: ptr::null(), + }; + + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(&config, &mut out, ptr::null_mut(), 0); + assert_eq!(rc as u32, GenevaError::InvalidCertConfig as u32); + assert!(out.is_null()); + } + } + + #[test] + fn test_workload_identity_auth_missing_resource() { + unsafe { + let endpoint = CString::new("https://test.geneva.com").unwrap(); + let environment = CString::new("test").unwrap(); + let account = CString::new("testaccount").unwrap(); + let namespace = CString::new("testns").unwrap(); + let region = CString::new("testregion").unwrap(); + let tenant = CString::new("testtenant").unwrap(); + let role_name = CString::new("testrole").unwrap(); + let role_instance = CString::new("testinstance").unwrap(); + + let config = GenevaConfig { + endpoint: endpoint.as_ptr(), + environment: environment.as_ptr(), + account: account.as_ptr(), + namespace_name: namespace.as_ptr(), + region: region.as_ptr(), + config_major_version: 1, + auth_method: 2, // Workload Identity + 
tenant: tenant.as_ptr(), + role_name: role_name.as_ptr(), + role_instance: role_instance.as_ptr(), + auth: GenevaAuthConfig { + workload_identity: GenevaWorkloadIdentityAuthConfig { + resource: ptr::null(), + }, + }, + msi_resource: ptr::null(), + }; + + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(&config, &mut out, ptr::null_mut(), 0); + assert_eq!(rc as u32, GenevaError::InvalidWorkloadIdentityConfig as u32); + assert!(out.is_null()); + } + } + + #[test] + fn test_user_msi_auth_missing_client_id() { + unsafe { + let endpoint = CString::new("https://test.geneva.com").unwrap(); + let environment = CString::new("test").unwrap(); + let account = CString::new("testaccount").unwrap(); + let namespace = CString::new("testns").unwrap(); + let region = CString::new("testregion").unwrap(); + let tenant = CString::new("testtenant").unwrap(); + let role_name = CString::new("testrole").unwrap(); + let role_instance = CString::new("testinstance").unwrap(); + + let config = GenevaConfig { + endpoint: endpoint.as_ptr(), + environment: environment.as_ptr(), + account: account.as_ptr(), + namespace_name: namespace.as_ptr(), + region: region.as_ptr(), + config_major_version: 1, + auth_method: 3, // User Managed Identity by client ID + tenant: tenant.as_ptr(), + role_name: role_name.as_ptr(), + role_instance: role_instance.as_ptr(), + auth: GenevaAuthConfig { + user_msi: GenevaUserManagedIdentityAuthConfig { + client_id: ptr::null(), + }, + }, + msi_resource: ptr::null(), + }; + + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(&config, &mut out, ptr::null_mut(), 0); + assert_eq!(rc as u32, GenevaError::InvalidUserMsiConfig as u32); + assert!(out.is_null()); + } + } + + #[test] + fn test_user_msi_auth_by_object_id_missing() { + unsafe { + let endpoint = CString::new("https://test.geneva.com").unwrap(); + let environment = CString::new("test").unwrap(); + let account = CString::new("testaccount").unwrap(); + let namespace = CString::new("testns").unwrap(); + let region = CString::new("testregion").unwrap(); + let tenant = CString::new("testtenant").unwrap(); + let role_name = CString::new("testrole").unwrap(); + let role_instance = CString::new("testinstance").unwrap(); + + let config = GenevaConfig { + endpoint: endpoint.as_ptr(), + environment: environment.as_ptr(), + account: account.as_ptr(), + namespace_name: namespace.as_ptr(), + region: region.as_ptr(), + config_major_version: 1, + auth_method: 4, // User Managed Identity by object ID + tenant: tenant.as_ptr(), + role_name: role_name.as_ptr(), + role_instance: role_instance.as_ptr(), + auth: GenevaAuthConfig { + user_msi_objid: GenevaUserManagedIdentityByObjectIdAuthConfig { + object_id: ptr::null(), + }, + }, + msi_resource: ptr::null(), + }; + + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(&config, &mut out, ptr::null_mut(), 0); + assert_eq!( + rc as u32, + GenevaError::InvalidUserMsiByObjectIdConfig as u32 + ); + assert!(out.is_null()); + } + } + + #[test] + fn test_user_msi_auth_by_resource_id_missing() { + unsafe { + let endpoint = CString::new("https://test.geneva.com").unwrap(); + let environment = CString::new("test").unwrap(); + let account = CString::new("testaccount").unwrap(); + let namespace = CString::new("testns").unwrap(); + let region = CString::new("testregion").unwrap(); + let tenant = CString::new("testtenant").unwrap(); + let role_name = CString::new("testrole").unwrap(); + let role_instance = 
CString::new("testinstance").unwrap(); + + let config = GenevaConfig { + endpoint: endpoint.as_ptr(), + environment: environment.as_ptr(), + account: account.as_ptr(), + namespace_name: namespace.as_ptr(), + region: region.as_ptr(), + config_major_version: 1, + auth_method: 5, // User Managed Identity by resource ID + tenant: tenant.as_ptr(), + role_name: role_name.as_ptr(), + role_instance: role_instance.as_ptr(), + auth: GenevaAuthConfig { + user_msi_resid: GenevaUserManagedIdentityByResourceIdAuthConfig { + resource_id: ptr::null(), + }, + }, + msi_resource: ptr::null(), + }; + + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(&config, &mut out, ptr::null_mut(), 0); + assert_eq!( + rc as u32, + GenevaError::InvalidUserMsiByResourceIdConfig as u32 + ); + assert!(out.is_null()); + } + } + + #[test] + fn test_certificate_auth_missing_cert_password() { + unsafe { + let endpoint = CString::new("https://test.geneva.com").unwrap(); + let environment = CString::new("test").unwrap(); + let account = CString::new("testaccount").unwrap(); + let namespace = CString::new("testns").unwrap(); + let region = CString::new("testregion").unwrap(); + let tenant = CString::new("testtenant").unwrap(); + let role_name = CString::new("testrole").unwrap(); + let role_instance = CString::new("testinstance").unwrap(); + let cert_path = CString::new("/path/to/cert.p12").unwrap(); + + let config = GenevaConfig { + endpoint: endpoint.as_ptr(), + environment: environment.as_ptr(), + account: account.as_ptr(), + namespace_name: namespace.as_ptr(), + region: region.as_ptr(), + config_major_version: 1, + auth_method: 1, // Certificate auth + tenant: tenant.as_ptr(), + role_name: role_name.as_ptr(), + role_instance: role_instance.as_ptr(), + auth: GenevaAuthConfig { + cert: GenevaCertAuthConfig { + cert_path: cert_path.as_ptr(), + cert_password: ptr::null(), + }, + }, + msi_resource: ptr::null(), + }; + + let mut out: *mut GenevaClientHandle = std::ptr::null_mut(); + let rc = geneva_client_new(&config, &mut out, ptr::null_mut(), 0); + assert_eq!(rc as u32, GenevaError::InvalidCertConfig as u32); + assert!(out.is_null()); + } + } + + #[test] + fn test_batches_len_with_null() { + unsafe { + let n = geneva_batches_len(ptr::null()); + assert_eq!(n, 0, "batches_len should return 0 for null pointer"); + } + } + + #[test] + fn test_batches_free_with_null() { + unsafe { + geneva_batches_free(ptr::null_mut()); + } + } + + // Integration-style test: encode via FFI then upload via FFI using MockAuth + Wiremock server. + // Uses otlp_builder to construct an ExportLogsServiceRequest payload. 
+ #[test] + #[cfg(feature = "mock_auth")] + fn test_encode_and_upload_with_mock_server() { + use otlp_builder::builder::build_otlp_logs_minimal; + use wiremock::matchers::method; + use wiremock::{Mock, MockServer, ResponseTemplate}; + + // Start mock server on the shared runtime used by the FFI code + let mock_server = runtime().block_on(async { MockServer::start().await }); + let ingestion_endpoint = mock_server.uri(); + + // Build JWT dynamically so the Endpoint claim matches the mock server, and compute a fresh expiry + let (auth_token, auth_token_expiry) = + generate_mock_jwt_and_expiry(&ingestion_endpoint, 24 * 3600); + + // Mock config service (GET) + runtime().block_on(async { + Mock::given(method("GET")) + .respond_with(ResponseTemplate::new(200).set_body_string(format!( + r#"{{ + "IngestionGatewayInfo": {{ + "Endpoint": "{ingestion_endpoint}", + "AuthToken": "{auth_token}", + "AuthTokenExpiryTime": "{auth_token_expiry}" + }}, + "StorageAccountKeys": [{{ + "AccountMonikerName": "testdiagaccount", + "AccountGroupName": "testgroup", + "IsPrimaryMoniker": true + }}], + "TagId": "test" + }}"# + ))) + .mount(&mock_server) + .await; + + // Mock ingestion service (POST) + Mock::given(method("POST")) + .respond_with( + ResponseTemplate::new(202).set_body_string(r#"{"ticket":"accepted"}"#), + ) + .mount(&mock_server) + .await; + }); + + // Build a real GenevaClient using MockAuth (no mTLS), then wrap it in the FFI handle. + let cfg = GenevaClientConfig { + endpoint: mock_server.uri(), + environment: "test".to_string(), + account: "test".to_string(), + namespace: "testns".to_string(), + region: "testregion".to_string(), + config_major_version: 1, + auth_method: AuthMethod::MockAuth, + tenant: "testtenant".to_string(), + role_name: "testrole".to_string(), + role_instance: "testinstance".to_string(), + msi_resource: None, + }; + let client = GenevaClient::new(cfg).expect("failed to create GenevaClient with MockAuth"); + + // Wrap into an FFI-compatible handle + let handle = GenevaClientHandle { + magic: GENEVA_HANDLE_MAGIC, + client, + }; + // Keep the boxed handle alive until we explicitly free it via FFI + let mut handle_box = Box::new(handle); + let handle_ptr: *mut GenevaClientHandle = &mut *handle_box; + + // Build minimal OTLP logs payload bytes using the test helper + let bytes = build_otlp_logs_minimal("TestEvent", "hello-world", Some(("rk", "rv"))); + + // Encode via FFI + let mut batches_ptr: *mut EncodedBatchesHandle = std::ptr::null_mut(); + let rc = unsafe { + geneva_encode_and_compress_logs( + handle_ptr, + bytes.as_ptr(), + bytes.len(), + &mut batches_ptr, + ptr::null_mut(), + 0, + ) + }; + assert_eq!(rc as u32, GenevaError::Success as u32, "encode failed"); + assert!( + !batches_ptr.is_null(), + "out_batches should be non-null on success" + ); + + // Validate number of batches and upload first batch via FFI (sync) + let len = unsafe { geneva_batches_len(batches_ptr) }; + assert!(len >= 1, "expected at least one encoded batch"); + + // Attempt upload (ignore return code; we will assert via recorded requests) + let _ = unsafe { + geneva_upload_batch_sync(handle_ptr, batches_ptr as *const _, 0, ptr::null_mut(), 0) + }; + + // Cleanup: free batches and client + unsafe { + geneva_batches_free(batches_ptr); + } + // Transfer ownership of handle_box to the FFI free function + let raw_handle = Box::into_raw(handle_box); + unsafe { + geneva_client_free(raw_handle); + } + + // Keep mock_server in scope until end of test + drop(mock_server); + } + + // Verifies batching groups by 
LogRecord.event_name: + // multiple different event_names in one request produce multiple batches, + // and each batch upload hits ingestion with the corresponding event query param. + #[test] + #[cfg(feature = "mock_auth")] + fn test_encode_batching_by_event_name_and_upload() { + use wiremock::http::Method; + use wiremock::matchers::method; + use wiremock::{Mock, MockServer, ResponseTemplate}; + + // Start mock server + let mock_server = runtime().block_on(async { MockServer::start().await }); + let ingestion_endpoint = mock_server.uri(); + let (auth_token, auth_token_expiry) = + generate_mock_jwt_and_expiry(&ingestion_endpoint, 24 * 3600); + + // Mock Geneva Config (GET) and Ingestion (POST) + runtime().block_on(async { + Mock::given(method("GET")) + .respond_with(ResponseTemplate::new(200).set_body_string(format!( + r#"{{ + "IngestionGatewayInfo": {{ + "Endpoint": "{ingestion_endpoint}", + "AuthToken": "{auth_token}", + "AuthTokenExpiryTime": "{auth_token_expiry}" + }}, + "StorageAccountKeys": [{{ + "AccountMonikerName": "testdiagaccount", + "AccountGroupName": "testgroup", + "IsPrimaryMoniker": true + }}], + "TagId": "test" + }}"# + ))) + .mount(&mock_server) + .await; + + Mock::given(method("POST")) + .respond_with( + ResponseTemplate::new(202).set_body_string(r#"{"ticket":"accepted"}"#), + ) + .mount(&mock_server) + .await; + }); + + // Build client with MockAuth + let cfg = GenevaClientConfig { + endpoint: mock_server.uri(), + environment: "test".to_string(), + account: "test".to_string(), + namespace: "testns".to_string(), + region: "testregion".to_string(), + config_major_version: 1, + auth_method: AuthMethod::MockAuth, + tenant: "testtenant".to_string(), + role_name: "testrole".to_string(), + role_instance: "testinstance".to_string(), + msi_resource: None, + }; + let client = GenevaClient::new(cfg).expect("failed to create GenevaClient with MockAuth"); + + // Wrap client into FFI handle + let mut handle_box = Box::new(GenevaClientHandle { + magic: GENEVA_HANDLE_MAGIC, + client, + }); + let handle_ptr: *mut GenevaClientHandle = &mut *handle_box; + + // Build ExportLogsServiceRequest with two different event_names + let log1 = opentelemetry_proto::tonic::logs::v1::LogRecord { + observed_time_unix_nano: 1_700_000_000_000_000_001, + event_name: "EventA".to_string(), + severity_number: 9, + ..Default::default() + }; + let log2 = opentelemetry_proto::tonic::logs::v1::LogRecord { + observed_time_unix_nano: 1_700_000_000_000_000_002, + event_name: "EventB".to_string(), + severity_number: 10, + ..Default::default() + }; + let scope_logs = opentelemetry_proto::tonic::logs::v1::ScopeLogs { + log_records: vec![log1, log2], + ..Default::default() + }; + let resource_logs = opentelemetry_proto::tonic::logs::v1::ResourceLogs { + scope_logs: vec![scope_logs], + ..Default::default() + }; + let req = ExportLogsServiceRequest { + resource_logs: vec![resource_logs], + }; + let bytes = req.encode_to_vec(); + + // Encode via FFI + let mut batches_ptr: *mut EncodedBatchesHandle = std::ptr::null_mut(); + let rc = unsafe { + geneva_encode_and_compress_logs( + handle_ptr, + bytes.as_ptr(), + bytes.len(), + &mut batches_ptr, + ptr::null_mut(), + 0, + ) + }; + assert_eq!(rc as u32, GenevaError::Success as u32, "encode failed"); + assert!(!batches_ptr.is_null()); + + // Expect 2 batches (EventA, EventB) + let len = unsafe { geneva_batches_len(batches_ptr) }; + assert_eq!(len, 2, "expected 2 batches grouped by event_name"); + + // Upload all batches + for i in 0..len { + let _ = unsafe { + 
geneva_upload_batch_sync(handle_ptr, batches_ptr as *const _, i, ptr::null_mut(), 0)
+            };
+        }
+
+        // Verify requests contain event=EventA and event=EventB in their URLs
+        // Poll until both POSTs appear or timeout to avoid flakiness
+        let (urls, has_a, has_b) = runtime().block_on(async {
+            use tokio::time::{sleep, Duration};
+            let mut last_urls: Vec<String> = Vec::new();
+            for _ in 0..200 {
+                let reqs = mock_server.received_requests().await.unwrap();
+                let posts: Vec<String> = reqs
+                    .iter()
+                    .filter(|r| r.method == Method::Post)
+                    .map(|r| r.url.to_string())
+                    .collect();
+
+                let has_a = posts.iter().any(|u| u.contains("event=EventA"));
+                let has_b = posts.iter().any(|u| u.contains("event=EventB"));
+                if has_a && has_b {
+                    return (posts, true, true);
+                }
+
+                if !posts.is_empty() {
+                    last_urls = posts.clone();
+                }
+
+                sleep(Duration::from_millis(20)).await;
+            }
+
+            if last_urls.is_empty() {
+                let reqs = mock_server.received_requests().await.unwrap();
+                last_urls = reqs.into_iter().map(|r| r.url.to_string()).collect();
+            }
+            let has_a = last_urls.iter().any(|u| u.contains("event=EventA"));
+            let has_b = last_urls.iter().any(|u| u.contains("event=EventB"));
+            (last_urls, has_a, has_b)
+        });
+        assert!(
+            has_a,
+            "Expected request containing event=EventA; got: {urls:?}"
+        );
+        assert!(
+            has_b,
+            "Expected request containing event=EventB; got: {urls:?}"
+        );
+
+        // Cleanup
+        unsafe { geneva_batches_free(batches_ptr) };
+        let raw_handle = Box::into_raw(handle_box);
+        unsafe { geneva_client_free(raw_handle) };
+        drop(mock_server);
+    }
+}
diff --git a/opentelemetry-exporter-geneva/geneva-uploader/CHANGELOG.md b/opentelemetry-exporter-geneva/geneva-uploader/CHANGELOG.md
new file mode 100644
index 000000000..4d115c08f
--- /dev/null
+++ b/opentelemetry-exporter-geneva/geneva-uploader/CHANGELOG.md
@@ -0,0 +1,26 @@
+# Changelog
+
+## [Unreleased]
+
+### Changed
+- Updated `azure_core` dependency from 0.27.0 to 0.29.0
+- Updated `azure_identity` dependency from 0.27.0 to 0.29.0
+
+## [0.3.0] - 2025-10-17
+
+### Changed
+- Minor internal updates
+
+## [0.2.0] - 2025-09-24
+
+### Added
+- HTTP/1.1 upload support with keep-alive connections
+- Support for Span upload
+
+### Changed
+- Bump opentelemetry-proto version to 0.31
+
+## [0.1.0] - 2025-08-18
+
+### Added
+- Initial release of geneva-uploader
diff --git a/opentelemetry-exporter-geneva/geneva-uploader/Cargo.toml b/opentelemetry-exporter-geneva/geneva-uploader/Cargo.toml
index a76397ac1..0e2983ab5 100644
--- a/opentelemetry-exporter-geneva/geneva-uploader/Cargo.toml
+++ b/opentelemetry-exporter-geneva/geneva-uploader/Cargo.toml
@@ -1,26 +1,34 @@
 [package]
 name = "geneva-uploader"
-version = "0.1.0"
+description = "Upload telemetry data to Geneva logs service"
+version = "0.3.0"
 edition = "2021"
+homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-exporter-geneva/geneva-uploader"
+repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-exporter-geneva/geneva-uploader"
+rust-version = "1.85.0"
+keywords = ["opentelemetry", "geneva", "logs", "uploader"]
 license = "Apache-2.0"
-rust-version = "1.75.0"
 
 [dependencies]
-opentelemetry-proto = {workspace = true, default-features = false, features = ["logs", "gen-tonic-messages"]}
+opentelemetry-proto = {workspace = true, default-features = false, features = ["logs", "trace", "gen-tonic-messages"]}
 base64 = "0.22"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = { version = "1.0", features = ["raw_value"] }
 uuid = { 
version = "1.0", features = ["v4"] } # TODO - support both native-tls and rustls -reqwest = { version = "0.12", features = ["native-tls", "native-tls-alpn"]} -native-tls = "0.2" +# http2 feature is required by hyper-util even when using http1_only() +reqwest = { version = "0.12", features = ["native-tls", "native-tls-alpn", "http2"], default-features = false} +native-tls = "0.2" thiserror = "2.0" chrono = "0.4" url = "2.2" md5 = "0.8.0" hex = "0.4" lz4_flex = { version = "0.11", features = ["safe-encode"], default-features = false } -futures = "0.3" +# Azure Identity dependencies - using public crates.io versions +azure_identity = "0.29" +azure_core = "0.29" +tracing = "0.1" [features] self_signed_certs = [] # Empty by default for security @@ -32,7 +40,7 @@ tokio = { version = "1", features = ["full"] } rcgen = "0.14" openssl = { version = "0.10", features = ["vendored"] } tempfile = "3.5" -wiremock = "0.6" +wiremock = "=0.5.22" futures = "0.3" num_cpus = "1.16" lz4_flex = { version = "0.11" } diff --git a/opentelemetry-exporter-geneva/geneva-uploader/README.md b/opentelemetry-exporter-geneva/geneva-uploader/README.md new file mode 100644 index 000000000..3a9d9c277 --- /dev/null +++ b/opentelemetry-exporter-geneva/geneva-uploader/README.md @@ -0,0 +1,5 @@ +# geneva-uploader + +The geneva-uploader is designed for Microsoft products to send data to public-facing end-points which route to Microsoft's internal data pipeline. It is not meant to be used outside of Microsoft products and is open sourced to demonstrate best practices and to be transparent about what is being collected. + +geneva-uploader: Core uploader responsible for sending telemetry data to the Geneva backend. diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/bench.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/bench.rs index 78e18c2b7..169b19eca 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/bench.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/bench.rs @@ -178,8 +178,9 @@ mod benchmarks { .collect(); b.iter(|| { - let res = - encoder.encode_log_batch(black_box(logs.iter()), black_box(metadata)); + let res = encoder + .encode_log_batch(black_box(logs.iter()), black_box(metadata)) + .unwrap(); black_box(res); // double sure the return value is generated }); }, @@ -207,7 +208,9 @@ mod benchmarks { b.iter(|| { let res = black_box( - encoder.encode_log_batch(black_box(logs.iter()), black_box(metadata)), + encoder + .encode_log_batch(black_box(logs.iter()), black_box(metadata)) + .unwrap(), ); black_box(res); // double sure the return value is generated }); @@ -232,7 +235,9 @@ mod benchmarks { b.iter(|| { let res = black_box( - encoder.encode_log_batch(black_box(logs.iter()), black_box(metadata)), + encoder + .encode_log_batch(black_box(logs.iter()), black_box(metadata)) + .unwrap(), ); black_box(res); }); diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/client.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/client.rs index 72bf5dd42..195e44630 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/client.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/client.rs @@ -1,12 +1,23 @@ //! High-level GenevaClient for user code. Wraps config_service and ingestion_service. use crate::config_service::client::{AuthMethod, GenevaConfigClient, GenevaConfigClientConfig}; +// ManagedIdentitySelector removed; no re-export needed. 
use crate::ingestion_service::uploader::{GenevaUploader, GenevaUploaderConfig};
-use crate::payload_encoder::lz4_chunked_compression::lz4_chunked_compression;
 use crate::payload_encoder::otlp_encoder::OtlpEncoder;
-use futures::stream::{self, StreamExt};
 use opentelemetry_proto::tonic::logs::v1::ResourceLogs;
+use opentelemetry_proto::tonic::trace::v1::ResourceSpans;
 use std::sync::Arc;
+use tracing::{debug, info};
+
+/// Public batch type (already LZ4 chunked compressed).
+/// Produced by `OtlpEncoder::encode_log_batch` and returned to callers.
+#[derive(Debug, Clone)]
+pub struct EncodedBatch {
+    pub event_name: String,
+    pub data: Vec<u8>,
+    pub metadata: crate::payload_encoder::central_blob::BatchMetadata,
+    pub row_count: usize,
+}
 
 /// Configuration for GenevaClient (user-facing)
 #[derive(Clone, Debug)]
@@ -21,9 +32,8 @@ pub struct GenevaClientConfig {
     pub tenant: String,
     pub role_name: String,
     pub role_instance: String,
-    /// Maximum number of concurrent uploads. If None, defaults to number of CPU cores.
-    pub max_concurrent_uploads: Option<usize>,
-    // Add event name/version here if constant, or per-upload if you want them per call.
+    pub msi_resource: Option<String>, // Required for Managed Identity variants
+    // Add event name/version here if constant, or per-upload if you want them per call.
 }
 
 /// Main user-facing client for Geneva ingestion.
@@ -32,13 +42,41 @@ pub struct GenevaClient {
     uploader: Arc<GenevaUploader>,
     encoder: OtlpEncoder,
     metadata: String,
-    max_concurrent_uploads: usize,
 }
 
 impl GenevaClient {
-    /// Construct a new client with minimal configuration. Fetches and caches ingestion info as needed.
-    pub async fn new(cfg: GenevaClientConfig) -> Result<Self, String> {
-        // Build config client config
+    pub fn new(cfg: GenevaClientConfig) -> Result<Self, String> {
+        info!(
+            name: "client.new",
+            target: "geneva-uploader",
+            endpoint = %cfg.endpoint,
+            namespace = %cfg.namespace,
+            account = %cfg.account,
+            "Initializing GenevaClient"
+        );
+
+        // Validate MSI resource presence for managed identity variants
+        match cfg.auth_method {
+            AuthMethod::SystemManagedIdentity
+            | AuthMethod::UserManagedIdentity { .. }
+            | AuthMethod::UserManagedIdentityByObjectId { .. }
+            | AuthMethod::UserManagedIdentityByResourceId { .. } => {
+                if cfg.msi_resource.is_none() {
+                    debug!(
+                        name: "client.new.validate_msi_resource",
+                        target: "geneva-uploader",
+                        "Validation failed: msi_resource must be provided for managed identity auth"
+                    );
+                    return Err(
+                        "msi_resource must be provided for managed identity auth".to_string()
+                    );
+                }
+            }
+            AuthMethod::Certificate { .. } => {}
+            AuthMethod::WorkloadIdentity { .. } => {}
+            #[cfg(feature = "mock_auth")]
+            AuthMethod::MockAuth => {}
+        }
         let config_client_config = GenevaConfigClientConfig {
             endpoint: cfg.endpoint,
             environment: cfg.environment.clone(),
@@ -47,27 +85,31 @@ impl GenevaClient {
             region: cfg.region,
             config_major_version: cfg.config_major_version,
             auth_method: cfg.auth_method,
+            msi_resource: cfg.msi_resource,
         };
-        let config_client = Arc::new(
-            GenevaConfigClient::new(config_client_config)
-                .map_err(|e| format!("GenevaConfigClient init failed: {e}"))?,
-        );
+        let config_client =
+            Arc::new(GenevaConfigClient::new(config_client_config).map_err(|e| {
+                debug!(
+                    name: "client.new.config_client_init",
+                    target: "geneva-uploader",
+                    error = %e,
+                    "GenevaConfigClient init failed"
+                );
+                format!("GenevaConfigClient init failed: {e}")
+            })?);
 
         let source_identity = format!(
             "Tenant={}/Role={}/RoleInstance={}",
             cfg.tenant, cfg.role_name, cfg.role_instance
         );
 
-        // Define config_version before using it
         let config_version = format!("Ver{}v0", cfg.config_major_version);
 
-        // Metadata string for the blob
         let metadata = format!(
            "namespace={}/eventVersion={}/tenant={}/role={}/roleinstance={}",
            cfg.namespace, config_version, cfg.tenant, cfg.role_name, cfg.role_instance,
        );
 
-        // Uploader config
         let uploader_config = GenevaUploaderConfig {
             namespace: cfg.namespace.clone(),
             source_identity,
@@ -75,60 +117,126 @@ impl GenevaClient {
             config_version: config_version.clone(),
         };
 
-        let uploader = GenevaUploader::from_config_client(config_client, uploader_config)
-            .await
-            .map_err(|e| format!("GenevaUploader init failed: {e}"))?;
-        let max_concurrent_uploads = cfg.max_concurrent_uploads.unwrap_or_else(|| {
-            // TODO - Use a more sophisticated method to determine concurrency if needed
-            // currently using number of CPU cores
-            std::thread::available_parallelism()
-                .map(|p| p.get())
-                .unwrap_or(4)
-        });
+        let uploader =
+            GenevaUploader::from_config_client(config_client, uploader_config).map_err(|e| {
+                debug!(
+                    name: "client.new.uploader_init",
+                    target: "geneva-uploader",
+                    error = %e,
+                    "GenevaUploader init failed"
+                );
+                format!("GenevaUploader init failed: {e}")
+            })?;
+
+        info!(
+            name: "client.new.complete",
+            target: "geneva-uploader",
+            "GenevaClient initialized successfully"
+        );
+
         Ok(Self {
             uploader: Arc::new(uploader),
             encoder: OtlpEncoder::new(),
             metadata,
-            max_concurrent_uploads,
         })
     }
 
-    /// Upload OTLP logs (as ResourceLogs).
-    pub async fn upload_logs(&self, logs: &[ResourceLogs]) -> Result<(), String> {
+    /// Encode OTLP logs into LZ4 chunked compressed batches.
+    pub fn encode_and_compress_logs(
+        &self,
+        logs: &[ResourceLogs],
+    ) -> Result<Vec<EncodedBatch>, String> {
+        debug!(
+            name: "client.encode_and_compress_logs",
+            target: "geneva-uploader",
+            resource_logs_count = logs.len(),
+            "Encoding and compressing resource logs"
+        );
+
         let log_iter = logs
             .iter()
             .flat_map(|resource_log| resource_log.scope_logs.iter())
             .flat_map(|scope_log| scope_log.log_records.iter());
 
-        // TODO: Investigate using tokio::spawn_blocking for event encoding to avoid blocking
-        // the async executor thread for CPU-intensive work.
-        let blobs = self.encoder.encode_log_batch(log_iter, &self.metadata);
-
-        // create an iterator that yields futures for each upload
-        let upload_futures = blobs.into_iter().map(|batch| {
-            async move {
-                // TODO: Investigate using tokio::spawn_blocking for LZ4 compression to avoid blocking
-                // the async executor thread for CPU-intensive work.
-                let compressed_blob = lz4_chunked_compression(&batch.data).map_err(|e| {
-                    format!("LZ4 compression failed: {e} Event: {}", batch.event_name)
-                })?;
-                self.uploader
-                    .upload(compressed_blob, &batch.event_name, &batch.metadata)
-                    .await
-                    .map(|_| ())
-                    .map_err(|e| format!("Geneva upload failed: {e} Event: {}", batch.event_name))
-            }
-        });
-        // Execute uploads concurrently with configurable concurrency
-        let errors: Vec<String> = stream::iter(upload_futures)
-            .buffer_unordered(self.max_concurrent_uploads)
-            .filter_map(|result| async move { result.err() })
-            .collect()
-            .await;
-
-        // Return error if any uploads failed
-        if !errors.is_empty() {
-            return Err(format!("Upload failures: {}", errors.join("; ")));
-        }
-        Ok(())
+
+        self.encoder
+            .encode_log_batch(log_iter, &self.metadata)
+            .map_err(|e| {
+                debug!(
+                    name: "client.encode_and_compress_logs.error",
+                    target: "geneva-uploader",
+                    error = %e,
+                    "Log compression failed"
+                );
+                format!("Compression failed: {e}")
+            })
+    }
+
+    /// Encode OTLP spans into LZ4 chunked compressed batches.
+    pub fn encode_and_compress_spans(
+        &self,
+        spans: &[ResourceSpans],
+    ) -> Result<Vec<EncodedBatch>, String> {
+        debug!(
+            name: "client.encode_and_compress_spans",
+            target: "geneva-uploader",
+            resource_spans_count = spans.len(),
+            "Encoding and compressing resource spans"
+        );
+
+        let span_iter = spans
+            .iter()
+            .flat_map(|resource_span| resource_span.scope_spans.iter())
+            .flat_map(|scope_span| scope_span.spans.iter());
+
+        self.encoder
+            .encode_span_batch(span_iter, &self.metadata)
+            .map_err(|e| {
+                debug!(
+                    name: "client.encode_and_compress_spans.error",
+                    target: "geneva-uploader",
+                    error = %e,
+                    "Span compression failed"
+                );
+                format!("Compression failed: {e}")
+            })
+    }
+
+    /// Upload a single compressed batch.
+    /// This allows for granular control over uploads, including custom retry logic for individual batches.
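+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of the intended encode-then-upload flow, assuming an
+    /// initialized `GenevaClient` and a slice of OTLP `resource_logs`:
+    ///
+    /// ```ignore
+    /// let batches = client.encode_and_compress_logs(&resource_logs)?;
+    /// for batch in &batches {
+    ///     // Each batch can be retried independently if its upload fails.
+    ///     client.upload_batch(batch).await?;
+    /// }
+    /// ```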
+ pub async fn upload_batch(&self, batch: &EncodedBatch) -> Result<(), String> { + debug!( + name: "client.upload_batch", + target: "geneva-uploader", + event_name = %batch.event_name, + size = batch.data.len(), + "Uploading batch" + ); + + self.uploader + .upload( + batch.data.clone(), + &batch.event_name, + &batch.metadata, + batch.row_count, + ) + .await + .map(|_| { + debug!( + name: "client.upload_batch.success", + target: "geneva-uploader", + event_name = %batch.event_name, + "Successfully uploaded batch" + ); + }) + .map_err(|e| { + debug!( + name: "client.upload_batch.error", + target: "geneva-uploader", + event_name = %batch.event_name, + error = %e, + "Geneva upload failed" + ); + format!("Geneva upload failed: {e} Event: {}", batch.event_name) + }) } } diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/config_service/client.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/config_service/client.rs index 123b814af..474f99309 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/config_service/client.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/config_service/client.rs @@ -1,13 +1,14 @@ -// Geneva Config Client with TLS (PKCS#12) and TODO: Managed Identity support +// Geneva Config Client with TLS (PKCS#12) and Azure Workload Identity support TODO: Azure Arc support use base64::{engine::general_purpose, Engine as _}; use reqwest::{ - header::{HeaderMap, HeaderValue, ACCEPT, USER_AGENT}, + header::{HeaderMap, HeaderValue, ACCEPT, AUTHORIZATION, USER_AGENT}, Client, }; use serde::Deserialize; use std::time::Duration; use thiserror::Error; +use tracing::{debug, info}; use uuid::Uuid; use chrono::{DateTime, Utc}; @@ -18,11 +19,20 @@ use std::fs; use std::path::PathBuf; use std::sync::RwLock; +// Azure Identity imports for MSI and Workload Identity authentication +use azure_core::credentials::TokenCredential; +use azure_identity::{ + ManagedIdentityCredential, ManagedIdentityCredentialOptions, UserAssignedId, + WorkloadIdentityCredential, +}; + /// Authentication methods for the Geneva Config Client. /// -/// The client supports two authentication methods: -/// - Certificate-based authentication using PKCS#12 (.p12) files -/// - Managed Identity (Azure) - planned for future implementation +/// The client supports the following authentication methods: +/// - Certificate-based authentication (mTLS) using PKCS#12 (.p12) files +/// - Azure Managed Identity (System-assigned or User-assigned) +/// - Azure Workload Identity (Federated Identity for Kubernetes) +/// - Mock authentication for testing (feature-gated) /// /// # Certificate Format /// Certificates should be in PKCS#12 (.p12) format for client TLS authentication. 
@@ -53,10 +63,27 @@ pub enum AuthMethod { /// * `path` - Path to the PKCS#12 (.p12) certificate file /// * `password` - Password to decrypt the PKCS#12 file Certificate { path: PathBuf, password: String }, - /// Azure Managed Identity authentication + /// System-assigned managed identity (auto-detected) + SystemManagedIdentity, + /// User-assigned managed identity by client ID + UserManagedIdentity { client_id: String }, + /// User-assigned managed identity by object ID + UserManagedIdentityByObjectId { object_id: String }, + /// User-assigned managed identity by resource ID + UserManagedIdentityByResourceId { resource_id: String }, + /// Azure Workload Identity authentication (Federated Identity for Kubernetes) + /// + /// The following environment variables must be set in the pod spec: + /// * `AZURE_CLIENT_ID` - Azure AD Application (client) ID (set explicitly in pod env) + /// * `AZURE_TENANT_ID` - Azure AD Tenant ID (set explicitly in pod env) + /// * `AZURE_FEDERATED_TOKEN_FILE` - Path to service account token file (auto-injected by workload identity webhook) /// - /// Note(TODO): This is not yet implemented. - ManagedIdentity, + /// These variables are automatically read by the Azure Identity SDK at runtime. + /// + /// # Arguments + /// * `resource` - Azure AD resource URI for token acquisition + /// (e.g., for Azure Public Cloud) + WorkloadIdentity { resource: String }, #[cfg(feature = "mock_auth")] MockAuth, // No authentication, used for testing purposes } @@ -64,14 +91,16 @@ pub enum AuthMethod { #[derive(Debug, Error)] pub(crate) enum GenevaConfigClientError { // Authentication-related errors - #[error("Authentication method not implemented: {0}")] - AuthMethodNotImplemented(String), #[error("Missing Auth Info: {0}")] AuthInfoNotFound(String), #[error("Invalid or malformed JWT token: {0}")] JwtTokenError(String), #[error("Certificate error: {0}")] Certificate(String), + #[error("Workload Identity authentication error: {0}")] + WorkloadIdentityAuth(String), + #[error("MSI authentication error: {0}")] + MsiAuth(String), // Networking / HTTP / TLS #[error("HTTP error: {0}")] @@ -129,6 +158,7 @@ pub(crate) struct GenevaConfigClientConfig { pub(crate) region: String, pub(crate) config_major_version: u32, pub(crate) auth_method: AuthMethod, // agent_identity and agent_version are hardcoded for now + pub(crate) msi_resource: Option, // Required when using any Managed Identity variant } #[allow(dead_code)] @@ -192,7 +222,6 @@ pub(crate) struct GenevaConfigClient { precomputed_url_prefix: String, agent_identity: String, agent_version: String, - static_headers: HeaderMap, } impl fmt::Debug for GenevaConfigClient { @@ -202,7 +231,6 @@ impl fmt::Debug for GenevaConfigClient { .field("precomputed_url_prefix", &self.precomputed_url_prefix) .field("agent_identity", &self.agent_identity) .field("agent_version", &self.agent_version) - .field("static_headers", &self.static_headers) .finish() } } @@ -225,54 +253,124 @@ impl GenevaConfigClient { /// * `GenevaConfigClientError::AuthMethodNotImplemented` - If the specified authentication method is not yet supported #[allow(dead_code)] pub(crate) fn new(config: GenevaConfigClientConfig) -> Result { + info!( + name: "config_client.new", + target: "geneva-uploader", + endpoint = %config.endpoint, + account = %config.account, + namespace = %config.namespace, + "Initializing GenevaConfigClient" + ); + + let agent_identity = "GenevaUploader"; + let agent_version = "0.1"; + let mut client_builder = Client::builder() .http1_only() - 
.timeout(Duration::from_secs(30)); //TODO - make this configurable + .timeout(Duration::from_secs(30)) //TODO - make this configurable + .default_headers(Self::build_static_headers(agent_identity, agent_version)); match &config.auth_method { // TODO: Certificate auth would be removed in favor of managed identity., // This is for testing, so we can use self-signed certs, and password in plain text. AuthMethod::Certificate { path, password } => { + info!( + name: "config_client.new.certificate_auth", + target: "geneva-uploader", + "Using Certificate authentication" + ); // Read the PKCS#12 file - let p12_bytes = fs::read(path) - .map_err(|e| GenevaConfigClientError::Certificate(e.to_string()))?; - let identity = Identity::from_pkcs12(&p12_bytes, password) - .map_err(|e| GenevaConfigClientError::Certificate(e.to_string()))?; + let p12_bytes = fs::read(path).map_err(|e| { + debug!( + name: "config_client.new.certificate_read_error", + target: "geneva-uploader", + error = %e, + "Failed to read certificate file" + ); + GenevaConfigClientError::Certificate(e.to_string()) + })?; + let identity = Identity::from_pkcs12(&p12_bytes, password).map_err(|e| { + debug!( + name: "config_client.new.certificate_parse_error", + target: "geneva-uploader", + error = %e, + "Failed to parse PKCS#12 certificate" + ); + GenevaConfigClientError::Certificate(e.to_string()) + })?; //TODO - use use_native_tls instead of preconfigured_tls once we no longer need self-signed certs // and TLS 1.2 as the exclusive protocol. let tls_connector = configure_tls_connector(native_tls::TlsConnector::builder(), identity) .build() - .map_err(|e| GenevaConfigClientError::Certificate(e.to_string()))?; + .map_err(|e| { + debug!( + name: "config_client.new.tls_connector_error", + target: "geneva-uploader", + error = %e, + "Failed to build TLS connector" + ); + GenevaConfigClientError::Certificate(e.to_string()) + })?; client_builder = client_builder.use_preconfigured_tls(tls_connector); } - AuthMethod::ManagedIdentity => { - return Err(GenevaConfigClientError::AuthMethodNotImplemented( - "Managed Identity authentication is not implemented yet".into(), - )); + AuthMethod::WorkloadIdentity { .. } => { + info!( + name: "config_client.new.workload_identity_auth", + target: "geneva-uploader", + "Using Workload Identity authentication" + ); + // No special HTTP client configuration needed for Workload Identity + // Authentication is done via Bearer token in request headers + } + AuthMethod::SystemManagedIdentity + | AuthMethod::UserManagedIdentity { .. } + | AuthMethod::UserManagedIdentityByObjectId { .. } + | AuthMethod::UserManagedIdentityByResourceId { .. } => { + info!( + name: "config_client.new.managed_identity_auth", + target: "geneva-uploader", + "Using Managed Identity authentication" + ); + /* no special HTTP client changes needed */ } #[cfg(feature = "mock_auth")] AuthMethod::MockAuth => { + debug!( + name: "config_client.new.mock_auth_warning", + target: "geneva-uploader", + "WARNING: Using MockAuth for GenevaConfigClient. This should only be used in tests!" + ); // Mock authentication for testing purposes, no actual auth needed // Just use the default client builder eprintln!("WARNING: Using MockAuth for GenevaConfigClient. 
This should only be used in tests!"); } } - let agent_identity = "GenevaUploader"; - let agent_version = "0.1"; - let static_headers = Self::build_static_headers(agent_identity, agent_version); - let identity = format!("Tenant=Default/Role=GcsClient/RoleInstance={agent_identity}"); let encoded_identity = general_purpose::STANDARD.encode(&identity); let version_str = format!("Ver{0}v0", config.config_major_version); + // Use different API endpoints based on authentication method + // Certificate auth uses "api", MSI auth and Workload Identity use "userapi" + let api_path = match &config.auth_method { + AuthMethod::Certificate { .. } => "api", + AuthMethod::SystemManagedIdentity + | AuthMethod::UserManagedIdentity { .. } + | AuthMethod::UserManagedIdentityByObjectId { .. } + | AuthMethod::UserManagedIdentityByResourceId { .. } + | AuthMethod::WorkloadIdentity { .. } => "userapi", + #[cfg(feature = "mock_auth")] + AuthMethod::MockAuth => "api", // treat mock like certificate path for URL shape + }; + let mut pre_url = String::with_capacity(config.endpoint.len() + 200); write!( &mut pre_url, - "{}/api/agent/v3/{}/{}/MonitoringStorageKeys/?Namespace={}&Region={}&Identity={}&OSType={}&ConfigMajorVersion={}", + "{}/{}/agent/v3/{}/{}/MonitoringStorageKeys/?Namespace={}&Region={}&Identity={}&OSType={}&ConfigMajorVersion={}", config.endpoint.trim_end_matches('/'), + api_path, config.environment, config.account, config.namespace, @@ -291,7 +389,6 @@ impl GenevaConfigClient { precomputed_url_prefix: pre_url, agent_identity: agent_identity.to_string(), // TODO make this configurable agent_version: "1.0".to_string(), // TODO make this configurable - static_headers, }) } @@ -310,6 +407,177 @@ impl GenevaConfigClient { headers } + /// Get Azure AD token using Workload Identity (Federated Identity) + /// + /// Reads AZURE_CLIENT_ID, AZURE_TENANT_ID, and AZURE_FEDERATED_TOKEN_FILE from environment variables. 
+    /// In Kubernetes:
+    /// - AZURE_CLIENT_ID and AZURE_TENANT_ID must be set explicitly in the pod spec
+    /// - AZURE_FEDERATED_TOKEN_FILE is auto-injected by the workload identity webhook
+    async fn get_workload_identity_token(&self) -> Result<String> {
+        debug!(
+            name: "config_client.get_workload_identity_token",
+            target: "geneva-uploader",
+            "Acquiring Workload Identity token"
+        );
+
+        let resource = match &self.config.auth_method {
+            AuthMethod::WorkloadIdentity { resource } => resource,
+            _ => {
+                debug!(
+                    name: "config_client.get_workload_identity_token.invalid_auth_method",
+                    target: "geneva-uploader",
+                    "get_workload_identity_token called but auth method is not WorkloadIdentity"
+                );
+                return Err(GenevaConfigClientError::WorkloadIdentityAuth(
+                    "get_workload_identity_token called but auth method is not WorkloadIdentity"
+                        .to_string(),
+                ));
+            }
+        };
+
+        // TODO: Extract scope generation logic into helper function shared with get_msi_token()
+        let base = resource.trim_end_matches("/.default").trim_end_matches('/');
+        let mut scope_candidates: Vec<String> = vec![format!("{base}/.default"), base.to_string()];
+        // TODO - below check is not required, as we already trim "/"
+        if !base.ends_with('/') {
+            scope_candidates.push(format!("{base}/"));
+        }
+
+        // TODO: Consider caching WorkloadIdentityCredential if profiling shows credential creation overhead
+        // Pass None to let azure_identity crate read AZURE_CLIENT_ID, AZURE_TENANT_ID,
+        // and AZURE_FEDERATED_TOKEN_FILE from environment variables automatically
+        let credential = WorkloadIdentityCredential::new(None).map_err(|e| {
+            debug!(
+                name: "config_client.get_workload_identity_token.create_credential_error",
+                target: "geneva-uploader",
+                error = %e,
+                "Failed to create WorkloadIdentityCredential"
+            );
+            GenevaConfigClientError::WorkloadIdentityAuth(format!(
+                "Failed to create WorkloadIdentityCredential. Ensure AZURE_CLIENT_ID, AZURE_TENANT_ID, and AZURE_FEDERATED_TOKEN_FILE environment variables are set: {e}"
+            ))
+        })?;
+
+        let mut last_err: Option<String> = None;
+        for scope in &scope_candidates {
+            //TODO - It looks like the get_token API accepts a slice of &str
+            match credential.get_token(&[scope.as_str()], None).await {
+                Ok(token) => {
+                    info!(
+                        name: "config_client.get_workload_identity_token.success",
+                        target: "geneva-uploader",
+                        "Successfully acquired Workload Identity token"
+                    );
+                    return Ok(token.token.secret().to_string());
+                }
+                Err(e) => last_err = Some(e.to_string()),
+            }
+        }
+
+        let detail = last_err.unwrap_or_else(|| "no error detail".into());
+        debug!(
+            name: "config_client.get_workload_identity_token.failed",
+            target: "geneva-uploader",
+            scopes = %scope_candidates.join(", "),
+            error = %detail,
+            "Workload Identity token acquisition failed"
+        );
+        Err(GenevaConfigClientError::WorkloadIdentityAuth(format!(
+            "Workload Identity token acquisition failed. Scopes tried: {scopes}. Last error: {detail}",
Last error: {detail}", + scopes = scope_candidates.join(", ") + ))) + } + + /// Get MSI token for GCS authentication + async fn get_msi_token(&self) -> Result { + debug!( + name: "config_client.get_msi_token", + target: "geneva-uploader", + "Acquiring Managed Identity token" + ); + + let resource = self.config.msi_resource.as_ref().ok_or_else(|| { + debug!( + name: "config_client.get_msi_token.missing_msi_resource", + target: "geneva-uploader", + "msi_resource not set in config (required for Managed Identity auth)" + ); + GenevaConfigClientError::MsiAuth( + "msi_resource not set in config (required for Managed Identity auth)".to_string(), + ) + })?; + + // TODO: Extract scope generation logic into helper function shared with get_workload_identity_token() + let base = resource.trim_end_matches("/.default").trim_end_matches('/'); + let mut scope_candidates: Vec = vec![format!("{base}/.default"), base.to_string()]; + // TODO - below check is not required, as we alread trim "/" + if !base.ends_with('/') { + scope_candidates.push(format!("{base}/")); + } + + let user_assigned_id = match &self.config.auth_method { + AuthMethod::SystemManagedIdentity => None, + AuthMethod::UserManagedIdentity { client_id } => { + Some(UserAssignedId::ClientId(client_id.clone())) + } + AuthMethod::UserManagedIdentityByObjectId { object_id } => { + Some(UserAssignedId::ObjectId(object_id.clone())) + } + AuthMethod::UserManagedIdentityByResourceId { resource_id } => { + Some(UserAssignedId::ResourceId(resource_id.clone())) + } + _ => { + return Err(GenevaConfigClientError::MsiAuth( + "get_msi_token called but auth method is not a managed identity variant" + .to_string(), + )) + } + }; + + // TODO: Consider caching ManagedIdentityCredential if profiling shows credential creation overhead + let options = ManagedIdentityCredentialOptions { + user_assigned_id, + ..Default::default() + }; + let credential = ManagedIdentityCredential::new(Some(options)).map_err(|e| { + debug!( + name: "config_client.get_msi_token.create_credential_error", + target: "geneva-uploader", + error = %e, + "Failed to create MSI credential" + ); + GenevaConfigClientError::MsiAuth(format!("Failed to create MSI credential: {e}")) + })?; + + let mut last_err: Option = None; + for scope in &scope_candidates { + match credential.get_token(&[scope.as_str()], None).await { + Ok(token) => { + info!( + name: "config_client.get_msi_token.success", + target: "geneva-uploader", + "Successfully acquired Managed Identity token" + ); + return Ok(token.token.secret().to_string()); + } + Err(e) => last_err = Some(e.to_string()), + } + } + + let detail = last_err.unwrap_or_else(|| "no error detail".into()); + debug!( + name: "config_client.get_msi_token.failed", + target: "geneva-uploader", + scopes = %scope_candidates.join(", "), + error = %detail, + "Managed Identity token acquisition failed" + ); + Err(GenevaConfigClientError::MsiAuth(format!( + "Managed Identity token acquisition failed. Scopes tried: {scopes}. Last error: {detail}. IMDS fallback intentionally disabled.", + scopes = scope_candidates.join(", ") + ))) + } + /// Retrieves ingestion gateway information from the Geneva Config Service. 
/// /// # HTTP API Details @@ -357,16 +625,34 @@ impl GenevaConfigClient { pub(crate) async fn get_ingestion_info( &self, ) -> Result<(IngestionGatewayInfo, MonikerInfo, String)> { + debug!( + name: "config_client.get_ingestion_info", + target: "geneva-uploader", + "Getting ingestion info (checking cache first)" + ); + // First, try to read from cache (shared read access) if let Ok(guard) = self.cached_data.read() { if let Some(cached_data) = guard.as_ref() { let expiry = cached_data.token_expiry; if expiry > Utc::now() + chrono::Duration::minutes(5) { + debug!( + name: "config_client.get_ingestion_info.cache_hit", + target: "geneva-uploader", + expiry = %expiry, + "Using cached ingestion info" + ); return Ok(( cached_data.auth_info.0.clone(), cached_data.auth_info.1.clone(), cached_data.token_endpoint.clone(), )); + } else { + debug!( + name: "config_client.get_ingestion_info.cache_expired", + target: "geneva-uploader", + "Cached token expired or expiring soon, fetching fresh data" + ); } } } @@ -381,7 +667,16 @@ impl GenevaConfigClient { GenevaConfigClientError::InternalError("Failed to parse token expiry".into()) })?; - let token_endpoint = extract_endpoint_from_token(&fresh_ingestion_gateway_info.auth_token)?; + let token_endpoint = + match extract_endpoint_from_token(&fresh_ingestion_gateway_info.auth_token) { + Ok(ep) => ep, + Err(err) => { + // Fallback: some tokens legitimately omit the Endpoint claim; use server endpoint. + #[cfg(debug_assertions)] + eprintln!("[geneva][debug] token Endpoint claim missing or unparsable: {err}"); + fresh_ingestion_gateway_info.endpoint.clone() + } + }; // Now update the cache with exclusive write access let mut guard = self @@ -418,31 +713,88 @@ impl GenevaConfigClient { /// Internal method that actually fetches data from Geneva Config Service async fn fetch_ingestion_info(&self) -> Result<(IngestionGatewayInfo, MonikerInfo)> { - let tag_id = Uuid::new_v4().to_string(); //TODO - uuid is costly, check if counter is enough? - let mut url = String::with_capacity(self.precomputed_url_prefix.len() + 50); // Pre-allocate with reasonable capacity + info!( + name: "config_client.fetch_ingestion_info", + target: "geneva-uploader", + "Fetching fresh ingestion info from Geneva Config Service" + ); + + let tag_id = Uuid::new_v4().to_string(); // TODO: consider cheaper counter if perf-critical + let mut url = String::with_capacity(self.precomputed_url_prefix.len() + 50); write!(&mut url, "{}&TagId={tag_id}", self.precomputed_url_prefix).map_err(|e| { + debug!( + name: "config_client.fetch_ingestion_info.write_url_error", + target: "geneva-uploader", + error = %e, + "Failed to write URL" + ); GenevaConfigClientError::InternalError(format!("Failed to write URL: {e}")) })?; let req_id = Uuid::new_v4().to_string(); - let mut request = self - .http_client - .get(&url) - .headers(self.static_headers.clone()); // Clone only cheap references + debug!( + name: "config_client.fetch_ingestion_info.request", + target: "geneva-uploader", + request_id = %req_id, + "Sending config request with request_id" + ); + + let mut request = self.http_client.get(&url); request = request.header("x-ms-client-request-id", req_id); - let response = request - .send() - .await - .map_err(GenevaConfigClientError::Http)?; - // Check if the response is successful + + // Add appropriate authentication header + match &self.config.auth_method { + AuthMethod::WorkloadIdentity { .. 
} => { + let token = self.get_workload_identity_token().await?; + request = request.header(AUTHORIZATION, format!("Bearer {}", token)); + } + AuthMethod::SystemManagedIdentity + | AuthMethod::UserManagedIdentity { .. } + | AuthMethod::UserManagedIdentityByObjectId { .. } + | AuthMethod::UserManagedIdentityByResourceId { .. } => { + let token = self.get_msi_token().await?; + request = request.header(AUTHORIZATION, format!("Bearer {}", token)); + } + AuthMethod::Certificate { .. } => { /* mTLS only */ } + #[cfg(feature = "mock_auth")] + AuthMethod::MockAuth => { /* no auth header */ } + } + + // Send HTTP request + let response = match request.send().await { + Ok(resp) => resp, + Err(e) => { + debug!( + name: "config_client.fetch_ingestion_info.http_error", + target: "geneva-uploader", + error = %e, + "Config service HTTP request failed" + ); + return Err(GenevaConfigClientError::Http(e)); + } + }; + let status = response.status(); let body = response.text().await?; + if status.is_success() { - let parsed = match serde_json::from_str::(&body) { - Ok(response) => response, + debug!( + name: "config_client.fetch_ingestion_info.response", + target: "geneva-uploader", + "Config service returned success status" + ); + + let parsed: GenevaResponse = match serde_json::from_str::(&body) { + Ok(p) => p, Err(e) => { + debug!( + name: "config_client.fetch_ingestion_info.parse_error", + target: "geneva-uploader", + error = %e, + "Failed to parse config service response" + ); return Err(GenevaConfigClientError::AuthInfoNotFound(format!( "Failed to parse response: {e}" ))); @@ -451,19 +803,39 @@ impl GenevaConfigClient { for account in parsed.storage_account_keys { if account.is_primary_moniker && account.account_moniker_name.contains("diag") { + // Move (not clone) the strings out of the StorageAccountKey; no extra allocation + let account_moniker_name = account.account_moniker_name; + let account_group_name = account.account_group_name; let moniker_info = MonikerInfo { - name: account.account_moniker_name, - account_group: account.account_group_name, + name: account_moniker_name, + account_group: account_group_name, }; - + info!( + name: "config_client.fetch_ingestion_info.success", + target: "geneva-uploader", + moniker = %moniker_info.name, + "Successfully retrieved ingestion info" + ); return Ok((parsed.ingestion_gateway_info, moniker_info)); } } + debug!( + name: "config_client.fetch_ingestion_info.no_diag_moniker", + target: "geneva-uploader", + "No primary diag moniker found in storage accounts" + ); Err(GenevaConfigClientError::MonikerNotFound( "No primary diag moniker found in storage accounts".to_string(), )) } else { + debug!( + name: "config_client.fetch_ingestion_info.error_status", + target: "geneva-uploader", + status = status.as_u16(), + body = %body, + "Config service returned error" + ); Err(GenevaConfigClientError::RequestFailed { status: status.as_u16(), message: body, @@ -506,12 +878,23 @@ fn extract_endpoint_from_token(token: &str) -> Result { _ => payload.to_string(), }; - // Decode the Base64-encoded payload into raw bytes - let decoded = general_purpose::URL_SAFE_NO_PAD - .decode(payload) - .map_err(|e| { - GenevaConfigClientError::JwtTokenError(format!("Failed to decode JWT: {e}")) - })?; + // Decode the Base64-encoded payload into raw bytes. + // Try URL-safe (with and without padding), then fall back to standard Base64. 
+ let decoded = match general_purpose::URL_SAFE_NO_PAD.decode(&payload) { + Ok(b) => b, + Err(e_url_no_pad) => match general_purpose::URL_SAFE.decode(&payload) { + Ok(b) => b, + Err(e_url_pad) => match general_purpose::STANDARD.decode(&payload) { + Ok(b) => b, + Err(e_std) => { + return Err(GenevaConfigClientError::JwtTokenError(format!( + "Failed to decode JWT (URL_SAFE_NO_PAD, URL_SAFE, and STANDARD): \ + no_pad_err={e_url_no_pad}; pad_err={e_url_pad}; std_err={e_std}" + ))) + } + }, + }, + }; // Convert the raw bytes into a UTF-8 string let decoded_str = String::from_utf8(decoded).map_err(|e| { @@ -522,15 +905,12 @@ fn extract_endpoint_from_token(token: &str) -> Result { let payload_json: serde_json::Value = serde_json::from_str(&decoded_str).map_err(GenevaConfigClientError::SerdeJson)?; - // Extract "Endpoint" from JWT payload as a string, or fail if missing or invalid. - let endpoint = payload_json["Endpoint"] - .as_str() - .ok_or_else(|| { - GenevaConfigClientError::JwtTokenError("No Endpoint claim in JWT token".to_string()) - })? - .to_string(); - - Ok(endpoint) + if let Some(ep) = payload_json["Endpoint"].as_str() { + return Ok(ep.to_string()); + } + Err(GenevaConfigClientError::JwtTokenError( + "No Endpoint claim in JWT token".to_string(), + )) } #[cfg(feature = "self_signed_certs")] diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/config_service/mod.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/config_service/mod.rs index c41ecfa2d..dbf454ce7 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/config_service/mod.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/config_service/mod.rs @@ -20,12 +20,19 @@ mod tests { namespace: "ns".to_string(), region: "region".to_string(), config_major_version: 1, - auth_method: AuthMethod::ManagedIdentity, + auth_method: AuthMethod::WorkloadIdentity { + resource: "https://monitor.azure.com".to_string(), + }, + msi_resource: None, }; assert_eq!(config.environment, "env"); assert_eq!(config.account, "acct"); - assert!(matches!(config.auth_method, AuthMethod::ManagedIdentity)); + + match config.auth_method { + AuthMethod::WorkloadIdentity { .. 
} => {} + _ => panic!("expected WorkloadIdentity variant"), + } } fn generate_self_signed_p12() -> (NamedTempFile, String) { @@ -107,6 +114,7 @@ mod tests { path: PathBuf::from(temp_p12_file.path().to_string_lossy().to_string()), password, }, + msi_resource: None, }; let client = GenevaConfigClient::new(config).unwrap(); @@ -152,6 +160,7 @@ mod tests { path: PathBuf::from(temp_p12_file.path().to_string_lossy().to_string()), password, }, + msi_resource: None, }; let client = GenevaConfigClient::new(config).unwrap(); @@ -200,6 +209,7 @@ mod tests { path: PathBuf::from(temp_p12_file.path().to_string_lossy().to_string()), password, }, + msi_resource: None, }; let client = GenevaConfigClient::new(config).unwrap(); @@ -231,6 +241,7 @@ mod tests { path: PathBuf::from("/nonexistent/path.p12".to_string()), password: "test".to_string(), }, + msi_resource: None, }; let result = GenevaConfigClient::new(config); @@ -294,6 +305,7 @@ mod tests { path: PathBuf::from(cert_path), password: cert_password, }, + msi_resource: None, }; println!("Connecting to real Geneva Config service..."); diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/ingestion_service/mod.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/ingestion_service/mod.rs index 55eed1c9d..64e395bf1 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/ingestion_service/mod.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/ingestion_service/mod.rs @@ -64,6 +64,7 @@ mod tests { path: cert_path, password: cert_password, }, + msi_resource: None, }; // Build client and uploader @@ -71,7 +72,6 @@ mod tests { GenevaConfigClient::new(config).expect("Failed to create config client"); let uploader = GenevaUploader::from_config_client(Arc::new(config_client), uploader_config) - .await .expect("Failed to create uploader"); // Event name/version @@ -128,7 +128,7 @@ mod tests { let response = ctx .uploader - .upload(ctx.data, &ctx.event_name, &metadata) + .upload(ctx.data, &ctx.event_name, &metadata, 1) .await .expect("Upload failed"); @@ -195,7 +195,7 @@ mod tests { let _ = ctx .uploader - .upload(ctx.data.clone(), &ctx.event_name, &warmup_metadata) + .upload(ctx.data.clone(), &ctx.event_name, &warmup_metadata, 1) .await .expect("Warm-up upload failed"); let warmup_elapsed = start_warmup.elapsed(); @@ -221,7 +221,7 @@ mod tests { }; let resp = uploader - .upload(data, &event_name, &metadata) + .upload(data, &event_name, &metadata, 1) .await .unwrap_or_else(|_| panic!("Upload {i} failed")); let elapsed = start.elapsed(); diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/ingestion_service/uploader.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/ingestion_service/uploader.rs index 5da88da59..f002647d4 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/ingestion_service/uploader.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/ingestion_service/uploader.rs @@ -9,6 +9,7 @@ use std::fmt::Write; use std::sync::Arc; use std::time::Duration; use thiserror::Error; +use tracing::debug; use url::form_urlencoded::byte_serialize; use uuid::Uuid; @@ -88,12 +89,13 @@ impl From for GenevaUploaderError { pub(crate) type Result = std::result::Result; -#[allow(dead_code)] /// Response from the ingestion API when submitting data #[derive(Debug, Clone, Deserialize)] pub(crate) struct IngestionResponse { + #[allow(dead_code)] pub(crate) ticket: String, #[serde(flatten)] + #[allow(dead_code)] pub(crate) extra: HashMap, } @@ -125,7 +127,7 @@ impl GenevaUploader { /// # Returns /// * `Result` with 
authenticated client and resolved moniker/endpoint #[allow(dead_code)] - pub(crate) async fn from_config_client( + pub(crate) fn from_config_client( config_client: Arc, uploader_config: GenevaUploaderConfig, ) -> Result { @@ -134,18 +136,24 @@ impl GenevaUploader { header::ACCEPT, header::HeaderValue::from_static("application/json"), ); - let http_client = Client::builder() - .timeout(Duration::from_secs(30)) - .default_headers(headers) - .build()?; + let client = Self::build_h1_client(headers)?; Ok(Self { config_client, config: uploader_config, - http_client, + http_client: client, }) } + fn build_h1_client(headers: header::HeaderMap) -> Result { + Ok(Client::builder() + .timeout(Duration::from_secs(30)) + .default_headers(headers) + .http1_only() + .tcp_keepalive(Some(Duration::from_secs(60))) + .build()?) + } + /// Creates the GIG upload URI with required parameters #[allow(dead_code)] fn create_upload_uri( @@ -155,6 +163,7 @@ impl GenevaUploader { data_size: usize, event_name: &str, metadata: &BatchMetadata, + row_count: usize, ) -> Result { // Get already formatted schema IDs and format timestamps using BatchMetadata methods let schema_ids = &metadata.schema_ids; @@ -173,7 +182,7 @@ impl GenevaUploader { // Create the query string let mut query = String::with_capacity(512); // Preallocate enough space for the query string (decided based on expected size) - write!(&mut query, "api/v1/ingestion/ingest?endpoint={}&moniker={}&namespace={}&event={}&version={}&sourceUniqueId={}&sourceIdentity={}&startTime={}&endTime={}&format=centralbond/lz4hc&dataSize={}&minLevel={}&schemaIds={}", + write!(&mut query, "api/v1/ingestion/ingest?endpoint={}&moniker={}&namespace={}&event={}&version={}&sourceUniqueId={}&sourceIdentity={}&startTime={}&endTime={}&format=centralbond/lz4hc&dataSize={}&minLevel={}&schemaIds={}&rowCount={}", encoded_monitoring_endpoint, moniker, self.config.namespace, @@ -185,7 +194,8 @@ impl GenevaUploader { end_time_str, data_size, 2, - schema_ids + schema_ids, + row_count ).map_err(|e| GenevaUploaderError::InternalError(format!("Failed to write query string: {e}")))?; Ok(query) } @@ -197,6 +207,7 @@ impl GenevaUploader { /// * `event_name` - Name of the event /// * `event_version` - Version of the event /// * `metadata` - Batch metadata containing timestamps and schema information + /// * `row_count` - Number of rows/events in the batch /// /// # Returns /// * `Result` - The response containing the ticket ID or an error @@ -206,7 +217,16 @@ impl GenevaUploader { data: Vec, event_name: &str, metadata: &BatchMetadata, + row_count: usize, ) -> Result { + debug!( + name: "uploader.upload", + target: "geneva-uploader", + event_name = %event_name, + size = data.len(), + "Starting upload" + ); + // Always get fresh auth info let (auth_info, moniker_info, monitoring_endpoint) = self.config_client.get_ingestion_info().await?; @@ -217,12 +237,22 @@ impl GenevaUploader { data_size, event_name, metadata, + row_count, )?; let full_url = format!( "{}/{}", auth_info.endpoint.trim_end_matches('/'), upload_uri ); + + debug!( + name: "uploader.upload.post", + target: "geneva-uploader", + event_name = %event_name, + moniker = %moniker_info.name, + "Posting to ingestion gateway" + ); + // Send the upload request let response = self .http_client @@ -238,11 +268,34 @@ impl GenevaUploader { let body = response.text().await?; if status == reqwest::StatusCode::ACCEPTED { - let ingest_response: IngestionResponse = - serde_json::from_str(&body).map_err(GenevaUploaderError::SerdeJson)?; + let 
ingest_response: IngestionResponse = serde_json::from_str(&body).map_err(|e| { + debug!( + name: "uploader.upload.parse_error", + target: "geneva-uploader", + error = %e, + "Failed to parse ingestion response" + ); + GenevaUploaderError::SerdeJson(e) + })?; + + debug!( + name: "uploader.upload.success", + target: "geneva-uploader", + event_name = %event_name, + ticket = %ingest_response.ticket, + "Upload successful" + ); Ok(ingest_response) } else { + debug!( + name: "uploader.upload.failed", + target: "geneva-uploader", + event_name = %event_name, + status = status.as_u16(), + body = %body, + "Upload failed" + ); Err(GenevaUploaderError::UploadFailed { status: status.as_u16(), message: body, diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/lib.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/lib.rs index e322626cc..6c0cbfe8f 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/lib.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/lib.rs @@ -1,6 +1,6 @@ mod config_service; mod ingestion_service; -pub mod payload_encoder; +mod payload_encoder; pub mod client; @@ -14,8 +14,9 @@ pub(crate) use config_service::client::{ #[allow(unused_imports)] pub(crate) use ingestion_service::uploader::{ - GenevaUploader, GenevaUploaderConfig, GenevaUploaderError, IngestionResponse, Result, + GenevaUploader, GenevaUploaderConfig, GenevaUploaderError, Result, }; +pub use client::EncodedBatch; pub use client::{GenevaClient, GenevaClientConfig}; pub use config_service::client::AuthMethod; diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/payload_encoder/central_blob.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/payload_encoder/central_blob.rs index b60ac270c..1a0fa0ec3 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/payload_encoder/central_blob.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/payload_encoder/central_blob.rs @@ -52,17 +52,6 @@ impl BatchMetadata { } } -/// Represents an encoded batch with all necessary metadata -#[derive(Debug, Clone)] -pub(crate) struct EncodedBatch { - /// The event name for this batch - pub(crate) event_name: String, - /// The encoded binary data - pub(crate) data: Vec, - /// Batch metadata containing timestamps and schema information - pub(crate) metadata: BatchMetadata, -} - /// Helper to encode UTF-8 Rust str to UTF-16LE bytes /// TODO - consider avoiding temporary allocation, by passing a mutable buffer #[allow(dead_code)] diff --git a/opentelemetry-exporter-geneva/geneva-uploader/src/payload_encoder/otlp_encoder.rs b/opentelemetry-exporter-geneva/geneva-uploader/src/payload_encoder/otlp_encoder.rs index e8b20c463..d2b17e25d 100644 --- a/opentelemetry-exporter-geneva/geneva-uploader/src/payload_encoder/otlp_encoder.rs +++ b/opentelemetry-exporter-geneva/geneva-uploader/src/payload_encoder/otlp_encoder.rs @@ -1,12 +1,16 @@ +use crate::client::EncodedBatch; use crate::payload_encoder::bond_encoder::{BondDataType, BondEncodedSchema, BondWriter, FieldDef}; use crate::payload_encoder::central_blob::{ - BatchMetadata, CentralBlob, CentralEventEntry, CentralSchemaEntry, EncodedBatch, + BatchMetadata, CentralBlob, CentralEventEntry, CentralSchemaEntry, }; +use crate::payload_encoder::lz4_chunked_compression::lz4_chunked_compression; use chrono::{TimeZone, Utc}; use opentelemetry_proto::tonic::common::v1::any_value::Value; use opentelemetry_proto::tonic::logs::v1::LogRecord; +use opentelemetry_proto::tonic::trace::v1::Span; use std::borrow::Cow; use std::sync::Arc; +use tracing::debug; const 
FIELD_ENV_NAME: &str = "env_name";
 const FIELD_ENV_VER: &str = "env_ver";
@@ -20,6 +24,15 @@ const FIELD_SEVERITY_NUMBER: &str = "SeverityNumber";
 const FIELD_SEVERITY_TEXT: &str = "SeverityText";
 const FIELD_BODY: &str = "body";
 
+// Span-specific field constants
+const FIELD_KIND: &str = "kind";
+const FIELD_START_TIME: &str = "startTime";
+const FIELD_SUCCESS: &str = "success";
+const FIELD_TRACE_STATE: &str = "traceState";
+const FIELD_PARENT_ID: &str = "parentId";
+const FIELD_LINKS: &str = "links";
+const FIELD_STATUS_MESSAGE: &str = "statusMessage";
+
 /// Encoder to write OTLP payload in bond form.
 #[derive(Clone)]
 pub(crate) struct OtlpEncoder;
@@ -29,8 +42,14 @@ impl OtlpEncoder {
         OtlpEncoder {}
     }
 
-    /// Encode a batch of logs into a vector of (event_name, bytes, schema_ids, start_time_nanos, end_time_nanos)
-    pub(crate) fn encode_log_batch<'a, I>(&self, logs: I, metadata: &str) -> Vec<EncodedBatch>
+    /// Encode a batch of logs into a vector of `EncodedBatch` entries, one per event name.
+    /// The `data` field of each entry contains LZ4 chunked compressed bytes.
+    /// On compression failure, the error is returned (no fallback to uncompressed data).
+    pub(crate) fn encode_log_batch<'a, I>(
+        &self,
+        logs: I,
+        metadata: &str,
+    ) -> Result<Vec<EncodedBatch>, String>
     where
         I: IntoIterator<Item = &'a LogRecord>,
     {
@@ -70,7 +89,7 @@ impl OtlpEncoder {
             }
         }
 
-        let mut batches: HashMap<String, BatchData> = HashMap::new();
+        let mut batches: HashMap<&str, BatchData> = HashMap::new();
 
         for log_record in logs {
             // Get the timestamp - prefer time_unix_nano, fall back to observed_time_unix_nano if time_unix_nano is 0
@@ -91,23 +110,20 @@ impl OtlpEncoder {
             let (field_info, schema_id) =
                 Self::determine_fields_and_schema_id(log_record, event_name_str);
-            let schema_entry = Self::create_schema(schema_id, field_info.as_slice());
 
             // 2. Encode row
             let row_buffer = self.write_row_data(log_record, &field_info);
             let level = log_record.severity_number as u8;
 
             // 3. Create or get existing batch entry with metadata tracking
-            let entry = batches
-                .entry(event_name_str.to_string())
-                .or_insert_with(|| BatchData {
-                    schemas: Vec::new(),
-                    events: Vec::new(),
-                    metadata: BatchMetadata {
-                        start_time: timestamp,
-                        end_time: timestamp,
-                        schema_ids: String::new(),
-                    },
-                });
+            let entry = batches.entry(event_name_str).or_insert_with(|| BatchData {
+                schemas: Vec::new(),
+                events: Vec::new(),
+                metadata: BatchMetadata {
+                    start_time: timestamp,
+                    end_time: timestamp,
+                    schema_ids: String::new(),
+                },
+            });
 
             // Update timestamp range
             if timestamp != 0 {
@@ -117,6 +133,7 @@ impl OtlpEncoder {
             // 4. Add schema entry if not already present (multiple schemas per event_name batch)
             if !entry.schemas.iter().any(|s| s.id == schema_id) {
+                let schema_entry = Self::create_schema(schema_id, field_info);
                 entry.schemas.push(schema_entry);
             }
@@ -136,6 +153,9 @@ impl OtlpEncoder {
             let schema_ids_string = batch_data.format_schema_ids();
             batch_data.metadata.schema_ids = schema_ids_string;
 
+            let schemas_count = batch_data.schemas.len();
+            let events_count = batch_data.events.len();
+
             let blob = CentralBlob {
                 version: 1,
                 format: 2,
@@ -143,14 +163,172 @@ impl OtlpEncoder {
                 schemas: batch_data.schemas,
                 events: batch_data.events,
             };
-            let bytes = blob.to_bytes();
+            let uncompressed = blob.to_bytes();
+            let compressed = lz4_chunked_compression(&uncompressed).map_err(|e| {
+                debug!(
+                    name: "encoder.encode_log_batch.compress_error",
+                    target: "geneva-uploader",
+                    event_name = %batch_event_name,
+                    error = %e,
+                    "LZ4 compression failed"
+                );
+                format!("compression failed: {e}")
+            })?;
+
+            debug!(
+                name: "encoder.encode_log_batch",
+                target: "geneva-uploader",
+                event_name = %batch_event_name,
+                schemas = schemas_count,
+                events = events_count,
+                uncompressed_size = uncompressed.len(),
+                compressed_size = compressed.len(),
+                "Encoded log batch"
+            );
+
             blobs.push(EncodedBatch {
-                event_name: batch_event_name,
-                data: bytes,
+                event_name: batch_event_name.to_string(),
+                data: compressed,
                 metadata: batch_data.metadata,
+                row_count: events_count,
             });
         }
-        blobs
+        Ok(blobs)
+    }
+
+    /// Encode a batch of spans into a single payload.
+    /// All spans are grouped into a single batch with event_name "Span" for routing.
+    /// The `data` field of the entry contains LZ4 chunked compressed bytes.
+    /// On compression failure, the error is returned (no fallback to uncompressed data).
+    pub(crate) fn encode_span_batch<'a, I>(
+        &self,
+        spans: I,
+        metadata: &str,
+    ) -> Result<Vec<EncodedBatch>, String>
+    where
+        I: IntoIterator<Item = &'a Span>,
+    {
+        // All spans use "Span" as event name for routing - no grouping by span name
+        const EVENT_NAME: &str = "Span";
+
+        let mut schemas = Vec::new();
+        let mut events = Vec::new();
+        let mut start_time = u64::MAX;
+        let mut end_time = 0u64;
+
+        for span in spans {
+            // 1. Get schema with optimized single-pass field collection and schema ID calculation
+            let (field_info, schema_id) =
+                Self::determine_span_fields_and_schema_id(span, EVENT_NAME);
+
+            // 2. Encode row
+            let row_buffer = self.write_span_row_data(span, &field_info);
+            let level = 5; // Default level for spans (INFO equivalent)
+
+            // 3. Update timestamp range
+            if span.start_time_unix_nano != 0 {
+                start_time = start_time.min(span.start_time_unix_nano);
+            }
+            if span.end_time_unix_nano != 0 {
+                end_time = end_time.max(span.end_time_unix_nano);
+            }
+
+            // 4. Add schema entry if not already present
+            // TODO - This can have collision if different spans have same schema ID but different fields
+            if !schemas
+                .iter()
+                .any(|s: &CentralSchemaEntry| s.id == schema_id)
+            {
+                let schema_entry = Self::create_span_schema(schema_id, field_info);
+                schemas.push(schema_entry);
+            }
+
+            // 5.
Create CentralEventEntry + let central_event = CentralEventEntry { + schema_id, + level, + event_name: Arc::new(EVENT_NAME.to_string()), + row: row_buffer, + }; + events.push(central_event); + } + + // Handle case with no spans + if events.is_empty() { + return Ok(Vec::new()); + } + + // Format schema IDs + // TODO - this can be shared code with log batch + let schema_ids_string = { + use std::fmt::Write; + if schemas.is_empty() { + String::new() + } else { + let estimated_capacity = schemas.len() * 32 + schemas.len().saturating_sub(1); + schemas.iter().enumerate().fold( + String::with_capacity(estimated_capacity), + |mut acc, (i, s)| { + if i > 0 { + acc.push(';'); + } + let md5_hash = md5::compute(s.id.to_le_bytes()); + write!(&mut acc, "{md5_hash:x}").unwrap(); + acc + }, + ) + } + }; + + // Create single batch with all spans + let batch_metadata = BatchMetadata { + start_time: if start_time == u64::MAX { + 0 + } else { + start_time + }, + end_time, + schema_ids: schema_ids_string, + }; + + let schemas_count = schemas.len(); + let events_count = events.len(); + let blob = CentralBlob { + version: 1, + format: 2, + metadata: metadata.to_string(), + schemas, + events, + }; + + let uncompressed = blob.to_bytes(); + let compressed = lz4_chunked_compression(&uncompressed).map_err(|e| { + debug!( + name: "encoder.encode_span_batch.compress_error", + target: "geneva-uploader", + error = %e, + "LZ4 compression failed for spans" + ); + format!("compression failed: {e}") + })?; + + debug!( + name: "encoder.encode_span_batch", + target: "geneva-uploader", + event_name = EVENT_NAME, + schemas = schemas_count, + spans = events_count, + uncompressed_size = uncompressed.len(), + compressed_size = compressed.len(), + "Encoded span batch" + ); + + Ok(vec![EncodedBatch { + event_name: EVENT_NAME.to_string(), + data: compressed, + metadata: batch_metadata, + row_count: events_count, + }]) } /// Determine fields and calculate schema ID in a single pass for optimal performance @@ -180,7 +358,7 @@ impl OtlpEncoder { fields.push((FIELD_SPAN_ID.into(), BondDataType::BT_STRING)); } if log.flags != 0 { - fields.push((FIELD_TRACE_FLAGS.into(), BondDataType::BT_INT32)); + fields.push((FIELD_TRACE_FLAGS.into(), BondDataType::BT_UINT32)); } // Part B - Core log fields @@ -235,10 +413,112 @@ impl OtlpEncoder { (field_defs, schema_id) } + /// Determine span fields and calculate schema ID in a single pass for optimal performance + fn determine_span_fields_and_schema_id(span: &Span, event_name: &str) -> (Vec, u64) { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + + // Pre-allocate with estimated capacity to avoid reallocations + let estimated_capacity = 15 + span.attributes.len(); // 7 always + 8 max conditional + attributes + let mut fields = Vec::with_capacity(estimated_capacity); + + // Initialize hasher for schema ID calculation + let mut hasher = DefaultHasher::new(); + event_name.hash(&mut hasher); + + // Part A - Always present fields for spans + fields.push((Cow::Borrowed(FIELD_ENV_NAME), BondDataType::BT_STRING)); + fields.push((FIELD_ENV_VER.into(), BondDataType::BT_STRING)); + fields.push((FIELD_TIMESTAMP.into(), BondDataType::BT_STRING)); + fields.push((FIELD_ENV_TIME.into(), BondDataType::BT_STRING)); + + // Span-specific required fields + fields.push((FIELD_KIND.into(), BondDataType::BT_INT32)); + fields.push((FIELD_START_TIME.into(), BondDataType::BT_STRING)); + fields.push((FIELD_SUCCESS.into(), BondDataType::BT_BOOL)); + + // Part A extension - Conditional fields + if 
!span.trace_id.is_empty() { + fields.push((FIELD_TRACE_ID.into(), BondDataType::BT_STRING)); + } + if !span.span_id.is_empty() { + fields.push((FIELD_SPAN_ID.into(), BondDataType::BT_STRING)); + } + if span.flags != 0 { + fields.push((FIELD_TRACE_FLAGS.into(), BondDataType::BT_UINT32)); + } + + // Part B - Span-specific optional fields + if !span.name.is_empty() { + fields.push((FIELD_NAME.into(), BondDataType::BT_STRING)); + } + if !span.trace_state.is_empty() { + fields.push((FIELD_TRACE_STATE.into(), BondDataType::BT_STRING)); + } + if !span.parent_span_id.is_empty() { + fields.push((FIELD_PARENT_ID.into(), BondDataType::BT_STRING)); + } + if !span.links.is_empty() { + fields.push((FIELD_LINKS.into(), BondDataType::BT_STRING)); + } + if let Some(status) = &span.status { + if !status.message.is_empty() { + fields.push((FIELD_STATUS_MESSAGE.into(), BondDataType::BT_STRING)); + } + } + + // Part C - Dynamic attributes + for attr in &span.attributes { + if let Some(val) = attr.value.as_ref().and_then(|v| v.value.as_ref()) { + let type_id = match val { + Value::StringValue(_) => BondDataType::BT_STRING, + Value::IntValue(_) => BondDataType::BT_INT64, + Value::DoubleValue(_) => BondDataType::BT_DOUBLE, + Value::BoolValue(_) => BondDataType::BT_BOOL, + _ => continue, + }; + fields.push((attr.key.clone().into(), type_id)); + } + } + + // Hash field names and types while converting to FieldDef + let field_defs: Vec = fields + .into_iter() + .enumerate() + .map(|(i, (name, type_id))| { + // Hash field name and type for schema ID + name.hash(&mut hasher); + type_id.hash(&mut hasher); + + FieldDef { + name, + type_id, + field_id: (i + 1) as u16, + } + }) + .collect(); + + let schema_id = hasher.finish(); + (field_defs, schema_id) + } + /// Create schema - always creates a new CentralSchemaEntry - fn create_schema(schema_id: u64, field_info: &[FieldDef]) -> CentralSchemaEntry { - let schema = - BondEncodedSchema::from_fields("OtlpLogRecord", "telemetry", field_info.to_vec()); //TODO - use actual struct name and namespace + fn create_schema(schema_id: u64, field_info: Vec) -> CentralSchemaEntry { + let schema = BondEncodedSchema::from_fields("OtlpLogRecord", "telemetry", field_info); //TODO - use actual struct name and namespace + + let schema_bytes = schema.as_bytes(); + let schema_md5 = md5::compute(schema_bytes).0; + + CentralSchemaEntry { + id: schema_id, + md5: schema_md5, + schema, + } + } + + /// Create span schema - always creates a new CentralSchemaEntry + fn create_span_schema(schema_id: u64, field_info: Vec) -> CentralSchemaEntry { + let schema = BondEncodedSchema::from_fields("OtlpSpanRecord", "telemetry", field_info); let schema_bytes = schema.as_bytes(); let schema_md5 = md5::compute(schema_bytes).0; @@ -250,6 +530,89 @@ impl OtlpEncoder { } } + /// Write span row data directly from Span + // TODO - code duplication between write_span_row_data() and write_row_data() - consider extracting common field handling + fn write_span_row_data(&self, span: &Span, fields: &[FieldDef]) -> Vec { + let mut buffer = Vec::with_capacity(fields.len() * 50); + + // Pre-calculate timestamp (use start time as primary timestamp for both fields) + let formatted_timestamp = Self::format_timestamp(span.start_time_unix_nano); + + for field in fields { + match field.name.as_ref() { + FIELD_ENV_NAME => BondWriter::write_string(&mut buffer, "TestEnv"), // TODO - placeholder + FIELD_ENV_VER => BondWriter::write_string(&mut buffer, "4.0"), // TODO - placeholder + FIELD_TIMESTAMP | FIELD_ENV_TIME => { + 
BondWriter::write_string(&mut buffer, &formatted_timestamp); + } + FIELD_KIND => { + BondWriter::write_numeric(&mut buffer, span.kind); + } + FIELD_START_TIME => { + BondWriter::write_string(&mut buffer, &formatted_timestamp); + } + FIELD_SUCCESS => { + // Determine success based on status + let success = match &span.status { + Some(status) => { + use opentelemetry_proto::tonic::trace::v1::status::StatusCode; + match StatusCode::try_from(status.code) { + Ok(StatusCode::Ok) => true, + Ok(StatusCode::Error) => false, + _ => true, // Unset or unknown defaults to true + } + } + None => true, // No status defaults to true + }; + BondWriter::write_bool(&mut buffer, success); + } + FIELD_TRACE_ID => { + let hex_bytes = Self::encode_id_to_hex::<32>(&span.trace_id); + let hex_str = std::str::from_utf8(&hex_bytes).unwrap(); + BondWriter::write_string(&mut buffer, hex_str); + } + FIELD_SPAN_ID => { + let hex_bytes = Self::encode_id_to_hex::<16>(&span.span_id); + let hex_str = std::str::from_utf8(&hex_bytes).unwrap(); + BondWriter::write_string(&mut buffer, hex_str); + } + FIELD_TRACE_FLAGS => { + BondWriter::write_numeric(&mut buffer, span.flags); + } + FIELD_NAME => { + BondWriter::write_string(&mut buffer, &span.name); + } + FIELD_TRACE_STATE => { + BondWriter::write_string(&mut buffer, &span.trace_state); + } + FIELD_PARENT_ID => { + let hex_bytes = Self::encode_id_to_hex::<16>(&span.parent_span_id); + let hex_str = std::str::from_utf8(&hex_bytes).unwrap(); + BondWriter::write_string(&mut buffer, hex_str); + } + FIELD_LINKS => { + // Manual JSON building to avoid intermediate allocations + let links_json = Self::serialize_links(&span.links); + BondWriter::write_string(&mut buffer, &links_json); + } + FIELD_STATUS_MESSAGE => { + if let Some(status) = &span.status { + BondWriter::write_string(&mut buffer, &status.message); + } + } + _ => { + // Handle dynamic attributes + // TODO - optimize better - we could update determine_fields to also return a vec of bytes which has bond serialized attributes + if let Some(attr) = span.attributes.iter().find(|a| a.key == field.name) { + self.write_attribute_value(&mut buffer, attr, field.type_id); + } + } + } + } + + buffer + } + /// Write row data directly from LogRecord fn write_row_data(&self, log: &LogRecord, sorted_fields: &[FieldDef]) -> Vec { let mut buffer = Vec::with_capacity(sorted_fields.len() * 50); //TODO - estimate better @@ -282,7 +645,7 @@ impl OtlpEncoder { BondWriter::write_string(&mut buffer, hex_str); } FIELD_TRACE_FLAGS => { - BondWriter::write_numeric(&mut buffer, log.flags as i32); + BondWriter::write_numeric(&mut buffer, log.flags); } FIELD_NAME => { BondWriter::write_string(&mut buffer, &log.event_name); @@ -320,6 +683,45 @@ impl OtlpEncoder { hex_bytes } + /// Links serialization + fn serialize_links(links: &[opentelemetry_proto::tonic::trace::v1::span::Link]) -> String { + if links.is_empty() { + return "[]".to_string(); + } + + // Estimate capacity: Each link needs ~80 chars for JSON structure + 32 chars for trace_id + 16 chars for span_id + // JSON overhead: {"toSpanId":"","toTraceId":""} = ~30 chars + commas/brackets + let estimated_capacity = links.len() * 128 + 2; // Extra buffer for safety + let mut json = String::with_capacity(estimated_capacity); + + json.push('['); + + for (i, link) in links.iter().enumerate() { + if i > 0 { + json.push(','); + } + + json.push_str(r#"{"toSpanId":""#); + + // Write hex directly to avoid temporary string allocation + for &byte in &link.span_id { + json.push_str(&format!("{:02x}", byte)); + } + 
+ json.push_str(r#"","toTraceId":""#); + + // Write hex directly to avoid temporary string allocation + for &byte in &link.trace_id { + json.push_str(&format!("{:02x}", byte)); + } + + json.push_str(r#""}"#); + } + + json.push(']'); + json + } + /// Format timestamp from nanoseconds fn format_timestamp(nanos: u64) -> String { let secs = (nanos / 1_000_000_000) as i64; @@ -391,7 +793,7 @@ mod tests { }); let metadata = "namespace=testNamespace/eventVersion=Ver1v0"; - let result = encoder.encode_log_batch([log].iter(), metadata); + let result = encoder.encode_log_batch([log].iter(), metadata).unwrap(); assert!(!result.is_empty()); } @@ -438,7 +840,9 @@ mod tests { let metadata = "namespace=test"; // Encode multiple log records with different schema structures but same event_name - let result = encoder.encode_log_batch([log1, log2, log3].iter(), metadata); + let result = encoder + .encode_log_batch([log1, log2, log3].iter(), metadata) + .unwrap(); // Should create one batch (same event_name = "user_action") assert_eq!(result.len(), 1); @@ -495,7 +899,7 @@ mod tests { ..Default::default() }; - let result = encoder.encode_log_batch([log].iter(), "test"); + let result = encoder.encode_log_batch([log].iter(), "test").unwrap(); assert_eq!(result.len(), 1); assert_eq!(result[0].event_name, "test_event"); @@ -535,7 +939,9 @@ mod tests { }), }); - let result = encoder.encode_log_batch([log1, log2, log3].iter(), "test"); + let result = encoder + .encode_log_batch([log1, log2, log3].iter(), "test") + .unwrap(); // All should be in one batch with same event_name assert_eq!(result.len(), 1); @@ -561,7 +967,9 @@ mod tests { ..Default::default() }; - let result = encoder.encode_log_batch([log1, log2].iter(), "test"); + let result = encoder + .encode_log_batch([log1, log2].iter(), "test") + .unwrap(); // Should create 2 separate batches assert_eq!(result.len(), 2); @@ -584,7 +992,7 @@ mod tests { ..Default::default() }; - let result = encoder.encode_log_batch([log].iter(), "test"); + let result = encoder.encode_log_batch([log].iter(), "test").unwrap(); assert_eq!(result.len(), 1); assert_eq!(result[0].event_name, "Log"); // Should default to "Log" @@ -630,7 +1038,9 @@ mod tests { }), }); - let result = encoder.encode_log_batch([log1, log2, log3, log4].iter(), "test"); + let result = encoder + .encode_log_batch([log1, log2, log3, log4].iter(), "test") + .unwrap(); // Should create 3 batches: "user_action", "system_alert", "Log" assert_eq!(result.len(), 3); @@ -657,4 +1067,275 @@ mod tests { assert!(!log_batch.data.is_empty()); // Should have encoded data assert_eq!(log_batch.metadata.schema_ids.matches(';').count(), 0); // 1 schema = 0 semicolons } + + #[test] + fn test_span_encoding() { + let encoder = OtlpEncoder::new(); + + let mut span = Span { + trace_id: vec![1; 16], + span_id: vec![2; 8], + parent_span_id: vec![3; 8], + name: "test_span".to_string(), + kind: 1, // CLIENT + start_time_unix_nano: 1_700_000_000_000_000_000, + end_time_unix_nano: 1_700_000_001_000_000_000, + flags: 1, + trace_state: "key=value".to_string(), + ..Default::default() + }; + + // Add some attributes + span.attributes.push(KeyValue { + key: "http.method".to_string(), + value: Some(AnyValue { + value: Some(Value::StringValue("GET".to_string())), + }), + }); + + span.attributes.push(KeyValue { + key: "http.status_code".to_string(), + value: Some(AnyValue { + value: Some(Value::IntValue(200)), + }), + }); + + let metadata = "namespace=testNamespace/eventVersion=Ver1v0"; + let result = encoder.encode_span_batch([span].iter(), 
metadata).unwrap(); + + assert_eq!(result.len(), 1); + assert_eq!(result[0].event_name, "Span"); // All spans use "Span" event name for routing + assert!(!result[0].data.is_empty()); + } + + #[test] + fn test_span_with_links() { + use opentelemetry_proto::tonic::trace::v1::span::Link; + + let encoder = OtlpEncoder::new(); + + let mut span = Span { + trace_id: vec![1; 16], + span_id: vec![2; 8], + name: "linked_span".to_string(), + kind: 2, // SERVER + start_time_unix_nano: 1_700_000_000_000_000_000, + end_time_unix_nano: 1_700_000_001_000_000_000, + ..Default::default() + }; + + // Add some links + span.links.push(Link { + trace_id: vec![4; 16], + span_id: vec![5; 8], + ..Default::default() + }); + + span.links.push(Link { + trace_id: vec![6; 16], + span_id: vec![7; 8], + ..Default::default() + }); + + let result = encoder.encode_span_batch([span].iter(), "test").unwrap(); + + assert_eq!(result.len(), 1); + assert_eq!(result[0].event_name, "Span"); // All spans use "Span" event name for routing + assert!(!result[0].data.is_empty()); + } + + #[test] + fn test_span_with_status() { + use opentelemetry_proto::tonic::trace::v1::{status::StatusCode, Status}; + + let encoder = OtlpEncoder::new(); + + let mut span = Span { + trace_id: vec![1; 16], + span_id: vec![2; 8], + name: "error_span".to_string(), + kind: 1, + start_time_unix_nano: 1_700_000_000_000_000_000, + end_time_unix_nano: 1_700_000_001_000_000_000, + ..Default::default() + }; + + span.status = Some(Status { + message: "Something went wrong".to_string(), + code: StatusCode::Error as i32, + }); + + let result = encoder.encode_span_batch([span].iter(), "test").unwrap(); + + assert_eq!(result.len(), 1); + assert_eq!(result[0].event_name, "Span"); // All spans use "Span" event name for routing + assert!(!result[0].data.is_empty()); + } + + #[test] + fn test_multiple_spans_same_name() { + let encoder = OtlpEncoder::new(); + + let span1 = Span { + trace_id: vec![1; 16], + span_id: vec![2; 8], + name: "database_query".to_string(), + kind: 3, // CLIENT + start_time_unix_nano: 1_700_000_000_000_000_000, + end_time_unix_nano: 1_700_000_001_000_000_000, + ..Default::default() + }; + + let span2 = Span { + trace_id: vec![3; 16], + span_id: vec![4; 8], + name: "database_query".to_string(), // Same name as span1 + kind: 3, + start_time_unix_nano: 1_700_000_002_000_000_000, + end_time_unix_nano: 1_700_000_003_000_000_000, + ..Default::default() + }; + + // Verify that both spans have name field in schema + let (fields1, _) = OtlpEncoder::determine_span_fields_and_schema_id(&span1, "Span"); + let name_field_present1 = fields1 + .iter() + .any(|field| field.name.as_ref() == FIELD_NAME); + assert!( + name_field_present1, + "Span with non-empty name should include 'name' field in schema" + ); + + let (fields2, _) = OtlpEncoder::determine_span_fields_and_schema_id(&span2, "Span"); + let name_field_present2 = fields2 + .iter() + .any(|field| field.name.as_ref() == FIELD_NAME); + assert!( + name_field_present2, + "Span with non-empty name should include 'name' field in schema" + ); + + let result = encoder + .encode_span_batch([span1, span2].iter(), "test") + .unwrap(); + + // Should create one batch with same event_name + assert_eq!(result.len(), 1); + assert_eq!(result[0].event_name, "Span"); // All spans use "Span" event name for routing + assert!(!result[0].data.is_empty()); + // Should have 1 schema ID since both spans have same schema structure + assert_eq!(result[0].metadata.schema_ids.matches(';').count(), 0); // 1 schema = 0 semicolons + } + + 
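The tests above exercise the new fallible, batch-oriented encoder API. For orientation, here is a minimal sketch of how a caller is expected to wire the pieces together (`OtlpEncoder`, `EncodedBatch`, and `GenevaUploader::upload` are crate-internal items from this patch; the metadata string and the error handling are placeholders):

```rust
use opentelemetry_proto::tonic::logs::v1::LogRecord;

// Sketch only: these are crate-internal types, shown with the signatures
// introduced in this patch.
async fn encode_and_upload(
    encoder: &OtlpEncoder,
    uploader: &GenevaUploader,
    logs: &[LogRecord],
) -> Result<(), String> {
    // encode_log_batch now returns Result<Vec<EncodedBatch>, String>, so a
    // compression failure surfaces here instead of yielding a bad payload.
    let batches =
        encoder.encode_log_batch(logs.iter(), "namespace=testNamespace/eventVersion=Ver1v0")?;
    for batch in batches {
        // row_count is threaded through to the ingestion query string (&rowCount=...).
        uploader
            .upload(batch.data, &batch.event_name, &batch.metadata, batch.row_count)
            .await
            .map_err(|e| e.to_string())?;
    }
    Ok(())
}
```

The same pattern applies to `encode_span_batch`, which always yields a single batch whose event name is `"Span"`.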
#[test] + fn test_optimized_links_serialization() { + use opentelemetry_proto::tonic::trace::v1::span::Link; + + // Test that optimized serialization produces correct JSON output + let links = vec![ + Link { + trace_id: vec![ + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, + 0xab, 0xcd, 0xef, + ], + span_id: vec![0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10], + ..Default::default() + }, + Link { + trace_id: vec![ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, + 0xee, 0xff, 0x00, + ], + span_id: vec![0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77], + ..Default::default() + }, + ]; + + let result = OtlpEncoder::serialize_links(&links); + + // Verify JSON structure is correct + assert!(result.starts_with('[')); + assert!(result.ends_with(']')); + assert!(result.contains("toSpanId")); + assert!(result.contains("toTraceId")); + + // Verify it contains the expected hex values + assert!(result.contains("fedcba9876543210")); // First span_id + assert!(result.contains("0123456789abcdef0123456789abcdef")); // First trace_id + assert!(result.contains("0011223344556677")); // Second span_id + assert!(result.contains("112233445566778899aabbccddeeff00")); // Second trace_id + + // Test empty links + let empty_result = OtlpEncoder::serialize_links(&[]); + assert_eq!(empty_result, "[]"); + + // Test single link + let single_link = vec![Link { + trace_id: vec![0x12; 16], + span_id: vec![0x34; 8], + ..Default::default() + }]; + let single_result = OtlpEncoder::serialize_links(&single_link); + assert!(single_result.contains("3434343434343434")); // span_id + assert!(single_result.contains("12121212121212121212121212121212")); // trace_id + // Single item should have one comma (between toSpanId and toTraceId) but no comma between items + assert_eq!(single_result.matches(',').count(), 1); // Only one comma for field separation + assert!(single_result.starts_with('[')); + assert!(single_result.ends_with(']')); + } + + #[test] + fn test_row_count_in_encoded_batch() { + let encoder = OtlpEncoder::new(); + + // Test with logs + let logs = [ + LogRecord { + observed_time_unix_nano: 1_700_000_000_000_000_000, + event_name: "test_event".to_string(), + severity_number: 9, + ..Default::default() + }, + LogRecord { + observed_time_unix_nano: 1_700_000_001_000_000_000, + event_name: "test_event".to_string(), + severity_number: 10, + ..Default::default() + }, + LogRecord { + observed_time_unix_nano: 1_700_000_002_000_000_000, + event_name: "test_event".to_string(), + severity_number: 11, + ..Default::default() + }, + ]; + + let result = encoder + .encode_log_batch(logs.iter(), "namespace=test") + .unwrap(); + + assert_eq!(result.len(), 1); + assert_eq!(result[0].row_count, 3); + + // Test with spans + let spans = [ + Span { + start_time_unix_nano: 1_700_000_000_000_000_000, + end_time_unix_nano: 1_700_000_001_000_000_000, + ..Default::default() + }, + Span { + start_time_unix_nano: 1_700_000_002_000_000_000, + end_time_unix_nano: 1_700_000_003_000_000_000, + ..Default::default() + }, + ]; + + let span_result = encoder + .encode_span_batch(spans.iter(), "namespace=test") + .unwrap(); + + assert_eq!(span_result.len(), 1); + assert_eq!(span_result[0].row_count, 2); + } } diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/CHANGELOG.md b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/CHANGELOG.md new file mode 100644 index 000000000..a01a255e2 --- /dev/null +++ 
b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/CHANGELOG.md @@ -0,0 +1,20 @@ +# Changelog + +## [0.3.0] - 2025-10-17 + +### Changed +- Bump geneva-uploader version to 0.3.0 + +## [0.2.0] - 2025-09-24 + +### Added +- Spans upload functionality + +### Changed +- Bump opentelemetry and opentelemetry_sdk versions to 0.31 +- Bump opentelemetry-proto version to 0.31 + +## [0.1.0] - 2025-08-18 + +### Added +- Initial release of opentelemetry-exporter-geneva diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/Cargo.toml b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/Cargo.toml index 001efa516..8afe85d58 100644 --- a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/Cargo.toml +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/Cargo.toml @@ -1,17 +1,23 @@ [package] name = "opentelemetry-exporter-geneva" -version = "0.1.0" +description = "OpenTelemetry exporter for Geneva logs and traces" +version = "0.3.0" edition = "2021" +homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva" +repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva" +rust-version = "1.85.0" +keywords = ["opentelemetry", "geneva", "logs", "traces", "exporter"] license = "Apache-2.0" -rust-version = "1.75.0" [dependencies] -opentelemetry_sdk = {workspace = true, default-features = false, features = ["logs"]} -opentelemetry-proto = {workspace = true, default-features = false, features = ["logs"]} -geneva-uploader = {path = "../geneva-uploader/", version = "0.1.0"} +opentelemetry_sdk = {workspace = true, default-features = false, features = ["logs", "trace"]} +opentelemetry-proto = {workspace = true, default-features = false, features = ["logs", "trace"]} +geneva-uploader = { path = "../geneva-uploader", version = "0.3.0" } +futures = "0.3" [dev-dependencies] opentelemetry-appender-tracing = {workspace = true} +opentelemetry = {workspace = true} opentelemetry_sdk = { workspace = true, features = ["logs", "trace", "experimental_logs_batch_log_processor_with_async_runtime", "experimental_async_runtime", "rt-tokio"] } tracing = { version = "0.1", default-features = false, features = ["std"] } tracing-core = "0.1.31" @@ -19,4 +25,4 @@ tracing-subscriber = { version = "0.3.0", default-features = false, features = [ tokio = { version = "1", features = ["rt-multi-thread", "macros"] } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/README.md b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/README.md new file mode 100644 index 000000000..7f0616f63 --- /dev/null +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/README.md @@ -0,0 +1,5 @@ +# opentelemetry-exporter-geneva + +The opentelemetry-exporter-geneva is designed for Microsoft products to send data to public-facing end-points which route to Microsoft's internal data pipeline. It is not meant to be used outside of Microsoft products and is open sourced to demonstrate best practices and to be transparent about what is being collected. 
+ +opentelemetry-exporter-geneva: OpenTelemetry-compliant exporter for Geneva diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/Dockerfile b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/Dockerfile new file mode 100644 index 000000000..8e45c4544 --- /dev/null +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/Dockerfile @@ -0,0 +1,45 @@ +# Dockerfile for Geneva Uploader Workload Identity Test + # + # This Dockerfile must be built from the repository root to access the workspace: + # cd /path/to/opentelemetry-rust-contrib + # docker build -f opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/Dockerfile -t geneva-uploader-test:latest . + # + # Or using ACR: + # az acr build --registry --image geneva-uploader-test:latest \ + # --file opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/Dockerfile . + + FROM rust:1.85-slim AS builder + + # Install build dependencies + RUN apt-get update && apt-get install -y \ + pkg-config \ + libssl-dev \ + && rm -rf /var/lib/apt/lists/* + + WORKDIR /app + + # Copy the entire workspace from repository root + COPY . . + + # Build the example + WORKDIR /app/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva + RUN cargo build --release --example basic_workload_identity_test + + # Runtime stage + FROM debian:bookworm-slim + + # Install runtime dependencies + RUN apt-get update && apt-get install -y \ + ca-certificates \ + libssl3 \ + && rm -rf /var/lib/apt/lists/* + + # Copy the binary + COPY --from=builder /app/target/release/examples/basic_workload_identity_test /usr/local/bin/geneva-uploader-test + + # Run as non-root user + RUN useradd -m -u 1000 appuser + USER appuser + + ENTRYPOINT ["/usr/local/bin/geneva-uploader-test"] + diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/README.md b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/README.md new file mode 100644 index 000000000..37ab414c4 --- /dev/null +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/README.md @@ -0,0 +1,360 @@ +# Geneva Exporter - Workload Identity Example + +This example demonstrates how to use Azure Workload Identity to authenticate to Geneva Config Service (GCS) from an Azure Kubernetes Service (AKS) cluster. + +## Prerequisites + +- Azure CLI (`az`) installed and authenticated +- `kubectl` configured to access your AKS cluster +- AKS cluster with OIDC Issuer and Workload Identity enabled +- Azure Container Registry (ACR) attached to your AKS cluster +- Access to Geneva/Jarvis portal for registering managed identities + +## Architecture + +Azure Workload Identity enables Kubernetes pods to authenticate to Azure services using **User-Assigned Managed Identities** with federated identity credentials. This approach uses Managed Identities, NOT App Registrations, simplifying credential management. + +**Authentication Flow**: +1. Pod runs with a Kubernetes service account +2. Kubernetes injects a service account JWT token into the pod +3. Application exchanges the Kubernetes token for an Azure AD access token using the Managed Identity +4. Azure AD access token is used to authenticate to Geneva Config Service + +**Key Difference**: Traditional Workload Identity setups often use App Registrations with client secrets. This implementation uses **User-Assigned Managed Identities** instead, which eliminates the need to manage secrets or certificates. 
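For reference, the token exchange in steps 3 and 4 of this flow is what the uploader's `get_workload_identity_token` performs internally. The sketch below shows the same exchange in isolation, which can be handy for debugging inside the pod; it assumes the three environment variables are injected and that the `azure_identity`/`azure_core` module paths match the versions used by this patch:

```rust
use azure_core::credentials::TokenCredential;
use azure_identity::WorkloadIdentityCredential;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // None => read AZURE_CLIENT_ID, AZURE_TENANT_ID, and AZURE_FEDERATED_TOKEN_FILE
    // from the environment, exactly as the uploader does.
    let credential = WorkloadIdentityCredential::new(None)?;

    // Scope is the Geneva resource plus "/.default"
    // (https://monitor.azure.com on Azure public cloud).
    let token = credential
        .get_token(&["https://monitor.azure.com/.default"], None)
        .await?;
    println!("acquired token ({} chars)", token.token.secret().len());
    Ok(())
}
```

If this exchange succeeds but uploads still fail, the problem is usually on the Geneva side (identity not yet registered or missing permissions) rather than in the federated credential setup.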
+ +## Step 1: Enable Workload Identity on AKS (if not already enabled) + +```bash +# Check if OIDC issuer is enabled +az aks show --resource-group --name --query "oidcIssuerProfile.issuerUrl" -o tsv + +# If not enabled, enable it +az aks update \ + --resource-group \ + --name \ + --enable-oidc-issuer \ + --enable-workload-identity +``` + +## Step 2: Create User-Assigned Managed Identity + +**Important**: We create a **User-Assigned Managed Identity**, NOT an Azure AD App Registration. Workload Identity with Managed Identities is simpler and doesn't require managing client secrets or certificates. + +```bash +# Set variables +RESOURCE_GROUP="" +LOCATION="" # e.g., eastus2 +IDENTITY_NAME="geneva-uploader-identity-$(openssl rand -hex 3)" + +# Create the managed identity (NOT an App Registration) +az identity create \ + --resource-group $RESOURCE_GROUP \ + --name $IDENTITY_NAME \ + --location $LOCATION + +# Get the client ID and principal ID +export AZURE_CLIENT_ID=$(az identity show --resource-group $RESOURCE_GROUP --name $IDENTITY_NAME --query clientId -o tsv) +export PRINCIPAL_ID=$(az identity show --resource-group $RESOURCE_GROUP --name $IDENTITY_NAME --query principalId -o tsv) + +echo "Client ID: $AZURE_CLIENT_ID" +echo "Principal ID: $PRINCIPAL_ID" + +# Note: The AZURE_CLIENT_ID here is the managed identity's client ID, not an App Registration +``` + +## Step 3: Create Kubernetes Service Account + +```bash +# Set Kubernetes variables +NAMESPACE="default" # or your preferred namespace +SERVICE_ACCOUNT_NAME="geneva-uploader-sa" + +# Create service account with workload identity annotation +cat < --query "oidcIssuerProfile.issuerUrl" -o tsv) + +# Create federated credential +FEDERATED_CREDENTIAL_NAME="geneva-fedcred-$(openssl rand -hex 3)" + +az identity federated-credential create \ + --name $FEDERATED_CREDENTIAL_NAME \ + --identity-name $IDENTITY_NAME \ + --resource-group $RESOURCE_GROUP \ + --issuer $AKS_OIDC_ISSUER \ + --subject system:serviceaccount:$NAMESPACE:$SERVICE_ACCOUNT_NAME \ + --audience api://AzureADTokenExchange + +echo "Federated credential created: $FEDERATED_CREDENTIAL_NAME" +``` + +## Step 5: Register Managed Identity in Geneva Portal + +Register the managed identity using the **Principal ID (Object ID)** from Step 2. Wait 5-10 minutes for propagation. + +## Step 6: Get Your Azure Tenant ID + +```bash +export AZURE_TENANT_ID=$(az account show --query tenantId -o tsv) +echo "Tenant ID: $AZURE_TENANT_ID" +``` + +## Step 7: Build and Push Docker Image + +```bash +# Navigate to the workspace root +cd /path/to/opentelemetry-rust-contrib + +# Set ACR variables +ACR_NAME="" +IMAGE_NAME="geneva-uploader-workload-identity-test" +IMAGE_TAG="latest" + +# Build the image +docker build \ + -f opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/Dockerfile \ + -t $ACR_NAME.azurecr.io/$IMAGE_NAME:$IMAGE_TAG \ + . + +# Push to ACR +az acr login --name $ACR_NAME +docker push $ACR_NAME.azurecr.io/$IMAGE_NAME:$IMAGE_TAG +``` + +## Step 8: Create ConfigMap with Geneva Configuration + +```bash +# Create ConfigMap with your Geneva environment configuration +cat <` +- Workload identity webhook is running in the cluster (it injects `AZURE_FEDERATED_TOKEN_FILE`) + +### Token exchange fails with "invalid_client" + +**Cause**: Federated credential not configured correctly. 
+ +**Fix**: Verify: +- Federated credential issuer matches AKS OIDC issuer exactly +- Subject is `system:serviceaccount::` +- Audience is `api://AzureADTokenExchange` + +### "Invalid scope" error + +**Cause**: Wrong resource URI for your Azure cloud. + +**Fix**: Update `GENEVA_WORKLOAD_IDENTITY_RESOURCE` in ConfigMap: +- Azure Public: `https://monitor.azure.com` +- Azure Government: `https://monitor.azure.us` +- Azure China: `https://monitor.azure.cn` + +### Logs show success but no data in Geneva + +**Possible causes**: +1. Managed identity not registered in Geneva (wait 5-10 minutes after registration) +2. Identity doesn't have correct permissions in Geneva account +3. Wrong Geneva endpoint or account configuration + +**Fix**: +- Verify identity in Geneva portal +- Check Geneva account permissions +- Review ConfigMap values against Geneva documentation + +### Check workload identity webhook status + +```bash +kubectl get pods -n kube-system | grep workload-identity +kubectl logs -n kube-system -l app.kubernetes.io/name=workload-identity-webhook +``` + +## Example kubectl Commands + +```bash +# Watch pod status +kubectl get pod geneva-uploader-test -n $NAMESPACE -w + +# Get detailed pod info +kubectl describe pod geneva-uploader-test -n $NAMESPACE + +# Stream logs +kubectl logs -f geneva-uploader-test -n $NAMESPACE + +# Check service account +kubectl get serviceaccount $SERVICE_ACCOUNT_NAME -n $NAMESPACE -o yaml + +# Check ConfigMap +kubectl get configmap geneva-config -n $NAMESPACE -o yaml + +# Delete and redeploy +kubectl delete pod geneva-uploader-test -n $NAMESPACE +# Then re-run Step 9 +``` + +## Cleanup + +```bash +# Delete Kubernetes resources +kubectl delete pod geneva-uploader-test -n $NAMESPACE +kubectl delete configmap geneva-config -n $NAMESPACE +kubectl delete serviceaccount $SERVICE_ACCOUNT_NAME -n $NAMESPACE + +# Delete Azure resources +az identity federated-credential delete \ + --name $FEDERATED_CREDENTIAL_NAME \ + --identity-name $IDENTITY_NAME \ + --resource-group $RESOURCE_GROUP + +az identity delete \ + --resource-group $RESOURCE_GROUP \ + --name $IDENTITY_NAME + +# Remove from Jarvis (Geneva portal) manually +``` + +## References + +- [Azure Workload Identity Documentation](https://azure.github.io/azure-workload-identity/) +- [AKS Workload Identity Overview](https://learn.microsoft.com/azure/aks/workload-identity-overview) + diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic.rs b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic.rs index e655212a9..6763edb4d 100644 --- a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic.rs +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic.rs @@ -1,4 +1,30 @@ //! run with `$ cargo run --example basic +//! +//! # Geneva Uploader Internal Logs +//! +//! By default, this example enables DEBUG level logs for geneva-uploader, showing all internal +//! operations including initialization, auth, encoding, compression, and uploads. +//! +//! ## Default behavior (no RUST_LOG needed) +//! ```bash +//! cargo run --example basic +//! ``` +//! This shows DEBUG level logs from geneva-uploader. +//! +//! ## Override to INFO level (initialization, auth token acquisition, GCS config only) +//! ```bash +//! RUST_LOG=geneva-uploader=info cargo run --example basic +//! ``` +//! +//! ## Disable geneva-uploader logs +//! ```bash +//! RUST_LOG=geneva-uploader=off cargo run --example basic +//! ``` +//! +//! 
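+//! ## Combine overrides (directives merge with the defaults; later ones win)
+//! ```bash
+//! RUST_LOG=geneva-uploader=info,hyper=off cargo run --example basic
+//! ```
+//!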
## Filter out noisy dependencies while keeping geneva-uploader at DEBUG +//! ```bash +//! RUST_LOG=hyper=off,reqwest=off cargo run --example basic +//! ``` use geneva_uploader::client::{GenevaClient, GenevaClientConfig}; use geneva_uploader::AuthMethod; @@ -62,12 +88,10 @@ async fn main() { tenant, role_name, role_instance, - max_concurrent_uploads: None, // Use default + msi_resource: None, }; - let geneva_client = GenevaClient::new(config) - .await - .expect("Failed to create GenevaClient"); + let geneva_client = GenevaClient::new(config).expect("Failed to create GenevaClient"); let exporter = GenevaExporter::new(geneva_client); let batch_processor = BatchLogProcessor::builder(exporter, Tokio) @@ -104,13 +128,32 @@ async fn main() { .add_directive("reqwest=off".parse().unwrap()); let otel_layer = layer::OpenTelemetryTracingBridge::new(&provider).with_filter(filter_otel); - // Create a new tracing::Fmt layer to print the logs to stdout. It has a - // default filter of `info` level and above, and `debug` and above for logs - // from OpenTelemetry crates. The filter levels can be customized as needed. - let filter_fmt = EnvFilter::new("info") + // Create a new tracing::Fmt layer to print the logs to stdout. + // Default filter: info level for most logs, debug level for opentelemetry, hyper, reqwest, and geneva-uploader. + // Users can override these defaults with RUST_LOG (later directives override earlier ones). + // Examples: + // cargo run --example basic # Uses defaults (geneva-uploader=debug) + // RUST_LOG=geneva-uploader=info cargo run --example basic # Override to info level + // RUST_LOG=geneva-uploader=off cargo run --example basic # Disable geneva-uploader logs + // RUST_LOG=hyper=off,reqwest=off cargo run --example basic # Quiet noisy deps, keep geneva-uploader=debug + let mut filter_fmt = EnvFilter::new("info") + .add_directive("opentelemetry=debug".parse().unwrap()) .add_directive("hyper=debug".parse().unwrap()) .add_directive("reqwest=debug".parse().unwrap()) - .add_directive("opentelemetry=debug".parse().unwrap()); + .add_directive("geneva-uploader=debug".parse().unwrap()); + + if let Ok(spec) = std::env::var("RUST_LOG") { + for part in spec.split(',') { + let p = part.trim(); + if p.is_empty() { + continue; + } + if let Ok(d) = p.parse() { + filter_fmt = filter_fmt.add_directive(d); + } + } + } + let fmt_layer = tracing_subscriber::fmt::layer() .with_thread_names(true) .with_filter(filter_fmt); @@ -120,28 +163,19 @@ async fn main() { .with(fmt_layer) .init(); - // User registration event + // Generate logs to trigger batch processing and GCS calls info!(name: "Log", target: "my-system", event_id = 20, user_name = "user1", user_email = "user1@opentelemetry.io", message = "Registration successful"); - // User checkout event info!(name: "Log", target: "my-system", event_id = 51, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Checkout successful"); - // Login event info!(name: "Log", target: "my-system", event_id = 30, user_name = "user3", user_email = "user3@opentelemetry.io", message = "User login successful"); - // Payment processed info!(name: "Log", target: "my-system", event_id = 52, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Payment processed successfully"); - // Error event - Failed login error!(name: "Log", target: "my-system", event_id = 31, user_name = "user4", user_email = "user4@opentelemetry.io", message = "Login failed - invalid credentials"); - // Warning event - Cart abandoned warn!(name: "Log", target: 
"my-system", event_id = 53, user_name = "user5", user_email = "user5@opentelemetry.io", message = "Shopping cart abandoned"); - - // Password reset info!(name: "Log", target: "my-system", event_id = 32, user_name = "user1", user_email = "user1@opentelemetry.io", message = "Password reset requested"); - - // Order shipped info!(name: "Log", target: "my-system", event_id = 54, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Order shipped successfully"); - // sleep for a while - println!("Sleeping for 5 seconds..."); - thread::sleep(Duration::from_secs(5)); + println!("Sleeping for 30 seconds..."); + thread::sleep(Duration::from_secs(30)); + let _ = provider.shutdown(); println!("Shutting down provider"); } diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic_msi_test.rs b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic_msi_test.rs new file mode 100644 index 000000000..4d2963173 --- /dev/null +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic_msi_test.rs @@ -0,0 +1,180 @@ +//! run with `$ cargo run --example basic_msi_test` + +use geneva_uploader::client::{GenevaClient, GenevaClientConfig}; +use geneva_uploader::AuthMethod; +use opentelemetry_appender_tracing::layer; +use opentelemetry_exporter_geneva::GenevaExporter; +use opentelemetry_sdk::logs::log_processor_with_async_runtime::BatchLogProcessor; +use opentelemetry_sdk::runtime::Tokio; +use opentelemetry_sdk::{ + logs::{BatchConfig, SdkLoggerProvider}, + Resource, +}; +use std::env; +use std::thread; +use std::time::Duration; +use tracing::{error, info, warn}; +use tracing_subscriber::{prelude::*, EnvFilter}; + +/* +Environment variables required: + +export GENEVA_ENDPOINT="https://abc.azurewebsites.net" +export GENEVA_ENVIRONMENT="Test" +export GENEVA_ACCOUNT="PipelineAgent2Demo" +export GENEVA_NAMESPACE="PAdemo2" +export GENEVA_REGION="eastus" +export GENEVA_CONFIG_MAJOR_VERSION=2 +export MONITORING_GCS_AUTH_ID_TYPE="AuthMSIToken" +export GENEVA_MSI_RESOURCE="https://abc.azurewebsites.net" # Resource (audience) base used for MSI token (supply for your cloud) + +# Identity selection: +# System-assigned: leave MONITORING_MANAGED_ID_IDENTIFIER and MONITORING_MANAGED_ID_VALUE unset +# User-assigned: set both vars below (choose exactly one identifier type) +export MONITORING_MANAGED_ID_IDENTIFIER="object_id" # object_id|client_id|mi_res_id|resource_id|system (system => ignore value) +export MONITORING_MANAGED_ID_VALUE="" # required if identifier != system; GUID for object_id/client_id, ARM path for mi_res_id +*/ + +#[tokio::main] +async fn main() { + let endpoint = env::var("GENEVA_ENDPOINT").expect("GENEVA_ENDPOINT is required"); + let environment = env::var("GENEVA_ENVIRONMENT").expect("GENEVA_ENVIRONMENT is required"); + let account = env::var("GENEVA_ACCOUNT").expect("GENEVA_ACCOUNT is required"); + let namespace = env::var("GENEVA_NAMESPACE").expect("GENEVA_NAMESPACE is required"); + let region = env::var("GENEVA_REGION").expect("GENEVA_REGION is required"); + let config_major_version: u32 = env::var("GENEVA_CONFIG_MAJOR_VERSION") + .expect("GENEVA_CONFIG_MAJOR_VERSION is required") + .parse() + .expect("GENEVA_CONFIG_MAJOR_VERSION must be a u32"); + let msi_resource = env::var("GENEVA_MSI_RESOURCE").ok(); + + let tenant = env::var("GENEVA_TENANT").unwrap_or_else(|_| "default-tenant".to_string()); + let role_name = env::var("GENEVA_ROLE_NAME").unwrap_or_else(|_| "default-role".to_string()); + let role_instance = + 
env::var("GENEVA_ROLE_INSTANCE").unwrap_or_else(|_| "default-instance".to_string()); + + // Determine authentication method based on environment variables (MSI only for this example) + let auth_method = match env::var("MONITORING_GCS_AUTH_ID_TYPE").as_deref() { + Ok("AuthMSIToken") => { + let auth_method = match env::var("MONITORING_MANAGED_ID_IDENTIFIER") { + Err(_) => AuthMethod::SystemManagedIdentity, + Ok(raw) => { + let key = raw.to_ascii_lowercase(); + match key.as_str() { + "system" => AuthMethod::SystemManagedIdentity, + "client_id" => { + let v = env::var("MONITORING_MANAGED_ID_VALUE").expect( + "MONITORING_MANAGED_ID_VALUE required when MONITORING_MANAGED_ID_IDENTIFIER=client_id", + ); + AuthMethod::UserManagedIdentity { client_id: v } + } + "object_id" => { + let v = env::var("MONITORING_MANAGED_ID_VALUE").expect( + "MONITORING_MANAGED_ID_VALUE required when MONITORING_MANAGED_ID_IDENTIFIER=object_id", + ); + AuthMethod::UserManagedIdentityByObjectId { object_id: v } + } + "mi_res_id" | "resource_id" => { + let v = env::var("MONITORING_MANAGED_ID_VALUE").expect( + "MONITORING_MANAGED_ID_VALUE required when MONITORING_MANAGED_ID_IDENTIFIER=mi_res_id/resource_id", + ); + AuthMethod::UserManagedIdentityByResourceId { resource_id: v } + } + other => panic!( + "Unsupported MONITORING_MANAGED_ID_IDENTIFIER value: {other}. Expected one of: system | object_id | client_id | mi_res_id | resource_id" + ), + } + } + }; + auth_method + } + _ => panic!( + "This example requires MSI authentication. Set MONITORING_GCS_AUTH_ID_TYPE=AuthMSIToken" + ), + }; + + let config = GenevaClientConfig { + endpoint, + environment, + account, + namespace, + region, + config_major_version, + tenant, + role_name, + role_instance, + auth_method, + msi_resource, + }; + + // GenevaClient::new is synchronous (returns Result), so no await is needed here. + let geneva_client = GenevaClient::new(config).expect("Failed to create GenevaClient"); + + let exporter = GenevaExporter::new(geneva_client); + let batch_processor = BatchLogProcessor::builder(exporter, Tokio) + .with_batch_config(BatchConfig::default()) + .build(); + + let provider: SdkLoggerProvider = SdkLoggerProvider::builder() + .with_resource( + Resource::builder() + .with_service_name("geneva-exporter-msi-test") + .build(), + ) + .with_log_processor(batch_processor) + .build(); + + // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal + // logging is properly suppressed. However, logs emitted by external components + // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate + // OpenTelemetry context. Until this issue is addressed + // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877), + // filtering like this is the best way to suppress such logs. + // + // The filter levels are set as follows: + // - Allow `info` level and above by default. + // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`. + // + // Note: This filtering will also drop logs from these components even when + // they are used outside of the OTLP Exporter. + let filter_otel = EnvFilter::new("info") + .add_directive("hyper=off".parse().unwrap()) + .add_directive("opentelemetry=off".parse().unwrap()) + .add_directive("tonic=off".parse().unwrap()) + .add_directive("h2=off".parse().unwrap()) + .add_directive("reqwest=off".parse().unwrap()); + let otel_layer = layer::OpenTelemetryTracingBridge::new(&provider).with_filter(filter_otel); + + // Create a new tracing::Fmt layer to print the logs to stdout. 
It has a + // default filter of `info` level and above, and `debug` and above for logs + // from OpenTelemetry crates. The filter levels can be customized as needed. + let filter_fmt = EnvFilter::new("info") + .add_directive("hyper=debug".parse().unwrap()) + .add_directive("reqwest=debug".parse().unwrap()) + .add_directive("opentelemetry=debug".parse().unwrap()) + .add_directive("geneva-uploader=debug".parse().unwrap()); + let fmt_layer = tracing_subscriber::fmt::layer() + .with_thread_names(true) + .with_filter(filter_fmt); + + tracing_subscriber::registry() + .with(otel_layer) + .with(fmt_layer) + .init(); + + // Generate logs to trigger batch processing and GCS calls + info!(name: "Log", target: "my-system", event_id = 20, user_name = "user1", user_email = "user1@opentelemetry.io", message = "Registration successful"); + info!(name: "Log", target: "my-system", event_id = 51, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Checkout successful"); + info!(name: "Log", target: "my-system", event_id = 30, user_name = "user3", user_email = "user3@opentelemetry.io", message = "User login successful"); + info!(name: "Log", target: "my-system", event_id = 52, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Payment processed successfully"); + error!(name: "Log", target: "my-system", event_id = 31, user_name = "user4", user_email = "user4@opentelemetry.io", message = "Login failed - invalid credentials"); + warn!(name: "Log", target: "my-system", event_id = 53, user_name = "user5", user_email = "user5@opentelemetry.io", message = "Shopping cart abandoned"); + info!(name: "Log", target: "my-system", event_id = 32, user_name = "user1", user_email = "user1@opentelemetry.io", message = "Password reset requested"); + info!(name: "Log", target: "my-system", event_id = 54, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Order shipped successfully"); + + println!("Sleeping for 30 seconds..."); + thread::sleep(Duration::from_secs(30)); + + let _ = provider.shutdown(); + println!("Shutting down provider"); +} diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic_workload_identity_test.rs b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic_workload_identity_test.rs new file mode 100644 index 000000000..6c4ec918c --- /dev/null +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/basic_workload_identity_test.rs @@ -0,0 +1,159 @@ +//! 
run with `$ cargo run --example basic_workload_identity_test` + +use geneva_uploader::client::{GenevaClient, GenevaClientConfig}; +use geneva_uploader::AuthMethod; +use opentelemetry_appender_tracing::layer; +use opentelemetry_exporter_geneva::GenevaExporter; +use opentelemetry_sdk::logs::log_processor_with_async_runtime::BatchLogProcessor; +use opentelemetry_sdk::runtime::Tokio; +use opentelemetry_sdk::{ + logs::{BatchConfig, SdkLoggerProvider}, + Resource, +}; +use std::env; +use std::thread; +use std::time::Duration; +use tracing::{error, info, warn}; +use tracing_subscriber::{prelude::*, EnvFilter}; + +/* +Environment variables required: + +export GENEVA_ENDPOINT="https://abc.azurewebsites.net" +export GENEVA_ENVIRONMENT="Test" +export GENEVA_ACCOUNT="PipelineAgent2Demo" +export GENEVA_NAMESPACE="PAdemo2" +export GENEVA_REGION="eastus" +export GENEVA_CONFIG_MAJOR_VERSION=2 +export MONITORING_GCS_AUTH_ID_TYPE="AuthWorkloadIdentity" +export GENEVA_WORKLOAD_IDENTITY_RESOURCE="https://abc.azurewebsites.net" # Resource (audience) base for token exchange + +# Azure Workload Identity configuration: +export AZURE_CLIENT_ID="" # Azure AD Application (client) ID +export AZURE_TENANT_ID="" # Azure AD Tenant ID +export AZURE_FEDERATED_TOKEN_FILE="/var/run/secrets/azure/tokens/azure-identity-token" # Path to service account token (Kubernetes default) + +# Optional: Override the token file path +# export WORKLOAD_IDENTITY_TOKEN_FILE="/custom/path/to/token" +*/ + +#[tokio::main] +async fn main() { + let endpoint = env::var("GENEVA_ENDPOINT").expect("GENEVA_ENDPOINT is required"); + let environment = env::var("GENEVA_ENVIRONMENT").expect("GENEVA_ENVIRONMENT is required"); + let account = env::var("GENEVA_ACCOUNT").expect("GENEVA_ACCOUNT is required"); + let namespace = env::var("GENEVA_NAMESPACE").expect("GENEVA_NAMESPACE is required"); + let region = env::var("GENEVA_REGION").expect("GENEVA_REGION is required"); + let config_major_version: u32 = env::var("GENEVA_CONFIG_MAJOR_VERSION") + .expect("GENEVA_CONFIG_MAJOR_VERSION is required") + .parse() + .expect("GENEVA_CONFIG_MAJOR_VERSION must be a u32"); + + let tenant = env::var("GENEVA_TENANT").unwrap_or_else(|_| "default-tenant".to_string()); + let role_name = env::var("GENEVA_ROLE_NAME").unwrap_or_else(|_| "default-role".to_string()); + let role_instance = + env::var("GENEVA_ROLE_INSTANCE").unwrap_or_else(|_| "default-instance".to_string()); + + // Determine authentication method based on environment variables + let auth_method = match env::var("MONITORING_GCS_AUTH_ID_TYPE").as_deref() { + Ok("AuthWorkloadIdentity") => { + let resource = env::var("GENEVA_WORKLOAD_IDENTITY_RESOURCE") + .expect("GENEVA_WORKLOAD_IDENTITY_RESOURCE required for Workload Identity auth"); + + // Note: AZURE_CLIENT_ID, AZURE_TENANT_ID, and AZURE_FEDERATED_TOKEN_FILE + // are read automatically by the azure_identity crate from environment variables. + // These are typically set by the Azure Workload Identity webhook in Kubernetes. + AuthMethod::WorkloadIdentity { + resource, + } + } + _ => panic!( + "This example requires Workload Identity authentication. Set MONITORING_GCS_AUTH_ID_TYPE=AuthWorkloadIdentity" + ), + }; + + let config = GenevaClientConfig { + endpoint, + environment, + account, + namespace, + region, + config_major_version, + tenant, + role_name, + role_instance, + auth_method, + msi_resource: None, // Not used for Workload Identity + }; + + // GenevaClient::new is synchronous (returns Result), so no await is needed here. 
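+    // The `expect` below aborts this example on failure; a long-running service
+    // would typically propagate the error instead of panicking.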
+ let geneva_client = GenevaClient::new(config).expect("Failed to create GenevaClient"); + + let exporter = GenevaExporter::new(geneva_client); + let batch_processor = BatchLogProcessor::builder(exporter, Tokio) + .with_batch_config(BatchConfig::default()) + .build(); + + let provider: SdkLoggerProvider = SdkLoggerProvider::builder() + .with_resource( + Resource::builder() + .with_service_name("geneva-exporter-workload-identity-test") + .build(), + ) + .with_log_processor(batch_processor) + .build(); + + // To prevent a telemetry-induced-telemetry loop, OpenTelemetry's own internal + // logging is properly suppressed. However, logs emitted by external components + // (such as reqwest, tonic, etc.) are not suppressed as they do not propagate + // OpenTelemetry context. Until this issue is addressed + // (https://github.com/open-telemetry/opentelemetry-rust/issues/2877), + // filtering like this is the best way to suppress such logs. + // + // The filter levels are set as follows: + // - Allow `info` level and above by default. + // - Completely restrict logs from `hyper`, `tonic`, `h2`, and `reqwest`. + // + // Note: This filtering will also drop logs from these components even when + // they are used outside of the OTLP Exporter. + let filter_otel = EnvFilter::new("info") + .add_directive("hyper=off".parse().unwrap()) + .add_directive("opentelemetry=off".parse().unwrap()) + .add_directive("tonic=off".parse().unwrap()) + .add_directive("h2=off".parse().unwrap()) + .add_directive("reqwest=off".parse().unwrap()); + let otel_layer = layer::OpenTelemetryTracingBridge::new(&provider).with_filter(filter_otel); + + // Create a new tracing::Fmt layer to print the logs to stdout. It has a + // default filter of `info` level and above, and `debug` and above for logs + // from OpenTelemetry crates. The filter levels can be customized as needed. 
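+    // Note: unlike the `basic` example, this example does not merge `RUST_LOG`
+    // overrides into the filter; edit the directives below to change verbosity.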
+ let filter_fmt = EnvFilter::new("info") + .add_directive("hyper=debug".parse().unwrap()) + .add_directive("reqwest=debug".parse().unwrap()) + .add_directive("opentelemetry=debug".parse().unwrap()) + .add_directive("geneva-uploader=debug".parse().unwrap()); + let fmt_layer = tracing_subscriber::fmt::layer() + .with_thread_names(true) + .with_filter(filter_fmt); + + tracing_subscriber::registry() + .with(otel_layer) + .with(fmt_layer) + .init(); + + // Generate logs to trigger batch processing and GCS calls + info!(name: "Log", target: "my-system", event_id = 20, user_name = "user1", user_email = "user1@opentelemetry.io", message = "Registration successful"); + info!(name: "Log", target: "my-system", event_id = 51, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Checkout successful"); + info!(name: "Log", target: "my-system", event_id = 30, user_name = "user3", user_email = "user3@opentelemetry.io", message = "User login successful"); + info!(name: "Log", target: "my-system", event_id = 52, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Payment processed successfully"); + error!(name: "Log", target: "my-system", event_id = 31, user_name = "user4", user_email = "user4@opentelemetry.io", message = "Login failed - invalid credentials"); + warn!(name: "Log", target: "my-system", event_id = 53, user_name = "user5", user_email = "user5@opentelemetry.io", message = "Shopping cart abandoned"); + info!(name: "Log", target: "my-system", event_id = 32, user_name = "user1", user_email = "user1@opentelemetry.io", message = "Password reset requested"); + info!(name: "Log", target: "my-system", event_id = 54, user_name = "user2", user_email = "user2@opentelemetry.io", message = "Order shipped successfully"); + + println!("Sleeping for 30 seconds..."); + thread::sleep(Duration::from_secs(30)); + + let _ = provider.shutdown(); + println!("Shutting down provider"); +} diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/trace_basic.rs b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/trace_basic.rs new file mode 100644 index 000000000..3388bf4f2 --- /dev/null +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/examples/trace_basic.rs @@ -0,0 +1,207 @@ +//! 
run with `$ cargo run --example trace_basic + +use geneva_uploader::client::{GenevaClient, GenevaClientConfig}; +use geneva_uploader::AuthMethod; +use opentelemetry::{global, trace::Tracer, KeyValue}; +use opentelemetry_exporter_geneva::GenevaTraceExporter; +use opentelemetry_sdk::trace::{SdkTracerProvider, SimpleSpanProcessor}; +use opentelemetry_sdk::Resource; +use std::env; +use std::path::PathBuf; +use std::thread; +use std::time::Duration; + +/* +export GENEVA_ENDPOINT="https://abc.azurewebsites.net" +export GENEVA_ENVIRONMENT="Test" +export GENEVA_ACCOUNT="myaccount" +export GENEVA_NAMESPACE="myns" +export GENEVA_REGION="eastus" +export GENEVA_CERT_PATH="/tmp/client.p12" +export GENEVA_CERT_PASSWORD="password" +export GENEVA_CONFIG_MAJOR_VERSION=2 +*/ + +#[tokio::main] +async fn main() { + let endpoint = env::var("GENEVA_ENDPOINT").expect("GENEVA_ENDPOINT is required"); + let environment = env::var("GENEVA_ENVIRONMENT").expect("GENEVA_ENVIRONMENT is required"); + let account = env::var("GENEVA_ACCOUNT").expect("GENEVA_ACCOUNT is required"); + let namespace = env::var("GENEVA_NAMESPACE").expect("GENEVA_NAMESPACE is required"); + let region = env::var("GENEVA_REGION").expect("GENEVA_REGION is required"); + let cert_path = + PathBuf::from(env::var("GENEVA_CERT_PATH").expect("GENEVA_CERT_PATH is required")); + let cert_password = env::var("GENEVA_CERT_PASSWORD").expect("GENEVA_CERT_PASSWORD is required"); + let config_major_version: u32 = env::var("GENEVA_CONFIG_MAJOR_VERSION") + .expect("GENEVA_CONFIG_MAJOR_VERSION is required") + .parse() + .expect("GENEVA_CONFIG_MAJOR_VERSION must be a u32"); + + let tenant = env::var("GENEVA_TENANT").unwrap_or_else(|_| "default-tenant".to_string()); + let role_name = env::var("GENEVA_ROLE_NAME").unwrap_or_else(|_| "default-role".to_string()); + let role_instance = + env::var("GENEVA_ROLE_INSTANCE").unwrap_or_else(|_| "default-instance".to_string()); + + let config = GenevaClientConfig { + endpoint, + environment, + account, + namespace, + region, + config_major_version, + auth_method: AuthMethod::Certificate { + path: cert_path, + password: cert_password, + }, + tenant, + role_name, + role_instance, + msi_resource: None, + }; + + let geneva_client = GenevaClient::new(config).expect("Failed to create GenevaClient"); + + // Create Geneva trace exporter + let exporter = GenevaTraceExporter::new(geneva_client); + + // Create simple span processor (exports spans immediately) + let span_processor = SimpleSpanProcessor::new(exporter); + + // Create tracer provider + let tracer_provider = SdkTracerProvider::builder() + .with_span_processor(span_processor) + .with_resource( + Resource::builder() + .with_service_name("geneva-trace-exporter-example") + .build(), + ) + .build(); + + // Set the global tracer provider + global::set_tracer_provider(tracer_provider.clone()); + + // Get a tracer + let tracer = global::tracer("geneva-trace-example"); + + // Create some example spans + println!("Creating example spans..."); + + // Example 1: User registration flow + { + let _registration_span = tracer + .span_builder("user_registration") + .with_attributes(vec![ + KeyValue::new("user.id", "user123"), + KeyValue::new("user.email", "user123@example.com"), + KeyValue::new("operation.type", "registration"), + ]) + .start(&tracer); + + // Database operation span + { + let _db_span = tracer + .span_builder("database_query") + .with_attributes(vec![ + KeyValue::new("db.system", "postgresql"), + KeyValue::new("db.name", "users"), + KeyValue::new("db.operation", "INSERT"), + 
KeyValue::new( + "db.statement", + "INSERT INTO users (email, name) VALUES (?, ?)", + ), + ]) + .start(&tracer); + thread::sleep(Duration::from_millis(50)); // Simulate database work + } // db_span ends here + + // Email operation span + { + let _email_span = tracer + .span_builder("send_welcome_email") + .with_attributes(vec![ + KeyValue::new("http.method", "POST"), + KeyValue::new("http.url", "https://api.email-service.com/send"), + KeyValue::new("http.status_code", 200), + KeyValue::new("email.type", "welcome"), + ]) + .start(&tracer); + thread::sleep(Duration::from_millis(100)); // Simulate HTTP request + } // email_span ends here + } // registration_span ends here + + // Example 2: E-commerce checkout flow + { + let _checkout_span = tracer + .span_builder("checkout_process") + .with_attributes(vec![ + KeyValue::new("user.id", "user456"), + KeyValue::new("cart.total", 99.99), + KeyValue::new("currency", "USD"), + ]) + .start(&tracer); + + // Payment processing span + { + let _payment_span = tracer + .span_builder("process_payment") + .with_attributes(vec![ + KeyValue::new("payment.method", "credit_card"), + KeyValue::new("payment.amount", 99.99), + KeyValue::new("payment.processor", "stripe"), + ]) + .start(&tracer); + thread::sleep(Duration::from_millis(200)); // Simulate payment processing + } // payment_span ends here + + // Inventory update span + { + let _inventory_span = tracer + .span_builder("update_inventory") + .with_attributes(vec![ + KeyValue::new("product.id", "prod789"), + KeyValue::new("quantity.reserved", 2), + KeyValue::new("inventory.operation", "reserve"), + ]) + .start(&tracer); + thread::sleep(Duration::from_millis(30)); // Simulate inventory update + } // inventory_span ends here + } // checkout_span ends here + + // Example 3: Error scenario - failed login + { + let _failed_login_span = tracer + .span_builder("user_login") + .with_attributes(vec![ + KeyValue::new("user.email", "invalid@example.com"), + KeyValue::new("login.result", "failed"), + KeyValue::new("error.type", "authentication_error"), + ]) + .start(&tracer); + thread::sleep(Duration::from_millis(10)); // Simulate failed login + } // failed_login_span ends here + + // Example 4: API request + { + let _api_span = tracer + .span_builder("api_request") + .with_attributes(vec![ + KeyValue::new("http.method", "GET"), + KeyValue::new("http.route", "/api/users/:id"), + KeyValue::new("http.status_code", 200), + KeyValue::new("user.id", "user789"), + ]) + .start(&tracer); + thread::sleep(Duration::from_millis(75)); // Simulate API processing + } // api_span ends here + + println!("Spans created and exported successfully!"); + + // SimpleSpanProcessor exports spans immediately, so no need to wait + println!("All spans have been exported to Geneva!"); + + // Shutdown the tracer provider + tracer_provider + .shutdown() + .expect("Failed to shutdown tracer provider"); + println!("Tracer provider shut down successfully!"); +} diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/lib.rs b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/lib.rs index c70a029c1..a0e316cd8 100644 --- a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/lib.rs +++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/lib.rs @@ -4,5 +4,7 @@ #![warn(missing_debug_implementations, missing_docs)] mod logs; +mod trace; pub use logs::*; +pub use trace::*; diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/logs/exporter.rs 
b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/logs/exporter.rs
index 941cff4eb..bfc73f5d9 100644
--- a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/logs/exporter.rs
+++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/logs/exporter.rs
@@ -1,25 +1,37 @@
 use core::fmt;
+use futures::stream::{self, StreamExt};
 use geneva_uploader::client::GenevaClient;
 use opentelemetry_proto::transform::common::tonic::ResourceAttributesWithSchema;
 use opentelemetry_proto::transform::logs::tonic::group_logs_by_resource_and_scope;
 use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
 use opentelemetry_sdk::logs::LogBatch;
-use std::sync::atomic;
+use std::sync::{atomic, Arc};
 
 /// An OpenTelemetry exporter that writes logs to Geneva exporter
 pub struct GenevaExporter {
     resource: ResourceAttributesWithSchema,
     _is_shutdown: atomic::AtomicBool,
-    geneva_client: GenevaClient,
+    geneva_client: Arc<GenevaClient>,
+    max_concurrent_uploads: usize,
 }
 
+// TODO - Add builder pattern for GenevaExporter to allow more flexible configuration
 impl GenevaExporter {
     /// Create a new GenavaExporter
     pub fn new(geneva_client: GenevaClient) -> Self {
+        Self::new_with_concurrency(geneva_client, 4) // Default to 4 concurrent uploads
+    }
+
+    /// Create a new GenevaExporter with custom concurrency level
+    pub fn new_with_concurrency(
+        geneva_client: GenevaClient,
+        max_concurrent_uploads: usize,
+    ) -> Self {
         Self {
             resource: ResourceAttributesWithSchema::default(),
             _is_shutdown: atomic::AtomicBool::new(false),
-            geneva_client,
+            geneva_client: Arc::new(geneva_client),
+            max_concurrent_uploads,
         }
     }
 }
@@ -33,9 +45,36 @@ impl fmt::Debug for GenevaExporter {
 
 impl opentelemetry_sdk::logs::LogExporter for GenevaExporter {
     async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult {
         let otlp = group_logs_by_resource_and_scope(batch, &self.resource);
-        if let Err(e) = self.geneva_client.upload_logs(&otlp).await {
-            return Err(OTelSdkError::InternalFailure(e));
+
+        // Encode and compress logs into batches
+        let compressed_batches = match self.geneva_client.encode_and_compress_logs(&otlp) {
+            Ok(batches) => batches,
+            Err(e) => return Err(OTelSdkError::InternalFailure(e)),
+        };
+
+        // Execute uploads concurrently within the same async task using buffer_unordered.
+        // This processes up to max_concurrent_uploads batches simultaneously without
+        // spawning new tasks or threads, using async I/O concurrency instead.
+        // All batch uploads are processed asynchronously in the same task context that
+        // called the export() method.
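+        // Failures are only collected here (no retries); all failed batches are
+        // reported together in a single InternalFailure below.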
+        let errors: Vec<String> = stream::iter(compressed_batches)
+            .map(|batch| {
+                let client = self.geneva_client.clone();
+                async move { client.upload_batch(&batch).await }
+            })
+            .buffer_unordered(self.max_concurrent_uploads)
+            .filter_map(|result| async move { result.err() })
+            .collect()
+            .await;
+
+        // Return error if any uploads failed
+        if !errors.is_empty() {
+            return Err(OTelSdkError::InternalFailure(format!(
+                "Upload failures: {}",
+                errors.join("; ")
+            )));
         }
+
         Ok(())
     }
diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/trace/exporter.rs b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/trace/exporter.rs
new file mode 100644
index 000000000..a4341818f
--- /dev/null
+++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/trace/exporter.rs
@@ -0,0 +1,91 @@
+use core::fmt;
+use futures::stream::{self, StreamExt};
+use geneva_uploader::client::GenevaClient;
+use opentelemetry_proto::transform::common::tonic::ResourceAttributesWithSchema;
+use opentelemetry_proto::transform::trace::tonic::group_spans_by_resource_and_scope;
+use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
+use opentelemetry_sdk::trace::{SpanData, SpanExporter};
+use std::sync::{atomic, Arc};
+
+/// An OpenTelemetry exporter that writes spans to Geneva exporter
+pub struct GenevaTraceExporter {
+    resource: ResourceAttributesWithSchema,
+    _is_shutdown: atomic::AtomicBool,
+    geneva_client: Arc<GenevaClient>,
+    max_concurrent_uploads: usize,
+}
+
+// TODO - Add builder pattern for GenevaTraceExporter to allow more flexible configuration
+impl GenevaTraceExporter {
+    /// Create a new GenevaTraceExporter
+    pub fn new(geneva_client: GenevaClient) -> Self {
+        Self::new_with_concurrency(geneva_client, 4) // Default to 4 concurrent uploads
+    }
+
+    /// Create a new GenevaTraceExporter with custom concurrency level
+    pub fn new_with_concurrency(
+        geneva_client: GenevaClient,
+        max_concurrent_uploads: usize,
+    ) -> Self {
+        Self {
+            resource: ResourceAttributesWithSchema::default(),
+            _is_shutdown: atomic::AtomicBool::new(false),
+            geneva_client: Arc::new(geneva_client),
+            max_concurrent_uploads,
+        }
+    }
+}
+
+impl fmt::Debug for GenevaTraceExporter {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("Geneva trace exporter")
+    }
+}
+
+impl SpanExporter for GenevaTraceExporter {
+    async fn export(&self, batch: Vec<SpanData>) -> OTelSdkResult {
+        let otlp = group_spans_by_resource_and_scope(batch, &self.resource);
+
+        // Encode and compress spans into batches
+        let compressed_batches = match self.geneva_client.encode_and_compress_spans(&otlp) {
+            Ok(batches) => batches,
+            Err(e) => return Err(OTelSdkError::InternalFailure(e)),
+        };
+
+        // Execute uploads concurrently within the same async task using buffer_unordered.
+        // This processes up to max_concurrent_uploads batches simultaneously without
+        // spawning new tasks or threads, using async I/O concurrency instead.
+        // All batch uploads are processed asynchronously in the same task context that
+        // called the export() method.
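+        // Same aggregation strategy as the logs exporter: collect every failed
+        // span-batch upload and report them together below.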
+        let errors: Vec<String> = stream::iter(compressed_batches)
+            .map(|batch| {
+                let client = self.geneva_client.clone();
+                async move { client.upload_batch(&batch).await }
+            })
+            .buffer_unordered(self.max_concurrent_uploads)
+            .filter_map(|result| async move { result.err() })
+            .collect()
+            .await;
+
+        // Return error if any uploads failed
+        if !errors.is_empty() {
+            return Err(OTelSdkError::InternalFailure(format!(
+                "Upload failures: {}",
+                errors.join("; ")
+            )));
+        }
+
+        Ok(())
+    }
+
+    fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) {
+        self.resource = resource.into();
+    }
+
+    fn shutdown(&mut self) -> OTelSdkResult {
+        // Set shutdown flag to true
+        self._is_shutdown.store(true, atomic::Ordering::Relaxed);
+        // TODO: Use the is_shutdown value in export() method to prevent exports after shutdown
+        Ok(())
+    }
+}
diff --git a/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/trace/mod.rs b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/trace/mod.rs
new file mode 100644
index 000000000..9ec92e365
--- /dev/null
+++ b/opentelemetry-exporter-geneva/opentelemetry-exporter-geneva/src/trace/mod.rs
@@ -0,0 +1,2 @@
+mod exporter;
+pub use exporter::GenevaTraceExporter;
diff --git a/opentelemetry-instrumentation-actix-web/CHANGELOG.md b/opentelemetry-instrumentation-actix-web/CHANGELOG.md
index 1d2397195..4237174ca 100644
--- a/opentelemetry-instrumentation-actix-web/CHANGELOG.md
+++ b/opentelemetry-instrumentation-actix-web/CHANGELOG.md
@@ -2,10 +2,12 @@
 
 ## vNext
 
+## v0.23.0
+
 ### Changed
 
 * Remove `opentelemetry-prometheus`, `opentelemetry_sdk`, `prometheus` and `tracing` dependencies
-* Update `opentelemetry` and `opentelemetry-semantic-conventions` dependencies to 0.30
+* Update `opentelemetry` and `opentelemetry-semantic-conventions` dependencies to 0.31
 * **Breaking** Rename crate to `opentelemetry-instrumentation-actix-web`
 * **Breaking** Remove `metrics-prometheus` feature and use `metric` feature instead
 * **Breaking** Remove Prometheus middleware `PrometheusMetricsHandler` and use OTLP exporter instead
diff --git a/opentelemetry-instrumentation-actix-web/Cargo.toml b/opentelemetry-instrumentation-actix-web/Cargo.toml
index a47928424..32d0a6c8f 100644
--- a/opentelemetry-instrumentation-actix-web/Cargo.toml
+++ b/opentelemetry-instrumentation-actix-web/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-instrumentation-actix-web"
-version = "0.22.0"
+version = "0.23.0"
 description = "OpenTelemetry instrumentation for Actix Web apps"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-instrumentation-actix-web"
 repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-instrumentation-actix-web"
diff --git a/opentelemetry-instrumentation-tower/CHANGELOG.md b/opentelemetry-instrumentation-tower/CHANGELOG.md
new file mode 100644
index 000000000..c9eb6c6ea
--- /dev/null
+++ b/opentelemetry-instrumentation-tower/CHANGELOG.md
@@ -0,0 +1,42 @@
+# Changelog
+
+## vNext
+
+## v0.17.0
+
+### Changed
+
+* Update to OpenTelemetry v0.31
+* Migrate to use `opentelemetry-semantic-conventions` constants for metric names and attribute keys instead of hardcoded strings. The values are unchanged:
+  - `HTTP_SERVER_ACTIVE_REQUESTS_METRIC` now uses `semconv::metric::HTTP_SERVER_ACTIVE_REQUESTS`
+  - `HTTP_SERVER_REQUEST_BODY_SIZE_METRIC` now uses `semconv::metric::HTTP_SERVER_REQUEST_BODY_SIZE`
+  - `HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC` now uses `semconv::metric::HTTP_SERVER_RESPONSE_BODY_SIZE`
+  - `HTTP_SERVER_DURATION_METRIC` now uses `semconv::metric::HTTP_SERVER_REQUEST_DURATION`
+* Update attribute keys to use semantic conventions constants:
+  - `NETWORK_PROTOCOL_NAME_LABEL` now uses `semconv::attribute::NETWORK_PROTOCOL_NAME`
+  - `HTTP_REQUEST_METHOD_LABEL` now uses `semconv::attribute::HTTP_REQUEST_METHOD`
+  - `HTTP_ROUTE_LABEL` now uses `semconv::attribute::HTTP_ROUTE`
+  - `HTTP_RESPONSE_STATUS_CODE_LABEL` now uses `semconv::attribute::HTTP_RESPONSE_STATUS_CODE`
+
+### Added
+
+* Add comprehensive test coverage for all HTTP server metrics with attribute validation
+
+## v0.16.0
+
+Initial release of OpenTelemetry Tower instrumentation middleware for HTTP metrics collection.
+
+### Added
+
+* HTTP server metrics middleware for Tower-compatible services
+* Support for Axum framework via `axum` feature flag
+* Metrics collection for:
+  - `http.server.request.duration` - Request duration histogram
+  - `http.server.active_requests` - Active requests counter
+  - `http.server.request.body.size` - Request body size histogram
+  - `http.server.response.body.size` - Response body size histogram
+* Configurable request duration histogram boundaries
+* Custom request and response attribute extractors
+* Automatic protocol version, HTTP method, URL scheme, and status code labeling
+* Route extraction for Axum applications
diff --git a/opentelemetry-instrumentation-tower/Cargo.toml b/opentelemetry-instrumentation-tower/Cargo.toml
index d50b964f2..e1ae34241 100644
--- a/opentelemetry-instrumentation-tower/Cargo.toml
+++ b/opentelemetry-instrumentation-tower/Cargo.toml
@@ -3,7 +3,7 @@ name = "opentelemetry-instrumentation-tower"
 edition = "2021"
 rust-version = "1.75.0"
-version = "0.16.0"
+version = "0.17.0"
 license = "Apache-2.0"
 description = "OpenTelemetry Metrics Middleware for Tower-compatible Rust HTTP servers"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib"
@@ -22,11 +22,15 @@ futures-util = { version = "0.3", default-features = false }
 http = { version = "1", features = ["std"], default-features = false }
 http-body = { version = "1", default-features = false }
 opentelemetry = { workspace = true, features = ["futures", "metrics"]}
+opentelemetry-semantic-conventions = { workspace = true, features = ["semconv_experimental"] }
 pin-project-lite = { version = "0.2", default-features = false }
 tower-service = { version = "0.3", default-features = false }
 tower-layer = { version = "0.3", default-features = false }
 
 [dev-dependencies]
+opentelemetry_sdk = { workspace = true, features = ["metrics", "testing"] }
+tokio = { version = "1.0", features = ["macros", "rt"] }
+tower = { version = "0.5", features = ["util"] }
 
 [lints]
-workspace = true
\ No newline at end of file
+workspace = true
diff --git a/opentelemetry-instrumentation-tower/examples/axum-http-service/Cargo.toml b/opentelemetry-instrumentation-tower/examples/axum-http-service/Cargo.toml
index 211dcff5f..986bb69d3 100644
--- a/opentelemetry-instrumentation-tower/examples/axum-http-service/Cargo.toml
+++ b/opentelemetry-instrumentation-tower/examples/axum-http-service/Cargo.toml
@@ -12,7 +12,7 @@
 axum = { features = ["http1", "tokio"], version = "0.8", default-features = false }
 bytes = {
version = "1", default-features = false } opentelemetry = { workspace = true} opentelemetry_sdk = { workspace = true, default-features = false } -opentelemetry-otlp = { version = "0.30.0", features = ["grpc-tonic", "metrics"], default-features = false } +opentelemetry-otlp = { version = "0.31.0", features = ["grpc-tonic", "metrics"], default-features = false } tokio = { version = "1", features = ["rt-multi-thread"], default-features = false } rand_09 = { package = "rand", version = "0.9" } diff --git a/opentelemetry-instrumentation-tower/examples/hyper-http-service/Cargo.toml b/opentelemetry-instrumentation-tower/examples/hyper-http-service/Cargo.toml index 7a5e110cc..b04da5798 100644 --- a/opentelemetry-instrumentation-tower/examples/hyper-http-service/Cargo.toml +++ b/opentelemetry-instrumentation-tower/examples/hyper-http-service/Cargo.toml @@ -13,7 +13,7 @@ http-body-util = { version = "0.1", default-features = false } hyper-util = { version = "0.1", features = ["http1", "service", "server", "tokio"], default-features = false } opentelemetry = { workspace = true} opentelemetry_sdk = { workspace = true, default-features = false } -opentelemetry-otlp = { version = "0.30.0", features = ["grpc-tonic", "metrics"], default-features = false } +opentelemetry-otlp = { version = "0.31.0", features = ["grpc-tonic", "metrics"], default-features = false } tokio = { version = "1", features = ["rt-multi-thread", "macros"], default-features = false } tower = { version = "0.5", default-features = false } rand_09 = { package = "rand", version = "0.9" } diff --git a/opentelemetry-instrumentation-tower/src/lib.rs b/opentelemetry-instrumentation-tower/src/lib.rs index efd84d6a1..afaae6921 100644 --- a/opentelemetry-instrumentation-tower/src/lib.rs +++ b/opentelemetry-instrumentation-tower/src/lib.rs @@ -13,11 +13,12 @@ use axum::extract::MatchedPath; use futures_util::ready; use opentelemetry::metrics::{Histogram, Meter, UpDownCounter}; use opentelemetry::KeyValue; +use opentelemetry_semantic_conventions as semconv; use pin_project_lite::pin_project; use tower_layer::Layer; use tower_service::Service; -const HTTP_SERVER_DURATION_METRIC: &str = "http.server.request.duration"; +const HTTP_SERVER_DURATION_METRIC: &str = semconv::metric::HTTP_SERVER_REQUEST_DURATION; const HTTP_SERVER_DURATION_UNIT: &str = "s"; const _OTEL_DEFAULT_HTTP_SERVER_DURATION_BOUNDARIES: [f64; 14] = [ @@ -31,23 +32,23 @@ const _OTEL_DEFAULT_HTTP_SERVER_DURATION_BOUNDARIES: [f64; 14] = [ const LIBRARY_DEFAULT_HTTP_SERVER_DURATION_BOUNDARIES: [f64; 14] = [ 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, ]; -const HTTP_SERVER_ACTIVE_REQUESTS_METRIC: &str = "http.server.active_requests"; +const HTTP_SERVER_ACTIVE_REQUESTS_METRIC: &str = semconv::metric::HTTP_SERVER_ACTIVE_REQUESTS; const HTTP_SERVER_ACTIVE_REQUESTS_UNIT: &str = "{request}"; -const HTTP_SERVER_REQUEST_BODY_SIZE_METRIC: &str = "http.server.request.body.size"; +const HTTP_SERVER_REQUEST_BODY_SIZE_METRIC: &str = semconv::metric::HTTP_SERVER_REQUEST_BODY_SIZE; const HTTP_SERVER_REQUEST_BODY_SIZE_UNIT: &str = "By"; -const HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC: &str = "http.server.response.body.size"; +const HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC: &str = semconv::metric::HTTP_SERVER_RESPONSE_BODY_SIZE; const HTTP_SERVER_RESPONSE_BODY_SIZE_UNIT: &str = "By"; -const NETWORK_PROTOCOL_NAME_LABEL: &str = "network.protocol.name"; +const NETWORK_PROTOCOL_NAME_LABEL: &str = semconv::attribute::NETWORK_PROTOCOL_NAME; const NETWORK_PROTOCOL_VERSION_LABEL: 
&str = "network.protocol.version"; const URL_SCHEME_LABEL: &str = "url.scheme"; -const HTTP_REQUEST_METHOD_LABEL: &str = "http.request.method"; -#[allow(dead_code)] // cargo check is not smart -const HTTP_ROUTE_LABEL: &str = "http.route"; -const HTTP_RESPONSE_STATUS_CODE_LABEL: &str = "http.response.status_code"; +const HTTP_REQUEST_METHOD_LABEL: &str = semconv::attribute::HTTP_REQUEST_METHOD; +#[cfg(feature = "axum")] +const HTTP_ROUTE_LABEL: &str = semconv::attribute::HTTP_ROUTE; +const HTTP_RESPONSE_STATUS_CODE_LABEL: &str = semconv::attribute::HTTP_RESPONSE_STATUS_CODE; /// Trait for extracting custom attributes from HTTP requests pub trait RequestAttributeExtractor: Clone + Send + Sync + 'static { @@ -505,3 +506,259 @@ fn split_and_format_protocol_version(http_version: http::Version) -> (String, St }; (String::from("http"), String::from(version_str)) } + +#[cfg(test)] +mod tests { + use super::*; + use http::{Request, Response, StatusCode}; + use opentelemetry::metrics::MeterProvider; + use opentelemetry_sdk::metrics::{ + data::{AggregatedMetrics, MetricData}, + InMemoryMetricExporter, PeriodicReader, SdkMeterProvider, + }; + use std::time::Duration; + use tower::Service; + + #[tokio::test] + async fn test_metrics_labels() { + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()) + .with_interval(Duration::from_millis(100)) + .build(); + let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); + let meter = meter_provider.meter("test"); + + let layer = HTTPMetricsLayerBuilder::builder() + .with_meter(meter) + .build() + .unwrap(); + + let service = tower::service_fn(|_req: Request| async { + Ok::<_, std::convert::Infallible>( + Response::builder() + .status(StatusCode::OK) + .body(String::from("Hello, World!")) + .unwrap(), + ) + }); + + let mut service = layer.layer(service); + + let request = Request::builder() + .method("GET") + .uri("https://example.com/test") + .body("test body".to_string()) + .unwrap(); + + let _response = service.call(request).await.unwrap(); + + tokio::time::sleep(Duration::from_millis(500)).await; + + let metrics = exporter.get_finished_metrics().unwrap(); + assert!(!metrics.is_empty()); + + let resource_metrics = &metrics[0]; + let scope_metrics = resource_metrics + .scope_metrics() + .next() + .expect("Should have scope metrics"); + + let duration_metric = scope_metrics + .metrics() + .find(|m| m.name() == HTTP_SERVER_DURATION_METRIC) + .expect("Duration metric should exist"); + + if let AggregatedMetrics::F64(MetricData::Histogram(histogram)) = duration_metric.data() { + let data_point = histogram + .data_points() + .next() + .expect("Should have data point"); + let attributes: Vec<_> = data_point.attributes().collect(); + + // Duration metric should have 5 attributes: protocol_name, protocol_version, url_scheme, method, status_code + assert_eq!( + attributes.len(), + 5, + "Duration metric should have exactly 5 attributes" + ); + + let protocol_name = attributes + .iter() + .find(|kv| kv.key.as_str() == NETWORK_PROTOCOL_NAME_LABEL) + .expect("Protocol name should be present"); + assert_eq!(protocol_name.value.as_str(), "http"); + + let protocol_version = attributes + .iter() + .find(|kv| kv.key.as_str() == NETWORK_PROTOCOL_VERSION_LABEL) + .expect("Protocol version should be present"); + assert_eq!(protocol_version.value.as_str(), "1.1"); + + let url_scheme = attributes + .iter() + .find(|kv| kv.key.as_str() == URL_SCHEME_LABEL) + .expect("URL scheme should be present"); + 
assert_eq!(url_scheme.value.as_str(), "https"); + + let method = attributes + .iter() + .find(|kv| kv.key.as_str() == HTTP_REQUEST_METHOD_LABEL) + .expect("HTTP method should be present"); + assert_eq!(method.value.as_str(), "GET"); + + let status_code = attributes + .iter() + .find(|kv| kv.key.as_str() == HTTP_RESPONSE_STATUS_CODE_LABEL) + .expect("Status code should be present"); + if let opentelemetry::Value::I64(code) = &status_code.value { + assert_eq!(*code, 200); + } else { + panic!("Expected i64 status code"); + } + } else { + panic!("Expected histogram data for duration metric"); + } + + let request_body_size_metric = scope_metrics + .metrics() + .find(|m| m.name() == HTTP_SERVER_REQUEST_BODY_SIZE_METRIC); + + if let Some(metric) = request_body_size_metric { + if let AggregatedMetrics::F64(MetricData::Histogram(histogram)) = metric.data() { + let data_point = histogram + .data_points() + .next() + .expect("Should have data point"); + let attributes: Vec<_> = data_point.attributes().collect(); + + // Request body size metric should have 5 attributes: protocol_name, protocol_version, url_scheme, method, status_code + assert_eq!( + attributes.len(), + 5, + "Request body size metric should have exactly 5 attributes" + ); + + let protocol_name = attributes + .iter() + .find(|kv| kv.key.as_str() == NETWORK_PROTOCOL_NAME_LABEL) + .expect("Protocol name should be present in request body size"); + assert_eq!(protocol_name.value.as_str(), "http"); + + let protocol_version = attributes + .iter() + .find(|kv| kv.key.as_str() == NETWORK_PROTOCOL_VERSION_LABEL) + .expect("Protocol version should be present in request body size"); + assert_eq!(protocol_version.value.as_str(), "1.1"); + + let url_scheme = attributes + .iter() + .find(|kv| kv.key.as_str() == URL_SCHEME_LABEL) + .expect("URL scheme should be present in request body size"); + assert_eq!(url_scheme.value.as_str(), "https"); + + let method = attributes + .iter() + .find(|kv| kv.key.as_str() == HTTP_REQUEST_METHOD_LABEL) + .expect("HTTP method should be present in request body size"); + assert_eq!(method.value.as_str(), "GET"); + + let status_code = attributes + .iter() + .find(|kv| kv.key.as_str() == HTTP_RESPONSE_STATUS_CODE_LABEL) + .expect("Status code should be present in request body size"); + if let opentelemetry::Value::I64(code) = &status_code.value { + assert_eq!(*code, 200); + } else { + panic!("Expected i64 status code"); + } + } + } + + // Test response body size metric + let response_body_size_metric = scope_metrics + .metrics() + .find(|m| m.name() == HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC); + + if let Some(metric) = response_body_size_metric { + if let AggregatedMetrics::F64(MetricData::Histogram(histogram)) = metric.data() { + let data_point = histogram + .data_points() + .next() + .expect("Should have data point"); + let attributes: Vec<_> = data_point.attributes().collect(); + + // Response body size metric should have 5 attributes: protocol_name, protocol_version, url_scheme, method, status_code + assert_eq!( + attributes.len(), + 5, + "Response body size metric should have exactly 5 attributes" + ); + + let protocol_name = attributes + .iter() + .find(|kv| kv.key.as_str() == NETWORK_PROTOCOL_NAME_LABEL) + .expect("Protocol name should be present in response body size"); + assert_eq!(protocol_name.value.as_str(), "http"); + + let protocol_version = attributes + .iter() + .find(|kv| kv.key.as_str() == NETWORK_PROTOCOL_VERSION_LABEL) + .expect("Protocol version should be present in response body size"); + 
assert_eq!(protocol_version.value.as_str(), "1.1");
+
+                let url_scheme = attributes
+                    .iter()
+                    .find(|kv| kv.key.as_str() == URL_SCHEME_LABEL)
+                    .expect("URL scheme should be present in response body size");
+                assert_eq!(url_scheme.value.as_str(), "https");
+
+                let method = attributes
+                    .iter()
+                    .find(|kv| kv.key.as_str() == HTTP_REQUEST_METHOD_LABEL)
+                    .expect("HTTP method should be present in response body size");
+                assert_eq!(method.value.as_str(), "GET");
+
+                let status_code = attributes
+                    .iter()
+                    .find(|kv| kv.key.as_str() == HTTP_RESPONSE_STATUS_CODE_LABEL)
+                    .expect("Status code should be present in response body size");
+                if let opentelemetry::Value::I64(code) = &status_code.value {
+                    assert_eq!(*code, 200);
+                } else {
+                    panic!("Expected i64 status code");
+                }
+            }
+        }
+
+        // Test active requests metric
+        let active_requests_metric = scope_metrics
+            .metrics()
+            .find(|m| m.name() == HTTP_SERVER_ACTIVE_REQUESTS_METRIC);
+
+        if let Some(metric) = active_requests_metric {
+            if let AggregatedMetrics::I64(MetricData::Sum(sum)) = metric.data() {
+                let data_point = sum.data_points().next().expect("Should have data point");
+                let attributes: Vec<_> = data_point.attributes().collect();
+
+                // Active requests metric should have 2 attributes: method, url_scheme
+                assert_eq!(
+                    attributes.len(),
+                    2,
+                    "Active requests metric should have exactly 2 attributes"
+                );
+
+                let method = attributes
+                    .iter()
+                    .find(|kv| kv.key.as_str() == HTTP_REQUEST_METHOD_LABEL)
+                    .expect("HTTP method should be present in active requests");
+                assert_eq!(method.value.as_str(), "GET");
+
+                let url_scheme = attributes
+                    .iter()
+                    .find(|kv| kv.key.as_str() == URL_SCHEME_LABEL)
+                    .expect("URL scheme should be present in active requests");
+                assert_eq!(url_scheme.value.as_str(), "https");
+            }
+        }
+    }
+}
diff --git a/opentelemetry-resource-detectors/CHANGELOG.md b/opentelemetry-resource-detectors/CHANGELOG.md
index c410e058d..f7d485103 100644
--- a/opentelemetry-resource-detectors/CHANGELOG.md
+++ b/opentelemetry-resource-detectors/CHANGELOG.md
@@ -2,6 +2,11 @@

 ## vNext

+## v0.10.0
+
+- Bump opentelemetry and opentelemetry_sdk versions to 0.31
+- Bump opentelemetry-semantic-conventions version to 0.31
+
 ## v0.9.0

 - Bump opentelemetry and opentelemetry_sdk versions to 0.30
diff --git a/opentelemetry-resource-detectors/Cargo.toml b/opentelemetry-resource-detectors/Cargo.toml
index 41ae9a264..d013d3cd1 100644
--- a/opentelemetry-resource-detectors/Cargo.toml
+++ b/opentelemetry-resource-detectors/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-resource-detectors"
-version = "0.9.0"
+version = "0.10.0"
 edition = "2021"
 description = "A collection of community supported resource detectors for OpenTelemetry"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-resource-detectors"
diff --git a/opentelemetry-stackdriver/CHANGELOG.md b/opentelemetry-stackdriver/CHANGELOG.md
index 09b571c9d..d4b029322 100644
--- a/opentelemetry-stackdriver/CHANGELOG.md
+++ b/opentelemetry-stackdriver/CHANGELOG.md
@@ -2,6 +2,10 @@

 ## vNext

+## v0.28.0
+
+- Update to opentelemetry v0.31.0, opentelemetry_sdk v0.31.0, opentelemetry-semantic-conventions v0.31.0
+
 ## v0.27.0

 - Update `tonic` dependency version to 0.13
diff --git a/opentelemetry-stackdriver/Cargo.toml b/opentelemetry-stackdriver/Cargo.toml
index 915299e43..1a87992cb 100644
--- a/opentelemetry-stackdriver/Cargo.toml
+++ b/opentelemetry-stackdriver/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-stackdriver"
-version = "0.27.0"
+version = "0.28.0"
 description = "A Rust opentelemetry exporter that uploads traces to Google Stackdriver trace."
 documentation = "https://docs.rs/opentelemetry-stackdriver/"
 repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib"
diff --git a/opentelemetry-user-events-logs/CHANGELOG.md b/opentelemetry-user-events-logs/CHANGELOG.md
index 5056443b7..68024ca0b 100644
--- a/opentelemetry-user-events-logs/CHANGELOG.md
+++ b/opentelemetry-user-events-logs/CHANGELOG.md
@@ -2,6 +2,10 @@

 ## vNext

+## v0.15.0
+
+- Bump opentelemetry and opentelemetry_sdk versions to 0.31
+
 ## v0.14.0

 Released 2025-July-24
diff --git a/opentelemetry-user-events-logs/Cargo.toml b/opentelemetry-user-events-logs/Cargo.toml
index cef983e0e..f895befdb 100644
--- a/opentelemetry-user-events-logs/Cargo.toml
+++ b/opentelemetry-user-events-logs/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "opentelemetry-user-events-logs"
 description = "OpenTelemetry Logs Exporter for Linux user_events"
-version = "0.14.0"
+version = "0.15.0"
 edition = "2021"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-logs"
 repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-logs"
@@ -13,15 +13,15 @@ license = "Apache-2.0"

 [dependencies]
 eventheader = "0.4.0"
 eventheader_dynamic = "0.4.0"
-opentelemetry = { version= "0.30", features = ["logs"] }
-opentelemetry_sdk = { version= "0.30", features = ["logs"] }
+opentelemetry = { version= "0.31", features = ["logs"] }
+opentelemetry_sdk = { version= "0.31", features = ["logs"] }
 chrono = { version = "0.4", default-features = false, features = ["std"] }
 tracing = { version = "0.1", optional = true }
 futures-executor = "0.3"

 [dev-dependencies]
-opentelemetry-appender-tracing = { version= "0.30" }
-opentelemetry_sdk = { version= "0.30", features = ["logs", "trace"] }
+opentelemetry-appender-tracing = { version= "0.31" }
+opentelemetry_sdk = { version= "0.31", features = ["logs", "trace"] }
 tracing = { version = "0.1", default-features = false, features = ["std"] }
 tracing-core = "0.1.31"
 tracing-subscriber = { version = "0.3.0", default-features = false, features = ["env-filter", "fmt", "registry", "std"] }
diff --git a/opentelemetry-user-events-logs/src/logs/processor.rs b/opentelemetry-user-events-logs/src/logs/processor.rs
index 85f194dff..5d53ee903 100644
--- a/opentelemetry-user-events-logs/src/logs/processor.rs
+++ b/opentelemetry-user-events-logs/src/logs/processor.rs
@@ -36,7 +36,7 @@ where

 impl Processor {
     /// Creates a builder for configuring a user_events Processor
-    pub fn builder(provider_name: &str) -> ProcessorBuilder {
+    pub fn builder(provider_name: &str) -> ProcessorBuilder<'_> {
         ProcessorBuilder::new(provider_name)
     }
 }
diff --git a/opentelemetry-user-events-metrics/CHANGELOG.md b/opentelemetry-user-events-metrics/CHANGELOG.md
index 445507196..96e9a57e2 100644
--- a/opentelemetry-user-events-metrics/CHANGELOG.md
+++ b/opentelemetry-user-events-metrics/CHANGELOG.md
@@ -2,6 +2,11 @@

 ## vNext

+## v0.12.0
+
+- Bump opentelemetry and opentelemetry_sdk versions to 0.31
+- Bump opentelemetry-proto version to 0.31
+
 ## v0.11.0

 Released 2025-May-27
diff --git a/opentelemetry-user-events-metrics/Cargo.toml b/opentelemetry-user-events-metrics/Cargo.toml
index 56c63fca9..3ac920786 100644
--- a/opentelemetry-user-events-metrics/Cargo.toml
+++ b/opentelemetry-user-events-metrics/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-user-events-metrics"
-version = "0.11.0" +version = "0.12.0" description = "OpenTelemetry metrics exporter to user events" homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-metrics" repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-metrics" @@ -11,11 +11,11 @@ edition = "2021" rust-version = "1.75.0" [dependencies] -opentelemetry = { version= "0.30", features = ["metrics"] } -opentelemetry_sdk = { version= "0.30", features = ["metrics"] } -opentelemetry-proto = { version= "0.30", features = ["gen-tonic", "metrics"] } +opentelemetry = { version= "0.31", features = ["metrics"] } +opentelemetry_sdk = { version= "0.31", features = ["metrics"] } +opentelemetry-proto = { version= "0.31", features = ["gen-tonic", "metrics"], default-features = false } eventheader = { version = "= 0.4.1" } -prost = "0.13" +prost = "0.14" tracing = {version = "0.1", optional = true} [dev-dependencies] diff --git a/opentelemetry-user-events-trace/CHANGELOG.md b/opentelemetry-user-events-trace/CHANGELOG.md index 0b1019e9b..bdb7a1072 100644 --- a/opentelemetry-user-events-trace/CHANGELOG.md +++ b/opentelemetry-user-events-trace/CHANGELOG.md @@ -2,6 +2,10 @@ ## vNext +## v0.4.0 + +- Bump opentelemetry and opentelemetry_sdk versions to 0.31 + ## v0.3.0 Released 2025-July-24 diff --git a/opentelemetry-user-events-trace/Cargo.toml b/opentelemetry-user-events-trace/Cargo.toml index 48f220926..bc5a027b8 100644 --- a/opentelemetry-user-events-trace/Cargo.toml +++ b/opentelemetry-user-events-trace/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "opentelemetry-user-events-trace" description = "OpenTelemetry-Rust exporter to user_events" -version = "0.3.0" +version = "0.4.0" edition = "2021" homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-traces" repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-traces" @@ -13,8 +13,8 @@ license = "Apache-2.0" [dependencies] eventheader = "0.4.0" eventheader_dynamic = "0.4.0" -opentelemetry = { version= "0.30", features = ["trace"] } -opentelemetry_sdk = { version= "0.30", features = ["trace"] } +opentelemetry = { version= "0.31", features = ["trace"] } +opentelemetry_sdk = { version= "0.31", features = ["trace"] } chrono = { version = "0.4", default-features = false, features = ["std"] } tracing = { version = "0.1", optional = true } futures-executor = "0.3" diff --git a/stress/Cargo.toml b/stress/Cargo.toml index f84159e14..080cc6577 100644 --- a/stress/Cargo.toml +++ b/stress/Cargo.toml @@ -3,7 +3,7 @@ name = "stress" version = "0.1.0" edition = "2021" license = "Apache-2.0" -rust-version = "1.75.0" +rust-version = "1.85.0" publish = false [[bin]] @@ -26,17 +26,17 @@ num_cpus = "1.15.0" num-format = "0.4.4" sysinfo = { version = "0.36", optional = true } tokio = { version = "1", features = ["full", "test-util"] } -wiremock = "0.6" +wiremock = "=0.5.22" futures = "0.3" -opentelemetry-appender-tracing = { version = "0.30", features= ["spec_unstable_logs_enabled"] } -opentelemetry_sdk = { version = "0.30", features = ["logs", "spec_unstable_logs_enabled"] } -opentelemetry-proto = { version = "0.30"} +opentelemetry-appender-tracing = { version = "0.31", features= ["spec_unstable_logs_enabled"] } +opentelemetry_sdk = { version = "0.31", features = ["logs", "spec_unstable_logs_enabled"] } +opentelemetry-proto = { version = "0.31", default-features = false, features = ["logs", 
"gen-tonic-messages"] } opentelemetry-user-events-logs = { path = "../opentelemetry-user-events-logs", features = ["spec_unstable_logs_enabled"]} opentelemetry-etw-logs = { path = "../opentelemetry-etw-logs"} tracing = { version = "0.1", default-features = false, features = ["std"] } tracing-subscriber = { version = "0.3.0", default-features = false, features = ["env-filter","registry", "std"] } -geneva-uploader = { version = "0.1.0", path = "../opentelemetry-exporter-geneva/geneva-uploader", features = ["mock_auth"]} +geneva-uploader = { version = "0.3.0", path = "../opentelemetry-exporter-geneva/geneva-uploader", features = ["mock_auth"]} [features] stats = ["sysinfo"] diff --git a/stress/src/geneva_exporter.rs b/stress/src/geneva_exporter.rs index 691f077ed..f3bdb088e 100644 --- a/stress/src/geneva_exporter.rs +++ b/stress/src/geneva_exporter.rs @@ -48,14 +48,15 @@ use wiremock::matchers::{method, path_regex}; use wiremock::{Mock, MockServer, ResponseTemplate}; // Helper functions -fn create_test_logs() -> Vec { +fn create_test_logs(base_timestamp: u64) -> Vec { let mut log_records = Vec::new(); // Create 10 simple log records for i in 0..10 { + let timestamp = base_timestamp + i * 1_000_000; // 1 ms apart let log = LogRecord { - observed_time_unix_nano: 1700000000000000000 + i, - event_name: "StressTestEvent".to_string(), + observed_time_unix_nano: timestamp, + event_name: "Log".to_string(), severity_number: 9, severity_text: "INFO".to_string(), body: Some(AnyValue { @@ -123,10 +124,10 @@ async fn init_client() -> Result<(GenevaClient, Option), Box Result<(GenevaClient, Option), Box Result<(GenevaClient, Option), Box Result<(), Box> { + // Get timestamp for events + let base_timestamp = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos() as u64; + // Get concurrency from the appropriate position let concurrency = args .get(args_start_idx) @@ -268,11 +275,19 @@ async fn async_main( // Initialize client and test data let (client, _mock_uri) = init_client().await?; let client = Arc::new(client); - let logs = Arc::new(create_test_logs()); + let logs = Arc::new(create_test_logs(base_timestamp)); // Warm up the ingestion token cache println!("Warming up token cache..."); - client.upload_logs(&logs).await?; + let warm_batches = client + .encode_and_compress_logs(&logs) + .map_err(|e| format!("Failed to encode logs: {e}"))?; + for batch in &warm_batches { + client + .upload_batch(batch) + .await + .map_err(|e| format!("Failed to upload batch: {e}"))?; + } println!("\nStarting Geneva exporter stress test using stream-based approach"); println!("Press Ctrl+C to stop continuous tests\n"); @@ -290,7 +305,18 @@ async fn async_main( ThroughputTest::run_continuous("Geneva Upload", config, move || { let client = client.clone(); let logs = logs.clone(); - async move { client.upload_logs(&logs).await } + async move { + let batches = client.encode_and_compress_logs(&logs)?; + + // Upload batches sequentially TODO - use buffer_unordered for concurrency + for batch in &batches { + client + .upload_batch(batch) + .await + .map_err(|e| format!("Failed to upload batch: {e}"))?; + } + Ok::<(), String>(()) + } }) .await; } @@ -311,7 +337,20 @@ async fn async_main( let stats = ThroughputTest::run_fixed("Geneva Upload", config, move || { let client = client.clone(); let logs = logs.clone(); - async move { client.upload_logs(&logs).await } + async move { + let batches = client + .encode_and_compress_logs(&logs) + .map_err(|e| format!("Failed to encode logs: {e}"))?; + + 
// Upload batches sequentially - TODO - use buffer_unordered for concurrency + for batch in &batches { + client + .upload_batch(batch) + .await + .map_err(|e| format!("Failed to upload batch: {e}"))?; + } + Ok::<(), String>(()) + } }) .await; @@ -330,10 +369,16 @@ async fn async_main( let client = client.clone(); let logs = logs.clone(); async move { - client - .upload_logs(&logs) - .await - .map_err(std::io::Error::other) + let batches = match client.encode_and_compress_logs(&logs) { + Ok(batches) => batches, + Err(e) => return Err(format!("Failed to encode logs: {e}")), + }; + for batch in &batches { + if let Err(e) = client.upload_batch(batch).await { + return Err(format!("Failed to upload batch: {e}")); + } + } + Ok(()) } }, ) From 05525e37dcef20f5d1f03f292f9e3c06bb921df0 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 6 Nov 2025 16:11:29 +0000 Subject: [PATCH 5/5] Add debug logging for ignored resource attributes Co-authored-by: cijothomas <5232798+cijothomas@users.noreply.github.com> --- opentelemetry-etw-logs/src/exporter/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/opentelemetry-etw-logs/src/exporter/mod.rs b/opentelemetry-etw-logs/src/exporter/mod.rs index c34828a95..3c5a81c77 100644 --- a/opentelemetry-etw-logs/src/exporter/mod.rs +++ b/opentelemetry-etw-logs/src/exporter/mod.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use tracelogging_dynamic as tld; use opentelemetry::logs::Severity; -use opentelemetry::{logs::AnyValue, Key, Value}; +use opentelemetry::{logs::AnyValue, otel_debug, Key, Value}; use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult}; pub(crate) mod common; @@ -172,8 +172,10 @@ impl opentelemetry_sdk::logs::LogExporter for ETWExporter { self.resource .attributes_from_resource .push((key.clone(), val_to_any_value(value))); + } else { + // Other attributes are ignored + otel_debug!(name: "UserEvents.ResourceAttributeIgnored", key = key.as_str(), message = "To include this attribute, add it via with_resource_attributes() method in the processor builder."); } - // Other attributes are ignored } }
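Taken together with the `with_resource_attributes` opt-in added earlier in this series, the patch above means a resource attribute other than `service.name` and `service.instance.id` is exported only when explicitly listed, and is otherwise reported through the new debug event. A minimal sketch of the opt-in from the application side follows; it assumes the ETW processor exposes `Processor::builder(provider_name)` with a fallible `build()` finalizer (the exact builder surface may differ) and uses the standard `opentelemetry_sdk` 0.31 `SdkLoggerProvider` wiring. The provider and attribute names are illustrative only.

    use opentelemetry::KeyValue;
    use opentelemetry_etw_logs::Processor;
    use opentelemetry_sdk::{logs::SdkLoggerProvider, Resource};

    fn build_logger_provider() -> SdkLoggerProvider {
        // Opt "custom_attribute1" into export. "service.name" and
        // "service.instance.id" need no opt-in: they are always mapped to
        // cloud.roleName / cloud.roleInstance in Part A.
        let processor = Processor::builder("my_provider") // illustrative provider name
            .with_resource_attributes(vec!["custom_attribute1"])
            .build() // assumed finalizer; the real builder API may differ
            .expect("valid processor configuration");

        SdkLoggerProvider::builder()
            .with_resource(
                Resource::builder()
                    .with_attributes([
                        KeyValue::new("service.name", "my-service"),
                        KeyValue::new("custom_attribute1", "value1"), // exported with each record
                        KeyValue::new("custom_attribute2", "value2"), // ignored; emits the debug event above
                    ])
                    .build(),
            )
            .with_log_processor(processor)
            .build()
    }

With this wiring, `set_resource` stores `custom_attribute1` in `attributes_from_resource`, while `custom_attribute2` triggers the `UserEvents.ResourceAttributeIgnored` debug event added by this patch.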