diff --git a/Cargo.lock b/Cargo.lock index 215330614c..78b17e3efb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3769,6 +3769,15 @@ dependencies = [ "escape8259", ] +[[package]] +name = "linked_list_allocator" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" +dependencies = [ + "spinning_top", +] + [[package]] name = "linkme" version = "0.3.33" @@ -4622,6 +4631,14 @@ dependencies = [ "libc", ] +[[package]] +name = "nostd_spin_channel" +version = "0.0.0" +dependencies = [ + "spin 0.10.0", + "thiserror 2.0.16", +] + [[package]] name = "ntapi" version = "0.4.1" @@ -5071,6 +5088,28 @@ dependencies = [ "thiserror 2.0.16", ] +[[package]] +name = "opentmk" +version = "0.0.0" +dependencies = [ + "bitfield-struct 0.11.0", + "cfg-if", + "hvdef", + "lazy_static", + "linked_list_allocator", + "log", + "memory_range", + "minimal_rt", + "nostd_spin_channel", + "serde", + "serde_json", + "spin 0.10.0", + "thiserror 2.0.16", + "uefi", + "x86_64", + "zerocopy 0.8.25", +] + [[package]] name = "openvmm" version = "0.0.0" @@ -6842,6 +6881,18 @@ name = "spin" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spinning_top" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -9441,6 +9492,12 @@ dependencies = [ "vmsocket", ] +[[package]] +name = "volatile" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "442887c63f2c839b346c192d047a7c87e73d0689c9157b00b53dcc27dd5ea793" + [[package]] name = "vpci" version = "0.0.0" @@ -10174,6 +10231,18 @@ dependencies = [ "tap", ] +[[package]] +name = "x86_64" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f042214de98141e9c8706e8192b73f56494087cc55ebec28ce10f26c5c364ae" +dependencies = [ + "bit_field", + "bitflags 2.9.3", + "rustversion", + "volatile", +] + [[package]] name = "x86defs" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 178ee4242f..4a2d3871e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,6 +47,8 @@ members = [ "vm/loader/igvmfilegen", "vm/vmgs/vmgs_lib", "vm/vmgs/vmgstool", + # opentmk + "opentmk", ] exclude = [ "xsync", @@ -126,6 +128,7 @@ mesh_rpc = { path = "support/mesh/mesh_rpc" } mesh_worker = { path = "support/mesh/mesh_worker" } mesh_tracing = { path = "support/mesh_tracing" } minircu = { path = "support/minircu" } +nostd_spin_channel = { path = "support/nostd_spin_channel"} open_enum = { path = "support/open_enum" } openssl_kdf = { path = "support/openssl_kdf" } openssl_crypto_only = { path = "support/openssl_crypto_only" } @@ -467,9 +470,11 @@ jiff = "0.2.14" kvm-bindings = "0.14.0" # Use of these specific REPO will go away when changes are taken upstream. 
landlock = "0.4.1" +lazy_static = "1.4.0" libc = "0.2" libfuzzer-sys = "0.4" libtest-mimic = "0.8" +linked_list_allocator = "0.10.5" linkme = "0.3.9" log = "0.4" loom = "0.7.2" @@ -506,8 +511,8 @@ rusqlite = "0.37" rustc-hash = "2.1.1" rustyline = "17" seccompiler = "0.5" -serde = "1.0.185" -serde_json = "1.0" +serde = { version = "1.0.185", default-features = false } +serde_json = { version = "1.0", default-features = false } serde_yaml = "0.9" sha2 = { version = "0.10.8", default-features = false } shell-words = "1.1" @@ -517,6 +522,7 @@ smallbox = "0.8" smallvec = "1.8" smoltcp = { version = "0.8", default-features = false } socket2 = "0.6" +spin = "0.10.0" stackfuture = "0.3" static_assertions = "1.1" syn = "2" @@ -544,6 +550,7 @@ windows = "0.62" windows-service = "0.8" windows-sys = "0.61" windows-version = "0.1.4" +x86_64 = { version = "0.15.2", default-features = false } xshell = "=0.2.2" # pin to 0.2.2 to work around https://github.com/matklad/xshell/issues/63 xshell-macros = "0.2" # We add the derive feature here since the vast majority of our crates use it. diff --git a/flowey/flowey_cli/Cargo.toml b/flowey/flowey_cli/Cargo.toml index 525cb6feb2..f8346546b7 100644 --- a/flowey/flowey_cli/Cargo.toml +++ b/flowey/flowey_cli/Cargo.toml @@ -18,8 +18,8 @@ fs-err.workspace = true log.workspace = true parking_lot.workspace = true petgraph.workspace = true -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true, features = ["raw_value"] } +serde = { workspace = true, features = ["std", "derive"] } +serde_json = { workspace = true, features = ["std", "raw_value"] } serde_yaml.workspace = true toml_edit = { workspace = true, features = ["serde"] } xshell.workspace = true diff --git a/flowey/flowey_core/Cargo.toml b/flowey/flowey_core/Cargo.toml index 66f2a156cd..1f58a6b00e 100644 --- a/flowey/flowey_core/Cargo.toml +++ b/flowey/flowey_core/Cargo.toml @@ -11,7 +11,7 @@ anyhow.workspace = true fs-err.workspace = true linkme.workspace = true serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } serde_yaml.workspace = true [dev-dependencies] diff --git a/flowey/flowey_hvlite/Cargo.toml b/flowey/flowey_hvlite/Cargo.toml index b85a4ddc27..871ae2e345 100644 --- a/flowey/flowey_hvlite/Cargo.toml +++ b/flowey/flowey_hvlite/Cargo.toml @@ -17,7 +17,7 @@ vmm_test_images = { workspace = true, features = ["serde", "clap"] } anyhow.workspace = true clap = { workspace = true, features = ["derive"] } log.workspace = true -serde.workspace = true +serde = { workspace = true, features = ["std"] } target-lexicon = { workspace = true, features = ["serde_support"] } [lints] diff --git a/flowey/flowey_lib_common/Cargo.toml b/flowey/flowey_lib_common/Cargo.toml index 79f4aedce4..66aae1c00f 100644 --- a/flowey/flowey_lib_common/Cargo.toml +++ b/flowey/flowey_lib_common/Cargo.toml @@ -16,8 +16,8 @@ home.workspace = true log.workspace = true rlimit.workspace = true rustc-hash.workspace = true -serde.workspace = true -serde_json.workspace = true +serde = { workspace = true, features = ["std"] } +serde_json = { workspace = true, features = ["std"] } target-lexicon = { workspace = true, features = ["serde_support"] } toml_edit.workspace = true which.workspace = true diff --git a/flowey/flowey_lib_hvlite/Cargo.toml b/flowey/flowey_lib_hvlite/Cargo.toml index 358629e83e..c3f5cdc435 100644 --- a/flowey/flowey_lib_hvlite/Cargo.toml +++ b/flowey/flowey_lib_hvlite/Cargo.toml @@ -17,8 +17,8 @@ 
igvmfilegen_config.workspace = true anyhow.workspace = true fs-err.workspace = true log.workspace = true -serde.workspace = true -serde_json.workspace = true +serde = { workspace = true, features = ["std"] } +serde_json = { workspace = true, features = ["std"] } target-lexicon = { workspace = true, features = ["serde_support"] } which.workspace = true xshell.workspace = true diff --git a/openhcl/openhcl_attestation_protocol/Cargo.toml b/openhcl/openhcl_attestation_protocol/Cargo.toml index f99e8b068a..e8e7dc8ef2 100644 --- a/openhcl/openhcl_attestation_protocol/Cargo.toml +++ b/openhcl/openhcl_attestation_protocol/Cargo.toml @@ -18,7 +18,7 @@ base64.workspace = true base64-serde.workspace = true hex.workspace = true serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } zerocopy.workspace = true [lints] diff --git a/openhcl/underhill_attestation/Cargo.toml b/openhcl/underhill_attestation/Cargo.toml index 57bbd0a7ab..63aeae3537 100644 --- a/openhcl/underhill_attestation/Cargo.toml +++ b/openhcl/underhill_attestation/Cargo.toml @@ -28,8 +28,8 @@ base64.workspace = true base64-serde.workspace = true getrandom.workspace = true openssl.workspace = true -serde.workspace = true -serde_json.workspace = true +serde = { workspace = true, features = ["std"] } +serde_json = { workspace = true, features = ["std"] } static_assertions.workspace = true thiserror.workspace = true time = { workspace = true, features = ["macros"] } diff --git a/openhcl/underhill_core/Cargo.toml b/openhcl/underhill_core/Cargo.toml index 3021b22335..8bcec0a73d 100644 --- a/openhcl/underhill_core/Cargo.toml +++ b/openhcl/underhill_core/Cargo.toml @@ -164,7 +164,7 @@ libc.workspace = true parking_lot.workspace = true serde = { workspace = true, features = ["derive"] } serde_helpers.workspace = true -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } socket2.workspace = true thiserror = { workspace = true, features = ["std"] } time = { workspace = true, features = ["macros"] } diff --git a/opentmk/Cargo.toml b/opentmk/Cargo.toml new file mode 100644 index 0000000000..6faf4c6519 --- /dev/null +++ b/opentmk/Cargo.toml @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +[package] +name = "opentmk" +edition.workspace = true +rust-version.workspace = true + +[dependencies] +bitfield-struct.workspace = true +cfg-if.workspace = true +hvdef.workspace = true +lazy_static = { workspace = true, features = ["spin_no_std"] } +linked_list_allocator.workspace = true +log.workspace = true +memory_range.workspace = true +minimal_rt.workspace = true +spin.workspace = true +serde = { workspace = true, features = ["derive"]} +serde_json = { workspace = true, features = ["alloc"] } +thiserror.workspace = true +uefi = { workspace = true, features = ["alloc"] } +x86_64 = { workspace = true, features = ["instructions"] } +zerocopy.workspace = true +nostd_spin_channel.workspace = true + +[lints] +workspace = true + +[build-dependencies] diff --git a/opentmk/README.md b/opentmk/README.md new file mode 100644 index 0000000000..a2658e8753 --- /dev/null +++ b/opentmk/README.md @@ -0,0 +1,3 @@ +# OpenTMK + +See the guide for more info on how to build/run the code in this crate. diff --git a/opentmk/build.rs b/opentmk/build.rs new file mode 100644 index 0000000000..003055f53e --- /dev/null +++ b/opentmk/build.rs @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#![expect(missing_docs)] + +fn main() { + // Allow a cfg of nightly to avoid using a feature, see main.rs. + println!("cargo:rustc-check-cfg=cfg(nightly)"); +} diff --git a/opentmk/src/arch/aarch64/hypercall.rs b/opentmk/src/arch/aarch64/hypercall.rs new file mode 100644 index 0000000000..0f72338c96 --- /dev/null +++ b/opentmk/src/arch/aarch64/hypercall.rs @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Hypercall interface for AArch64 architecture. + +/// Writes a synthetic register to tell the hypervisor the OS ID. +fn report_os_id(guest_os_id: u64) { + // On ARM64, to be able to make hypercalls, one needs first to set the Guest OS ID + // synthetic register using a hypercall. Can't use `Hvcall::set_register` at that will + // lead to the infinite recursion as that function will first try initializing hypercalls + // with setting a register. + // + // Only one very specific HvSetVpRegisters hypercall is allowed to set the Guest OS ID + // (this is TLFS section 17.4.4.1.1 and 5.3), and that must be the fast hypercall. + let _ = minimal_rt::arch::hypercall::set_register_fast( + hvdef::HvArm64RegisterName::GuestOsId.into(), + guest_os_id.into(), + ); +} + +pub(crate) fn initialize(guest_os_id: u64) { + // We are assuming we are running under a Microsoft hypervisor. + report_os_id(guest_os_id); +} + +/// Call before jumping to kernel. +pub(crate) fn uninitialize() { + report_os_id(0); +} diff --git a/opentmk/src/arch/aarch64/mod.rs b/opentmk/src/arch/aarch64/mod.rs new file mode 100644 index 0000000000..10ce1300c1 --- /dev/null +++ b/opentmk/src/arch/aarch64/mod.rs @@ -0,0 +1,4 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +pub mod hypercall; diff --git a/opentmk/src/arch/mod.rs b/opentmk/src/arch/mod.rs new file mode 100644 index 0000000000..591bf020d8 --- /dev/null +++ b/opentmk/src/arch/mod.rs @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Imports and re-exports architecture-specific implementations. + +cfg_if::cfg_if!( + if #[cfg(target_arch = "x86_64")] { // xtask-fmt allow-target-arch sys-crate + mod x86_64; + pub use x86_64::*; + + } else if #[cfg(target_arch = "aarch64")] { // xtask-fmt allow-target-arch sys-crate + mod aarch64; + pub use aarch64::*; + } else { + compile_error!("target_arch is not supported"); + } +); diff --git a/opentmk/src/arch/x86_64/hypercall.rs b/opentmk/src/arch/x86_64/hypercall.rs new file mode 100644 index 0000000000..911e6267f7 --- /dev/null +++ b/opentmk/src/arch/x86_64/hypercall.rs @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Hypercall interface for x86_64 architecture. + +// UNSAFETY: This module contains unsafe code to perform low-level operations such as managing the hypercall page access +#![expect(unsafe_code)] + +use core::ptr::addr_of; + +use hvdef::HV_PAGE_SIZE; +use minimal_rt::arch::hypercall::HYPERCALL_PAGE; +use minimal_rt::arch::msr::read_msr; +use minimal_rt::arch::msr::write_msr; + +/// Writes an MSR to tell the hypervisor the OS ID. +fn report_os_id(guest_os_id: u64) { + // SAFETY: Using the contract established in the Hyper-V TLFS. + unsafe { + write_msr(hvdef::HV_X64_MSR_GUEST_OS_ID, guest_os_id); + }; +} + +/// Writes an MSR to tell the hypervisor where the hypercall page is +pub fn write_hypercall_msr(enable: bool) { + // SAFETY: Using the contract established in the Hyper-V TLFS. 
+ let hypercall_contents = hvdef::hypercall::MsrHypercallContents::from(unsafe { + read_msr(hvdef::HV_X64_MSR_HYPERCALL) + }); + + let hypercall_page_num = addr_of!(HYPERCALL_PAGE) as u64 / HV_PAGE_SIZE; + + if !(!enable || !hypercall_contents.enable()) { + return; + } + let new_hv_contents: hvdef::hypercall::MsrHypercallContents = hypercall_contents + .with_enable(enable) + .with_gpn(if enable { hypercall_page_num } else { 0 }); + + // SAFETY: Using the contract established in the Hyper-V TLFS. + unsafe { write_msr(hvdef::HV_X64_MSR_HYPERCALL, new_hv_contents.into()) }; +} + +/// Has to be called before using hypercalls. +pub fn initialize(guest_os_id: u64) { + // We are assuming we are running under a Microsoft hypervisor, so there is + // no need to check any cpuid leaves. + report_os_id(guest_os_id); + write_hypercall_msr(true); +} + +/// Call to uninitialize hypercalL page overlay +pub fn uninitialize() { + write_hypercall_msr(false); + report_os_id(0); +} diff --git a/opentmk/src/arch/x86_64/interrupt.rs b/opentmk/src/arch/x86_64/interrupt.rs new file mode 100644 index 0000000000..5b05c1c5ca --- /dev/null +++ b/opentmk/src/arch/x86_64/interrupt.rs @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! x86_64-specific interrupt handling implementation. +//! + +use lazy_static::lazy_static; +use spin::Mutex; +use x86_64::structures::idt::InterruptDescriptorTable; +use x86_64::structures::idt::InterruptStackFrame; + +use super::interrupt_handler_register::register_interrupt_handler; +use super::interrupt_handler_register::set_common_handler; + +lazy_static! { + static ref IDT: InterruptDescriptorTable = { + let mut idt = InterruptDescriptorTable::new(); + register_interrupt_handler(&mut idt); + idt.double_fault.set_handler_fn(handler_double_fault); + idt + }; +} + +static mut HANDLERS: [fn(); 256] = [no_op; 256]; +static MUTEX: Mutex<()> = Mutex::new(()); +fn no_op() {} + +fn common_handler(_stack_frame: InterruptStackFrame, interrupt: u8) { + // SAFETY: Handlers are initialized to no_op and only set via set_handler which is + // protected by a mutex. + unsafe { + HANDLERS[interrupt as usize](); + } +} + +/// Sets the handler for a specific interrupt number. +pub fn set_handler(interrupt: u8, handler: fn()) { + let _lock = MUTEX.lock(); + // SAFETY: handlers is protected by a mutex. + unsafe { + HANDLERS[interrupt as usize] = handler; + } +} + +extern "x86-interrupt" fn handler_double_fault( + stack_frame: InterruptStackFrame, + _error_code: u64, +) -> ! { + log::error!( + "EXCEPTION:\n\tERROR_CODE: {}\n\tDOUBLE FAULT\n{:#?}", + _error_code, + stack_frame + ); + loop { + core::hint::spin_loop(); + } +} + +/// Initialize the IDT +pub fn init() { + IDT.load(); + set_common_handler(common_handler); + x86_64::instructions::interrupts::enable(); +} diff --git a/opentmk/src/arch/x86_64/interrupt_handler_register.rs b/opentmk/src/arch/x86_64/interrupt_handler_register.rs new file mode 100644 index 0000000000..1db1a3a678 --- /dev/null +++ b/opentmk/src/arch/x86_64/interrupt_handler_register.rs @@ -0,0 +1,596 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#![expect(dead_code)] +use spin::Mutex; +use x86_64::structures::idt::InterruptDescriptorTable; +use x86_64::structures::idt::InterruptStackFrame; +use x86_64::structures::idt::PageFaultErrorCode; +static mut COMMON_HANDLER: fn(InterruptStackFrame, u8) = common_handler; +static COMMON_HANDLER_MUTEX: Mutex<()> = Mutex::new(()); + +#[unsafe(no_mangle)] +fn abstraction_handle(stack_frame: InterruptStackFrame, interrupt: u8) { + // SAFETY: COMMON_HANDLER is only set via set_common_handler which is protected by a mutex. + unsafe { (COMMON_HANDLER)(stack_frame, interrupt) }; + log::debug!("Interrupt: {}", interrupt); +} + +macro_rules! create_fn { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) { + abstraction_handle(stack_frame, $i); + } + }; +} + +macro_rules! create_fn_create_with_errorcode { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) { + abstraction_handle(stack_frame, $i); + } + }; +} + +macro_rules! create_fn_divergent_create_with_errorcode { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) -> ! { + abstraction_handle(stack_frame, $i); + loop {} + } + }; +} + +macro_rules! create_fn_divergent_create { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) -> ! { + abstraction_handle(stack_frame, $i); + loop {} + } + }; +} + +static mut BACKUP_RSP: u64 = 0; + +macro_rules! create_page_fault_fn { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name( + stack_frame: InterruptStackFrame, + _error_code: PageFaultErrorCode, + ) { + abstraction_handle(stack_frame, $i); + } + }; +} + +macro_rules! register_interrupt_handler { + ($idt: expr, $i: expr, $name: ident) => { + $idt[$i].set_handler_fn($name); + }; +} + +fn common_handler(_stack_frame: InterruptStackFrame, interrupt: u8) { + log::info!("Default interrupt handler fired: {}", interrupt); +} + +pub fn set_common_handler(handler: fn(InterruptStackFrame, u8)) { + let _guard = COMMON_HANDLER_MUTEX.lock(); + // SAFETY: COMMON_HANDLER is only set via this function which is protected by a mutex. 
+ unsafe { + COMMON_HANDLER = handler; + } +} + +extern "x86-interrupt" fn no_op(_stack_frame: InterruptStackFrame) {} + +pub fn register_interrupt_handler(idt: &mut InterruptDescriptorTable) { + idt.divide_error.set_handler_fn(handler_0); + idt.debug.set_handler_fn(handler_1); + idt.non_maskable_interrupt.set_handler_fn(handler_2); + idt.breakpoint.set_handler_fn(handler_3); + idt.overflow.set_handler_fn(handler_4); + idt.bound_range_exceeded.set_handler_fn(handler_5); + idt.invalid_opcode.set_handler_fn(handler_6); + idt.device_not_available.set_handler_fn(handler_7); + idt.double_fault.set_handler_fn(handler_8); + register_interrupt_handler!(idt, 9, handler_9); + idt.invalid_tss.set_handler_fn(handler_10); + idt.segment_not_present.set_handler_fn(handler_11); + idt.stack_segment_fault.set_handler_fn(handler_12); + idt.general_protection_fault.set_handler_fn(handler_13); + idt.page_fault.set_handler_fn(handler_14); + // Vector 15 is reserved + idt.x87_floating_point.set_handler_fn(handler_16); + idt.alignment_check.set_handler_fn(handler_17); + idt.machine_check.set_handler_fn(handler_18); + idt.simd_floating_point.set_handler_fn(handler_19); + idt.virtualization.set_handler_fn(handler_20); + idt.cp_protection_exception.set_handler_fn(handler_21); + // Vector 22-27 is reserved + idt.hv_injection_exception.set_handler_fn(handler_28); + idt.vmm_communication_exception.set_handler_fn(handler_29); + idt.security_exception.set_handler_fn(handler_30); + // Vector 31 is reserved + + register_interrupt_handler!(idt, 32, handler_32); + register_interrupt_handler!(idt, 33, handler_33); + register_interrupt_handler!(idt, 34, handler_34); + register_interrupt_handler!(idt, 35, handler_35); + register_interrupt_handler!(idt, 36, handler_36); + register_interrupt_handler!(idt, 37, handler_37); + register_interrupt_handler!(idt, 38, handler_38); + register_interrupt_handler!(idt, 39, handler_39); + register_interrupt_handler!(idt, 40, handler_40); + register_interrupt_handler!(idt, 41, handler_41); + register_interrupt_handler!(idt, 42, handler_42); + register_interrupt_handler!(idt, 43, handler_43); + register_interrupt_handler!(idt, 44, handler_44); + register_interrupt_handler!(idt, 45, handler_45); + register_interrupt_handler!(idt, 46, handler_46); + register_interrupt_handler!(idt, 47, handler_47); + register_interrupt_handler!(idt, 48, handler_48); + register_interrupt_handler!(idt, 49, handler_49); + register_interrupt_handler!(idt, 50, handler_50); + register_interrupt_handler!(idt, 51, handler_51); + register_interrupt_handler!(idt, 52, handler_52); + register_interrupt_handler!(idt, 53, handler_53); + register_interrupt_handler!(idt, 54, handler_54); + register_interrupt_handler!(idt, 55, handler_55); + register_interrupt_handler!(idt, 56, handler_56); + register_interrupt_handler!(idt, 57, handler_57); + register_interrupt_handler!(idt, 58, handler_58); + register_interrupt_handler!(idt, 59, handler_59); + register_interrupt_handler!(idt, 60, handler_60); + register_interrupt_handler!(idt, 61, handler_61); + register_interrupt_handler!(idt, 62, handler_62); + register_interrupt_handler!(idt, 63, handler_63); + register_interrupt_handler!(idt, 64, handler_64); + register_interrupt_handler!(idt, 65, handler_65); + register_interrupt_handler!(idt, 66, handler_66); + register_interrupt_handler!(idt, 67, handler_67); + register_interrupt_handler!(idt, 68, handler_68); + register_interrupt_handler!(idt, 69, handler_69); + register_interrupt_handler!(idt, 70, handler_70); + 
register_interrupt_handler!(idt, 71, handler_71); + register_interrupt_handler!(idt, 72, handler_72); + register_interrupt_handler!(idt, 73, handler_73); + register_interrupt_handler!(idt, 74, handler_74); + register_interrupt_handler!(idt, 75, handler_75); + register_interrupt_handler!(idt, 76, handler_76); + register_interrupt_handler!(idt, 77, handler_77); + register_interrupt_handler!(idt, 78, handler_78); + register_interrupt_handler!(idt, 79, handler_79); + register_interrupt_handler!(idt, 80, handler_80); + register_interrupt_handler!(idt, 81, handler_81); + register_interrupt_handler!(idt, 82, handler_82); + register_interrupt_handler!(idt, 83, handler_83); + register_interrupt_handler!(idt, 84, handler_84); + register_interrupt_handler!(idt, 85, handler_85); + register_interrupt_handler!(idt, 86, handler_86); + register_interrupt_handler!(idt, 87, handler_87); + register_interrupt_handler!(idt, 88, handler_88); + register_interrupt_handler!(idt, 89, handler_89); + register_interrupt_handler!(idt, 90, handler_90); + register_interrupt_handler!(idt, 91, handler_91); + register_interrupt_handler!(idt, 92, handler_92); + register_interrupt_handler!(idt, 93, handler_93); + register_interrupt_handler!(idt, 94, handler_94); + register_interrupt_handler!(idt, 95, handler_95); + register_interrupt_handler!(idt, 96, handler_96); + register_interrupt_handler!(idt, 97, handler_97); + register_interrupt_handler!(idt, 98, handler_98); + register_interrupt_handler!(idt, 99, handler_99); + register_interrupt_handler!(idt, 100, handler_100); + register_interrupt_handler!(idt, 101, handler_101); + register_interrupt_handler!(idt, 102, handler_102); + register_interrupt_handler!(idt, 103, handler_103); + register_interrupt_handler!(idt, 104, handler_104); + register_interrupt_handler!(idt, 105, handler_105); + register_interrupt_handler!(idt, 106, handler_106); + register_interrupt_handler!(idt, 107, handler_107); + register_interrupt_handler!(idt, 108, handler_108); + register_interrupt_handler!(idt, 109, handler_109); + register_interrupt_handler!(idt, 110, handler_110); + register_interrupt_handler!(idt, 111, handler_111); + register_interrupt_handler!(idt, 112, handler_112); + register_interrupt_handler!(idt, 113, handler_113); + register_interrupt_handler!(idt, 114, handler_114); + register_interrupt_handler!(idt, 115, handler_115); + register_interrupt_handler!(idt, 116, handler_116); + register_interrupt_handler!(idt, 117, handler_117); + register_interrupt_handler!(idt, 118, handler_118); + register_interrupt_handler!(idt, 119, handler_119); + register_interrupt_handler!(idt, 120, handler_120); + register_interrupt_handler!(idt, 121, handler_121); + register_interrupt_handler!(idt, 122, handler_122); + register_interrupt_handler!(idt, 123, handler_123); + register_interrupt_handler!(idt, 124, handler_124); + register_interrupt_handler!(idt, 125, handler_125); + register_interrupt_handler!(idt, 126, handler_126); + register_interrupt_handler!(idt, 127, handler_127); + register_interrupt_handler!(idt, 128, handler_128); + register_interrupt_handler!(idt, 129, handler_129); + register_interrupt_handler!(idt, 130, handler_130); + register_interrupt_handler!(idt, 131, handler_131); + register_interrupt_handler!(idt, 132, handler_132); + register_interrupt_handler!(idt, 133, handler_133); + register_interrupt_handler!(idt, 134, handler_134); + register_interrupt_handler!(idt, 135, handler_135); + register_interrupt_handler!(idt, 136, handler_136); + register_interrupt_handler!(idt, 137, 
handler_137); + register_interrupt_handler!(idt, 138, handler_138); + register_interrupt_handler!(idt, 139, handler_139); + register_interrupt_handler!(idt, 140, handler_140); + register_interrupt_handler!(idt, 141, handler_141); + register_interrupt_handler!(idt, 142, handler_142); + register_interrupt_handler!(idt, 143, handler_143); + register_interrupt_handler!(idt, 144, handler_144); + register_interrupt_handler!(idt, 145, handler_145); + register_interrupt_handler!(idt, 146, handler_146); + register_interrupt_handler!(idt, 147, handler_147); + register_interrupt_handler!(idt, 148, handler_148); + register_interrupt_handler!(idt, 149, handler_149); + register_interrupt_handler!(idt, 150, handler_150); + register_interrupt_handler!(idt, 151, handler_151); + register_interrupt_handler!(idt, 152, handler_152); + register_interrupt_handler!(idt, 153, handler_153); + register_interrupt_handler!(idt, 154, handler_154); + register_interrupt_handler!(idt, 155, handler_155); + register_interrupt_handler!(idt, 156, handler_156); + register_interrupt_handler!(idt, 157, handler_157); + register_interrupt_handler!(idt, 158, handler_158); + register_interrupt_handler!(idt, 159, handler_159); + register_interrupt_handler!(idt, 160, handler_160); + register_interrupt_handler!(idt, 161, handler_161); + register_interrupt_handler!(idt, 162, handler_162); + register_interrupt_handler!(idt, 163, handler_163); + register_interrupt_handler!(idt, 164, handler_164); + register_interrupt_handler!(idt, 165, handler_165); + register_interrupt_handler!(idt, 166, handler_166); + register_interrupt_handler!(idt, 167, handler_167); + register_interrupt_handler!(idt, 168, handler_168); + register_interrupt_handler!(idt, 169, handler_169); + register_interrupt_handler!(idt, 170, handler_170); + register_interrupt_handler!(idt, 171, handler_171); + register_interrupt_handler!(idt, 172, handler_172); + register_interrupt_handler!(idt, 173, handler_173); + register_interrupt_handler!(idt, 174, handler_174); + register_interrupt_handler!(idt, 175, handler_175); + register_interrupt_handler!(idt, 176, handler_176); + register_interrupt_handler!(idt, 177, handler_177); + register_interrupt_handler!(idt, 178, handler_178); + register_interrupt_handler!(idt, 179, handler_179); + register_interrupt_handler!(idt, 180, handler_180); + register_interrupt_handler!(idt, 181, handler_181); + register_interrupt_handler!(idt, 182, handler_182); + register_interrupt_handler!(idt, 183, handler_183); + register_interrupt_handler!(idt, 184, handler_184); + register_interrupt_handler!(idt, 185, handler_185); + register_interrupt_handler!(idt, 186, handler_186); + register_interrupt_handler!(idt, 187, handler_187); + register_interrupt_handler!(idt, 188, handler_188); + register_interrupt_handler!(idt, 189, handler_189); + register_interrupt_handler!(idt, 190, handler_190); + register_interrupt_handler!(idt, 191, handler_191); + register_interrupt_handler!(idt, 192, handler_192); + register_interrupt_handler!(idt, 193, handler_193); + register_interrupt_handler!(idt, 194, handler_194); + register_interrupt_handler!(idt, 195, handler_195); + register_interrupt_handler!(idt, 196, handler_196); + register_interrupt_handler!(idt, 197, handler_197); + register_interrupt_handler!(idt, 198, handler_198); + register_interrupt_handler!(idt, 199, handler_199); + register_interrupt_handler!(idt, 200, handler_200); + register_interrupt_handler!(idt, 201, handler_201); + register_interrupt_handler!(idt, 202, handler_202); + 
register_interrupt_handler!(idt, 203, handler_203); + register_interrupt_handler!(idt, 204, handler_204); + register_interrupt_handler!(idt, 205, handler_205); + register_interrupt_handler!(idt, 206, handler_206); + register_interrupt_handler!(idt, 207, handler_207); + register_interrupt_handler!(idt, 208, handler_208); + register_interrupt_handler!(idt, 209, handler_209); + register_interrupt_handler!(idt, 210, handler_210); + register_interrupt_handler!(idt, 211, handler_211); + register_interrupt_handler!(idt, 212, handler_212); + register_interrupt_handler!(idt, 213, handler_213); + register_interrupt_handler!(idt, 214, handler_214); + register_interrupt_handler!(idt, 215, handler_215); + register_interrupt_handler!(idt, 216, handler_216); + register_interrupt_handler!(idt, 217, handler_217); + register_interrupt_handler!(idt, 218, handler_218); + register_interrupt_handler!(idt, 219, handler_219); + register_interrupt_handler!(idt, 220, handler_220); + register_interrupt_handler!(idt, 221, handler_221); + register_interrupt_handler!(idt, 222, handler_222); + register_interrupt_handler!(idt, 223, handler_223); + register_interrupt_handler!(idt, 224, handler_224); + register_interrupt_handler!(idt, 225, handler_225); + register_interrupt_handler!(idt, 226, handler_226); + register_interrupt_handler!(idt, 227, handler_227); + register_interrupt_handler!(idt, 228, handler_228); + register_interrupt_handler!(idt, 229, handler_229); + register_interrupt_handler!(idt, 230, handler_230); + register_interrupt_handler!(idt, 231, handler_231); + register_interrupt_handler!(idt, 232, handler_232); + register_interrupt_handler!(idt, 233, handler_233); + register_interrupt_handler!(idt, 234, handler_234); + register_interrupt_handler!(idt, 235, handler_235); + register_interrupt_handler!(idt, 236, handler_236); + register_interrupt_handler!(idt, 237, handler_237); + register_interrupt_handler!(idt, 238, handler_238); + register_interrupt_handler!(idt, 239, handler_239); + register_interrupt_handler!(idt, 240, handler_240); + register_interrupt_handler!(idt, 241, handler_241); + register_interrupt_handler!(idt, 242, handler_242); + register_interrupt_handler!(idt, 243, handler_243); + register_interrupt_handler!(idt, 244, handler_244); + register_interrupt_handler!(idt, 245, handler_245); + register_interrupt_handler!(idt, 246, handler_246); + register_interrupt_handler!(idt, 247, handler_247); + register_interrupt_handler!(idt, 248, handler_248); + register_interrupt_handler!(idt, 249, handler_249); + register_interrupt_handler!(idt, 250, handler_250); + register_interrupt_handler!(idt, 251, handler_251); + register_interrupt_handler!(idt, 252, handler_252); + register_interrupt_handler!(idt, 253, handler_253); + register_interrupt_handler!(idt, 254, handler_254); + register_interrupt_handler!(idt, 255, handler_255); +} + +create_fn!(handler_0, 0); +create_fn!(handler_1, 1); +create_fn!(handler_2, 2); +create_fn!(handler_3, 3); +create_fn!(handler_4, 4); +create_fn!(handler_5, 5); +create_fn!(handler_6, 6); +create_fn!(handler_7, 7); +create_fn_divergent_create_with_errorcode!(handler_8, 8); +create_fn!(handler_9, 9); +create_fn_create_with_errorcode!(handler_10, 10); +create_fn_create_with_errorcode!(handler_11, 11); +create_fn_create_with_errorcode!(handler_12, 12); +create_fn_create_with_errorcode!(handler_13, 13); +create_page_fault_fn!(handler_14, 14); +create_fn!(handler_15, 15); +create_fn!(handler_16, 16); +create_fn_create_with_errorcode!(handler_17, 17); 
+create_fn_divergent_create!(handler_18, 18); +create_fn!(handler_19, 19); +create_fn!(handler_20, 20); +create_fn_create_with_errorcode!(handler_21, 21); +create_fn!(handler_22, 22); +create_fn!(handler_23, 23); +create_fn!(handler_24, 24); +create_fn!(handler_25, 25); +create_fn!(handler_26, 26); +create_fn!(handler_27, 27); +create_fn!(handler_28, 28); +create_fn_create_with_errorcode!(handler_29, 29); +create_fn_create_with_errorcode!(handler_30, 30); +create_fn!(handler_31, 31); +create_fn!(handler_32, 32); +create_fn!(handler_33, 33); +create_fn!(handler_34, 34); +create_fn!(handler_35, 35); +create_fn!(handler_36, 36); +create_fn!(handler_37, 37); +create_fn!(handler_38, 38); +create_fn!(handler_39, 39); +create_fn!(handler_40, 40); +create_fn!(handler_41, 41); +create_fn!(handler_42, 42); +create_fn!(handler_43, 43); +create_fn!(handler_44, 44); +create_fn!(handler_45, 45); +create_fn!(handler_46, 46); +create_fn!(handler_47, 47); +create_fn!(handler_48, 48); +create_fn!(handler_49, 49); +create_fn!(handler_50, 50); +create_fn!(handler_51, 51); +create_fn!(handler_52, 52); +create_fn!(handler_53, 53); +create_fn!(handler_54, 54); +create_fn!(handler_55, 55); +create_fn!(handler_56, 56); +create_fn!(handler_57, 57); +create_fn!(handler_58, 58); +create_fn!(handler_59, 59); +create_fn!(handler_60, 60); +create_fn!(handler_61, 61); +create_fn!(handler_62, 62); +create_fn!(handler_63, 63); +create_fn!(handler_64, 64); +create_fn!(handler_65, 65); +create_fn!(handler_66, 66); +create_fn!(handler_67, 67); +create_fn!(handler_68, 68); +create_fn!(handler_69, 69); +create_fn!(handler_70, 70); +create_fn!(handler_71, 71); +create_fn!(handler_72, 72); +create_fn!(handler_73, 73); +create_fn!(handler_74, 74); +create_fn!(handler_75, 75); +create_fn!(handler_76, 76); +create_fn!(handler_77, 77); +create_fn!(handler_78, 78); +create_fn!(handler_79, 79); +create_fn!(handler_80, 80); +create_fn!(handler_81, 81); +create_fn!(handler_82, 82); +create_fn!(handler_83, 83); +create_fn!(handler_84, 84); +create_fn!(handler_85, 85); +create_fn!(handler_86, 86); +create_fn!(handler_87, 87); +create_fn!(handler_88, 88); +create_fn!(handler_89, 89); +create_fn!(handler_90, 90); +create_fn!(handler_91, 91); +create_fn!(handler_92, 92); +create_fn!(handler_93, 93); +create_fn!(handler_94, 94); +create_fn!(handler_95, 95); +create_fn!(handler_96, 96); +create_fn!(handler_97, 97); +create_fn!(handler_98, 98); +create_fn!(handler_99, 99); +create_fn!(handler_100, 100); +create_fn!(handler_101, 101); +create_fn!(handler_102, 102); +create_fn!(handler_103, 103); +create_fn!(handler_104, 104); +create_fn!(handler_105, 105); +create_fn!(handler_106, 106); +create_fn!(handler_107, 107); +create_fn!(handler_108, 108); +create_fn!(handler_109, 109); +create_fn!(handler_110, 110); +create_fn!(handler_111, 111); +create_fn!(handler_112, 112); +create_fn!(handler_113, 113); +create_fn!(handler_114, 114); +create_fn!(handler_115, 115); +create_fn!(handler_116, 116); +create_fn!(handler_117, 117); +create_fn!(handler_118, 118); +create_fn!(handler_119, 119); +create_fn!(handler_120, 120); +create_fn!(handler_121, 121); +create_fn!(handler_122, 122); +create_fn!(handler_123, 123); +create_fn!(handler_124, 124); +create_fn!(handler_125, 125); +create_fn!(handler_126, 126); +create_fn!(handler_127, 127); +create_fn!(handler_128, 128); +create_fn!(handler_129, 129); +create_fn!(handler_130, 130); +create_fn!(handler_131, 131); +create_fn!(handler_132, 132); +create_fn!(handler_133, 133); +create_fn!(handler_134, 134); 
+create_fn!(handler_135, 135); +create_fn!(handler_136, 136); +create_fn!(handler_137, 137); +create_fn!(handler_138, 138); +create_fn!(handler_139, 139); +create_fn!(handler_140, 140); +create_fn!(handler_141, 141); +create_fn!(handler_142, 142); +create_fn!(handler_143, 143); +create_fn!(handler_144, 144); +create_fn!(handler_145, 145); +create_fn!(handler_146, 146); +create_fn!(handler_147, 147); +create_fn!(handler_148, 148); +create_fn!(handler_149, 149); +create_fn!(handler_150, 150); +create_fn!(handler_151, 151); +create_fn!(handler_152, 152); +create_fn!(handler_153, 153); +create_fn!(handler_154, 154); +create_fn!(handler_155, 155); +create_fn!(handler_156, 156); +create_fn!(handler_157, 157); +create_fn!(handler_158, 158); +create_fn!(handler_159, 159); +create_fn!(handler_160, 160); +create_fn!(handler_161, 161); +create_fn!(handler_162, 162); +create_fn!(handler_163, 163); +create_fn!(handler_164, 164); +create_fn!(handler_165, 165); +create_fn!(handler_166, 166); +create_fn!(handler_167, 167); +create_fn!(handler_168, 168); +create_fn!(handler_169, 169); +create_fn!(handler_170, 170); +create_fn!(handler_171, 171); +create_fn!(handler_172, 172); +create_fn!(handler_173, 173); +create_fn!(handler_174, 174); +create_fn!(handler_175, 175); +create_fn!(handler_176, 176); +create_fn!(handler_177, 177); +create_fn!(handler_178, 178); +create_fn!(handler_179, 179); +create_fn!(handler_180, 180); +create_fn!(handler_181, 181); +create_fn!(handler_182, 182); +create_fn!(handler_183, 183); +create_fn!(handler_184, 184); +create_fn!(handler_185, 185); +create_fn!(handler_186, 186); +create_fn!(handler_187, 187); +create_fn!(handler_188, 188); +create_fn!(handler_189, 189); +create_fn!(handler_190, 190); +create_fn!(handler_191, 191); +create_fn!(handler_192, 192); +create_fn!(handler_193, 193); +create_fn!(handler_194, 194); +create_fn!(handler_195, 195); +create_fn!(handler_196, 196); +create_fn!(handler_197, 197); +create_fn!(handler_198, 198); +create_fn!(handler_199, 199); +create_fn!(handler_200, 200); +create_fn!(handler_201, 201); +create_fn!(handler_202, 202); +create_fn!(handler_203, 203); +create_fn!(handler_204, 204); +create_fn!(handler_205, 205); +create_fn!(handler_206, 206); +create_fn!(handler_207, 207); +create_fn!(handler_208, 208); +create_fn!(handler_209, 209); +create_fn!(handler_210, 210); +create_fn!(handler_211, 211); +create_fn!(handler_212, 212); +create_fn!(handler_213, 213); +create_fn!(handler_214, 214); +create_fn!(handler_215, 215); +create_fn!(handler_216, 216); +create_fn!(handler_217, 217); +create_fn!(handler_218, 218); +create_fn!(handler_219, 219); +create_fn!(handler_220, 220); +create_fn!(handler_221, 221); +create_fn!(handler_222, 222); +create_fn!(handler_223, 223); +create_fn!(handler_224, 224); +create_fn!(handler_225, 225); +create_fn!(handler_226, 226); +create_fn!(handler_227, 227); +create_fn!(handler_228, 228); +create_fn!(handler_229, 229); +create_fn!(handler_230, 230); +create_fn!(handler_231, 231); +create_fn!(handler_232, 232); +create_fn!(handler_233, 233); +create_fn!(handler_234, 234); +create_fn!(handler_235, 235); +create_fn!(handler_236, 236); +create_fn!(handler_237, 237); +create_fn!(handler_238, 238); +create_fn!(handler_239, 239); +create_fn!(handler_240, 240); +create_fn!(handler_241, 241); +create_fn!(handler_242, 242); +create_fn!(handler_243, 243); +create_fn!(handler_244, 244); +create_fn!(handler_245, 245); +create_fn!(handler_246, 246); +create_fn!(handler_247, 247); +create_fn!(handler_248, 248); 
+create_fn!(handler_249, 249); +create_fn!(handler_250, 250); +create_fn!(handler_251, 251); +create_fn!(handler_252, 252); +create_fn!(handler_253, 253); +create_fn!(handler_254, 254); +create_fn!(handler_255, 255); diff --git a/opentmk/src/arch/x86_64/io.rs b/opentmk/src/arch/x86_64/io.rs new file mode 100644 index 0000000000..aec59cad9e --- /dev/null +++ b/opentmk/src/arch/x86_64/io.rs @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use core::arch::asm; + +/// Write a byte to a port. +pub fn outb(port: u16, data: u8) { + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "out dx, al", + in("dx") port, + in("al") data, + } + } +} + +/// Read a byte from a port. +pub fn inb(port: u16) -> u8 { + let mut data; + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "in al, dx", + in("dx") port, + out("al") data, + } + } + data +} + +/// Read a double word from a port. +pub fn inl(port: u16) -> u32 { + let mut data; + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "in eax, dx", + in("dx") port, + out("eax") data, + } + } + data +} + +/// Write a double word to a port. +/// Uses the `out dx, eax` instruction. +pub fn outl(port: u16, data: u32) { + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "out dx, eax", + in("dx") port, + in("eax") data, + } + } +} diff --git a/opentmk/src/arch/x86_64/mod.rs b/opentmk/src/arch/x86_64/mod.rs new file mode 100644 index 0000000000..65d549eec9 --- /dev/null +++ b/opentmk/src/arch/x86_64/mod.rs @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +pub mod hypercall; +#[cfg(nightly)] +pub mod interrupt; +#[cfg(nightly)] +mod interrupt_handler_register; +mod io; +pub mod rtc; +pub mod serial; +pub mod tpm; diff --git a/opentmk/src/arch/x86_64/rtc.rs b/opentmk/src/arch/x86_64/rtc.rs new file mode 100644 index 0000000000..294e1b9b87 --- /dev/null +++ b/opentmk/src/arch/x86_64/rtc.rs @@ -0,0 +1,167 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! x86_64-specific implementation for reading from the RTC (Real-Time Clock) and CMOS. + +use super::io::inb; +use super::io::outb; +// CMOS/RTC I/O ports +const CMOS_ADDRESS: u16 = 0x70; +const CMOS_DATA: u16 = 0x71; + +// RTC register addresses +const RTC_SECONDS: u8 = 0x00; +const RTC_MINUTES: u8 = 0x02; +const RTC_HOURS: u8 = 0x04; +const RTC_DAY: u8 = 0x07; +const RTC_MONTH: u8 = 0x08; +const RTC_YEAR: u8 = 0x09; +const RTC_STATUS_A: u8 = 0x0A; +const RTC_STATUS_B: u8 = 0x0B; + +#[repr(C)] +#[derive(Debug, Clone, Copy)] +/// Represents date and time read from the RTC. +pub struct DateTime { + seconds: u8, + minutes: u8, + hours: u8, + day: u8, + month: u8, + year: u8, +} + +// implement display as ISO 8601 format +impl core::fmt::Display for DateTime { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z", + 2000 + self.year as u64, + self.month, + self.day, + self.hours, + self.minutes, + self.seconds + ) + } +} + +/// convert datetime to Unix epoch +impl DateTime { + /// Converts the DateTime to seconds since the Unix epoch (1970-01-01T00:00:00Z).
+ pub fn unix_epoch_sec(&self) -> u64 { + // Check if a year is a leap year + let is_leap_year = |year: u64| -> bool { + (year.is_multiple_of(4) && !year.is_multiple_of(100)) || year.is_multiple_of(400) + }; + + // Define days in each month (0-indexed array) + let days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; + + // Calculate days since Unix epoch (1970-01-01) + let year = 2000 + self.year as u64; + let month = self.month as u64; + let day = self.day as u64; + + // Days from years + let mut days = 0u64; + for y in 1970..year { + days += 365 + if is_leap_year(y) { 1 } else { 0 }; + } + + // Add days from months in current year + for m in 1..month { + days += days_in_month[m as usize - 1] as u64; + // Add leap day if February and leap year + if m == 2 && is_leap_year(year) { + days += 1; + } + } + + // Add days of current month + days += day - 1; // -1 because we want elapsed days + let hours = self.hours as u64; + let minutes = self.minutes as u64; + let seconds = self.seconds as u64; + + (days * 24 + hours) * 3600 + (minutes * 60) + seconds + } +} + +// Read from CMOS/RTC register +fn read_cmos(reg: u8) -> u8 { + outb(CMOS_ADDRESS, reg); + inb(CMOS_DATA) +} + +// Check if RTC update is in progress +fn rtc_update_in_progress() -> bool { + read_cmos(RTC_STATUS_A) & 0x80 != 0 +} + +// Convert BCD to binary if needed +fn bcd_to_binary(bcd: u8) -> u8 { + (bcd & 0x0F) + ((bcd >> 4) * 10) +} + +/// Read current date and time from RTC +pub fn read_rtc() -> DateTime { + // Wait for any update to complete + while rtc_update_in_progress() {} + + let mut datetime = DateTime { + seconds: read_cmos(RTC_SECONDS), + minutes: read_cmos(RTC_MINUTES), + hours: read_cmos(RTC_HOURS), + day: read_cmos(RTC_DAY), + month: read_cmos(RTC_MONTH), + year: read_cmos(RTC_YEAR), + }; + + // Check if we need to wait for another update cycle + while rtc_update_in_progress() {} + + // Read again to ensure consistency + let seconds_check = read_cmos(RTC_SECONDS); + if seconds_check != datetime.seconds { + datetime.seconds = seconds_check; + datetime.minutes = read_cmos(RTC_MINUTES); + datetime.hours = read_cmos(RTC_HOURS); + datetime.day = read_cmos(RTC_DAY); + datetime.month = read_cmos(RTC_MONTH); + datetime.year = read_cmos(RTC_YEAR); + } + + // Check RTC format (BCD vs binary) + let status_b = read_cmos(RTC_STATUS_B); + let is_bcd = (status_b & 0x04) == 0; + + if is_bcd { + datetime.seconds = bcd_to_binary(datetime.seconds); + datetime.minutes = bcd_to_binary(datetime.minutes); + datetime.hours = bcd_to_binary(datetime.hours); + datetime.day = bcd_to_binary(datetime.day); + datetime.month = bcd_to_binary(datetime.month); + datetime.year = bcd_to_binary(datetime.year); + } + + // Handle 12-hour format if needed + if (status_b & 0x02) == 0 && (datetime.hours & 0x80) != 0 { + datetime.hours = ((datetime.hours & 0x7F) + 12) % 24; + } + + datetime +} + +/// Busy-wait delay for specified seconds using RTC +pub fn delay_sec(seconds: u64) { + let start = read_rtc().unix_epoch_sec(); + let end = start + seconds; + loop { + let current = read_rtc().unix_epoch_sec(); + if current >= end { + break; + } + } +} diff --git a/opentmk/src/arch/x86_64/serial.rs b/opentmk/src/arch/x86_64/serial.rs new file mode 100644 index 0000000000..1def2797ab --- /dev/null +++ b/opentmk/src/arch/x86_64/serial.rs @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Serial output for debugging. + +use core::fmt; + +use spin::Mutex; + +use super::io; + +/// Serial port addresses. 
+/// These are the standard COM ports used in x86 systems. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum SerialPort { + /// COM1 serial port + COM1, + /// COM2 serial port + COM2, + /// COM3 serial port + COM3, + /// COM4 serial port + COM4, +} + +impl SerialPort { + /// Convert the SerialPort enum to its u16 representation. + pub fn value(self) -> u16 { + match self { + SerialPort::COM1 => 0x3F8, + SerialPort::COM2 => 0x2F8, + SerialPort::COM3 => 0x3E8, + SerialPort::COM4 => 0x2E8, + } + } +} + +/// A trait to access io ports used by the serial device. +pub trait IoAccess { + /// Issue an in byte instruction. + /// + /// # Safety + /// + /// The caller must be sure that the given port is safe to read from. + unsafe fn inb(&self, port: u16) -> u8; + /// Issue an out byte instruction. + /// + /// # Safety + /// + /// The caller must be sure that the given port is safe to write to, and that the + /// given value is safe for it. + unsafe fn outb(&self, port: u16, data: u8); +} + +/// A struct to access io ports using in/out instructions. +pub struct InstrIoAccess; + +impl IoAccess for InstrIoAccess { + unsafe fn inb(&self, port: u16) -> u8 { + io::inb(port) + } + + unsafe fn outb(&self, port: u16, data: u8) { + io::outb(port, data) + } +} + +/// A writer for the UART COM Ports. +pub struct Serial<T: IoAccess> { + io: T, + serial_port: SerialPort, + mutex: Mutex<()>, +} + +impl<T: IoAccess> Serial<T> { + /// Initialize the serial port. + pub const fn new(serial_port: SerialPort, io: T) -> Self { + Self { + io, + serial_port, + mutex: Mutex::new(()), + } + } + + /// Initialize the serial port. + pub fn init(&self) { + // SAFETY: Initializing the serial port is safe. + unsafe { + self.io.outb(self.serial_port.value() + 1, 0x00); // Disable all interrupts + self.io.outb(self.serial_port.value() + 2, 0xC7); // Enable FIFO, clear them, with 14-byte threshold + self.io.outb(self.serial_port.value() + 4, 0x0F); + } + } + + fn write_byte(&self, b: u8) { + // SAFETY: Reading and writing text to the serial device is safe. + unsafe { + while self.io.inb(self.serial_port.value() + 5) & 0x20 == 0 {} + self.io.outb(self.serial_port.value(), b); + } + } +} + +impl<T: IoAccess> fmt::Write for Serial<T> { + fn write_str(&mut self, s: &str) -> fmt::Result { + let _guard = self.mutex.lock(); + for &b in s.as_bytes() { + if b == b'\n' { + self.write_byte(b'\r'); + } + self.write_byte(b); + } + Ok(()) + } +} diff --git a/opentmk/src/arch/x86_64/tpm.rs b/opentmk/src/arch/x86_64/tpm.rs new file mode 100644 index 0000000000..022440e54d --- /dev/null +++ b/opentmk/src/arch/x86_64/tpm.rs @@ -0,0 +1,118 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! x86_64-specific implementation of TPM device access.
+ +#![expect(dead_code)] + +use crate::devices::tpm::TpmDevice; + +const TPM_DEVICE_MMIO_REGION_BASE_ADDRESS: u64 = 0xfed40000; +const TPM_DEVICE_MMIO_REGION_SIZE: u64 = 0x70; + +const TPM_DEVICE_IO_PORT_RANGE_BEGIN: u16 = 0x1040; +const TPM_DEVICE_IO_PORT_RANGE_END: u16 = 0x1048; + +const TPM_DEVICE_IO_PORT_CONTROL_OFFSET: u16 = 0; +const TPM_DEVICE_IO_PORT_DATA_OFFSET: u16 = 4; + +const TPM_DEVICE_MMIO_PORT_REGION_BASE_ADDRESS: u64 = TPM_DEVICE_MMIO_REGION_BASE_ADDRESS + 0x80; +const TPM_DEVICE_MMIO_PORT_CONTROL: u64 = + TPM_DEVICE_MMIO_PORT_REGION_BASE_ADDRESS + TPM_DEVICE_IO_PORT_CONTROL_OFFSET as u64; +const TPM_DEVICE_MMIO_PORT_DATA: u64 = + TPM_DEVICE_MMIO_PORT_REGION_BASE_ADDRESS + TPM_DEVICE_IO_PORT_DATA_OFFSET as u64; +const TPM_DEVICE_MMIO_PORT_REGION_SIZE: u64 = 0x8; + +/// Represents a TPM device accessible via MMIO and IO ports. +/// This struct provides methods to interact with the TPM device +/// using the TpmDevice trait. +pub struct Tpm<'a> { + command_buffer: Option<&'a mut [u8]>, + response_buffer: Option<&'a mut [u8]>, +} + +impl<'a> Tpm<'a> { + /// Creates a new TpmDevice instance. + pub fn new() -> Self { + Tpm { + command_buffer: None, + response_buffer: None, + } + } + + fn get_control_port(command: u32) -> u32 { + let control_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_CONTROL_OFFSET; + let data_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_DATA_OFFSET; + super::io::outl(control_port, command); + super::io::inl(data_port) + } +} + +impl<'a> TpmDevice<'a> for Tpm<'a> { + fn set_command_buffer(&mut self, buffer: &'a mut [u8]) { + self.command_buffer = Some(buffer); + } + + fn set_response_buffer(&mut self, buffer: &'a mut [u8]) { + self.response_buffer = Some(buffer); + } + + fn get_tcg_protocol_version() -> u32 { + Tpm::get_control_port(64) + } + + fn get_mapped_shared_memory() -> u32 { + let data_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_DATA_OFFSET; + Tpm::get_control_port(0x2); + super::io::inl(data_port) + } + + fn map_shared_memory(gpa: u32) -> u32 { + let control_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_CONTROL_OFFSET; + let data_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_DATA_OFFSET; + super::io::outl(control_port, 0x1); + super::io::outl(data_port, gpa); + super::io::outl(control_port, 0x2); + super::io::inl(data_port) + } + + fn submit_command(&mut self, buffer: &[u8]) -> [u8; 4096] { + assert!(buffer.len() <= 4096); + self.copy_to_command_buffer(buffer); + + Self::execute_command_no_check(); + + let mut response = [0; 4096]; + self.copy_from_response_buffer(&mut response); + response + } + + #[expect( + clippy::while_immutable_condition, + reason = "tpm device updates status of MMIO read" + )] + fn execute_command_no_check() { + let command_exec_mmio_addr = TPM_DEVICE_MMIO_REGION_BASE_ADDRESS + 0x4c; + let command_exec_mmio_ptr = command_exec_mmio_addr as *mut u32; + + // SAFETY: we are writing to a valid memory-mapped IO register. + unsafe { + *command_exec_mmio_ptr = 0x1; + } + + // SAFETY: we are reading from a valid memory-mapped IO register. 
+ while unsafe { *command_exec_mmio_ptr } == 0x1 { + core::hint::spin_loop(); + } + } + + fn copy_to_command_buffer(&mut self, buffer: &[u8]) { + assert!(buffer.len() <= 4096); + self.command_buffer.as_mut().unwrap()[..buffer.len()].copy_from_slice(buffer); + } + + fn copy_from_response_buffer(&self, buffer: &mut [u8]) { + assert!(buffer.len() <= 4096); + buffer.copy_from_slice(self.response_buffer.as_ref().unwrap()); + } +} diff --git a/opentmk/src/context.rs b/opentmk/src/context.rs new file mode 100644 index 0000000000..58ae8a2e43 --- /dev/null +++ b/opentmk/src/context.rs @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Platform abstraction traits for OpenTMK. +//! This module defines traits that abstract platform-specific functionalities such as +//! interrupt handling, MSR access, virtual processor management, and VTL management. +//! These traits allow OpenTMK to support multiple platforms by providing +//! platform-specific implementations. +//! + +use alloc::boxed::Box; +use core::ops::Range; + +use hvdef::Vtl; + +use crate::tmkdefs::TmkResult; + +#[cfg(nightly)] +/// Trait for platforms that support secure-world intercepts. +pub trait SecureInterceptPlatformTrait { + /// Installs a secure-world intercept for the given interrupt. + /// + /// The platform must arrange that the supplied `interrupt_idx` + /// triggers a VM-exit or any other mechanism that transfers control + /// to the TMK secure handler. + /// + /// Returns `Ok(())` on success or an error wrapped in `TmkResult`. + fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()>; +} + +#[cfg(nightly)] +/// Trait for platforms that support Interrupts. +pub trait InterruptPlatformTrait { + /// Associates an interrupt vector with a handler inside the + /// non-secure world. + /// + /// * `interrupt_idx` – IDT/GIC index to program + /// * `handler` – Function that will be executed when the interrupt + /// fires. + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()>; + + /// Finalises platform specific interrupt setup (enables the table, + /// unmasks lines, etc.). + fn setup_interrupt_handler(&mut self) -> TmkResult<()>; +} + +/// Trait for platforms that support reading and writing to Model Specific Registers (MSRs). +pub trait MsrPlatformTrait { + /// Reads the content of `msr`. + /// + /// Returns the 64-bit value currently stored in that MSR. + /// # Safety + /// Caller must ensure that reading the specified MSR is a safe operation. + unsafe fn read_msr(&mut self, msr: u32) -> TmkResult<u64>; + + /// Writes `value` into `msr`. + /// # Safety + /// Caller must ensure that writing to the specified MSR is a safe operation. + unsafe fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()>; +} + +/// Trait for platforms that support Virtual Processors (VPs) and VTL management. +pub trait VirtualProcessorPlatformTrait<T> +where + T: VtlPlatformTrait, +{ + /// Returns the index of the virtual CPU currently executing this + /// code. + fn get_current_vp(&self) -> TmkResult<u32>; + + /// Sets the architecture specific register identified by `reg`. + fn set_register(&mut self, reg: u32, value: u128) -> TmkResult<()>; + + /// Reads the architecture specific register identified by `reg`.
+ fn get_register(&mut self, reg: u32) -> TmkResult<u128>; + + /// Sets the architecture specific register identified by `reg` for VTL + fn set_register_vtl(&mut self, reg: u32, value: u128, vtl: Vtl) -> TmkResult<()>; + + /// Reads the architecture specific register identified by `reg` for a VTL + fn get_register_vtl(&mut self, reg: u32, vtl: Vtl) -> TmkResult<u128>; + + /// Total number of online VPs in the partition. + fn get_vp_count(&self) -> TmkResult<u32>; + + /// Queues `cmd` to run later on the VP described inside the + /// `VpExecToken`. + fn queue_command_vp(&mut self, cmd: VpExecToken<T>) -> TmkResult<()>; + + /// Synchronously executes `cmd` on its target VP. + fn start_on_vp(&mut self, cmd: VpExecToken<T>) -> TmkResult<()>; + + /// Starts the target VP (if required) and executes `cmd` with a + /// platform provided default VTL context. + fn start_running_vp_with_default_context(&mut self, cmd: VpExecToken<T>) -> TmkResult<()>; +} + +/// Trait for platforms that support Virtual Trust Levels (VTLs). +pub trait VtlPlatformTrait { + /// Applies VTL protection to the supplied physical address range. + fn apply_vtl_protection_for_memory(&mut self, range: Range<u64>, vtl: Vtl) -> TmkResult<()>; + + /// Enables the given `vtl` on `vp_index` with a default context. + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + + /// Returns the VTL level the caller is currently executing in. + fn get_current_vtl(&self) -> TmkResult<Vtl>; + + /// Performs partition wide initialisation for a given `vtl`. + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()>; + + /// Platform specific global VTL preparation (stage 2 translation, + /// EPT, etc.). + fn setup_vtl_protection(&mut self) -> TmkResult<()>; + + /// Switches the current hardware thread to the higher privileged VTL. + fn switch_to_high_vtl(&mut self); + + /// Switches the current hardware thread back to the lower privileged VTL. + fn switch_to_low_vtl(&mut self); + + /// Sets the state of a register on a VP in a specific VTL. + fn set_vp_register_with_vtl( + &mut self, + register_index: u32, + value: u64, + vtl: Vtl, + ) -> TmkResult<()>; + + /// Gets the state of a register on a VP in a specific VTL. + fn get_vp_register_with_vtl(&mut self, register_index: u32, vtl: Vtl) -> TmkResult<u64>; +} + +/// A token that describes a command to be executed on a specific VP and VTL. +pub struct VpExecToken<T> { + vp_index: u32, + vtl: Vtl, + cmd: Option<Box<dyn FnOnce(&mut T) + 'static + Send>>, +} + +impl<T> VpExecToken<T> { + /// Creates a new executor targeting `vp_index` running in `vtl`. + pub fn new(vp_index: u32, vtl: Vtl) -> Self { + VpExecToken { + vp_index, + vtl, + cmd: None, + } + } + + /// Stores a closure `cmd` that will be executed on the target VP. + /// + /// The closure receives a mutable reference to the platform-specific + /// type `T` that implements `VtlPlatformTrait`. + pub fn command(mut self, cmd: impl FnOnce(&mut T) + 'static + Send) -> Self { + self.cmd = Some(Box::new(cmd)); + self + } + + /// Extracts the tuple `(vp_index, vtl, cmd)` consuming `self`. + pub fn get(mut self) -> (u32, Vtl, Option<Box<dyn FnOnce(&mut T) + 'static + Send>>) { + let cmd = self.cmd.take(); + (self.vp_index, self.vtl, cmd) + } +} diff --git a/opentmk/src/devices/mod.rs b/opentmk/src/devices/mod.rs new file mode 100644 index 0000000000..8ddd037ac3 --- /dev/null +++ b/opentmk/src/devices/mod.rs @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Device modules for OpenTMK. +//! This module includes implementations for various virtual devices used in OpenTMK.
+pub mod tpm; diff --git a/opentmk/src/devices/tpm/mod.rs b/opentmk/src/devices/tpm/mod.rs new file mode 100644 index 0000000000..5976aaf613 --- /dev/null +++ b/opentmk/src/devices/tpm/mod.rs @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! TPM device interface and utility functions. + +use zerocopy::IntoBytes; + +pub mod tpm_protocol; +use tpm_protocol::SessionTagEnum; +use tpm_protocol::TpmCommandError; +use tpm_protocol::protocol::SelfTestCmd; +use tpm_protocol::protocol::TpmCommand; + +/// Trait representing a TPM device. +pub trait TpmDevice<'a> { + /// Sets the command buffer. + fn set_command_buffer(&mut self, buffer: &'a mut [u8]); + /// Sets the response buffer. + fn set_response_buffer(&mut self, buffer: &'a mut [u8]); + /// Copies data from the response buffer to the provided buffer. + fn copy_from_response_buffer(&self, buffer: &mut [u8]); + /// Copies data to the command buffer from the provided buffer. + fn copy_to_command_buffer(&mut self, buffer: &[u8]); + /// Gets the TPM protocol version. + fn get_tcg_protocol_version() -> u32 + where + Self: Sized; + /// Gets the mapped shared memory address. + fn get_mapped_shared_memory() -> u32 + where + Self: Sized; + /// Maps the shared memory to the given GPA. + fn map_shared_memory(gpa: u32) -> u32 + where + Self: Sized; + /// Submits a command to the TPM and returns the response. + fn submit_command(&mut self, buffer: &[u8]) -> [u8; 4096]; + /// Executes the command without checking the command buffer. + /// Used for finer control. + fn execute_command_no_check() + where + Self: Sized; +} + +/// Utility functions for TPM operations. +pub struct TpmUtil; +impl TpmUtil { + /// Returns a TPM self-test command buffer. + pub fn get_self_test_cmd() -> [u8; 4096] { + let session_tag = SessionTagEnum::NoSessions; + let cmd = SelfTestCmd::new(session_tag.into(), true); + let mut buffer = [0; 4096]; + buffer[..cmd.as_bytes().len()].copy_from_slice(cmd.as_bytes()); + buffer + } + + /// Executes a TPM self-test using the provided TPM device. + pub fn exec_self_test(tpm_device: &mut dyn TpmDevice<'_>) -> Result<(), TpmCommandError> { + let session_tag = SessionTagEnum::NoSessions; + let cmd = SelfTestCmd::new(session_tag.into(), true); + let response = tpm_device.submit_command(cmd.as_bytes()); + + match SelfTestCmd::base_validate_reply(&response, session_tag) { + Err(error) => Err(TpmCommandError::InvalidResponse(error)), + Ok((res, false)) => Err(TpmCommandError::TpmCommandFailed { + response_code: res.header.response_code.get(), + })?, + Ok((_res, true)) => Ok(()), + } + } +} diff --git a/opentmk/src/devices/tpm/tpm_protocol.rs b/opentmk/src/devices/tpm/tpm_protocol.rs new file mode 100644 index 0000000000..591dd32343 --- /dev/null +++ b/opentmk/src/devices/tpm/tpm_protocol.rs @@ -0,0 +1,4062 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! TPM 2.0 Protocol types, as defined in the spec + +//! NOTE: once the `tpm-rs` project matures, this hand-rolled code should be *deleted* and +//! replaced with types from that `tpm-rs` project. 
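As a usage sketch (not part of the diff), the `TpmDevice` trait and `TpmUtil` helper above would be driven roughly like this, where `dev` stands in for any concrete device implementation:

```rust
// Sketch: run the TPM2 self-test against any `TpmDevice` implementation.
// `submit_command` sends the marshalled SelfTest command and returns the raw
// 4 KiB response page, which `exec_self_test` then validates.
fn tpm_smoke_test(dev: &mut dyn TpmDevice<'_>) {
    match TpmUtil::exec_self_test(dev) {
        Ok(()) => log::info!("TPM self-test passed"),
        Err(err) => log::error!("TPM self-test failed: {err}"),
    }
}
```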
+ +#![expect(missing_docs)] +use alloc::vec::Vec; + +use bitfield_struct::bitfield; +use thiserror::Error; +use zerocopy::FromBytes; +use zerocopy::FromZeros; +use zerocopy::Immutable; +use zerocopy::IntoBytes; +use zerocopy::KnownLayout; + +use self::packed_nums::*; + +#[expect(non_camel_case_types)] +mod packed_nums { + pub type u16_be = zerocopy::U16; + pub type u32_be = zerocopy::U32; + pub type u64_be = zerocopy::U64; +} + +#[derive(Debug, Error)] +pub enum InvalidInput { + #[error("input data size too large for buffer - input size > upper bound: {0} > {1}")] + BufferSizeTooLarge(usize, usize), + #[error("input list length too long - input length > upper bound: {0} > {1}")] + PcrSelectionsLengthTooLong(usize, usize), + #[error("input payload size too large - input size > upper bound: {0} > {1}")] + NvPublicPayloadTooLarge(usize, usize), +} + +#[derive(Debug, Error)] +pub enum TpmProtoError { + #[error("input user_auth to TpmsSensitiveCreate is invalid")] + TpmsSensitiveCreateUserAuth(#[source] InvalidInput), + #[error("input data to TpmsSensitiveCreate is invalid")] + TpmsSensitiveCreateData(#[source] InvalidInput), + #[error("input auth_policy to TpmtPublic is invalid")] + TpmtPublicAuthPolicy(#[source] InvalidInput), + #[error("input unique to TpmtPublic is invalid")] + TpmtPublicUnique(#[source] InvalidInput), + #[error("input auth_policy to TpmsNvPublic is invalid")] + TpmsNvPublicAuthPolicy(#[source] InvalidInput), + #[error("input outside_info to CreatePrimary is invalid")] + CreatePrimaryOutsideInfo(#[source] InvalidInput), + #[error("input creation_pcr to CreatePrimary is invalid")] + CreatePrimaryCreationPcr(#[source] InvalidInput), + #[error("input auth to NvDefineSpace is invalid")] + NvDefineSpaceAuth(#[source] InvalidInput), + #[error("input public_info to NvDefineSpace is invalid")] + NvDefineSpacePublicInfo(#[source] InvalidInput), + #[error("input data to NvWrite is invalid")] + NvWriteData(#[source] InvalidInput), + #[error("input pcr_allocation to PcrAllocate is invalid")] + PcrAllocatePcrAllocation(#[source] InvalidInput), + #[error("input data to Import is invalid")] + ImportData(#[source] InvalidInput), +} + +#[derive(Debug, Error)] +pub enum ResponseValidationError { + #[error("response size is too small to fit into the buffer")] + ResponseSizeTooSmall, + #[error( + "size {size} specified in the response header does not meet the minimal size of command type {expected_size}, command succeeded: {command_succeeded}" + )] + HeaderResponseSizeMismatch { + size: u32, + expected_size: usize, + command_succeeded: bool, + }, + #[error( + "unexpected session tag {response_session_tag} specified in the response header, expected: {expected_session_tag}, command succeeded: {command_succeeded}" + )] + HeaderSessionTagMismatch { + response_session_tag: u16, + expected_session_tag: u16, + command_succeeded: bool, + }, +} + +#[repr(transparent)] +#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct ReservedHandle(pub u32_be); + +impl PartialEq for u32 { + fn eq(&self, other: &ReservedHandle) -> bool { + other.0.get() == *self + } +} + +impl ReservedHandle { + pub const fn new(kind: u8, offset: u32) -> ReservedHandle { + ReservedHandle(new_u32_be((kind as u32) << 24 | offset)) + } +} + +pub const TPM20_HT_NV_INDEX: u8 = 0x01; +pub const TPM20_HT_PERMANENT: u8 = 0x40; +pub const TPM20_HT_PERSISTENT: u8 = 0x81; + +pub const TPM20_RH_OWNER: ReservedHandle = ReservedHandle::new(TPM20_HT_PERMANENT, 0x01); +pub const TPM20_RH_PLATFORM: 
ReservedHandle = ReservedHandle::new(TPM20_HT_PERMANENT, 0x0c); +pub const TPM20_RH_ENDORSEMENT: ReservedHandle = ReservedHandle::new(TPM20_HT_PERMANENT, 0x0b); +// `TPM_RS_PW` (not `TPM_RH_PW`) +// See Table 28, Section 7.4, "Trusted Platform Module Library Part 2: Structures", revision 1.38. +pub const TPM20_RS_PW: ReservedHandle = ReservedHandle::new(TPM20_HT_PERMANENT, 0x09); + +// Based on Section 2.2, "Registry of Reserved TPM 2.0 Handles and Localities", version 1.1. +pub const NV_INDEX_RANGE_BASE_PLATFORM_MANUFACTURER: u32 = + (TPM20_HT_NV_INDEX as u32) << 24 | 0x400000; +pub const NV_INDEX_RANGE_BASE_TCG_ASSIGNED: u32 = (TPM20_HT_NV_INDEX as u32) << 24 | 0xc00000; + +// The suggested minimal size for the buffer in `TPM2B_MAX_BUFFER`. +// See Table 79, Section 10.4.8, "Trusted Platform Module Library Part 2: Structures", revision 1.38. +pub const MAX_DIGEST_BUFFER_SIZE: usize = 1024; + +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)] +pub struct SessionTag(pub u16_be); + +impl PartialEq for u16 { + fn eq(&self, other: &SessionTag) -> bool { + other.0.get() == *self + } +} + +impl SessionTag { + const fn new(val: u16) -> SessionTag { + SessionTag(new_u16_be(val)) + } +} + +#[derive(Error, Debug)] +pub enum TpmCommandError { + #[error("failed to execute the TPM command")] + TpmExecuteCommand, + #[error("invalid response from the TPM command")] + InvalidResponse(#[source] ResponseValidationError), + #[error("invalid input parameter for the TPM command")] + InvalidInputParameter(#[source] TpmProtoError), + #[error("TPM command failed, response code: {response_code:#x}")] + TpmCommandFailed { response_code: u32 }, + #[error("failed to create the TPM command struct")] + TpmCommandCreationFailed(#[source] TpmProtoError), +} + +#[derive(Debug, Copy, Clone)] +#[repr(u16)] +pub enum SessionTagEnum { + // No structure type specified + Null = 0x8000, + + // A command/response for a command defined in this specification. The + // command/response has no attached sessions. If a command has an + // error and the command tag value is either TPM_ST_NO_SESSIONS or + // TPM_ST_SESSIONS, then this tag value is used for the response code. + NoSessions = 0x8001, + + // A command/response for a command defined in this specification. The + // command/response has one or more attached sessions and the sessionOffset + // field is present. 
+ Sessions = 0x8002, + AttestClock = 0x8014, + AttestCommandAudit = 0x8015, + AttestSessionAudit = 0x8016, + AttestCertify = 0x8017, + AttestQuote = 0x8018, + AttestTick = 0x8019, + AttestTickstamp = 0x801A, + AttestTransport = 0x801B, + AttestCreation = 0x801C, + AttestNv = 0x801D, + // Tickets + Creation = 0x8021, + Verified = 0x8022, + Auth = 0x8023, + Hashcheck = 0x8024, + + // Structure describing a Field Upgrade Policy + FuManifest = 0x8029, +} + +impl From for SessionTag { + fn from(x: SessionTagEnum) -> Self { + SessionTag::new(x as u16) + } +} + +impl SessionTagEnum { + pub fn from_u16(val: u16) -> Option { + let ret = match val { + 0x8000 => Self::Null, + 0x8001 => Self::NoSessions, + 0x8002 => Self::Sessions, + 0x8014 => Self::AttestClock, + 0x8015 => Self::AttestCommandAudit, + 0x8016 => Self::AttestSessionAudit, + 0x8017 => Self::AttestCertify, + 0x8018 => Self::AttestQuote, + 0x8019 => Self::AttestTick, + 0x801A => Self::AttestTickstamp, + 0x801B => Self::AttestTransport, + 0x801C => Self::AttestCreation, + 0x801D => Self::AttestNv, + 0x8021 => Self::Creation, + 0x8022 => Self::Verified, + 0x8023 => Self::Auth, + 0x8024 => Self::Hashcheck, + 0x8029 => Self::FuManifest, + _ => return None, + }; + Some(ret) + } +} + +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct CommandCode(pub u32_be); + +impl PartialEq for u32 { + fn eq(&self, other: &CommandCode) -> bool { + other.0.get() == *self + } +} + +impl CommandCode { + const fn new(val: u32) -> CommandCode { + CommandCode(new_u32_be(val)) + } + + pub fn into_enum(self) -> Option { + CommandCodeEnum::from_u32(self.0.get()) + } +} + +#[expect(non_camel_case_types, clippy::upper_case_acronyms)] +#[derive(Debug, Clone, Copy, PartialEq)] +#[repr(u32)] +pub enum CommandCodeEnum { + NV_UndefineSpaceSpecial = 0x0000011f, + EvictControl = 0x00000120, + HierarchyControl = 0x00000121, + NV_UndefineSpace = 0x00000122, + ChangeEPS = 0x00000124, + ChangePPS = 0x00000125, + Clear = 0x00000126, + ClearControl = 0x00000127, + ClockSet = 0x00000128, + HierarchyChangeAuth = 0x00000129, + NV_DefineSpace = 0x0000012a, + PCR_Allocate = 0x0000012b, + PCR_SetAuthPolicy = 0x0000012c, + PP_Commands = 0x0000012d, + SetPrimaryPolicy = 0x0000012e, + FieldUpgradeStart = 0x0000012f, + ClockRateAdjust = 0x00000130, + CreatePrimary = 0x00000131, + NV_GlobalWriteLock = 0x00000132, + GetCommandAuditDigest = 0x00000133, + NV_Increment = 0x00000134, + NV_SetBits = 0x00000135, + NV_Extend = 0x00000136, + NV_Write = 0x00000137, + NV_WriteLock = 0x00000138, + DictionaryAttackLockReset = 0x00000139, + DictionaryAttackParameters = 0x0000013a, + NV_ChangeAuth = 0x0000013b, + PCR_Event = 0x0000013c, + PCR_Reset = 0x0000013d, + SequenceComplete = 0x0000013e, + SetAlgorithmSet = 0x0000013f, + SetCommandCodeAuditStatus = 0x00000140, + FieldUpgradeData = 0x00000141, + IncrementalSelfTest = 0x00000142, + SelfTest = 0x00000143, + Startup = 0x00000144, + Shutdown = 0x00000145, + StirRandom = 0x00000146, + ActivateCredential = 0x00000147, + Certify = 0x00000148, + PolicyNV = 0x00000149, + CertifyCreation = 0x0000014a, + Duplicate = 0x0000014b, + GetTime = 0x0000014c, + GetSessionAuditDigest = 0x0000014d, + NV_Read = 0x0000014e, + NV_ReadLock = 0x0000014f, + ObjectChangeAuth = 0x00000150, + PolicySecret = 0x00000151, + Rewrap = 0x00000152, + Create = 0x00000153, + ECDH_ZGen = 0x00000154, + HMAC = 0x00000155, + Import = 0x00000156, + Load = 0x00000157, + Quote = 0x00000158, + RSA_Decrypt = 0x00000159, + 
HMAC_Start = 0x0000015b, + SequenceUpdate = 0x0000015c, + Sign = 0x0000015d, + Unseal = 0x0000015e, + PolicySigned = 0x00000160, + ContextLoad = 0x00000161, + ContextSave = 0x00000162, + ECDH_KeyGen = 0x00000163, + EncryptDecrypt = 0x00000164, + FlushContext = 0x00000165, + LoadExternal = 0x00000167, + MakeCredential = 0x00000168, + NV_ReadPublic = 0x00000169, + PolicyAuthorize = 0x0000016a, + PolicyAuthValue = 0x0000016b, + PolicyCommandCode = 0x0000016c, + PolicyCounterTimer = 0x0000016d, + PolicyCpHash = 0x0000016e, + PolicyLocality = 0x0000016f, + PolicyNameHash = 0x00000170, + PolicyOR = 0x00000171, + PolicyTicket = 0x00000172, + ReadPublic = 0x00000173, + RSA_Encrypt = 0x00000174, + StartAuthSession = 0x00000176, + VerifySignature = 0x00000177, + ECC_Parameters = 0x00000178, + FirmwareRead = 0x00000179, + GetCapability = 0x0000017a, + GetRandom = 0x0000017b, + GetTestResult = 0x0000017c, + Hash = 0x0000017d, + PCR_Read = 0x0000017e, + PolicyPCR = 0x0000017f, + PolicyRestart = 0x00000180, + ReadClock = 0x00000181, + PCR_Extend = 0x00000182, + PCR_SetAuthValue = 0x00000183, + NV_Certify = 0x00000184, + EventSequenceComplete = 0x00000185, + HashSequenceStart = 0x00000186, + PolicyPhysicalPresence = 0x00000187, + PolicyDuplicationSelect = 0x00000188, + PolicyGetDigest = 0x00000189, + TestParms = 0x0000018a, + Commit = 0x0000018b, + PolicyPassword = 0x0000018c, + ZGen_2Phase = 0x0000018d, + EC_Ephemeral = 0x0000018e, + PolicyNvWritten = 0x0000018f, + PolicyTemplate = 0x00000190, + CreateLoaded = 0x00000191, + PolicyAuthorizeNV = 0x00000192, + EncryptDecrypt2 = 0x00000193, + AC_GetCapability = 0x00000194, + AC_Send = 0x00000195, + Policy_AC_SendSelect = 0x00000196, + CertifyX509 = 0x00000197, + ACT_SetTimeout = 0x00000198, +} + +impl From for CommandCode { + fn from(x: CommandCodeEnum) -> Self { + CommandCode::new(x as u32) + } +} + +impl CommandCodeEnum { + pub fn from_u32(val: u32) -> Option { + let ret = match val { + 0x0000011f => Self::NV_UndefineSpaceSpecial, + 0x00000120 => Self::EvictControl, + 0x00000121 => Self::HierarchyControl, + 0x00000122 => Self::NV_UndefineSpace, + 0x00000124 => Self::ChangeEPS, + 0x00000125 => Self::ChangePPS, + 0x00000126 => Self::Clear, + 0x00000127 => Self::ClearControl, + 0x00000128 => Self::ClockSet, + 0x00000129 => Self::HierarchyChangeAuth, + 0x0000012a => Self::NV_DefineSpace, + 0x0000012b => Self::PCR_Allocate, + 0x0000012c => Self::PCR_SetAuthPolicy, + 0x0000012d => Self::PP_Commands, + 0x0000012e => Self::SetPrimaryPolicy, + 0x0000012f => Self::FieldUpgradeStart, + 0x00000130 => Self::ClockRateAdjust, + 0x00000131 => Self::CreatePrimary, + 0x00000132 => Self::NV_GlobalWriteLock, + 0x00000133 => Self::GetCommandAuditDigest, + 0x00000134 => Self::NV_Increment, + 0x00000135 => Self::NV_SetBits, + 0x00000136 => Self::NV_Extend, + 0x00000137 => Self::NV_Write, + 0x00000138 => Self::NV_WriteLock, + 0x00000139 => Self::DictionaryAttackLockReset, + 0x0000013a => Self::DictionaryAttackParameters, + 0x0000013b => Self::NV_ChangeAuth, + 0x0000013c => Self::PCR_Event, + 0x0000013d => Self::PCR_Reset, + 0x0000013e => Self::SequenceComplete, + 0x0000013f => Self::SetAlgorithmSet, + 0x00000140 => Self::SetCommandCodeAuditStatus, + 0x00000141 => Self::FieldUpgradeData, + 0x00000142 => Self::IncrementalSelfTest, + 0x00000143 => Self::SelfTest, + 0x00000144 => Self::Startup, + 0x00000145 => Self::Shutdown, + 0x00000146 => Self::StirRandom, + 0x00000147 => Self::ActivateCredential, + 0x00000148 => Self::Certify, + 0x00000149 => Self::PolicyNV, + 0x0000014a => 
Self::CertifyCreation, + 0x0000014b => Self::Duplicate, + 0x0000014c => Self::GetTime, + 0x0000014d => Self::GetSessionAuditDigest, + 0x0000014e => Self::NV_Read, + 0x0000014f => Self::NV_ReadLock, + 0x00000150 => Self::ObjectChangeAuth, + 0x00000151 => Self::PolicySecret, + 0x00000152 => Self::Rewrap, + 0x00000153 => Self::Create, + 0x00000154 => Self::ECDH_ZGen, + 0x00000155 => Self::HMAC, + 0x00000156 => Self::Import, + 0x00000157 => Self::Load, + 0x00000158 => Self::Quote, + 0x00000159 => Self::RSA_Decrypt, + 0x0000015b => Self::HMAC_Start, + 0x0000015c => Self::SequenceUpdate, + 0x0000015d => Self::Sign, + 0x0000015e => Self::Unseal, + 0x00000160 => Self::PolicySigned, + 0x00000161 => Self::ContextLoad, + 0x00000162 => Self::ContextSave, + 0x00000163 => Self::ECDH_KeyGen, + 0x00000164 => Self::EncryptDecrypt, + 0x00000165 => Self::FlushContext, + 0x00000167 => Self::LoadExternal, + 0x00000168 => Self::MakeCredential, + 0x00000169 => Self::NV_ReadPublic, + 0x0000016a => Self::PolicyAuthorize, + 0x0000016b => Self::PolicyAuthValue, + 0x0000016c => Self::PolicyCommandCode, + 0x0000016d => Self::PolicyCounterTimer, + 0x0000016e => Self::PolicyCpHash, + 0x0000016f => Self::PolicyLocality, + 0x00000170 => Self::PolicyNameHash, + 0x00000171 => Self::PolicyOR, + 0x00000172 => Self::PolicyTicket, + 0x00000173 => Self::ReadPublic, + 0x00000174 => Self::RSA_Encrypt, + 0x00000176 => Self::StartAuthSession, + 0x00000177 => Self::VerifySignature, + 0x00000178 => Self::ECC_Parameters, + 0x00000179 => Self::FirmwareRead, + 0x0000017a => Self::GetCapability, + 0x0000017b => Self::GetRandom, + 0x0000017c => Self::GetTestResult, + 0x0000017d => Self::Hash, + 0x0000017e => Self::PCR_Read, + 0x0000017f => Self::PolicyPCR, + 0x00000180 => Self::PolicyRestart, + 0x00000181 => Self::ReadClock, + 0x00000182 => Self::PCR_Extend, + 0x00000183 => Self::PCR_SetAuthValue, + 0x00000184 => Self::NV_Certify, + 0x00000185 => Self::EventSequenceComplete, + 0x00000186 => Self::HashSequenceStart, + 0x00000187 => Self::PolicyPhysicalPresence, + 0x00000188 => Self::PolicyDuplicationSelect, + 0x00000189 => Self::PolicyGetDigest, + 0x0000018a => Self::TestParms, + 0x0000018b => Self::Commit, + 0x0000018c => Self::PolicyPassword, + 0x0000018d => Self::ZGen_2Phase, + 0x0000018e => Self::EC_Ephemeral, + 0x0000018f => Self::PolicyNvWritten, + 0x00000190 => Self::PolicyTemplate, + 0x00000191 => Self::CreateLoaded, + 0x00000192 => Self::PolicyAuthorizeNV, + 0x00000193 => Self::EncryptDecrypt2, + 0x00000194 => Self::AC_GetCapability, + 0x00000195 => Self::AC_Send, + 0x00000196 => Self::Policy_AC_SendSelect, + 0x00000197 => Self::CertifyX509, + 0x00000198 => Self::ACT_SetTimeout, + _ => return None, + }; + + Some(ret) + } +} + +const FLAG_FMT1: u32 = 0x0080; +const FLAG_VER1: u32 = 0x0100; +const FLAG_WARN: u32 = 0x0800 + FLAG_VER1; + +#[repr(u32)] +pub enum ResponseCode { + Success = 0x000, + /// The given handle value is not valid or cannot be used for this + /// command. + Value = FLAG_FMT1 + 0x004, + /// Hierarchy is not enabled or is not correct for the use. + Hierarchy = FLAG_FMT1 + 0x0005, + /// The handle is not correct for the use. + Handle = FLAG_FMT1 + 0x000B, + /// The authorization HMAC check failed. + AuthFail = FLAG_FMT1 + 0x000E, + /// Structure is the wrong size. + Size = FLAG_FMT1 + 0x0015, + /// The TPM was unable to unmarshal a value because there were not + /// enough bytes in the input buffer. + Insufficient = FLAG_FMT1 + 0x001A, + /// Integrity check fail. 
+ Integrity = FLAG_FMT1 + 0x001F, + /// TPM is in failure mode. + Failure = FLAG_VER1 + 0x0001, + /// Use of an authorization session with a context command. + AuthContext = FLAG_VER1 + 0x0045, + /// The NV index is used before being initialized or the state saved by + /// TPM20_CC_Shutdown could not be restored. + NvUninitialized = FLAG_VER1 + 0x04A, + /// ... + Sensitive = FLAG_VER1 + 0x055, + /// Gap for session context ID is too large. + ContextGap = FLAG_WARN + 0x001, + /// Out of memory for object contexts. + ObjectMemory = FLAG_WARN + 0x002, + /// Out of memory for session contexts. + SessionMemory = FLAG_WARN + 0x003, + /// Out of shared object/session memory or need space for internal + /// operations. + Memory = FLAG_WARN + 0x004, + /// Out of session handles - a session must be flushed before a new + /// session may be created. + SessionHandles = FLAG_WARN + 0x005, + /// Out of object handles - the handle space for objects is depleted and + /// a reboot is required . + /// NOTE:This cannot occur on the reference implementation. + ObjectHandles = FLAG_WARN + 0x006, + /// The TPM has suspended operation on the command. Forward progress was + /// made and the command may be retried. + Yielded = FLAG_WARN + 0x008, + /// The command was cancelled. The command may be retried. + Cancelled = FLAG_WARN + 0x009, + /// TPM is performing self tests. + Testing = FLAG_WARN + 0x00A, + /// The TPM is rate-limiting accesses to prevent wearout of NV. + NvRate = FLAG_WARN + 0x020, + /// Commands are not being accepted because the TPM is in DA lockout + /// mode. + Lockout = FLAG_WARN + 0x021, + /// The TPM was not able to start the command. Retry might work. + Retry = FLAG_WARN + 0x022, + /// The command may require writing of NV and NV is not current + /// accessible. + NvUnavailable = FLAG_WARN + 0x023, + /// This value is reserved and shall not be returned by the TPM. + NotUsed = FLAG_WARN + 0x07F, + /// Add to a parameter-, handle-, or session-related error. 
+ Rc1 = 0x100, +} + +impl ResponseCode { + pub fn from_u32(val: u32) -> Option { + let ret = match val { + x if x == ResponseCode::Success as u32 => ResponseCode::Success, + x if x == ResponseCode::Value as u32 => ResponseCode::Value, + x if x == ResponseCode::Hierarchy as u32 => ResponseCode::Hierarchy, + x if x == ResponseCode::Handle as u32 => ResponseCode::Handle, + x if x == ResponseCode::AuthFail as u32 => ResponseCode::AuthFail, + x if x == ResponseCode::Size as u32 => ResponseCode::Size, + x if x == ResponseCode::Insufficient as u32 => ResponseCode::Insufficient, + x if x == ResponseCode::Integrity as u32 => ResponseCode::Integrity, + x if x == ResponseCode::Failure as u32 => ResponseCode::Failure, + x if x == ResponseCode::AuthContext as u32 => ResponseCode::AuthContext, + x if x == ResponseCode::NvUninitialized as u32 => ResponseCode::NvUninitialized, + x if x == ResponseCode::Sensitive as u32 => ResponseCode::Sensitive, + x if x == ResponseCode::ContextGap as u32 => ResponseCode::ContextGap, + x if x == ResponseCode::ObjectMemory as u32 => ResponseCode::ObjectMemory, + x if x == ResponseCode::SessionMemory as u32 => ResponseCode::SessionMemory, + x if x == ResponseCode::Memory as u32 => ResponseCode::Memory, + x if x == ResponseCode::SessionHandles as u32 => ResponseCode::SessionHandles, + x if x == ResponseCode::ObjectHandles as u32 => ResponseCode::ObjectHandles, + x if x == ResponseCode::Yielded as u32 => ResponseCode::Yielded, + x if x == ResponseCode::Cancelled as u32 => ResponseCode::Cancelled, + x if x == ResponseCode::Testing as u32 => ResponseCode::Testing, + x if x == ResponseCode::NvRate as u32 => ResponseCode::NvRate, + x if x == ResponseCode::Lockout as u32 => ResponseCode::Lockout, + x if x == ResponseCode::Retry as u32 => ResponseCode::Retry, + x if x == ResponseCode::NvUnavailable as u32 => ResponseCode::NvUnavailable, + x if x == ResponseCode::NotUsed as u32 => ResponseCode::NotUsed, + _ => return None, + }; + Some(ret) + } +} + +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct AlgId(pub u16_be); + +impl PartialEq for u16 { + fn eq(&self, other: &AlgId) -> bool { + other.0.get() == *self + } +} + +impl AlgId { + const fn new(val: u16) -> AlgId { + AlgId(new_u16_be(val)) + } +} + +#[expect(clippy::upper_case_acronyms)] +#[derive(Debug)] +#[repr(u16)] +pub enum AlgIdEnum { + RSA = 0x0001, + SHA = 0x0004, + AES = 0x0006, + SHA256 = 0x000b, + SHA384 = 0x000c, + SHA512 = 0x000d, + NULL = 0x0010, + SM3_256 = 0x0012, + RSASSA = 0x0014, + CFB = 0x0043, +} + +impl From for AlgId { + fn from(x: AlgIdEnum) -> Self { + AlgId::new(x as u16) + } +} + +impl AlgIdEnum { + pub fn from_u16(val: u16) -> Option { + let ret = match val { + 0x0004 => Self::SHA, + 0x000b => Self::SHA256, + 0x000c => Self::SHA384, + 0x000d => Self::SHA512, + 0x0012 => Self::SM3_256, + _ => return None, + }; + + Some(ret) + } +} + +/// `TPMA_OBJECT` +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct TpmaObject(pub u32_be); + +impl TpmaObject { + const fn new(val: u32) -> Self { + Self(new_u32_be(val)) + } +} + +impl From for TpmaObject { + fn from(x: TpmaObjectBits) -> Self { + let val: u32 = x.into(); + Self::new(val) + } +} + +impl From for TpmaObject { + fn from(x: u32) -> Self { + Self::new(x) + } +} + +#[bitfield(u32)] +pub struct TpmaObjectBits { + _reserved0: bool, + pub fixed_tpm: bool, + pub st_clear: bool, + _reserved1: bool, + pub fixed_parent: bool, + 
pub sensitive_data_origin: bool, + pub user_with_auth: bool, + pub admin_with_policy: bool, + #[bits(2)] + _reserved2: u8, + pub no_da: bool, + pub encrypted_duplication: bool, + #[bits(4)] + _reserved3: u8, + pub restricted: bool, + pub decrypt: bool, + pub sign_encrypt: bool, + #[bits(13)] + _reserved4: u16, +} + +/// `TPMA_NV` +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct TpmaNv(pub u32_be); + +impl TpmaNv { + const fn new(val: u32) -> Self { + Self(new_u32_be(val)) + } +} + +impl From for TpmaNv { + fn from(x: TpmaNvBits) -> Self { + let val: u32 = x.into(); + Self::new(val) + } +} + +impl From for TpmaNv { + fn from(x: u32) -> Self { + Self::new(x) + } +} + +#[bitfield(u32)] +pub struct TpmaNvBits { + pub nv_ppwrite: bool, + pub nv_ownerwrite: bool, + pub nv_authwrite: bool, + pub nv_policywrite: bool, + // bits 7:4: `TPM_NT` + // 0001 - `tpm_nt_counter` + pub nt_counter: bool, + // 0010 - `tpm_nt_bits` + pub nt_bits: bool, + // 0100 - `tpm_nt_extend` + pub nt_extend: bool, + _unused0: bool, + // bits 9:8 are reserved + #[bits(2)] + _reserved1: u8, + pub nv_policy_delete: bool, + pub nv_writelocked: bool, + pub nv_writeall: bool, + pub nv_writedefine: bool, + pub nv_write_stclear: bool, + pub nv_globallock: bool, + pub nv_ppread: bool, + pub nv_ownerread: bool, + pub nv_authread: bool, + pub nv_policyread: bool, + // bits 24:20 are reserved + #[bits(5)] + _reserved2: u8, + pub nv_no_da: bool, + pub nv_orderly: bool, + pub nv_clear_stclear: bool, + pub nv_readlocked: bool, + pub nv_written: bool, + pub nv_platformcreate: bool, + pub nv_read_stclear: bool, +} + +/// Workaround to allow constructing a zerocopy U64 in a const context. +const fn new_u64_be(val: u64) -> u64_be { + u64_be::from_bytes(val.to_be_bytes()) +} + +/// Workaround to allow constructing a zerocopy U32 in a const context. +const fn new_u32_be(val: u32) -> u32_be { + u32_be::from_bytes(val.to_be_bytes()) +} + +/// Workaround to allow constructing a zerocopy U16 in a const context. +const fn new_u16_be(val: u16) -> u16_be { + u16_be::from_bytes(val.to_be_bytes()) +} + +/// TPM command / response definitions +pub mod protocol { + use super::*; + + /// Common structs shared between multiple command / response structs + pub mod common { + use super::*; + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct CmdHeader { + pub session_tag: SessionTag, + pub size: u32_be, + pub command_code: CommandCode, + } + + impl CmdHeader { + /// Construct a header for a fixed-size command + pub fn new( + session_tag: SessionTag, + command_code: CommandCode, + ) -> CmdHeader { + CmdHeader { + session_tag, + size: (size_of::() as u32).into(), + command_code, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ReplyHeader { + pub session_tag: u16_be, + pub size: u32_be, + pub response_code: u32_be, + } + + impl ReplyHeader { + /// Performs a few command-agnostic validation checks: + /// - Ensures the size matches the size_of the provided `FullReply` type + /// - Compares provided session_tag + /// + /// Returns Ok(bool) if the validation passes. The bool value indicates whether + /// the response_code is [`ResponseCode::Success`] or not. + /// Returns Err(ResponseValidationError) otherwise. 
+ pub fn base_validation( + &self, + session_tag: SessionTag, + expected_size: u32, + ) -> Result { + // Response code other than Success indicates that the command fails + // See Section 6.2, "Trusted Platform Module Library Part 3: Commands", revision 1.38. + let command_succeeded = ResponseCode::from_u32(self.response_code.get()) + .map(|c| matches!(c, ResponseCode::Success)) + .unwrap_or(false); + + let (expected_tag, expected_size) = if command_succeeded { + (session_tag, expected_size as usize) + } else { + // If the command fails, the expected tag should be NoSessions and the minimal size + // of the response should be the size of the header. + // See Section 6.1, "Trusted Platform Module Library Part 3: Commands", revision 1.38. + // + // DEVNOTE: we do not handle the special case caused by sending unsupported commands where + // the session tag will be `TPM_RC_BAD_TAG` instead. + (SessionTagEnum::NoSessions.into(), size_of::()) + }; + + if self.session_tag.get() != expected_tag { + Err(ResponseValidationError::HeaderSessionTagMismatch { + response_session_tag: self.session_tag.get(), + expected_session_tag: session_tag.0.get(), + command_succeeded, + })? + } + + // Allow the size specified in the header to be equal to or larger than the expected size in case + // that the expected size does not take the authorization area into account. + if (self.size.get() as usize) < expected_size { + Err(ResponseValidationError::HeaderResponseSizeMismatch { + size: self.size.get(), + expected_size, + command_succeeded, + })? + } + + Ok(command_succeeded) + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct CmdAuth { + handle: ReservedHandle, + nonce_2b: u16_be, + session: u8, + auth_2b: u16_be, + } + + impl CmdAuth { + pub fn new(handle: ReservedHandle, nonce_2b: u16, session: u8, auth_2b: u16) -> Self { + CmdAuth { + handle, + nonce_2b: nonce_2b.into(), + session, + auth_2b: auth_2b.into(), + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ReplyAuth { + pub nonce_2b: u16_be, + pub session: u8, + pub auth_2b: u16_be, + } + } + + use common::CmdHeader; + use common::ReplyHeader; + + /// Marker trait for a struct that corresponds to a TPM Command + pub trait TpmCommand: IntoBytes + FromBytes + Sized + Immutable + KnownLayout { + type Reply: TpmReply; + + fn base_validate_reply( + reply_buf: &[u8], + session_tag: impl Into, + ) -> Result<(Self::Reply, bool), ResponseValidationError> { + let res = Self::Reply::deserialize(reply_buf) + .ok_or(ResponseValidationError::ResponseSizeTooSmall)?; + let succeeded = res.base_validation(session_tag.into())?; + + Ok((res, succeeded)) + } + } + + /// Marker trait for a struct that corresponds to a TPM Reply + pub trait TpmReply: IntoBytes + FromBytes + Sized + Immutable + KnownLayout { + type Command: TpmCommand; + + fn base_validation( + &self, + session_tag: SessionTag, + ) -> Result { + // `Reply::deserialize` guarantees this should not fail + let header = ReplyHeader::ref_from_prefix(self.as_bytes()) + .expect("unexpected response size") + .0; // TODO: zerocopy: error (https://github.com/microsoft/openvmm/issues/759) + header.base_validation(session_tag, self.payload_size() as u32) + } + fn deserialize(bytes: &[u8]) -> Option; + fn payload_size(&self) -> usize; + } + + /// General type for TPM 2.0 sized buffers. 
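A sketch of how the `TpmCommand`/`TpmReply` plumbing above is intended to be used (not part of the diff); the session tag passed in must match the one the command was built with:

```rust
// Sketch: deserialize and header-check a raw reply for some command type `C`.
// The bool from `base_validate_reply` is true only when the TPM returned
// `ResponseCode::Success`; otherwise the parsed reply still carries the
// failing response code in its header.
fn parse_reply<C: TpmCommand>(raw: &[u8]) -> Result<C::Reply, ResponseValidationError> {
    let (reply, ok) = C::base_validate_reply(raw, SessionTagEnum::NoSessions)?;
    if !ok {
        log::warn!("TPM command reported a failure response code");
    }
    Ok(reply)
}
```

For reference, the warning-class response codes defined above compose as `FLAG_WARN` (0x900) plus an offset, so for example `NvRate` decodes to 0x920 on the wire.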
+ #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct Tpm2bBuffer { + pub size: u16_be, + // Use value that is large enough as the buffer size so that we + // only need to define one struct. + pub buffer: [u8; MAX_DIGEST_BUFFER_SIZE], + } + + impl Tpm2bBuffer { + /// Create a `Tpm2bBuffer` from a slice. + pub fn new(data: &[u8]) -> Result { + let size = data.len(); + if size > MAX_DIGEST_BUFFER_SIZE { + Err(InvalidInput::BufferSizeTooLarge( + size, + MAX_DIGEST_BUFFER_SIZE, + ))? + } + + let mut buffer = [0u8; MAX_DIGEST_BUFFER_SIZE]; + buffer[..size].copy_from_slice(data); + + Ok(Self { + size: new_u16_be(size as u16), + buffer, + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.size.as_bytes()); + buffer.extend_from_slice(&self.buffer[..self.size.get() as usize]); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + + let size: u16 = u16_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + if size as usize > MAX_DIGEST_BUFFER_SIZE { + return None; + } + + start = end; + end += size as usize; + if bytes.len() < end { + return None; + } + let mut buffer = [0u8; MAX_DIGEST_BUFFER_SIZE]; + buffer[..size as usize].copy_from_slice(&bytes[start..end]); + + Some(Self { + size: size.into(), + buffer, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.size); + payload_size += self.size.get() as usize; + + payload_size + } + } + + /// `TPML_PCR_SELECTION` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmlPcrSelection { + pub count: u32_be, + pub pcr_selections: [PcrSelection; 5], + } + + impl TpmlPcrSelection { + pub fn new(pcr_selections: &[PcrSelection]) -> Result { + let count = pcr_selections.len(); + if count > 5 { + Err(InvalidInput::PcrSelectionsLengthTooLong(count, 5))? 
+ } + + let mut base = [PcrSelection::new_zeroed(); 5]; + base[..count].copy_from_slice(pcr_selections); + + Ok(Self { + count: new_u32_be(count as u32), + pcr_selections: base, + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.count.as_bytes()); + for i in 0..self.count.get() { + buffer.extend_from_slice(&self.pcr_selections[i as usize].serialize()); + } + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + if bytes.len() < end { + return None; + } + + let count: u32 = u32_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + if count > 5 { + return None; + } + + let mut pcr_selections = [PcrSelection::new_zeroed(); 5]; + for i in 0..count { + start = end; + pcr_selections[i as usize] = PcrSelection::deserialize(&bytes[start..])?; + end += pcr_selections[i as usize].payload_size(); + } + + Some(Self { + count: count.into(), + pcr_selections, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + let count = self.count; + + payload_size += size_of_val(&count); + for i in 0..count.get() { + payload_size += self.pcr_selections[i as usize].payload_size(); + } + + payload_size + } + } + + /// `TPMS_SENSITIVE_CREATE` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmsSensitiveCreate { + user_auth: Tpm2bBuffer, + data: Tpm2bBuffer, + } + + impl TpmsSensitiveCreate { + pub fn new(user_auth: &[u8], data: &[u8]) -> Result { + let user_auth = + Tpm2bBuffer::new(user_auth).map_err(TpmProtoError::TpmsSensitiveCreateUserAuth)?; + let data = Tpm2bBuffer::new(data).map_err(TpmProtoError::TpmsSensitiveCreateData)?; + Ok(Self { user_auth, data }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(&self.user_auth.serialize()); + buffer.extend_from_slice(&self.data.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += self.user_auth.payload_size(); + payload_size += self.data.payload_size(); + + payload_size + } + } + + /// `TPM2B_SENSITIVE_CREATE` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct Tpm2bSensitiveCreate { + size: u16_be, + sensitive: TpmsSensitiveCreate, + } + + impl Tpm2bSensitiveCreate { + pub fn new(sensitive: TpmsSensitiveCreate) -> Self { + let size = sensitive.payload_size() as u16; + Self { + size: size.into(), + sensitive, + } + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.size.as_bytes()); + buffer.extend_from_slice(&self.sensitive.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + let size = self.size; + + payload_size += size_of_val(&size); + payload_size += self.sensitive.payload_size(); + + payload_size + } + } + + /// `TPMT_RSA_SCHEME` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout, PartialEq)] + pub struct TpmtRsaScheme { + scheme: AlgId, + hash_alg: AlgId, + } + + impl TpmtRsaScheme { + pub fn new(scheme: AlgId, hash_alg: Option) -> Self { + let hash_alg = hash_alg.map_or_else(|| AlgId::new(0), |v| v); + + Self { scheme, hash_alg } + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.scheme.as_bytes()); + + // No parameters when 
algorithm is NULL + if self.scheme != AlgIdEnum::NULL.into() { + // Only support scheme with hash (e.g., RSASSA) for now + buffer.extend_from_slice(self.hash_alg.as_bytes()); + } + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + if bytes.len() < end { + return None; + } + + let scheme = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + let hash_alg = if scheme != AlgIdEnum::NULL.into() { + start = end; + end += size_of::(); + AlgId::read_from_prefix(&bytes[start..end]).ok()?.0 // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + } else { + AlgId::new(0) + }; + + Some(Self { scheme, hash_alg }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.scheme); + + if self.scheme != AlgIdEnum::NULL.into() { + payload_size += size_of_val(&self.hash_alg); + } + + payload_size + } + } + + /// `TPMT_SYM_DEF_OBJECT` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout, PartialEq)] + pub struct TpmtSymDefObject { + algorithm: AlgId, + key_bits: u16_be, + mode: AlgId, + } + + impl TpmtSymDefObject { + pub fn new(algorithm: AlgId, key_bits: Option, mode: Option) -> Self { + let key_bits = key_bits.map_or_else(|| new_u16_be(0), |v| v.into()); + let mode = mode.map_or_else(|| AlgId::new(0), |v| v); + + Self { + algorithm, + key_bits, + mode, + } + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.algorithm.as_bytes()); + + // No parameters when algorithm is NULL + if self.algorithm != AlgIdEnum::NULL.into() { + buffer.extend_from_slice(self.key_bits.as_bytes()); + buffer.extend_from_slice(self.mode.as_bytes()); + } + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + if bytes.len() < end { + return None; + } + + let algorithm = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + let (key_bits, mode) = if algorithm != AlgIdEnum::NULL.into() { + start = end; + end += size_of::(); + let key_bits = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + let mode = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + (key_bits, mode) + } else { + (new_u16_be(0), AlgId::new(0)) + }; + + Some(Self { + algorithm, + key_bits, + mode, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.algorithm); + + if self.algorithm != AlgIdEnum::NULL.into() { + payload_size += size_of_val(&self.key_bits); + payload_size += size_of_val(&self.mode); + } + + payload_size + } + } + + /// `TPMS_RSA_PARMS` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout, PartialEq)] + pub struct TpmsRsaParams { + symmetric: TpmtSymDefObject, + scheme: TpmtRsaScheme, + key_bits: u16_be, + pub exponent: u32_be, + } + + impl TpmsRsaParams { + pub fn new( + symmetric: TpmtSymDefObject, + scheme: TpmtRsaScheme, + key_bits: u16, + exponent: u32, + ) -> Self { + Self { + symmetric, + scheme, 
+ key_bits: key_bits.into(), + exponent: exponent.into(), + } + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(&self.symmetric.serialize()); + buffer.extend_from_slice(&self.scheme.serialize()); + buffer.extend_from_slice(self.key_bits.as_bytes()); + buffer.extend_from_slice(self.exponent.as_bytes()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = 0; + + let symmetric = TpmtSymDefObject::deserialize(&bytes[start..])?; + end += symmetric.payload_size(); + + start = end; + let scheme = TpmtRsaScheme::deserialize(&bytes[start..])?; + end += scheme.payload_size(); + + // TODO: zerocopy: as of zerocopy 0.8 this can be simplified with `read_from_bytes`....ok()?, to avoid (https://github.com/microsoft/openvmm/issues/759) + // manual size checks. Leaving this code as-is to reduce risk of the 0.7 -> 0.8 move. + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let key_bits = u16_be::read_from_bytes(&bytes[start..end]).ok()?; + + // TODO: zerocopy: as of zerocopy 0.8 this can be simplified with `read_from_bytes`....ok()?, to avoid (https://github.com/microsoft/openvmm/issues/759) + // manual size checks. Leaving this code as-is to reduce risk of the 0.7 -> 0.8 move. + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let exponent = u32_be::read_from_bytes(&bytes[start..end]).ok()?; + + Some(Self { + symmetric, + scheme, + key_bits, + exponent, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += self.symmetric.payload_size(); + payload_size += self.scheme.payload_size(); + payload_size += size_of_val(&self.key_bits); + payload_size += size_of_val(&self.exponent); + + payload_size + } + } + + /// `TPMT_PUBLIC` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmtPublic { + my_type: AlgId, + name_alg: AlgId, + object_attributes: TpmaObject, + auth_policy: Tpm2bBuffer, + // `TPMS_RSA_PARAMS` + pub parameters: TpmsRsaParams, + // `TPM2B_PUBLIC_KEY_RSA` + pub unique: Tpm2bBuffer, + } + + impl TpmtPublic { + pub fn new( + my_type: AlgId, + name_alg: AlgId, + object_attributes: TpmaObjectBits, + auth_policy: &[u8], + parameters: TpmsRsaParams, + unique: &[u8], + ) -> Result { + let auth_policy = + Tpm2bBuffer::new(auth_policy).map_err(TpmProtoError::TpmtPublicAuthPolicy)?; + let unique = Tpm2bBuffer::new(unique).map_err(TpmProtoError::TpmtPublicUnique)?; + Ok(Self { + my_type, + name_alg, + object_attributes: object_attributes.into(), + auth_policy, + parameters, + unique, + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.my_type.as_bytes()); + buffer.extend_from_slice(self.name_alg.as_bytes()); + buffer.extend_from_slice(self.object_attributes.as_bytes()); + buffer.extend_from_slice(&self.auth_policy.serialize()); + buffer.extend_from_slice(&self.parameters.serialize()); + buffer.extend_from_slice(&self.unique.serialize()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let r#type = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let name_alg = 
AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let object_attributes: u32 = u32_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let auth_policy = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += auth_policy.payload_size(); + if bytes.len() < end { + return None; + } + + start = end; + let parameters = TpmsRsaParams::deserialize(&bytes[start..])?; + end += parameters.payload_size(); + + start = end; + let unique = Tpm2bBuffer::deserialize(&bytes[start..])?; + + Some(Self { + my_type: r#type, + name_alg, + object_attributes: object_attributes.into(), + auth_policy, + parameters, + unique, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.my_type); + payload_size += size_of_val(&self.name_alg); + payload_size += size_of_val(&self.object_attributes); + payload_size += self.auth_policy.payload_size(); + payload_size += self.parameters.payload_size(); + payload_size += self.unique.payload_size(); + + payload_size + } + } + + /// `TPM2B_PUBLIC` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct Tpm2bPublic { + pub size: u16_be, + pub public_area: TpmtPublic, + } + + impl Tpm2bPublic { + pub fn new(public_area: TpmtPublic) -> Self { + let size = public_area.payload_size() as u16; + Self { + size: size.into(), + public_area, + } + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.size.as_bytes()); + buffer.extend_from_slice(&self.public_area.serialize()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let end = size_of::(); + + if bytes.len() < end { + return None; + } + + let size = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let public_area = TpmtPublic::deserialize(&bytes[start..])?; + + Some(Self { size, public_area }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.size); + payload_size += self.public_area.payload_size(); + + payload_size + } + } + + /// `TPMS_CREATION_DATA` + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmsCreationData { + pcr_select: TpmlPcrSelection, + pcr_digest: Tpm2bBuffer, + locality: u8, + parent_name_alg: AlgId, + parent_name: Tpm2bBuffer, + parent_qualified_name: Tpm2bBuffer, + outside_info: Tpm2bBuffer, + } + + impl TpmsCreationData { + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = 0; + + let pcr_select = TpmlPcrSelection::deserialize(&bytes[start..])?; + end += pcr_select.payload_size(); + + start = end; + let pcr_digest = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += pcr_digest.payload_size(); + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let locality = bytes[start]; + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let parent_name_alg = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let parent_name = 
Tpm2bBuffer::deserialize(&bytes[start..])?; + end += parent_name.payload_size(); + + start = end; + let parent_qualified_name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += parent_qualified_name.payload_size(); + + start = end; + let outside_info = Tpm2bBuffer::deserialize(&bytes[start..])?; + + Some(Self { + pcr_select, + pcr_digest, + locality, + parent_name_alg, + parent_name, + parent_qualified_name, + outside_info, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += self.pcr_select.payload_size(); + payload_size += self.pcr_digest.payload_size(); + payload_size += size_of_val(&self.locality); + payload_size += size_of_val(&self.parent_name_alg); + payload_size += self.parent_name.payload_size(); + payload_size += self.parent_qualified_name.payload_size(); + payload_size += self.outside_info.payload_size(); + + payload_size + } + } + + /// `TPM2B_CREATION_DATA` + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + #[repr(C)] + pub struct Tpm2bCreationData { + size: u16_be, + creation_data: TpmsCreationData, + } + + impl Tpm2bCreationData { + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let end = size_of::(); + + if bytes.len() < end { + return None; + } + + let size = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let creation_data = TpmsCreationData::deserialize(&bytes[start..])?; + + Some(Self { + size, + creation_data, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.size); + payload_size += self.creation_data.payload_size(); + + payload_size + } + } + + /// `TPMT_TK_CREATION` + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmtTkCreation { + tag: SessionTag, + hierarchy: ReservedHandle, + digest: Tpm2bBuffer, + } + + impl TpmtTkCreation { + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let tag = SessionTag::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let hierarchy = ReservedHandle::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let digest = Tpm2bBuffer::deserialize(&bytes[start..])?; + + Some(Self { + tag, + hierarchy, + digest, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.tag); + payload_size += size_of_val(&self.hierarchy); + payload_size += self.digest.payload_size(); + + payload_size + } + } + + /// `TPMS_NV_PUBLIC` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmsNvPublic { + nv_index: u32_be, + name_alg: AlgId, + pub attributes: TpmaNv, + auth_policy: Tpm2bBuffer, + pub data_size: u16_be, + } + + impl TpmsNvPublic { + pub fn new( + nv_index: u32, + name_alg: AlgId, + attributes: TpmaNvBits, + auth_policy: &[u8], + data_size: u16, + ) -> Result { + let auth_policy = + Tpm2bBuffer::new(auth_policy).map_err(TpmProtoError::TpmsNvPublicAuthPolicy)?; + + Ok(Self { + nv_index: nv_index.into(), + name_alg, + attributes: attributes.into(), + auth_policy, + 
data_size: data_size.into(), + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.nv_index.as_bytes()); + buffer.extend_from_slice(self.name_alg.as_bytes()); + buffer.extend_from_slice(self.attributes.as_bytes()); + buffer.extend_from_slice(&self.auth_policy.serialize()); + buffer.extend_from_slice(self.data_size.as_bytes()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let nv_index: u32 = u32_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let name_alg = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let attributes: u32 = u32_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let auth_policy = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += auth_policy.payload_size(); + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let data_size = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + Some(Self { + nv_index: nv_index.into(), + name_alg, + attributes: attributes.into(), + auth_policy, + data_size, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.nv_index); + payload_size += size_of_val(&self.name_alg); + payload_size += size_of_val(&self.attributes); + payload_size += self.auth_policy.payload_size(); + payload_size += size_of_val(&self.data_size); + + payload_size + } + } + + /// `TPM2B_NV_PUBLIC` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct Tpm2bNvPublic { + size: u16_be, + pub nv_public: TpmsNvPublic, + } + + impl Tpm2bNvPublic { + pub fn new(nv_public: TpmsNvPublic) -> Result { + let size = nv_public.payload_size(); + if size > u16::MAX.into() { + Err(InvalidInput::NvPublicPayloadTooLarge(size, u16::MAX.into()))? 
+ } + + Ok(Self { + size: (size as u16).into(), + nv_public, + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.size.as_bytes()); + buffer.extend_from_slice(&self.nv_public.serialize()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let end = size_of::(); + + if bytes.len() < end { + return None; + } + + let size = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let nv_public = TpmsNvPublic::deserialize(&bytes[start..])?; + + Some(Self { size, nv_public }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.size); + payload_size += self.nv_public.payload_size(); + + payload_size + } + } + + // === ClearControl === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ClearControlCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + auth_size: u32_be, + auth: common::CmdAuth, + disable: u8, + } + + impl ClearControlCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + disable: bool, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::ClearControl.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + disable: disable as u8, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ClearControlReply { + pub header: ReplyHeader, + pub param_size: u32_be, + pub auth: common::ReplyAuth, + } + + impl TpmCommand for ClearControlCmd { + type Reply = ClearControlReply; + } + + impl TpmReply for ClearControlReply { + type Command = ClearControlCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Clear === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ClearCmd { + header: CmdHeader, + + auth_handle: ReservedHandle, + auth_size: u32_be, + auth: common::CmdAuth, + } + + impl ClearCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::Clear.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ClearReply { + pub header: ReplyHeader, + pub param_size: u32_be, + pub auth: common::ReplyAuth, + } + + impl TpmCommand for ClearCmd { + type Reply = ClearReply; + } + + impl TpmReply for ClearReply { + type Command = ClearCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? 
(https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Startup === // + + pub enum StartupType { + Clear, + State, + } + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct StartupCmd { + header: CmdHeader, + startup_type: u16_be, + } + + impl StartupCmd { + pub fn new(session_tag: SessionTag, startup_type: StartupType) -> StartupCmd { + StartupCmd { + header: CmdHeader::new::(session_tag, CommandCodeEnum::Startup.into()), + startup_type: match startup_type { + StartupType::Clear => 0, + StartupType::State => 1, + } + .into(), + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct StartupReply { + pub header: ReplyHeader, + } + + impl TpmCommand for StartupCmd { + type Reply = StartupReply; + } + + impl TpmReply for StartupReply { + type Command = StartupCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Self Test === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct SelfTestCmd { + header: CmdHeader, + full_test: u8, + } + + impl SelfTestCmd { + pub fn new(session_tag: SessionTag, full_test: bool) -> SelfTestCmd { + SelfTestCmd { + header: CmdHeader::new::(session_tag, CommandCodeEnum::SelfTest.into()), + full_test: full_test as u8, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct SelfTestReply { + pub header: ReplyHeader, + } + + impl TpmCommand for SelfTestCmd { + type Reply = SelfTestReply; + } + + impl TpmReply for SelfTestReply { + type Command = SelfTestCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Hierarchy Control === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct HierarchyControlCmd { + header: CmdHeader, + + auth_handle: ReservedHandle, + auth_size: u32_be, + auth: common::CmdAuth, + + hierarchy: ReservedHandle, + state: u8, + } + + impl HierarchyControlCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + hierarchy: ReservedHandle, + state: bool, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::HierarchyControl.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + hierarchy, + state: state as u8, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct HierarchyControlReply { + pub header: ReplyHeader, + pub param_size: u32_be, + pub auth: common::ReplyAuth, + } + + impl TpmCommand for HierarchyControlCmd { + type Reply = HierarchyControlReply; + } + + impl TpmReply for HierarchyControlReply { + type Command = HierarchyControlCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? 
(https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Pcr Allocate === // + + #[repr(C)] + #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct PcrSelection { + pub hash: AlgId, + pub size_of_select: u8, + pub bitmap: [u8; 3], + } + + impl PcrSelection { + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.hash.as_bytes()); + buffer.extend_from_slice(self.size_of_select.as_bytes()); + buffer.extend_from_slice(&self.bitmap[..self.size_of_select as usize]); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let hash = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let size_of_select = bytes[start]; + if size_of_select > 3 { + return None; + } + + start = end; + end += size_of_select as usize; + if bytes.len() < end { + return None; + } + let mut bitmap = [0u8; 3]; + bitmap[..size_of_select as usize].copy_from_slice(&bytes[start..end]); + + Some(Self { + hash, + size_of_select, + bitmap, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.hash); + payload_size += size_of_val(&self.size_of_select); + payload_size += self.size_of_select as usize; + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct PcrAllocateCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameters + pcr_allocation: TpmlPcrSelection, + } + + impl PcrAllocateCmd { + pub const HASH_ALG_TO_ID: [(u32, AlgId); 5] = [ + (1 << 0, AlgId::new(AlgIdEnum::SHA as u16)), + (1 << 1, AlgId::new(AlgIdEnum::SHA256 as u16)), + (1 << 2, AlgId::new(AlgIdEnum::SHA384 as u16)), + (1 << 3, AlgId::new(AlgIdEnum::SHA512 as u16)), + (1 << 4, AlgId::new(AlgIdEnum::SM3_256 as u16)), + ]; + + /// # Panics + /// + /// `pcr_selections` must be have a len less than `TCG_BOOT_HASH_COUNT` + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + pcr_selections: &[PcrSelection], + ) -> Result { + let pcr_allocation = TpmlPcrSelection::new(pcr_selections) + .map_err(TpmProtoError::PcrAllocatePcrAllocation)?; + + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::PCR_Allocate.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + pcr_allocation, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + Ok(cmd) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + buffer.extend_from_slice(&self.pcr_allocation.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + payload_size += self.pcr_allocation.payload_size(); + + payload_size + } + } + + 
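+    // Illustrative example only (not part of the protocol definitions above): each
+    // bit in `PcrSelection::bitmap` selects the PCR with the matching index, so
+    // selecting PCRs 0-7 for SHA-256 with the usual 3-byte selection size would be
+    //
+    //     let sha256_pcrs_0_to_7 = PcrSelection {
+    //         hash: AlgId::new(AlgIdEnum::SHA256 as u16),
+    //         size_of_select: 3,
+    //         bitmap: [0xff, 0x00, 0x00],
+    //     };
+    //
+    // A slice of such selections is what `PcrAllocateCmd::new` wraps into a
+    // `TpmlPcrSelection` for the PCR_Allocate command.
+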
#[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct PcrAllocateReply { + pub header: ReplyHeader, + pub auth_size: u32_be, + pub allocation_success: u8, + pub max_pcr: u32_be, + pub size_needed: u32_be, + pub size_available: u32_be, + + pub auth: common::ReplyAuth, + } + + impl TpmCommand for PcrAllocateCmd { + type Reply = PcrAllocateReply; + } + + impl TpmReply for PcrAllocateReply { + type Command = PcrAllocateCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === ChangeSeed === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ChangeSeedCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + auth_size: u32_be, + auth: common::CmdAuth, + } + + impl ChangeSeedCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + command_code: CommandCodeEnum, + ) -> Self { + Self { + header: CmdHeader::new::(session, command_code.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ChangeSeedReply { + pub header: ReplyHeader, + pub param_size: u32_be, + + pub auth: common::ReplyAuth, + } + + impl TpmCommand for ChangeSeedCmd { + type Reply = ChangeSeedReply; + } + + impl TpmReply for ChangeSeedReply { + type Command = ChangeSeedCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: option-to-error (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === CreatePrimary === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct CreatePrimaryCmd { + pub header: CmdHeader, + primary_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameters + in_sensitive: Tpm2bSensitiveCreate, + in_public: Tpm2bPublic, + outside_info: Tpm2bBuffer, + creation_pcr: TpmlPcrSelection, + } + + impl CreatePrimaryCmd { + pub fn new( + session: SessionTag, + primary_handle: ReservedHandle, + auth: common::CmdAuth, + in_sensitive_user_auth: &[u8], + in_sensitive_data: &[u8], + in_public: TpmtPublic, + outside_info: &[u8], + creation_pcr: &[PcrSelection], + ) -> Result { + let sensitive_create = + TpmsSensitiveCreate::new(in_sensitive_user_auth, in_sensitive_data)?; + let in_sensitive = Tpm2bSensitiveCreate::new(sensitive_create); + let in_public = Tpm2bPublic::new(in_public); + let outside_info = + Tpm2bBuffer::new(outside_info).map_err(TpmProtoError::CreatePrimaryOutsideInfo)?; + let creation_pcr = TpmlPcrSelection::new(creation_pcr) + .map_err(TpmProtoError::CreatePrimaryCreationPcr)?; + + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::CreatePrimary.into()), + primary_handle, + auth_size: (size_of::() as u32).into(), + auth, + in_sensitive, + in_public, + outside_info, + creation_pcr, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + Ok(cmd) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.primary_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + 
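+            // The fixed-size handle and authorization fields above are followed by
+            // the variable-sized parameters, each marshaled via its own `serialize()`.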
buffer.extend_from_slice(&self.in_sensitive.serialize()); + buffer.extend_from_slice(&self.in_public.serialize()); + buffer.extend_from_slice(&self.outside_info.serialize()); + buffer.extend_from_slice(&self.creation_pcr.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.primary_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + payload_size += self.in_sensitive.payload_size(); + payload_size += self.in_public.payload_size(); + payload_size += self.outside_info.payload_size(); + payload_size += self.creation_pcr.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct CreatePrimaryReply { + pub header: ReplyHeader, + pub object_handle: ReservedHandle, + // Parameter size + param_size: u32_be, + // Parameters + pub out_public: Tpm2bPublic, + creation_data: Tpm2bCreationData, + creation_hash: Tpm2bBuffer, + creation_ticket: TpmtTkCreation, + name: Tpm2bBuffer, + // Authorization area + auth: common::ReplyAuth, + } + + impl TpmCommand for CreatePrimaryCmd { + type Reply = CreatePrimaryReply; + } + + impl TpmReply for CreatePrimaryReply { + type Command = CreatePrimaryCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. + if header.size.get() as usize == end { + let mut cmd = CreatePrimaryReply::new_zeroed(); + cmd.header = header; + return Some(cmd); + } + + start = end; + end += size_of::(); + let object_handle = ReservedHandle::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + let param_size = u32_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let out_public = Tpm2bPublic::deserialize(&bytes[start..])?; + end += out_public.payload_size(); + + start = end; + let creation_data = Tpm2bCreationData::deserialize(&bytes[start..])?; + end += creation_data.payload_size(); + + start = end; + let creation_hash = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += creation_hash.payload_size(); + + start = end; + let creation_ticket = TpmtTkCreation::deserialize(&bytes[start..])?; + end += creation_ticket.payload_size(); + + start = end; + let name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += name.payload_size(); + + start = end; + end += size_of::(); + let auth = common::ReplyAuth::read_from_prefix(&bytes[start..end]) + .ok()? 
+ .0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + object_handle, + param_size, + out_public, + creation_data, + creation_hash, + creation_ticket, + name, + auth, + }) + } + + fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.object_handle); + payload_size += size_of_val(&self.param_size); + payload_size += self.out_public.payload_size(); + payload_size += self.creation_data.payload_size(); + payload_size += self.creation_hash.payload_size(); + payload_size += self.creation_ticket.payload_size(); + payload_size += self.name.payload_size(); + payload_size += size_of_val(&self.auth); + + payload_size + } + } + + // === FlushContext === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct FlushContextCmd { + pub header: CmdHeader, + // Parameter + flush_handle: ReservedHandle, + } + + impl FlushContextCmd { + pub fn new(flush_handle: ReservedHandle) -> Self { + Self { + header: CmdHeader::new::( + SessionTagEnum::NoSessions.into(), + CommandCodeEnum::FlushContext.into(), + ), + flush_handle, + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct FlushContextReply { + pub header: ReplyHeader, + } + + impl TpmCommand for FlushContextCmd { + type Reply = FlushContextReply; + } + + impl TpmReply for FlushContextReply { + type Command = FlushContextCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === EvictControl === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct EvictControlCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + object_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameter + persistent_handle: ReservedHandle, + } + + impl EvictControlCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + object_handle: ReservedHandle, + auth: common::CmdAuth, + persistent_handle: ReservedHandle, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::EvictControl.into()), + auth_handle, + object_handle, + auth_size: (size_of::() as u32).into(), + auth, + persistent_handle, + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct EvictControlReply { + pub header: ReplyHeader, + } + + impl TpmCommand for EvictControlCmd { + type Reply = EvictControlReply; + } + + impl TpmReply for EvictControlReply { + type Command = EvictControlCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: error-to-option (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === ReadPublic === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ReadPublicCmd { + header: CmdHeader, + object_handle: ReservedHandle, + } + + impl ReadPublicCmd { + pub fn new(session: SessionTag, object_handle: ReservedHandle) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::ReadPublic.into()), + object_handle, + } + } + } + + #[repr(C)] + 
#[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct ReadPublicReply { + pub header: ReplyHeader, + pub out_public: Tpm2bPublic, + name: Tpm2bBuffer, + qualified_name: Tpm2bBuffer, + } + + impl TpmCommand for ReadPublicCmd { + type Reply = ReadPublicReply; + } + + impl TpmReply for ReadPublicReply { + type Command = ReadPublicCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. + if header.size.get() as usize == end { + return Some(Self { + header, + out_public: Tpm2bPublic::new_zeroed(), + name: Tpm2bBuffer::new_zeroed(), + qualified_name: Tpm2bBuffer::new_zeroed(), + }); + } + + start = end; + let out_public = Tpm2bPublic::deserialize(&bytes[start..])?; + end += out_public.payload_size(); + + start = end; + let name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += name.payload_size(); + + start = end; + let qualified_name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += qualified_name.payload_size(); + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + out_public, + name, + qualified_name, + }) + } + + fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of::(); + payload_size += self.out_public.payload_size(); + payload_size += self.name.payload_size(); + payload_size += self.qualified_name.payload_size(); + + payload_size + } + } + + // === Nv DefineSpace === // + + #[repr(C)] + #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvDefineSpaceCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth_cmd: common::CmdAuth, + // Parameters + auth: Tpm2bBuffer, + public_info: Tpm2bNvPublic, + } + + impl NvDefineSpaceCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth_cmd: common::CmdAuth, + auth: u64, + public_info: TpmsNvPublic, + ) -> Result { + let auth = new_u64_be(auth); + let auth = + Tpm2bBuffer::new(auth.as_bytes()).map_err(TpmProtoError::NvDefineSpaceAuth)?; + let public_info = + Tpm2bNvPublic::new(public_info).map_err(TpmProtoError::NvDefineSpacePublicInfo)?; + + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_DefineSpace.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth_cmd, + auth, + public_info, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + Ok(cmd) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth_cmd.as_bytes()); + buffer.extend_from_slice(&self.auth.serialize()); + buffer.extend_from_slice(&self.public_info.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth_cmd); + payload_size += self.auth.payload_size(); + payload_size += self.public_info.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvDefineSpaceReply { 
+ pub header: ReplyHeader, + } + + impl TpmCommand for NvDefineSpaceCmd { + type Reply = NvDefineSpaceReply; + } + + impl TpmReply for NvDefineSpaceReply { + type Command = NvDefineSpaceCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Nv UndefineSpace === // + + #[repr(C)] + #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvUndefineSpaceCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + nv_index: u32_be, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + } + + impl NvUndefineSpaceCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + nv_index: u32, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_UndefineSpace.into()), + auth_handle, + nv_index: nv_index.into(), + auth_size: (size_of::() as u32).into(), + auth, + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvUndefineSpaceReply { + pub header: ReplyHeader, + } + + impl TpmCommand for NvUndefineSpaceCmd { + type Reply = NvUndefineSpaceReply; + } + + impl TpmReply for NvUndefineSpaceReply { + type Command = NvUndefineSpaceCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Nv ReadPublic === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct NvReadPublicCmd { + header: CmdHeader, + nv_index: u32_be, + } + + impl NvReadPublicCmd { + pub fn new(session: SessionTag, nv_index: u32) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_ReadPublic.into()), + nv_index: nv_index.into(), + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvReadPublicReply { + pub header: ReplyHeader, + // Parameters + pub nv_public: Tpm2bNvPublic, + nv_name: Tpm2bBuffer, + } + + impl TpmCommand for NvReadPublicCmd { + type Reply = NvReadPublicReply; + } + + impl TpmReply for NvReadPublicReply { + type Command = NvReadPublicCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. 
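+            // A failed TPM command returns only the 10-byte reply header, so a reply
+            // size equal to the header size means no parameters follow.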
+ if header.size.get() as usize == end { + return Some(Self { + header, + nv_public: Tpm2bNvPublic::new_zeroed(), + nv_name: Tpm2bBuffer::new_zeroed(), + }); + } + + start = end; + let nv_public = Tpm2bNvPublic::deserialize(&bytes[start..])?; + end += nv_public.payload_size(); + + start = end; + let nv_name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += nv_name.payload_size(); + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + nv_public, + nv_name, + }) + } + + fn payload_size(&self) -> usize { + let mut size = 0; + + size += size_of::(); + size += self.nv_public.payload_size(); + size += self.nv_name.payload_size(); + + size + } + } + + // === Nv Write === // + + #[repr(C)] + #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvWriteCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + pub nv_index: u32_be, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + auth_value: u64_be, + // Parameters + pub data: Tpm2bBuffer, + pub offset: u16_be, + } + + impl NvWriteCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + auth_value: u64, + nv_index: u32, + data: &[u8], + offset: u16, + ) -> Result { + let data = Tpm2bBuffer::new(data).map_err(TpmProtoError::NvWriteData)?; + // If `auth_handle` is not the owner, assuming password-based authorization is used. + let auth_value_size = if auth_handle != TPM20_RH_OWNER { + size_of::() as u32 + } else { + 0 + }; + + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_Write.into()), + auth_handle, + nv_index: nv_index.into(), + auth_size: (size_of::() as u32 + auth_value_size).into(), + auth, + auth_value: auth_value.into(), + data, + offset: offset.into(), + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + Ok(cmd) + } + + pub fn update_write_data(&mut self, data: &[u8], offset: u16) -> Result<(), TpmProtoError> { + let data = Tpm2bBuffer::new(data).map_err(TpmProtoError::NvWriteData)?; + + self.data = data; + self.offset = offset.into(); + self.header.size = new_u32_be(self.payload_size() as u32); + + Ok(()) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.nv_index.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + if self.auth_handle != TPM20_RH_OWNER { + buffer.extend_from_slice(self.auth_value.as_bytes()); + } + buffer.extend_from_slice(&self.data.serialize()); + buffer.extend_from_slice(self.offset.as_bytes()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.nv_index); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + if self.auth_handle != TPM20_RH_OWNER { + payload_size += size_of_val(&self.auth_value); + } + payload_size += self.data.payload_size(); + payload_size += size_of_val(&self.offset); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvWriteReply { + pub header: ReplyHeader, + } + + impl TpmCommand for NvWriteCmd { + type Reply = NvWriteReply; + } + + impl TpmReply for NvWriteReply { + type Command = NvWriteCmd; + + fn deserialize(bytes: &[u8]) -> Option 
{ + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Nv Read === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct NvReadCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + pub nv_index: u32_be, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameters + size: u16_be, + pub offset: u16_be, + } + + impl NvReadCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + nv_index: u32, + auth: common::CmdAuth, + size: u16, + offset: u16, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_Read.into()), + auth_handle, + nv_index: nv_index.into(), + auth_size: (size_of::() as u32).into(), + auth, + size: size.into(), + offset: offset.into(), + } + } + + pub fn update_read_parameters(&mut self, size: u16, offset: u16) { + self.size = size.into(); + self.offset = offset.into(); + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let header = CmdHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.command_code != CommandCodeEnum::NV_Read.into() { + return None; + } + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth_handle = ReservedHandle::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let nv_index = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth_size = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Skip authorization area + end += auth_size.get() as usize; + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let size = u16_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let offset = u16_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + Some(Self { + header, + auth_handle, + nv_index, + auth_size, + auth: common::CmdAuth::new(ReservedHandle(0.into()), 0, 0, 0), + size, + offset, + }) + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvReadReply { + pub header: ReplyHeader, + pub parameter_size: u32_be, + // Parameter + pub data: Tpm2bBuffer, + // Authorization area + pub auth: common::ReplyAuth, + } + + impl TpmCommand for NvReadCmd { + type Reply = NvReadReply; + } + + impl TpmReply for NvReadReply { + type Command = NvReadCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // 
TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. + if header.size.get() as usize == end { + return Some(Self { + header, + parameter_size: 0.into(), + data: Tpm2bBuffer::new_zeroed(), + auth: common::ReplyAuth::new_zeroed(), + }); + } + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let parameter_size = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let data = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += data.payload_size(); + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth = common::ReplyAuth::read_from_prefix(&bytes[start..end]) + .ok()? + .0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + parameter_size, + data, + auth, + }) + } + + fn payload_size(&self) -> usize { + let mut size = 0; + + size += size_of::(); + size += self.data.payload_size(); + + size + } + } + + // === Import === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ImportCmd { + pub header: CmdHeader, + pub auth_handle: ReservedHandle, + // Authorization area + pub auth_size: u32_be, + pub auth: common::CmdAuth, + // Parameters + // `TPM2B_DATA` + pub encryption_key: Tpm2bBuffer, + // `TPM2B_PUBLIC` + pub object_public: Tpm2bPublic, + // `TPM2B_PRIVATE` + pub duplicate: Tpm2bBuffer, + // `TPM2B_ENCRYPTED_SECRET` + pub in_sym_seed: Tpm2bBuffer, + // `TPMT_SYM_DEF_OBJECT` + pub symmetric_alg: TpmtSymDefObject, + } + + impl ImportCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + encryption_key: &Tpm2bBuffer, + object_public: &Tpm2bPublic, + duplicate: &Tpm2bBuffer, + in_sym_seed: &Tpm2bBuffer, + symmetric_alg: &TpmtSymDefObject, + ) -> Self { + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::Import.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + encryption_key: *encryption_key, + object_public: *object_public, + duplicate: *duplicate, + in_sym_seed: *in_sym_seed, + symmetric_alg: *symmetric_alg, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + cmd + } + + /// Deserialize the command payload assuming no inner wrapping key + pub fn deserialize_no_wrapping_key(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = 0; + + // When there is no inner wrapper for `duplicate`, `encryption_key` + // should be an empty buffer and `symmetric_alg` should be `TPM_ALG_NULL`. + // See Table 42, Section 13.3.2, "Trusted Platform Module Library Part 3: Commands", revision 1.38. 
+ let encryption_key = Tpm2bBuffer::new_zeroed(); + let symmetric_alg = TpmtSymDefObject::new(AlgIdEnum::NULL.into(), None, None); + + let object_public = Tpm2bPublic::deserialize(&bytes[start..])?; + end += object_public.payload_size(); + + start = end; + let duplicate = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += duplicate.payload_size(); + + start = end; + let in_sym_seed = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += in_sym_seed.payload_size(); + + // Handle zero paddings applied to valid payload + if bytes.len() < end { + return None; + } + + Some(Self { + header: CmdHeader::new_zeroed(), + auth_handle: ReservedHandle(0.into()), + auth_size: 0.into(), + auth: common::CmdAuth::new_zeroed(), + encryption_key, + object_public, + duplicate, + in_sym_seed, + symmetric_alg, + }) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + buffer.extend_from_slice(&self.encryption_key.serialize()); + buffer.extend_from_slice(&self.object_public.serialize()); + buffer.extend_from_slice(&self.duplicate.serialize()); + buffer.extend_from_slice(&self.in_sym_seed.serialize()); + buffer.extend_from_slice(&self.symmetric_alg.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + payload_size += self.encryption_key.payload_size(); + payload_size += self.object_public.payload_size(); + payload_size += self.duplicate.payload_size(); + payload_size += self.in_sym_seed.payload_size(); + payload_size += self.symmetric_alg.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct ImportReply { + pub header: ReplyHeader, + pub parameter_size: u32_be, + // Parameter + // `TPM2B_PRIVATE` + pub out_private: Tpm2bBuffer, + // Authorization area + pub auth: common::ReplyAuth, + } + + impl TpmCommand for ImportCmd { + type Reply = ImportReply; + } + + impl TpmReply for ImportReply { + type Command = ImportCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. + if header.size.get() as usize == end { + return Some(Self { + header, + parameter_size: 0.into(), + out_private: Tpm2bBuffer::new_zeroed(), + auth: common::ReplyAuth::new_zeroed(), + }); + } + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let parameter_size = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + let expected_auth_start = end + parameter_size.get() as usize; + + start = end; + let out_private = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += out_private.payload_size(); + + start = end; + if start != expected_auth_start { + return None; + } + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth = common::ReplyAuth::read_from_prefix(&bytes[start..end]) + .ok()? 
+ .0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + parameter_size, + out_private, + auth, + }) + } + + fn payload_size(&self) -> usize { + let mut size = 0; + + size += size_of::(); + size += self.out_private.payload_size(); + + size + } + } + + // === Load === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct LoadCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameters + // `TPM2B_PRIVATE` + in_private: Tpm2bBuffer, + // `TPM2B_PUBLIC` + in_public: Tpm2bPublic, + } + + impl LoadCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + in_private: &Tpm2bBuffer, + in_public: &Tpm2bPublic, + ) -> Self { + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::Load.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + in_private: *in_private, + in_public: *in_public, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + cmd + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + buffer.extend_from_slice(&self.in_private.serialize()); + buffer.extend_from_slice(&self.in_public.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + payload_size += self.in_private.payload_size(); + payload_size += self.in_public.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct LoadReply { + pub header: ReplyHeader, + pub object_handle: ReservedHandle, + pub parameter_size: u32_be, + // Parameter + // `TPM2B_NAME` + pub name: Tpm2bBuffer, + // Authorization area + pub auth: common::ReplyAuth, + } + + impl TpmCommand for LoadCmd { + type Reply = LoadReply; + } + + impl TpmReply for LoadReply { + type Command = LoadCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. 
+ if header.size.get() as usize == end { + return Some(Self { + header, + object_handle: ReservedHandle::new_zeroed(), + parameter_size: 0.into(), + name: Tpm2bBuffer::new_zeroed(), + auth: common::ReplyAuth::new_zeroed(), + }); + } + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let object_handle = ReservedHandle::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let parameter_size = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + let expected_auth_start = end + parameter_size.get() as usize; + + start = end; + let name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += name.payload_size(); + + start = end; + if start != expected_auth_start { + return None; + } + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth = common::ReplyAuth::read_from_prefix(&bytes[start..end]) + .ok()? + .0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + object_handle, + parameter_size, + name, + auth, + }) + } + + fn payload_size(&self) -> usize { + let mut size = 0; + + size += size_of::(); + size += size_of::(); + size += self.name.payload_size(); + + size + } + } +} + +#[cfg(test)] +mod tests { + use super::protocol::common::*; + use super::protocol::*; + use super::*; + + #[test] + fn test_create_primary() { + const AK_PUB_EXPECTED_CMD: [u8; 321] = [ + 0x80, 0x02, 0x00, 0x00, 0x01, 0x41, 0x00, 0x00, 0x01, 0x31, 0x40, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x09, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x18, 0x00, 0x01, 0x00, 0x0b, 0x00, 0x05, 0x04, + 0x72, 0x00, 0x00, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0b, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + const AK_PUB_REPLY_SUCCEED: [u8; 488] = [ + 0x80, 0x02, 0x00, 0x00, 0x01, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0xd1, 0x01, 0x18, 0x00, 0x01, 0x00, 0x0b, 0x00, 0x05, 0x04, 0x72, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0xc8, 0x38, 0xd1, 0x52, 0x00, 0x00, 0xe9, 0x3c, 0x89, 0x4c, 0x52, 0xfb, + 0x79, 0x7b, 0xc4, 0x14, 0x28, 0x5f, 0xaa, 0x50, 0x78, 0x9a, 0x31, 0x2b, 0x4d, 0xfe, + 0xad, 0xad, 0x97, 0x28, 0x49, 0xb2, 0x39, 0x77, 0x5e, 0x06, 0x49, 0xb7, 0x93, 0xf5, + 0x2f, 0x84, 0x85, 0x2e, 0x17, 0x87, 0x52, 0x96, 0x36, 0x74, 0x76, 0x21, 0x5f, 0xc2, + 0x90, 0x81, 0xf7, 0xe9, 0xd8, 0xac, 0x07, 0x60, 0xaf, 0x83, 0xa2, 0x08, 0xda, 0x94, + 0x77, 0x2c, 0x73, 0x9c, 0xd4, 0x80, 0x47, 0x43, 0xa6, 0x4e, 0x36, 0xc3, 0x7e, 0xe2, + 0x9c, 0xfb, 0xf1, 0x7e, 0x36, 0x8e, 0x7a, 0x86, 0xde, 0x3d, 0x4e, 0x8a, 0x3a, 0xce, + 0x7a, 0xa1, 0x58, 0xf6, 0xdb, 0x49, 0x3e, 0xc2, 0x2e, 0xcb, 0x4a, 0xbc, 0x19, 0x81, + 0xd5, 0x5d, 0x4f, 0x57, 0x39, 0xf5, 0x9e, 0x02, 0x56, 0x91, 0x37, 0xc2, 0x87, 0x96, + 0x26, 0xd8, 0x4a, 0x45, 0x16, 0x01, 0xe0, 0x2e, 0x20, 0x95, 0x75, 0xb8, 0x20, 0x6d, + 0x83, 0x54, 0x65, 0x3d, 0x66, 0xf4, 0x8a, 0x43, 0x84, 0x9f, 0xa6, 0xc5, 0x2c, 0x08, + 0xe7, 0x59, 0x8e, 0x1f, 0x6d, 0xea, 0x32, 0x5b, 0x36, 0x8e, 0xd1, 0xf3, 0x09, 0x60, + 0x86, 0xdb, 0x55, 0xc9, 0xf0, 0xf9, 0x79, 0x87, 0x71, 0x1c, 0x7c, 0x98, 0xa4, 0xc8, + 0x91, 0x77, 0xa7, 0x95, 0x82, 0x19, 0xcc, 0x9d, 0xde, 0x4d, 0x7b, 0xf7, 0xc1, 0x31, + 0x5b, 0xae, 0x45, 0x6e, 0x6b, 0xf1, 0xaf, 0x89, 0x07, 0x91, 0x80, 0x9d, 0xe5, 0x49, + 0xfc, 0x5e, 0xb2, 0x15, 0x67, 0xcf, 0x05, 0xbb, 0xb3, 0x98, 0x54, 0x34, 0x45, 0x2c, + 0xc3, 0x3d, 0x09, 0x8e, 0x8d, 0x60, 0xba, 0x67, 0xd9, 0xbe, 0x1c, 0x2a, 0x2c, 0x2a, + 0xfa, 0xed, 0x26, 0x81, 0x96, 0x48, 0x17, 0xb3, 0xa6, 0x90, 0x9a, 0x78, 0xa5, 0xac, + 0x80, 0xb2, 0xbe, 0xff, 0x3d, 0x35, 0x00, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, + 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, + 0x78, 0x52, 0xb8, 0x55, 0x01, 0x00, 0x10, 0x00, 0x04, 0x40, 0x00, 0x00, 0x0b, 0x00, + 0x04, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x20, 0x28, 0xd0, 0x26, 0xfa, 0xfd, + 0x74, 0x91, 0x06, 0x74, 0x3e, 0x27, 0xc4, 0x28, 0x05, 0x51, 0x58, 0x5e, 0x5d, 0x17, + 0x66, 0x8e, 0xb5, 0x21, 0x83, 0x5e, 0xd6, 0x01, 0x27, 0xef, 0xfc, 0x05, 0xd4, 0x80, + 0x21, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x30, 0xfb, 0xfe, 0xd4, 0xe7, 0x9f, 0xc5, 0x2f, + 0xfd, 0x7c, 0xe0, 0x4a, 0x97, 0xb5, 0xec, 0x61, 0x59, 0x4d, 0x43, 0x19, 0x29, 0xc0, + 0x4f, 0xef, 0xda, 0xdc, 0xe1, 0x48, 0x4d, 0xbd, 0x3d, 0x47, 0x0e, 0xe3, 0x2f, 0xd4, + 0xf9, 0x57, 0x4f, 0x77, 0x0f, 0x58, 0x5c, 0x73, 0x58, 0xc2, 0x2d, 0xd7, 0x4a, 0x00, + 0x22, 0x00, 0x0b, 0x92, 0x57, 0x64, 0x38, 0x21, 0xf9, 0x68, 0xe9, 0xfc, 0x47, 0xfa, + 0xbf, 0x9c, 0x56, 0x49, 0x7a, 0x63, 0xc2, 0xc0, 0x8a, 0x12, 0x80, 0x49, 0x73, 0xc3, + 0x8b, 0x00, 0x06, 0x99, 0xe9, 0xfc, 0x22, 0x00, 0x00, 0x01, 0x00, 0x00, + ]; + + const EK_PUB_EXPECTED_CMD: [u8; 355] = [ + 0x80, 0x02, 0x00, 0x00, 0x01, 0x63, 0x00, 0x00, 0x01, 0x31, 0x40, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x09, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x00, 0x01, 0x00, 0x0b, 0x00, 0x03, 0x00, + 0xb2, 0x00, 
0x20, 0x83, 0x71, 0x97, 0x67, 0x44, 0x84, 0xb3, 0xf8, 0x1a, 0x90, 0xcc, + 0x8d, 0x46, 0xa5, 0xd7, 0x24, 0xfd, 0x52, 0xd7, 0x6e, 0x06, 0x52, 0x0b, 0x64, 0xf2, + 0xa1, 0xda, 0x1b, 0x33, 0x14, 0x69, 0xaa, 0x00, 0x06, 0x00, 0x80, 0x00, 0x43, 0x00, + 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + const EK_PUB_REPLY_SUCCEED: [u8; 522] = [ + 0x80, 0x02, 0x00, 0x00, 0x02, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0xf3, 0x01, 0x3a, 0x00, 0x01, 0x00, 0x0b, 0x00, 0x03, 0x00, 0xb2, + 0x00, 0x20, 0x83, 0x71, 0x97, 0x67, 0x44, 0x84, 0xb3, 0xf8, 0x1a, 0x90, 0xcc, 0x8d, + 0x46, 0xa5, 0xd7, 0x24, 0xfd, 0x52, 0xd7, 0x6e, 0x06, 0x52, 0x0b, 0x64, 0xf2, 0xa1, + 0xda, 0x1b, 0x33, 0x14, 0x69, 0xaa, 0x00, 0x06, 0x00, 0x80, 0x00, 0x43, 0x00, 0x10, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x9e, 0x9c, 0x1b, 0x40, 0x00, 0x00, + 0xea, 0x2f, 0xd5, 0xd7, 0xde, 0x9b, 0x18, 0x83, 0x55, 0x00, 0x09, 0x53, 0x13, 0xa8, + 0x88, 0x10, 0x24, 0x46, 0x44, 0xa8, 0x2d, 0x62, 0xd3, 0x24, 0xe5, 0xf9, 0xcd, 0xca, + 0x61, 0xb7, 0xd8, 0x15, 0x98, 0xf8, 0x56, 0x64, 0x14, 0x7b, 0x40, 0x5a, 0x47, 0xbd, + 0xd1, 0xc8, 0x7d, 0x1f, 0x93, 0x72, 0x3f, 0x03, 0xe0, 0x29, 0x38, 0x08, 0x03, 0xae, + 0x62, 0x13, 0x10, 0xf5, 0x88, 0x5f, 0x86, 0x84, 0x82, 0xfb, 0xda, 0xd8, 0x78, 0xfd, + 0x02, 0x9e, 0x88, 0x5c, 0xaf, 0x30, 0xd4, 0x3d, 0x41, 0xb2, 0xb7, 0x7a, 0x36, 0xa5, + 0x95, 0x37, 0x08, 0x44, 0x20, 0x10, 0xb3, 0x6c, 0xd0, 0x6d, 0xe9, 0xab, 0xce, 0x35, + 0xc0, 0x82, 0x52, 0x06, 0x41, 0x4c, 0xc5, 0x48, 0x5b, 0xe6, 0x22, 0x00, 0x7e, 0x1d, + 0x4b, 0x68, 0x80, 0x34, 0xe9, 0xea, 0x6e, 0xf9, 0xf7, 0xf7, 0x84, 0xbe, 0x56, 0xdf, + 0xea, 0x85, 0x97, 0x1b, 0x03, 0x5c, 0x5c, 0x9f, 0xf4, 0x72, 0xef, 0xe7, 0xfe, 0x5e, + 0x73, 0x2f, 0xf1, 0xdd, 0x40, 0x80, 0x16, 0x8d, 0x1b, 0x95, 0xee, 0xec, 0x21, 0x1c, + 0x30, 0x84, 0x25, 0x08, 0x8d, 0x0e, 0xda, 0x5b, 0x00, 0x9c, 0x49, 0x8b, 0xc8, 0xb3, + 0x48, 0x9a, 0xc9, 0x19, 0x0f, 0x68, 0xc7, 
0x0a, 0x7a, 0x65, 0x35, 0xa0, 0x09, 0x23, + 0x88, 0x3f, 0x97, 0x53, 0x4e, 0xbc, 0x08, 0xc0, 0x5b, 0x69, 0x94, 0xcc, 0xd9, 0xb9, + 0xea, 0x8c, 0x20, 0x9e, 0x1a, 0xf9, 0x57, 0x08, 0x1a, 0xe0, 0x2d, 0x88, 0x56, 0x1f, + 0x9f, 0x50, 0x2e, 0x12, 0xf2, 0x69, 0x9a, 0xdf, 0x30, 0x56, 0xc1, 0xf0, 0x31, 0xef, + 0x64, 0xd5, 0x34, 0x02, 0x15, 0xf4, 0xd7, 0x7b, 0x76, 0xd9, 0x99, 0x24, 0x83, 0x99, + 0xa5, 0x05, 0xc1, 0xcd, 0xa6, 0xbd, 0xc3, 0x3d, 0x7c, 0x1e, 0x94, 0xdd, 0x00, 0x37, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, + 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, 0x01, 0x00, 0x10, 0x00, + 0x04, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x04, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, + 0x20, 0x28, 0xd0, 0x26, 0xfa, 0xfd, 0x74, 0x91, 0x06, 0x74, 0x3e, 0x27, 0xc4, 0x28, + 0x05, 0x51, 0x58, 0x5e, 0x5d, 0x17, 0x66, 0x8e, 0xb5, 0x21, 0x83, 0x5e, 0xd6, 0x01, + 0x27, 0xef, 0xfc, 0x05, 0xd4, 0x80, 0x21, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x30, 0xe2, + 0xf2, 0x64, 0xc3, 0xd7, 0x9e, 0xc1, 0x07, 0xbb, 0x49, 0x74, 0x67, 0xd3, 0xc7, 0xf6, + 0xb7, 0x8c, 0xe3, 0x2e, 0x28, 0x36, 0xa6, 0x1f, 0x6f, 0x0b, 0xbd, 0xe3, 0x8e, 0x77, + 0xa1, 0x8c, 0x50, 0xe4, 0xaa, 0xa4, 0x01, 0x61, 0xb4, 0x7a, 0x4a, 0x3b, 0x5d, 0xac, + 0xe1, 0xd1, 0x65, 0x69, 0x1e, 0x00, 0x22, 0x00, 0x0b, 0xe5, 0x6f, 0x0f, 0xae, 0x8d, + 0x0f, 0x91, 0xb9, 0x84, 0x17, 0xc3, 0x86, 0x13, 0xa6, 0x12, 0xbe, 0xec, 0x85, 0xf9, + 0x0b, 0xd3, 0xfe, 0x4f, 0x3d, 0x79, 0x7d, 0x6d, 0x3c, 0xc5, 0xcc, 0xb1, 0x5b, 0x00, + 0x00, 0x01, 0x00, 0x00, + ]; + + const REPLY_FAIL: [u8; 10] = [0x80, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x02, 0xda]; + + // Create AK pub + let symmetric = TpmtSymDefObject::new(AlgIdEnum::NULL.into(), None, None); + let scheme = TpmtRsaScheme::new(AlgIdEnum::RSASSA.into(), Some(AlgIdEnum::SHA256.into())); + let rsa_params = TpmsRsaParams::new(symmetric, scheme, 2048, 0); + + let object_attributes = TpmaObjectBits::new() + .with_fixed_tpm(true) + .with_fixed_parent(true) + .with_sensitive_data_origin(true) + .with_user_with_auth(true) + .with_no_da(true) + .with_restricted(true) + .with_sign_encrypt(true); + + let result = TpmtPublic::new( + AlgIdEnum::RSA.into(), + AlgIdEnum::SHA256.into(), + object_attributes, + &[], + rsa_params, + &[0u8; 256], + ); + assert!(result.is_ok()); + let in_public = result.unwrap(); + + let result = CreatePrimaryCmd::new( + SessionTagEnum::Sessions.into(), + TPM20_RH_ENDORSEMENT, + CmdAuth::new(TPM20_RS_PW, 0, 0, 0), + &[], + &[], + in_public, + &[], + &[], + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + + assert_eq!(bytes, AK_PUB_EXPECTED_CMD); + + let mut reply = [0u8; 4096]; + reply[..AK_PUB_REPLY_SUCCEED.len()].copy_from_slice(&AK_PUB_REPLY_SUCCEED); + + let response = CreatePrimaryReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + assert_eq!(response.object_handle.0.get(), 0x80000000); + + reply[..REPLY_FAIL.len()].copy_from_slice(&REPLY_FAIL); + + let response = CreatePrimaryReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x2da); + + // Create EK pub + const AUTH_POLICY_A_SHA_256: [u8; 32] = [ + 0x83, 0x71, 0x97, 0x67, 0x44, 0x84, 0xB3, 0xF8, 0x1A, 0x90, 0xCC, 0x8D, 0x46, 0xA5, + 0xD7, 0x24, 0xFD, 0x52, 0xD7, 0x6E, 0x06, 0x52, 0x0B, 0x64, 
0xF2, 0xA1, 0xDA, 0x1B, + 0x33, 0x14, 0x69, 0xAA, + ]; + let symmetric = TpmtSymDefObject::new( + AlgIdEnum::AES.into(), + Some(128), + Some(AlgIdEnum::CFB.into()), + ); + let scheme = TpmtRsaScheme::new(AlgIdEnum::NULL.into(), None); + let rsa_params = TpmsRsaParams::new(symmetric, scheme, 2048, 0); + + let object_attributes = TpmaObjectBits::new() + .with_fixed_tpm(true) + .with_fixed_parent(true) + .with_sensitive_data_origin(true) + .with_admin_with_policy(true) + .with_restricted(true) + .with_decrypt(true); + + let result = TpmtPublic::new( + AlgIdEnum::RSA.into(), + AlgIdEnum::SHA256.into(), + object_attributes, + &AUTH_POLICY_A_SHA_256, + rsa_params, + &[0u8; 256], + ); + assert!(result.is_ok()); + let in_public = result.unwrap(); + + let result = CreatePrimaryCmd::new( + SessionTagEnum::Sessions.into(), + TPM20_RH_ENDORSEMENT, + CmdAuth::new(TPM20_RS_PW, 0, 0, 0), + &[], + &[], + in_public, + &[], + &[], + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + + assert_eq!(bytes, EK_PUB_EXPECTED_CMD); + + reply[..EK_PUB_REPLY_SUCCEED.len()].copy_from_slice(&EK_PUB_REPLY_SUCCEED); + + let response = CreatePrimaryReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + assert_eq!(response.object_handle.0.get(), 0x80000000); + } + + #[test] + fn test_read_public() { + const REPLY_SUCCEED: [u8; 364] = [ + 0x80, 0x01, 0x00, 0x00, 0x01, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x01, 0x18, 0x00, 0x01, + 0x00, 0x0b, 0x00, 0x05, 0x04, 0x72, 0x00, 0x00, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0b, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xa6, 0xaf, 0x71, 0xec, 0x00, 0x00, + 0xe0, 0x69, 0xa5, 0xc5, 0xcd, 0x94, 0x59, 0x3b, 0x79, 0xe6, 0xee, 0x14, 0xd3, 0x50, + 0xfb, 0x0b, 0xa9, 0x03, 0x51, 0xbf, 0x23, 0xc5, 0x15, 0xdc, 0xbc, 0x4a, 0x3b, 0xaa, + 0xef, 0x12, 0x3c, 0x24, 0x47, 0xf2, 0x81, 0xf6, 0x85, 0xf4, 0x8c, 0x16, 0x14, 0x10, + 0x3c, 0x3b, 0x2e, 0x7b, 0x04, 0x5e, 0x25, 0x66, 0xcd, 0x8d, 0x86, 0x0b, 0x8c, 0x2b, + 0x5f, 0xca, 0x36, 0x1d, 0x5f, 0xff, 0xbf, 0x70, 0x63, 0x79, 0x5b, 0x7f, 0x93, 0x94, + 0x6d, 0xbd, 0x6e, 0x4f, 0x22, 0x94, 0x93, 0x87, 0xe1, 0x63, 0x4d, 0xa4, 0x9a, 0x2f, + 0xad, 0x90, 0x4c, 0xc9, 0x37, 0x14, 0x59, 0xd3, 0x03, 0x6d, 0x37, 0x98, 0xd4, 0x85, + 0x19, 0x9b, 0x93, 0x7e, 0x61, 0x93, 0x6d, 0x1c, 0xe0, 0xe6, 0x72, 0x71, 0x81, 0x45, + 0xe0, 0xea, 0x5f, 0xb4, 0x6a, 0x9a, 0x3e, 0x86, 0x60, 0x86, 0xaf, 0xfc, 0x86, 0x0f, + 0x0d, 0xe8, 0x81, 0x46, 0x59, 0xad, 0xeb, 0x6f, 0xef, 0x38, 0x5e, 0x53, 0xea, 0x91, + 0xcb, 0xa9, 0xf8, 0x31, 0xcd, 0x52, 0x85, 0x55, 0xa8, 0x91, 0x68, 0xd8, 0xdd, 0x20, + 0x67, 0x21, 0x30, 0x03, 0xcd, 0x48, 0x3b, 0xb0, 0x33, 0x16, 0xb4, 0xf0, 0x06, 0x55, + 0xdf, 0x15, 0xd2, 0x65, 0x55, 0x2f, 0xec, 0xec, 0xc5, 0x74, 0xea, 0xd8, 0x0f, 0x29, + 0xac, 0x24, 0x38, 0x32, 0x34, 0x1f, 0xb3, 0x20, 0x28, 0xf6, 0x55, 0xfb, 0x51, 0xf1, + 0x22, 0xa3, 0x5e, 0x38, 0xc6, 0xa5, 0xa4, 0xe0, 0xc2, 0xa3, 0x50, 0x27, 0xf6, 0x1d, + 0x55, 0x8e, 0x95, 0xe9, 0x95, 0x26, 0x8e, 0x70, 0x35, 0x7b, 0x73, 0xbb, 0x8e, 0xf2, + 0xdc, 0x37, 0x30, 0x99, 0x20, 0x2e, 0x1f, 0x09, 0xbd, 0x85, 0x24, 0x44, 0x05, 0x8f, + 0x11, 0xc4, 0xb5, 0x71, 0xc1, 0x2e, 0x52, 0xf6, 0x2e, 0x6f, 0x9a, 0x11, 0x00, 0x22, + 0x00, 0x0b, 0x61, 0xca, 0x8b, 0xec, 0x0f, 0x9e, 0xc1, 0x38, 0x35, 0xd3, 0x43, 0x58, + 0x77, 0xdf, 0x53, 0x82, 0xe7, 0xb2, 0xff, 0x7b, 0xe4, 0x6c, 0xfb, 0x34, 0xa4, 0x28, + 0xdd, 0xda, 0xcb, 0xe9, 0x50, 0x50, 0x00, 0x22, 0x00, 0x0b, 0x51, 0xfa, 0x43, 0xbd, + 0x35, 0x01, 0xd6, 0x66, 
0xa0, 0x4d, 0xc8, 0x03, 0x4f, 0xa1, 0x64, 0xa0, 0x91, 0x63, + 0x3c, 0x27, 0xd5, 0x90, 0xa3, 0x7a, 0xae, 0xbc, 0x52, 0xcc, 0x4e, 0x9a, 0xa3, 0x66, + ]; + + const REPLY_FAIL: [u8; 10] = [0x80, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01, 0x8b]; + + let mut reply = [0u8; 4096]; + reply[..REPLY_SUCCEED.len()].copy_from_slice(&REPLY_SUCCEED); + + let response: Option = ReadPublicReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + + reply[..REPLY_FAIL.len()].copy_from_slice(&REPLY_FAIL); + + let response = ReadPublicReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x18b); + } + + #[test] + fn test_nv_read_public() { + const REPLY_SUCCEED: [u8; 62] = [ + 0x80, 0x01, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x01, 0x40, + 0x00, 0x01, 0x00, 0x0b, 0x42, 0x06, 0x00, 0x04, 0x00, 0x00, 0x10, 0x00, 0x00, 0x22, + 0x00, 0x0b, 0xc1, 0x0f, 0x8d, 0x61, 0x77, 0xea, 0xd0, 0x29, 0x52, 0xa6, 0x2d, 0x3a, + 0x39, 0xc7, 0x22, 0x0b, 0xb9, 0xa1, 0xe1, 0xfe, 0x08, 0x68, 0xa8, 0x6f, 0x5f, 0x10, + 0xd6, 0x86, 0x83, 0x28, 0x79, 0x3e, + ]; + + const REPLY_FAIL: [u8; 10] = [0x80, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01, 0x8b]; + + let mut reply = [0u8; 4096]; + reply[..REPLY_SUCCEED.len()].copy_from_slice(&REPLY_SUCCEED); + + let response = NvReadPublicReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + + reply[..REPLY_FAIL.len()].copy_from_slice(&REPLY_FAIL); + + let response = NvReadPublicReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x18b); + } + + #[test] + fn test_define_space() { + const EXPECTED_CMD: [u8; 53] = [ + 0x80, 0x02, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x01, 0x2a, 0x40, 0x00, 0x00, 0x0c, + 0x00, 0x00, 0x00, 0x09, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00, 0x00, 0x0e, 0x01, 0xc1, 0x01, + 0xd0, 0x00, 0x0b, 0x42, 0x06, 0x00, 0x04, 0x00, 0x00, 0x10, 0x00, + ]; + + let auth_value: u64 = 0x7766554433221100; + + let attributes = TpmaNvBits::new() + .with_nv_authread(true) + .with_nv_authwrite(true) + .with_nv_ownerread(true) + .with_nv_platformcreate(true) + .with_nv_no_da(true); + + let result = TpmsNvPublic::new(0x1c101d0, AlgIdEnum::SHA256.into(), attributes, &[], 4096); + assert!(result.is_ok()); + let nv_public = result.unwrap(); + + let result = NvDefineSpaceCmd::new( + SessionTagEnum::Sessions.into(), + TPM20_RH_PLATFORM, + CmdAuth::new(TPM20_RS_PW, 0, 0, 0), + auth_value, + nv_public, + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + assert_eq!(bytes, EXPECTED_CMD); + } + + #[test] + fn test_nv_write_authwrite() { + const EXPECTED_CMD: [u8; 171] = [ + 0x80, 0x02, 0x00, 0x00, 0x00, 0xab, 0x00, 0x00, 0x01, 0x37, 0x01, 0xc1, 0x01, 0xd0, + 0x01, 0xc1, 0x01, 0xd0, 0x00, 0x00, 0x00, 0x11, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00, 0x00, 0x80, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, + ]; + let auth_value: u64 = 0x7766554433221100; + + let result = NvWriteCmd::new( + SessionTagEnum::Sessions.into(), + ReservedHandle(0x1c101d0.into()), + CmdAuth::new(TPM20_RS_PW, 0, 0, size_of_val(&auth_value) as u16), + auth_value, + 0x1c101d0, + &[1u8; 128], + 0, + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + assert_eq!(bytes, EXPECTED_CMD); + } + + #[test] + fn test_nv_write_ownerwrite() { + const EXPECTED_CMD: [u8; 163] = [ + 0x80, 0x02, 0x00, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x01, 0x37, 0x40, 0x00, 0x00, 0x01, + 0x01, 0xc1, 0x01, 0xd0, 0x00, 0x00, 0x00, 0x09, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, + ]; + + let result = NvWriteCmd::new( + SessionTagEnum::Sessions.into(), + TPM20_RH_OWNER, + CmdAuth::new(TPM20_RS_PW, 0, 0, 0), + 0, + 0x1c101d0, + &[1u8; 128], + 0, + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + assert_eq!(bytes, EXPECTED_CMD); + } + + #[test] + fn test_nv_read() { + const REPLY_SUCCEED: [u8; 85] = [ + 0x80, 0x02, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, + 0x00, 0x40, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, + 0xdd, 0xee, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, + ]; + + const EXPECTED_DATA: [u8; 64] = [ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, + 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + let mut reply = [0u8; 4096]; + reply[..REPLY_SUCCEED.len()].copy_from_slice(&REPLY_SUCCEED); + + let response = NvReadReply::deserialize(&reply); + assert!(response.is_some()); + let response = 
response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + assert_eq!(response.data.buffer[..EXPECTED_DATA.len()], EXPECTED_DATA); + } +} diff --git a/opentmk/src/main.rs b/opentmk/src/main.rs new file mode 100644 index 0000000000..ad75941c38 --- /dev/null +++ b/opentmk/src/main.rs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// UNSAFETY: This crate contains unsafe code to perform low-level operations such as managing memory, handling interrupts, and invoking hypercalls. +#![expect(unsafe_code)] +#![cfg_attr(nightly, feature(abi_x86_interrupt))] +#![doc = include_str!("../README.md")] +#![cfg_attr(all(not(test), target_os = "uefi"), no_main)] +#![cfg_attr(all(not(test), target_os = "uefi"), no_std)] + +// Actual entrypoint is `uefi::uefi_main`, via the `#[entry]` macro +#[cfg(any(test, not(target_os = "uefi")))] +fn main() {} + +#[macro_use] +extern crate alloc; + +pub mod arch; +pub mod context; +pub mod devices; +pub mod platform; +pub mod tests; +pub mod tmk_assert; +pub mod tmk_logger; +pub mod tmkdefs; +#[cfg(target_os = "uefi")] +mod uefi; diff --git a/opentmk/src/platform/hyperv/arch/aarch64/ctx.rs b/opentmk/src/platform/hyperv/arch/aarch64/ctx.rs new file mode 100644 index 0000000000..090866d0ca --- /dev/null +++ b/opentmk/src/platform/hyperv/arch/aarch64/ctx.rs @@ -0,0 +1,163 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Platform-specific context implementations for AArch64 Hyper-V. +//! + +use core::ops::Range; + +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecToken; +use crate::context::VtlPlatformTrait; +use crate::platform::hyperv::ctx::HvTestCtx; +use crate::platform::hyperv::ctx::vtl_transform; +use crate::tmkdefs::TmkError; +use crate::tmkdefs::TmkResult; +use hvdef::AlignedU128; +use hvdef::HvRegisterValue; +use hvdef::Vtl; +use hvdef::hypercall::HvInputVtl; +use hvdef::hypercall::InitialVpContextArm64; +use memory_range::MemoryRange; + +impl VirtualProcessorPlatformTrait for HvTestCtx { + /// Fetch the content of the specified architectural register from + /// the current VTL for the executing VP. + fn get_register(&mut self, reg: u32) -> TmkResult { + let reg = hvdef::HvArm64RegisterName(reg); + let val = self.hvcall.get_register(reg.into(), None)?.as_u128(); + Ok(val) + } + + /// Set the architecture specific register identified by `reg`. + fn set_register(&mut self, reg: u32, val: u128) -> TmkResult<()> { + let reg = hvdef::HvArm64RegisterName(reg); + let value = HvRegisterValue::from(val); + self.hvcall.set_register(reg.into(), value, None)?; + Ok(()) + } + + fn get_vp_count(&self) -> TmkResult { + unimplemented!(); + } + + fn queue_command_vp(&mut self, _cmd: VpExecToken) -> TmkResult<()> { + unimplemented!(); + } + + fn start_on_vp(&mut self, _cmd: VpExecToken) -> TmkResult<()> { + unimplemented!(); + } + + /// Start the given VP in the current VTL using a freshly captured + /// context. + fn start_running_vp_with_default_context( + &mut self, + cmd: VpExecToken, + ) -> TmkResult<()> { + let (vp_index, vtl, _cmd) = cmd.get(); + let vp_ctx = self.get_default_context(vtl)?; + self.hvcall + .start_virtual_processor(vp_index, vtl, Some(vp_ctx))?; + Ok(()) + } + + /// Return the index of the VP that is currently executing this code. 
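+ /// On aarch64 this returns the cached `my_vp_idx` field rather than querying the hypervisor.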
+ fn get_current_vp(&self) -> TmkResult { + Ok(self.my_vp_idx) + } + + fn set_register_vtl(&mut self, reg: u32, value: u128, vtl: Vtl) -> TmkResult<()> { + let reg = hvdef::HvArm64RegisterName(reg); + let value = HvRegisterValue::from(value); + self.hvcall + .set_register(reg.into(), value, Some(vtl_transform(vtl)))?; + Ok(()) + } + + fn get_register_vtl(&mut self, reg: u32, vtl: Vtl) -> TmkResult { + let reg = hvdef::HvArm64RegisterName(reg); + let val = self + .hvcall + .get_register(reg.into(), Some(vtl_transform(vtl)))? + .as_u128(); + Ok(val) + } +} + +impl VtlPlatformTrait for HvTestCtx { + /// Apply VTL protections to the supplied GPA range so that only the + /// provided VTL can access it. + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()> { + self.hvcall + .apply_vtl_protections(MemoryRange::new(range), vtl)?; + Ok(()) + } + + /// Enable the specified VTL on a VP and seed it with a default + /// context captured from the current execution environment. + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { + let vp_ctx = self.get_default_context(vtl)?; + self.hvcall.enable_vp_vtl(vp_index, vtl, Some(vp_ctx))?; + Ok(()) + } + + /// Return the VTL in which the current code is running. + fn get_current_vtl(&self) -> TmkResult { + Ok(self.my_vtl) + } + + /// Enable VTL support for the entire partition. + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()> { + self.hvcall + .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl)?; + log::info!("enabled {:?} for the partition.", vtl); + Ok(()) + } + + /// Turn on VTL protections for the currently running VTL. + fn setup_vtl_protection(&mut self) -> TmkResult<()> { + self.hvcall.enable_vtl_protection(HvInputVtl::CURRENT_VTL)?; + log::info!("enabled vtl protections for the partition."); + Ok(()) + } + + /// Switch execution from the current (low) VTL to the next higher + /// one (`vtl_call`). + fn switch_to_high_vtl(&mut self) {} + + /// Return from a high VTL back to the low VTL (`vtl_return`). + fn switch_to_low_vtl(&mut self) {} + + fn set_vp_register_with_vtl( + &mut self, + register_index: u32, + value: u64, + vtl: Vtl, + ) -> TmkResult<()> { + let vtl = vtl_transform(vtl); + let value = AlignedU128::from(value); + let reg_value = HvRegisterValue(value); + self.hvcall + .set_register(hvdef::HvRegisterName(register_index), reg_value, Some(vtl)) + .map_err(|e| e.into()) + } + + fn get_vp_register_with_vtl(&mut self, register_index: u32, vtl: Vtl) -> TmkResult { + let vtl = vtl_transform(vtl); + self.hvcall + .get_register(hvdef::HvRegisterName(register_index), Some(vtl)) + .map(|v| v.as_u64()) + .map_err(|e| e.into()) + } +} + +impl HvTestCtx { + fn get_default_context(&mut self, _vtl: Vtl) -> Result { + unimplemented!("aarch64 not implemented"); + } + + pub(crate) fn get_vp_idx() -> u32 { + unimplemented!() + } +} diff --git a/opentmk/src/platform/hyperv/arch/aarch64/hypercall.rs b/opentmk/src/platform/hyperv/arch/aarch64/hypercall.rs new file mode 100644 index 0000000000..f0b532e8e4 --- /dev/null +++ b/opentmk/src/platform/hyperv/arch/aarch64/hypercall.rs @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use hvdef::Vtl; +use hvdef::hypercall::InitialVpContextArm64; +use zerocopy::IntoBytes; + +use crate::platform::hyperv::arch::hypercall::HvCall; + +impl HvCall { + /// Starts a virtual processor (VP) with the specified VTL and context on aarch64. 
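+ /// If no context is supplied, a zeroed `InitialVpContextArm64` is passed to the hypervisor.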
+ pub fn start_virtual_processor( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::StartVirtualProcessorArm64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + target_vtl: target_vtl.into(), + vp_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), + rsvd0: 0u8, + rsvd1: 0u16, + }; + + header + .write_to_prefix(self.input_page().buffer.as_mut_slice()) + .expect("size of start_virtual_processor header is not correct"); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None); + output.result() + } + + /// Enables a VTL for a specific virtual processor (VP) on aarch64. + pub fn enable_vp_vtl( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::EnableVpVtlArm64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + // The VTL value here is just a u8 and not the otherwise usual + // HvInputVtl value. + target_vtl: target_vtl.into(), + reserved: [0; 3], + vp_vtl_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), + }; + + _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None); + output.result() + } + + /// Placeholder for VTL call on aarch64. + pub fn vtl_call() { + unimplemented!(); + } + + /// Placeholder for VTL return on aarch64. + pub fn vtl_return() { + unimplemented!(); + } +} diff --git a/opentmk/src/platform/hyperv/arch/aarch64/mod.rs b/opentmk/src/platform/hyperv/arch/aarch64/mod.rs new file mode 100644 index 0000000000..d73e4be1f4 --- /dev/null +++ b/opentmk/src/platform/hyperv/arch/aarch64/mod.rs @@ -0,0 +1,5 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +pub mod ctx; +pub mod hypercall; diff --git a/opentmk/src/platform/hyperv/arch/hypercall.rs b/opentmk/src/platform/hyperv/arch/hypercall.rs new file mode 100644 index 0000000000..5406a3d156 --- /dev/null +++ b/opentmk/src/platform/hyperv/arch/hypercall.rs @@ -0,0 +1,272 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Hypercall infrastructure. + +// UNSAFETY: This module contains unsafe code to perform low-level operations such as invoking hypercalls +#![expect(unsafe_code)] + +use core::mem::size_of; +use core::sync::atomic::AtomicU16; +use core::sync::atomic::Ordering; + +use hvdef::HV_PAGE_SIZE; +use hvdef::HvRegisterValue; +use hvdef::HvRegisterVsmPartitionConfig; +use hvdef::HvX64RegisterName; +use hvdef::Vtl; +use hvdef::hypercall::EnablePartitionVtlFlags; +use hvdef::hypercall::HvInputVtl; +use memory_range::MemoryRange; +use minimal_rt::arch::hypercall::invoke_hypercall; +use zerocopy::FromBytes; +use zerocopy::IntoBytes; + +/// Page-aligned, page-sized buffer for use with hypercalls +#[repr(C, align(4096))] +pub(crate) struct HvcallPage { + pub(crate) buffer: [u8; HV_PAGE_SIZE as usize], +} + +impl HvcallPage { + pub const fn new() -> Self { + HvcallPage { + buffer: [0; HV_PAGE_SIZE as usize], + } + } + + /// Address of the hypercall page. + fn address(&self) -> u64 { + let addr = self.buffer.as_ptr() as u64; + // These should be page-aligned + assert!(addr.is_multiple_of(HV_PAGE_SIZE)); + addr + } +} + +/// Hypercall interface. 
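+ ///
+ /// Owns the page-aligned input and output pages used to exchange hypercall data with the hypervisor.
+ ///
+ /// A hypothetical usage sketch (not taken from the tests):
+ ///
+ /// ```ignore
+ /// let mut hv = HvCall::new();
+ /// hv.initialize();
+ /// let current_vtl = hv.vtl();
+ /// ```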
+pub struct HvCall { + pub(crate) input_page: HvcallPage, + pub(crate) output_page: HvcallPage, +} + +static HV_PAGE_INIT_STATUS: AtomicU16 = AtomicU16::new(0); + +impl HvCall { + /// Hypercall to apply vtl protections (NO ACCESS) to the pages from address start to end + pub fn apply_vtl_protections( + &mut self, + range: MemoryRange, + vtl: Vtl, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); + + let header = hvdef::hypercall::ModifyVtlProtectionMask { + partition_id: hvdef::HV_PARTITION_ID_SELF, + map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE, + target_vtl: HvInputVtl::new() + .with_target_vtl_value(vtl.into()) + .with_use_target_vtl(true), + reserved: [0; 3], + }; + + let mut current_page = range.start_4k_gpn(); + while current_page < range.end_4k_gpn() { + let remaining_pages = range.end_4k_gpn() - current_page; + let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let mut input_offset = HEADER_SIZE; + for i in 0..count { + let page_num = current_page + i; + let _ = page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + input_offset += size_of::(); + } + + let output = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallModifyVtlProtectionMask, + Some(count as usize), + ); + + output.result()?; + + current_page += count; + } + + Ok(()) + } + + /// Makes a hypercall. + /// rep_count is Some for rep hypercalls + pub(crate) fn dispatch_hvcall( + &mut self, + code: hvdef::HypercallCode, + rep_count: Option, + ) -> hvdef::hypercall::HypercallOutput { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(code.0) + .with_rep_count(rep_count.unwrap_or_default()); + + // SAFETY: Invoking hypercall per TLFS spec + unsafe { + invoke_hypercall( + control, + self.input_page().address(), + self.output_page().address(), + ) + } + } + + /// Enables a VTL for the specified partition. + pub fn enable_partition_vtl( + &mut self, + partition_id: u64, + target_vtl: Vtl, + ) -> Result<(), hvdef::HvError> { + let flags: EnablePartitionVtlFlags = EnablePartitionVtlFlags::new() + .with_enable_mbec(false) + .with_enable_supervisor_shadow_stack(false); + + let header = hvdef::hypercall::EnablePartitionVtl { + partition_id, + target_vtl: target_vtl.into(), + flags, + reserved_z0: 0, + reserved_z1: 0, + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnablePartitionVtl, None); + match output.result() { + Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), + err => err, + } + } + + /// Enables VTL protection for the specified VTL. + pub fn enable_vtl_protection(&mut self, vtl: HvInputVtl) -> Result<(), hvdef::HvError> { + let mut hvreg: HvRegisterVsmPartitionConfig = HvRegisterVsmPartitionConfig::new(); + hvreg.set_enable_vtl_protection(true); + hvreg.set_default_vtl_protection_mask(0xF); + let bits = hvreg.into_bits(); + let hvre: HvRegisterValue = HvRegisterValue::from(bits); + self.set_register( + HvX64RegisterName::VsmPartitionConfig.into(), + hvre, + Some(vtl), + ) + } + + /// Hypercall for getting a register value. 
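+ /// Issues `HvCallGetVpRegisters` for a single register and reads the result back from the output page.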
+ pub fn get_register( + &mut self, + name: hvdef::HvRegisterName, + vtl: Option, + ) -> Result { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: hvdef::HV_VP_INDEX_SELF, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = name.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallGetVpRegisters, Some(1)); + output.result()?; + let value = HvRegisterValue::read_from_prefix(&self.output_page().buffer).unwrap(); + + Ok(value.0) + } + + /// Initializes the hypercall interface. + pub fn initialize(&mut self) { + let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1); + // This is an idempotent operation, so we can call it multiple times. + // we proceed and initialize the hypercall interface because we don't know the current vtl + // This prohibit us to call this selectively for new VTLs + crate::arch::hypercall::initialize(guest_os_id.into()); + + HV_PAGE_INIT_STATUS.fetch_add(1, Ordering::SeqCst); + } + + /// Returns a mutable reference to the hypercall input page. + pub(crate) fn input_page(&mut self) -> &mut HvcallPage { + &mut self.input_page + } + + /// Creates a new `HvCall` instance. + pub const fn new() -> Self { + HvCall { + input_page: HvcallPage::new(), + output_page: HvcallPage::new(), + } + } + + /// Returns a mutable reference to the hypercall output page. + pub(crate) fn output_page(&mut self) -> &mut HvcallPage { + &mut self.output_page + } + + /// Hypercall for setting a register to a value. + pub fn set_register( + &mut self, + name: hvdef::HvRegisterName, + value: HvRegisterValue, + vtl: Option, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: hvdef::HV_VP_INDEX_SELF, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let reg = hvdef::hypercall::HvRegisterAssoc { + name, + pad: Default::default(), + value, + }; + + let _ = reg.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(1)); + + output.result() + } + + /// call to initialize the hypercall interface + pub fn uninitialize(&mut self) { + crate::arch::hypercall::uninitialize(); + } + + /// Returns the environment's VTL. + pub fn vtl(&mut self) -> Vtl { + self.get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None) + .map_or(Vtl::Vtl0, |status| { + hvdef::HvRegisterVsmVpStatus::from(status.as_u64()) + .active_vtl() + .try_into() + .unwrap() + }) + } +} + +impl Drop for HvCall { + fn drop(&mut self) { + let seq = HV_PAGE_INIT_STATUS.fetch_sub(1, Ordering::SeqCst); + if seq == 0 { + self.uninitialize(); + } + } +} diff --git a/opentmk/src/platform/hyperv/arch/mod.rs b/opentmk/src/platform/hyperv/arch/mod.rs new file mode 100644 index 0000000000..0e0fe5b748 --- /dev/null +++ b/opentmk/src/platform/hyperv/arch/mod.rs @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Hyper-V platform architecture-specific modules. 
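+ //!
+ //! The matching submodule is selected at compile time based on the target architecture.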
+ +pub mod hypercall; + +cfg_if::cfg_if!( + if #[cfg(target_arch = "x86_64")] { // xtask-fmt allow-target-arch sys-crate + mod x86_64; + pub use x86_64::*; + } else if #[cfg(target_arch = "aarch64")] { // xtask-fmt allow-target-arch sys-crate + mod aarch64; + pub use aarch64::*; + } else { + compile_error!("target_arch is not supported"); + } +); diff --git a/opentmk/src/platform/hyperv/arch/x86_64/ctx.rs b/opentmk/src/platform/hyperv/arch/x86_64/ctx.rs new file mode 100644 index 0000000000..649bdd1373 --- /dev/null +++ b/opentmk/src/platform/hyperv/arch/x86_64/ctx.rs @@ -0,0 +1,456 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! x86_64-specific implementation of Hyper-V test context implementation + +use alloc::alloc::alloc; +use alloc::boxed::Box; +use core::alloc::Layout; +use core::arch::asm; +use core::ops::Range; +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +use hvdef::hypercall::InitialVpContextX64; + +use hvdef::AlignedU128; +use hvdef::HvRegisterValue; +use hvdef::HvX64RegisterName; +use hvdef::Vtl; +use hvdef::hypercall::HvInputVtl; +use memory_range::MemoryRange; +use minimal_rt::arch::msr::read_msr; +use minimal_rt::arch::msr::write_msr; + +#[cfg(nightly)] +use crate::context::InterruptPlatformTrait; +use crate::context::MsrPlatformTrait; +#[cfg(nightly)] +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecToken; +use crate::context::VtlPlatformTrait; +use crate::platform::hyperv::arch::hypercall::HvCall; +use crate::platform::hyperv::ctx::HvTestCtx; +use crate::platform::hyperv::ctx::cmdt; +use crate::platform::hyperv::ctx::get_vp_set; +use crate::platform::hyperv::ctx::vtl_transform; +use crate::tmkdefs::TmkError; +use crate::tmkdefs::TmkResult; + +#[cfg(nightly)] +impl SecureInterceptPlatformTrait for HvTestCtx { + /// Configure the Secure Interrupt Message Page (SIMP) and the first + /// SynIC interrupt (SINT0) so that the hypervisor can vector + /// hypervisor side notifications back to the guest. + fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()> { + let layout = Layout::from_size_align(4096, 4096).map_err(|_| TmkError::AllocationFailed)?; + + // SAFETY: the pointer is managed carefully and is not deallocated until the end of the test. + let ptr = unsafe { alloc(layout) }; + let gpn = (ptr as u64) >> 12; + let reg = (gpn << 12) | 0x1; + + // SAFETY: we are writing to a valid MSR. + unsafe { self.write_msr(hvdef::HV_X64_MSR_SIMP, reg)? }; + log::info!("Successfully set the SIMP register."); + + // SAFETY: we are writing to a valid MSR. + let reg = unsafe { self.read_msr(hvdef::HV_X64_MSR_SINT0)? }; + let mut reg: hvdef::HvSynicSint = reg.into(); + reg.set_vector(interrupt_idx); + reg.set_masked(false); + reg.set_auto_eoi(true); + + // SAFETY: we are writing to a valid MSR. + unsafe { self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into())? }; + log::info!("Successfully set the SINT0 register."); + Ok(()) + } +} + +#[cfg(nightly)] +impl InterruptPlatformTrait for HvTestCtx { + /// Install an interrupt handler for the supplied vector on x86-64. 
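+ /// Registers `handler` for `interrupt_idx` via the in-guest interrupt support in `crate::arch::interrupt`.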
+ fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()> { + crate::arch::interrupt::set_handler(interrupt_idx, handler); + Ok(()) + } + + /// Initialise the minimal in-guest interrupt infrastructure + fn setup_interrupt_handler(&mut self) -> TmkResult<()> { + crate::arch::interrupt::init(); + Ok(()) + } +} + +impl MsrPlatformTrait for HvTestCtx { + /// Read an MSR directly from the CPU and return the raw value. + unsafe fn read_msr(&mut self, msr: u32) -> TmkResult { + // SAFETY: tests should only read to valid MSRs. Caller must ensure safety. + let r = unsafe { read_msr(msr) }; + Ok(r) + } + + /// Write an MSR directly on the CPU. + unsafe fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()> { + // SAFETY: tests should only write to valid MSRs. Caller must ensure safety. + unsafe { write_msr(msr, value) }; + Ok(()) + } +} + +impl VirtualProcessorPlatformTrait for HvTestCtx { + /// Fetch the content of the specified architectural register from + /// the current VTL for the executing VP. + fn get_register(&mut self, reg: u32) -> TmkResult { + let reg = HvX64RegisterName(reg); + let val = self.hvcall.get_register(reg.into(), None)?.as_u128(); + Ok(val) + } + + /// Set the architecture specific register identified by `reg`. + fn set_register(&mut self, reg: u32, val: u128) -> TmkResult<()> { + let reg = HvX64RegisterName(reg); + let value = HvRegisterValue::from(val); + self.hvcall.set_register(reg.into(), value, None)?; + + Ok(()) + } + + /// Return the number of logical processors present in the machine + fn get_vp_count(&self) -> TmkResult { + // TODO: use ACPI to get the actual count + Ok(4) + } + + /// Push a command onto the per-VP linked-list so it will be executed + /// by the busy-loop running in `exec_handler`. No scheduling happens + /// here – we simply enqueue. + fn queue_command_vp(&mut self, cmd: VpExecToken) -> TmkResult<()> { + let (vp_index, vtl, cmd) = cmd.get(); + let cmd = cmd.ok_or(TmkError::QueueCommandFailed)?; + cmdt() + .lock() + .get_mut(&vp_index) + .unwrap() + .push_back((cmd, vtl)); + Ok(()) + } + + #[inline(never)] + /// Ensure the target VP is running in the requested VTL and queue + /// the command for execution. + /// – If the VP is not yet running, it is started with a default + /// context. + /// – If the command targets a different VTL than the current one, + /// control is switched via `vtl_call` / `vtl_return` so that the + /// executor loop can pick the command up. + /// in short every VP acts as an executor engine and + /// spins in `exec_handler` waiting for work. 
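+ ///
+ /// A hypothetical usage sketch, mirroring how the tests drive this API:
+ ///
+ /// ```ignore
+ /// // Queue a closure to run on VP 1 in VTL0; the VP is started on demand.
+ /// ctx.start_on_vp(VpExecToken::new(1, Vtl::Vtl0).command(move |ctx: &mut HvTestCtx| {
+ ///     log::info!("running on VP {:?}", ctx.get_current_vp());
+ /// }))?;
+ /// ```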
+ fn start_on_vp(&mut self, cmd: VpExecToken) -> TmkResult<()> { + let (vp_index, vtl, cmd) = cmd.get(); + let cmd = cmd.ok_or(TmkError::InvalidParameter)?; + if vtl >= Vtl::Vtl2 { + return Err(TmkError::InvalidParameter); + } + let is_vp_running = get_vp_set().lock().get(&vp_index).cloned(); + if let Some(_running_vtl) = is_vp_running { + log::debug!("both vtl0 and vtl1 are running for VP: {:?}", vp_index); + } else { + if vp_index == 0 { + let vp_context = self.get_default_context(Vtl::Vtl1)?; + self.hvcall.enable_vp_vtl(0, Vtl::Vtl1, Some(vp_context))?; + + cmdt().lock().get_mut(&vp_index).unwrap().push_back(( + Box::new(move |ctx| { + ctx.switch_to_low_vtl(); + }), + Vtl::Vtl1, + )); + self.switch_to_high_vtl(); + get_vp_set().lock().insert(vp_index); + } else { + let (tx, rx) = nostd_spin_channel::Channel::>::new().split(); + let self_vp_idx = self.my_vp_idx; + cmdt().lock().get_mut(&self_vp_idx).unwrap().push_back(( + Box::new(move |ctx| { + log::debug!("starting VP{} in VTL1 of vp{}", vp_index, self_vp_idx); + let r = ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1); + if r.is_err() { + log::error!("failed to enable VTL1 for VP{}: {:?}", vp_index, r); + let _ = tx.send(r); + return; + } + log::debug!("successfully enabled VTL1 for VP{}", vp_index); + let r = ctx.start_running_vp_with_default_context(VpExecToken::new( + vp_index, + Vtl::Vtl0, + )); + if r.is_err() { + log::error!("failed to start VP{}: {:?}", vp_index, r); + let _ = tx.send(r); + return; + } + log::debug!("successfully started VP{}", vp_index); + let _ = tx.send(Ok(())); + ctx.switch_to_low_vtl(); + }), + Vtl::Vtl1, + )); + self.switch_to_high_vtl(); + let rx = rx.recv(); + if let Ok(r) = rx { + r?; + } + get_vp_set().lock().insert(vp_index); + } + } + cmdt() + .lock() + .get_mut(&vp_index) + .unwrap() + .push_back((cmd, vtl)); + + if vp_index == self.my_vp_idx && self.my_vtl != vtl { + if vtl == Vtl::Vtl0 { + self.switch_to_low_vtl(); + } else { + self.switch_to_high_vtl(); + } + } + Ok(()) + } + + /// Start the given VP in the current VTL using a freshly captured + /// context. + fn start_running_vp_with_default_context( + &mut self, + cmd: VpExecToken, + ) -> TmkResult<()> { + let (vp_index, vtl, _cmd) = cmd.get(); + let vp_ctx = self.get_default_context(vtl)?; + self.hvcall + .start_virtual_processor(vp_index, vtl, Some(vp_ctx))?; + Ok(()) + } + + /// Return the index of the VP that is currently executing this code. + fn get_current_vp(&self) -> TmkResult { + Ok(self.my_vp_idx) + } + + fn set_register_vtl(&mut self, reg: u32, value: u128, vtl: Vtl) -> TmkResult<()> { + let reg = HvX64RegisterName(reg); + let value = HvRegisterValue::from(value); + self.hvcall + .set_register(reg.into(), value, Some(vtl_transform(vtl)))?; + + Ok(()) + } + + fn get_register_vtl(&mut self, reg: u32, vtl: Vtl) -> TmkResult { + let reg = HvX64RegisterName(reg); + let val = self + .hvcall + .get_register(reg.into(), Some(vtl_transform(vtl)))? + .as_u128(); + Ok(val) + } +} + +impl VtlPlatformTrait for HvTestCtx { + /// Apply VTL protections to the supplied GPA range so that only the + /// provided VTL can access it. + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()> { + self.hvcall + .apply_vtl_protections(MemoryRange::new(range), vtl)?; + Ok(()) + } + + /// Enable the specified VTL on a VP and seed it with a default + /// context captured from the current execution environment. 
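+ /// The seeded context comes from `get_default_context`, so the target VTL enters the shared executor loop.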
+ fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { + let vp_ctx = self.get_default_context(vtl)?; + self.hvcall.enable_vp_vtl(vp_index, vtl, Some(vp_ctx))?; + Ok(()) + } + + /// Return the VTL in which the current code is running. + fn get_current_vtl(&self) -> TmkResult { + Ok(self.my_vtl) + } + + /// Enable VTL support for the entire partition. + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()> { + self.hvcall + .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl)?; + log::info!("enabled vtl protections for the partition."); + Ok(()) + } + + /// Turn on VTL protections for the currently running VTL. + fn setup_vtl_protection(&mut self) -> TmkResult<()> { + self.hvcall.enable_vtl_protection(HvInputVtl::CURRENT_VTL)?; + log::info!("enabled vtl protections for the partition."); + Ok(()) + } + + /// Switch execution from the current (low) VTL to the next higher + /// one (`vtl_call`). + #[inline(never)] + fn switch_to_high_vtl(&mut self) { + // SAFETY: we are calling a valid function that switches to high VTL. With valid instructions + // to save restore register states. + unsafe { + asm!( + " + push rax + push rbx + push rcx + push rdx + push rdi + push rsi + push rbp + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + call {call_address} + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rbp + pop rsi + pop rdi + pop rdx + pop rcx + pop rbx + pop rax", + call_address = sym HvCall::vtl_call, + ); + } + } + + /// Return from a high VTL back to the low VTL (`vtl_return`). + #[inline(never)] + fn switch_to_low_vtl(&mut self) { + // SAFETY: we are calling a valid function that switches to low VTL. With valid instructions + // to save restore register states. + unsafe { + asm!( + " + push rax + push rbx + push rcx + push rdx + push rdi + push rsi + push rbp + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + call {call_address} + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rbp + pop rsi + pop rdi + pop rdx + pop rcx + pop rbx + pop rax", + call_address = sym HvCall::vtl_return, + ); + } + } + + // Set the state of a virtual processor (VP) with the specified VTL. + fn set_vp_register_with_vtl( + &mut self, + register_index: u32, + value: u64, + vtl: Vtl, + ) -> TmkResult<()> { + let vtl = vtl_transform(vtl); + let value = AlignedU128::from(value); + let reg_value = HvRegisterValue(value); + self.hvcall + .set_register(hvdef::HvRegisterName(register_index), reg_value, Some(vtl)) + .map_err(|e| e.into()) + } + + fn get_vp_register_with_vtl(&mut self, register_index: u32, vtl: Vtl) -> TmkResult { + let vtl = vtl_transform(vtl); + self.hvcall + .get_register(hvdef::HvRegisterName(register_index), Some(vtl)) + .map(|v| v.as_u64()) + .map_err(|e| e.into()) + } +} + +impl HvTestCtx { + /// Return the index of the VP that is currently executing this code. + pub(crate) fn get_vp_idx() -> u32 { + // SAFETY: we are executing a valid CPUID instruction. + let result = unsafe { core::arch::x86_64::__cpuid(0x1) }; + (result.ebx >> 24) & 0xFF + } + + /// Capture the current VP context, patch the entry point and stack + /// so that the new VP starts in `exec_handler`. 
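+ /// VTL0 targets enter `general_exec_handler` and VTL1 targets enter `secure_exec_handler`,
+ /// each on a freshly allocated 1 MiB stack; any other VTL is rejected as an invalid parameter.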
+ pub(crate) fn get_default_context( + &mut self, + vtl: Vtl, + ) -> Result { + let handler = match vtl { + Vtl::Vtl0 => HvTestCtx::general_exec_handler, + Vtl::Vtl1 => HvTestCtx::secure_exec_handler, + _ => return Err(TmkError::InvalidParameter), + }; + self.exec_fn_with_current_context(handler) + } + + /// Helper to return an arbitrary function with a captured VP context + /// that can later be used to start a new VP/VTL instance. + fn exec_fn_with_current_context( + &mut self, + func: fn(), + ) -> Result { + let mut vp_context: InitialVpContextX64 = self + .hvcall + .get_current_vtl_vp_context() + .expect("Failed to get current VTL context"); + let stack_layout = Layout::from_size_align(1024 * 1024, 16) + .expect("Failed to create layout for stack allocation"); + // SAFETY: the pointer is managed carefully and is not deallocated until the end of the test. + let allocated_stack_ptr = unsafe { alloc(stack_layout) }; + if allocated_stack_ptr.is_null() { + return Err(TmkError::AllocationFailed); + } + let stack_size = stack_layout.size(); + let stack_top = allocated_stack_ptr as u64 + stack_size as u64; + let fn_address = func as usize as u64; + vp_context.rip = fn_address; + vp_context.rsp = stack_top; + Ok(vp_context) + } +} diff --git a/opentmk/src/platform/hyperv/arch/x86_64/hypercall.rs b/opentmk/src/platform/hyperv/arch/x86_64/hypercall.rs new file mode 100644 index 0000000000..3b3f2b9ace --- /dev/null +++ b/opentmk/src/platform/hyperv/arch/x86_64/hypercall.rs @@ -0,0 +1,207 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// UNSAFETY: This module contains unsafe code to perform low-level operations such as invoking hypercalls +#![expect(unsafe_code)] + +use core::arch::asm; + +use hvdef::Vtl; +use hvdef::hypercall::InitialVpContextX64; +use zerocopy::IntoBytes; + +use crate::platform::hyperv::arch::hypercall::HvCall; + +// avoiding inline for debuggability in release builds. +#[inline(never)] +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch cpu-intrinsic +/// Invokes a hypercall specifically for switching to a VTL context. +/// +/// # Safety +/// The caller must ensure that the hypercall is invoked in a context where it is safe to do so. +unsafe fn invoke_hypercall_vtl(control: hvdef::hypercall::Control) { + // SAFETY: the caller guarantees the safety of this operation. + unsafe { + core::arch::asm! { + "call {hypercall_page}", + hypercall_page = sym minimal_rt::arch::hypercall::HYPERCALL_PAGE, + inout("rcx") u64::from(control) => _, + in("rdx") 0, + in("rax") 0, + } + } +} + +impl HvCall { + /// Starts a virtual processor (VP) with the specified VTL and context on x86_64. + pub fn start_virtual_processor( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::StartVirtualProcessorX64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + target_vtl: target_vtl.into(), + vp_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), + rsvd0: 0u8, + rsvd1: 0u16, + }; + + header + .write_to_prefix(self.input_page().buffer.as_mut()) + .expect("size of start_virtual_processor header is not correct"); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None); + output.result() + } + + /// Enables a VTL for a specific virtual processor (VP) on x86_64. 
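+ /// As with `start_virtual_processor`, a zeroed context is used when none is provided.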
+ pub fn enable_vp_vtl( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::EnableVpVtlX64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + target_vtl: target_vtl.into(), + reserved: [0; 3], + vp_vtl_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), + }; + + header + .write_to_prefix(self.input_page().buffer.as_mut_slice()) + .expect("size of enable_vp_vtl header is not correct"); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None); + output.result() + } + + /// Retrieves the current VTL context by reading the necessary registers. + pub fn get_current_vtl_vp_context(&mut self) -> Result { + use minimal_rt::arch::msr::read_msr; + use zerocopy::FromZeros; + let mut context: InitialVpContextX64 = FromZeros::new_zeroed(); + + let rsp: u64; + // SAFETY: we are reading the stack pointer register. + unsafe { asm!("mov {0:r}, rsp", out(reg) rsp, options(nomem, nostack)) }; + + let cr0; + // SAFETY: we are reading the control register. + unsafe { asm!("mov {0:r}, cr0", out(reg) cr0, options(nomem, nostack)) }; + let cr3; + // SAFETY: we are reading the control register. + unsafe { asm!("mov {0:r}, cr3", out(reg) cr3, options(nomem, nostack)) }; + let cr4; + // SAFETY: we are reading the control register. + unsafe { asm!("mov {0:r}, cr4", out(reg) cr4, options(nomem, nostack)) }; + + let rflags: u64; + // SAFETY: we are reading the rflags register. + unsafe { + asm!( + "pushfq", + "pop {0}", + out(reg) rflags, + ); + } + + context.cr0 = cr0; + context.cr3 = cr3; + context.cr4 = cr4; + + context.rsp = rsp; + context.rip = 0; + + context.rflags = rflags; + + // load segment registers + + let cs: u16; + let ss: u16; + let ds: u16; + let es: u16; + let fs: u16; + let gs: u16; + + // SAFETY: we are reading the segment registers. + unsafe { + asm!(" + mov {0:x}, cs + mov {1:x}, ss + mov {2:x}, ds + mov {3:x}, es + mov {4:x}, fs + mov {5:x}, gs + ", out(reg) cs, out(reg) ss, out(reg) ds, out(reg) es, out(reg) fs, out(reg) gs, options(nomem, nostack)) + } + + context.cs.selector = cs; + context.cs.attributes = 0xA09B; + context.cs.limit = 0xFFFFFFFF; + + context.ss.selector = ss; + context.ss.attributes = 0xC093; + context.ss.limit = 0xFFFFFFFF; + + context.ds.selector = ds; + context.ds.attributes = 0xC093; + context.ds.limit = 0xFFFFFFFF; + + context.es.selector = es; + context.es.attributes = 0xC093; + context.es.limit = 0xFFFFFFFF; + + context.fs.selector = fs; + context.fs.attributes = 0xC093; + context.fs.limit = 0xFFFFFFFF; + + context.gs.selector = gs; + context.gs.attributes = 0xC093; + context.gs.limit = 0xFFFFFFFF; + + context.tr.selector = 0; + context.tr.attributes = 0x8B; + context.tr.limit = 0xFFFF; + + let idt = x86_64::instructions::tables::sidt(); + context.idtr.base = idt.base.as_u64(); + context.idtr.limit = idt.limit; + + let gdtr = x86_64::instructions::tables::sgdt(); + context.gdtr.base = gdtr.base.as_u64(); + context.gdtr.limit = gdtr.limit; + + // SAFETY: we are reading a valid MSR. + let efer = unsafe { read_msr(0xC0000080) }; + context.efer = efer; + + Ok(context) + } + + // avoiding inline for debuggability in release builds. + #[inline(never)] + /// Invokes the VtlCall hypercall. 
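+ /// Execution continues in the next higher VTL and resumes here after that VTL performs a VTL return.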
+ pub(crate) fn vtl_call() { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(hvdef::HypercallCode::HvCallVtlCall.0) + .with_rep_count(0); + // SAFETY: This is safe because we are calling a hypercall with a valid control structure. + unsafe { invoke_hypercall_vtl(control) }; + } + + // avoiding inline for debuggability in release builds. + #[inline(never)] + /// Invokes the VtlReturn hypercall. + pub(crate) fn vtl_return() { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(hvdef::HypercallCode::HvCallVtlReturn.0) + .with_rep_count(0); + // SAFETY: This is safe because we are calling a hypercall with a valid control structure. + unsafe { invoke_hypercall_vtl(control) }; + } +} diff --git a/opentmk/src/platform/hyperv/arch/x86_64/mod.rs b/opentmk/src/platform/hyperv/arch/x86_64/mod.rs new file mode 100644 index 0000000000..d73e4be1f4 --- /dev/null +++ b/opentmk/src/platform/hyperv/arch/x86_64/mod.rs @@ -0,0 +1,5 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +pub mod ctx; +pub mod hypercall; diff --git a/opentmk/src/platform/hyperv/ctx.rs b/opentmk/src/platform/hyperv/ctx.rs new file mode 100644 index 0000000000..4588e7fce4 --- /dev/null +++ b/opentmk/src/platform/hyperv/ctx.rs @@ -0,0 +1,240 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Hyper-V test context implementation. + +// vp_set is only used in x86_64 for now, since aarch support is not complete +#![cfg_attr(target_arch = "aarch64", expect(dead_code))] // xtask-fmt allow-target-arch sys-crate +use alloc::boxed::Box; +use alloc::collections::btree_map::BTreeMap; +use alloc::collections::btree_set::BTreeSet; +use alloc::collections::linked_list::LinkedList; +use core::fmt::Display; + +use hvdef::Vtl; +use hvdef::hypercall::HvInputVtl; +use spin::Mutex; + +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VtlPlatformTrait; +use crate::platform::hyperv::arch::hypercall::HvCall; +use crate::tmkdefs::TmkError; +use crate::tmkdefs::TmkResult; + +type CommandTable = BTreeMap, Vtl)>>; +static mut CMD: Mutex = Mutex::new(BTreeMap::new()); +static VP_SET: Mutex> = Mutex::new(BTreeSet::new()); + +#[expect(static_mut_refs)] +pub(crate) fn cmdt() -> &'static Mutex { + // SAFETY: CMD is only mutated through safe APIs and is protected by a Mutex. + unsafe { &CMD } +} + +pub(crate) fn get_vp_set() -> &'static Mutex> { + // SAFETY: VP_SET is only mutated through safe APIs and is protected by a Mutex. + &VP_SET +} + +fn register_command_queue(vp_index: u32) { + log::trace!("registering command queue for vp: {}", vp_index); + if cmdt().lock().get(&vp_index).is_none() { + cmdt().lock().insert(vp_index, LinkedList::new()); + log::trace!("registered command queue for vp: {}", vp_index); + } else { + log::trace!("command queue already registered for vp: {}", vp_index); + } +} + +/// The execution context passed to the test functions. +pub struct HvTestCtx { + /// The hypercall interface. + /// Exposed publicly for test code to make hypercalls in specialized cases. + pub hvcall: HvCall, + /// The index of the VP on which this context is running. + pub my_vp_idx: u32, + /// The VTL on which this context is running. 
+ pub my_vtl: Vtl, +} + +impl Display for HvTestCtx { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "HvTestCtx {{ vp_idx: {}, vtl: {:?} }}", + self.my_vp_idx, self.my_vtl + ) + } +} + +pub(crate) fn vtl_transform(vtl: Vtl) -> HvInputVtl { + let vtl = match vtl { + Vtl::Vtl0 => 0, + Vtl::Vtl1 => 1, + Vtl::Vtl2 => 2, + }; + HvInputVtl::new() + .with_target_vtl_value(vtl) + .with_use_target_vtl(true) +} + +#[cfg_attr(target_arch = "aarch64", expect(dead_code))] // xtask-fmt allow-target-arch sys-crate +impl HvTestCtx { + /// Construct an *un-initialised* test context. + /// Call [`HvTestCtx::init`] before using the value. + pub const fn new() -> Self { + HvTestCtx { + hvcall: HvCall::new(), + my_vp_idx: 0, + my_vtl: Vtl::Vtl0, + } + } + + /// Perform the one-time initialisation sequence: + /// – initialise the hypercall page, + /// – discover the VP count and create command queues, + /// – record the current VTL. + pub fn init(&mut self, vtl: Vtl) -> TmkResult<()> { + self.hvcall.initialize(); + let vp_count = self.get_vp_count()?; + for i in 0..vp_count { + register_command_queue(i); + } + self.my_vtl = vtl; + self.my_vp_idx = Self::get_vp_idx(); + Ok(()) + } + + pub(crate) fn secure_exec_handler() { + HvTestCtx::exec_handler(Vtl::Vtl1); + } + + pub(crate) fn general_exec_handler() { + HvTestCtx::exec_handler(Vtl::Vtl0); + } + + /// Busy-loop executor that runs on every VP. + /// Extracts commands from the per-VP queue and executes them in the + /// appropriate VTL, switching VTLs when necessary. + fn exec_handler(vtl: Vtl) { + let mut ctx = HvTestCtx::new(); + ctx.init(vtl).expect("error: failed to init on a VP"); + loop { + let mut vtl: Option = None; + let mut cmd: Option> = None; + + { + let mut cmdt = cmdt().lock(); + let d = cmdt.get_mut(&ctx.my_vp_idx); + if let Some(d) = d { + if !d.is_empty() { + let (_c, v) = d.front().unwrap(); + if *v == ctx.my_vtl { + let (c, _v) = d.pop_front().unwrap(); + cmd = Some(c); + } else { + vtl = Some(*v); + } + } + } + } + + if let Some(vtl) = vtl { + if vtl == Vtl::Vtl0 { + ctx.switch_to_low_vtl(); + } else { + ctx.switch_to_high_vtl(); + } + } + + if let Some(cmd) = cmd { + cmd(&mut ctx); + } + } + } +} + +impl From for TmkError { + fn from(e: hvdef::HvError) -> Self { + log::debug!("Converting hvdef::HvError::{:?} to TmkError", e); + let tmk_error_type = match e { + hvdef::HvError::InvalidHypercallCode => TmkError::InvalidHypercallCode, + hvdef::HvError::InvalidHypercallInput => TmkError::InvalidHypercallInput, + hvdef::HvError::InvalidAlignment => TmkError::InvalidAlignment, + hvdef::HvError::InvalidParameter => TmkError::InvalidParameter, + hvdef::HvError::AccessDenied => TmkError::AccessDenied, + hvdef::HvError::InvalidPartitionState => TmkError::InvalidPartitionState, + hvdef::HvError::OperationDenied => TmkError::OperationDenied, + hvdef::HvError::UnknownProperty => TmkError::UnknownProperty, + hvdef::HvError::PropertyValueOutOfRange => TmkError::PropertyValueOutOfRange, + hvdef::HvError::InsufficientMemory => TmkError::InsufficientMemory, + hvdef::HvError::PartitionTooDeep => TmkError::PartitionTooDeep, + hvdef::HvError::InvalidPartitionId => TmkError::InvalidPartitionId, + hvdef::HvError::InvalidVpIndex => TmkError::InvalidVpIndex, + hvdef::HvError::NotFound => TmkError::NotFound, + hvdef::HvError::InvalidPortId => TmkError::InvalidPortId, + hvdef::HvError::InvalidConnectionId => TmkError::InvalidConnectionId, + hvdef::HvError::InsufficientBuffers => TmkError::InsufficientBuffers, + 
hvdef::HvError::NotAcknowledged => TmkError::NotAcknowledged, + hvdef::HvError::InvalidVpState => TmkError::InvalidVpState, + hvdef::HvError::Acknowledged => TmkError::Acknowledged, + hvdef::HvError::InvalidSaveRestoreState => TmkError::InvalidSaveRestoreState, + hvdef::HvError::InvalidSynicState => TmkError::InvalidSynicState, + hvdef::HvError::ObjectInUse => TmkError::ObjectInUse, + hvdef::HvError::InvalidProximityDomainInfo => TmkError::InvalidProximityDomainInfo, + hvdef::HvError::NoData => TmkError::NoData, + hvdef::HvError::Inactive => TmkError::Inactive, + hvdef::HvError::NoResources => TmkError::NoResources, + hvdef::HvError::FeatureUnavailable => TmkError::FeatureUnavailable, + hvdef::HvError::PartialPacket => TmkError::PartialPacket, + hvdef::HvError::ProcessorFeatureNotSupported => TmkError::ProcessorFeatureNotSupported, + hvdef::HvError::ProcessorCacheLineFlushSizeIncompatible => { + TmkError::ProcessorCacheLineFlushSizeIncompatible + } + hvdef::HvError::InsufficientBuffer => TmkError::InsufficientBuffer, + hvdef::HvError::IncompatibleProcessor => TmkError::IncompatibleProcessor, + hvdef::HvError::InsufficientDeviceDomains => TmkError::InsufficientDeviceDomains, + hvdef::HvError::CpuidFeatureValidationError => TmkError::CpuidFeatureValidationError, + hvdef::HvError::CpuidXsaveFeatureValidationError => { + TmkError::CpuidXsaveFeatureValidationError + } + hvdef::HvError::ProcessorStartupTimeout => TmkError::ProcessorStartupTimeout, + hvdef::HvError::SmxEnabled => TmkError::SmxEnabled, + hvdef::HvError::InvalidLpIndex => TmkError::InvalidLpIndex, + hvdef::HvError::InvalidRegisterValue => TmkError::InvalidRegisterValue, + hvdef::HvError::InvalidVtlState => TmkError::InvalidVtlState, + hvdef::HvError::NxNotDetected => TmkError::NxNotDetected, + hvdef::HvError::InvalidDeviceId => TmkError::InvalidDeviceId, + hvdef::HvError::InvalidDeviceState => TmkError::InvalidDeviceState, + hvdef::HvError::PendingPageRequests => TmkError::PendingPageRequests, + hvdef::HvError::PageRequestInvalid => TmkError::PageRequestInvalid, + hvdef::HvError::KeyAlreadyExists => TmkError::KeyAlreadyExists, + hvdef::HvError::DeviceAlreadyInDomain => TmkError::DeviceAlreadyInDomain, + hvdef::HvError::InvalidCpuGroupId => TmkError::InvalidCpuGroupId, + hvdef::HvError::InvalidCpuGroupState => TmkError::InvalidCpuGroupState, + hvdef::HvError::OperationFailed => TmkError::OperationFailed, + hvdef::HvError::NotAllowedWithNestedVirtActive => { + TmkError::NotAllowedWithNestedVirtActive + } + hvdef::HvError::InsufficientRootMemory => TmkError::InsufficientRootMemory, + hvdef::HvError::EventBufferAlreadyFreed => TmkError::EventBufferAlreadyFreed, + hvdef::HvError::Timeout => TmkError::Timeout, + hvdef::HvError::VtlAlreadyEnabled => TmkError::VtlAlreadyEnabled, + hvdef::HvError::UnknownRegisterName => TmkError::UnknownRegisterName, + // Add any other specific mappings here if hvdef::HvError has more variants + _ => { + log::warn!( + "Unhandled hvdef::HvError variant: {:?}. Mapping to TmkError::OperationFailed.", + e + ); + TmkError::OperationFailed // Generic fallback + } + }; + log::debug!( + "Mapped hvdef::HvError::{:?} to TmkError::{:?}", + e, + tmk_error_type + ); + tmk_error_type + } +} diff --git a/opentmk/src/platform/hyperv/mod.rs b/opentmk/src/platform/hyperv/mod.rs new file mode 100644 index 0000000000..4c26637b70 --- /dev/null +++ b/opentmk/src/platform/hyperv/mod.rs @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Hyper-V platform modules. +//! 
This module provides the architecture-specific implementations for the Hyper-V platform. +//! It also includes platform implementation for test context management. +//! + +pub mod arch; +pub mod ctx; diff --git a/opentmk/src/platform/mod.rs b/opentmk/src/platform/mod.rs new file mode 100644 index 0000000000..8403c574f3 --- /dev/null +++ b/opentmk/src/platform/mod.rs @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Platform-specific modules for OpenTMK. + +pub mod hyperv; diff --git a/opentmk/src/tests/hyperv/hv_error_vp_start.rs b/opentmk/src/tests/hyperv/hv_error_vp_start.rs new file mode 100644 index 0000000000..f5f3db9f35 --- /dev/null +++ b/opentmk/src/tests/hyperv/hv_error_vp_start.rs @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use hvdef::Vtl; +use nostd_spin_channel::Channel; + +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecToken; +use crate::context::VtlPlatformTrait; +use crate::tmk_assert; + +/// Executes a series of negitive tests to validate VTL and VP functionalities. +pub fn exec(ctx: &mut T) +where + T: VtlPlatformTrait + VirtualProcessorPlatformTrait, +{ + // Skipping VTL setup for now to test the negative case + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + + // Testing BSP VTL1 Bringup + { + let (tx, _rx) = Channel::new().split(); + + let result = ctx.start_on_vp(VpExecToken::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == 0, "vp should be equal to 0"); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); + tx.send(()) + .expect("Failed to send message through the channel"); + ctx.switch_to_low_vtl(); + })); + + tmk_assert!(result.is_err(), "start_on_vp should fail"); + tmk_assert!( + result.unwrap_err() == crate::tmkdefs::TmkError::InvalidVtlState, + "start_on_vp should fail with InvalidVtlState" + ); + log::info!("result on start_on_vp: {:?}", result); + } +} diff --git a/opentmk/src/tests/hyperv/hv_memory_protect_read.rs b/opentmk/src/tests/hyperv/hv_memory_protect_read.rs new file mode 100644 index 0000000000..438b6d746b --- /dev/null +++ b/opentmk/src/tests/hyperv/hv_memory_protect_read.rs @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use alloc::alloc::alloc; +use core::alloc::Layout; +use core::arch::asm; +use core::cell::RefCell; +use core::ops::Range; + +use hvdef::Vtl; +use nostd_spin_channel::Channel; + +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecToken; +use crate::context::VtlPlatformTrait; +use crate::create_function_with_restore; +use crate::tmk_assert; + +static mut HEAP_ALLOC_PTR: RefCell<*mut u8> = RefCell::new(0 as *mut u8); + +static mut RETURN_VALUE: u8 = 0; + +// Without inline the compiler may optimize away the call and the VTL switch may +// distort the architectural registers +#[inline(never)] +#[expect(static_mut_refs)] +// writing to a static generates a warning. 
we safely handle RETURN_VALUE so ignoring it here. +fn violate_heap() { + unsafe { + let alloc_ptr = *HEAP_ALLOC_PTR.borrow(); + // after a VTL switch we can't trust the value returned by eax + RETURN_VALUE = *(alloc_ptr.add(10)); + } +} +create_function_with_restore!(f_violate_heap, violate_heap); + +/// Executes a series of tests to validate memory protection between VTLs. +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + log::info!("set intercept handler successfully!"); + + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let r = ctx.start_on_vp(VpExecToken::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + let r = ctx.set_interrupt_idx(0x30, move || { + log::info!("interrupt handled for 0x30!"); + }); + tmk_assert!(r.is_ok(), "set_interrupt_idx should succeed"); + + let layout = + Layout::from_size_align(1024 * 1024, 4096).expect("msg: failed to create layout"); + // SAFETY: we are allocating memory to heap, we don't free it in this test. + let ptr = unsafe { alloc(layout) }; + log::info!("allocated some memory in the heap from vtl1"); + + #[expect(static_mut_refs)] + // writing to a static generates a warning. we safely handle HEAP_ALLOC_PTR so ignoring it here. + unsafe { + let mut z = HEAP_ALLOC_PTR.borrow_mut(); + *z = ptr; + *ptr.add(10) = 0xA2; + } + + let size = layout.size(); + let r = ctx.setup_vtl_protection(); + tmk_assert!(r.is_ok(), "setup_vtl_protection should succeed"); + log::info!("enabled vtl protections for the partition."); + + let range = Range { + start: ptr as u64, + end: ptr as u64 + size as u64, + }; + + let r = ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); + tmk_assert!(r.is_ok(), "apply_vtl_protection_for_memory should succeed"); + + log::info!("moving to vtl0 to attempt to read the heap memory"); + + ctx.switch_to_low_vtl(); + })); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + let (tx, rx) = Channel::new().split(); + + let r = ctx.start_on_vp( + VpExecToken::new(0x2, Vtl::Vtl1).command(move |ctx: &mut T| { + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + log::info!("successfully started running VTL1 on vp2."); + }), + ); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + let r = ctx.start_on_vp( + VpExecToken::new(0x2, Vtl::Vtl0).command(move |ctx: &mut T| { + log::info!("successfully started running VTL0 on vp2."); + + let r = + ctx.queue_command_vp(VpExecToken::new(2, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("after intercept successfully started running VTL1 on vp2."); + ctx.switch_to_low_vtl(); + })); + tmk_assert!(r.is_ok(), "queue_command_vp should succeed"); + + f_violate_heap(); + + #[expect(static_mut_refs)] + // reading a reference to a shared static reference generates a error. 
we safely handle RETURN_VALUE so ignoring it here. + // SAFETY: we are reading a static variable that is written to only once. + unsafe { + log::info!( + "reading mutated heap memory from vtl0(it should not be 0xA2): 0x{:x}", + RETURN_VALUE + ); + tmk_assert!( + RETURN_VALUE != 0xA2, + "heap memory should not be accessible from vtl0" + ); + } + + _ = tx.send(()); + }), + ); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + _ = rx.recv(); + + log::info!("we are in vtl0 now!"); + log::info!("we reached the end of the test"); +} diff --git a/opentmk/src/tests/hyperv/hv_memory_protect_write.rs b/opentmk/src/tests/hyperv/hv_memory_protect_write.rs new file mode 100644 index 0000000000..571b6de978 --- /dev/null +++ b/opentmk/src/tests/hyperv/hv_memory_protect_write.rs @@ -0,0 +1,141 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use alloc::alloc::alloc; +use core::alloc::Layout; +use core::arch::asm; +use core::cell::RefCell; +use core::ops::Range; + +use hvdef::Vtl; +use nostd_spin_channel::Channel; +use spin::Mutex; + +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecToken; +use crate::context::VtlPlatformTrait; +use crate::create_function_with_restore; +use crate::tmk_assert; + +static mut HEAP_ALLOC_PTR: RefCell<*mut u8> = RefCell::new(0 as *mut u8); +static FAULT_CALLED: Mutex = Mutex::new(false); + +// Without inline the compiler may optimize away the call and the VTL switch may +// distort the architectural registers +#[inline(never)] +// SAFETY: we safely handle HEAP_ALLOC_PTR so ignoring it here. +#[expect(static_mut_refs)] +fn violate_heap() { + unsafe { + let alloc_ptr = *HEAP_ALLOC_PTR.borrow(); + *(alloc_ptr.add(10)) = 0x56; + } +} +create_function_with_restore!(f_violate_heap, violate_heap); + +/// Executes a series of tests to validate memory protection between VTLs. +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + log::info!("set intercept handler successfully!"); + + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let r = ctx.start_on_vp(VpExecToken::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + let r = ctx.set_interrupt_idx(0x30, move || { + log::info!("interrupt handled for 0x30!"); + let mut status = FAULT_CALLED.lock(); + *status = true; + }); + tmk_assert!(r.is_ok(), "set_interrupt_idx should succeed"); + + let layout = + Layout::from_size_align(1024 * 1024, 4096).expect("msg: failed to create layout"); + // SAFETY: we are allocating memory to heap, we don't free it in this test. 
+ let ptr = unsafe { alloc(layout) }; + log::info!("allocated some memory in the heap from vtl1"); + + #[expect(static_mut_refs)] + unsafe { + let mut z = HEAP_ALLOC_PTR.borrow_mut(); + *z = ptr; + *ptr.add(10) = 0xA2; + } + + let size = layout.size(); + let r = ctx.setup_vtl_protection(); + tmk_assert!(r.is_ok(), "setup_vtl_protection should succeed"); + log::info!("enabled vtl protections for the partition."); + + let range = Range { + start: ptr as u64, + end: ptr as u64 + size as u64, + }; + + let r = ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); + tmk_assert!(r.is_ok(), "apply_vtl_protection_for_memory should succeed"); + + log::info!("moving to vtl0 to attempt to read the heap memory"); + + ctx.switch_to_low_vtl(); + })); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + let (tx, rx) = Channel::new().split(); + + let r = ctx.start_on_vp( + VpExecToken::new(0x2, Vtl::Vtl1).command(move |ctx: &mut T| { + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + log::info!("successfully started running VTL1 on vp2."); + }), + ); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + let r = ctx.start_on_vp( + VpExecToken::new(0x2, Vtl::Vtl0).command(move |ctx: &mut T| { + log::info!("successfully started running VTL0 on vp2."); + + let r = + ctx.queue_command_vp(VpExecToken::new(2, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("after intercept successfully started running VTL1 on vp2."); + ctx.switch_to_low_vtl(); + })); + tmk_assert!(r.is_ok(), "queue_command_vp should succeed"); + + f_violate_heap(); + _ = tx.send(()); + }), + ); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + _ = rx.recv(); + + let fault_called = *FAULT_CALLED.lock(); + tmk_assert!(fault_called, "Secure intercept should be received"); + + log::info!("we are in vtl0 now!"); + log::info!("we reached the end of the test"); +} diff --git a/opentmk/src/tests/hyperv/hv_processor.rs b/opentmk/src/tests/hyperv/hv_processor.rs new file mode 100644 index 0000000000..563e91a7bd --- /dev/null +++ b/opentmk/src/tests/hyperv/hv_processor.rs @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use hvdef::Vtl; +use nostd_spin_channel::Channel; + +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecToken; +use crate::context::VtlPlatformTrait; +use crate::tmk_assert; + +/// Executes a series of tests to validate VTL and VP functionalities. 
+pub fn exec(ctx: &mut T) +where + T: VtlPlatformTrait + VirtualProcessorPlatformTrait, +{ + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + + // Testing BSP VTL Bringup + { + let (tx, rx) = Channel::new().split(); + let result = ctx.start_on_vp(VpExecToken::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == 0, "vp should be equal to 0"); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); + tx.send(()) + .expect("Failed to send message through the channel"); + ctx.switch_to_low_vtl(); + })); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); + _ = rx.recv(); + } + + for i in 1..vp_count { + // Testing VTL1 + { + let (tx, rx) = Channel::new().split(); + let result = + ctx.start_on_vp(VpExecToken::new(i, Vtl::Vtl1).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == i, format!("vp should be equal to {}", i)); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl1 for VP {}", i)); + _ = tx.send(()); + })); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); + _ = rx.recv(); + } + + // Testing VTL0 + { + let (tx, rx) = Channel::new().split(); + let result = + ctx.start_on_vp(VpExecToken::new(i, Vtl::Vtl0).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == i, format!("vp should be equal to {}", i)); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl0, format!("vtl should be Vtl0 for VP {}", i)); + _ = tx.send(()); + })); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); + _ = rx.recv(); + } + } + + log::warn!("All VPs have been tested"); +} diff --git a/opentmk/src/tests/hyperv/hv_register_intercept.rs b/opentmk/src/tests/hyperv/hv_register_intercept.rs new file mode 100644 index 0000000000..fac755e48f --- /dev/null +++ b/opentmk/src/tests/hyperv/hv_register_intercept.rs @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use core::arch::asm; + +use spin::Mutex; + +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VtlPlatformTrait; +use crate::create_function_with_restore; +use crate::tmk_assert; + +static FAULT_CALLED: Mutex = Mutex::new(false); + +// Without inline the compiler may optimize away the call and the VTL switch may +// distort the architectural registers +#[inline(never)] +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch cpu-intrinsic +fn violate_reg_rule() { + // SAFETY: we are writing to a valid MSR + unsafe { + asm!( + "mov ecx, 0x1B", + "wrmsr", + out("eax") _, + out("edx") _, + out("ecx") _, + ); + } +} + +// The macro `create_function_with_restore!` generates a wrapper function (`f_violate_reg_rule`) +// that calls `violate_reg_rule` and restores the processor state as needed for virtualization tests. +// Usage: create_function_with_restore!(wrapper_fn_name, target_fn_name); +create_function_with_restore!(f_violate_reg_rule, violate_reg_rule); + +/// Executes a series of tests to validate secure register intercept functionality. +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + use hvdef::Vtl; + + use crate::context::VpExecToken; + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + log::info!("set intercept handler successfully!"); + + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let r = ctx.start_on_vp(VpExecToken::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + let r = ctx.set_interrupt_idx(0x30, move || { + log::info!("interrupt handled for 0x30!"); + let mut status = FAULT_CALLED.lock(); + *status = true; + }); + tmk_assert!(r.is_ok(), "set_interrupt_idx should succeed"); + + let r = ctx.set_register(0x000E0000, 0x0000000000001000); + tmk_assert!(r.is_ok(), "set_register should succeed to write Control register"); + + let r = ctx.get_register(0x000E0000); + tmk_assert!(r.is_ok(), "get_register should succeed to read Control register"); + + let reg_values = r.unwrap(); + tmk_assert!(reg_values == 0x0000000000001000, format!("register value should be 0x0000000000001000, got {:x}", reg_values)); + + log::info!("Switching to VTL0: attempting to read a protected register to verify security enforcement and intercept handling."); + + ctx.switch_to_low_vtl(); + })); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + _ = ctx.queue_command_vp(VpExecToken::new(0x0, Vtl::Vtl1).command(|ctx: &mut T| { + log::info!("successfully resumed running VTL1 on vp0 after intercept"); + ctx.switch_to_low_vtl(); + })); + + f_violate_reg_rule(); + + let fault_called = *FAULT_CALLED.lock(); + tmk_assert!(fault_called, "Secure intercept should be received"); + + log::info!("we are in vtl0 now!"); + log::info!("we reached the end of the test"); +} diff --git a/opentmk/src/tests/hyperv/hv_tpm_read_cvm.rs b/opentmk/src/tests/hyperv/hv_tpm_read_cvm.rs new file mode 100644 index 
0000000000..60a9cb2919 --- /dev/null +++ b/opentmk/src/tests/hyperv/hv_tpm_read_cvm.rs @@ -0,0 +1,94 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use core::ops::Range; + +use hvdef::Vtl; + +use crate::arch::tpm::Tpm; +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecToken; +use crate::context::VtlPlatformTrait; +use crate::devices::tpm::{TpmDevice, TpmUtil}; +use crate::tmk_assert; + +/// Executes a series of tests to validate TPM read violation in a Hyper-V environment. +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + let mut _tpm = Tpm::new(); + let protocol_version = Tpm::get_tcg_protocol_version(); + log::warn!("TPM protocol version: 0x{:x}", protocol_version); + + let tpm_gpa = Tpm::get_mapped_shared_memory(); + log::warn!("TPM CMD buffer from vTPM Device: 0x{:x}", tpm_gpa); + let tpm_ptr = (tpm_gpa as u64) as *mut u8; + + // build slice from pointer + // SAFETY: we trust the address set by UEFI is valid + let tpm_command = unsafe { core::slice::from_raw_parts_mut(tpm_ptr, 4096) }; + // SAFETY: we trust the address set by UEFI is valid + let tpm_response = unsafe { core::slice::from_raw_parts_mut(tpm_ptr.add(4096), 4096) }; + + _tpm.set_command_buffer(tpm_command); + _tpm.set_response_buffer(tpm_response); + + let result = TpmUtil::exec_self_test(&mut _tpm); + + log::warn!("TPM self test result: {:?}", result); + tmk_assert!(result.is_ok(), "TPM self test is successful"); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + log::info!("set intercept handler successfully!"); + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let command_range = Range { + start: tpm_gpa as u64, + end: tpm_gpa as u64 + 4096, + }; + + let _r = ctx.start_on_vp(VpExecToken::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + let r = ctx.setup_vtl_protection(); + tmk_assert!(r.is_ok(), "setup_vtl_protection should succeed"); + log::info!("enabled vtl protections for the partition."); + ctx.switch_to_low_vtl(); + })); + + let r = ctx.set_interrupt_idx(18, || { + log::warn!("successfully intercepted interrupt 18"); + panic!("MC should cause a system abort"); + }); + tmk_assert!(r.is_ok(), "set_interrupt_idx should succeed"); + + let cmd = TpmUtil::get_self_test_cmd(); + _tpm.copy_to_command_buffer(&cmd); + log::warn!("TPM self test command copied to buffer"); + + let r = ctx.start_on_vp(VpExecToken::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + let r = ctx.apply_vtl_protection_for_memory(command_range, Vtl::Vtl1); + tmk_assert!(r.is_ok(), "apply_vtl_protection_for_memory should succeed"); + + ctx.switch_to_low_vtl(); + })); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + log::warn!("about to execute TPM self test command.."); + Tpm::execute_command_no_check(); + log::warn!("TPM self test command executed"); +} diff --git 
a/opentmk/src/tests/hyperv/hv_tpm_write_cvm.rs b/opentmk/src/tests/hyperv/hv_tpm_write_cvm.rs new file mode 100644 index 0000000000..2d018501b6 --- /dev/null +++ b/opentmk/src/tests/hyperv/hv_tpm_write_cvm.rs @@ -0,0 +1,92 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use core::ops::Range; + +use hvdef::Vtl; + +use crate::arch::tpm::Tpm; +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecToken; +use crate::context::VtlPlatformTrait; +use crate::devices::tpm::{TpmDevice, TpmUtil}; +use crate::tmk_assert; + +/// Executes a series of tests to validate TPM write-violation handling in a Hyper-V environment. +pub fn exec<T>(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + let mut _tpm = Tpm::new(); + let protocol_version = Tpm::get_tcg_protocol_version(); + log::warn!("TPM protocol version: 0x{:x}", protocol_version); + let tpm_gpa: u32 = Tpm::get_mapped_shared_memory(); + log::warn!("TPM CMD buffer from vTPM Device: 0x{:x}", tpm_gpa); + let tpm_ptr = (tpm_gpa as u64) as *mut u8; + + // build slice from pointer + // SAFETY: we trust the address set by UEFI is valid + let tpm_command = unsafe { core::slice::from_raw_parts_mut(tpm_ptr, 4096) }; + // SAFETY: we trust the address set by UEFI is valid + let tpm_response = unsafe { core::slice::from_raw_parts_mut(tpm_ptr.add(4096), 4096) }; + + _tpm.set_command_buffer(tpm_command); + _tpm.set_response_buffer(tpm_response); + + let result = TpmUtil::exec_self_test(&mut _tpm); + + log::warn!("TPM self test result: {:?}", result); + tmk_assert!(result.is_ok(), "TPM self test is successful"); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + log::info!("set intercept handler successfully!"); + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let response_range = Range { + start: tpm_gpa as u64 + 4096, + end: tpm_gpa as u64 + 4096 * 2, + }; + + let _r = ctx.start_on_vp(VpExecToken::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + let r = ctx.setup_vtl_protection(); + tmk_assert!(r.is_ok(), "setup_vtl_protection should succeed"); + + log::info!("enabled vtl protections for the partition."); + + let r = ctx.apply_vtl_protection_for_memory(response_range, Vtl::Vtl1); + tmk_assert!(r.is_ok(), "apply_vtl_protection_for_memory should succeed"); + + log::info!("moving to vtl0 to attempt to write to the protected TPM response buffer"); + + ctx.switch_to_low_vtl(); + })); + + let r = ctx.set_interrupt_idx(18, || { + log::warn!("successfully intercepted interrupt 18"); + panic!("MC should cause a system abort"); + }); + tmk_assert!(r.is_ok(), "set_interrupt_idx should succeed"); + + let cmd = TpmUtil::get_self_test_cmd(); + + _tpm.copy_to_command_buffer(&cmd); + log::warn!("TPM self test command copied to buffer"); + log::warn!("about to execute TPM self test command.."); + Tpm::execute_command_no_check(); + log::warn!("TPM self test command executed"); +} diff --git
a/opentmk/src/tests/hyperv/mod.rs b/opentmk/src/tests/hyperv/mod.rs new file mode 100644 index 0000000000..f64d8d391c --- /dev/null +++ b/opentmk/src/tests/hyperv/mod.rs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +pub mod hv_error_vp_start; +#[cfg(nightly)] +pub mod hv_memory_protect_read; +#[cfg(nightly)] +pub mod hv_memory_protect_write; +pub mod hv_processor; +#[cfg(nightly)] +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +pub mod hv_register_intercept; +#[cfg(nightly)] +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +pub mod hv_tpm_read_cvm; +#[cfg(nightly)] +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +pub mod hv_tpm_write_cvm; +pub mod test_helpers; diff --git a/opentmk/src/tests/hyperv/test_helpers.rs b/opentmk/src/tests/hyperv/test_helpers.rs new file mode 100644 index 0000000000..2ebeeefce9 --- /dev/null +++ b/opentmk/src/tests/hyperv/test_helpers.rs @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#[macro_export] +/// Generates a function that calls the given symbol saving and restoring general purpose registers around the call. +macro_rules! create_function_with_restore { + ($func_name:ident, $symbol:ident) => { + #[inline(never)] + // avoiding inline for debuggability in release builds. + fn $func_name() { + // SAFETY: we are calling a function pointer and restoring general purpose registers. + unsafe { + asm!(" + push rax + push rbx + push rcx + push rdx + push rsi + push rdi + push rbp + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + call {} + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rbp + pop rdi + pop rsi + pop rdx + pop rcx + pop rbx + pop rax + ", sym $symbol); + } + } + }; +} diff --git a/opentmk/src/tests/mod.rs b/opentmk/src/tests/mod.rs new file mode 100644 index 0000000000..ec876317b6 --- /dev/null +++ b/opentmk/src/tests/mod.rs @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Test modules driving OpenTMK tests. + +// only one test is run at a time so there is dead code in other tests +#![expect(dead_code)] +use crate::platform::hyperv::ctx::HvTestCtx; +mod hyperv; + +/// Runs all the tests. +pub fn run_test() { + let mut ctx = HvTestCtx::new(); + ctx.init(hvdef::Vtl::Vtl0).expect("failed to init on BSP"); + hyperv::hv_processor::exec(&mut ctx); +} diff --git a/opentmk/src/tmk_assert.rs b/opentmk/src/tmk_assert.rs new file mode 100644 index 0000000000..a04617207a --- /dev/null +++ b/opentmk/src/tmk_assert.rs @@ -0,0 +1,91 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Assertion handling and logging in JSON format. +//! This module provides a custom assertion macro `tmk_assert!` that logs assertion results in +//! JSON format. It also includes utility functions for formatting and writing log messages. 
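As a rough sketch of the assertion flow described above (an illustration only, assuming the AssertJson shape defined later in this file; the file/line value is a placeholder, not real output):
// Typical call site, matching the usage in the tests above:
//     tmk_assert!(vp_count == 4, "vp count should be 4");
// Evaluating the macro writes one JSON line to the logger's writer, roughly:
//     {"type":"assert","level":"WARN","message":"vp_count == 4",
//      "line":"<file>:<line>","assertion_result":true,
//      "testname":"vp count should be 4"}
// and it panics with the supplied message only when the condition is false.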
+ +use alloc::string::String; +use core::fmt::Write; + +use serde::Serialize; + +#[derive(Serialize)] +struct AssertJson<'a, T> +where + T: Serialize, +{ + #[serde(rename = "type")] + type_: &'a str, + level: &'a str, + message: &'a str, + line: String, + assertion_result: bool, + testname: &'a T, +} + +impl<'a, T> AssertJson<'a, T> +where + T: Serialize, +{ + fn new( + type_: &'a str, + level: &'a str, + message: &'a str, + line: String, + assertion_result: bool, + testname: &'a T, + ) -> Self { + Self { + type_, + level, + message, + line, + assertion_result, + testname, + } + } +} + +pub(crate) fn format_assert_json_string( + s: &str, + terminate_new_line: bool, + line: String, + assert_result: bool, + testname: &T, +) -> String +where + T: Serialize, +{ + let assert_json = AssertJson::new("assert", "WARN", s, line, assert_result, testname); + + let mut out = serde_json::to_string(&assert_json).expect("Failed to serialize assert JSON"); + if terminate_new_line { + out.push('\n'); + } + out +} + +pub(crate) fn write_str(s: &str) { + _ = crate::tmk_logger::LOGGER.get_writer().write_str(s); +} + +#[macro_export] +/// Asserts that a condition is true, logging the result in JSON format. +/// If the condition is false, it panics with the provided message. +macro_rules! tmk_assert { + ($condition:expr, $message:expr) => {{ + let file = core::file!(); + let line = line!(); + let file_line = format!("{}:{}", file, line); + let expn = stringify!($condition); + let result: bool = $condition; + let js = $crate::tmk_assert::format_assert_json_string( + &expn, true, file_line, result, &$message, + ); + $crate::tmk_assert::write_str(&js); + if !result { + panic!("Assertion failed: {}", $message); + } + }}; +} diff --git a/opentmk/src/tmk_logger.rs b/opentmk/src/tmk_logger.rs new file mode 100644 index 0000000000..912993fa7f --- /dev/null +++ b/opentmk/src/tmk_logger.rs @@ -0,0 +1,125 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Logger implementation for OpenTMK. +//! This module provides a logger that formats log messages as JSON and writes them to a specified output +//! such as a serial port. + +use alloc::borrow::ToOwned; +use alloc::fmt::format; +use alloc::string::String; +use alloc::string::ToString; +use core::fmt::Write; + +use log::SetLoggerError; +use serde::Serialize; +use spin::Mutex; +use spin::MutexGuard; + +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +use crate::arch::serial::InstrIoAccess; +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +use crate::arch::serial::Serial; +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +use crate::arch::serial::SerialPort; +#[cfg(target_arch = "aarch64")] // xtask-fmt allow-target-arch sys-crate +use minimal_rt::arch::Serial; + +#[derive(Serialize)] +struct LogEntry { + #[serde(rename = "type")] + log_type: &'static str, + level: String, + message: String, + line: String, +} + +impl LogEntry { + fn new(level: log::Level, message: &str, line: &str) -> Self { + LogEntry { + log_type: "log", + level: level.as_str().to_string(), + message: message.to_owned(), + line: line.to_owned(), + } + } +} + +/// Formats a log message into a JSON string. 
+pub(crate) fn format_log_string_to_json( + message: &str, + line: &str, + terminate_new_line: bool, + level: log::Level, +) -> String { + let log_entry = LogEntry::new(level, message, line); + let mut out = serde_json::to_string(&log_entry).unwrap(); + if terminate_new_line { + out.push('\n'); + } + out +} + +/// A logger that writes log messages to a provided writer, such as a serial port. +pub struct TmkLogger<T> { + writer: T, +} + +impl<T> TmkLogger<Mutex<T>> +where + T: Write + Send, +{ + /// Creates a new `TmkLogger` instance with the provided writer. + pub const fn new(provider: T) -> Self { + TmkLogger { + writer: Mutex::new(provider), + } + } + + /// Returns a lock guard to the underlying writer. + /// This allows direct access to the writer for custom logging operations. + pub fn get_writer(&self) -> MutexGuard<'_, T> + where + T: Write + Send, + { + self.writer.lock() + } +} + +impl<T> log::Log for TmkLogger<Mutex<T>> +where + T: Write + Send, +{ + fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { + true + } + + fn log(&self, record: &log::Record<'_>) { + let str = format(*record.args()); + let line = format!( + "{}:{}", + record.file().unwrap_or_default(), + record.line().unwrap_or_default() + ); + let str = format_log_string_to_json(&str, &line, true, record.level()); + _ = self.writer.lock().write_str(str.as_str()); + } + + fn flush(&self) {} +} + +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +type SerialPortWriter = Serial<InstrIoAccess>; +#[cfg(target_arch = "x86_64")] // xtask-fmt allow-target-arch sys-crate +/// The global logger instance for x86_64 architecture, using COM2 serial port. +pub static LOGGER: TmkLogger<Mutex<SerialPortWriter>> = + TmkLogger::new(SerialPortWriter::new(SerialPort::COM2, InstrIoAccess)); + +#[cfg(target_arch = "aarch64")] // xtask-fmt allow-target-arch sys-crate +/// The global logger instance for aarch64 architecture, using the default serial implementation. +pub static LOGGER: TmkLogger<Mutex<Serial>> = TmkLogger::new(Serial {}); + +/// Initializes the global logger. +pub fn init() -> Result<(), SetLoggerError> { + log::set_logger(&LOGGER).map(|()| log::set_max_level(log::LevelFilter::Debug)) +} diff --git a/opentmk/src/tmkdefs.rs b/opentmk/src/tmkdefs.rs new file mode 100644 index 0000000000..437975bdaa --- /dev/null +++ b/opentmk/src/tmkdefs.rs @@ -0,0 +1,231 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! TMK error definitions and result type alias. +//! + +use thiserror::Error; + +/// Primary error type produced by TMK operations. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Error)] +pub enum TmkError { + /// Returned when a memory allocation attempt fails. + #[error("allocation failed")] + AllocationFailed, + /// Returned when an input parameter is invalid. + #[error("invalid parameter")] + InvalidParameter, + /// Returned when enabling a VTL operation fails. + #[error("failed to enable VTL")] + EnableVtlFailed, + /// Returned when setting the default context fails. + #[error("failed to set default context")] + SetDefaultCtxFailed, + /// Returned when starting a virtual processor fails. + #[error("failed to start VP")] + StartVpFailed, + /// Returned when queuing a command fails. + #[error("failed to queue command")] + QueueCommandFailed, + /// Returned when configuring VTL protection fails. + #[error("failed to set up VTL protection")] + SetupVtlProtectionFailed, + /// Returned when configuring partition-level VTL fails.
+ #[error("failed to set up partition VTL")] + SetupPartitionVtlFailed, + /// Returned when installing the interrupt handler fails. + #[error("failed to set up interrupt handler")] + SetupInterruptHandlerFailed, + /// Returned when assigning an interrupt index fails. + #[error("failed to set interrupt index")] + SetInterruptIdxFailed, + /// Returned when configuring secure intercept fails. + #[error("failed to set up secure intercept")] + SetupSecureInterceptFailed, + /// Returned when applying VTL memory protection fails. + #[error("failed to apply VTL protection for memory")] + ApplyVtlProtectionForMemoryFailed, + /// Returned when reading an MSR fails. + #[error("failed to read MSR")] + ReadMsrFailed, + /// Returned when writing an MSR fails. + #[error("failed to write MSR")] + WriteMsrFailed, + /// Returned when reading a register fails. + #[error("failed to get register")] + GetRegisterFailed, + /// Returned when a hypercall code is unrecognized. + #[error("invalid hypercall code")] + InvalidHypercallCode, + /// Returned when hypercall input is invalid. + #[error("invalid hypercall input")] + InvalidHypercallInput, + /// Returned when a value is not properly aligned. + #[error("invalid alignment")] + InvalidAlignment, + /// Returned when the operation lacks required privileges. + #[error("access denied")] + AccessDenied, + /// Returned when the partition state is invalid. + #[error("invalid partition state")] + InvalidPartitionState, + /// Returned when the operation is denied. + #[error("operation denied")] + OperationDenied, + /// Returned when querying an unknown property. + #[error("unknown property")] + UnknownProperty, + /// Returned when a property value is outside the supported range. + #[error("property value out of range")] + PropertyValueOutOfRange, + /// Returned when memory resources are insufficient. + #[error("insufficient memory")] + InsufficientMemory, + /// Returned when partition depth exceeds limits. + #[error("partition too deep")] + PartitionTooDeep, + /// Returned when a partition identifier is invalid. + #[error("invalid partition id")] + InvalidPartitionId, + /// Returned when a virtual processor index is invalid. + #[error("invalid VP index")] + InvalidVpIndex, + /// Returned when a requested resource is not found. + #[error("not found")] + NotFound, + /// Returned when a port identifier is invalid. + #[error("invalid port id")] + InvalidPortId, + /// Returned when a connection identifier is invalid. + #[error("invalid connection id")] + InvalidConnectionId, + /// Returned when available buffers are insufficient. + #[error("insufficient buffers")] + InsufficientBuffers, + /// Returned when required acknowledgment is missing. + #[error("not acknowledged")] + NotAcknowledged, + /// Returned when a virtual processor state is invalid. + #[error("invalid VP state")] + InvalidVpState, + /// Returned when an operation was already acknowledged. + #[error("already acknowledged")] + Acknowledged, + /// Returned when save or restore state is invalid. + #[error("invalid save/restore state")] + InvalidSaveRestoreState, + /// Returned when SynIC state is invalid. + #[error("invalid synic state")] + InvalidSynicState, + /// Returned when the object is already in use. + #[error("object in use")] + ObjectInUse, + /// Returned when proximity domain information is invalid. + #[error("invalid proximity domain info")] + InvalidProximityDomainInfo, + /// Returned when no data is available. + #[error("no data")] + NoData, + /// Returned when the target component is inactive. 
+ #[error("inactive")] + Inactive, + /// Returned when required resources are unavailable. + #[error("no resources")] + NoResources, + /// Returned when a requested feature is unavailable. + #[error("feature unavailable")] + FeatureUnavailable, + /// Returned when only a partial packet is available. + #[error("partial packet")] + PartialPacket, + /// Returned when the processor lacks a required feature. + #[error("processor feature not supported")] + ProcessorFeatureNotSupported, + /// Returned when the processor cache line flush size is incompatible. + #[error("processor cache line flush size incompatible")] + ProcessorCacheLineFlushSizeIncompatible, + /// Returned when a provided buffer is too small. + #[error("insufficient buffer")] + InsufficientBuffer, + /// Returned when the processor is incompatible. + #[error("incompatible processor")] + IncompatibleProcessor, + /// Returned when there are not enough device domains. + #[error("insufficient device domains")] + InsufficientDeviceDomains, + /// Returned when CPUID feature validation fails. + #[error("cpuid feature validation error")] + CpuidFeatureValidationError, + /// Returned when CPUID XSAVE feature validation fails. + #[error("cpuid xsave feature validation error")] + CpuidXsaveFeatureValidationError, + /// Returned when processor startup times out. + #[error("processor startup timeout")] + ProcessorStartupTimeout, + /// Returned when SMX is enabled and unsupported. + #[error("smx enabled")] + SmxEnabled, + /// Returned when a logical processor index is invalid. + #[error("invalid LP index")] + InvalidLpIndex, + /// Returned when a register value is invalid. + #[error("invalid register value")] + InvalidRegisterValue, + /// Returned when a VTL state is invalid. + #[error("invalid VTL state")] + InvalidVtlState, + /// Returned when NX support is not detected. + #[error("nx not detected")] + NxNotDetected, + /// Returned when a device identifier is invalid. + #[error("invalid device id")] + InvalidDeviceId, + /// Returned when a device state is invalid. + #[error("invalid device state")] + InvalidDeviceState, + /// Returned when page requests remain pending. + #[error("pending page requests")] + PendingPageRequests, + /// Returned when a page request is invalid. + #[error("page request invalid")] + PageRequestInvalid, + /// Returned when a key already exists. + #[error("key already exists")] + KeyAlreadyExists, + /// Returned when a device is already assigned to a domain. + #[error("device already in domain")] + DeviceAlreadyInDomain, + /// Returned when a CPU group identifier is invalid. + #[error("invalid cpu group id")] + InvalidCpuGroupId, + /// Returned when a CPU group state is invalid. + #[error("invalid cpu group state")] + InvalidCpuGroupState, + /// Returned when an operation fails for an unspecified reason. + #[error("operation failed")] + OperationFailed, + /// Returned when nested virtualization forbids the operation. + #[error("not allowed with nested virtualization active")] + NotAllowedWithNestedVirtActive, + /// Returned when root partition memory is insufficient. + #[error("insufficient root memory")] + InsufficientRootMemory, + /// Returned when an event buffer was already freed. + #[error("event buffer already freed")] + EventBufferAlreadyFreed, + /// Returned when an operation times out. + #[error("timeout")] + Timeout, + /// Returned when the VTL is already enabled. + #[error("vtl already enabled")] + VtlAlreadyEnabled, + /// Returned when a register name is unrecognized. 
+ #[error("unknown register name")] + UnknownRegisterName, + /// Returned when the operation is not implemented. + #[error("not implemented")] + NotImplemented, +} + +/// Result type alias for TMK operations using `TmkError`. +pub type TmkResult = Result; diff --git a/opentmk/src/uefi/alloc.rs b/opentmk/src/uefi/alloc.rs new file mode 100644 index 0000000000..8688d8da87 --- /dev/null +++ b/opentmk/src/uefi/alloc.rs @@ -0,0 +1,98 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use core::alloc::GlobalAlloc; +use core::cell::RefCell; + +use linked_list_allocator::LockedHeap; +use spin::Mutex; +use uefi::allocator::Allocator; +use uefi::boot::AllocateType; +use uefi::boot::MemoryType; +use uefi::boot::{self}; + +pub const SIZE_1MB: usize = 1024 * 1024; +const PAGE_SIZE: usize = 4096; + +#[global_allocator] +pub static ALLOCATOR: MemoryAllocator = MemoryAllocator { + use_locked_heap: Mutex::new(RefCell::new(false)), + locked_heap: LockedHeap::empty(), + uefi_allocator: Allocator {}, +}; + +pub struct MemoryAllocator { + use_locked_heap: Mutex>, + locked_heap: LockedHeap, + uefi_allocator: Allocator, +} + +// SAFETY: The methods of GlobalAlloc are unsafe because the caller must ensure the safety +unsafe impl GlobalAlloc for MemoryAllocator { + unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { + // SAFETY: caller must ensure layout is valid + unsafe { self.get_allocator().alloc(layout) } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) { + // SAFETY: caller must ensure ptr and layout are valid + unsafe { self.get_allocator().dealloc(ptr, layout) }; + } + + unsafe fn alloc_zeroed(&self, layout: core::alloc::Layout) -> *mut u8 { + // SAFETY: caller must ensure layout is valid + unsafe { self.get_allocator().alloc_zeroed(layout) } + } + + unsafe fn realloc( + &self, + ptr: *mut u8, + layout: core::alloc::Layout, + new_size: usize, + ) -> *mut u8 { + // SAFETY: caller must ensure ptr is valid for layout + unsafe { self.get_allocator().realloc(ptr, layout, new_size) } + } +} + +impl MemoryAllocator { + pub fn switch_to_capped_heap(&self, size: usize) -> bool { + let pages = ((SIZE_1MB * size) / 4096) + 1; + let size = pages * 4096; + let mem: Result, uefi::Error> = boot::allocate_pages( + AllocateType::AnyPages, + MemoryType::BOOT_SERVICES_DATA, + pages, + ); + if mem.is_err() { + return false; + } + let ptr = mem.unwrap().as_ptr(); + // SAFETY: its safe to init a locked heap at this point, we know memory allocated is valid + unsafe { self.locked_heap.lock().init(ptr, size) }; + *self.use_locked_heap.lock().borrow_mut() = true; + true + } + + #[expect(dead_code)] + pub fn get_page_aligned_memory(&self, size: usize) -> *mut u8 { + let pages = ((SIZE_1MB * size) / PAGE_SIZE) + 1; + let mem: Result, uefi::Error> = boot::allocate_pages( + AllocateType::AnyPages, + MemoryType::BOOT_SERVICES_DATA, + pages, + ); + if mem.is_err() { + return core::ptr::null_mut(); + } + mem.unwrap().as_ptr() + } + + fn get_allocator(&self) -> &dyn GlobalAlloc { + if *self.use_locked_heap.lock().borrow() { + &self.locked_heap + } else { + &self.uefi_allocator + } + } +} diff --git a/opentmk/src/uefi/init.rs b/opentmk/src/uefi/init.rs new file mode 100644 index 0000000000..65eb872ce0 --- /dev/null +++ b/opentmk/src/uefi/init.rs @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use uefi::CStr16; +use uefi::Status; +use uefi::boot::MemoryType; +use uefi::boot::exit_boot_services; +use uefi::guid; + +use super::alloc::ALLOCATOR; + +const EFI_GUID: uefi::Guid = guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91"); +const OS_LOADER_INDICATIONS: &str = "OsLoaderIndications"; + +fn enable_uefi_vtl_protection() { + let mut buf = vec![0u8; 1024]; + let mut str_buff = vec![0u16; 1024]; + let os_loader_indications_key = + CStr16::from_str_with_buf(OS_LOADER_INDICATIONS, str_buff.as_mut_slice()).unwrap(); + + let os_loader_indications_result = uefi::runtime::get_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(EFI_GUID), + buf.as_mut(), + ) + .expect("Failed to get OsLoaderIndications"); + + let mut os_loader_indications = u32::from_le_bytes( + os_loader_indications_result.0[0..4] + .try_into() + .expect("error in output"), + ); + os_loader_indications |= 0x1u32; + + let os_loader_indications = os_loader_indications.to_le_bytes(); + + uefi::runtime::set_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(EFI_GUID), + os_loader_indications_result.1, + &os_loader_indications, + ) + .expect("Failed to set OsLoaderIndications"); + + let _os_loader_indications_result = uefi::runtime::get_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(EFI_GUID), + buf.as_mut(), + ) + .expect("Failed to get OsLoaderIndications"); + + // SAFETY: its safe to exit boot services here + let _memory_map = unsafe { exit_boot_services(Some(MemoryType::BOOT_SERVICES_DATA)) }; +} + +pub fn init() -> Result<(), Status> { + let r: bool = ALLOCATOR.switch_to_capped_heap(512); + if !r { + return Err(Status::ABORTED); + } + crate::tmk_logger::init().map_err(|_| Status::NOT_READY)?; + enable_uefi_vtl_protection(); + Ok(()) +} diff --git a/opentmk/src/uefi/mod.rs b/opentmk/src/uefi/mod.rs new file mode 100644 index 0000000000..3ee952b65a --- /dev/null +++ b/opentmk/src/uefi/mod.rs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +mod alloc; +pub mod init; +mod rt; + +use init::init; +use uefi::Status; +use uefi::entry; + +use crate::tmk_assert; + +#[entry] +fn uefi_main() -> Status { + let r = init(); + tmk_assert!(r.is_ok(), "init should succeed"); + + log::warn!("TEST_START"); + crate::tests::run_test(); + log::warn!("TEST_END"); + loop { + core::hint::spin_loop(); + } +} diff --git a/opentmk/src/uefi/rt.rs b/opentmk/src/uefi/rt.rs new file mode 100644 index 0000000000..bbd46eb39c --- /dev/null +++ b/opentmk/src/uefi/rt.rs @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#![cfg(target_os = "uefi")] +#[panic_handler] +fn panic_handler(panic: &core::panic::PanicInfo<'_>) -> ! { + log::error!("Panic at runtime: {}", panic); + log::warn!("TEST_END"); + loop {} +} diff --git a/petri/Cargo.toml b/petri/Cargo.toml index c7865b893a..6bc48314df 100644 --- a/petri/Cargo.toml +++ b/petri/Cargo.toml @@ -82,7 +82,7 @@ mbrman.workspace = true parking_lot.workspace = true prost.workspace = true serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } tempfile.workspace = true thiserror.workspace = true tracing.workspace = true diff --git a/support/nostd_spin_channel/Cargo.toml b/support/nostd_spin_channel/Cargo.toml new file mode 100644 index 0000000000..314eea4d9c --- /dev/null +++ b/support/nostd_spin_channel/Cargo.toml @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT License. + +[package] +name = "nostd_spin_channel" +rust-version.workspace = true +edition.workspace = true + +[dependencies] +spin.workspace = true +thiserror.workspace = true + +[lints] +workspace = true diff --git a/support/nostd_spin_channel/src/lib.rs b/support/nostd_spin_channel/src/lib.rs new file mode 100644 index 0000000000..700302b5a1 --- /dev/null +++ b/support/nostd_spin_channel/src/lib.rs @@ -0,0 +1,352 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! This crate provides a no_std, unbounded channel implementation with priority send capability. + +#![no_std] +#![forbid(unsafe_code)] +#![warn(missing_docs)] + +extern crate alloc; +use alloc::collections::VecDeque; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::sync::atomic::AtomicUsize; +use core::sync::atomic::Ordering; +use spin::Mutex; +use spin::MutexGuard; +use thiserror::Error; + +/// An unbounded channel implementation with priority send capability. +/// This implementation works in no_std environments using spin-rs. +/// It uses a VecDeque as the underlying buffer. +pub struct Channel { + inner: Arc>, +} + +/// The inner data structure holding the channel state +struct ChannelInner { + /// The internal buffer using a VecDeque protected by its own mutex + buffer: Mutex>, + + /// Number of active senders + senders: AtomicUsize, + + /// Number of active receivers + receivers: AtomicUsize, +} + +/// Error type for sending operations +#[derive(Debug, Eq, PartialEq, Error)] +pub enum SendError { + /// All receivers have been dropped + #[error("send failed because receiver is disconnected")] + Disconnected, +} + +/// Error type for receiving operations +#[derive(Debug, Eq, PartialEq, Error)] +pub enum RecvError { + /// No messages available to receive + #[error("receive failed because channel is empty")] + Empty, + /// All senders have been dropped + #[error("receive failed because all senders are disconnected")] + Disconnected, + /// Channel is currently locked by another operation + #[error("channel is locked by another operation")] + Unavailable, +} + +/// Sender half of the channel +pub struct Sender { + inner: Arc>, +} + +/// Receiver half of the channel and provides blocking and non-blocking interfaces. 
+pub struct Receiver { + inner: Arc>, +} + +// implement clone for Sender +impl Clone for Sender { + fn clone(&self) -> Self { + self.inner.senders.fetch_add(1, Ordering::SeqCst); + Sender { + inner: self.inner.clone(), + } + } +} + +// implement clone for Receiver +impl Clone for Receiver { + fn clone(&self) -> Self { + self.inner.receivers.fetch_add(1, Ordering::SeqCst); + Receiver { + inner: self.inner.clone(), + } + } +} + +impl Channel { + /// Creates a new unbounded channel + pub fn new() -> Self { + let inner = Arc::new(ChannelInner { + buffer: Mutex::new(VecDeque::new()), + senders: AtomicUsize::new(1), // Start with one sender + receivers: AtomicUsize::new(1), // Start with one receiver + }); + + Self { inner } + } + + /// Splits the channel into a sender and receiver pair + pub fn split(self) -> (Sender, Receiver) { + let sender = Sender { + inner: self.inner.clone(), + }; + + let receiver = Receiver { inner: self.inner }; + + (sender, receiver) + } + + /// Returns the current number of elements in the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } +} + +impl Sender { + /// Sends an element to the back of the queue + /// Returns Ok(()) if successful, Err(SendError) if all receivers have been dropped + pub fn send(&self, value: T) -> Result<(), SendError> { + let mut buffer = self.buffer()?; + // Push to the back of the queue - can't fail since we're unbounded + buffer.push_back(value); + + Ok(()) + } + + /// Sends an element to the front of the queue (highest priority) + /// Returns Ok(()) if successful, Err(SendError) if all receivers have been dropped + pub fn send_priority(&self, value: T) -> Result<(), SendError> { + let mut buffer = self.buffer()?; + + // Push to the front of the queue - can't fail since we're unbounded + buffer.push_front(value); + + Ok(()) + } + + /// Send a batch of elements at once + /// Returns the number of elements successfully sent (all of them, unless disconnected) + pub fn send_batch(&self, items: I) -> Result + where + I: IntoIterator, + { + let mut buffer = self.buffer()?; + + let mut count = 0; + + // Push each item to the back of the queue + for item in items { + buffer.push_back(item); + count += 1; + } + + Ok(count) + } + + /// Returns the current number of elements in the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } + + fn buffer(&self) -> Result>, SendError> { + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return Err(SendError::Disconnected); + } + let buffer = self.inner.buffer.lock(); + Ok(buffer) + } +} + +impl Receiver { + /// Tries to receive an element from the front of the queue while blocking + /// Returns Ok(value) if successful, Err(RecvError) otherwise + pub fn recv(&self) -> Result { + loop { + // Use a separate scope for the lock to ensure it's released promptly + let result = { + let mut buffer = self.inner.buffer.lock(); + buffer.pop_front() + }; + let r = match result { + Some(val) => Ok(val), + None => { + // Check if there are any senders left + if self.inner.senders.load(Ordering::SeqCst) == 0 { + Err(RecvError::Disconnected) + } else { + Err(RecvError::Empty) + } + } + }; + + if let Err(err) = r { + if err != RecvError::Empty { + return Err(err); + } + } else { + return r; + } + } + } + + /// Tries to receive an 
element from the front of the queue without blocking + /// Returns Ok(value) if successful, Err(RecvError) otherwise + pub fn try_recv(&self) -> Result { + // Use a separate scope for the lock to ensure it's released promptly + let result = { + let mut buffer = self.inner.buffer.try_lock(); + if buffer.is_none() { + return Err(RecvError::Unavailable); + } + buffer.as_mut().unwrap().pop_front() + }; + + match result { + Some(val) => Ok(val), + None => { + // Check if there are any senders left + if self.inner.senders.load(Ordering::SeqCst) == 0 { + Err(RecvError::Disconnected) + } else { + Err(RecvError::Empty) + } + } + } + } + + /// Tries to receive multiple elements at once, up to the specified limit + /// Returns a vector of received elements + pub fn try_recv_batch(&self, max_items: usize) -> Vec + where + T: Send, + { + // If max_items is 0, return an empty vector + if max_items == 0 { + return Vec::new(); + } + + let mut items = Vec::new(); + + // Lock the buffer once for the entire batch + let mut buffer = self.inner.buffer.lock(); + + // Calculate how many items to take + let count = max_items.min(buffer.len()); + + // Reserve capacity for efficiency + items.reserve(count); + + // Take items from the front of the queue + for _ in 0..count { + if let Some(item) = buffer.pop_front() { + items.push(item); + } else { + // This shouldn't happen due to the min() above, but just in case + break; + } + } + + items + } + + /// Peeks at the next element without removing it + pub fn peek(&self) -> Option + where + T: Clone, + { + let buffer = self.inner.buffer.lock(); + buffer.front().cloned() + } + + /// Returns the current number of elements in the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } +} + +impl Drop for Sender { + fn drop(&mut self) { + self.inner.senders.fetch_sub(1, Ordering::SeqCst); + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + self.inner.receivers.fetch_sub(1, Ordering::SeqCst); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + + #[test] + fn send_and_recv_roundtrip() { + let channel = Channel::new(); + let (sender, receiver) = channel.split(); + sender.send(42usize).unwrap(); + assert_eq!(receiver.recv().unwrap(), 42); + } + + #[test] + fn priority_messages_arrive_first() { + let channel = Channel::new(); + let (sender, receiver) = channel.split(); + sender.send(1).unwrap(); + sender.send_priority(99).unwrap(); + assert_eq!(receiver.recv().unwrap(), 99); + assert_eq!(receiver.recv().unwrap(), 1); + } + + #[test] + fn send_batch_preserves_order() { + let channel = Channel::new(); + let (sender, receiver) = channel.split(); + assert_eq!(sender.send_batch([1, 2, 3]).unwrap(), 3); + assert_eq!(receiver.try_recv_batch(8), vec![1, 2, 3]); + } + + #[test] + fn try_recv_reports_empty_when_sender_alive() { + let (_sender, receiver) = Channel::<()>::new().split(); + assert_eq!(receiver.try_recv().unwrap_err(), RecvError::Empty); + } + + #[test] + fn recv_reports_disconnected_after_last_sender_dropped() { + let (sender, receiver) = Channel::<()>::new().split(); + drop(sender); + assert_eq!(receiver.recv().unwrap_err(), RecvError::Disconnected); + } +} diff --git a/support/serde_helpers/Cargo.toml b/support/serde_helpers/Cargo.toml index d7afdceb8b..9be521fb35 100644 --- a/support/serde_helpers/Cargo.toml +++ b/support/serde_helpers/Cargo.toml @@ -11,7 +11,7 @@ guid.workspace = true base64.workspace = 
true serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } [lints] workspace = true diff --git a/vm/devices/firmware/hyperv_secure_boot_templates/Cargo.toml b/vm/devices/firmware/hyperv_secure_boot_templates/Cargo.toml index 24a153dfa3..ca43a29569 100644 --- a/vm/devices/firmware/hyperv_secure_boot_templates/Cargo.toml +++ b/vm/devices/firmware/hyperv_secure_boot_templates/Cargo.toml @@ -11,7 +11,7 @@ firmware_uefi_custom_vars.workspace = true hyperv_uefi_custom_vars_json.workspace = true [build-dependencies] -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } [lints] workspace = true diff --git a/vm/devices/firmware/hyperv_uefi_custom_vars_json/Cargo.toml b/vm/devices/firmware/hyperv_uefi_custom_vars_json/Cargo.toml index 8cf4058c27..87046b187f 100644 --- a/vm/devices/firmware/hyperv_uefi_custom_vars_json/Cargo.toml +++ b/vm/devices/firmware/hyperv_uefi_custom_vars_json/Cargo.toml @@ -14,7 +14,7 @@ serde_helpers.workspace = true base64.workspace = true serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } thiserror.workspace = true zerocopy.workspace = true [lints] diff --git a/vm/devices/get/get_protocol/Cargo.toml b/vm/devices/get/get_protocol/Cargo.toml index 98b55264a8..996c52f550 100644 --- a/vm/devices/get/get_protocol/Cargo.toml +++ b/vm/devices/get/get_protocol/Cargo.toml @@ -13,7 +13,7 @@ serde_helpers.workspace = true bitfield-struct.workspace = true serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } static_assertions.workspace = true zerocopy.workspace = true [lints] diff --git a/vm/devices/get/guest_emulation_device/Cargo.toml b/vm/devices/get/guest_emulation_device/Cargo.toml index 8316e1e2c3..05ccf15623 100644 --- a/vm/devices/get/guest_emulation_device/Cargo.toml +++ b/vm/devices/get/guest_emulation_device/Cargo.toml @@ -38,7 +38,7 @@ tracelimit.workspace = true base64 = { workspace = true, optional = true } futures.workspace = true parking_lot.workspace = true -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } sha2 = { workspace = true, optional = true } thiserror.workspace = true jiff.workspace = true diff --git a/vm/devices/get/guest_emulation_log/Cargo.toml b/vm/devices/get/guest_emulation_log/Cargo.toml index 423a0c4f53..c7ec332973 100644 --- a/vm/devices/get/guest_emulation_log/Cargo.toml +++ b/vm/devices/get/guest_emulation_log/Cargo.toml @@ -20,7 +20,7 @@ inspect.workspace = true task_control.workspace = true async-trait.workspace = true serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } thiserror.workspace = true tracing.workspace = true zerocopy.workspace = true diff --git a/vm/devices/get/guest_emulation_transport/Cargo.toml b/vm/devices/get/guest_emulation_transport/Cargo.toml index d874ce1ccb..5a61f98bf2 100644 --- a/vm/devices/get/guest_emulation_transport/Cargo.toml +++ b/vm/devices/get/guest_emulation_transport/Cargo.toml @@ -36,7 +36,7 @@ futures.workspace = true futures-concurrency.workspace = true getrandom.workspace = true parking_lot.workspace = true -serde_json.workspace = true +serde_json = { workspace = true, features = ["std"] } thiserror.workspace = true tracing.workspace = true unicycle.workspace = true diff --git 
diff --git a/vm/devices/get/underhill_config/Cargo.toml b/vm/devices/get/underhill_config/Cargo.toml
index c70886f6d5..2be0c84739 100644
--- a/vm/devices/get/underhill_config/Cargo.toml
+++ b/vm/devices/get/underhill_config/Cargo.toml
@@ -14,7 +14,7 @@ vtl2_settings_proto.workspace = true
 prost.workspace = true
 serde = { workspace = true, features = ["derive"] }
-serde_json.workspace = true
+serde_json = { workspace = true, features = ["std"] }
 thiserror.workspace = true
 [lints]
diff --git a/vm/devices/get/vtl2_settings_proto/Cargo.toml b/vm/devices/get/vtl2_settings_proto/Cargo.toml
index e1d2b9ca39..c9abbe6754 100644
--- a/vm/devices/get/vtl2_settings_proto/Cargo.toml
+++ b/vm/devices/get/vtl2_settings_proto/Cargo.toml
@@ -10,7 +10,7 @@ rust-version.workspace = true
 pbjson.workspace = true
 pbjson-types.workspace = true
 prost.workspace = true
-serde.workspace = true
+serde = { workspace = true, features = ["std"] }
 [build-dependencies]
 prost-build.workspace = true
diff --git a/vm/devices/storage/disklayer_sqlite/Cargo.toml b/vm/devices/storage/disklayer_sqlite/Cargo.toml
index 1086ab52d5..50954cbade 100644
--- a/vm/devices/storage/disklayer_sqlite/Cargo.toml
+++ b/vm/devices/storage/disklayer_sqlite/Cargo.toml
@@ -28,7 +28,7 @@ futures.workspace = true
 # "bundled" feature in order to ensure consistent behaviors across platforms.
 rusqlite.workspace = true
 serde = { workspace = true, features = ["derive"] }
-serde_json.workspace = true
+serde_json = { workspace = true, features = ["std"] }
 tracing.workspace = true
 [lints]
diff --git a/vm/loader/igvmfilegen/Cargo.toml b/vm/loader/igvmfilegen/Cargo.toml
index a26c639642..bb0ae67943 100644
--- a/vm/loader/igvmfilegen/Cargo.toml
+++ b/vm/loader/igvmfilegen/Cargo.toml
@@ -24,8 +24,8 @@ hex = { workspace = true, features = ["serde"] }
 igvm.workspace = true
 igvm_defs.workspace = true
 range_map_vec.workspace = true
-serde.workspace = true
-serde_json.workspace = true
+serde = { workspace = true, features = ["std"] }
+serde_json = { workspace = true, features = ["std"] }
 sha2.workspace = true
 thiserror.workspace = true
 tracing-subscriber = { workspace = true, features = ["env-filter"] }
diff --git a/vm/loader/igvmfilegen_config/Cargo.toml b/vm/loader/igvmfilegen_config/Cargo.toml
index 2584be9b75..f25fe5b3d4 100644
--- a/vm/loader/igvmfilegen_config/Cargo.toml
+++ b/vm/loader/igvmfilegen_config/Cargo.toml
@@ -10,7 +10,7 @@ rust-version.workspace = true
 serde = { workspace = true, features = ["derive"] }
 [dev-dependencies]
-serde_json.workspace = true
+serde_json = { workspace = true, features = ["std"] }
 [lints]
 workspace = true
diff --git a/vm/vmgs/vmgstool/Cargo.toml b/vm/vmgs/vmgstool/Cargo.toml
index 305f8a6dcc..678545f71c 100644
--- a/vm/vmgs/vmgstool/Cargo.toml
+++ b/vm/vmgs/vmgstool/Cargo.toml
@@ -35,7 +35,7 @@ hex.workspace = true
 fs-err.workspace = true
 getrandom = { workspace = true, optional = true}
 serde = { workspace = true, features = ["derive"] }
-serde_json.workspace = true
+serde_json = { workspace = true, features = ["std"] }
 thiserror.workspace = true
 ucs2.workspace = true
diff --git a/vmm_tests/vmm_tests/Cargo.toml b/vmm_tests/vmm_tests/Cargo.toml
index 668c1b54d7..54a3a130e3 100644
--- a/vmm_tests/vmm_tests/Cargo.toml
+++ b/vmm_tests/vmm_tests/Cargo.toml
@@ -56,9 +56,9 @@ hvlite_ttrpc_vmservice.workspace = true
 mesh_rpc.workspace = true
 tempfile.workspace = true
-serde_json.workspace = true
+serde_json = { workspace = true, features = ["std"] }
 pipette_client.workspace = true
-serde.workspace = true
+serde = { workspace = true, features = ["std"] }
 [lints]
 workspace = true
diff --git a/xsync/xsync/Cargo.toml b/xsync/xsync/Cargo.toml
index 630de7566f..6eef9ebbcf 100644
--- a/xsync/xsync/Cargo.toml
+++ b/xsync/xsync/Cargo.toml
@@ -16,7 +16,7 @@ fs-err.workspace = true
 log.workspace = true
 pathdiff.workspace = true
 semver.workspace = true
-serde.workspace = true
+serde = { workspace = true, features = ["std"] }
 toml_edit = { workspace = true, features = ["serde"] }
 ci_logger.workspace = true
diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml
index a6935c7c5f..244502ed5c 100644
--- a/xtask/Cargo.toml
+++ b/xtask/Cargo.toml
@@ -24,8 +24,8 @@ log.workspace = true
 object = { workspace = true, features = ["read_core", "elf", "std"] }
 mbrman.workspace = true
 rayon.workspace = true
-serde_json.workspace = true
-serde.workspace = true
+serde_json = { workspace = true, features = ["std"]}
+serde = { workspace = true, features = ["std"] }
 toml_edit = { workspace = true, features = ["serde"] }
 walkdir.workspace = true
 which.workspace = true