On Mon, Mar 11, 2024 at 11:15:37AM +1000, Alistair Francis wrote:
> From: Wilfred Mallawa <wilfred.mall...@wdc.com>
> 
> Setup Data Object Exchange (DOE) as an extended capability for the NVME
> controller and connect SPDM (CMA) to it.
> 
> Signed-off-by: Wilfred Mallawa <wilfred.mall...@wdc.com>
> Signed-off-by: Alistair Francis <alistair.fran...@wdc.com>
> Reviewed-by: Jonathan Cameron <jonathan.came...@huawei.com>
> Acked-by: Klaus Jensen <k.jen...@samsung.com>

Coding style breakage here. checkpatch will tell you if you run it.
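For example (the patch file name below is just a placeholder for wherever
you have this patch saved locally):

  $ ./scripts/checkpatch.pl 0003-hw-nvme-add-spdm-doe.patch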
But more importantly, this breaks tests (just run make check on x86):

# starting QEMU: exec ./buildx86/qemu-system-x86_64 -qtest unix:/tmp/qtest-222311.sock -qtest-log /dev/null -chardev socket,path=/tmp/qtest-222311.qmp,id=char0 -mon chardev=char0,mode=control -display none -audio none -M pc -drive id=drv0,if=none,file=null-co://,file.read-zeroes=on,format=raw -object memory-backend-ram,id=pmr0,share=on,size=8 -device nvme,addr=04.0,drive=drv0,serial=foo,cmb_size_mb=2 -accel qtest
** ERROR:../tests/qtest/libqos/pci.c:232:qpci_device_enable: assertion failed (cmd & PCI_COMMAND_IO == PCI_COMMAND_IO): (0x00000000 == 0x00000001)
not ok /x86_64/pc/i440FX-pcihost/pci-bus-pc/pci-bus/nvme/nvme-tests/oob-cmb-access - ERROR:../tests/qtest/libqos/pci.c:232:qpci_device_enable: assertion failed (cmd & PCI_COMMAND_IO == PCI_COMMAND_IO): (0x00000000 == 0x00000001)
Bail out!
run4bisect.sh: line 30: 222311 Aborted (core dumped) QTEST_QEMU_STORAGE_DAEMON_BINARY=./buildx86/storage-daemon/qemu-storage-daemon QTEST_QEMU_BINARY=./buildx86/qemu-system-x86_64 ./buildx86/tests/qtest/qos-test

> ---
>  docs/specs/index.rst        |   1 +
>  docs/specs/spdm.rst         | 122 ++++++++++++++++++++++++++++++++++++
>  include/hw/pci/pci_device.h |   5 ++
>  include/hw/pci/pcie_doe.h   |   3 +
>  hw/nvme/ctrl.c              |  57 +++++++++++++++++
>  5 files changed, 188 insertions(+)
>  create mode 100644 docs/specs/spdm.rst
> 
> diff --git a/docs/specs/index.rst b/docs/specs/index.rst
> index 1484e3e760..e2d907959a 100644
> --- a/docs/specs/index.rst
> +++ b/docs/specs/index.rst
> @@ -29,6 +29,7 @@ guest hardware that is specific to QEMU.
>     edu
>     ivshmem-spec
>     pvpanic
> +   spdm
>     standard-vga
>     virt-ctlr
>     vmcoreinfo
> diff --git a/docs/specs/spdm.rst b/docs/specs/spdm.rst
> new file mode 100644
> index 0000000000..06b5ccac09
> --- /dev/null
> +++ b/docs/specs/spdm.rst
> @@ -0,0 +1,122 @@
> +======================================================
> +QEMU Security Protocols and Data Models (SPDM) Support
> +======================================================
> +
> +SPDM enables authentication, attestation and key exchange to assist in
> +providing infrastructure security enablement. It's a standard published
> +by the `DMTF`_.
> +
> +QEMU supports connecting to a SPDM responder implementation. This allows an
> +external application to emulate the SPDM responder logic for an SPDM device.
> +
> +Setting up a SPDM server
> +========================
> +
> +When using QEMU with SPDM devices QEMU will connect to a server which
> +implements the SPDM functionality.
> +
> +SPDM-Utils
> +----------
> +
> +You can use `SPDM Utils`_ to emulate a responder. This is the simplest method.
> +
> +SPDM-Utils is a Linux application to manage, test and develop devices
> +supporting DMTF Security Protocol and Data Model (SPDM). It is written in Rust
> +and utilises libspdm.
> +
> +To use SPDM-Utils you will need to do the following steps. Details are included
> +in the SPDM-Utils README.
> +
> + 1. `Build libspdm`_
> + 2. `Build SPDM Utils`_
> + 3. `Run it as a server`_
> +
> +spdm-emu
> +--------
> +
> +You can use `spdm emu`_ to model the
> +SPDM responder.
> +
> +.. code-block:: shell
> +
> +    $ cd spdm-emu
> +    $ git submodule init; git submodule update --recursive
> +    $ mkdir build; cd build
> +    $ cmake -DARCH=x64 -DTOOLCHAIN=GCC -DTARGET=Debug -DCRYPTO=openssl ..
> +    $ make -j32
> +    $ make copy_sample_key # Build certificates, required for SPDM authentication.
> +
> +It is worth noting that the certificates should be in compliance with
> +PCIe r6.1 sec 6.31.3. This means you will need to add the following to
> +openssl.cnf
> +
> +.. code-block::
> +
> +    subjectAltName = otherName:2.23.147;UTF8:Vendor=1b36:Device=0010:CC=010802:REV=02:SSVID=1af4:SSID=1100
> +    2.23.147 = ASN1:OID:2.23.147
> +
> +and then manually regenerate some certificates with:
> +
> +.. code-block:: shell
> +
> +    $ openssl req -nodes -newkey ec:param.pem -keyout end_responder.key \
> +        -out end_responder.req -sha384 -batch \
> +        -subj "/CN=DMTF libspdm ECP384 responder cert"
> +
> +    $ openssl x509 -req -in end_responder.req -out end_responder.cert \
> +        -CA inter.cert -CAkey inter.key -sha384 -days 3650 -set_serial 3 \
> +        -extensions v3_end -extfile ../openssl.cnf
> +
> +    $ openssl asn1parse -in end_responder.cert -out end_responder.cert.der
> +
> +    $ cat ca.cert.der inter.cert.der end_responder.cert.der > bundle_responder.certchain.der
> +
> +You can use SPDM-Utils instead as it will generate the correct certificates
> +automatically.
> +
> +The responder can then be launched with
> +
> +.. code-block:: shell
> +
> +    $ cd bin
> +    $ ./spdm_responder_emu --trans PCI_DOE
> +
> +Connecting an SPDM NVMe device
> +==============================
> +
> +Once a SPDM server is running we can start QEMU and connect to the server.
> +
> +For an NVMe device first let's setup a block we can use
> +
> +.. code-block:: shell
> +
> +    $ cd qemu-spdm/linux/image
> +    $ dd if=/dev/zero of=blknvme bs=1M count=2096 # 2GB NVMe Drive
> +
> +Then you can add this to your QEMU command line:
> +
> +.. code-block:: shell
> +
> +    -drive file=blknvme,if=none,id=mynvme,format=raw \
> +    -device nvme,drive=mynvme,serial=deadbeef,spdm=2323
> +
> +At which point QEMU will try to connect to the SPDM server.
> +
> +
> +.. _DMTF:
> +   https://www.dmtf.org/standards/SPDM
> +
> +.. _SPDM Utils:
> +   https://github.com/westerndigitalcorporation/spdm-utils
> +
> +.. _spdm emu:
> +   https://github.com/dmtf/spdm-emu
> +
> +.. _Build libspdm:
> +   https://github.com/westerndigitalcorporation/spdm-utils?tab=readme-ov-file#build-libspdm
> +
> +.. _Build SPDM Utils:
> +   https://github.com/westerndigitalcorporation/spdm-utils?tab=readme-ov-file#build-the-binary
> +.. _Run it as a server:
> +   https://github.com/westerndigitalcorporation/spdm-utils#qemu-spdm-device-emulation
> diff --git a/include/hw/pci/pci_device.h b/include/hw/pci/pci_device.h
> index d3dd0f64b2..b8379c78f1 100644
> --- a/include/hw/pci/pci_device.h
> +++ b/include/hw/pci/pci_device.h
> @@ -3,6 +3,7 @@
>  
>  #include "hw/pci/pci.h"
>  #include "hw/pci/pcie.h"
> +#include "hw/pci/pcie_doe.h"
>  
>  #define TYPE_PCI_DEVICE "pci-device"
>  typedef struct PCIDeviceClass PCIDeviceClass;
> @@ -157,6 +158,10 @@ struct PCIDevice {
>      MSIVectorReleaseNotifier msix_vector_release_notifier;
>      MSIVectorPollNotifier msix_vector_poll_notifier;
>  
> +    /* DOE */
> +    DOECap doe_spdm;
> +    uint16_t spdm_port;
> +
>      /* ID of standby device in net_failover pair */
>      char *failover_pair_id;
>      uint32_t acpi_index;
> diff --git a/include/hw/pci/pcie_doe.h b/include/hw/pci/pcie_doe.h
> index 15d94661f9..9e1275db8a 100644
> --- a/include/hw/pci/pcie_doe.h
> +++ b/include/hw/pci/pcie_doe.h
> @@ -108,6 +108,9 @@ struct DOECap {
>      /* Protocols and its callback response */
>      DOEProtocol *protocols;
>      uint16_t protocol_num;
> +
> +    /* Used for spdm-socket */
> +    int spdm_socket;
>  };
>  
>  void pcie_doe_init(PCIDevice *pdev, DOECap *doe_cap, uint16_t offset,
> diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
> index 76fe039704..f2377745a9 100644
> --- a/hw/nvme/ctrl.c
> +++ b/hw/nvme/ctrl.c
> @@ -203,6 +203,7 @@
>  #include "sysemu/hostmem.h"
>  #include "hw/pci/msix.h"
>  #include "hw/pci/pcie_sriov.h"
> +#include "sysemu/spdm-socket.h"
>  #include "migration/vmstate.h"
>  
>  #include "nvme.h"
> @@ -8070,6 +8071,28 @@ static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
>      return 0;
>  }
>  
> +static bool pcie_doe_spdm_rsp(DOECap *doe_cap)
> +{
> +    void *req = pcie_doe_get_write_mbox_ptr(doe_cap);
> +    uint32_t req_len = pcie_doe_get_obj_len(req) * 4;
> +    void *rsp = doe_cap->read_mbox;
> +    uint32_t rsp_len = SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE;
> +    uint32_t recvd;
> +
> +    recvd = spdm_socket_rsp(doe_cap->spdm_socket,

you can combine this init with the declaration (see the sketch below this
hunk).

> +                            SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE,
> +                            req, req_len, rsp, rsp_len);
> +    doe_cap->read_mbox_len += DIV_ROUND_UP(recvd, 4);
> +
> +    return recvd != 0;
> +}
> +
> +static DOEProtocol doe_spdm_prot[] = {
> +    { PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_CMA, pcie_doe_spdm_rsp },
> +    { PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_SECURED_CMA, pcie_doe_spdm_rsp },
> +    { }
> +};
> +
>  static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
>  {
>      ERRP_GUARD();
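i.e. something along these lines (untested, just to show the shape; all
identifiers are the ones already used in the patch):

static bool pcie_doe_spdm_rsp(DOECap *doe_cap)
{
    void *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    uint32_t req_len = pcie_doe_get_obj_len(req) * 4;
    void *rsp = doe_cap->read_mbox;
    uint32_t rsp_len = SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE;
    /* initialisation combined with the declaration */
    uint32_t recvd = spdm_socket_rsp(doe_cap->spdm_socket,
                                     SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE,
                                     req, req_len, rsp, rsp_len);

    doe_cap->read_mbox_len += DIV_ROUND_UP(recvd, 4);

    return recvd != 0;
}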
> @@ -8126,6 +8149,23 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
>  
>      nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
>  
> +    pcie_cap_deverr_init(pci_dev);
> +
> +    /* DOE Initialisation */
> +    if (pci_dev->spdm_port) {
> +        uint16_t doe_offset = n->params.sriov_max_vfs ?
> +                                  PCI_CONFIG_SPACE_SIZE + PCI_ARI_SIZEOF
> +                                  : PCI_CONFIG_SPACE_SIZE;
> +
> +        pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset, doe_spdm_prot, true, 0);
> +

coding style violation (long line, see the sketch at the end of this mail)

> +        pci_dev->doe_spdm.spdm_socket = spdm_socket_connect(pci_dev->spdm_port, errp);

coding style violation

> +
> +        if (pci_dev->doe_spdm.spdm_socket < 0 ) {

coding style violation

> +            return false;
> +        }
> +    }
> +
>      if (n->params.cmb_size_mb) {
>          nvme_init_cmb(n, pci_dev);
>      }
> @@ -8370,6 +8410,11 @@ static void nvme_exit(PCIDevice *pci_dev)
>          g_free(n->cmb.buf);
>      }
>  
> +    if (pci_dev->doe_spdm.spdm_socket > 0 ) {

coding style violation

> +        spdm_socket_close(pci_dev->doe_spdm.spdm_socket,
> +                          SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE);
> +    }
> +
>      if (n->pmr.dev) {
>          host_memory_backend_set_mapped(n->pmr.dev, false);
>      }
> @@ -8412,6 +8457,7 @@ static Property nvme_props[] = {
>                        params.sriov_max_vi_per_vf, 0),
>      DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl,
>                        params.sriov_max_vq_per_vf, 0),
> +    DEFINE_PROP_UINT16("spdm", PCIDevice, spdm_port, 0),
>      DEFINE_PROP_END_OF_LIST(),
>  };
>  
> @@ -8494,10 +8540,20 @@ static void nvme_pci_write_config(PCIDevice *dev, uint32_t address,
>                                    uint32_t val, int len)
>  {
>      nvme_sriov_pre_write_ctrl(dev, address, val, len);
> +    pcie_doe_write_config(&dev->doe_spdm, address, val, len);
>      pci_default_write_config(dev, address, val, len);
>      pcie_cap_flr_write_config(dev, address, val, len);
>  }
>  
> +static uint32_t nvme_pci_read_config(PCIDevice *dev, uint32_t address, int len)
> +{
> +    uint32_t val;
> +    if (pcie_doe_read_config(&dev->doe_spdm, address, len, &val)) {
> +        return val;
> +    }
> +    return pci_default_read_config(dev, address, len);
> +}
> +
>  static const VMStateDescription nvme_vmstate = {
>      .name = "nvme",
>      .unmigratable = 1,
> @@ -8510,6 +8566,7 @@ static void nvme_class_init(ObjectClass *oc, void *data)
>  
>      pc->realize = nvme_realize;
>      pc->config_write = nvme_pci_write_config;
> +    pc->config_read = nvme_pci_read_config;
>      pc->exit = nvme_exit;
>      pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
>      pc->revision = 2;
> -- 
> 2.44.0
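To be concrete about the "coding style violation" notes above: wrapped at 80
columns and without the stray space before ')', the flagged blocks would look
roughly like this (untested sketch, using only identifiers already in the
patch):

    /* nvme_init_pci() */
    if (pci_dev->spdm_port) {
        uint16_t doe_offset = n->params.sriov_max_vfs ?
            PCI_CONFIG_SPACE_SIZE + PCI_ARI_SIZEOF : PCI_CONFIG_SPACE_SIZE;

        /* keep the calls within 80 columns by breaking the argument lists */
        pcie_doe_init(pci_dev, &pci_dev->doe_spdm, doe_offset,
                      doe_spdm_prot, true, 0);

        pci_dev->doe_spdm.spdm_socket =
            spdm_socket_connect(pci_dev->spdm_port, errp);

        if (pci_dev->doe_spdm.spdm_socket < 0) {
            return false;
        }
    }

    /* nvme_exit(): no space before the closing parenthesis */
    if (pci_dev->doe_spdm.spdm_socket > 0) {
        spdm_socket_close(pci_dev->doe_spdm.spdm_socket,
                          SPDM_SOCKET_TRANSPORT_TYPE_PCI_DOE);
    }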