Hi On Fri, Sep 7, 2018 at 12:09 AM, Stefan Berger <stef...@linux.vnet.ibm.com> wrote: > On 09/05/2018 11:29 PM, Marc-André Lureau wrote: >> >> This allows to pass the last failing test from the Windows HLK TPM 2.0 >> TCG PPI 1.3 tests. >> >> The interface is described in the "TCG Platform Reset Attack >> Mitigation Specification", chapter 6 "ACPI _DSM Function". According >> to Laszlo, it's not so easy to implement in OVMF, he suggested to do >> it in qemu instead. >> >> Signed-off-by: Marc-André Lureau <marcandre.lur...@redhat.com> >> --- >> hw/tpm/tpm_ppi.h | 2 ++ >> hw/i386/acpi-build.c | 46 ++++++++++++++++++++++++++++++++++++++++++++ >> hw/tpm/tpm_crb.c | 1 + >> hw/tpm/tpm_ppi.c | 23 ++++++++++++++++++++++ >> hw/tpm/tpm_tis.c | 1 + >> docs/specs/tpm.txt | 2 ++ >> hw/tpm/trace-events | 3 +++ >> 7 files changed, 78 insertions(+) >> >> diff --git a/hw/tpm/tpm_ppi.h b/hw/tpm/tpm_ppi.h >> index c2ab2ed300..b8f67962c7 100644 >> --- a/hw/tpm/tpm_ppi.h >> +++ b/hw/tpm/tpm_ppi.h >> @@ -23,4 +23,6 @@ typedef struct TPMPPI { >> bool tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m, >> hwaddr addr, Object *obj, Error **errp); >> +void tpm_ppi_reset(TPMPPI *tpmppi); >> + >> #endif /* TPM_TPM_PPI_H */ >> diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c >> index c5e9a6e11d..2ab3e8fae7 100644 >> --- a/hw/i386/acpi-build.c >> +++ b/hw/i386/acpi-build.c >> @@ -1824,6 +1824,13 @@ build_tpm_ppi(TPMIf *tpm, Aml *dev) >> pprq = aml_name("PPRQ"); >> pprm = aml_name("PPRM"); >> + aml_append(dev, >> + aml_operation_region("TPP3", AML_SYSTEM_MEMORY, >> + aml_int(TPM_PPI_ADDR_BASE + 0x15a), >> + 0x1)); >> + field = aml_field("TPP3", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); >> + aml_append(field, aml_named_field("MOVV", 8)); >> + aml_append(dev, field); >> /* >> * DerefOf in Windows is broken with SYSTEM_MEMORY. Use a dynamic >> * operation region inside of a method for getting FUNC[op]. 
>> @@ -2166,7 +2173,46 @@ build_tpm_ppi(TPMIf *tpm, Aml *dev) >> aml_append(ifctx, aml_return(aml_buffer(1, zerobyte))); >> } >> aml_append(method, ifctx); >> + >> + ifctx = aml_if( >> + aml_equal(uuid, >> + >> aml_touuid("376054ED-CC13-4675-901C-4756D7F2D45D"))); >> + { >> + /* standard DSM query function */ >> + ifctx2 = aml_if(aml_equal(function, zero)); >> + { >> + uint8_t byte_list[1] = { 0x03 }; >> + aml_append(ifctx2, aml_return(aml_buffer(1, byte_list))); >> + } >> + aml_append(ifctx, ifctx2); >> + >> + /* >> + * TCG Platform Reset Attack Mitigation Specification 1.0 >> Ch.6 >> + * >> + * Arg 2 (Integer): Function Index = 1 >> + * Arg 3 (Package): Arguments = Package: Type: Integer >> + * Operation Value of the Request >> + * Returns: Type: Integer >> + * 0: Success >> + * 1: General Failure >> + */ >> + ifctx2 = aml_if(aml_equal(function, one)); >> + { >> + aml_append(ifctx2, >> + aml_store(aml_derefof(aml_index(arguments, >> zero)), >> + op)); >> + { >> + aml_append(ifctx2, aml_store(op, aml_name("MOVV"))); >> + >> + /* 0: success */ >> + aml_append(ifctx2, aml_return(zero)); >> + } >> + } >> + aml_append(ifctx, ifctx2); >> + } >> + aml_append(method, ifctx); >> } >> + >> aml_append(dev, method); >> } >> diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c >> index b243222fd6..48f6a716ad 100644 >> --- a/hw/tpm/tpm_crb.c >> +++ b/hw/tpm/tpm_crb.c >> @@ -233,6 +233,7 @@ static void tpm_crb_reset(void *dev) >> { >> CRBState *s = CRB(dev); >> + tpm_ppi_reset(&s->ppi); >> tpm_backend_reset(s->tpmbe); >> memset(s->regs, 0, sizeof(s->regs)); >> diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c >> index f2f07f895e..46ca8ea3ea 100644 >> --- a/hw/tpm/tpm_ppi.c >> +++ b/hw/tpm/tpm_ppi.c >> @@ -16,8 +16,30 @@ >> #include "qapi/error.h" >> #include "cpu.h" >> #include "sysemu/memory_mapping.h" >> +#include "sysemu/reset.h" >> #include "migration/vmstate.h" >> #include "tpm_ppi.h" >> +#include "trace.h" >> + >> +void tpm_ppi_reset(TPMPPI *tpmppi) >> +{ >> + char *ptr = 
memory_region_get_ram_ptr(&tpmppi->ram); >> + >> + if (ptr[0x15a] & 0x1) { >> + GuestPhysBlockList guest_phys_blocks; >> + GuestPhysBlock *block; >> + >> + guest_phys_blocks_init(&guest_phys_blocks); >> + guest_phys_blocks_append(&guest_phys_blocks); >> + QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { >> + trace_tpm_ppi_memset(block->host_addr, >> + block->target_end - block->target_start); >> + memset(block->host_addr, 0, >> + block->target_end - block->target_start); > > > Does this also clear the PPI memory? If so, could we create a backup of the > few relevant bytes the firmware will look at and restore them after the > loop? The PPI device likely contains the only data that may need to be > preserved across a reboot, and we are running this loop here in the ppi > module itself, so should be able to do that.
guest_phys_blocks_append() doesn't gather device RAM, so it will skip the TPM PPI buffer. Thanks