In preparation for raising -Wimplicit-fallthrough to 5, replace all
fall-through comments with the fallthrough attribute pseudo-keyword.
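
For reviewers unfamiliar with the pseudo-keyword: "fallthrough" is not ISO C
but a macro expanding to the compiler's fallthrough statement attribute, which
-Wimplicit-fallthrough=5 still honors after it stops honoring comments. Below
is a minimal, self-contained sketch of a typical definition and usage; it is
illustrative only, and the exact spelling and location of QEMU's own macro
(presumably added elsewhere in this series in its compiler header) is an
assumption, not part of this patch.

    /* Illustrative sketch only -- not the hunk QEMU applies. */
    #include <stdio.h>

    /* Typical definition of the pseudo-keyword; QEMU's real definition
     * (assumed to live in its compiler header) may differ in spelling. */
    #ifdef __has_attribute
    # if __has_attribute(__fallthrough__)
    #  define fallthrough __attribute__((__fallthrough__))
    # endif
    #endif
    #ifndef fallthrough
    # define fallthrough do {} while (0)  /* no-op fallback */
    #endif

    static int classify(int type)
    {
        int bits = 0;

        switch (type) {
        case 0:
            bits |= 1;
            fallthrough;        /* replaces the old fall-through comment */
        case 1:
            bits |= 2;
            break;
        default:
            break;
        }
        return bits;
    }

    int main(void)
    {
        printf("%d %d\n", classify(0), classify(1)); /* prints "3 2" */
        return 0;
    }

Built with gcc -Wimplicit-fallthrough=5, the attribute form keeps this switch
warning-free, whereas the old comment form would now trigger the diagnostic.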

Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidiana...@linaro.org>
---
 hw/pci/pcie_aer.c | 3 ++-
 hw/pci/pcie_doe.c | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c
index b68c7ecb49..c99ecce2a1 100644
--- a/hw/pci/pcie_aer.c
+++ b/hw/pci/pcie_aer.c
@@ -97,71 +97,72 @@ static void aer_log_clear_all_err(PCIEAERLog *aer_log)
 int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset,
                   uint16_t size, Error **errp)
 {
     pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, cap_ver,
                         offset, size);
     dev->exp.aer_cap = offset;

     /* clip down the value to avoid unreasonable memory usage */
     if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
         error_setg(errp, "Invalid aer_log_max %d. The max number of aer log "
                    "is %d", dev->exp.aer_log.log_max, PCIE_AER_LOG_MAX_LIMIT);
         return -EINVAL;
     }
     dev->exp.aer_log.log = g_malloc0(sizeof dev->exp.aer_log.log[0] *
                                      dev->exp.aer_log.log_max);

     pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
                  PCI_ERR_UNC_SUPPORTED);

     if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) {
         pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK,
                      PCI_ERR_UNC_MASK_DEFAULT);
         pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK,
                      PCI_ERR_UNC_SUPPORTED);
     }

     pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
                  PCI_ERR_UNC_SEVERITY_DEFAULT);
     pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER,
                  PCI_ERR_UNC_SUPPORTED);

     pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS,
                                PCI_ERR_COR_SUPPORTED);

     pci_set_long(dev->config + offset + PCI_ERR_COR_MASK,
                  PCI_ERR_COR_MASK_DEFAULT);
     pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK,
                  PCI_ERR_COR_SUPPORTED);

     /* capabilities and control. multiple header logging is supported */
     if (dev->exp.aer_log.log_max > 0) {
         pci_set_long(dev->config + offset + PCI_ERR_CAP,
                      PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC |
                      PCI_ERR_CAP_MHRC);
         pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                      PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE |
                      PCI_ERR_CAP_MHRE);
     } else {
         pci_set_long(dev->config + offset + PCI_ERR_CAP,
                      PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC);
         pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                      PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
     }

     switch (pcie_cap_get_type(dev)) {
     case PCI_EXP_TYPE_ROOT_PORT:
         /* this case will be set by pcie_aer_root_init() */
-        /* fallthrough */
+        fallthrough;
     case PCI_EXP_TYPE_DOWNSTREAM:
+        fallthrough;
     case PCI_EXP_TYPE_UPSTREAM:
         pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL,
                                    PCI_BRIDGE_CTL_SERR);
         pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS,
                                    PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
         break;
     default:
         /* nothing */
         break;
     }

     return 0;
 }
diff --git a/hw/pci/pcie_doe.c b/hw/pci/pcie_doe.c
index 2210f86968..f04a36e664 100644
--- a/hw/pci/pcie_doe.c
+++ b/hw/pci/pcie_doe.c
@@ -295,73 +295,73 @@ bool pcie_doe_read_config(DOECap *doe_cap, uint32_t addr, int size,
 /*
  * Write to DOE config space.
  * Return if the address not within DOE_CAP range or receives an abort
  */
 void pcie_doe_write_config(DOECap *doe_cap,
                            uint32_t addr, uint32_t val, int size)
 {
     uint16_t doe_offset = doe_cap->offset;
     uint32_t shift;

     if (!range_covers_byte(doe_offset + PCI_EXP_DOE_CAP,
                            PCI_DOE_SIZEOF - 4, addr)) {
         return;
     }

     /* Process Alignment */
     shift = addr % DWORD_BYTE;
     addr -= (doe_offset + shift);
     val = deposit32(val, shift * 8, size * 8, val);

     switch (addr) {
     case PCI_EXP_DOE_CTRL:
         if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_ABORT)) {
             pcie_doe_set_ready(doe_cap, 0);
             pcie_doe_set_error(doe_cap, 0);
             pcie_doe_reset_mbox(doe_cap);
             return;
         }

         if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_GO)) {
             pcie_doe_prepare_rsp(doe_cap);
         }

         if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_INTR_EN)) {
             doe_cap->ctrl.intr = 1;
         /* Clear interrupt bit located within the first byte */
         } else if (shift == 0) {
             doe_cap->ctrl.intr = 0;
         }
         break;
     case PCI_EXP_DOE_STATUS:
         if (FIELD_EX32(val, PCI_DOE_CAP_STATUS, DOE_INTR_STATUS)) {
             doe_cap->status.intr = 0;
         }
         break;
     case PCI_EXP_DOE_RD_DATA_MBOX:
         /* Mailbox should be DW accessed */
         if (size != DWORD_BYTE) {
             return;
         }
         doe_cap->read_mbox_idx++;
         if (doe_cap->read_mbox_idx == doe_cap->read_mbox_len) {
             pcie_doe_reset_mbox(doe_cap);
             pcie_doe_set_ready(doe_cap, 0);
         } else if (doe_cap->read_mbox_idx > doe_cap->read_mbox_len) {
             /* Underflow */
             pcie_doe_set_error(doe_cap, 1);
         }
         break;
     case PCI_EXP_DOE_WR_DATA_MBOX:
         /* Mailbox should be DW accessed */
         if (size != DWORD_BYTE) {
             return;
         }
         doe_cap->write_mbox[doe_cap->write_mbox_len] = val;
         doe_cap->write_mbox_len++;
         break;
     case PCI_EXP_DOE_CAP:
-        /* fallthrough */
+        fallthrough;
     default:
         break;
     }
 }
--
2.39.2