In preparation for raising -Wimplicit-fallthrough to 5, replace all fall-through comments with the fallthrough attribute pseudo-keyword.
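For context, below is a minimal sketch of how such a pseudo-keyword can be defined; the header placement and exact guards here are illustrative assumptions, not part of this patch:

/*
 * Illustrative definition of a "fallthrough" pseudo-keyword, assuming a
 * compiler-support header (e.g. along the lines of include/qemu/compiler.h).
 * At -Wimplicit-fallthrough=5 the compiler honours only the attribute, not
 * comments, so every intentional fall through must be annotated this way.
 */
#ifndef __has_attribute
# define __has_attribute(x) 0   /* compatibility for compilers without it */
#endif

#if __has_attribute(fallthrough)
# define fallthrough __attribute__((fallthrough))
#else
# define fallthrough do {} while (0) /* no-op so "fallthrough;" still parses */
#endif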
Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidiana...@linaro.org>
---
 hw/s390x/ipl.c           | 1 +
 hw/s390x/s390-pci-inst.c | 4 ++--
 hw/s390x/sclp.c          | 4 ++--
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c
index 515dcf51b5..da2333846f 100644
--- a/hw/s390x/ipl.c
+++ b/hw/s390x/ipl.c
@@ -394,57 +394,58 @@ static CcwDevice *s390_get_ccw_device(DeviceState *dev_st, int *devtype)
 static bool s390_gen_initial_iplb(S390IPLState *ipl)
 {
     DeviceState *dev_st;
     CcwDevice *ccw_dev = NULL;
     SCSIDevice *sd;
     int devtype;
 
     dev_st = get_boot_device(0);
     if (dev_st) {
         ccw_dev = s390_get_ccw_device(dev_st, &devtype);
     }
 
     /*
      * Currently allow IPL only from CCW devices.
      */
     if (ccw_dev) {
         switch (devtype) {
         case CCW_DEVTYPE_SCSI:
             sd = SCSI_DEVICE(dev_st);
             ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN);
             ipl->iplb.blk0_len =
                 cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN - S390_IPLB_HEADER_LEN);
             ipl->iplb.pbt = S390_IPL_TYPE_QEMU_SCSI;
             ipl->iplb.scsi.lun = cpu_to_be32(sd->lun);
             ipl->iplb.scsi.target = cpu_to_be16(sd->id);
             ipl->iplb.scsi.channel = cpu_to_be16(sd->channel);
             ipl->iplb.scsi.devno = cpu_to_be16(ccw_dev->sch->devno);
             ipl->iplb.scsi.ssid = ccw_dev->sch->ssid & 3;
             break;
         case CCW_DEVTYPE_VFIO:
             ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
             ipl->iplb.pbt = S390_IPL_TYPE_CCW;
             ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
             ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3;
             break;
         case CCW_DEVTYPE_VIRTIO_NET:
             ipl->netboot = true;
             /* Fall through to CCW_DEVTYPE_VIRTIO case */
+            fallthrough;
         case CCW_DEVTYPE_VIRTIO:
             ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
             ipl->iplb.blk0_len =
                 cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN);
             ipl->iplb.pbt = S390_IPL_TYPE_CCW;
             ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
             ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3;
             break;
         }
 
         if (!s390_ipl_set_loadparm(ipl->iplb.loadparm)) {
             ipl->iplb.flags |= DIAG308_FLAGS_LP_VALID;
         }
 
         return true;
     }
 
     return false;
 }
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index 30149546c0..171320384c 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -1330,83 +1330,83 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
 int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                          uintptr_t ra)
 {
     CPUS390XState *env = &cpu->env;
     uint8_t dmaas;
     uint32_t fh;
     ZpciFib fib;
     S390PCIBusDevice *pbdev;
     uint32_t data;
     uint64_t cc = ZPCI_PCI_LS_OK;
 
     if (env->psw.mask & PSW_MASK_PSTATE) {
         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
         return 0;
     }
 
     fh = env->regs[r1] >> 32;
     dmaas = (env->regs[r1] >> 16) & 0xff;
 
     if (dmaas) {
         setcc(cpu, ZPCI_PCI_LS_ERR);
         s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
         return 0;
     }
 
     if (fiba & 0x7) {
         s390_program_interrupt(env, PGM_SPECIFICATION, ra);
         return 0;
     }
 
     pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
     if (!pbdev) {
         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
         return 0;
     }
 
     memset(&fib, 0, sizeof(fib));
 
     switch (pbdev->state) {
     case ZPCI_FS_RESERVED:
     case ZPCI_FS_STANDBY:
         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
         return 0;
     case ZPCI_FS_DISABLED:
         if (fh & FH_MASK_ENABLE) {
             setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
             return 0;
         }
         goto out;
     /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
      * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
     case ZPCI_FS_ERROR:
         fib.fc |= 0x20;
-        /* fallthrough */
+        fallthrough;
     case ZPCI_FS_BLOCKED:
         fib.fc |= 0x40;
-        /* fallthrough */
+        fallthrough;
     case ZPCI_FS_ENABLED:
         fib.fc |= 0x80;
         if (pbdev->iommu->enabled) {
             fib.fc |= 0x10;
         }
         if (!(fh & FH_MASK_ENABLE)) {
             env->regs[r1] |= 1ULL << 63;
         }
         break;
     case ZPCI_FS_PERMANENT_ERROR:
         setcc(cpu, ZPCI_PCI_LS_ERR);
         s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
         return 0;
     }
 
     stq_p(&fib.pba, pbdev->iommu->pba);
     stq_p(&fib.pal, pbdev->iommu->pal);
     stq_p(&fib.iota, pbdev->iommu->g_iota);
     stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
     stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
     stq_p(&fib.fmb_addr, pbdev->fmb_addr);
 
     data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
            ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
            ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
     stl_p(&fib.data, data);
diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c
index eff74479f4..9b8c7ff043 100644
--- a/hw/s390x/sclp.c
+++ b/hw/s390x/sclp.c
@@ -50,28 +50,28 @@ static inline bool sclp_command_code_valid(uint32_t code)
 static bool sccb_verify_boundary(uint64_t sccb_addr, uint16_t sccb_len,
                                  uint32_t code)
 {
     uint64_t sccb_max_addr = sccb_addr + sccb_len - 1;
     uint64_t sccb_boundary = (sccb_addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
 
     switch (code & SCLP_CMD_CODE_MASK) {
     case SCLP_CMDW_READ_SCP_INFO:
     case SCLP_CMDW_READ_SCP_INFO_FORCED:
     case SCLP_CMDW_READ_CPU_INFO:
         /*
          * An extended-length SCCB is only allowed for Read SCP/CPU Info and
          * is allowed to exceed the 4k boundary. The respective commands will
          * set the length field to the required length if an insufficient
          * SCCB length is provided.
          */
         if (s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB)) {
             return true;
         }
-        /* fallthrough */
+        fallthrough;
     default:
         if (sccb_max_addr < sccb_boundary) {
             return true;
         }
     }
 
     return false;
 }
@@ -207,28 +207,28 @@ static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
 static void sclp_configure_io_adapter(SCLPDevice *sclp, SCCB *sccb,
                                       bool configure)
 {
     int rc;
 
     if (be16_to_cpu(sccb->h.length) < 16) {
         rc = SCLP_RC_INSUFFICIENT_SCCB_LENGTH;
         goto out_err;
     }
 
     switch (((IoaCfgSccb *)sccb)->atype) {
     case SCLP_RECONFIG_PCI_ATYPE:
         if (s390_has_feat(S390_FEAT_ZPCI)) {
             if (configure) {
                 s390_pci_sclp_configure(sccb);
             } else {
                 s390_pci_sclp_deconfigure(sccb);
             }
             return;
         }
-        /* fallthrough */
+        fallthrough;
     default:
         rc = SCLP_RC_ADAPTER_TYPE_NOT_RECOGNIZED;
     }
 
 out_err:
     sccb->h.response_code = cpu_to_be16(rc);
 }
-- 
2.39.2