Looks good.  A couple of minor comments below.

On Mon, Oct 02, 2017 at 03:02:09PM +0000, Stahl, Manuel wrote:
> +static int open(struct uio_info *info, struct inode *inode)
> +{
> +     struct uio_pci_dmem_dev *priv = to_uio_pci_dmem_dev(info);
> +     struct uio_mem *uiomem;
> +     int ret = 0;
> +     int dmem_region = 0;
> +
> +     uiomem = &priv->info.mem[dmem_region];
> +
> +     mutex_lock(&priv->alloc_lock);
> +     while (!priv->refcnt && uiomem < &priv->info.mem[MAX_UIO_MAPS]) {
> +             void *addr;
> +
> +             if (!uiomem->size)
> +                     break;
> +
> +             addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
> +                                       (dma_addr_t *)&uiomem->addr,
> +                                       GFP_KERNEL);
> +             if (!addr)
> +                     uiomem->addr = DMEM_MAP_ERROR;
> +
> +             priv->dmem_region_vaddr[dmem_region++] = addr;
> +             ++uiomem;
> +     }
> +     if (pci_check_and_mask_intx(priv->pdev))
> +             dev_info(&priv->pdev->dev, "Found pending interrupt");
> +
> +     if (!priv->refcnt)
> +             pci_set_master(priv->pdev);
> +
> +     priv->refcnt++;
> +
> +     mutex_unlock(&priv->alloc_lock);
> +
> +     return ret;

Just "return 0;" is more readable/explicit than "return ret;".

> +}
> +
> +static int release(struct uio_info *info, struct inode *inode)
> +{
> +     struct uio_pci_dmem_dev *priv = to_uio_pci_dmem_dev(info);
> +     struct uio_mem *uiomem;
> +     int dmem_region = 0;
> +
> +     uiomem = &priv->info.mem[dmem_region];
> +
> +     mutex_lock(&priv->alloc_lock);
> +
> +     priv->refcnt--;
> +     while (!priv->refcnt && uiomem < &priv->info.mem[MAX_UIO_MAPS]) {
> +             if (!uiomem->size)
> +                     break;

I think this needs to be a continue instead of a break to match
parse_dmem_entries() since we don't break for size == 0 in that loop.
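
Untested sketch of what I mean (the increments have to move ahead of the
continue so the loop still advances):

	if (!uiomem->size) {
		/* skip this slot but keep walking the table */
		++dmem_region;
		++uiomem;
		continue;
	}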

> +             if (priv->dmem_region_vaddr[dmem_region]) {
> +                     dma_free_coherent(&priv->pdev->dev, uiomem->size,
> +                                       priv->dmem_region_vaddr[dmem_region],
> +                                       uiomem->addr);
> +             }
> +             uiomem->addr = DMEM_MAP_ERROR;
> +             ++dmem_region;
> +             ++uiomem;
> +     }
> +     if (pci_check_and_mask_intx(priv->pdev))
> +             dev_info(&priv->pdev->dev, "Found pending interrupt");
> +
> +     if (!priv->refcnt)
> +             pci_clear_master(priv->pdev);
> +
> +     mutex_unlock(&priv->alloc_lock);
> +     return 0;
> +}
> +
> +static int dmem_mmap(struct uio_info *info, struct vm_area_struct *vma)
> +{
> +     struct uio_pci_dmem_dev *gdev = to_uio_pci_dmem_dev(info->priv);
> +     struct uio_mem *uiomem;
> +     int mi = vma->vm_pgoff;
> +
> +     if (mi >= MAX_UIO_MAPS)
> +             return -EINVAL;
> +
> +     uiomem = &info->mem[mi];
> +     if (uiomem->memtype != UIO_MEM_PHYS)
> +             return -EINVAL;
> +     if (!uiomem->size)
> +             return -EINVAL;
> +
> +     /* DMA address */
> +     vma->vm_pgoff = 0;
> +     return dma_mmap_coherent(&gdev->pdev->dev, vma,
> +                              gdev->dmem_region_vaddr[mi],
> +                              uiomem->addr, uiomem->size);
> +}
> +
> +/* Interrupt handler. Read/modify/write the command register to disable the
> + * interrupt.
> + */
> +static irqreturn_t irqhandler(int irq, struct uio_info *info)
> +{
> +     struct uio_pci_dmem_dev *gdev = to_uio_pci_dmem_dev(info);
> +
> +     if (gdev->pdev->msi_enabled)
> +             return IRQ_HANDLED;
> +
> +     if (pci_check_and_mask_intx(gdev->pdev))
> +             return IRQ_HANDLED;
> +
> +     return IRQ_NONE;
> +}
> +
> +static unsigned int uio_dmem_dma_bits = 32;
> +static char uio_dmem_sizes[256];
> +
> +static int parse_dmem_entries(struct pci_dev *pdev,
> +                           const struct pci_device_id *id,
> +                           struct uio_pci_dmem_dev *gdev)
> +{
> +     int ret;
> +     u32 regions = 0;
> +     u32 vendor, device;
> +     char *s, *tok, *sizes = NULL;
> +     unsigned long long size;
> +     struct uio_mem *uiomem;
> +     char * const buf = kstrdup(uio_dmem_sizes, GFP_KERNEL);
> +
> +     if (!buf) {
> +             ret = -ENOMEM;
> +             return ret;

I wish you wouldn't put the allocation in the declaration block...
Declaration blocks are a larger-than-expected source of static checker
bugs, so I think they are not reviewed as carefully.  Anyway, just do
"return -ENOMEM;" here instead of using a temporary variable.

> +     }
> +
> +     /* Find-out start and end of sizes list */
> +     s = buf;
> +     while (*s != '\0') {
> +             sizes = NULL;
> +             tok = strsep(&s, ":");
> +             if (!tok)
> +                     break;
> +             ret = kstrtou32(tok, 16, &vendor);
> +             if (ret)
> +                     break;
> +             tok = strsep(&s, ":");
> +             if (!tok)
> +                     break;
> +             ret = kstrtou32(tok, 16, &device);
> +             if (ret)
> +                     break;
> +             sizes = strsep(&s, ";");
> +             if (vendor == id->vendor && device == id->device)
> +                     break;
> +     }
> +
> +     memset(gdev->info.mem, 0, sizeof(gdev->info.mem));
> +     if (sizes) {
> +             dev_info(&pdev->dev, "Regions: %s\n", sizes);
> +
> +             /* Parse dynamic regions from sizes list */
> +             regions = 0;
> +             size = 0;
> +             s = sizes;
> +             while (s && (regions < MAX_UIO_MAPS)) {
> +                     tok = strsep(&s, ",");
> +                     if (!tok)
> +                             break;
> +
> +                     size = memparse(tok, NULL);
> +                     if (size) {
> +                             uiomem = &gdev->info.mem[regions];
> +                             uiomem->memtype = UIO_MEM_PHYS;
> +                             /* Will be allocated in open() call */
> +                             uiomem->addr = DMEM_MAP_ERROR;
> +                             uiomem->size = size;
> +                             regions++;
> +                     }
> +             }
> +             if (s)
> +                     dev_warn(&pdev->dev, "device has more than "
> +                                     __stringify(MAX_UIO_MAPS)
> +                                     " dynamic memory regions.\n");
> +     }
> +     dev_info(&pdev->dev, "Found %d regions\n", regions);
> +
> +     kfree(buf);
> +     return ret;
> +}
> +
> +static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
> +{
> +     struct uio_pci_dmem_dev *gdev;
> +     int err;
> +
> +     dev_info(&pdev->dev, "Probe %s for %04x:%04x\n", DRIVER_NAME,
> +              id->vendor, id->device);
> +
> +     err = pci_enable_device(pdev);
> +     if (err) {
> +             dev_err(&pdev->dev, "%s: pci_enable_device failed: %d\n",
> +                     __func__, err);
> +             return err;
> +     }
> +
> +     dev_info(&pdev->dev, "Legacy IRQ: %i", pdev->irq);
> +     /* Try to use MSI interrupts */
> +     pci_enable_msi_range(pdev, 1, 1);
> +     if (pdev->irq && !pci_intx_mask_supported(pdev)) {
> +             err = -ENODEV;
> +             goto err_disable_pci;
> +     }
> +
> +     gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
> +     if (!gdev) {
> +             err = -ENOMEM;
> +             goto err_disable_pci;
> +     }
> +
> +     gdev->info.name = DRIVER_NAME;
> +     gdev->info.version = DRIVER_VERSION;
> +     gdev->info.irq = pdev->irq;
> +     gdev->info.irq_flags = IRQF_SHARED;
> +     gdev->info.handler = irqhandler;
> +     gdev->info.open = open;
> +     gdev->info.release = release;
> +     gdev->info.mmap = dmem_mmap;
> +     gdev->info.priv = gdev;
> +     gdev->pdev = pdev;
> +
> +     /* Set DMA coherent mask */
> +     if (uio_dmem_dma_bits > 64)
> +             uio_dmem_dma_bits = 64;
> +     err = dma_set_coherent_mask(&pdev->dev,
> +                                 DMA_BIT_MASK(uio_dmem_dma_bits));
> +     if (err) {
> +             dev_err(&pdev->dev, "Unable to dma_set_coherent_mask\n");
> +             goto err_free_gdev;
> +     }
> +
> +     err = parse_dmem_entries(pdev, id, gdev);
> +     if (err)
> +             goto err_free_gdev;
> +
> +     mutex_init(&gdev->alloc_lock);
> +
> +     if (uio_register_device(&pdev->dev, &gdev->info))
> +             goto err_free_gdev;

Preserve the error code from uio_register_device().
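
I.e. something like:

	err = uio_register_device(&pdev->dev, &gdev->info);
	if (err)
		goto err_free_gdev;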

> +     pci_set_drvdata(pdev, gdev);
> +
> +     return 0;
> +err_free_gdev:
> +     kfree(gdev);
> +err_disable_pci:
> +     pci_disable_msi(pdev);
> +     pci_disable_device(pdev);
> +     return err;
> +}

regards,
dan carpenter