On Tue, 2020-12-01 at 14:35 -0800, mgr...@linux.intel.com wrote:
> Enable asynchronous channel and event communication.
[]
> diff --git a/drivers/misc/xlink-core/xlink-core.c b/drivers/misc/xlink-core/xlink-core.c
[]
> +
> +// sysfs attribute functions
> +
> +static ssize_t event0_show(struct device *dev, struct device_attribute *attr, char *buf)
> +{
> +     struct keembay_xlink_dev *xlink_dev = dev_get_drvdata(dev);
> +     struct xlink_attr *a = &xlink_dev->eventx[0];
> +
> +     return scnprintf(buf, PAGE_SIZE, "0x%x 0x%lx\n", a->sw_dev_id, a->value);
> +}
> +
> +static ssize_t event1_show(struct device *dev, struct device_attribute *attr, char *buf)
> +{
> +     struct keembay_xlink_dev *xlink_dev = dev_get_drvdata(dev);
> +     struct xlink_attr *a = &xlink_dev->eventx[1];
> +
> +     return scnprintf(buf, PAGE_SIZE, "0x%x 0x%lx\n", a->sw_dev_id, a->value);
> +}
> +
> +static ssize_t event2_show(struct device *dev, struct device_attribute *attr, char *buf)
> +{
> +     struct keembay_xlink_dev *xlink_dev = dev_get_drvdata(dev);
> +     struct xlink_attr *a = &xlink_dev->eventx[2];
> +
> +     return scnprintf(buf, PAGE_SIZE, "0x%x 0x%lx\n", a->sw_dev_id, a->value);
> +}
> +
> +static ssize_t event3_show(struct device *dev, struct device_attribute *attr, char *buf)
> +{
> +     struct keembay_xlink_dev *xlink_dev = dev_get_drvdata(dev);
> +     struct xlink_attr *a = &xlink_dev->eventx[3];
> +
> +     return scnprintf(buf, PAGE_SIZE, "0x%x 0x%lx\n", a->sw_dev_id, a->value);
> +}

These could use sysfs_emit rather than scnprintf,
and could also be collapsed into a single shared function that takes an index.

Something like:

static ssize_t eventx_show(struct device *dev, struct device_attribute *attr,
                           int index, char *buf)
{
        struct keembay_xlink_dev *xlink_dev = dev_get_drvdata(dev);
        struct xlink_attr *a = &xlink_dev->eventx[index];

        return sysfs_emit(buf, "0x%x 0x%lx\n", a->sw_dev_id, a->value);
}

static ssize_t event0_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        return eventx_show(dev, attr, 0, buf);
}

etc...


> @@ -270,19 +353,23 @@ static long xlink_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
>       struct xlinkconnect             con     = {0};
>       struct xlinkrelease             rel     = {0};
>       struct xlinkstartvpu            startvpu = {0};
> +     struct xlinkcallback            cb      = {0};
>       struct xlinkgetdevicename       devn    = {0};
>       struct xlinkgetdevicelist       devl    = {0};
>       struct xlinkgetdevicestatus     devs    = {0};
>       struct xlinkbootdevice          boot    = {0};
>       struct xlinkresetdevice         res     = {0};
>       struct xlinkdevmode             devm    = {0};
> +     struct xlinkregdevevent         regdevevent = {0};

Perhaps significantly less stack would be used if these declarations were
moved into the case blocks where they are used.  Less initialization would
be done per ioctl call too.

> @@ -318,6 +405,30 @@ static long xlink_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
>               if (copy_to_user((void __user *)op.return_code, (void *)&rc, sizeof(rc)))
>                       return -EFAULT;
>               break;
> +     case XL_DATA_READY_CALLBACK:
> +             if (copy_from_user(&cb, (void __user *)arg,
> +                                sizeof(struct xlinkcallback)))
> +                     return -EFAULT;
> +             if (copy_from_user(&devh, (void __user *)cb.handle,
> +                                sizeof(struct xlink_handle)))
> +                     return -EFAULT;
> +             CHANNEL_SET_USER_BIT(cb.chan); // set MSbit for user space call
> +             rc = xlink_data_available_event(&devh, cb.chan, cb.callback);
> +             if (copy_to_user((void __user *)cb.return_code, (void *)&rc, sizeof(rc)))
> +                     return -EFAULT;
> +             break;

        case XL_DATA_READY_CALLBACK: {
                struct xlinkcallback cb = {};
                etc...
                break;
        }
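
Filled in, that would look something like the below (just a sketch lifted
from the quoted hunk; it assumes devh stays declared at function scope in
xlink_ioctl as it is today):

        case XL_DATA_READY_CALLBACK: {
                struct xlinkcallback cb = {};

                if (copy_from_user(&cb, (void __user *)arg,
                                   sizeof(struct xlinkcallback)))
                        return -EFAULT;
                /* devh assumed to remain declared at function scope */
                if (copy_from_user(&devh, (void __user *)cb.handle,
                                   sizeof(struct xlink_handle)))
                        return -EFAULT;
                CHANNEL_SET_USER_BIT(cb.chan); // set MSbit for user space call
                rc = xlink_data_available_event(&devh, cb.chan, cb.callback);
                if (copy_to_user((void __user *)cb.return_code, (void *)&rc, sizeof(rc)))
                        return -EFAULT;
                break;
        }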


