On 06/05/16 03:26, David Kershner wrote:
> From: Erik Arfvidson <erik.arfvid...@unisys.com>
> 
> The purpose of this patch is to clean up the commenting and make the
> code and its comments pleasant to the eyes. It also makes the commenting
> consistent throughout the file.
> 
> Signed-off-by: Erik Arfvidson <erik.arfvid...@unisys.com>
> Signed-off-by: David Kershner <david.kersh...@unisys.com>
> ---
>  drivers/staging/unisys/visornic/visornic_main.c | 152 +++++++++---------------
>  1 file changed, 58 insertions(+), 94 deletions(-)
> 
> diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
> index de983d2..2c43396 100644
> --- a/drivers/staging/unisys/visornic/visornic_main.c
> +++ b/drivers/staging/unisys/visornic/visornic_main.c
> @@ -109,51 +109,46 @@ struct chanstat {
>  };
>  
>  struct visornic_devdata {
> -     unsigned short enabled;         /* 0 disabled 1 enabled to receive */
> -     unsigned short enab_dis_acked;  /* NET_RCV_ENABLE/DISABLE acked by
> -                                      * IOPART
> -                                      */
> +     /* 0 disabled 1 enabled to receive */
> +     unsigned short enabled;
> +     /* NET_RCV_ENABLE/DISABLE acked by IOPART */
> +     unsigned short enab_dis_acked;
> +
>       struct visor_device *dev;
>       struct net_device *netdev;
>       struct net_device_stats net_stats;
>       atomic_t interrupt_rcvd;
>       wait_queue_head_t rsp_queue;
>       struct sk_buff **rcvbuf;
> -     u64 incarnation_id;             /* lets IOPART know about re-birth */
> -     unsigned short old_flags;       /* flags as they were prior to
> -                                      * set_multicast_list
> -                                      */
> -     atomic_t usage;                 /* count of users */
> -     int num_rcv_bufs;               /* indicates how many rcv buffers
> -                                      * the vnic will post
> -                                      */
> +     /* incarnation_id lets IOPART know about re-birth */
> +     u64 incarnation_id;
> +     /* flags as they were prior to set_multicast_list */
> +     unsigned short old_flags;
> +     atomic_t usage; /* count of users */
> +
> +     /* number of rcv buffers the vnic will post */
> +     int num_rcv_bufs;
>       int num_rcv_bufs_could_not_alloc;
>       atomic_t num_rcvbuf_in_iovm;
>       unsigned long alloc_failed_in_if_needed_cnt;
>       unsigned long alloc_failed_in_repost_rtn_cnt;
> -     unsigned long max_outstanding_net_xmits; /* absolute max number of
> -                                               * outstanding xmits - should
> -                                               * never hit this
> -                                               */
> -     unsigned long upper_threshold_net_xmits;  /* high water mark for
> -                                                * calling netif_stop_queue()
> -                                                */
> -     unsigned long lower_threshold_net_xmits; /* high water mark for calling
> -                                               * netif_wake_queue()
> -                                               */
> -     struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
> -                                       * xmit buffer list that have been
> -                                       * sent to the IOPART end
> -                                       */
> +
> +     /* absolute max number of outstanding xmits - should never hit this */
> +     unsigned long max_outstanding_net_xmits;
> +     /* high water mark for calling netif_stop_queue() */
> +     unsigned long upper_threshold_net_xmits;
> +     /* high water mark for calling netif_wake_queue() */
> +     unsigned long lower_threshold_net_xmits;
> +     /* xmitbufhead - head of the xmit buffer list sent to the IOPART end */
> +     struct sk_buff_head xmitbufhead;
> +
>       visorbus_state_complete_func server_down_complete_func;
>       struct work_struct timeout_reset;
> -     struct uiscmdrsp *cmdrsp_rcv;    /* cmdrsp_rcv is used for
> -                                       * posting/unposting rcv buffers
> -                                       */
> -     struct uiscmdrsp *xmit_cmdrsp;   /* used to issue NET_XMIT - there is
> -                                       * never more that one xmit in
> -                                       * progress at a time
> -                                       */
> +     /* cmdrsp_rcv is used for posting/unposting rcv buffers  */
> +     struct uiscmdrsp *cmdrsp_rcv;
> +     /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
> +     struct uiscmdrsp *xmit_cmdrsp;
> +
>       bool server_down;                /* IOPART is down */
>       bool server_change_state;        /* Processing SERVER_CHANGESTATE msg */
>       bool going_away;                 /* device is being torn down */
> @@ -173,18 +168,10 @@ struct visornic_devdata {
>       unsigned long n_rcv1;                   /* # rcvs of 1 buffers */
>       unsigned long n_rcv2;                   /* # rcvs of 2 buffers */
>       unsigned long n_rcvx;                   /* # rcvs of >2 buffers */
> -     unsigned long found_repost_rcvbuf_cnt;  /* # times we called
> -                                              *   repost_rcvbuf_cnt
> -                                              */
> -     unsigned long repost_found_skb_cnt;     /* # times found the skb */
> -     unsigned long n_repost_deficit;         /* # times we couldn't find
> -                                              *   all of the rcv buffers
> -                                              */
> -     unsigned long bad_rcv_buf;              /* # times we negleted to
> -                                              * free the rcv skb because
> -                                              * we didn't know where it
> -                                              * came from
> -                                              */
> +     unsigned long found_repost_rcvbuf_cnt;  /* # repost_rcvbuf_cnt */
> +     unsigned long repost_found_skb_cnt;     /* # of found the skb */
> +     unsigned long n_repost_deficit;         /* # of lost rcv buffers */
> +     unsigned long bad_rcv_buf; /* # of unknown rcv skb  not freed */
>       unsigned long n_rcv_packets_not_accepted;/* # bogs rcv packets */
>  
>       int queuefullmsg_logged;
> @@ -219,8 +206,7 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
>  
>       numfrags = skb_shinfo(skb)->nr_frags;
>  
> -     /*
> -      * Compute the number of fragments this skb has, and if its more than
> +     /* Compute the number of fragments this skb has, and if its more than
>        * frag array can hold, linearize the skb
>        */
>       total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
> @@ -264,8 +250,7 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
>                                             page_offset,
>                                             skb_shinfo(skb)->frags[ii].
>                                             size, count, frags_max, frags);
> -                     /*
> -                      * add_physinfo_entries only returns
> +                     /* add_physinfo_entries only returns
>                        * zero if the frags array is out of room
>                        * That should never happen because we
>                        * fail above, if count+numfrags > frags_max.
> @@ -297,8 +282,7 @@ static ssize_t enable_ints_write(struct file *file,
>                                const char __user *buffer,
>                                size_t count, loff_t *ppos)
>  {
> -     /*
> -      * Don't want to break ABI here by having a debugfs
> +     /* Don't want to break ABI here by having a debugfs
>        * file that no longer exists or is writable, so
>        * lets just make this a vestigual function
>        */
> @@ -306,8 +290,7 @@ static ssize_t enable_ints_write(struct file *file,
>  }
>  
>  /**
> - *   visornic_serverdown_complete - IOPART went down, need to pause
> - *                                  device
> + *   visornic_serverdown_complete - IOPART went down, pause device
>   *   @work: Work queue it was scheduled on
>   *
>   *   The IO partition has gone down and we need to do some cleanup
> @@ -342,7 +325,7 @@ visornic_serverdown_complete(struct visornic_devdata *devdata)
>  }
>  
>  /**
> - *   visornic_serverdown - Command has notified us that IOPARt is down
> + *   visornic_serverdown - Command has notified us that IOPART is down
>   *   @devdata: device that is being managed by IOPART
>   *
>   *   Schedule the work needed to handle the server down request. Make
> @@ -403,20 +386,19 @@ alloc_rcv_buf(struct net_device *netdev)
>  
>       /* NOTE: the first fragment in each rcv buffer is pointed to by
>        * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
> -      * in length, so the firstfrag is large enough to hold 1514.
> +      * in length, so the first frag is large enough to hold 1514.
>        */
>       skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
>       if (!skb)
>               return NULL;
>       skb->dev = netdev;
> -     skb->len = RCVPOST_BUF_SIZE;
>       /* current value of mtu doesn't come into play here; large
>        * packets will just end up using multiple rcv buffers all of
> -      * same size
> +      * same size.
>        */
> -     skb->data_len = 0;      /* dev_alloc_skb already zeroes it out
> -                              * for clarification.
> -                              */
> +     skb->len = RCVPOST_BUF_SIZE;
> +     /* alloc_skb already zeroes it out for clarification. */
> +     skb->data_len = 0;
>       return skb;
>  }
>  
> @@ -880,8 +862,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
>  
>       if (vnic_hit_high_watermark(devdata,
>                                   devdata->max_outstanding_net_xmits)) {
> -             /* too many NET_XMITs queued over to IOVM - need to wait
> -              */
> +             /* extra NET_XMITs queued over to IOVM - need to wait */
>               devdata->chstat.reject_count++;
>               if (!devdata->queuefullmsg_logged &&
>                   ((devdata->chstat.reject_count & 0x3ff) == 1))
> @@ -958,16 +939,12 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
>       devdata->net_stats.tx_bytes += skb->len;
>       devdata->chstat.sent_xmit++;
>  
> -     /* check to see if we have hit the high watermark for
> -      * netif_stop_queue()
> -      */
> +     /* check if we have hit the high watermark for netif_stop_queue() */
>       if (vnic_hit_high_watermark(devdata,
>                                   devdata->upper_threshold_net_xmits)) {
> -             /* too many NET_XMITs queued over to IOVM - need to wait */
> -             netif_stop_queue(netdev); /* calling stop queue - call
> -                                        * netif_wake_queue() after lower
> -                                        * threshold
> -                                        */
> +             /* extra NET_XMITs queued over to IOVM - need to wait */
> +             /* stop queue - call netif_wake_queue() after lower threshold */
> +             netif_stop_queue(netdev);
>               dev_dbg(&netdev->dev,
>                       "%s busy - invoking iovm flow control\n",
>                       __func__);
> @@ -1320,16 +1297,13 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
>                                               break;
>                                       }
>                               }
> +                             /* accept pkt, dest matches a multicast addr */
>                               if (found_mc)
> -                                     break;  /* accept packet, dest
> -                                              * matches a multicast
> -                                              * address
> -                                              */
> +                                     break;
>                       }
> +             /* accept packet, h_dest must match vnic  mac address */
>               } else if (skb->pkt_type == PACKET_HOST) {
> -                     break;  /* accept packet, h_dest must match vnic
> -                              *  mac address
> -                              */
> +                     break;
>               } else if (skb->pkt_type == PACKET_OTHERHOST) {
>                       /* something is not right */
>                       dev_err(&devdata->netdev->dev,
> @@ -1417,14 +1391,10 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
>       if (!vbuf)
>               return -ENOMEM;
>  
> -     /* for each vnic channel
> -      * dump out channel specific data
> -      */
> +     /* for each vnic channel dump out channel specific data */
>       rcu_read_lock();
>       for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
> -             /*
> -              * Only consider netdevs that are visornic, and are open
> -              */
> +             /* Only consider netdevs that are visornic, and are open */
>               if ((dev->netdev_ops != &visornic_dev_ops) ||
>                   (!netif_queue_stopped(dev)))
>                       continue;
> @@ -1651,9 +1621,8 @@ service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
>                       /* ASSERT netdev == vnicinfo->netdev; */
>                       if ((netdev == devdata->netdev) &&
>                           netif_queue_stopped(netdev)) {
> -                             /* check to see if we have crossed
> -                              * the lower watermark for
> -                              * netif_wake_queue()
> +                             /* check if we have crossed the lower watermark
> +                              * for netif_wake_queue()
>                                */
>                               if (vnic_hit_low_watermark
>                                   (devdata,
> @@ -1721,10 +1690,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
>       send_rcv_posts_if_needed(devdata);
>       service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
>  
> -     /*
> -      * If there aren't any more packets to receive
> -      * stop the poll
> -      */
> +     /* If there aren't any more packets to receive stop the poll */
>       if (rx_count < budget)
>               napi_complete(napi);
>  
> @@ -1876,8 +1842,7 @@ static int visornic_probe(struct visor_device *dev)
>  
>       setup_timer(&devdata->irq_poll_timer, poll_for_irq,
>                   (unsigned long)devdata);
> -     /*
> -      * Note: This time has to start running before the while
> +     /* Note: This time has to start running before the while
>        * loop below because the napi routine is responsible for
>        * setting enab_dis_acked
>        */
> @@ -1906,8 +1871,7 @@ static int visornic_probe(struct visor_device *dev)
>       /* Let's start our threads to get responses */
>       netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
>  
> -     /*
> -      * Note: Interupts have to be enable before the while
> +     /* Note: Interupts have to be enable before the while
>        * loop below because the napi routine is responsible for
>        * setting enab_dis_acked
>        */
> 

Looks good.

You can add Reviewed-by: Luis de Bethencourt <lui...@osg.samsung.com> to this
patch and to the one removing unused struct members when you resend them all
as a series.
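
For reference, the trailer block on the resent patch would then end up looking
something like this (addresses elided here as above):

    Signed-off-by: Erik Arfvidson <erik.arfvid...@unisys.com>
    Signed-off-by: David Kershner <david.kersh...@unisys.com>
    Reviewed-by: Luis de Bethencourt <lui...@osg.samsung.com>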

Thanks,
Luis