On 11 Aug 11:27, Liang Ma wrote:
<snip>
> +static uint16_t
> +rte_ethdev_pmgmt_umait(uint16_t port_id, uint16_t qidx,
> +             struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx,
> +             uint16_t max_pkts __rte_unused, void *_  __rte_unused)
> +{
> +
> +     struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> +
> +     if (dev->pwr_mgmt_state == RTE_ETH_DEV_POWER_MGMT_ENABLED) {
> +             if (unlikely(nb_rx == 0)) {
> +                     dev->empty_poll_stats[qidx].num++;
> +                     if (unlikely(dev->empty_poll_stats[qidx].num >
> +                                     ETH_EMPTYPOLL_MAX)) {
> +                             volatile void *target_addr;
> +                             uint64_t expected, mask;
> +                             uint16_t ret;
> +
> +                             /*
> +                              * get address of next descriptor in the RX
> +                              * ring for this queue, as well as expected
> +                              * value and a mask.
> +                              */
> +                             ret = (*dev->dev_ops->next_rx_desc)
> +                                     (dev->data->rx_queues[qidx],
> +                                      &target_addr, &expected, &mask);
> +                             if (ret == 0)
> +                                     /* -1ULL is maximum value for TSC */
> +                                     rte_power_monitor(target_addr,
> +                                                       expected, mask,
> +                                                       0, -1ULL);
> +                     }
> +             } else
> +                     dev->empty_poll_stats[qidx].num = 0;
> +     }
> +
> +     return 0;
This should return nb_rx here; that's fixed in v3.
> +}
> +
> +static uint16_t
> +rte_ethdev_pmgmt_pause(uint16_t port_id, uint16_t qidx,
> +             struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx,
> +             uint16_t max_pkts __rte_unused, void *_  __rte_unused)
> +{
> +     struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> +
> +     int i;
> +
> +     if (dev->pwr_mgmt_state == RTE_ETH_DEV_POWER_MGMT_ENABLED) {
> +             if (unlikely(nb_rx == 0)) {
> +
> +                     dev->empty_poll_stats[qidx].num++;
> +
> +                     if (unlikely(dev->empty_poll_stats[qidx].num >
> +                                     ETH_EMPTYPOLL_MAX)) {
> +
> +                             for (i = 0; i < RTE_ETH_PAUSE_NUM; i++)
> +                                     rte_pause();
> +
> +                     }
> +             } else
> +                     dev->empty_poll_stats[qidx].num = 0;
> +     }
> +
> +     return 0;
This should return nb_rx here; that's fixed in v3.
> +}
> +
> +static uint16_t
> +rte_ethdev_pmgmt_scalefreq(uint16_t port_id, uint16_t qidx,
> +             struct rte_mbuf **pkts __rte_unused, uint16_t nb_rx,
> +             uint16_t max_pkts __rte_unused, void *_  __rte_unused)
> +{
> +     struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> +
> +     if (dev->pwr_mgmt_state == RTE_ETH_DEV_POWER_MGMT_ENABLED) {
> +             if (unlikely(nb_rx == 0)) {
> +                     dev->empty_poll_stats[qidx].num++;
> +                     if (unlikely(dev->empty_poll_stats[qidx].num >
> +                                     ETH_EMPTYPOLL_MAX)) {
> +
> +                             /*scale down freq */
> +                             rte_power_freq_min(rte_lcore_id());
> +
> +                     }
> +             } else {
> +                     dev->empty_poll_stats[qidx].num = 0;
> +                     /* scal up freq */
> +                     rte_power_freq_max(rte_lcore_id());
> +             }
> +     }
> +
> +     return 0;
This should return nb_rx here; that's fixed in v3.
> +}
> +
</snip>

 -- 
> 2.17.1
> 

Reply via email to