From: "Matthew Wilcox (Oracle)" <wi...@infradead.org>

The XArray interface is easier for this driver to use.  This also fixes a
reported bug caused by the improper use of GFP_ATOMIC.

Signed-off-by: Matthew Wilcox (Oracle) <wi...@infradead.org>
---
 net/qrtr/qrtr.c | 39 +++++++++++++--------------------------
 1 file changed, 13 insertions(+), 26 deletions(-)
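
Note (not part of the commit message): for reviewers less familiar with the
XArray allocation API, the pattern adopted below is xa_alloc()/xa_insert()
against a DEFINE_XARRAY_ALLOC() array, which takes its own internal lock and
may sleep with GFP_KERNEL, so the external mutex and GFP_ATOMIC are no longer
needed.  A minimal illustrative sketch follows; the "example_ports" array and
example_assign() helper are placeholders for this note only, not qrtr symbols:

	#include <linux/xarray.h>

	/* Illustrative-only sketch of the allocation pattern; not driver code. */
	static DEFINE_XARRAY_ALLOC(example_ports);

	static int example_assign(void *entry, u32 *port)
	{
		int rc;

		if (!*port)
			/* Pick any free index in the range; may sleep. */
			rc = xa_alloc(&example_ports, port, entry,
				      XA_LIMIT(0x4000, 0x7fff), GFP_KERNEL);
		else
			/* Claim a specific index; -EBUSY if already taken. */
			rc = xa_insert(&example_ports, *port, entry, GFP_KERNEL);

		return rc;
	}

Lookup and removal (xa_load()/xa_erase()) likewise need no external lock,
which is why the diff drops qrtr_port_lock entirely.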

diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 2d8d6131bc5f..488f8f326ee5 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -20,6 +20,7 @@
 /* auto-bind range */
 #define QRTR_MIN_EPH_SOCKET 0x4000
 #define QRTR_MAX_EPH_SOCKET 0x7fff
+#define QRTR_PORT_RANGE        XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET)
 
 /**
  * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
@@ -106,8 +107,7 @@ static LIST_HEAD(qrtr_all_nodes);
 static DEFINE_MUTEX(qrtr_node_lock);
 
 /* local port allocation management */
-static DEFINE_IDR(qrtr_ports);
-static DEFINE_MUTEX(qrtr_port_lock);
+static DEFINE_XARRAY_ALLOC(qrtr_ports);
 
 /**
  * struct qrtr_node - endpoint node
@@ -623,7 +623,7 @@ static struct qrtr_sock *qrtr_port_lookup(int port)
                port = 0;
 
        rcu_read_lock();
-       ipc = idr_find(&qrtr_ports, port);
+       ipc = xa_load(&qrtr_ports, port);
        if (ipc)
                sock_hold(&ipc->sk);
        rcu_read_unlock();
@@ -665,9 +665,7 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
 
        __sock_put(&ipc->sk);
 
-       mutex_lock(&qrtr_port_lock);
-       idr_remove(&qrtr_ports, port);
-       mutex_unlock(&qrtr_port_lock);
+       xa_erase(&qrtr_ports, port);
 
        /* Ensure that if qrtr_port_lookup() did enter the RCU read section we
         * wait for it to up increment the refcount */
@@ -688,25 +686,18 @@ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
 {
        int rc;
 
-       mutex_lock(&qrtr_port_lock);
        if (!*port) {
-               rc = idr_alloc(&qrtr_ports, ipc,
-                              QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
-                              GFP_ATOMIC);
-               if (rc >= 0)
-                       *port = rc;
+               rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_PORT_RANGE,
+                               GFP_KERNEL);
        } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
                rc = -EACCES;
        } else if (*port == QRTR_PORT_CTRL) {
-               rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+               rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL);
        } else {
-               rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
-               if (rc >= 0)
-                       *port = rc;
+               rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL);
        }
-       mutex_unlock(&qrtr_port_lock);
 
-       if (rc == -ENOSPC)
+       if (rc == -EBUSY)
                return -EADDRINUSE;
        else if (rc < 0)
                return rc;
@@ -720,20 +711,16 @@ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
 static void qrtr_reset_ports(void)
 {
        struct qrtr_sock *ipc;
-       int id;
-
-       mutex_lock(&qrtr_port_lock);
-       idr_for_each_entry(&qrtr_ports, ipc, id) {
-               /* Don't reset control port */
-               if (id == 0)
-                       continue;
+       unsigned long index;
 
+       rcu_read_lock();
+       xa_for_each_start(&qrtr_ports, index, ipc, 1) {
                sock_hold(&ipc->sk);
                ipc->sk.sk_err = ENETRESET;
                ipc->sk.sk_error_report(&ipc->sk);
                sock_put(&ipc->sk);
        }
-       mutex_unlock(&qrtr_port_lock);
+       rcu_read_unlock();
 }
 
 /* Bind socket to address.
-- 
2.26.2
