On 25/03/2025 03:07, Qingfang Deng wrote:
[...]
-static void ovpn_decrypt_post(struct sk_buff *skb, int ret)
+void ovpn_decrypt_post(void *data, int ret)
  {
-       struct ovpn_peer *peer = ovpn_skb_cb(skb)->peer;
+       struct ovpn_crypto_key_slot *ks;
+       unsigned int payload_offset = 0;
+       struct sk_buff *skb = data;
+       struct ovpn_peer *peer;
+       __be16 proto;
+       __be32 *pid;
+
+       /* crypto is happening asynchronously. this function will be called
+        * again later by the crypto callback with a proper return code
+        */
+       if (unlikely(ret == -EINPROGRESS))
+               return;
+
+       payload_offset = ovpn_skb_cb(skb)->payload_offset;
+       ks = ovpn_skb_cb(skb)->ks;
+       peer = ovpn_skb_cb(skb)->peer;
+
+       /* crypto is done, cleanup skb CB and its members */
+
+       if (likely(ovpn_skb_cb(skb)->iv))
+               kfree(ovpn_skb_cb(skb)->iv);

NULL check before kfree is unnecessary, as kfree(NULL) is a noop.

Right, will drop the check.


+
+       if (likely(ovpn_skb_cb(skb)->sg))
+               kfree(ovpn_skb_cb(skb)->sg);
+
+       if (likely(ovpn_skb_cb(skb)->req))
+               aead_request_free(ovpn_skb_cb(skb)->req);

Likewise.

ACK
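
With both checks dropped, the cleanup would then collapse to roughly the following (just a sketch, assuming iv, sg and req are left NULL when not allocated, since, as noted above, kfree() and aead_request_free() both handle a NULL pointer):

        /* crypto is done, cleanup skb CB and its members */
        kfree(ovpn_skb_cb(skb)->iv);
        kfree(ovpn_skb_cb(skb)->sg);
        aead_request_free(ovpn_skb_cb(skb)->req);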


        if (unlikely(ret < 0))
                goto drop;
+       /* PID sits after the op */
+       pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
+       ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
+       if (unlikely(ret < 0)) {
+               net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
+                                   netdev_name(peer->ovpn->dev), peer->id,
+                                   ret);
+               goto drop;
+       }
+
+       /* point to encapsulated IP packet */
+       __skb_pull(skb, payload_offset);
+
+       /* check if this is a valid datapacket that has to be delivered to the
+        * ovpn interface
+        */
+       skb_reset_network_header(skb);
+       proto = ovpn_ip_check_protocol(skb);
+       if (unlikely(!proto)) {
+               /* check if null packet */
+               if (unlikely(!pskb_may_pull(skb, 1))) {
+                       net_info_ratelimited("%s: NULL packet received from peer %u\n",
+                                            netdev_name(peer->ovpn->dev),
+                                            peer->id);
+                       goto drop;
+               }
+
+               net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
+                                    netdev_name(peer->ovpn->dev), peer->id);
+               goto drop;
+       }
+       skb->protocol = proto;
+
+       /* perform Reverse Path Filtering (RPF) */
+       if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
+                                           netdev_name(peer->ovpn->dev),
+                                           peer->id, &ipv6_hdr(skb)->saddr);
+               else
+                       net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
+                                           netdev_name(peer->ovpn->dev),
+                                           peer->id, &ip_hdr(skb)->saddr);
+               goto drop;
+       }
+
        ovpn_netdev_write(peer, skb);
        /* skb is passed to upper layer - don't free it */
        skb = NULL;
  drop:
        if (unlikely(skb))
                dev_core_stats_rx_dropped_inc(peer->ovpn->dev);
-       ovpn_peer_put(peer);
+       if (likely(peer))
+               ovpn_peer_put(peer);
+       if (likely(ks))
+               ovpn_crypto_key_slot_put(ks);
        kfree_skb(skb);
  }

[...]

--- /dev/null
+++ b/drivers/net/ovpn/pktid.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*  OpenVPN data channel offload
+ *
+ *  Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ *  Author:    Antonio Quartulli <anto...@openvpn.net>
+ *             James Yonan <ja...@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPKTID_H_
+#define _NET_OVPN_OVPNPKTID_H_
+
+#include "proto.h"
+
+/* If no packets received for this length of time, set a backtrack floor
+ * at highest received packet ID thus far.
+ */
+#define PKTID_RECV_EXPIRE (30 * HZ)
+
+/* Packet-ID state for transmitter */
+struct ovpn_pktid_xmit {
+       atomic64_t seq_num;

pktid is 32-bit so atomic_t should be enough (see below).

+};
+
+/* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */
+#define REPLAY_WINDOW_ORDER 8
+
+#define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER)
+#define REPLAY_WINDOW_SIZE  (REPLAY_WINDOW_BYTES * 8)
+#define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1))
+
+/* Packet-ID state for receiver.
+ * Other than lock member, can be zeroed to initialize.
+ */
+struct ovpn_pktid_recv {
+       /* "sliding window" bitmask of recent packet IDs received */
+       u8 history[REPLAY_WINDOW_BYTES];
+       /* bit position of deque base in history */
+       unsigned int base;
+       /* extent (in bits) of deque in history */
+       unsigned int extent;
+       /* expiration of history in jiffies */
+       unsigned long expire;
+       /* highest sequence number received */
+       u32 id;
+       /* highest time stamp received */
+       u32 time;
+       /* we will only accept backtrack IDs > id_floor */
+       u32 id_floor;
+       unsigned int max_backtrack;
+       /* protects the entire packet ID state */
+       spinlock_t lock;
+};
+
+/* Get the next packet ID for xmit */
+static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid)
+{
+       const s64 seq_num = atomic64_fetch_add_unless(&pid->seq_num, 1,
+                                                     0x100000000LL);
+       /* when the 32bit space is over, we return an error because the packet
+        * ID is used to create the cipher IV and we do not want to reuse the
+        * same value more than once
+        */
+       if (unlikely(seq_num == 0x100000000LL))
+               return -ERANGE;

You may use a 32-bit atomic_t; instead of checking whether it equals
0x1_00000000, check whether it wraps around to zero.
Additionally, you don't need full memory ordering, as you just want an
incrementing value:

int seq_num = atomic_fetch_inc_relaxed(&pid->seq_num);

if (unlikely(!seq_num))
        ...

But then, if we have concurrent invocations of ovpn_pktid_xmit_next(), only the first one will error out on wrap-around, while the others will return no error (seq_num becomes > 0) and will let their packets go through.

This is not what we want.
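
To make the failure mode concrete, here is a sketch of the suggested 32-bit variant (my illustration only, assuming seq_num is turned into an atomic_t), with the problematic interleaving spelled out in the comment:

static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid)
{
        u32 seq_num = atomic_fetch_inc_relaxed(&pid->seq_num);

        /* around the wrap point (counter sitting at 0xffffffff):
         *   CPU0 gets 0xffffffff - last valid ID, counter wraps to 0
         *   CPU1 gets 0          - correctly returns -ERANGE
         *   CPU2 gets 1          - passes this check, but that ID was already
         *                          handed out at the beginning of the
         *                          sequence, so the same nonce gets reused
         */
        if (unlikely(!seq_num))
                return -ERANGE;

        *pktid = seq_num;
        return 0;
}

The atomic64_fetch_add_unless(&pid->seq_num, 1, 0x100000000LL) in the patch does not have this problem: the counter saturates at 0x100000000, so once the 32-bit space is exhausted every caller reads back the sentinel value and fails.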


+
+       *pktid = (u32)seq_num;
+
+       return 0;
+}
+
+/* Write 12-byte AEAD IV to dest */
+static inline void ovpn_pktid_aead_write(const u32 pktid,
+                                        const u8 nt[],
+                                        unsigned char *dest)
+{
+       *(__force __be32 *)(dest) = htonl(pktid);
+       BUILD_BUG_ON(4 + OVPN_NONCE_TAIL_SIZE != OVPN_NONCE_SIZE);
+       memcpy(dest + 4, nt, OVPN_NONCE_TAIL_SIZE);
+}
+

Qingfang, may I ask you to trim non-relevant code from your replies (like I did above)? Especially in such long patches it becomes a bit tedious to spot your comments, and I may miss something.

Thanks a lot!

Regards,

--
Antonio Quartulli
OpenVPN Inc.

