I don't know which one is the relevant reproducer, but the following warning now shows up in the build:

In file included from
/tank/users/mjg/src/freebsd/sys/netinet/cc/cc_newreno.c:84:
/tank/users/mjg/src/freebsd/sys/netinet/tcp_hpts.h:118:1: warning:
unused function 'tcp_in_hpts' [-Wunused-function]
tcp_in_hpts(struct tcpcb *tp)
^
In file included from /tank/users/mjg/src/freebsd/sys/netinet/cc/cc_cubic.c:74:
/tank/users/mjg/src/freebsd/sys/netinet/tcp_hpts.h:118:1: warning:
unused function 'tcp_in_hpts' [-Wunused-function]
tcp_in_hpts(struct tcpcb *tp)
^
1 warning generated.
1 warning generated.


On 4/25/23, Gleb Smirnoff <[email protected]> wrote:
> The branch main has been updated by glebius:
>
> URL:
> https://cgit.FreeBSD.org/src/commit/?id=c3c20de3b2d1357d081ce6c41f15f23cd1020e35
>
> commit c3c20de3b2d1357d081ce6c41f15f23cd1020e35
> Author:     Gleb Smirnoff <[email protected]>
> AuthorDate: 2023-04-25 19:19:48 +0000
> Commit:     Gleb Smirnoff <[email protected]>
> CommitDate: 2023-04-25 19:19:48 +0000
>
>     tcp: move HPTS/LRO flags out of inpcb to tcpcb
>
>     These flags are TCP specific.  While here, make also several LRO
>     internal functions to pass tcpcb pointer instead of inpcb one.
>
>     Reviewed by:            rrs
>     Differential Revision:  https://reviews.freebsd.org/D39698
> ---
>  sys/netinet/in_pcb.h                     | 12 +++---
>  sys/netinet/tcp_hpts.c                   |  2 +-
>  sys/netinet/tcp_lro.c                    | 69
> ++++++++++++++------------------
>  sys/netinet/tcp_stacks/bbr.c             | 35 ++++++++--------
>  sys/netinet/tcp_stacks/rack.c            | 68
> +++++++++++++++----------------
>  sys/netinet/tcp_stacks/rack_bbr_common.c |  4 +-
>  sys/netinet/tcp_subr.c                   | 14 +++----
>  sys/netinet/tcp_syncache.c               |  2 +-
>  sys/netinet/tcp_var.h                    | 25 +++++++-----
>  9 files changed, 112 insertions(+), 119 deletions(-)
>
> diff --git a/sys/netinet/in_pcb.h b/sys/netinet/in_pcb.h
> index 62c5758268a7..574d575de8f0 100644
> --- a/sys/netinet/in_pcb.h
> +++ b/sys/netinet/in_pcb.h
> @@ -593,8 +593,8 @@ int       inp_so_options(const struct inpcb *inp);
>  /*
>   * Flags for inp_flags2.
>   */
> -#define      INP_MBUF_L_ACKS         0x00000001 /* We need large mbufs for 
> ack
> compression */
> -#define      INP_MBUF_ACKCMP         0x00000002 /* TCP mbuf ack compression 
> ok */
> +/*                           0x00000001 */
> +/*                           0x00000002 */
>  /*                           0x00000004 */
>  #define      INP_REUSEPORT           0x00000008 /* SO_REUSEPORT option is 
> set */
>  /*                           0x00000010 */
> @@ -605,11 +605,11 @@ int     inp_so_options(const struct inpcb *inp);
>  #define      INP_RECVRSSBUCKETID     0x00000200 /* populate recv datagram 
> with
> bucket id */
>  #define      INP_RATE_LIMIT_CHANGED  0x00000400 /* rate limit needs 
> attention */
>  #define      INP_ORIGDSTADDR         0x00000800 /* receive IP dst 
> address/port */
> -#define INP_CANNOT_DO_ECN    0x00001000 /* The stack does not do ECN */
> +/*                           0x00001000 */
>  #define      INP_REUSEPORT_LB        0x00002000 /* SO_REUSEPORT_LB option is 
> set */
> -#define INP_SUPPORTS_MBUFQ   0x00004000 /* Supports the mbuf queue method of
> LRO */
> -#define INP_MBUF_QUEUE_READY 0x00008000 /* The transport is pacing, inputs
> can be queued */
> -#define INP_DONT_SACK_QUEUE  0x00010000 /* If a sack arrives do not wake me
> */
> +/*                           0x00004000 */
> +/*                           0x00008000 */
> +/*                           0x00010000 */
>  #define INP_2PCP_SET         0x00020000 /* If the Eth PCP should be set 
> explicitly
> */
>  #define INP_2PCP_BIT0                0x00040000 /* Eth PCP Bit 0 */
>  #define INP_2PCP_BIT1                0x00080000 /* Eth PCP Bit 1 */
> diff --git a/sys/netinet/tcp_hpts.c b/sys/netinet/tcp_hpts.c
> index 59122bb242b9..a10355a38b6f 100644
> --- a/sys/netinet/tcp_hpts.c
> +++ b/sys/netinet/tcp_hpts.c
> @@ -1363,7 +1363,7 @@ again:
>                        * cause a call to output if it is needed.
>                        */
>                       tp->t_flags2 |= TF2_HPTS_CALLS;
> -                     if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) &&
> +                     if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) &&
>                           !STAILQ_EMPTY(&tp->t_inqueue)) {
>                               error = (*tp->t_fb->tfb_do_queued_segments)(tp, 
> 0);
>                               if (error) {
> diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c
> index 76c345add1f8..d76e1e09edd7 100644
> --- a/sys/netinet/tcp_lro.c
> +++ b/sys/netinet/tcp_lro.c
> @@ -91,7 +91,7 @@ static int  tcp_lro_rx_common(struct lro_ctrl *lc, struct
> mbuf *m,
>                   uint32_t csum, bool use_hash);
>
>  #ifdef TCPHPTS
> -static bool  do_bpf_strip_and_compress(struct inpcb *, struct lro_ctrl *,
> +static bool  do_bpf_strip_and_compress(struct tcpcb *, struct lro_ctrl *,
>               struct lro_entry *, struct mbuf **, struct mbuf **, struct mbuf 
> **,
>               bool *, bool, bool, struct ifnet *, bool);
>
> @@ -1192,13 +1192,9 @@ tcp_queue_pkts(struct tcpcb *tp, struct lro_entry
> *le)
>  }
>
>  static bool
> -tcp_lro_check_wake_status(struct inpcb *inp)
> +tcp_lro_check_wake_status(struct tcpcb *tp)
>  {
> -     struct tcpcb *tp;
>
> -     tp = intotcpcb(inp);
> -     if (__predict_false(tp == NULL))
> -             return (true);
>       if (tp->t_fb->tfb_early_wake_check != NULL)
>               return ((tp->t_fb->tfb_early_wake_check)(tp));
>       return (false);
> @@ -1206,15 +1202,10 @@ tcp_lro_check_wake_status(struct inpcb *inp)
>
>  static struct mbuf *
>  tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le,
> -    struct inpcb *inp, int32_t *new_m, bool can_append_old_cmp)
> +    struct tcpcb *tp, int32_t *new_m, bool can_append_old_cmp)
>  {
> -     struct tcpcb *tp;
>       struct mbuf *m;
>
> -     tp = intotcpcb(inp);
> -     if (__predict_false(tp == NULL))
> -             return (NULL);
> -
>       /* Look at the last mbuf if any in queue */
>       if (can_append_old_cmp) {
>               m = STAILQ_LAST(&tp->t_inqueue, mbuf, m_stailqpkt);
> @@ -1226,13 +1217,13 @@ tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc,
> struct lro_entry *le,
>                               return (m);
>                       } else {
>                               /* Mark we ran out of space */
> -                             inp->inp_flags2 |= INP_MBUF_L_ACKS;
> +                             tp->t_flags2 |= TF2_MBUF_L_ACKS;
>                       }
>               }
>       }
>       /* Decide mbuf size. */
>       tcp_lro_log(tp, lc, le, NULL, 21, 0, 0, 0, 0);
> -     if (inp->inp_flags2 & INP_MBUF_L_ACKS)
> +     if (tp->t_flags2 & TF2_MBUF_L_ACKS)
>               m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
>       else
>               m = m_gethdr(M_NOWAIT, MT_DATA);
> @@ -1248,7 +1239,7 @@ tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct
> lro_entry *le,
>       return (m);
>  }
>
> -static struct inpcb *
> +static struct tcpcb *
>  tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
>  {
>       struct inpcb *inp;
> @@ -1277,10 +1268,10 @@ tcp_lro_lookup(struct ifnet *ifp, struct lro_parser
> *pa)
>               break;
>  #endif
>       default:
> -             inp = NULL;
> -             break;
> +             return (NULL);
>       }
> -     return (inp);
> +
> +     return (intotcpcb(inp));
>  }
>
>  static inline bool
> @@ -1335,7 +1326,6 @@ tcp_lro_ack_valid(struct mbuf *m, struct tcphdr *th,
> uint32_t **ppts, bool *othe
>  static int
>  tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct lro_entry *le)
>  {
> -     struct inpcb *inp;
>       struct tcpcb *tp;
>       struct mbuf **pp, *cmp, *mv_to;
>       struct ifnet *lagg_ifp;
> @@ -1364,31 +1354,28 @@ tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct
> lro_entry *le)
>           IN6_IS_ADDR_UNSPECIFIED(&le->inner.data.s_addr.v6)))
>               return (TCP_LRO_CANNOT);
>  #endif
> -     /* Lookup inp, if any. */
> -     inp = tcp_lro_lookup(lc->ifp,
> +     /* Lookup inp, if any.  Returns locked TCP inpcb. */
> +     tp = tcp_lro_lookup(lc->ifp,
>           (le->inner.data.lro_type == LRO_TYPE_NONE) ? &le->outer :
> &le->inner);
> -     if (inp == NULL)
> +     if (tp == NULL)
>               return (TCP_LRO_CANNOT);
>
>       counter_u64_add(tcp_inp_lro_locks_taken, 1);
>
> -     /* Get TCP control structure. */
> -     tp = intotcpcb(inp);
> -
>       /* Check if the inp is dead, Jim. */
>       if (tp->t_state == TCPS_TIME_WAIT) {
> -             INP_WUNLOCK(inp);
> +             INP_WUNLOCK(tptoinpcb(tp));
>               return (TCP_LRO_CANNOT);
>       }
>       if (tp->t_lro_cpu == HPTS_CPU_NONE && lc->lro_cpu_is_set == 1)
>               tp->t_lro_cpu = lc->lro_last_cpu;
>       /* Check if the transport doesn't support the needed optimizations. */
> -     if ((inp->inp_flags2 & (INP_SUPPORTS_MBUFQ | INP_MBUF_ACKCMP)) == 0) {
> -             INP_WUNLOCK(inp);
> +     if ((tp->t_flags2 & (TF2_SUPPORTS_MBUFQ | TF2_MBUF_ACKCMP)) == 0) {
> +             INP_WUNLOCK(tptoinpcb(tp));
>               return (TCP_LRO_CANNOT);
>       }
>
> -     if (inp->inp_flags2 & INP_MBUF_QUEUE_READY)
> +     if (tp->t_flags2 & TF2_MBUF_QUEUE_READY)
>               should_wake = false;
>       else
>               should_wake = true;
> @@ -1411,7 +1398,7 @@ tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct
> lro_entry *le)
>       cmp = NULL;
>       for (pp = &le->m_head; *pp != NULL; ) {
>               mv_to = NULL;
> -             if (do_bpf_strip_and_compress(inp, lc, le, pp,
> +             if (do_bpf_strip_and_compress(tp, lc, le, pp,
>                       &cmp, &mv_to, &should_wake, bpf_req,
>                       lagg_bpf_req, lagg_ifp, can_append_old_cmp) == false) {
>                       /* Advance to next mbuf. */
> @@ -1444,17 +1431,18 @@ tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct
> lro_entry *le)
>       /* Check if any data mbufs left. */
>       if (le->m_head != NULL) {
>               counter_u64_add(tcp_inp_lro_direct_queue, 1);
> -             tcp_lro_log(tp, lc, le, NULL, 22, 1, inp->inp_flags2, 0, 1);
> +             tcp_lro_log(tp, lc, le, NULL, 22, 1, tp->t_flags2, 0, 1);
>               tcp_queue_pkts(tp, le);
>       }
>       if (should_wake) {
>               /* Wakeup */
>               counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
>               if ((*tp->t_fb->tfb_do_queued_segments)(tp, 0))
> -                     inp = NULL;
> +                     /* TCP cb gone and unlocked. */
> +                     return (0);
>       }
> -     if (inp != NULL)
> -             INP_WUNLOCK(inp);
> +     INP_WUNLOCK(tptoinpcb(tp));
> +
>       return (0);     /* Success. */
>  }
>  #endif
> @@ -1674,7 +1662,7 @@ build_ack_entry(struct tcp_ackent *ae, struct tcphdr
> *th, struct mbuf *m,
>   * and strip all, but the IPv4/IPv6 header.
>   */
>  static bool
> -do_bpf_strip_and_compress(struct inpcb *inp, struct lro_ctrl *lc,
> +do_bpf_strip_and_compress(struct tcpcb *tp, struct lro_ctrl *lc,
>      struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp, struct mbuf
> **mv_to,
>      bool *should_wake, bool bpf_req, bool lagg_bpf_req, struct ifnet
> *lagg_ifp, bool can_append_old_cmp)
>  {
> @@ -1751,7 +1739,7 @@ do_bpf_strip_and_compress(struct inpcb *inp, struct
> lro_ctrl *lc,
>
>       /* Now lets look at the should wake states */
>       if ((other_opts == true) &&
> -         ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) == 0)) {
> +         ((tp->t_flags2 & TF2_DONT_SACK_QUEUE) == 0)) {
>               /*
>                * If there are other options (SACK?) and the
>                * tcp endpoint has not expressly told us it does
> @@ -1760,13 +1748,13 @@ do_bpf_strip_and_compress(struct inpcb *inp, struct
> lro_ctrl *lc,
>               *should_wake = true;
>       } else if (*should_wake == false) {
>               /* Wakeup override check if we are false here  */
> -             *should_wake = tcp_lro_check_wake_status(inp);
> +             *should_wake = tcp_lro_check_wake_status(tp);
>       }
>       /* Is the ack compressable? */
>       if (can_compress == false)
>               goto done;
>       /* Does the TCP endpoint support ACK compression? */
> -     if ((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)
> +     if ((tp->t_flags2 & TF2_MBUF_ACKCMP) == 0)
>               goto done;
>
>       /* Lets get the TOS/traffic class field */
> @@ -1785,7 +1773,8 @@ do_bpf_strip_and_compress(struct inpcb *inp, struct
> lro_ctrl *lc,
>       /* Now lets get space if we don't have some already */
>       if (*cmp == NULL) {
>  new_one:
> -             nm = tcp_lro_get_last_if_ackcmp(lc, le, inp, &n_mbuf,
> can_append_old_cmp);
> +             nm = tcp_lro_get_last_if_ackcmp(lc, le, tp, &n_mbuf,
> +                 can_append_old_cmp);
>               if (__predict_false(nm == NULL))
>                       goto done;
>               *cmp = nm;
> @@ -1812,7 +1801,7 @@ new_one:
>               nm = *cmp;
>               if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
>                       /* We ran out of space */
> -                     inp->inp_flags2 |= INP_MBUF_L_ACKS;
> +                     tp->t_flags2 |= TF2_MBUF_L_ACKS;
>                       goto new_one;
>               }
>       }
> diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c
> index f8c7557150dd..5ecb558dadb3 100644
> --- a/sys/netinet/tcp_stacks/bbr.c
> +++ b/sys/netinet/tcp_stacks/bbr.c
> @@ -891,7 +891,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb
> *tp, uint32_t cts, int32_
>                * Tell LRO that it can queue packets while
>                * we pace.
>                */
> -             bbr->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
> +             bbr->rc_tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
>               if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
>                   (bbr->rc_cwnd_limited == 0)) {
>                       /*
> @@ -899,9 +899,9 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb
> *tp, uint32_t cts, int32_
>                        * are running a rack timer we put on
>                        * the do not disturbe even for sack.
>                        */
> -                     inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
> +                     tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
>               } else
> -                     inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
> +                     tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
>               bbr->rc_pacer_started = cts;
>
>               (void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(slot),
> @@ -932,12 +932,12 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb
> *tp, uint32_t cts, int32_
>                        * if a sack arrives as long as we are
>                        * not cwnd limited.
>                        */
> -                     bbr->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
> -                     inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
> +                     tp->t_flags2 |= (TF2_MBUF_QUEUE_READY |
> +                         TF2_DONT_SACK_QUEUE);
>               } else {
>                       /* All other timers wake us up */
> -                     bbr->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
> -                     inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
> +                     tp->t_flags2 &= ~(TF2_MBUF_QUEUE_READY |
> +                         TF2_DONT_SACK_QUEUE);
>               }
>               bbr->bbr_timer_src = frm;
>               bbr_log_to_start(bbr, cts, hpts_timeout, slot, 0);
> @@ -2498,7 +2498,7 @@ bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts,
> uint32_t to, int32_t slot, u
>               log.u_bbr.flex4 = slot;
>               log.u_bbr.flex5 = bbr->rc_tp->t_hpts_slot;
>               log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
> -             log.u_bbr.pkts_out = bbr->rc_inp->inp_flags2;
> +             log.u_bbr.pkts_out = bbr->rc_tp->t_flags2;
>               log.u_bbr.flex8 = which;
>               TCP_LOG_EVENTP(bbr->rc_tp, NULL,
>                   &bbr->rc_inp->inp_socket->so_rcv,
> @@ -9940,13 +9940,13 @@ bbr_init(struct tcpcb *tp, void **ptr)
>       }
>       bbr = (struct tcp_bbr *)*ptr;
>       bbr->rtt_valid = 0;
> -     inp->inp_flags2 |= INP_CANNOT_DO_ECN;
> -     inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +     tp->t_flags2 |= TF2_CANNOT_DO_ECN;
> +     tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>       /* Take off any undesired flags */
> -     inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
> -     inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
> -     inp->inp_flags2 &= ~INP_MBUF_ACKCMP;
> -     inp->inp_flags2 &= ~INP_MBUF_L_ACKS;
> +     tp->t_flags2 &= ~TF2_MBUF_QUEUE_READY;
> +     tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
> +     tp->t_flags2 &= ~TF2_MBUF_ACKCMP;
> +     tp->t_flags2 &= ~TF2_MBUF_L_ACKS;
>
>       TAILQ_INIT(&bbr->r_ctl.rc_map);
>       TAILQ_INIT(&bbr->r_ctl.rc_free);
> @@ -12046,7 +12046,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct
> timeval *tv)
>                       return (retval < 0 ? retval : 0);
>               }
>       }
> -     bbr->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
> +     bbr->rc_tp->t_flags2 &= ~TF2_MBUF_QUEUE_READY;
>       if (hpts_calling &&
>           (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
>               bbr->r_ctl.rc_last_delay_val = 0;
> @@ -14078,15 +14078,14 @@ bbr_switch_failed(struct tcpcb *tp)
>        * pacer (if our flags are up) if so we are good, if
>        * not we need to get back into the pacer.
>        */
> -     struct inpcb *inp = tptoinpcb(tp);
>       struct timeval tv;
>       uint32_t cts;
>       uint32_t toval;
>       struct tcp_bbr *bbr;
>       struct hpts_diag diag;
>
> -     inp->inp_flags2 |= INP_CANNOT_DO_ECN;
> -     inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +     tp->t_flags2 |= TF2_CANNOT_DO_ECN;
> +     tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>       tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
>       if (tp->t_in_hpts > IHPTS_NONE) {
>               return;
> diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
> index 9e531a1d3182..44606c287f25 100644
> --- a/sys/netinet/tcp_stacks/rack.c
> +++ b/sys/netinet/tcp_stacks/rack.c
> @@ -6822,12 +6822,12 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct
> tcpcb *tp, uint32_t cts,
>        * are not on then these flags won't have any effect since it
>        * won't go through the queuing LRO path).
>        *
> -      * INP_MBUF_QUEUE_READY - This flags says that I am busy
> +      * TF2_MBUF_QUEUE_READY - This flags says that I am busy
>        *                        pacing output, so don't disturb. But
>        *                        it also means LRO can wake me if there
>        *                        is a SACK arrival.
>        *
> -      * INP_DONT_SACK_QUEUE - This flag is used in conjunction
> +      * TF2_DONT_SACK_QUEUE - This flag is used in conjunction
>        *                       with the above flag (QUEUE_READY) and
>        *                       when present it says don't even wake me
>        *                       if a SACK arrives.
> @@ -6842,7 +6842,7 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct
> tcpcb *tp, uint32_t cts,
>        * Other cases should usually have none of the flags set
>        * so LRO can call into us.
>        */
> -     inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
> +     tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY);
>       if (slot) {
>               rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
>               rack->r_ctl.rc_last_output_to = us_cts + slot;
> @@ -6854,7 +6854,7 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct
> tcpcb *tp, uint32_t cts,
>                * will be effective if mbuf queueing is on or
>                * compressed acks are being processed.
>                */
> -             inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
> +             tp->t_flags2 |= TF2_MBUF_QUEUE_READY;
>               /*
>                * But wait if we have a Rack timer running
>                * even a SACK should not disturb us (with
> @@ -6862,7 +6862,7 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct
> tcpcb *tp, uint32_t cts,
>                */
>               if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) {
>                       if (rack->r_rr_config != 3)
> -                             inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
> +                             tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
>                       else if (rack->rc_pace_dnd) {
>                               if (IN_RECOVERY(tp->t_flags)) {
>                                       /*
> @@ -6873,13 +6873,14 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct
> tcpcb *tp, uint32_t cts,
>                                        * and let all sacks wake us up.
>                                        *
>                                        */
> -                                     inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
> +                                     tp->t_flags2 |= TF2_DONT_SACK_QUEUE;
>                               }
>                       }
>               }
>               /* For sack attackers we want to ignore sack */
>               if (rack->sack_attack_disable == 1) {
> -                     inp->inp_flags2 |= 
> (INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
> +                     tp->t_flags2 |= (TF2_DONT_SACK_QUEUE |
> +                         TF2_MBUF_QUEUE_READY);
>               } else if (rack->rc_ack_can_sendout_data) {
>                       /*
>                        * Ahh but wait, this is that special case
> @@ -6887,7 +6888,8 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct
> tcpcb *tp, uint32_t cts,
>                        * backout the changes (used for non-paced
>                        * burst limiting).
>                        */
> -                     inp->inp_flags2 &= 
> ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY);
> +                     tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE |
> +                         TF2_MBUF_QUEUE_READY);
>               }
>               if ((rack->use_rack_rr) &&
>                   (rack->r_rr_config < 2) &&
> @@ -6908,7 +6910,7 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct
> tcpcb *tp, uint32_t cts,
>               }
>       } else if (hpts_timeout) {
>               /*
> -              * With respect to inp_flags2 here, lets let any new acks wake
> +              * With respect to t_flags2(?) here, lets let any new acks wake
>                * us up here. Since we are not pacing (no pacing timer), output
>                * can happen so we should let it. If its a Rack timer, then 
> any inbound
>                * packet probably won't change the sending (we will be blocked)
> @@ -8036,7 +8038,7 @@ rack_process_timers(struct tcpcb *tp, struct tcp_rack
> *rack, uint32_t cts, uint8
>                * no-sack wakeup on since we no longer have a PKT_OUTPUT
>                * flag in place.
>                */
> -             rack->rc_inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
> +             rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
>               ret = -3;
>               left = rack->r_ctl.rc_timer_exp - cts;
>               tcp_hpts_insert(tp, HPTS_MS_TO_SLOTS(left));
> @@ -14566,9 +14568,8 @@ rack_switch_failed(struct tcpcb *tp)
>        * This method gets called if a stack switch was
>        * attempted and it failed. We are left
>        * but our hpts timers were stopped and we
> -      * need to validate time units and inp_flags2.
> +      * need to validate time units and t_flags2.
>        */
> -     struct inpcb *inp = tptoinpcb(tp);
>       struct tcp_rack *rack;
>       struct timeval tv;
>       uint32_t cts;
> @@ -14578,11 +14579,11 @@ rack_switch_failed(struct tcpcb *tp)
>       rack = (struct tcp_rack *)tp->t_fb_ptr;
>       tcp_change_time_units(tp, TCP_TMR_GRANULARITY_USEC);
>       if  (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
> -             inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +             tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>       else
> -             inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
> +             tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
>       if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
> -             rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
> +             tp->t_flags2 |= TF2_MBUF_ACKCMP;
>       if (tp->t_in_hpts > IHPTS_NONE) {
>               /* Strange */
>               return;
> @@ -15089,13 +15090,13 @@ rack_init(struct tcpcb *tp, void **ptr)
>               }
>       }
>       rack_stop_all_timers(tp, rack);
> -     /* Setup all the inp_flags2 */
> +     /* Setup all the t_flags2 */
>       if  (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
> -             tptoinpcb(tp)->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +             tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>       else
> -             tptoinpcb(tp)->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
> +             tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
>       if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
> -             rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
> +             tp->t_flags2 |= TF2_MBUF_ACKCMP;
>       /*
>        * Timers in Rack are kept in microseconds so lets
>        * convert any initial incoming variables
> @@ -15417,7 +15418,7 @@ rack_set_state(struct tcpcb *tp, struct tcp_rack
> *rack)
>               break;
>       };
>       if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
> -             rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
> +             rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
>
>  }
>
> @@ -16528,7 +16529,7 @@ rack_do_segment_nounlock(struct tcpcb *tp, struct
> mbuf *m, struct tcphdr *th,
>                * so should process the packets.
>                */
>               slot_remaining = rack->r_ctl.rc_last_output_to - us_cts;
> -             if (rack->rc_inp->inp_flags2 & INP_DONT_SACK_QUEUE) {
> +             if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) {
>                       no_output = 1;
>               } else {
>                       /*
> @@ -22410,7 +22411,7 @@ rack_set_dgp(struct tcp_rack *rack)
>       rack->use_fixed_rate = 0;
>       if (rack->gp_ready)
>               rack_set_cc_pacing(rack);
> -     rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +     rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>       rack->rack_attempt_hdwr_pace = 0;
>       /* rxt settings */
>       rack->full_size_rxt = 1;
> @@ -22419,7 +22420,7 @@ rack_set_dgp(struct tcp_rack *rack)
>       rack->r_use_cmp_ack = 1;
>       if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
>           rack->r_use_cmp_ack)
> -             rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
> +             rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
>       /* scwnd=1 */
>       rack->rack_enable_scwnd = 1;
>       /* dynamic=100 */
> @@ -22536,11 +22537,11 @@ rack_set_profile(struct tcp_rack *rack, int prof)
>               if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
>                       rack->r_mbuf_queue = 1;
>                       if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
> -                             rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
> -                     rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +                             rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
> +                     rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>               } else {
>                       rack->r_mbuf_queue = 0;
> -                     rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
> +                     rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
>               }
>               if (rack_enable_shared_cwnd)
>                       rack->rack_enable_scwnd = 1;
> @@ -22687,7 +22688,6 @@ rack_process_option(struct tcpcb *tp, struct
> tcp_rack *rack, int sopt_name,
>       struct epoch_tracker et;
>       struct sockopt sopt;
>       struct cc_newreno_opts opt;
> -     struct inpcb *inp = tptoinpcb(tp);
>       uint64_t val;
>       int error = 0;
>       uint16_t ca, ss;
> @@ -22865,16 +22865,16 @@ rack_process_option(struct tcpcb *tp, struct
> tcp_rack *rack, int sopt_name,
>               break;
>       case TCP_USE_CMP_ACKS:
>               RACK_OPTS_INC(tcp_use_cmp_acks);
> -             if ((optval == 0) && (rack->rc_inp->inp_flags2 & 
> INP_MBUF_ACKCMP)) {
> +             if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) {
>                       /* You can't turn it off once its on! */
>                       error = EINVAL;
>               } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
>                       rack->r_use_cmp_ack = 1;
>                       rack->r_mbuf_queue = 1;
> -                     inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +                     tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>               }
>               if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
> -                     inp->inp_flags2 |= INP_MBUF_ACKCMP;
> +                     tp->t_flags2 |= TF2_MBUF_ACKCMP;
>               break;
>       case TCP_SHARED_CWND_TIME_LIMIT:
>               RACK_OPTS_INC(tcp_lscwnd);
> @@ -22937,9 +22937,9 @@ rack_process_option(struct tcpcb *tp, struct
> tcp_rack *rack, int sopt_name,
>               else
>                       rack->r_mbuf_queue = 0;
>               if  (rack->r_mbuf_queue || rack->rc_always_pace || 
> rack->r_use_cmp_ack)
> -                     inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +                     tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>               else
> -                     inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
> +                     tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
>               break;
>       case TCP_RACK_NONRXT_CFG_RATE:
>               RACK_OPTS_INC(tcp_rack_cfg_rate);
> @@ -23022,9 +23022,9 @@ rack_process_option(struct tcpcb *tp, struct
> tcp_rack *rack, int sopt_name,
>                       }
>               }
>               if  (rack->r_mbuf_queue || rack->rc_always_pace || 
> rack->r_use_cmp_ack)
> -                     inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
> +                     tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
>               else
> -                     inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
> +                     tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
>               /* A rate may be set irate or other, if so set seg size */
>               rack_update_seg(rack);
>               break;
> diff --git a/sys/netinet/tcp_stacks/rack_bbr_common.c
> b/sys/netinet/tcp_stacks/rack_bbr_common.c
> index 91bf32159004..625825fc7ed6 100644
> --- a/sys/netinet/tcp_stacks/rack_bbr_common.c
> +++ b/sys/netinet/tcp_stacks/rack_bbr_common.c
> @@ -445,8 +445,8 @@ skip_vnet:
>                        * been compressed. We assert the inp has
>                        * the flag set to enable this!
>                        */
> -                     KASSERT((inp->inp_flags2 & INP_MBUF_ACKCMP),
> -                         ("tp:%p inp:%p no INP_MBUF_ACKCMP flags?", tp, 
> inp));
> +                     KASSERT((tp->t_flags2 & TF2_MBUF_ACKCMP),
> +                         ("tp:%p no TF2_MBUF_ACKCMP flags?", tp));
>                       tlen = 0;
>                       drop_hdrlen = 0;
>                       th = NULL;
> diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
> index 40dd9b7f3aa9..a8a896b7ebe6 100644
> --- a/sys/netinet/tcp_subr.c
> +++ b/sys/netinet/tcp_subr.c
> @@ -1073,7 +1073,7 @@ tcp_default_fb_init(struct tcpcb *tp, void **ptr)
>
>       /* Make sure we get no interesting mbuf queuing behavior */
>       /* All mbuf queue/ack compress flags should be off */
> -     tcp_lro_features_off(tptoinpcb(tp));
> +     tcp_lro_features_off(tp);
>
>       /* Cancel the GP measurement in progress */
>       tp->t_flags &= ~TF_GPUTINPROG;
> @@ -2270,7 +2270,7 @@ tcp_newtcpcb(struct inpcb *inp)
>               V_tcp_mssdflt;
>
>       /* All mbuf queue/ack compress flags should be off */
> -     tcp_lro_features_off(tptoinpcb(tp));
> +     tcp_lro_features_off(tp);
>
>       callout_init_rw(&tp->t_callout, &inp->inp_lock, CALLOUT_RETURNUNLOCKED);
>       for (int i = 0; i < TT_N; i++)
> @@ -4051,14 +4051,14 @@ tcp_default_switch_failed(struct tcpcb *tp)
>       /*
>        * If a switch fails we only need to
>        * care about two things:
> -      * a) The inp_flags2
> +      * a) The t_flags2
>        * and
>        * b) The timer granularity.
>        * Timeouts, at least for now, don't use the
>        * old callout system in the other stacks so
>        * those are hopefully safe.
>        */
> -     tcp_lro_features_off(tptoinpcb(tp));
> +     tcp_lro_features_off(tp);
>       tcp_change_time_units(tp, TCP_TMR_GRANULARITY_TICKS);
>  }
>
> @@ -4236,15 +4236,15 @@ tcp_handle_orphaned_packets(struct tcpcb *tp)
>       /*
>        * Called when a stack switch is occurring from the fini()
>        * of the old stack. We assume the init() has already been
> -      * run of the new stack and it has set the inp_flags2 to
> +      * run of the new stack and it has set the t_flags2 to
>        * what it supports. This function will then deal with any
>        * differences, i.e. clean up packets that may be queued that
>        * the new stack does not support.
>        */
>
> -     if (tptoinpcb(tp)->inp_flags2 & INP_MBUF_L_ACKS)
> +     if (tp->t_flags2 & TF2_MBUF_L_ACKS)
>               return;
> -     if ((tptoinpcb(tp)->inp_flags2 & INP_SUPPORTS_MBUFQ) == 0 &&
> +     if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) == 0 &&
>           !STAILQ_EMPTY(&tp->t_inqueue)) {
>               /*
>                * It is unsafe to process the packets since a
> diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
> index 5f4bae92974e..54a347c2669e 100644
> --- a/sys/netinet/tcp_syncache.c
> +++ b/sys/netinet/tcp_syncache.c
> @@ -1724,7 +1724,7 @@ skip_alloc:
>       if (ltflags & TF_NOOPT)
>               sc->sc_flags |= SCF_NOOPT;
>       /* ECN Handshake */
> -     if (V_tcp_do_ecn && (inp->inp_flags2 & INP_CANNOT_DO_ECN) == 0)
> +     if (V_tcp_do_ecn && (tp->t_flags2 & TF2_CANNOT_DO_ECN) == 0)
>               sc->sc_flags |= tcp_ecn_syncache_add(tcp_get_flags(th), iptos);
>
>       if (V_tcp_syncookies)
> diff --git a/sys/netinet/tcp_var.h b/sys/netinet/tcp_var.h
> index a3016a143b93..a8bd6f8732cc 100644
> --- a/sys/netinet/tcp_var.h
> +++ b/sys/netinet/tcp_var.h
> @@ -677,16 +677,6 @@ tcp_output(struct tcpcb *tp)
>       return (rv);
>  }
>
> -static inline void
> -tcp_lro_features_off(struct inpcb *inp)
> -{
> -     inp->inp_flags2 &= ~(INP_SUPPORTS_MBUFQ|
> -         INP_MBUF_QUEUE_READY|
> -         INP_DONT_SACK_QUEUE|
> -         INP_MBUF_ACKCMP|
> -         INP_MBUF_L_ACKS);
> -}
> -
>  /*
>   * tcp_output_unlock()
>   * Always returns unlocked, handles drop request from advanced stacks.
> @@ -853,6 +843,12 @@ tcp_packets_this_ack(struct tcpcb *tp, tcp_seq ack)
>  #define      TF2_ECN_USE_ECT1        0x00000800 /* Use ECT(1) marking on 
> session */
>  #define TF2_TCP_ACCOUNTING   0x00001000 /* Do TCP accounting */
>  #define      TF2_HPTS_CALLS          0x00002000 /* tcp_output() called via 
> HPTS */
> +#define      TF2_MBUF_L_ACKS         0x00004000 /* large mbufs for ack 
> compression */
> +#define      TF2_MBUF_ACKCMP         0x00008000 /* mbuf ack compression ok */
> +#define      TF2_SUPPORTS_MBUFQ      0x00010000 /* Supports the mbuf queue 
> method */
> +#define      TF2_MBUF_QUEUE_READY    0x00020000 /* Inputs can be queued */
> +#define      TF2_DONT_SACK_QUEUE     0x00040000 /* Don't wake on sack */
> +#define      TF2_CANNOT_DO_ECN       0x00080000 /* The stack does not do ECN 
> */
>
>  /*
>   * Structure to hold TCP options that are only used during segment
> @@ -1543,6 +1539,15 @@ tcp_http_alloc_req_full(struct tcpcb *tp, struct
> http_req *req, uint64_t ts, int
>  int tcp_do_ack_accounting(struct tcpcb *tp, struct tcphdr *th, struct
> tcpopt *to, uint32_t tiwin, int mss);
>  #endif
>
> +static inline void
> +tcp_lro_features_off(struct tcpcb *tp)
> +{
> +     tp->t_flags2 &= ~(TF2_SUPPORTS_MBUFQ|
> +         TF2_MBUF_QUEUE_READY|
> +         TF2_DONT_SACK_QUEUE|
> +         TF2_MBUF_ACKCMP|
> +         TF2_MBUF_L_ACKS);
> +}
>
>  static inline void
>  tcp_fields_to_host(struct tcphdr *th)
>


-- 
Mateusz Guzik <mjguzik gmail.com>


Reply via email to