I'll fix this before I enable sec(4) in GENERIC. Thanks for reading it.

cheers,
dlg

On Tue, 4 Jul 2023 at 20:04, Vitaliy Makkoveev <m...@openbsd.org> wrote:

> On Tue, Jul 04, 2023 at 03:26:30PM +1000, David Gwynne wrote:
> > tl;dr: this adds sec(4) p2p IP interfaces. Traffic in and out of these
> > interfaces is protected by IPsec security associations (SAs), but
> > there are no flows (security policy database (SPD) entries) associated
> > with these SAs. The policy for using the sec(4) interfaces and their
> > SAs is route-based instead.
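> >
> > For example (illustrative only: the addresses are placeholders, and
> > how the SAs get keyed is out of scope here), the policy ends up
> > expressed as ordinary routes over the interface:
> >
> >     # ifconfig sec0 create
> >     # ifconfig sec0 inet 192.0.2.1 192.0.2.2 up
> >     # route add 198.51.100.0/24 192.0.2.2
> >
> > Anything routed over sec0 is then handled by the SAs bound to it.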
> >
>
> Hi,
>
> I don't like this sleep with the netlock held. The sec_send() handler
> has a netlock-provided sleep point, so a concurrent sec_ioctl() could
> grab the netlock and call sec_down() while sec_send() is awaiting
> netlock release. sec_down() will then sleep with the netlock held,
> waiting for sec_send() to finish, while sec_send() sleeps waiting to
> acquire the netlock. Smells like a deadlock.
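>
> Concretely, the interleaving I worry about:
>
>   task thread (sec_send)            ioctl thread (sec_down)
>   ----------------------            -----------------------
>                                     NET_LOCK()
>   NET_LOCK() -> sleeps,             taskq_del_barrier() -> sleeps,
>   netlock held by ioctl thread      waiting for sec_send() to return
>
> Neither thread can make progress.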
>
> Could you move taskq_del_barrier() and refcnt_finalize() to
> sec_clone_destroy() to prevent this?
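>
> Something like this untested sketch, with those two calls removed from
> sec_down() (I'm guessing at the shape of your sec_clone_destroy()
> here):
>
>	static int
>	sec_clone_destroy(struct ifnet *ifp)
>	{
>		struct sec_softc *sc = ifp->if_softc;
>
>		NET_LOCK();
>		sec_down(sc);
>		NET_UNLOCK();
>
>		/* sleep for the task and the refs without the netlock */
>		taskq_del_barrier(systq, &sc->sc_send);
>		refcnt_finalize(&sc->sc_refs, "secdown");
>
>		if_detach(ifp);
>		free(sc, M_DEVBUF, sizeof(*sc));
>
>		return (0);
>	}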
>
>
> > +static int
> > +sec_down(struct sec_softc *sc)
> > +{
> > +     struct ifnet *ifp = &sc->sc_if;
> > +     unsigned int idx = stoeplitz_h32(sc->sc_unit) % nitems(sec_map);
> > +
> > +     NET_ASSERT_LOCKED();
> > +
> > +     CLR(ifp->if_flags, IFF_RUNNING);
> > +
> > +     SMR_SLIST_REMOVE_LOCKED(&sec_map[idx], sc, sec_softc, sc_entry);
> > +
> > +     smr_barrier();
> > +     taskq_del_barrier(systq, &sc->sc_send);
> > +
> > +     refcnt_finalize(&sc->sc_refs, "secdown");
> > +
> > +     return (0);
> > +}
> > +
>
> > +static void
> > +sec_send(void *arg)
> > +{
> > +     struct sec_softc *sc = arg;
> > +     struct ifnet *ifp = &sc->sc_if;
> > +     struct ifqueue *ifq = &ifp->if_snd;
> > +     struct tdb *tdb;
> > +     struct mbuf *m;
> > +     int error;
> > +
> > +     if (!ISSET(ifp->if_flags, IFF_RUNNING))
> > +             return;
> > +
> > +     tdb = sec_tdb_get(sc->sc_unit);
> > +     if (tdb == NULL)
> > +             goto purge;
> > +
> > +     NET_LOCK();
> > +     while ((m = ifq_dequeue(ifq)) != NULL) {
> > +             CLR(m->m_flags, M_BCAST|M_MCAST);
> > +
> > +#if NPF > 0
> > +             pf_pkt_addr_changed(m);
> > +#endif
> > +
> > +             error = ipsp_process_packet(m, tdb,
> > +                 m->m_pkthdr.ph_family, /* already tunnelled? */ 0);
> > +             if (error != 0)
> > +                     counters_inc(ifp->if_counters, ifc_oerrors);
> > +     }
> > +     NET_UNLOCK();
> > +
> > +     tdb_unref(tdb);
> > +     return;
> > +
> > +purge:
> > +     counters_add(ifp->if_counters, ifc_oerrors, ifq_purge(ifq));
> > +}
> > +
>
> Nothing prevents sec_start() from being MP safe; there is no reason
> for it to wait on, or hold up, the rest of the kernel-locked code
> here. See the sketch after the quoted function.
>
> > +static void
> > +sec_start(struct ifnet *ifp)
> > +{
> > +     counters_add(ifp->if_counters, ifc_oerrors,
> > +         ifq_purge(&ifp->if_snd));
> > +}
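>
> i.e. mark the interface MP safe when it is attached, something like
> this (sketch; assuming the usual attach sequence in
> sec_clone_create()):
>
>	/* let if_start run without the kernel lock */
>	ifp->if_xflags |= IFXF_MPSAFE;
>	if_attach(ifp);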
>
