Author: mav
Date: Sat Jul 27 15:02:19 2013
New Revision: 253706
URL: http://svnweb.freebsd.org/changeset/base/253706

Log:
  Introduce a 3-second timeout on the `graid stop` command (mostly with -f flag).
  Since completion waiting goes in g_event thread, it may cause GEOM deadlock
  if consumer on top (for example, ZFS) uses g_event thread for closing.

Modified:
  head/sys/geom/raid/g_raid.c
  head/sys/geom/raid/g_raid_ctl.c

Modified: head/sys/geom/raid/g_raid.c
==============================================================================
--- head/sys/geom/raid/g_raid.c Sat Jul 27 14:58:23 2013        (r253705)
+++ head/sys/geom/raid/g_raid.c Sat Jul 27 15:02:19 2013        (r253706)
@@ -2171,7 +2171,7 @@ g_raid_destroy_disk(struct g_raid_disk *
 int
 g_raid_destroy(struct g_raid_softc *sc, int how)
 {
-       int opens;
+       int error, opens;
 
        g_topology_assert_not();
        if (sc == NULL)
@@ -2188,11 +2188,13 @@ g_raid_destroy(struct g_raid_softc *sc, 
                        G_RAID_DEBUG1(1, sc,
                            "%d volumes are still open.",
                            opens);
+                       sx_xunlock(&sc->sc_lock);
                        return (EBUSY);
                case G_RAID_DESTROY_DELAYED:
                        G_RAID_DEBUG1(1, sc,
                            "Array will be destroyed on last close.");
                        sc->sc_stopping = G_RAID_DESTROY_DELAYED;
+                       sx_xunlock(&sc->sc_lock);
                        return (EBUSY);
                case G_RAID_DESTROY_HARD:
                        G_RAID_DEBUG1(1, sc,
@@ -2206,9 +2208,9 @@ g_raid_destroy(struct g_raid_softc *sc, 
        /* Wake up worker to let it selfdestruct. */
        g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
        /* Sleep until node destroyed. */
-       sx_sleep(&sc->sc_stopping, &sc->sc_lock,
-           PRIBIO | PDROP, "r:destroy", 0);
-       return (0);
+       error = sx_sleep(&sc->sc_stopping, &sc->sc_lock,
+           PRIBIO | PDROP, "r:destroy", hz * 3);
+       return (error == EWOULDBLOCK ? EBUSY : 0);
 }
 
 static void
@@ -2303,8 +2305,6 @@ g_raid_destroy_geom(struct gctl_req *req
        sx_xlock(&sc->sc_lock);
        g_cancel_event(sc);
        error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT);
-       if (error != 0)
-               sx_xunlock(&sc->sc_lock);
        g_topology_lock();
        return (error);
 }
@@ -2469,7 +2469,6 @@ g_raid_shutdown_post_sync(void *arg, int
        struct g_geom *gp, *gp2;
        struct g_raid_softc *sc;
        struct g_raid_volume *vol;
-       int error;
 
        mp = arg;
        DROP_GIANT();
@@ -2483,9 +2482,7 @@ g_raid_shutdown_post_sync(void *arg, int
                TAILQ_FOREACH(vol, &sc->sc_volumes, v_next)
                        g_raid_clean(vol, -1);
                g_cancel_event(sc);
-               error = g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
-               if (error != 0)
-                       sx_xunlock(&sc->sc_lock);
+               g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
                g_topology_lock();
        }
        g_topology_unlock();

Modified: head/sys/geom/raid/g_raid_ctl.c
==============================================================================
--- head/sys/geom/raid/g_raid_ctl.c     Sat Jul 27 14:58:23 2013        
(r253705)
+++ head/sys/geom/raid/g_raid_ctl.c     Sat Jul 27 15:02:19 2013        
(r253706)
@@ -181,7 +181,7 @@ g_raid_ctl_stop(struct gctl_req *req, st
        sx_xlock(&sc->sc_lock);
        error = g_raid_destroy(sc, how);
        if (error != 0)
-               sx_xunlock(&sc->sc_lock);
+               gctl_error(req, "Array is busy.");
        g_topology_lock();
 }
 
_______________________________________________
svn-src-head@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscribe@freebsd.org"

Reply via email to