Re: breaking the crunchgen logic into a share/mk file

2010-11-20 Thread John Baldwin
On Tuesday, November 16, 2010 8:45:08 am Andrey V. Elsukov wrote:
> On 16.11.2010 16:29, John Baldwin wrote:
> > Err, are there no longer hard links to all of the frontends for a given 
> > crunch?  If so, that is a problem as it will make rescue much harder to use.
> 
> Yes, probably this patch is not needed and it should be fixed somewhere in
> makefiles. But currently rescue does not have any hardlinks:
> http://pub.allbsd.org/FreeBSD-snapshots/i386-i386/9.0-HEAD-20101116-JPSNAP/cdrom/livefs/rescue/
> 
> And what it was before:
> http://pub.allbsd.org/FreeBSD-snapshots/i386-i386/9.0-HEAD-20101112-JPSNAP/cdrom/livefs/rescue/

That definitely needs to be fixed.

-- 
John Baldwin
___
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscr...@freebsd.org"


flowtable_cleaner/flowtable_flush livelock

2010-11-20 Thread Mikolaj Golub
Hi, 

Running something like below under VirtualBox (CURRENT, VIMAGE)

echo "creating jail and iface"
jail -c name="${JAIL}" vnet persist
ifconfig "${EPAIR}" create
ifconfig "${EPAIR}b" vnet "${JAIL}"
sleep 1
echo "destroying jail and iface"
# below is a race
jail -r "${JAIL}" &
ifconfig "${EPAIR}a" destroy
wait

I will frequently get a livelock (it might also crash, but that may be a
different story) between these 3 threads in flowtable code:

 1308  1183  1183 0  D+  flowclea 0xc101a314 ifconfig
 1307  1183  1183 0  R+  jail
   18 0 0 0  RL  [flowcleaner]

Thread 100075 at 0xc2685b40:
 proc (pid 1308): 0xc28e4aa0
 name: ifconfig
 stack: 0xc8742000-0xc8743fff
 flags: 0x20804  pflags: 0
 state: INHIBITED: {SLEEPING}
 wmesg: flowcleanwait  wchan: 0xc101a314
 priority: 138
 container lock: sleepq chain (0xc0ebee0c)

Tracing command ifconfig pid 1308 tid 100075 td 0xc2685b40
sched_switch(c2685b40,0,104,191,4b654535,...) at sched_switch+0x3d3
mi_switch(104,0,c0d299f4,1f3,0,...) at mi_switch+0x200
sleepq_switch(c2685b40,0,c0d299f4,268,c2685b40,...) at sleepq_switch+0x15f
sleepq_wait(c101a314,0,c87439c0,1,0,...) at sleepq_wait+0x63
_cv_wait(c101a314,c101a31c,c87439f8,17,0,...) at _cv_wait+0x243
flowtable_flush(0,c1ef,c0d353e4,38e,40,...) at flowtable_flush+0x90
if_detach_internal(c8743a68,c0999d7d,c1ef,c1ef,c8743aa4,...) at 
if_detach_internal+0x43d
if_detach(c1ef) at if_detach+0x10
ether_ifdetach(c1ef,1,c8743aa4,c099309e,c0d35665,...) at ether_ifdetach+0x3d
epair_clone_destroy(c2963c40,c1ef,c0d359fd,105,c2963c70,...) at 
epair_clone_destroy+0x6b
if_clone_destroyif(c2963c40,c1ef,c0d359fd,e0,c08cfc1d,...) at 
if_clone_destroyif+0x147
if_clone_destroy(c1fee8e0,19c,3,c2685b40,c0d52bad,...) at if_clone_destroy+0x147
ifioctl(c2564680,80206979,c1fee8e0,c2685b40,c08a6a31,...) at ifioctl+0x621
soo_ioctl(c1ff5d90,80206979,c1fee8e0,c1d83200,c2685b40,...) at soo_ioctl+0x427
kern_ioctl(c2685b40,3,80206979,c1fee8e0,743cec,...) at kern_ioctl+0x20d
ioctl(c2685b40,c8743cec,c2685b40,c8743d28,c0d2a23d,...) at ioctl+0x134
syscallenter(c2685b40,c8743ce4,c8743ce4,0,c0eb0c40,...) at syscallenter+0x2c3
syscall(c8743d28) at syscall+0x4f
Xint0x80_syscall() at Xint0x80_syscall+0x21
--- syscall (54, FreeBSD ELF32, ioctl), eip = 0x281c73f3, esp = 0xbfbfe46c, ebp 
= 0xbfbfe488 ---

Thread 100050 at 0xc20032d0:
 proc (pid 1307): 0xc267f7f8
 name: jail
 stack: 0xc43fd000-0xc43fefff
 flags: 0x4  pflags: 0
 state: RUNQ
 priority: 137
 container lock: sched lock 0 (0xc0eb0c40)

Tracing pid 1307 tid 100050 td 0xc20032d0
sched_switch(c20032d0,0,602,18c,4b69c645,...) at sched_switch+0x3d3
mi_switch(602,0,c0d25710,cd,0,...) at mi_switch+0x200
critical_exit(c0e6a98c,1,c0e6a98c,c43fea20,0,...) at critical_exit+0xa8
intr_event_handle(c1dbfe80,c43fea20,ff6b36c5,c20032d0,1,...) at 
intr_event_handle+0x115
intr_execute_handlers(c0e6a98c,c43fea20,c20032d0,c101a314,c43fea64,...) at 
intr_execute_handlers+
0x49
atpic_handle_intr(1,c43fea20) at atpic_handle_intr+0x7c
Xatpic_intr1() at Xatpic_intr1+0x22
--- interrupt, eip = 0xc0c30cfb, esp = 0xc43fea60, ebp = 0xc43fea64 ---
spinlock_exit(c0eb0c40,4,c0d236ac,109,c091cf25,39248) at spinlock_exit+0x2b
_mtx_unlock_spin_flags(c0eb0c40,0,c0d299f4,26a) at _mtx_unlock_spin_flags+0x12d
sleepq_wait(c101a314,0,c43feadc,1,0,...) at sleepq_wait+0x85
_cv_wait(c101a314,c101a31c,c43feb14,17,0,...) at _cv_wait+0x243
flowtable_flush(0,c1ef0400,c0d353e4,38e,c1d42dc0,...) at flowtable_flush+0x90
if_detach_internal(8,c0d37941,117,0,c0d204c3,...) at if_detach_internal+0x43d
if_vmove(c1ef0400,c1d720c0,117,115,0,...) at if_vmove+0x1b
vnet_destroy(c1d5d260,c0d204c3,9c6,9b8,17,...) at vnet_destroy+0x163
prison_deref(c08b7d2b,c253c028,0,c0d204c3,2,...) at prison_deref+0x3a2
prison_remove_one(c0e20060,1,c0d204c3,83f,c0c3d6cf,...) at 
prison_remove_one+0x53
jail_remove(c20032d0,c43fecec,c20032d0,c43fed28,c0d2a23d,...) at 
jail_remove+0x266
syscallenter(c20032d0,c43fece4,c43fece4,0,c0eb0c40,...) at syscallenter+0x2c3
syscall(c43fed28) at syscall+0x4f
Xint0x80_syscall() at Xint0x80_syscall+0x21
--- syscall (508, FreeBSD ELF32, jail_remove), eip = 0x280efa1b, esp = 
0xbfbfebdc, ebp = 0xbfbfeca8 ---

Thread 100037 at 0xc1fc6870:
 proc (pid 18): 0xc1fd47f8
 name: flowcleaner
 stack: 0xc43c6000-0xc43c7fff
 flags: 0x4  pflags: 0x20
 state: RUNQ
 priority: 160
 container lock: sched lock 0 (0xc0eb0c40)

Tracing pid 18 tid 100037 td 0xc1fc6870
sched_switch(c1fc6870,0,104,191,3d6a1775,...) at sched_switch+0x3d3
mi_switch(104,0,c0d299f4,1f3,0,...) at mi_switch+0x200
sleepq_switch(c1fc6870,0,c0d299f4,28b,c1fc6870,...) at sleepq_switch+0x15f
sleepq_timedwait(c101a314,0,c43c7ca0,1,0,...) at sleepq_timedwait+0x6b
_cv_timedwait(c101a314,c101a31c,7d0,620,c1fc6870,...) at _cv_timedwait+0x252
flowtable_cleaner(0,c43c7d28,c0d20004,33b,c1fd47f8,...) at 
flowtable_cleaner+0x255
fork_exit(c0990630,0,c43c7d28) at fork_ex

Re: flowtable_cleaner/flowtable_flush livelock

2010-11-20 Thread Bjoern A. Zeeb

On Sat, 20 Nov 2010, Mikolaj Golub wrote:

Hi,


Running something like below under VirtualBox (CURRENT, VIMAGE)

...

So the question is who is guilty in this situation? ULE? flowtable? Or
jail/epair, which should not allow simultaneous entering of flowtable_flush?


In general: you for running an experimental feature;-)

Seriously, flowtable has a number of different problems:
1) you will leak neighbor entries still
2) I have patches for VIMAGE but if you are running VIMAGE you are
   advised not to run flowtable.
3) FLOWTABLE should go from GENERIC but that's a different story.

I think net@ would have been a better initial place but since this
seems to be a problem when interacting with VIMAGE
freebsd-virtualization might be better.

What you could try is:
http://people.freebsd.org/~bz/20100216-10-ft-cv.diff

/bz

--
Bjoern A. Zeeb  Welcome a new stage of life.
 Going to jail sucks --  All my daemons like it!
  http://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/jails.html
___
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscr...@freebsd.org"


Re: flowtable_cleaner/flowtable_flush livelock

2010-11-20 Thread Mikolaj Golub

On Sat, 20 Nov 2010 17:03:13 +0000 (UTC) Bjoern A. Zeeb wrote:

 BAZ> On Sat, 20 Nov 2010, Mikolaj Golub wrote:

 BAZ> Hi,

 >> Running something like below under VirtualBox (CURRENT, VIMAGE)
 BAZ> ...
 >> So the question is who is guilty in this situation? ULE? flowtable? Or
 >> jail/epair, which should not allow simultaneous entering of flowtable_flush?

 BAZ> In general: you for running an experimental feature;-)

I like experimenting :-)

 BAZ> What you could try is:
 BAZ> http://people.freebsd.org/~bz/20100216-10-ft-cv.diff

I will. Thanks.

-- 
Mikolaj Golub
___
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscr...@freebsd.org"


Re: flowtable_cleaner/flowtable_flush livelock

2010-11-20 Thread Mikolaj Golub

On Sat, 20 Nov 2010 17:03:13 +0000 (UTC) Bjoern A. Zeeb wrote:

 BAZ> I think net@ would have been a better initial place but since this
 BAZ> seems to be a problem when interacting with VIMAGE
 BAZ> freebsd-virtualization might be better.

 BAZ> What you could try is:
 BAZ> http://people.freebsd.org/~bz/20100216-10-ft-cv.diff

Ah, I have recalled that I had already seen this patch but did not understand
what problem it fixed, and thus did not associate it with my case
(actually, I thought you had committed all these patches to the tree a long
time ago and that I was already running the kernel with them :-).

BTW, the patch needs updating: in the current code, flow_full() wakes up the
flowcleaner too, and the flowcleaner sleeps for flowclean_freq instead of
10*hz (see the attached patch).

With the patch I can't reproduce the lock. Only the crash I mentioned in my
first letter is observed:

(kgdb) bt
#0  doadump () at pcpu.h:231
#1  0xc04f2789 in db_fncall (dummy1=1, dummy2=0, dummy3=-1056677760, 
dummy4=0xc8731860 "")
at /usr/src/sys/ddb/db_command.c:548
#2  0xc04f2b81 in db_command (last_cmdp=0xc0e79f7c, cmd_table=0x0, dopager=1)
at /usr/src/sys/ddb/db_command.c:445
#3  0xc04f2cda in db_command_loop () at /usr/src/sys/ddb/db_command.c:498
#4  0xc04f4bfd in db_trap (type=12, code=0) at /usr/src/sys/ddb/db_main.c:229
#5  0xc09119be in kdb_trap (type=12, code=0, tf=0xc8731a94) at 
/usr/src/sys/kern/subr_kdb.c:546
#6  0xc0c3da8f in trap_fatal (frame=0xc8731a94, eva=3735929074)
at /usr/src/sys/i386/i386/trap.c:970
#7  0xc0c3e0be in trap (frame=0xc8731a94) at /usr/src/sys/i386/i386/trap.c:361
#8  0xc0c272dc in calltrap () at /usr/src/sys/i386/i386/exception.s:168
#9  0xc0988415 in strncmp (s1=0xc1fee4e0 "epair20b", 
s2=0xdeadc0f2 , n=16) at 
/usr/src/sys/libkern/strncmp.c:44
#10 0xc09929d7 in ifunit_ref (name=0xc1fee4e0 "epair20b") at 
/usr/src/sys/net/if.c:1986
#11 0xc0996982 in ifioctl (so=0xc25649c0, cmd=3223349536, data=0xc1fee4e0 
"epair20b", 
td=0xc286c000) at /usr/src/sys/net/if.c:2475
#12 0xc09307f7 in soo_ioctl (fp=0xc1ff5af0, cmd=3223349536, data=0xc1fee4e0, 
active_cred=0xc1d83e80, td=0xc286c000) at /usr/src/sys/kern/sys_socket.c:212
#13 0xc092a61d in kern_ioctl (td=0xc286c000, fd=3, com=3223349536, 
data=0xc1fee4e0 "epair20b")
at file.h:254
#14 0xc092a7a4 in ioctl (td=0xc286c000, uap=0xc8731cec) at 
/usr/src/sys/kern/sys_generic.c:679
#15 0xc091f303 in syscallenter (td=0xc286c000, sa=0xc8731ce4)
at /usr/src/sys/kern/subr_trap.c:318
#16 0xc0c3dd2f in syscall (frame=0xc8731d28) at 
/usr/src/sys/i386/i386/trap.c:1094
#17 0xc0c27371 in Xint0x80_syscall () at /usr/src/sys/i386/i386/exception.s:266
#18 0x0033 in ?? ()
Previous frame inner to this frame (corrupt stack?)
(kgdb) fr 10
#10 0xc09929d7 in ifunit_ref (name=0xc1fee4e0 "epair20b") at 
/usr/src/sys/net/if.c:1986
1986if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 &&
(kgdb) p ifp
$1 = (struct ifnet *) 0xdeadc0de

I might want to report it to freebsd-virtualization unless I find that this is
a known issue.

-- 
Mikolaj Golub

Index: sys/net/flowtable.c
===
--- sys/net/flowtable.c	(revision 215574)
+++ sys/net/flowtable.c	(working copy)
@@ -195,7 +195,8 @@ STATIC_VNET_DEFINE(uma_zone_t, flow_ipv6_zone);
 #define	V_flow_ipv6_zone	VNET(flow_ipv6_zone)
 
 
-static struct cv 	flowclean_cv;
+static struct cv 	flowclean_f_cv;
+static struct cv 	flowclean_c_cv;
 static struct mtx	flowclean_lock;
 static uint32_t		flowclean_cycles;
 static uint32_t		flowclean_freq;
@@ -951,7 +952,7 @@ flow_full(struct flowtable *ft)
 		if ((ft->ft_flags & FL_HASH_ALL) == 0)
 			ft->ft_udp_idle = ft->ft_fin_wait_idle =
 			ft->ft_syn_idle = ft->ft_tcp_idle = 5;
-		cv_broadcast(&flowclean_cv);
+		cv_broadcast(&flowclean_c_cv);
 	} else if (!full && ft->ft_full) {
 		flowclean_freq = 20*hz;
 		if ((ft->ft_flags & FL_HASH_ALL) == 0)
@@ -1560,14 +1561,14 @@ flowtable_cleaner(void)
 		}
 		VNET_LIST_RUNLOCK();
 
-		flowclean_cycles++;
 		/*
 		 * The 10 second interval between cleaning checks
 		 * is arbitrary
 		 */
 		mtx_lock(&flowclean_lock);
-		cv_broadcast(&flowclean_cv);
-		cv_timedwait(&flowclean_cv, &flowclean_lock, flowclean_freq);
+		flowclean_cycles++;
+		cv_broadcast(&flowclean_f_cv);
+		cv_timedwait(&flowclean_c_cv, &flowclean_lock, 10*hz);
 		mtx_unlock(&flowclean_lock);
 	}
 }
@@ -1580,8 +1581,8 @@ flowtable_flush(void *unused __unused)
 	mtx_lock(&flowclean_lock);
 	start = flowclean_cycles;
 	while (start == flowclean_cycles) {
-		cv_broadcast(&flowclean_cv);
-		cv_wait(&flowclean_cv, &flowclean_lock);
+		cv_broadcast(&flowclean_c_cv);
+		cv_wait(&flowclean_f_cv, &flowclean_lock);
 	}
 	mtx_unlock(&flowclean_lock);
 }
@@ -1613,7 +1614,8 @@ static void
 flowtable_init(const void *unused __unused)
 {
 
-	cv_init(&flowclean_cv, "flowcleanwait");
+	cv_init(&flowclean_c_cv, "c_flowcleanwait");
+	cv_init(&flowclean_f_cv, "f_flowcleanwait");
 	mtx_init(&flowclean

Re: flowtable_cleaner/flowtable_flush livelock

2010-11-20 Thread Bjoern A. Zeeb

On Sat, 20 Nov 2010, Mikolaj Golub wrote:

Hey,


On Sat, 20 Nov 2010 17:03:13 +0000 (UTC) Bjoern A. Zeeb wrote:

BAZ> I think net@ would have been a better initial place but since this
BAZ> seems to be a problem when interacting with VIMAGE
BAZ> freebsd-virtualization might be better.

BAZ> What you could try is:
BAZ> http://people.freebsd.org/~bz/20100216-10-ft-cv.diff

Ah, I have recalled that I had already seen this patch but did not understand
what problem it fixed, and thus did not associate it with my case
(actually, I thought you had committed all these patches to the tree a long
time ago and that I was already running the kernel with them :-).

BTW, the patch needs updating: in the current code, flow_full() wakes up the
flowcleaner too, and the flowcleaner sleeps for flowclean_freq instead of
10*hz (see the attached patch).


For sure it does; as you can see form the date in the file name, that
patch was from the beginning of the year.


With the patch I can't reproduce the lock. Only the crash I mentioned in my
first letter is observed:


Hmm, I guess I should get the updated version comitted then.


How do you reproduce the crash?  Is it just another ifioctl race as
from kern/146250?

/bz

--
Bjoern A. Zeeb  Welcome a new stage of life.
 Going to jail sucks --  All my daemons like it!
  http://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/jails.html
___
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscr...@freebsd.org"


Quick i386 question...

2010-11-20 Thread Sergio Andrés Gómez del Real
If received an interrupt while in protected-mode and paging enabled,
is linear address from IDT stored at the idtr translated using the
paging-hierarchy structures?
I have looked at the interrupt/exception chapter in the corresponding
Intel manual but can't find the answer. Maybe I overlooked.
Thanks.
___
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscr...@freebsd.org"


Re: flowtable_cleaner/flowtable_flush livelock

2010-11-20 Thread Mikolaj Golub

On Sat, 20 Nov 2010 20:04:35 +0000 (UTC) Bjoern A. Zeeb wrote:

 BAZ> How do you reproduce the crash?  Is it just another ifioctl race as
 BAZ> from kern/146250?

Using the same script I posted in my first mail, removing a jail and epair
interface simultaneously:

  ifconfig epair0b vnet myjail
  jail -r myjail &
  ifconfig epair0a destroy

To me it looks like another thread is destroying the interface improperly at
that time. One time I saw a crash in another thread:

(kgdb) bt
#0  doadump () at pcpu.h:231
#1  0xc04f2439 in db_fncall (dummy1=1, dummy2=0, dummy3=-1056689728, 
dummy4=0xc2ba5984 "")
at /usr/src/sys/ddb/db_command.c:548
#2  0xc04f2831 in db_command (last_cmdp=0xc0e75cfc, cmd_table=0x0, dopager=1)
at /usr/src/sys/ddb/db_command.c:445
#3  0xc04f298a in db_command_loop () at /usr/src/sys/ddb/db_command.c:498
#4  0xc04f48ad in db_trap (type=12, code=0) at /usr/src/sys/ddb/db_main.c:229
#5  0xc090face in kdb_trap (type=12, code=0, tf=0xc2ba5bf8) at 
/usr/src/sys/kern/subr_kdb.c:546
#6  0xc0c3d2bf in trap_fatal (frame=0xc2ba5bf8, eva=3735929066)
at /usr/src/sys/i386/i386/trap.c:971
#7  0xc0c3d4f0 in trap_pfault (frame=0xc2ba5bf8, usermode=0, eva=3735929066)
at /usr/src/sys/i386/i386/trap.c:893
#8  0xc0c3dca5 in trap (frame=0xc2ba5bf8) at /usr/src/sys/i386/i386/trap.c:568
#9  0xc0c24a9c in calltrap () at /usr/src/sys/i386/i386/exception.s:168
#10 0xc09ad219 in vnet_destroy (vnet=0xc2f24240) at /usr/src/sys/net/vnet.c:284
#11 0xc08b5922 in prison_deref (pr=0xc3640800, flags=Variable "flags" is not 
available.
) at /usr/src/sys/kern/kern_jail.c:2506
#12 0xc08b5ab0 in prison_complete (context=0xc3640800, pending=1)
at /usr/src/sys/kern/kern_jail.c:2433
#13 0xc091c87b in taskqueue_run_locked (queue=0xc2dd6d80)
at /usr/src/sys/kern/subr_taskqueue.c:247
#14 0xc091cf17 in taskqueue_thread_loop (arg=0xc0ebb8e8)
at /usr/src/sys/kern/subr_taskqueue.c:379
#15 0xc08af558 in fork_exit (callout=0xc091ceb0 , 
arg=0xc0ebb8e8, 
frame=0xc2ba5d28) at /usr/src/sys/kern/kern_fork.c:835
#16 0xc0c24b44 in fork_trampoline () at /usr/src/sys/i386/i386/exception.s:275
(kgdb) fr 10
#10 0xc09ad219 in vnet_destroy (vnet=0xc2f24240) at /usr/src/sys/net/vnet.c:284
284 TAILQ_FOREACH_SAFE(ifp, &V_ifnet, if_link, nifp) {
(kgdb) list
279 VNET_LIST_WUNLOCK();
280
281 CURVNET_SET_QUIET(vnet);
282
283 /* Return all inherited interfaces to their parent vnets. */
284 TAILQ_FOREACH_SAFE(ifp, &V_ifnet, if_link, nifp) {
285 if (ifp->if_home_vnet != ifp->if_vnet)
286 if_vmove(ifp, ifp->if_home_vnet);
287 }
288
(kgdb) p ifp
$1 = (struct ifnet *) 0xdeadc0de

Doesn't this need some lock protection? I tried the attached patch, but still
observed crashes in ifioctl I posted earlier.  

-- 
Mikolaj Golub

Index: sys/net/vnet.c
===
--- sys/net/vnet.c	(revision 215576)
+++ sys/net/vnet.c	(working copy)
@@ -268,7 +268,7 @@ vnet_alloc(void)
 void
 vnet_destroy(struct vnet *vnet)
 {
-	struct ifnet *ifp, *nifp;
+	struct ifnet *ifp;
 
 	SDT_PROBE2(vnet, functions, vnet_destroy, entry, __LINE__, vnet);
 	KASSERT(vnet->vnet_sockcnt == 0,
@@ -281,10 +281,20 @@ vnet_destroy(struct vnet *vnet)
 	CURVNET_SET_QUIET(vnet);
 
 	/* Return all inherited interfaces to their parent vnets. */
-	TAILQ_FOREACH_SAFE(ifp, &V_ifnet, if_link, nifp) {
-		if (ifp->if_home_vnet != ifp->if_vnet)
+	do {
+		IFNET_RLOCK();
+		TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
+			if (ifp->if_home_vnet != ifp->if_vnet) {
+if_ref(ifp);
+break;
+			}
+		}
+		IFNET_RUNLOCK();
+		if (ifp != NULL) {
 			if_vmove(ifp, ifp->if_home_vnet);
-	}
+			if_rele(ifp);
+		}
+	} while (ifp != NULL);
 
 	vnet_sysuninit();
 	CURVNET_RESTORE();
___
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscr...@freebsd.org"

Best way to determine if an IRQ is present

2010-11-20 Thread Garrett Cooper
Trying to do a complete solution for kern/145385, Andriy has
raised concerns about IRQ mapping to CPUs; while I've have put
together more pieces of the puzzle, I'm a bit confused how I determine
whether or not an IRQ is available for use.
Sure, I could linear probe a series of IRQs, but that would
probably be expensive, and different architectures treat IRQs
differently, so building assumptions based on the fact that IRQ
hierarchy is done in a particular order is probably not the best thing
to do.
I've poked around kern/kern_cpuset.c and kern/kern_intr.c a bit
but I may have missed something important...
Thanks,
-Garrett
___
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscr...@freebsd.org"