The only user of the __cpu_simple_lock interface on arm is the
pxa2x0_do_pending() code used by zaurus.
Switch to using a plain static variable, the way i80321intc_do_pending()
does on armish, and remove the simple lock code entirely.
I've built armv7, armish and zaurus kernels on armv7, but I have no
zaurus hardware to test against.
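
For reference, the pattern being adopted is a static flag that is only
read and written while interrupts are masked. A rough sketch of the
idea, with illustrative names rather than the literal
i80321intc_do_pending() code:

void
example_do_pending(void)
{
	static int processing = 0;	/* only touched with IRQs masked */
	int oldirqstate;

	oldirqstate = disable_interrupts(PSR_I);

	/* Another invocation is already draining pending softints. */
	if (processing == 1) {
		restore_interrupts(oldirqstate);
		return;
	}
	processing = 1;

	/* ... dispatch pending soft interrupts here ... */

	processing = 0;
	restore_interrupts(oldirqstate);
}

With interrupts disabled around every access, the flag needs no atomic
instruction, which is what makes the swp-based simple lock removable.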
Index: include/lock.h
===================================================================
RCS file: /cvs/src/sys/arch/arm/include/lock.h,v
retrieving revision 1.5
diff -u -p -r1.5 lock.h
--- include/lock.h 29 Mar 2014 18:09:28 -0000 1.5
+++ include/lock.h 24 Mar 2016 08:39:37 -0000
@@ -1,90 +1,8 @@
/* $OpenBSD: lock.h,v 1.5 2014/03/29 18:09:28 guenther Exp $ */
-/* $NetBSD: lock.h,v 1.3 2002/10/07 23:19:49 bjh21 Exp $ */
-/*-
- * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Jason R. Thorpe.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Machine-dependent spin lock operations.
- *
- * NOTE: The SWP insn used here is available only on ARM architecture
- * version 3 and later (as well as 2a). What we are going to do is
- * expect that the kernel will trap and emulate the insn. That will
- * be slow, but give us the atomicity that we need.
- */
+/* public domain */
#ifndef _ARM_LOCK_H_
#define _ARM_LOCK_H_
-
-#include <arm/atomic.h>
-
-typedef volatile int __cpu_simple_lock_t;
-
-#define __SIMPLELOCK_LOCKED 1
-#define __SIMPLELOCK_UNLOCKED 0
-
-static __inline int
-__swp(int __val, volatile int *__ptr)
-{
-
- __asm volatile("swp %0, %1, [%2]"
- : "=r" (__val) : "r" (__val), "r" (__ptr) : "memory");
- return __val;
-}
-
-static __inline void __attribute__((__unused__))
-__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
-{
-
- *alp = __SIMPLELOCK_UNLOCKED;
-}
-
-static __inline void __attribute__((__unused__))
-__cpu_simple_lock(__cpu_simple_lock_t *alp)
-{
-
- while (__swp(__SIMPLELOCK_LOCKED, alp) != __SIMPLELOCK_UNLOCKED)
- continue;
-}
-
-static __inline int __attribute__((__unused__))
-__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
-{
-
- return (__swp(__SIMPLELOCK_LOCKED, alp) == __SIMPLELOCK_UNLOCKED);
-}
-
-static __inline void __attribute__((__unused__))
-__cpu_simple_unlock(__cpu_simple_lock_t *alp)
-{
-
- *alp = __SIMPLELOCK_UNLOCKED;
-}
#endif /* _ARM_LOCK_H_ */
Index: xscale/pxa2x0_intr.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/xscale/pxa2x0_intr.c,v
retrieving revision 1.27
diff -u -p -r1.27 pxa2x0_intr.c
--- xscale/pxa2x0_intr.c 31 Jan 2016 00:14:50 -0000 1.27
+++ xscale/pxa2x0_intr.c 24 Mar 2016 08:39:37 -0000
@@ -48,7 +48,6 @@
#include <machine/bus.h>
#include <machine/intr.h>
-#include <machine/lock.h>
#include <arm/xscale/pxa2x0reg.h>
#include <arm/xscale/pxa2x0var.h>
@@ -448,17 +447,19 @@ pxa2x0_init_interrupt_masks(void)
void
pxa2x0_do_pending(void)
{
- static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
+ static int processing = 0;
int oldirqstate, spl_save;
- if (__cpu_simple_lock_try(&processing) == 0)
- return;
+ oldirqstate = disable_interrupts(PSR_I);
spl_save = current_spl_level;
- oldirqstate = disable_interrupts(PSR_I);
+ if (processing == 1) {
+ restore_interrupts(oldirqstate);
+ return;
+ }
+ processing = 1;
-#if 1
#define DO_SOFTINT(si,ipl) \
if ((softint_pending & pxa2x0_imask[current_spl_level]) & \
SI_TO_IRQBIT(si)) { \
@@ -477,23 +477,10 @@ pxa2x0_do_pending(void)
DO_SOFTINT(SI_SOFTCLOCK, IPL_SOFTCLOCK);
DO_SOFTINT(SI_SOFT, IPL_SOFT);
} while( softint_pending & pxa2x0_imask[current_spl_level] );
-#else
- while( (si = find_first_bit(softint_pending &
- pxa2x0_imask[current_spl_level])) >= 0 ){
- softint_pending &= ~SI_TO_IRQBIT(si);
- if (current_spl_level < ipl)
- pxa2x0_setipl(ipl);
- restore_interrupts(oldirqstate);
- softintr_dispatch(si);
- oldirqstate = disable_interrupts(PSR_I);
- pxa2x0_setipl(spl_save);
- }
-#endif
-
- __cpu_simple_unlock(&processing);
+ processing = 0;
restore_interrupts(oldirqstate);
}
-
#undef splx
void