Alexander Kabaev schrieb:
On Sat, 11 Apr 2009 09:35:28 -0700
Steve Kargl <s...@troutmask.apl.washington.edu> wrote:
On Sat, Apr 11, 2009 at 02:01:01PM +0000, Ed Schouten wrote:
Author: ed
Date: Sat Apr 11 14:01:01 2009
New Revision: 190919
URL: http://svn.freebsd.org/changeset/base/190919
Log:
Simplify in/out functions (for i386 and AMD64).
Remove a hack to generate more efficient code for port numbers
below 0x100, which has been obsolete for at least ten years,
because GCC has an asm constraint to specify that.
Submitted by: Christoph Mallon <christoph mallon gmx de>
I thought Christoph and bde were still hashing out the correctness
of this patch.
http://lists.freebsd.org/pipermail/freebsd-amd64/2009-April/012064.html
--
Steve
The patch is inconsistent in regards to usage of volatile vs.
__volatile even within itself. I think the code is sloppy and was not
ready to be committed yet. Please fix or back out.
Backing it out because of two underscores (!) would be
counterproductive: It removes about 150 lines of hard to read hacks,
which have been unnecessary for at least a decade. GCC 2.95, which was
released in 1999 supports the "N" constraint for inline asm. Perhaps
older versions do, too, but you cannot get older GCCs from the official site.
Attached is a patch, which replaces all __inline and __volatile in the
touched headers by their ISO equivalents - again there hasn't been a
reason to use the alternate GCC keywords for at least a decade. Also
"inline" and "volatile" are already used hundreds of times in sys/. The
patch is simply the result of %s/\<__\(inline\|volatile\)\>/\1/.
Christoph
Index: i386/include/cpufunc.h
===================================================================
--- i386/include/cpufunc.h (Revision 190919)
+++ i386/include/cpufunc.h (Arbeitskopie)
@@ -65,84 +65,84 @@
#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)
-static __inline void
+static inline void
breakpoint(void)
{
- __asm __volatile("int $3");
+ __asm volatile("int $3");
}
-static __inline u_int
+static inline u_int
bsfl(u_int mask)
{
u_int result;
- __asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
+ __asm volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
return (result);
}
-static __inline u_int
+static inline u_int
bsrl(u_int mask)
{
u_int result;
- __asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
+ __asm volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
return (result);
}
-static __inline void
+static inline void
disable_intr(void)
{
#ifdef XEN
xen_cli();
#else
- __asm __volatile("cli" : : : "memory");
+ __asm volatile("cli" : : : "memory");
#endif
}
-static __inline void
+static inline void
do_cpuid(u_int ax, u_int *p)
{
- __asm __volatile("cpuid"
+ __asm volatile("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax));
}
-static __inline void
+static inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
- __asm __volatile("cpuid"
+ __asm volatile("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax), "c" (cx));
}
-static __inline void
+static inline void
enable_intr(void)
{
#ifdef XEN
xen_sti();
#else
- __asm __volatile("sti");
+ __asm volatile("sti");
#endif
}
static inline void
cpu_monitor(const void *addr, int extensions, int hints)
{
- __asm __volatile("monitor;"
+ __asm volatile("monitor;"
: :"a" (addr), "c" (extensions), "d"(hints));
}
static inline void
cpu_mwait(int extensions, int hints)
{
- __asm __volatile("mwait;" : :"a" (hints), "c" (extensions));
+ __asm volatile("mwait;" : :"a" (hints), "c" (extensions));
}
#ifdef _KERNEL
#define HAVE_INLINE_FFS
-static __inline int
+static inline int
ffs(int mask)
{
/*
@@ -156,7 +156,7 @@
#define HAVE_INLINE_FLS
-static __inline int
+static inline int
fls(int mask)
{
return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
@@ -164,13 +164,13 @@
#endif /* _KERNEL */
-static __inline void
+static inline void
halt(void)
{
- __asm __volatile("hlt");
+ __asm volatile("hlt");
}
-static __inline u_char
+static inline u_char
inb(u_int port)
{
u_char data;
@@ -179,7 +179,7 @@
return (data);
}
-static __inline u_int
+static inline u_int
inl(u_int port)
{
u_int data;
@@ -188,40 +188,40 @@
return (data);
}
-static __inline void
+static inline void
insb(u_int port, void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; insb"
+ __asm volatile("cld; rep; insb"
: "+D" (addr), "+c" (cnt)
: "d" (port)
: "memory");
}
-static __inline void
+static inline void
insw(u_int port, void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; insw"
+ __asm volatile("cld; rep; insw"
: "+D" (addr), "+c" (cnt)
: "d" (port)
: "memory");
}
-static __inline void
+static inline void
insl(u_int port, void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; insl"
+ __asm volatile("cld; rep; insl"
: "+D" (addr), "+c" (cnt)
: "d" (port)
: "memory");
}
-static __inline void
+static inline void
invd(void)
{
- __asm __volatile("invd");
+ __asm volatile("invd");
}
-static __inline u_short
+static inline u_short
inw(u_int port)
{
u_short data;
@@ -230,125 +230,125 @@
return (data);
}
-static __inline void
+static inline void
outb(u_int port, u_char data)
{
- __asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
+ __asm volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
-static __inline void
+static inline void
outl(u_int port, u_int data)
{
__asm volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}
-static __inline void
+static inline void
outsb(u_int port, const void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; outsb"
+ __asm volatile("cld; rep; outsb"
: "+S" (addr), "+c" (cnt)
: "d" (port));
}
-static __inline void
+static inline void
outsw(u_int port, const void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; outsw"
+ __asm volatile("cld; rep; outsw"
: "+S" (addr), "+c" (cnt)
: "d" (port));
}
-static __inline void
+static inline void
outsl(u_int port, const void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; outsl"
+ __asm volatile("cld; rep; outsl"
: "+S" (addr), "+c" (cnt)
: "d" (port));
}
-static __inline void
+static inline void
outw(u_int port, u_short data)
{
__asm volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
-static __inline void
+static inline void
ia32_pause(void)
{
- __asm __volatile("pause");
+ __asm volatile("pause");
}
-static __inline u_int
+static inline u_int
read_eflags(void)
{
u_int ef;
- __asm __volatile("pushfl; popl %0" : "=r" (ef));
+ __asm volatile("pushfl; popl %0" : "=r" (ef));
return (ef);
}
-static __inline uint64_t
+static inline uint64_t
rdmsr(u_int msr)
{
uint64_t rv;
- __asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
+ __asm volatile("rdmsr" : "=A" (rv) : "c" (msr));
return (rv);
}
-static __inline uint64_t
+static inline uint64_t
rdpmc(u_int pmc)
{
uint64_t rv;
- __asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
+ __asm volatile("rdpmc" : "=A" (rv) : "c" (pmc));
return (rv);
}
-static __inline uint64_t
+static inline uint64_t
rdtsc(void)
{
uint64_t rv;
- __asm __volatile("rdtsc" : "=A" (rv));
+ __asm volatile("rdtsc" : "=A" (rv));
return (rv);
}
-static __inline void
+static inline void
wbinvd(void)
{
- __asm __volatile("wbinvd");
+ __asm volatile("wbinvd");
}
-static __inline void
+static inline void
write_eflags(u_int ef)
{
- __asm __volatile("pushl %0; popfl" : : "r" (ef));
+ __asm volatile("pushl %0; popfl" : : "r" (ef));
}
-static __inline void
+static inline void
wrmsr(u_int msr, uint64_t newval)
{
- __asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
+ __asm volatile("wrmsr" : : "A" (newval), "c" (msr));
}
-static __inline void
+static inline void
load_cr0(u_int data)
{
- __asm __volatile("movl %0,%%cr0" : : "r" (data));
+ __asm volatile("movl %0,%%cr0" : : "r" (data));
}
-static __inline u_int
+static inline u_int
rcr0(void)
{
u_int data;
- __asm __volatile("movl %%cr0,%0" : "=r" (data));
+ __asm volatile("movl %%cr0,%0" : "=r" (data));
return (data);
}
-static __inline u_int
+static inline u_int
rcr2(void)
{
u_int data;
@@ -356,48 +356,48 @@
#ifdef XEN
return (xen_rcr2());
#endif
- __asm __volatile("movl %%cr2,%0" : "=r" (data));
+ __asm volatile("movl %%cr2,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_cr3(u_int data)
{
#ifdef XEN
xen_load_cr3(data);
#else
- __asm __volatile("movl %0,%%cr3" : : "r" (data) : "memory");
+ __asm volatile("movl %0,%%cr3" : : "r" (data) : "memory");
#endif
}
-static __inline u_int
+static inline u_int
rcr3(void)
{
u_int data;
- __asm __volatile("movl %%cr3,%0" : "=r" (data));
+ __asm volatile("movl %%cr3,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_cr4(u_int data)
{
- __asm __volatile("movl %0,%%cr4" : : "r" (data));
+ __asm volatile("movl %0,%%cr4" : : "r" (data));
}
-static __inline u_int
+static inline u_int
rcr4(void)
{
u_int data;
- __asm __volatile("movl %%cr4,%0" : "=r" (data));
+ __asm volatile("movl %%cr4,%0" : "=r" (data));
return (data);
}
/*
* Global TLB flush (except for thise for pages marked PG_G)
*/
-static __inline void
+static inline void
invltlb(void)
{
#ifdef XEN
@@ -411,216 +411,216 @@
* TLB flush for an individual page (even if it has PG_G).
* Only works on 486+ CPUs (i386 does not have PG_G).
*/
-static __inline void
+static inline void
invlpg(u_int addr)
{
#ifdef XEN
xen_invlpg(addr);
#else
- __asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
+ __asm volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
#endif
}
-static __inline u_int
+static inline u_int
rfs(void)
{
u_int sel;
- __asm __volatile("mov %%fs,%0" : "=rm" (sel));
+ __asm volatile("mov %%fs,%0" : "=rm" (sel));
return (sel);
}
-static __inline uint64_t
+static inline uint64_t
rgdt(void)
{
uint64_t gdtr;
- __asm __volatile("sgdt %0" : "=m" (gdtr));
+ __asm volatile("sgdt %0" : "=m" (gdtr));
return (gdtr);
}
-static __inline u_int
+static inline u_int
rgs(void)
{
u_int sel;
- __asm __volatile("mov %%gs,%0" : "=rm" (sel));
+ __asm volatile("mov %%gs,%0" : "=rm" (sel));
return (sel);
}
-static __inline uint64_t
+static inline uint64_t
ridt(void)
{
uint64_t idtr;
- __asm __volatile("sidt %0" : "=m" (idtr));
+ __asm volatile("sidt %0" : "=m" (idtr));
return (idtr);
}
-static __inline u_short
+static inline u_short
rldt(void)
{
u_short ldtr;
- __asm __volatile("sldt %0" : "=g" (ldtr));
+ __asm volatile("sldt %0" : "=g" (ldtr));
return (ldtr);
}
-static __inline u_int
+static inline u_int
rss(void)
{
u_int sel;
- __asm __volatile("mov %%ss,%0" : "=rm" (sel));
+ __asm volatile("mov %%ss,%0" : "=rm" (sel));
return (sel);
}
-static __inline u_short
+static inline u_short
rtr(void)
{
u_short tr;
- __asm __volatile("str %0" : "=g" (tr));
+ __asm volatile("str %0" : "=g" (tr));
return (tr);
}
-static __inline void
+static inline void
load_fs(u_int sel)
{
- __asm __volatile("mov %0,%%fs" : : "rm" (sel));
+ __asm volatile("mov %0,%%fs" : : "rm" (sel));
}
-static __inline void
+static inline void
load_gs(u_int sel)
{
- __asm __volatile("mov %0,%%gs" : : "rm" (sel));
+ __asm volatile("mov %0,%%gs" : : "rm" (sel));
}
-static __inline void
+static inline void
lidt(struct region_descriptor *addr)
{
- __asm __volatile("lidt (%0)" : : "r" (addr));
+ __asm volatile("lidt (%0)" : : "r" (addr));
}
-static __inline void
+static inline void
lldt(u_short sel)
{
- __asm __volatile("lldt %0" : : "r" (sel));
+ __asm volatile("lldt %0" : : "r" (sel));
}
-static __inline void
+static inline void
ltr(u_short sel)
{
- __asm __volatile("ltr %0" : : "r" (sel));
+ __asm volatile("ltr %0" : : "r" (sel));
}
-static __inline u_int
+static inline u_int
rdr0(void)
{
u_int data;
- __asm __volatile("movl %%dr0,%0" : "=r" (data));
+ __asm volatile("movl %%dr0,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr0(u_int dr0)
{
- __asm __volatile("movl %0,%%dr0" : : "r" (dr0));
+ __asm volatile("movl %0,%%dr0" : : "r" (dr0));
}
-static __inline u_int
+static inline u_int
rdr1(void)
{
u_int data;
- __asm __volatile("movl %%dr1,%0" : "=r" (data));
+ __asm volatile("movl %%dr1,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr1(u_int dr1)
{
- __asm __volatile("movl %0,%%dr1" : : "r" (dr1));
+ __asm volatile("movl %0,%%dr1" : : "r" (dr1));
}
-static __inline u_int
+static inline u_int
rdr2(void)
{
u_int data;
- __asm __volatile("movl %%dr2,%0" : "=r" (data));
+ __asm volatile("movl %%dr2,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr2(u_int dr2)
{
- __asm __volatile("movl %0,%%dr2" : : "r" (dr2));
+ __asm volatile("movl %0,%%dr2" : : "r" (dr2));
}
-static __inline u_int
+static inline u_int
rdr3(void)
{
u_int data;
- __asm __volatile("movl %%dr3,%0" : "=r" (data));
+ __asm volatile("movl %%dr3,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr3(u_int dr3)
{
- __asm __volatile("movl %0,%%dr3" : : "r" (dr3));
+ __asm volatile("movl %0,%%dr3" : : "r" (dr3));
}
-static __inline u_int
+static inline u_int
rdr4(void)
{
u_int data;
- __asm __volatile("movl %%dr4,%0" : "=r" (data));
+ __asm volatile("movl %%dr4,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr4(u_int dr4)
{
- __asm __volatile("movl %0,%%dr4" : : "r" (dr4));
+ __asm volatile("movl %0,%%dr4" : : "r" (dr4));
}
-static __inline u_int
+static inline u_int
rdr5(void)
{
u_int data;
- __asm __volatile("movl %%dr5,%0" : "=r" (data));
+ __asm volatile("movl %%dr5,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr5(u_int dr5)
{
- __asm __volatile("movl %0,%%dr5" : : "r" (dr5));
+ __asm volatile("movl %0,%%dr5" : : "r" (dr5));
}
-static __inline u_int
+static inline u_int
rdr6(void)
{
u_int data;
- __asm __volatile("movl %%dr6,%0" : "=r" (data));
+ __asm volatile("movl %%dr6,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr6(u_int dr6)
{
- __asm __volatile("movl %0,%%dr6" : : "r" (dr6));
+ __asm volatile("movl %0,%%dr6" : : "r" (dr6));
}
-static __inline u_int
+static inline u_int
rdr7(void)
{
u_int data;
- __asm __volatile("movl %%dr7,%0" : "=r" (data));
+ __asm volatile("movl %%dr7,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr7(u_int dr7)
{
- __asm __volatile("movl %0,%%dr7" : : "r" (dr7));
+ __asm volatile("movl %0,%%dr7" : : "r" (dr7));
}
-static __inline register_t
+static inline register_t
intr_disable(void)
{
register_t eflags;
@@ -634,7 +634,7 @@
return (eflags);
}
-static __inline void
+static inline void
intr_restore(register_t eflags)
{
#ifdef XEN
Index: amd64/include/cpufunc.h
===================================================================
--- amd64/include/cpufunc.h (Revision 190919)
+++ amd64/include/cpufunc.h (Arbeitskopie)
@@ -57,74 +57,74 @@
#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)
-static __inline void
+static inline void
breakpoint(void)
{
- __asm __volatile("int $3");
+ __asm volatile("int $3");
}
-static __inline u_int
+static inline u_int
bsfl(u_int mask)
{
u_int result;
- __asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
+ __asm volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
return (result);
}
-static __inline u_long
+static inline u_long
bsfq(u_long mask)
{
u_long result;
- __asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
+ __asm volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
return (result);
}
-static __inline u_int
+static inline u_int
bsrl(u_int mask)
{
u_int result;
- __asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
+ __asm volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
return (result);
}
-static __inline u_long
+static inline u_long
bsrq(u_long mask)
{
u_long result;
- __asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
+ __asm volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
return (result);
}
-static __inline void
+static inline void
disable_intr(void)
{
- __asm __volatile("cli" : : : "memory");
+ __asm volatile("cli" : : : "memory");
}
-static __inline void
+static inline void
do_cpuid(u_int ax, u_int *p)
{
- __asm __volatile("cpuid"
+ __asm volatile("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax));
}
-static __inline void
+static inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
- __asm __volatile("cpuid"
+ __asm volatile("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax), "c" (cx));
}
-static __inline void
+static inline void
enable_intr(void)
{
- __asm __volatile("sti");
+ __asm volatile("sti");
}
#ifdef _KERNEL
@@ -134,7 +134,7 @@
#define HAVE_INLINE_FFSL
-static __inline int
+static inline int
ffsl(long mask)
{
return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
@@ -142,7 +142,7 @@
#define HAVE_INLINE_FLS
-static __inline int
+static inline int
fls(int mask)
{
return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
@@ -150,7 +150,7 @@
#define HAVE_INLINE_FLSL
-static __inline int
+static inline int
flsl(long mask)
{
return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
@@ -158,13 +158,13 @@
#endif /* _KERNEL */
-static __inline void
+static inline void
halt(void)
{
- __asm __volatile("hlt");
+ __asm volatile("hlt");
}
-static __inline u_char
+static inline u_char
inb(u_int port)
{
u_char data;
@@ -173,7 +173,7 @@
return (data);
}
-static __inline u_int
+static inline u_int
inl(u_int port)
{
u_int data;
@@ -182,40 +182,40 @@
return (data);
}
-static __inline void
+static inline void
insb(u_int port, void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; insb"
+ __asm volatile("cld; rep; insb"
: "+D" (addr), "+c" (cnt)
: "d" (port)
: "memory");
}
-static __inline void
+static inline void
insw(u_int port, void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; insw"
+ __asm volatile("cld; rep; insw"
: "+D" (addr), "+c" (cnt)
: "d" (port)
: "memory");
}
-static __inline void
+static inline void
insl(u_int port, void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; insl"
+ __asm volatile("cld; rep; insl"
: "+D" (addr), "+c" (cnt)
: "d" (port)
: "memory");
}
-static __inline void
+static inline void
invd(void)
{
- __asm __volatile("invd");
+ __asm volatile("invd");
}
-static __inline u_short
+static inline u_short
inw(u_int port)
{
u_short data;
@@ -224,172 +224,172 @@
return (data);
}
-static __inline void
+static inline void
outb(u_int port, u_char data)
{
__asm volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
-static __inline void
+static inline void
outl(u_int port, u_int data)
{
__asm volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}
-static __inline void
+static inline void
outsb(u_int port, const void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; outsb"
+ __asm volatile("cld; rep; outsb"
: "+S" (addr), "+c" (cnt)
: "d" (port));
}
-static __inline void
+static inline void
outsw(u_int port, const void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; outsw"
+ __asm volatile("cld; rep; outsw"
: "+S" (addr), "+c" (cnt)
: "d" (port));
}
-static __inline void
+static inline void
outsl(u_int port, const void *addr, size_t cnt)
{
- __asm __volatile("cld; rep; outsl"
+ __asm volatile("cld; rep; outsl"
: "+S" (addr), "+c" (cnt)
: "d" (port));
}
-static __inline void
+static inline void
outw(u_int port, u_short data)
{
__asm volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
-static __inline void
+static inline void
ia32_pause(void)
{
- __asm __volatile("pause");
+ __asm volatile("pause");
}
-static __inline u_long
+static inline u_long
read_rflags(void)
{
u_long rf;
- __asm __volatile("pushfq; popq %0" : "=r" (rf));
+ __asm volatile("pushfq; popq %0" : "=r" (rf));
return (rf);
}
-static __inline u_int64_t
+static inline u_int64_t
rdmsr(u_int msr)
{
u_int32_t low, high;
- __asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
+ __asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
return (low | ((u_int64_t)high << 32));
}
-static __inline u_int64_t
+static inline u_int64_t
rdpmc(u_int pmc)
{
u_int32_t low, high;
- __asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
+ __asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
return (low | ((u_int64_t)high << 32));
}
-static __inline u_int64_t
+static inline u_int64_t
rdtsc(void)
{
u_int32_t low, high;
- __asm __volatile("rdtsc" : "=a" (low), "=d" (high));
+ __asm volatile("rdtsc" : "=a" (low), "=d" (high));
return (low | ((u_int64_t)high << 32));
}
-static __inline void
+static inline void
wbinvd(void)
{
- __asm __volatile("wbinvd");
+ __asm volatile("wbinvd");
}
-static __inline void
+static inline void
write_rflags(u_long rf)
{
- __asm __volatile("pushq %0; popfq" : : "r" (rf));
+ __asm volatile("pushq %0; popfq" : : "r" (rf));
}
-static __inline void
+static inline void
wrmsr(u_int msr, u_int64_t newval)
{
u_int32_t low, high;
low = newval;
high = newval >> 32;
- __asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
+ __asm volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
-static __inline void
+static inline void
load_cr0(u_long data)
{
- __asm __volatile("movq %0,%%cr0" : : "r" (data));
+ __asm volatile("movq %0,%%cr0" : : "r" (data));
}
-static __inline u_long
+static inline u_long
rcr0(void)
{
u_long data;
- __asm __volatile("movq %%cr0,%0" : "=r" (data));
+ __asm volatile("movq %%cr0,%0" : "=r" (data));
return (data);
}
-static __inline u_long
+static inline u_long
rcr2(void)
{
u_long data;
- __asm __volatile("movq %%cr2,%0" : "=r" (data));
+ __asm volatile("movq %%cr2,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_cr3(u_long data)
{
- __asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
+ __asm volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}
-static __inline u_long
+static inline u_long
rcr3(void)
{
u_long data;
- __asm __volatile("movq %%cr3,%0" : "=r" (data));
+ __asm volatile("movq %%cr3,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_cr4(u_long data)
{
- __asm __volatile("movq %0,%%cr4" : : "r" (data));
+ __asm volatile("movq %0,%%cr4" : : "r" (data));
}
-static __inline u_long
+static inline u_long
rcr4(void)
{
u_long data;
- __asm __volatile("movq %%cr4,%0" : "=r" (data));
+ __asm volatile("movq %%cr4,%0" : "=r" (data));
return (data);
}
/*
* Global TLB flush (except for thise for pages marked PG_G)
*/
-static __inline void
+static inline void
invltlb(void)
{
@@ -400,60 +400,60 @@
* TLB flush for an individual page (even if it has PG_G).
* Only works on 486+ CPUs (i386 does not have PG_G).
*/
-static __inline void
+static inline void
invlpg(u_long addr)
{
- __asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
+ __asm volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}
-static __inline u_int
+static inline u_int
rfs(void)
{
u_int sel;
- __asm __volatile("mov %%fs,%0" : "=rm" (sel));
+ __asm volatile("mov %%fs,%0" : "=rm" (sel));
return (sel);
}
-static __inline u_int
+static inline u_int
rgs(void)
{
u_int sel;
- __asm __volatile("mov %%gs,%0" : "=rm" (sel));
+ __asm volatile("mov %%gs,%0" : "=rm" (sel));
return (sel);
}
-static __inline u_int
+static inline u_int
rss(void)
{
u_int sel;
- __asm __volatile("mov %%ss,%0" : "=rm" (sel));
+ __asm volatile("mov %%ss,%0" : "=rm" (sel));
return (sel);
}
-static __inline void
+static inline void
load_ds(u_int sel)
{
- __asm __volatile("mov %0,%%ds" : : "rm" (sel));
+ __asm volatile("mov %0,%%ds" : : "rm" (sel));
}
-static __inline void
+static inline void
load_es(u_int sel)
{
- __asm __volatile("mov %0,%%es" : : "rm" (sel));
+ __asm volatile("mov %0,%%es" : : "rm" (sel));
}
static inline void
cpu_monitor(const void *addr, int extensions, int hints)
{
- __asm __volatile("monitor;"
+ __asm volatile("monitor;"
: :"a" (addr), "c" (extensions), "d"(hints));
}
static inline void
cpu_mwait(int extensions, int hints)
{
- __asm __volatile("mwait;" : :"a" (hints), "c" (extensions));
+ __asm volatile("mwait;" : :"a" (hints), "c" (extensions));
}
#ifdef _KERNEL
@@ -461,18 +461,18 @@
#ifndef MSR_FSBASE
#define MSR_FSBASE 0xc0000100
#endif
-static __inline void
+static inline void
load_fs(u_int sel)
{
/* Preserve the fsbase value across the selector load */
- __asm __volatile("rdmsr; mov %0,%%fs; wrmsr"
+ __asm volatile("rdmsr; mov %0,%%fs; wrmsr"
: : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}
#ifndef MSR_GSBASE
#define MSR_GSBASE 0xc0000101
#endif
-static __inline void
+static inline void
load_gs(u_int sel)
{
/*
@@ -480,155 +480,155 @@
* Note that we have to disable interrupts because the gsbase
* being trashed happens to be the kernel gsbase at the time.
*/
- __asm __volatile("pushfq; cli; rdmsr; mov %0,%%gs; wrmsr; popfq"
+ __asm volatile("pushfq; cli; rdmsr; mov %0,%%gs; wrmsr; popfq"
: : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
-static __inline void
+static inline void
load_fs(u_int sel)
{
- __asm __volatile("mov %0,%%fs" : : "rm" (sel));
+ __asm volatile("mov %0,%%fs" : : "rm" (sel));
}
-static __inline void
+static inline void
load_gs(u_int sel)
{
- __asm __volatile("mov %0,%%gs" : : "rm" (sel));
+ __asm volatile("mov %0,%%gs" : : "rm" (sel));
}
#endif
-static __inline void
+static inline void
lidt(struct region_descriptor *addr)
{
- __asm __volatile("lidt (%0)" : : "r" (addr));
+ __asm volatile("lidt (%0)" : : "r" (addr));
}
-static __inline void
+static inline void
lldt(u_short sel)
{
- __asm __volatile("lldt %0" : : "r" (sel));
+ __asm volatile("lldt %0" : : "r" (sel));
}
-static __inline void
+static inline void
ltr(u_short sel)
{
- __asm __volatile("ltr %0" : : "r" (sel));
+ __asm volatile("ltr %0" : : "r" (sel));
}
-static __inline u_int64_t
+static inline u_int64_t
rdr0(void)
{
u_int64_t data;
- __asm __volatile("movq %%dr0,%0" : "=r" (data));
+ __asm volatile("movq %%dr0,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr0(u_int64_t dr0)
{
- __asm __volatile("movq %0,%%dr0" : : "r" (dr0));
+ __asm volatile("movq %0,%%dr0" : : "r" (dr0));
}
-static __inline u_int64_t
+static inline u_int64_t
rdr1(void)
{
u_int64_t data;
- __asm __volatile("movq %%dr1,%0" : "=r" (data));
+ __asm volatile("movq %%dr1,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr1(u_int64_t dr1)
{
- __asm __volatile("movq %0,%%dr1" : : "r" (dr1));
+ __asm volatile("movq %0,%%dr1" : : "r" (dr1));
}
-static __inline u_int64_t
+static inline u_int64_t
rdr2(void)
{
u_int64_t data;
- __asm __volatile("movq %%dr2,%0" : "=r" (data));
+ __asm volatile("movq %%dr2,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr2(u_int64_t dr2)
{
- __asm __volatile("movq %0,%%dr2" : : "r" (dr2));
+ __asm volatile("movq %0,%%dr2" : : "r" (dr2));
}
-static __inline u_int64_t
+static inline u_int64_t
rdr3(void)
{
u_int64_t data;
- __asm __volatile("movq %%dr3,%0" : "=r" (data));
+ __asm volatile("movq %%dr3,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr3(u_int64_t dr3)
{
- __asm __volatile("movq %0,%%dr3" : : "r" (dr3));
+ __asm volatile("movq %0,%%dr3" : : "r" (dr3));
}
-static __inline u_int64_t
+static inline u_int64_t
rdr4(void)
{
u_int64_t data;
- __asm __volatile("movq %%dr4,%0" : "=r" (data));
+ __asm volatile("movq %%dr4,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr4(u_int64_t dr4)
{
- __asm __volatile("movq %0,%%dr4" : : "r" (dr4));
+ __asm volatile("movq %0,%%dr4" : : "r" (dr4));
}
-static __inline u_int64_t
+static inline u_int64_t
rdr5(void)
{
u_int64_t data;
- __asm __volatile("movq %%dr5,%0" : "=r" (data));
+ __asm volatile("movq %%dr5,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr5(u_int64_t dr5)
{
- __asm __volatile("movq %0,%%dr5" : : "r" (dr5));
+ __asm volatile("movq %0,%%dr5" : : "r" (dr5));
}
-static __inline u_int64_t
+static inline u_int64_t
rdr6(void)
{
u_int64_t data;
- __asm __volatile("movq %%dr6,%0" : "=r" (data));
+ __asm volatile("movq %%dr6,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr6(u_int64_t dr6)
{
- __asm __volatile("movq %0,%%dr6" : : "r" (dr6));
+ __asm volatile("movq %0,%%dr6" : : "r" (dr6));
}
-static __inline u_int64_t
+static inline u_int64_t
rdr7(void)
{
u_int64_t data;
- __asm __volatile("movq %%dr7,%0" : "=r" (data));
+ __asm volatile("movq %%dr7,%0" : "=r" (data));
return (data);
}
-static __inline void
+static inline void
load_dr7(u_int64_t dr7)
{
- __asm __volatile("movq %0,%%dr7" : : "r" (dr7));
+ __asm volatile("movq %0,%%dr7" : : "r" (dr7));
}
-static __inline register_t
+static inline register_t
intr_disable(void)
{
register_t rflags;
@@ -638,7 +638,7 @@
return (rflags);
}
-static __inline void
+static inline void
intr_restore(register_t rflags)
{
write_rflags(rflags);
_______________________________________________
svn-src-all@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscr...@freebsd.org"