Patches contributed by Eötvös Loránd University
commit f2633105cd92b793dd6a6f623b4140287d46160a
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:32:36 2008 +0100
x86: debug: double-check the empty zero page
temporary debugging - remove before this hits v2.6.25.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 6c3f6eb1f790..05f12c527b02 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -497,6 +497,14 @@ void __init mem_init(void)
/* clear_bss() already clear the empty_zero_page */
+ /* temporary debugging - double check it's true: */
+ {
+ int i;
+
+ for (i = 0; i < 1024; i++)
+ WARN_ON_ONCE(empty_zero_page[i]);
+ }
+
reservedpages = 0;
/* this will put all low memory onto the freelists */
commit ff3cf856120743c7386e8f6ab9f08e068886ce5c
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:32:31 2008 +0100
x86: hlt on early crash
H. Peter Anvin <hpa@zytor.com> wrote:
> It probably should actually HLT, to avoid sucking power, and stressing
> the thermal system. We're dead at this point, and the early 486's
> which had problems with HLT will lock up - we don't care.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/boot/compressed/misc_32.c b/arch/x86/boot/compressed/misc_32.c
index 288e16283ef9..9103652058c4 100644
--- a/arch/x86/boot/compressed/misc_32.c
+++ b/arch/x86/boot/compressed/misc_32.c
@@ -339,7 +339,8 @@ static void error(char *x)
putstr(x);
putstr("\n\n -- System halted");
- while(1); /* Halt */
+ while (1)
+ asm("hlt");
}
asmlinkage void decompress_kernel(void *rmode, unsigned long end,
diff --git a/arch/x86/boot/compressed/misc_64.c b/arch/x86/boot/compressed/misc_64.c
index 7d8338e21b00..8494f0dcff21 100644
--- a/arch/x86/boot/compressed/misc_64.c
+++ b/arch/x86/boot/compressed/misc_64.c
@@ -338,7 +338,8 @@ static void error(char *x)
putstr(x);
putstr("\n\n -- System halted");
- while(1); /* Halt */
+ while (1)
+ asm("hlt");
}
asmlinkage void decompress_kernel(void *rmode, unsigned long heap,
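Note: hlt only stops the CPU until the next interrupt (or NMI/SMI), which is why it has to sit inside a loop; at this point in the decompressor nothing useful can wake the machine, so the loop simply puts it back to sleep. A minimal standalone sketch of the same pattern, assuming x86 and GCC-style inline asm as in the hunks above:

    /* Park the CPU instead of spinning in a busy loop. Interrupts are
     * assumed to be off this early in boot, so each hlt sleeps until an
     * NMI/SMI and the loop immediately halts again. */
    static void __attribute__((noreturn)) halt_forever(void)
    {
            for (;;)
                    asm volatile("hlt");
    }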
commit 7d409d6057c7244f8757ce15245f6df27271be0c
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:32:00 2008 +0100
x86: add some pirq debugging
we use a few static mapping rules in our pirq routing functions;
for example, regression f3ac84324fd94 was caused by a pirq that
was out of range of the remapping array. Put in a few
WARN_ON_ONCE() lines so that we get notified about any such
out-of-bounds incidents.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 88d8f5c0ecb5..ee524ca5f8c0 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -200,6 +200,7 @@ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
+ WARN_ON_ONCE(pirq >= 16);
return irqmap[read_config_nybble(router, 0x48, pirq-1)];
}
@@ -207,7 +208,8 @@ static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
{
static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
unsigned int val = irqmap[irq];
-
+
+ WARN_ON_ONCE(pirq >= 16);
if (val) {
write_config_nybble(router, 0x48, pirq-1, val);
return 1;
@@ -257,12 +259,16 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
+
+ WARN_ON_ONCE(pirq >= 5);
return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
}
static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
+
+ WARN_ON_ONCE(pirq >= 5);
write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
return 1;
}
@@ -275,12 +281,16 @@ static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq
static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
+
+ WARN_ON_ONCE(pirq >= 4);
return read_config_nybble(router,0x43, pirqmap[pirq-1]);
}
static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
+
+ WARN_ON_ONCE(pirq >= 4);
write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
return 1;
}
@@ -419,6 +429,7 @@ static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
+ WARN_ON_ONCE(pirq >= 9);
if (pirq > 8) {
printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
return 0;
@@ -428,6 +439,7 @@ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
+ WARN_ON_ONCE(pirq >= 9);
if (pirq > 8) {
printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
return 0;
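Note: the warnings above only report a suspicious pirq; the out-of-range table access itself still happens. A hypothetical stricter variant (illustration only, with a made-up function name, not what the patch does) would reject the index before touching the map, using the fact that WARN_ON_ONCE() returns its condition:

    /* Hypothetical defensive lookup: the 16-entry maps above are driven
     * by pirq-1 style indexing, so only 1..16 makes sense here. */
    static int pirq_demo_get(unsigned int pirq)
    {
            static const unsigned char irqmap[16] = {
                    0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15
            };

            if (WARN_ON_ONCE(pirq < 1 || pirq > ARRAY_SIZE(irqmap)))
                    return 0;       /* no routing information */

            return irqmap[pirq - 1];
    }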
commit aca46ba29298810b329518b96f97ace985027b59
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:31:58 2008 +0100
x86: remove unused include/asm-x86/processor_32/64.h
remove unused include/asm-x86/processor_32/64.h.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
deleted file mode 100644
index 0d6a430b2bc3..000000000000
--- a/include/asm-x86/processor_32.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- */
-
-#ifndef __ASM_I386_PROCESSOR_H
-#define __ASM_I386_PROCESSOR_H
-
-#include <asm/vm86.h>
-#include <asm/math_emu.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/sigcontext.h>
-#include <asm/cpufeature.h>
-#include <asm/msr.h>
-#include <asm/system.h>
-#include <linux/threads.h>
-#include <linux/init.h>
-#include <asm/desc_defs.h>
-
-/*
- * the following now lives in the per cpu area:
- * extern int cpu_llc_id[NR_CPUS];
- */
-DECLARE_PER_CPU(u8, cpu_llc_id);
-
-/*
- * User space process size: 3GB (default).
- */
-#define TASK_SIZE (PAGE_OFFSET)
-
-#define INIT_THREAD { \
- .sp0 = sizeof(init_stack) + (long)&init_stack, \
- .vm86_info = NULL, \
- .sysenter_cs = __KERNEL_CS, \
- .io_bitmap_ptr = NULL, \
- .fs = __KERNEL_PERCPU, \
-}
-
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS { \
- .x86_tss = { \
- .sp0 = sizeof(init_stack) + (long)&init_stack, \
- .ss0 = __KERNEL_DS, \
- .ss1 = __KERNEL_CS, \
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
- }, \
- .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
-}
-
-#define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%gs": :"r" (0)); \
- regs->fs = 0; \
- set_fs(USER_DS); \
- regs->ds = __USER_DS; \
- regs->es = __USER_DS; \
- regs->ss = __USER_DS; \
- regs->cs = __USER_CS; \
- regs->ip = new_eip; \
- regs->sp = new_esp; \
-} while (0)
-
-
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
-#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
-#define KSTK_TOP(info) \
-({ \
- unsigned long *__ptr = (unsigned long *)(info); \
- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
-})
-
-/*
- * The below -8 is to reserve 8 bytes on top of the ring0 stack.
- * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessable even if the CPU haven't stored the SS/ESP registers
- * on the stack (interrupt gate does not save these registers
- * when switching to the same priv ring).
- * Therefore beware: accessing the ss/esp fields of the
- * "struct pt_regs" is possible, but they may contain the
- * completely wrong values.
- */
-#define task_pt_regs(task) \
-({ \
- struct pt_regs *__regs__; \
- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
- __regs__ - 1; \
-})
-
-#define KSTK_ESP(task) (task_pt_regs(task)->sp)
-
-#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
deleted file mode 100644
index 04ce823ea7e6..000000000000
--- a/include/asm-x86/processor_64.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- */
-
-#ifndef __ASM_X86_64_PROCESSOR_H
-#define __ASM_X86_64_PROCESSOR_H
-
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/sigcontext.h>
-#include <asm/cpufeature.h>
-#include <linux/threads.h>
-#include <asm/msr.h>
-#include <asm/current.h>
-#include <asm/system.h>
-#include <linux/personality.h>
-#include <asm/desc_defs.h>
-
-/*
- * User space process size. 47bits minus one guard page.
- */
-#define TASK_SIZE64 (0x800000000000UL - 4096)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
-
-#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
-#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
-
-
-
-DECLARE_PER_CPU(struct orig_ist, orig_ist);
-
-#define INIT_THREAD { \
- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
-#define INIT_TSS { \
- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
-#define start_thread(regs,new_rip,new_rsp) do { \
- asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
- load_gs_index(0); \
- (regs)->ip = (new_rip); \
- (regs)->sp = (new_rsp); \
- write_pda(oldrsp, (new_rsp)); \
- (regs)->cs = __USER_CS; \
- (regs)->ss = __USER_DS; \
- (regs)->flags = 0x200; \
- set_fs(USER_DS); \
-} while(0)
-
-/*
- * Return saved PC of a blocked thread.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- */
-#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
-
-#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
-#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
-
-#endif /* __ASM_X86_64_PROCESSOR_H */
commit 41e191e85a122ad822deb7525a015410012e6c70
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:31:24 2008 +0100
x86: replace outb_p() with udelay(2) in drivers/input/mouse/pc110pad.c
replace outb_p() with udelay(2). This is a real ISA device so it likely
needs this particular delay.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index 8991ab0b4fe3..61cff8374e6c 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -39,6 +39,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -62,8 +63,10 @@ static irqreturn_t pc110pad_interrupt(int irq, void *ptr)
int value = inb_p(pc110pad_io);
int handshake = inb_p(pc110pad_io + 2);
- outb_p(handshake | 1, pc110pad_io + 2);
- outb_p(handshake & ~1, pc110pad_io + 2);
+ outb(handshake | 1, pc110pad_io + 2);
+ udelay(2);
+ outb(handshake & ~1, pc110pad_io + 2);
+ udelay(2);
inb_p(0x64);
pc110pad_data[pc110pad_count++] = value;
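Note: outb_p() pads each port write with a dummy I/O cycle to an unused port; the patch replaces that implicit pause with an explicit ~2us settle time, which is the real intent for this slow ISA device. A sketch of the resulting pattern, with a made-up helper name (the driver does this inline in its interrupt handler, as shown above):

    /* Pulse the pad's handshake bit, letting the ISA hardware settle
     * after each write. Assumes <asm/io.h> and <linux/delay.h>. */
    static void pc110pad_ack(unsigned long io_base, unsigned char handshake)
    {
            outb(handshake | 1, io_base + 2);
            udelay(2);
            outb(handshake & ~1, io_base + 2);
            udelay(2);
    }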
commit 5f561d3be8f0db54f9b4fc5cb5db05343f372431
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:31:17 2008 +0100
x86: dummy placeholder for acpi/reboot.h
dummy placeholder for acpi/reboot.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/include/acpi/reboot.h b/include/acpi/reboot.h
new file mode 100644
index 000000000000..8857f57e0b78
--- /dev/null
+++ b/include/acpi/reboot.h
@@ -0,0 +1,9 @@
+
+/*
+ * Dummy placeholder to make the EFI patches apply to the x86 tree.
+ * Andrew/Len, please just kill this file if you encounter it.
+ */
+#ifndef acpi_reboot
+# define acpi_reboot() do { } while (0)
+#endif
+
commit 2355188570790930718fb72444cddc2959039d9d
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:31:10 2008 +0100
x86: avoid build warning
fix this build warning:
include/asm/topology_32.h: In function 'node_to_first_cpu':
include/asm/topology_32.h:66: warning: unused variable 'mask'
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 85bd790c201e..7047f58306a7 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -218,8 +218,8 @@ int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
#define next_cpu(n, src) __next_cpu((n), &(src))
#else
-#define first_cpu(src) 0
-#define next_cpu(n, src) 1
+#define first_cpu(src) ({ (void)(src); 0; })
+#define next_cpu(n, src) ({ (void)(src); 1; })
#endif
#define cpumask_of_cpu(cpu) \
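Note: the warning came from the uniprocessor variants of first_cpu()/next_cpu() expanding to bare constants, so a cpumask computed only to be handed to them looked unused to gcc. The statement-expression form evaluates the argument and then throws it away. A small user-space sketch of the difference, using the same GCC extension as the patch:

    #define first_cpu_old(src)      0
    #define first_cpu_new(src)      ({ (void)(src); 0; })

    int node_to_first_cpu_demo(void)
    {
            int mask = 42;          /* stands in for the cpumask_t built
                                     * from node_to_cpumask(node) */

            return first_cpu_new(mask);     /* 'mask' now counts as used;
                                             * with first_cpu_old() gcc
                                             * emits -Wunused-variable */
    }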
commit 5fd1fe9c582e00ca0a98f852cd693dc3caf607a0
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:31:09 2008 +0100
x86: clean up drivers/char/rtc.c
tons of style cleanup in drivers/char/rtc.c - no code changed:
   text    data     bss     dec     hex filename
   6400     384      32    6816    1aa0 rtc.o.before
   6400     384      32    6816    1aa0 rtc.o.after
since we seem to have a number of open breakages in this code we might
as well start with making the code more readable and maintainable.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 0c66b802736a..3ac7952fe086 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -1,5 +1,5 @@
/*
- * Real Time Clock interface for Linux
+ * Real Time Clock interface for Linux
*
* Copyright (C) 1996 Paul Gortmaker
*
@@ -17,7 +17,7 @@
* has been received. If a RTC interrupt has already happened,
* it will output an unsigned long and then block. The output value
* contains the interrupt status in the low byte and the number of
- * interrupts since the last read in the remaining high bytes. The
+ * interrupts since the last read in the remaining high bytes. The
* /dev/rtc interface can also be used with the select(2) call.
*
* This program is free software; you can redistribute it and/or
@@ -104,12 +104,12 @@ static int rtc_has_irq = 1;
#ifndef CONFIG_HPET_EMULATE_RTC
#define is_hpet_enabled() 0
-#define hpet_set_alarm_time(hrs, min, sec) 0
-#define hpet_set_periodic_freq(arg) 0
-#define hpet_mask_rtc_irq_bit(arg) 0
-#define hpet_set_rtc_irq_bit(arg) 0
-#define hpet_rtc_timer_init() do { } while (0)
-#define hpet_rtc_dropped_irq() 0
+#define hpet_set_alarm_time(hrs, min, sec) 0
+#define hpet_set_periodic_freq(arg) 0
+#define hpet_mask_rtc_irq_bit(arg) 0
+#define hpet_set_rtc_irq_bit(arg) 0
+#define hpet_rtc_timer_init() do { } while (0)
+#define hpet_rtc_dropped_irq() 0
#ifdef RTC_IRQ
static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
@@ -147,7 +147,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file,
static unsigned int rtc_poll(struct file *file, poll_table *wait);
#endif
-static void get_rtc_alm_time (struct rtc_time *alm_tm);
+static void get_rtc_alm_time(struct rtc_time *alm_tm);
#ifdef RTC_IRQ
static void set_rtc_irq_bit_locked(unsigned char bit);
static void mask_rtc_irq_bit_locked(unsigned char bit);
@@ -185,9 +185,9 @@ static int rtc_proc_open(struct inode *inode, struct file *file);
* rtc_status but before mod_timer is called, which would then reenable the
* timer (but you would need to have an awful timing before you'd trip on it)
*/
-static unsigned long rtc_status = 0; /* bitmapped status byte. */
-static unsigned long rtc_freq = 0; /* Current periodic IRQ rate */
-static unsigned long rtc_irq_data = 0; /* our output to the world */
+static unsigned long rtc_status; /* bitmapped status byte. */
+static unsigned long rtc_freq; /* Current periodic IRQ rate */
+static unsigned long rtc_irq_data; /* our output to the world */
static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
#ifdef RTC_IRQ
@@ -195,7 +195,7 @@ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
* rtc_task_lock nests inside rtc_lock.
*/
static DEFINE_SPINLOCK(rtc_task_lock);
-static rtc_task_t *rtc_callback = NULL;
+static rtc_task_t *rtc_callback;
#endif
/*
@@ -205,7 +205,7 @@ static rtc_task_t *rtc_callback = NULL;
static unsigned long epoch = 1900; /* year corresponding to 0x00 */
-static const unsigned char days_in_mo[] =
+static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
/*
@@ -242,7 +242,7 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
* the last read in the remainder of rtc_irq_data.
*/
- spin_lock (&rtc_lock);
+ spin_lock(&rtc_lock);
rtc_irq_data += 0x100;
rtc_irq_data &= ~0xff;
if (is_hpet_enabled()) {
@@ -259,16 +259,16 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
if (rtc_status & RTC_TIMER_ON)
mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
- spin_unlock (&rtc_lock);
+ spin_unlock(&rtc_lock);
/* Now do the rest of the actions */
spin_lock(&rtc_task_lock);
if (rtc_callback)
rtc_callback->func(rtc_callback->private_data);
spin_unlock(&rtc_task_lock);
- wake_up_interruptible(&rtc_wait);
+ wake_up_interruptible(&rtc_wait);
- kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
+ kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
return IRQ_HANDLED;
}
@@ -335,7 +335,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
DECLARE_WAITQUEUE(wait, current);
unsigned long data;
ssize_t retval;
-
+
if (rtc_has_irq == 0)
return -EIO;
@@ -358,11 +358,11 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
* confusing. And no, xchg() is not the answer. */
__set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irq (&rtc_lock);
+
+ spin_lock_irq(&rtc_lock);
data = rtc_irq_data;
rtc_irq_data = 0;
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
if (data != 0)
break;
@@ -378,10 +378,13 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
schedule();
} while (1);
- if (count == sizeof(unsigned int))
- retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int);
- else
- retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long);
+ if (count == sizeof(unsigned int)) {
+ retval = put_user(data,
+ (unsigned int __user *)buf) ?: sizeof(int);
+ } else {
+ retval = put_user(data,
+ (unsigned long __user *)buf) ?: sizeof(long);
+ }
if (!retval)
retval = count;
out:
@@ -394,7 +397,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
{
- struct rtc_time wtime;
+ struct rtc_time wtime;
#ifdef RTC_IRQ
if (rtc_has_irq == 0) {
@@ -426,35 +429,41 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
}
case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
{
- unsigned long flags; /* can be called from isr via rtc_control() */
- spin_lock_irqsave (&rtc_lock, flags);
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
mask_rtc_irq_bit_locked(RTC_PIE);
if (rtc_status & RTC_TIMER_ON) {
rtc_status &= ~RTC_TIMER_ON;
del_timer(&rtc_irq_timer);
}
- spin_unlock_irqrestore (&rtc_lock, flags);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
return 0;
}
case RTC_PIE_ON: /* Allow periodic ints */
{
- unsigned long flags; /* can be called from isr via rtc_control() */
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
+
/*
* We don't really want Joe User enabling more
* than 64Hz of interrupts on a multi-user machine.
*/
if (!kernel && (rtc_freq > rtc_max_user_freq) &&
- (!capable(CAP_SYS_RESOURCE)))
+ (!capable(CAP_SYS_RESOURCE)))
return -EACCES;
- spin_lock_irqsave (&rtc_lock, flags);
+ spin_lock_irqsave(&rtc_lock, flags);
if (!(rtc_status & RTC_TIMER_ON)) {
mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
2*HZ/100);
rtc_status |= RTC_TIMER_ON;
}
set_rtc_irq_bit_locked(RTC_PIE);
- spin_unlock_irqrestore (&rtc_lock, flags);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
return 0;
}
case RTC_UIE_OFF: /* Mask ints from RTC updates. */
@@ -477,7 +486,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
*/
memset(&wtime, 0, sizeof(struct rtc_time));
get_rtc_alm_time(&wtime);
- break;
+ break;
}
case RTC_ALM_SET: /* Store a time into the alarm */
{
@@ -505,16 +514,21 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
*/
}
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
- RTC_ALWAYS_BCD)
- {
- if (sec < 60) BIN_TO_BCD(sec);
- else sec = 0xff;
-
- if (min < 60) BIN_TO_BCD(min);
- else min = 0xff;
-
- if (hrs < 24) BIN_TO_BCD(hrs);
- else hrs = 0xff;
+ RTC_ALWAYS_BCD) {
+ if (sec < 60)
+ BIN_TO_BCD(sec);
+ else
+ sec = 0xff;
+
+ if (min < 60)
+ BIN_TO_BCD(min);
+ else
+ min = 0xff;
+
+ if (hrs < 24)
+ BIN_TO_BCD(hrs);
+ else
+ hrs = 0xff;
}
CMOS_WRITE(hrs, RTC_HOURS_ALARM);
CMOS_WRITE(min, RTC_MINUTES_ALARM);
@@ -563,11 +577,12 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
return -EINVAL;
-
+
if ((hrs >= 24) || (min >= 60) || (sec >= 60))
return -EINVAL;
- if ((yrs -= epoch) > 255) /* They are unsigned */
+ yrs -= epoch;
+ if (yrs > 255) /* They are unsigned */
return -EINVAL;
spin_lock_irq(&rtc_lock);
@@ -635,9 +650,10 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
{
int tmp = 0;
unsigned char val;
- unsigned long flags; /* can be called from isr via rtc_control() */
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
- /*
+ /*
* The max we can do is 8192Hz.
*/
if ((arg < 2) || (arg > 8192))
@@ -646,7 +662,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
* We don't really want Joe User generating more
* than 64Hz of interrupts on a multi-user machine.
*/
- if (!kernel && (arg > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE)))
+ if (!kernel && (arg > rtc_max_user_freq) &&
+ !capable(CAP_SYS_RESOURCE))
return -EACCES;
while (arg > (1<<tmp))
@@ -674,11 +691,11 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
#endif
case RTC_EPOCH_READ: /* Read the epoch. */
{
- return put_user (epoch, (unsigned long __user *)arg);
+ return put_user(epoch, (unsigned long __user *)arg);
}
case RTC_EPOCH_SET: /* Set the epoch. */
{
- /*
+ /*
* There were no RTC clocks before 1900.
*/
if (arg < 1900)
@@ -693,7 +710,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
default:
return -ENOTTY;
}
- return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
+ return copy_to_user((void __user *)arg,
+ &wtime, sizeof wtime) ? -EFAULT : 0;
}
static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
@@ -712,26 +730,25 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
* needed here. Or anywhere else in this driver. */
static int rtc_open(struct inode *inode, struct file *file)
{
- spin_lock_irq (&rtc_lock);
+ spin_lock_irq(&rtc_lock);
- if(rtc_status & RTC_IS_OPEN)
+ if (rtc_status & RTC_IS_OPEN)
goto out_busy;
rtc_status |= RTC_IS_OPEN;
rtc_irq_data = 0;
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
return 0;
out_busy:
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
return -EBUSY;
}
-static int rtc_fasync (int fd, struct file *filp, int on)
-
+static int rtc_fasync(int fd, struct file *filp, int on)
{
- return fasync_helper (fd, filp, on, &rtc_async_queue);
+ return fasync_helper(fd, filp, on, &rtc_async_queue);
}
static int rtc_release(struct inode *inode, struct file *file)
@@ -762,16 +779,16 @@ static int rtc_release(struct inode *inode, struct file *file)
}
spin_unlock_irq(&rtc_lock);
- if (file->f_flags & FASYNC) {
- rtc_fasync (-1, file, 0);
- }
+ if (file->f_flags & FASYNC)
+ rtc_fasync(-1, file, 0);
no_irq:
#endif
- spin_lock_irq (&rtc_lock);
+ spin_lock_irq(&rtc_lock);
rtc_irq_data = 0;
rtc_status &= ~RTC_IS_OPEN;
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
+
return 0;
}
@@ -786,9 +803,9 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
poll_wait(file, &rtc_wait, wait);
- spin_lock_irq (&rtc_lock);
+ spin_lock_irq(&rtc_lock);
l = rtc_irq_data;
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
if (l != 0)
return POLLIN | POLLRDNORM;
@@ -796,14 +813,6 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
}
#endif
-/*
- * exported stuffs
- */
-
-EXPORT_SYMBOL(rtc_register);
-EXPORT_SYMBOL(rtc_unregister);
-EXPORT_SYMBOL(rtc_control);
-
int rtc_register(rtc_task_t *task)
{
#ifndef RTC_IRQ
@@ -829,6 +838,7 @@ int rtc_register(rtc_task_t *task)
return 0;
#endif
}
+EXPORT_SYMBOL(rtc_register);
int rtc_unregister(rtc_task_t *task)
{
@@ -845,7 +855,7 @@ int rtc_unregister(rtc_task_t *task)
return -ENXIO;
}
rtc_callback = NULL;
-
+
/* disable controls */
if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
tmp = CMOS_READ(RTC_CONTROL);
@@ -865,6 +875,7 @@ int rtc_unregister(rtc_task_t *task)
return 0;
#endif
}
+EXPORT_SYMBOL(rtc_unregister);
int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
{
@@ -883,7 +894,7 @@ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
return rtc_do_ioctl(cmd, arg, 1);
#endif
}
-
+EXPORT_SYMBOL(rtc_control);
/*
* The various file operations we support.
@@ -910,11 +921,11 @@ static struct miscdevice rtc_dev = {
#ifdef CONFIG_PROC_FS
static const struct file_operations rtc_proc_fops = {
- .owner = THIS_MODULE,
- .open = rtc_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .owner = THIS_MODULE,
+ .open = rtc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
#endif
@@ -965,7 +976,7 @@ static int __init rtc_init(void)
#ifdef CONFIG_SPARC32
for_each_ebus(ebus) {
for_each_ebusdev(edev, ebus) {
- if(strcmp(edev->prom_node->name, "rtc") == 0) {
+ if (strcmp(edev->prom_node->name, "rtc") == 0) {
rtc_port = edev->resource[0].start;
rtc_irq = edev->irqs[0];
goto found;
@@ -986,7 +997,8 @@ static int __init rtc_init(void)
* XXX Interrupt pin #7 in Espresso is shared between RTC and
* PCI Slot 2 INTA# (and some INTx# in Slot 1).
*/
- if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) {
+ if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
+ (void *)&rtc_port)) {
rtc_has_irq = 0;
printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
return -EIO;
@@ -1020,11 +1032,13 @@ static int __init rtc_init(void)
rtc_int_handler_ptr = rtc_interrupt;
}
- if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) {
+ if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED,
+ "rtc", NULL)) {
/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
rtc_has_irq = 0;
printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
rtc_release_region();
+
return -EIO;
}
hpet_rtc_timer_init();
@@ -1052,21 +1066,21 @@ static int __init rtc_init(void)
#if defined(__alpha__) || defined(__mips__)
rtc_freq = HZ;
-
+
/* Each operating system on an Alpha uses its own epoch.
Let's try to guess which one we are using now. */
-
+
if (rtc_is_updating() != 0)
msleep(20);
-
+
spin_lock_irq(&rtc_lock);
year = CMOS_READ(RTC_YEAR);
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irq(&rtc_lock);
-
+
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
BCD_TO_BIN(year); /* This should never happen... */
-
+
if (year < 20) {
epoch = 2000;
guess = "SRM (post-2000)";
@@ -1087,7 +1101,8 @@ static int __init rtc_init(void)
#endif
}
if (guess)
- printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch);
+ printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
+ guess, epoch);
#endif
#ifdef RTC_IRQ
if (rtc_has_irq == 0)
@@ -1096,8 +1111,12 @@ static int __init rtc_init(void)
spin_lock_irq(&rtc_lock);
rtc_freq = 1024;
if (!hpet_set_periodic_freq(rtc_freq)) {
- /* Initialize periodic freq. to CMOS reset default, which is 1024Hz */
- CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT);
+ /*
+ * Initialize periodic frequency to CMOS reset default,
+ * which is 1024Hz
+ */
+ CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
+ RTC_FREQ_SELECT);
}
spin_unlock_irq(&rtc_lock);
no_irq2:
@@ -1110,20 +1129,20 @@ static int __init rtc_init(void)
return 0;
}
-static void __exit rtc_exit (void)
+static void __exit rtc_exit(void)
{
cleanup_sysctl();
- remove_proc_entry ("driver/rtc", NULL);
+ remove_proc_entry("driver/rtc", NULL);
misc_deregister(&rtc_dev);
#ifdef CONFIG_SPARC32
if (rtc_has_irq)
- free_irq (rtc_irq, &rtc_port);
+ free_irq(rtc_irq, &rtc_port);
#else
rtc_release_region();
#ifdef RTC_IRQ
if (rtc_has_irq)
- free_irq (RTC_IRQ, NULL);
+ free_irq(RTC_IRQ, NULL);
#endif
#endif /* CONFIG_SPARC32 */
}
@@ -1133,14 +1152,14 @@ module_exit(rtc_exit);
#ifdef RTC_IRQ
/*
- * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
+ * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
* (usually during an IDE disk interrupt, with IRQ unmasking off)
* Since the interrupt handler doesn't get called, the IRQ status
* byte doesn't get read, and the RTC stops generating interrupts.
* A timer is set, and will call this function if/when that happens.
* To get it out of this stalled state, we just read the status.
* At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
- * (You *really* shouldn't be trying to use a non-realtime system
+ * (You *really* shouldn't be trying to use a non-realtime system
* for something that requires a steady > 1KHz signal anyways.)
*/
@@ -1148,7 +1167,7 @@ static void rtc_dropped_irq(unsigned long data)
{
unsigned long freq;
- spin_lock_irq (&rtc_lock);
+ spin_lock_irq(&rtc_lock);
if (hpet_rtc_dropped_irq()) {
spin_unlock_irq(&rtc_lock);
@@ -1167,13 +1186,15 @@ static void rtc_dropped_irq(unsigned long data)
spin_unlock_irq(&rtc_lock);
- if (printk_ratelimit())
- printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq);
+ if (printk_ratelimit()) {
+ printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
+ freq);
+ }
/* Now we have new data */
wake_up_interruptible(&rtc_wait);
- kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
+ kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
}
#endif
@@ -1277,7 +1298,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
* can take just over 2ms. We wait 20ms. There is no need to
* to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
* If you need to know *exactly* when a second has started, enable
- * periodic update complete interrupts, (via ioctl) and then
+ * periodic update complete interrupts, (via ioctl) and then
* immediately read /dev/rtc which will block until you get the IRQ.
* Once the read clears, read the RTC time (again via ioctl). Easy.
*/
@@ -1307,8 +1328,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- {
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
BCD_TO_BIN(rtc_tm->tm_sec);
BCD_TO_BIN(rtc_tm->tm_min);
BCD_TO_BIN(rtc_tm->tm_hour);
@@ -1326,7 +1346,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time;
*/
- if ((rtc_tm->tm_year += (epoch - 1900)) <= 69)
+ rtc_tm->tm_year += epoch - 1900;
+ if (rtc_tm->tm_year <= 69)
rtc_tm->tm_year += 100;
rtc_tm->tm_mon--;
@@ -1347,8 +1368,7 @@ static void get_rtc_alm_time(struct rtc_time *alm_tm)
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irq(&rtc_lock);
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- {
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
BCD_TO_BIN(alm_tm->tm_sec);
BCD_TO_BIN(alm_tm->tm_min);
BCD_TO_BIN(alm_tm->tm_hour);
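Note: most of the diff above is whitespace and bracing, but a few hunks pull assignments out of conditions, such as the epoch check. The two forms below restate that hunk standalone (function names made up for illustration) and behave identically: yrs is unsigned, so years before the epoch wrap around and are rejected by the same > 255 test either way.

    #include <errno.h>      /* the driver uses the kernel's errno values */

    int check_year_old(unsigned long yrs, unsigned long epoch)
    {
            if ((yrs -= epoch) > 255)       /* assignment buried in the test */
                    return -EINVAL;
            return 0;
    }

    int check_year_new(unsigned long yrs, unsigned long epoch)
    {
            yrs -= epoch;
            if (yrs > 255)                  /* they are unsigned */
                    return -EINVAL;
            return 0;
    }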
commit 05fccb0e3840248324a96b320562210410be73dc
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:30:12 2008 +0100
x86: code cleanups in arch/x86/kernel/pci-gart_64.c
code cleanups:
                                           errors   lines of code   errors/KLOC
 arch/x86/kernel/pci-gart_64.c (before)       183             748         244.6
 arch/x86/kernel/pci-gart_64.c (after)          0             790             0
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 06bcba536045..d2b46b489412 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -1,12 +1,12 @@
/*
* Dynamic DMA mapping support for AMD Hammer.
- *
+ *
* Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
* This allows to use PCI devices that only support 32bit addresses on systems
- * with more than 4GB.
+ * with more than 4GB.
*
* See Documentation/DMA-mapping.txt for the interface specification.
- *
+ *
* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU General Public License v2 only.
*/
@@ -37,23 +37,26 @@
#include <asm/k8.h>
static unsigned long iommu_bus_base; /* GART remapping area (physical) */
-static unsigned long iommu_size; /* size of remapping area bytes */
+static unsigned long iommu_size; /* size of remapping area bytes */
static unsigned long iommu_pages; /* .. and in pages */
-static u32 *iommu_gatt_base; /* Remapping table */
+static u32 *iommu_gatt_base; /* Remapping table */
-/* If this is disabled the IOMMU will use an optimized flushing strategy
- of only flushing when an mapping is reused. With it true the GART is flushed
- for every mapping. Problem is that doing the lazy flush seems to trigger
- bugs with some popular PCI cards, in particular 3ware (but has been also
- also seen with Qlogic at least). */
+/*
+ * If this is disabled the IOMMU will use an optimized flushing strategy
+ * of only flushing when an mapping is reused. With it true the GART is
+ * flushed for every mapping. Problem is that doing the lazy flush seems
+ * to trigger bugs with some popular PCI cards, in particular 3ware (but
+ * has been also also seen with Qlogic at least).
+ */
int iommu_fullflush = 1;
-/* Allocation bitmap for the remapping area */
+/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
-static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
+/* Guarded by iommu_bitmap_lock: */
+static unsigned long *iommu_gart_bitmap;
-static u32 gart_unmapped_entry;
+static u32 gart_unmapped_entry;
#define GPTE_VALID 1
#define GPTE_COHERENT 2
@@ -61,10 +64,10 @@ static u32 gart_unmapped_entry;
(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
-#define to_pages(addr,size) \
+#define to_pages(addr, size) \
(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-#define EMERGENCY_PAGES 32 /* = 128KB */
+#define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP
#define AGPEXTERN extern
@@ -77,130 +80,152 @@ AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
-static int need_flush; /* global flush state. set for each gart wrap */
+static int need_flush; /* global flush state. set for each gart wrap */
-static unsigned long alloc_iommu(int size)
-{
+static unsigned long alloc_iommu(int size)
+{
unsigned long offset, flags;
- spin_lock_irqsave(&iommu_bitmap_lock, flags);
- offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
+ spin_lock_irqsave(&iommu_bitmap_lock, flags);
+ offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
+ iommu_pages, size);
if (offset == -1) {
need_flush = 1;
- offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
+ offset = find_next_zero_string(iommu_gart_bitmap, 0,
+ iommu_pages, size);
}
- if (offset != -1) {
- set_bit_string(iommu_gart_bitmap, offset, size);
- next_bit = offset+size;
- if (next_bit >= iommu_pages) {
+ if (offset != -1) {
+ set_bit_string(iommu_gart_bitmap, offset, size);
+ next_bit = offset+size;
+ if (next_bit >= iommu_pages) {
next_bit = 0;
need_flush = 1;
- }
- }
+ }
+ }
if (iommu_fullflush)
need_flush = 1;
- spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
+ spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
+
return offset;
-}
+}
static void free_iommu(unsigned long offset, int size)
-{
+{
unsigned long flags;
+
spin_lock_irqsave(&iommu_bitmap_lock, flags);
__clear_bit_string(iommu_gart_bitmap, offset, size);
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
-}
+}
-/*
+/*
* Use global flush state to avoid races with multiple flushers.
*/
static void flush_gart(void)
-{
+{
unsigned long flags;
+
spin_lock_irqsave(&iommu_bitmap_lock, flags);
if (need_flush) {
k8_flush_garts();
need_flush = 0;
- }
+ }
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
-}
+}
#ifdef CONFIG_IOMMU_LEAK
-#define SET_LEAK(x) if (iommu_leak_tab) \
- iommu_leak_tab[x] = __builtin_return_address(0);
-#define CLEAR_LEAK(x) if (iommu_leak_tab) \
- iommu_leak_tab[x] = NULL;
+#define SET_LEAK(x) \
+ do { \
+ if (iommu_leak_tab) \
+ iommu_leak_tab[x] = __builtin_return_address(0);\
+ } while (0)
+
+#define CLEAR_LEAK(x) \
+ do { \
+ if (iommu_leak_tab) \
+ iommu_leak_tab[x] = NULL; \
+ } while (0)
/* Debugging aid for drivers that don't free their IOMMU tables */
-static void **iommu_leak_tab;
+static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;
+
static void dump_leak(void)
{
int i;
- static int dump;
- if (dump || !iommu_leak_tab) return;
+ static int dump;
+
+ if (dump || !iommu_leak_tab)
+ return;
dump = 1;
- show_stack(NULL,NULL);
- /* Very crude. dump some from the end of the table too */
- printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
- for (i = 0; i < iommu_leak_pages; i+=2) {
- printk("%lu: ", iommu_pages-i);
+ show_stack(NULL, NULL);
+
+ /* Very crude. dump some from the end of the table too */
+ printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
+ iommu_leak_pages);
+ for (i = 0; i < iommu_leak_pages; i += 2) {
+ printk(KERN_DEBUG "%lu: ", iommu_pages-i);
printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
- printk("%c", (i+1)%2 == 0 ? '\n' : ' ');
- }
- printk("\n");
+ printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
+ }
+ printk(KERN_DEBUG "\n");
}
#else
-#define SET_LEAK(x)
-#define CLEAR_LEAK(x)
+# define SET_LEAK(x)
+# define CLEAR_LEAK(x)
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
- /*
+ /*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately the drivers cannot handle this operation properly.
- * Return some non mapped prereserved space in the aperture and
+ * Return some non mapped prereserved space in the aperture and
* let the Northbridge deal with it. This will result in garbage
* in the IO operation. When the size exceeds the prereserved space
- * memory corruption will occur or random memory will be DMAed
+ * memory corruption will occur or random memory will be DMAed
* out. Hopefully no network devices use single mappings that big.
- */
-
- printk(KERN_ERR
- "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
- size, dev->bus_id);
+ */
+
+ printk(KERN_ERR
+ "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
+ size, dev->bus_id);
if (size > PAGE_SIZE*EMERGENCY_PAGES) {
if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
panic("PCI-DMA: Memory would be corrupted\n");
- if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
- }
-
+ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
+ panic(KERN_ERR
+ "PCI-DMA: Random memory would be DMAed\n");
+ }
#ifdef CONFIG_IOMMU_LEAK
- dump_leak();
+ dump_leak();
#endif
-}
+}
-static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
-{
+static inline int
+need_iommu(struct device *dev, unsigned long addr, size_t size)
+{
u64 mask = *dev->dma_mask;
int high = addr + size > mask;
int mmu = high;
- if (force_iommu)
- mmu = 1;
- return mmu;
+
+ if (force_iommu)
+ mmu = 1;
+
+ return mmu;
}
-static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
-{
+static inline int
+nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
+{
u64 mask = *dev->dma_mask;
int high = addr + size > mask;
int mmu = high;
- return mmu;
+
+ return mmu;
}
/* Map a single continuous physical area into the IOMMU.
@@ -208,13 +233,14 @@ static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t
*/
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
size_t size, int dir)
-{
+{
unsigned long npages = to_pages(phys_mem, size);
unsigned long iommu_page = alloc_iommu(npages);
int i;
+
if (iommu_page == -1) {
if (!nonforced_iommu(dev, phys_mem, size))
- return phys_mem;
+ return phys_mem;
if (panic_on_overflow)
panic("dma_map_area overflow %lu bytes\n", size);
iommu_full(dev, size, dir);
@@ -229,35 +255,39 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
-static dma_addr_t gart_map_simple(struct device *dev, char *buf,
- size_t size, int dir)
+static dma_addr_t
+gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
{
dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
+
flush_gart();
+
return map;
}
/* Map a single area into the IOMMU */
-static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
+static dma_addr_t
+gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
unsigned long phys_mem, bus;
if (!dev)
dev = &fallback_dev;
- phys_mem = virt_to_phys(addr);
+ phys_mem = virt_to_phys(addr);
if (!need_iommu(dev, phys_mem, size))
- return phys_mem;
+ return phys_mem;
bus = gart_map_simple(dev, addr, size, dir);
- return bus;
+
+ return bus;
}
/*
* Free a DMA mapping.
*/
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction)
+ size_t size, int direction)
{
unsigned long iommu_page;
int npages;
@@ -266,6 +296,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
dma_addr >= iommu_bus_base + iommu_size)
return;
+
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = to_pages(dma_addr, size);
for (i = 0; i < npages; i++) {
@@ -278,7 +309,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
/*
* Wrapper for pci_unmap_single working with scatterlists.
*/
-static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static void
+gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
struct scatterlist *s;
int i;
@@ -303,12 +335,13 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
for_each_sg(sg, s, nents, i) {
unsigned long addr = sg_phys(s);
- if (nonforced_iommu(dev, addr, s->length)) {
+
+ if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir);
- if (addr == bad_dma_address) {
- if (i > 0)
+ if (addr == bad_dma_address) {
+ if (i > 0)
gart_unmap_sg(dev, sg, i, dir);
- nents = 0;
+ nents = 0;
sg[0].dma_length = 0;
break;
}
@@ -317,15 +350,16 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
s->dma_length = s->length;
}
flush_gart();
+
return nents;
}
/* Map multiple scatterlist entries continuous into the first. */
static int __dma_map_cont(struct scatterlist *start, int nelems,
- struct scatterlist *sout, unsigned long pages)
+ struct scatterlist *sout, unsigned long pages)
{
unsigned long iommu_start = alloc_iommu(pages);
- unsigned long iommu_page = iommu_start;
+ unsigned long iommu_page = iommu_start;
struct scatterlist *s;
int i;
@@ -335,32 +369,33 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
for_each_sg(start, s, nelems, i) {
unsigned long pages, addr;
unsigned long phys_addr = s->dma_address;
-
+
BUG_ON(s != start && s->offset);
if (s == start) {
sout->dma_address = iommu_bus_base;
sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
sout->dma_length = s->length;
- } else {
- sout->dma_length += s->length;
+ } else {
+ sout->dma_length += s->length;
}
addr = phys_addr;
- pages = to_pages(s->offset, s->length);
- while (pages--) {
- iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
+ pages = to_pages(s->offset, s->length);
+ while (pages--) {
+ iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
SET_LEAK(iommu_page);
addr += PAGE_SIZE;
iommu_page++;
}
- }
- BUG_ON(iommu_page - iommu_start != pages);
+ }
+ BUG_ON(iommu_page - iommu_start != pages);
+
return 0;
}
-static inline int dma_map_cont(struct scatterlist *start, int nelems,
- struct scatterlist *sout,
- unsigned long pages, int need)
+static inline int
+dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
+ unsigned long pages, int need)
{
if (!need) {
BUG_ON(nelems != 1);
@@ -370,22 +405,19 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems,
}
return __dma_map_cont(start, nelems, sout, pages);
}
-
+
/*
* DMA map all entries in a scatterlist.
- * Merge chunks that have page aligned sizes into a continuous mapping.
+ * Merge chunks that have page aligned sizes into a continuous mapping.
*/
-static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- int dir)
+static int
+gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
- int i;
- int out;
- int start;
- unsigned long pages = 0;
- int need = 0, nextneed;
struct scatterlist *s, *ps, *start_sg, *sgmap;
+ int need = 0, nextneed, i, out, start;
+ unsigned long pages = 0;
- if (nents == 0)
+ if (nents == 0)
return 0;
if (!dev)
@@ -397,15 +429,19 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
ps = NULL; /* shut up gcc */
for_each_sg(sg, s, nents, i) {
dma_addr_t addr = sg_phys(s);
+
s->dma_address = addr;
- BUG_ON(s->length == 0);
+ BUG_ON(s->length == 0);
- nextneed = need_iommu(dev, addr, s->length);
+ nextneed = need_iommu(dev, addr, s->length);
/* Handle the previous not yet processed entries */
if (i > start) {
- /* Can only merge when the last chunk ends on a page
- boundary and the new one doesn't have an offset. */
+ /*
+ * Can only merge when the last chunk ends on a
+ * page boundary and the new one doesn't have an
+ * offset.
+ */
if (!iommu_merge || !nextneed || !need || s->offset ||
(ps->offset + ps->length) % PAGE_SIZE) {
if (dma_map_cont(start_sg, i - start, sgmap,
@@ -436,6 +472,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
error:
flush_gart();
gart_unmap_sg(dev, sg, out, dir);
+
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
out = dma_map_sg_nonforce(dev, sg, nents, dir);
@@ -444,64 +481,68 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
}
if (panic_on_overflow)
panic("dma_map_sg: overflow on %lu pages\n", pages);
+
iommu_full(dev, pages << PAGE_SHIFT, dir);
for_each_sg(sg, s, nents, i)
s->dma_address = bad_dma_address;
return 0;
-}
+}
static int no_agp;
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
-{
- unsigned long a;
- if (!iommu_size) {
- iommu_size = aper_size;
- if (!no_agp)
- iommu_size /= 2;
- }
-
- a = aper + iommu_size;
+{
+ unsigned long a;
+
+ if (!iommu_size) {
+ iommu_size = aper_size;
+ if (!no_agp)
+ iommu_size /= 2;
+ }
+
+ a = aper + iommu_size;
iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;
- if (iommu_size < 64*1024*1024)
+ if (iommu_size < 64*1024*1024) {
printk(KERN_WARNING
- "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",iommu_size>>20);
-
+ "PCI-DMA: Warning: Small IOMMU %luMB."
+ " Consider increasing the AGP aperture in BIOS\n",
+ iommu_size >> 20);
+ }
+
return iommu_size;
-}
+}
-static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
-{
- unsigned aper_size = 0, aper_base_32;
+static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
+{
+ unsigned aper_size = 0, aper_base_32, aper_order;
u64 aper_base;
- unsigned aper_order;
- pci_read_config_dword(dev, 0x94, &aper_base_32);
+ pci_read_config_dword(dev, 0x94, &aper_base_32);
pci_read_config_dword(dev, 0x90, &aper_order);
- aper_order = (aper_order >> 1) & 7;
+ aper_order = (aper_order >> 1) & 7;
- aper_base = aper_base_32 & 0x7fff;
+ aper_base = aper_base_32 & 0x7fff;
aper_base <<= 25;
- aper_size = (32 * 1024 * 1024) << aper_order;
- if (aper_base + aper_size > 0x100000000UL || !aper_size)
+ aper_size = (32 * 1024 * 1024) << aper_order;
+ if (aper_base + aper_size > 0x100000000UL || !aper_size)
aper_base = 0;
*size = aper_size;
return aper_base;
-}
+}
-/*
+/*
* Private Northbridge GATT initialization in case we cannot use the
- * AGP driver for some reason.
+ * AGP driver for some reason.
*/
static __init int init_k8_gatt(struct agp_kern_info *info)
-{
+{
+ unsigned aper_size, gatt_size, new_aper_size;
+ unsigned aper_base, new_aper_base;
struct pci_dev *dev;
void *gatt;
- unsigned aper_base, new_aper_base;
- unsigned aper_size, gatt_size, new_aper_size;
int i;
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
@@ -509,75 +550,77 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
dev = NULL;
for (i = 0; i < num_k8_northbridges; i++) {
dev = k8_northbridges[i];
- new_aper_base = read_aperture(dev, &new_aper_size);
- if (!new_aper_base)
- goto nommu;
-
- if (!aper_base) {
+ new_aper_base = read_aperture(dev, &new_aper_size);
+ if (!new_aper_base)
+ goto nommu;
+
+ if (!aper_base) {
aper_size = new_aper_size;
aper_base = new_aper_base;
- }
- if (aper_size != new_aper_size || aper_base != new_aper_base)
+ }
+ if (aper_size != new_aper_size || aper_base != new_aper_base)
goto nommu;
}
if (!aper_base)
- goto nommu;
+ goto nommu;
info->aper_base = aper_base;
- info->aper_size = aper_size>>20;
+ info->aper_size = aper_size >> 20;
- gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
- gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
- if (!gatt)
+ gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
+ gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
+ if (!gatt)
panic("Cannot allocate GATT table");
- if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE))
+ if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT,
+ PAGE_KERNEL_NOCACHE))
panic("Could not set GART PTEs to uncacheable pages");
global_flush_tlb();
- memset(gatt, 0, gatt_size);
+ memset(gatt, 0, gatt_size);
agp_gatt_table = gatt;
for (i = 0; i < num_k8_northbridges; i++) {
- u32 ctl;
- u32 gatt_reg;
+ u32 gatt_reg;
+ u32 ctl;
dev = k8_northbridges[i];
- gatt_reg = __pa(gatt) >> 12;
- gatt_reg <<= 4;
+ gatt_reg = __pa(gatt) >> 12;
+ gatt_reg <<= 4;
pci_write_config_dword(dev, 0x98, gatt_reg);
- pci_read_config_dword(dev, 0x90, &ctl);
+ pci_read_config_dword(dev, 0x90, &ctl);
ctl |= 1;
ctl &= ~((1<<4) | (1<<5));
- pci_write_config_dword(dev, 0x90, ctl);
+ pci_write_config_dword(dev, 0x90, ctl);
}
flush_gart();
-
- printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
+
+ printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
+ aper_base, aper_size>>10);
return 0;
nommu:
- /* Should not happen anymore */
+ /* Should not happen anymore */
printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
- return -1;
-}
+ return -1;
+}
extern int agp_amd64_init(void);
static const struct dma_mapping_ops gart_dma_ops = {
- .mapping_error = NULL,
- .map_single = gart_map_single,
- .map_simple = gart_map_simple,
- .unmap_single = gart_unmap_single,
- .sync_single_for_cpu = NULL,
- .sync_single_for_device = NULL,
- .sync_single_range_for_cpu = NULL,
- .sync_single_range_for_device = NULL,
- .sync_sg_for_cpu = NULL,
- .sync_sg_for_device = NULL,
- .map_sg = gart_map_sg,
- .unmap_sg = gart_unmap_sg,
+ .mapping_error = NULL,
+ .map_single = gart_map_single,
+ .map_simple = gart_map_simple,
+ .unmap_single = gart_unmap_single,
+ .sync_single_for_cpu = NULL,
+ .sync_single_for_device = NULL,
+ .sync_single_range_for_cpu = NULL,
+ .sync_single_range_for_device = NULL,
+ .sync_sg_for_cpu = NULL,
+ .sync_sg_for_device = NULL,
+ .map_sg = gart_map_sg,
+ .unmap_sg = gart_unmap_sg,
};
void gart_iommu_shutdown(void)
@@ -588,23 +631,23 @@ void gart_iommu_shutdown(void)
if (no_agp && (dma_ops != &gart_dma_ops))
return;
- for (i = 0; i < num_k8_northbridges; i++) {
- u32 ctl;
+ for (i = 0; i < num_k8_northbridges; i++) {
+ u32 ctl;
- dev = k8_northbridges[i];
- pci_read_config_dword(dev, 0x90, &ctl);
+ dev = k8_northbridges[i];
+ pci_read_config_dword(dev, 0x90, &ctl);
- ctl &= ~1;
+ ctl &= ~1;
- pci_write_config_dword(dev, 0x90, ctl);
- }
+ pci_write_config_dword(dev, 0x90, ctl);
+ }
}
void __init gart_iommu_init(void)
-{
+{
struct agp_kern_info info;
- unsigned long aper_size;
unsigned long iommu_start;
+ unsigned long aper_size;
unsigned long scratch;
long i;
@@ -614,14 +657,14 @@ void __init gart_iommu_init(void)
}
#ifndef CONFIG_AGP_AMD64
- no_agp = 1;
+ no_agp = 1;
#else
/* Makefile puts PCI initialization via subsys_initcall first. */
/* Add other K8 AGP bridge drivers here */
- no_agp = no_agp ||
- (agp_amd64_init() < 0) ||
+ no_agp = no_agp ||
+ (agp_amd64_init() < 0) ||
(agp_copy_info(agp_bridge, &info) < 0);
-#endif
+#endif
if (swiotlb)
return;
@@ -643,77 +686,78 @@ void __init gart_iommu_init(void)
}
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
- aper_size = info.aper_size * 1024 * 1024;
- iommu_size = check_iommu_size(info.aper_base, aper_size);
- iommu_pages = iommu_size >> PAGE_SHIFT;
-
- iommu_gart_bitmap = (void*)__get_free_pages(GFP_KERNEL,
- get_order(iommu_pages/8));
- if (!iommu_gart_bitmap)
- panic("Cannot allocate iommu bitmap\n");
+ aper_size = info.aper_size * 1024 * 1024;
+ iommu_size = check_iommu_size(info.aper_base, aper_size);
+ iommu_pages = iommu_size >> PAGE_SHIFT;
+
+ iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
+ get_order(iommu_pages/8));
+ if (!iommu_gart_bitmap)
+ panic("Cannot allocate iommu bitmap\n");
memset(iommu_gart_bitmap, 0, iommu_pages/8);
#ifdef CONFIG_IOMMU_LEAK
- if (leak_trace) {
- iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
+ if (leak_trace) {
+ iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
get_order(iommu_pages*sizeof(void *)));
- if (iommu_leak_tab)
- memset(iommu_leak_tab, 0, iommu_pages * 8);
+ if (iommu_leak_tab)
+ memset(iommu_leak_tab, 0, iommu_pages * 8);
else
- printk("PCI-DMA: Cannot allocate leak trace area\n");
- }
+ printk(KERN_DEBUG
+ "PCI-DMA: Cannot allocate leak trace area\n");
+ }
#endif
- /*
+ /*
* Out of IOMMU space handling.
- * Reserve some invalid pages at the beginning of the GART.
- */
- set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+ * Reserve some invalid pages at the beginning of the GART.
+ */
+ set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
- agp_memory_reserved = iommu_size;
+ agp_memory_reserved = iommu_size;
printk(KERN_INFO
"PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
- iommu_size>>20);
+ iommu_size >> 20);
- iommu_start = aper_size - iommu_size;
- iommu_bus_base = info.aper_base + iommu_start;
+ iommu_start = aper_size - iommu_size;
+ iommu_bus_base = info.aper_base + iommu_start;
bad_dma_address = iommu_bus_base;
iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
- /*
+ /*
* Unmap the IOMMU part of the GART. The alias of the page is
* always mapped with cache enabled and there is no full cache
* coherency across the GART remapping. The unmapping avoids
* automatic prefetches from the CPU allocating cache lines in
* there. All CPU accesses are done via the direct mapping to
* the backing memory. The GART address is only used by PCI
- * devices.
+ * devices.
*/
clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
- /*
- * Try to workaround a bug (thanks to BenH)
- * Set unmapped entries to a scratch page instead of 0.
+ /*
+ * Try to workaround a bug (thanks to BenH)
+ * Set unmapped entries to a scratch page instead of 0.
* Any prefetches that hit unmapped entries won't get an bus abort
* then.
*/
- scratch = get_zeroed_page(GFP_KERNEL);
- if (!scratch)
+ scratch = get_zeroed_page(GFP_KERNEL);
+ if (!scratch)
panic("Cannot allocate iommu scratch page");
gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
- for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
+ for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
iommu_gatt_base[i] = gart_unmapped_entry;
flush_gart();
dma_ops = &gart_dma_ops;
-}
+}
void __init gart_parse_options(char *p)
{
int arg;
#ifdef CONFIG_IOMMU_LEAK
- if (!strncmp(p,"leak",4)) {
+ if (!strncmp(p, "leak", 4)) {
leak_trace = 1;
p += 4;
if (*p == '=') ++p;
@@ -723,18 +767,18 @@ void __init gart_parse_options(char *p)
#endif
if (isdigit(*p) && get_option(&p, &arg))
iommu_size = arg;
- if (!strncmp(p, "fullflush",8))
+ if (!strncmp(p, "fullflush", 8))
iommu_fullflush = 1;
- if (!strncmp(p, "nofullflush",11))
+ if (!strncmp(p, "nofullflush", 11))
iommu_fullflush = 0;
- if (!strncmp(p,"noagp",5))
+ if (!strncmp(p, "noagp", 5))
no_agp = 1;
- if (!strncmp(p, "noaperture",10))
+ if (!strncmp(p, "noaperture", 10))
fix_aperture = 0;
/* duplicated from pci-dma.c */
- if (!strncmp(p,"force",5))
+ if (!strncmp(p, "force", 5))
gart_iommu_aperture_allowed = 1;
- if (!strncmp(p,"allowed",7))
+ if (!strncmp(p, "allowed", 7))
gart_iommu_aperture_allowed = 1;
if (!strncmp(p, "memaper", 7)) {
fallback_aper_force = 1;
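[Editorial note] The hunk above only adjusts spacing in gart_parse_options(), but the option grammar it parses is worth spelling out: each token ("leak", "fullflush", "nofullflush", "noagp", "noaperture", "force", "allowed", "memaper", or a bare number) is matched with strncmp() and toggles a flag or sets the aperture/IOMMU size. The sketch below mirrors that matching pattern in plain user-space C; the flag variables, the parse_gart_option() helper and the use of strtol() instead of the kernel's get_option() are illustrative assumptions, not the kernel interface:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative flags standing in for the kernel globals touched above. */
static int leak_trace, iommu_fullflush = 1, no_agp, fix_aperture = 1;
static long iommu_size_opt = -1;

static void parse_gart_option(const char *p)
{
	/* Same strncmp()-prefix style as the hunk above; order matters only
	 * where one option is a prefix of another. */
	if (!strncmp(p, "leak", 4))
		leak_trace = 1;
	if (isdigit((unsigned char)*p))
		iommu_size_opt = strtol(p, NULL, 0);	/* kernel uses get_option() */
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
}

int main(void)
{
	parse_gart_option("nofullflush");
	parse_gart_option("noagp");
	printf("fullflush=%d no_agp=%d leak=%d\n",
	       iommu_fullflush, no_agp, leak_trace);
	return 0;
}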
commit e8d591dc710158bae6b53c8b7a0172351025c6e2
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 30 13:30:12 2008 +0100
x86: lindent arch/i386/math-emu, cleanup
manually clean up some of the damage that lindent caused.
(this is a separate commit so that in the unlikely case of
a typo we can bisect it down to the manual edits.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
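[Editorial note] The manual fixups in this commit are all of one kind: lindent leaves a space between the pointed-to type and the '*' in pointer declarators, while the kernel's preferred style attaches the '*' to the identifier. A representative before/after, taken from the first hunk below:

	int real_1op_NaN(FPU_REG * a)	/* lindent output */
	int real_1op_NaN(FPU_REG *a)	/* preferred kernel style */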
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index 7cb5bf3495b2..145b68a99516 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -374,7 +374,7 @@ asmlinkage void FPU_exception(int n)
/* Real operation attempted on a NaN. */
/* Returns < 0 if the exception is unmasked */
-int real_1op_NaN(FPU_REG * a)
+int real_1op_NaN(FPU_REG *a)
{
int signalling, isNaN;
@@ -573,7 +573,7 @@ asmlinkage int denormal_operand(void)
}
}
-asmlinkage int arith_overflow(FPU_REG * dest)
+asmlinkage int arith_overflow(FPU_REG *dest)
{
int tag = TAG_Valid;
@@ -601,7 +601,7 @@ asmlinkage int arith_overflow(FPU_REG * dest)
}
-asmlinkage int arith_underflow(FPU_REG * dest)
+asmlinkage int arith_underflow(FPU_REG *dest)
{
int tag = TAG_Valid;
diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
index 656dd4c04b1b..4dae511c85ad 100644
--- a/arch/x86/math-emu/fpu_emu.h
+++ b/arch/x86/math-emu/fpu_emu.h
@@ -122,7 +122,7 @@ struct fpu__reg {
typedef void (*FUNC) (void);
typedef struct fpu__reg FPU_REG;
-typedef void (*FUNC_ST0) (FPU_REG * st0_ptr, u_char st0_tag);
+typedef void (*FUNC_ST0) (FPU_REG *st0_ptr, u_char st0_tag);
typedef struct {
u_char address_size, operand_size, segment;
} overrides;
@@ -166,7 +166,7 @@ extern u_char const data_sizes_16[32];
#define signpositive(a) ( (signbyte(a) & 0x80) == 0 )
#define signnegative(a) (signbyte(a) & 0x80)
-static inline void reg_copy(FPU_REG const *x, FPU_REG * y)
+static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
{
*(short *)&(y->exp) = *(const short *)&(x->exp);
*(long long *)&(y->sigl) = *(const long long *)&(x->sigl);
@@ -187,8 +187,8 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG * y)
/*----- Prototypes for functions written in assembler -----*/
/* extern void reg_move(FPU_REG *a, FPU_REG *b); */
-asmlinkage int FPU_normalize(FPU_REG * x);
-asmlinkage int FPU_normalize_nuo(FPU_REG * x);
+asmlinkage int FPU_normalize(FPU_REG *x);
+asmlinkage int FPU_normalize_nuo(FPU_REG *x);
asmlinkage int FPU_u_sub(FPU_REG const *arg1, FPU_REG const *arg2,
FPU_REG * answ, unsigned int control_w, u_char sign,
int expa, int expb);
@@ -200,12 +200,12 @@ asmlinkage int FPU_u_div(FPU_REG const *arg1, FPU_REG const *arg2,
asmlinkage int FPU_u_add(FPU_REG const *arg1, FPU_REG const *arg2,
FPU_REG * answ, unsigned int control_w, u_char sign,
int expa, int expb);
-asmlinkage int wm_sqrt(FPU_REG * n, int dummy1, int dummy2,
+asmlinkage int wm_sqrt(FPU_REG *n, int dummy1, int dummy2,
unsigned int control_w, u_char sign);
asmlinkage unsigned FPU_shrx(void *l, unsigned x);
asmlinkage unsigned FPU_shrxs(void *v, unsigned x);
asmlinkage unsigned long FPU_div_small(unsigned long long *x, unsigned long y);
-asmlinkage int FPU_round(FPU_REG * arg, unsigned int extent, int dummy,
+asmlinkage int FPU_round(FPU_REG *arg, unsigned int extent, int dummy,
unsigned int control_w, u_char sign);
#ifndef MAKING_PROTO
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index cbb8717f09fd..377c60dfa2f0 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -126,7 +126,7 @@ static u_char const type_table[64] = {
u_char emulating = 0;
#endif /* RE_ENTRANT_CHECKING */
-static int valid_prefix(u_char * Byte, u_char __user ** fpu_eip,
+static int valid_prefix(u_char *Byte, u_char __user ** fpu_eip,
overrides * override);
asmlinkage void math_emulate(long arg)
@@ -580,7 +580,7 @@ asmlinkage void math_emulate(long arg)
all prefix bytes, further changes are needed in the emulator code
which accesses user address space. Access to separate segments is
important for msdos emulation. */
-static int valid_prefix(u_char * Byte, u_char __user ** fpu_eip,
+static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
overrides * override)
{
u_char byte;
@@ -673,7 +673,7 @@ void math_abort(struct info *info, unsigned int signal)
#define sstatus_word() \
((S387->swd & ~SW_Top & 0xffff) | ((S387->ftop << SW_Top_Shift) & SW_Top))
-int restore_i387_soft(void *s387, struct _fpstate __user * buf)
+int restore_i387_soft(void *s387, struct _fpstate __user *buf)
{
u_char __user *d = (u_char __user *) buf;
int offset, other, i, tags, regnr, tag, newtop;
diff --git a/arch/x86/math-emu/fpu_etc.c b/arch/x86/math-emu/fpu_etc.c
index e73631e0cde9..233e5af566f5 100644
--- a/arch/x86/math-emu/fpu_etc.c
+++ b/arch/x86/math-emu/fpu_etc.c
@@ -16,7 +16,7 @@
#include "status_w.h"
#include "reg_constant.h"
-static void fchs(FPU_REG * st0_ptr, u_char st0tag)
+static void fchs(FPU_REG *st0_ptr, u_char st0tag)
{
if (st0tag ^ TAG_Empty) {
signbyte(st0_ptr) ^= SIGN_NEG;
@@ -25,7 +25,7 @@ static void fchs(FPU_REG * st0_ptr, u_char st0tag)
FPU_stack_underflow();
}
-static void fabs(FPU_REG * st0_ptr, u_char st0tag)
+static void fabs(FPU_REG *st0_ptr, u_char st0tag)
{
if (st0tag ^ TAG_Empty) {
setpositive(st0_ptr);
@@ -34,7 +34,7 @@ static void fabs(FPU_REG * st0_ptr, u_char st0tag)
FPU_stack_underflow();
}
-static void ftst_(FPU_REG * st0_ptr, u_char st0tag)
+static void ftst_(FPU_REG *st0_ptr, u_char st0tag)
{
switch (st0tag) {
case TAG_Zero:
@@ -85,7 +85,7 @@ static void ftst_(FPU_REG * st0_ptr, u_char st0tag)
}
}
-static void fxam(FPU_REG * st0_ptr, u_char st0tag)
+static void fxam(FPU_REG *st0_ptr, u_char st0tag)
{
int c = 0;
switch (st0tag) {
diff --git a/arch/x86/math-emu/fpu_proto.h b/arch/x86/math-emu/fpu_proto.h
index 0f6384102afd..aa49b6a0d850 100644
--- a/arch/x86/math-emu/fpu_proto.h
+++ b/arch/x86/math-emu/fpu_proto.h
@@ -5,7 +5,7 @@
extern void FPU_illegal(void);
extern void FPU_printall(void);
asmlinkage void FPU_exception(int n);
-extern int real_1op_NaN(FPU_REG * a);
+extern int real_1op_NaN(FPU_REG *a);
extern int real_2op_NaN(FPU_REG const *b, u_char tagb, int deststnr,
FPU_REG const *defaultNaN);
asmlinkage int arith_invalid(int deststnr);
@@ -14,8 +14,8 @@ extern int set_precision_flag(int flags);
asmlinkage void set_precision_flag_up(void);
asmlinkage void set_precision_flag_down(void);
asmlinkage int denormal_operand(void);
-asmlinkage int arith_overflow(FPU_REG * dest);
-asmlinkage int arith_underflow(FPU_REG * dest);
+asmlinkage int arith_overflow(FPU_REG *dest);
+asmlinkage int arith_underflow(FPU_REG *dest);
extern void FPU_stack_overflow(void);
extern void FPU_stack_underflow(void);
extern void FPU_stack_underflow_i(int i);
@@ -84,19 +84,19 @@ extern void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
extern int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
void __user * data_address);
/* poly_2xm1.c */
-extern int poly_2xm1(u_char sign, FPU_REG * arg, FPU_REG * result);
+extern int poly_2xm1(u_char sign, FPU_REG * arg, FPU_REG *result);
/* poly_atan.c */
-extern void poly_atan(FPU_REG * st0_ptr, u_char st0_tag, FPU_REG * st1_ptr,
+extern void poly_atan(FPU_REG * st0_ptr, u_char st0_tag, FPU_REG *st1_ptr,
u_char st1_tag);
/* poly_l2.c */
-extern void poly_l2(FPU_REG * st0_ptr, FPU_REG * st1_ptr, u_char st1_sign);
-extern int poly_l2p1(u_char s0, u_char s1, FPU_REG * r0, FPU_REG * r1,
+extern void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign);
+extern int poly_l2p1(u_char s0, u_char s1, FPU_REG *r0, FPU_REG *r1,
FPU_REG * d);
/* poly_sin.c */
-extern void poly_sine(FPU_REG * st0_ptr);
-extern void poly_cos(FPU_REG * st0_ptr);
+extern void poly_sine(FPU_REG *st0_ptr);
+extern void poly_cos(FPU_REG *st0_ptr);
/* poly_tan.c */
-extern void poly_tan(FPU_REG * st0_ptr);
+extern void poly_tan(FPU_REG *st0_ptr);
/* reg_add_sub.c */
extern int FPU_add(FPU_REG const *b, u_char tagb, int destrnr, int control_w);
extern int FPU_sub(int flags, int rm, int control_w);
@@ -111,34 +111,34 @@ extern void fucompp(void);
/* reg_constant.c */
extern void fconst(void);
/* reg_ld_str.c */
-extern int FPU_load_extended(long double __user * s, int stnr);
-extern int FPU_load_double(double __user * dfloat, FPU_REG * loaded_data);
-extern int FPU_load_single(float __user * single, FPU_REG * loaded_data);
-extern int FPU_load_int64(long long __user * _s);
-extern int FPU_load_int32(long __user * _s, FPU_REG * loaded_data);
-extern int FPU_load_int16(short __user * _s, FPU_REG * loaded_data);
-extern int FPU_load_bcd(u_char __user * s);
-extern int FPU_store_extended(FPU_REG * st0_ptr, u_char st0_tag,
+extern int FPU_load_extended(long double __user *s, int stnr);
+extern int FPU_load_double(double __user *dfloat, FPU_REG *loaded_data);
+extern int FPU_load_single(float __user *single, FPU_REG *loaded_data);
+extern int FPU_load_int64(long long __user *_s);
+extern int FPU_load_int32(long __user *_s, FPU_REG *loaded_data);
+extern int FPU_load_int16(short __user *_s, FPU_REG *loaded_data);
+extern int FPU_load_bcd(u_char __user *s);
+extern int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag,
long double __user * d);
-extern int FPU_store_double(FPU_REG * st0_ptr, u_char st0_tag,
+extern int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag,
double __user * dfloat);
-extern int FPU_store_single(FPU_REG * st0_ptr, u_char st0_tag,
+extern int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag,
float __user * single);
-extern int FPU_store_int64(FPU_REG * st0_ptr, u_char st0_tag,
+extern int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag,
long long __user * d);
-extern int FPU_store_int32(FPU_REG * st0_ptr, u_char st0_tag, long __user * d);
-extern int FPU_store_int16(FPU_REG * st0_ptr, u_char st0_tag, short __user * d);
-extern int FPU_store_bcd(FPU_REG * st0_ptr, u_char st0_tag, u_char __user * d);
-extern int FPU_round_to_int(FPU_REG * r, u_char tag);
-extern u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user * s);
-extern void frstor(fpu_addr_modes addr_modes, u_char __user * data_address);
-extern u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user * d);
-extern void fsave(fpu_addr_modes addr_modes, u_char __user * data_address);
-extern int FPU_tagof(FPU_REG * ptr);
+extern int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d);
+extern int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d);
+extern int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d);
+extern int FPU_round_to_int(FPU_REG *r, u_char tag);
+extern u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s);
+extern void frstor(fpu_addr_modes addr_modes, u_char __user *data_address);
+extern u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d);
+extern void fsave(fpu_addr_modes addr_modes, u_char __user *data_address);
+extern int FPU_tagof(FPU_REG *ptr);
/* reg_mul.c */
extern int FPU_mul(FPU_REG const *b, u_char tagb, int deststnr, int control_w);
extern int FPU_div(int flags, int regrm, int control_w);
/* reg_convert.c */
-extern int FPU_to_exp16(FPU_REG const *a, FPU_REG * x);
+extern int FPU_to_exp16(FPU_REG const *a, FPU_REG *x);
#endif /* _FPU_PROTO_H */
diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
index e5316a288a6e..ecd06680581c 100644
--- a/arch/x86/math-emu/fpu_trig.c
+++ b/arch/x86/math-emu/fpu_trig.c
@@ -30,7 +30,7 @@ static void rem_kernel(unsigned long long st0, unsigned long long *y,
/* Limited measurements show no results worse than 64 bit precision
except for the results for arguments close to 2^63, where the
precision of the result sometimes degrades to about 63.9 bits */
-static int trig_arg(FPU_REG * st0_ptr, int even)
+static int trig_arg(FPU_REG *st0_ptr, int even)
{
FPU_REG tmp;
u_char tmptag;
@@ -176,7 +176,7 @@ static void convert_l2reg(long const *arg, int deststnr)
return;
}
-static void single_arg_error(FPU_REG * st0_ptr, u_char st0_tag)
+static void single_arg_error(FPU_REG *st0_ptr, u_char st0_tag)
{
if (st0_tag == TAG_Empty)
FPU_stack_underflow(); /* Puts a QNaN in st(0) */
@@ -188,7 +188,7 @@ static void single_arg_error(FPU_REG * st0_ptr, u_char st0_tag)
#endif /* PARANOID */
}
-static void single_arg_2_error(FPU_REG * st0_ptr, u_char st0_tag)
+static void single_arg_2_error(FPU_REG *st0_ptr, u_char st0_tag)
{
int isNaN;
@@ -229,7 +229,7 @@ static void single_arg_2_error(FPU_REG * st0_ptr, u_char st0_tag)
/*---------------------------------------------------------------------------*/
-static void f2xm1(FPU_REG * st0_ptr, u_char tag)
+static void f2xm1(FPU_REG *st0_ptr, u_char tag)
{
FPU_REG a;
@@ -272,7 +272,7 @@ static void f2xm1(FPU_REG * st0_ptr, u_char tag)
}
}
-static void fptan(FPU_REG * st0_ptr, u_char st0_tag)
+static void fptan(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st_new_ptr;
int q;
@@ -351,7 +351,7 @@ static void fptan(FPU_REG * st0_ptr, u_char st0_tag)
single_arg_2_error(st0_ptr, st0_tag);
}
-static void fxtract(FPU_REG * st0_ptr, u_char st0_tag)
+static void fxtract(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st_new_ptr;
u_char sign;
@@ -444,7 +444,7 @@ static void fincstp(void)
top++;
}
-static void fsqrt_(FPU_REG * st0_ptr, u_char st0_tag)
+static void fsqrt_(FPU_REG *st0_ptr, u_char st0_tag)
{
int expon;
@@ -502,7 +502,7 @@ static void fsqrt_(FPU_REG * st0_ptr, u_char st0_tag)
}
-static void frndint_(FPU_REG * st0_ptr, u_char st0_tag)
+static void frndint_(FPU_REG *st0_ptr, u_char st0_tag)
{
int flags, tag;
@@ -546,7 +546,7 @@ static void frndint_(FPU_REG * st0_ptr, u_char st0_tag)
single_arg_error(st0_ptr, st0_tag);
}
-static int fsin(FPU_REG * st0_ptr, u_char tag)
+static int fsin(FPU_REG *st0_ptr, u_char tag)
{
u_char arg_sign = getsign(st0_ptr);
@@ -607,7 +607,7 @@ static int fsin(FPU_REG * st0_ptr, u_char tag)
}
}
-static int f_cos(FPU_REG * st0_ptr, u_char tag)
+static int f_cos(FPU_REG *st0_ptr, u_char tag)
{
u_char st0_sign;
@@ -677,12 +677,12 @@ static int f_cos(FPU_REG * st0_ptr, u_char tag)
}
}
-static void fcos(FPU_REG * st0_ptr, u_char st0_tag)
+static void fcos(FPU_REG *st0_ptr, u_char st0_tag)
{
f_cos(st0_ptr, st0_tag);
}
-static void fsincos(FPU_REG * st0_ptr, u_char st0_tag)
+static void fsincos(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st_new_ptr;
FPU_REG arg;
@@ -775,7 +775,7 @@ static void rem_kernel(unsigned long long st0, unsigned long long *y,
/* Remainder of st(0) / st(1) */
/* This routine produces exact results, i.e. there is never any
rounding or truncation, etc of the result. */
-static void do_fprem(FPU_REG * st0_ptr, u_char st0_tag, int round)
+static void do_fprem(FPU_REG *st0_ptr, u_char st0_tag, int round)
{
FPU_REG *st1_ptr = &st(1);
u_char st1_tag = FPU_gettagi(1);
@@ -1017,7 +1017,7 @@ static void do_fprem(FPU_REG * st0_ptr, u_char st0_tag, int round)
}
/* ST(1) <- ST(1) * log ST; pop ST */
-static void fyl2x(FPU_REG * st0_ptr, u_char st0_tag)
+static void fyl2x(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st1_ptr = &st(1), exponent;
u_char st1_tag = FPU_gettagi(1);
@@ -1188,7 +1188,7 @@ static void fyl2x(FPU_REG * st0_ptr, u_char st0_tag)
FPU_pop();
}
-static void fpatan(FPU_REG * st0_ptr, u_char st0_tag)
+static void fpatan(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st1_ptr = &st(1);
u_char st1_tag = FPU_gettagi(1);
@@ -1298,17 +1298,17 @@ static void fpatan(FPU_REG * st0_ptr, u_char st0_tag)
set_precision_flag_up(); /* We do not really know if up or down */
}
-static void fprem(FPU_REG * st0_ptr, u_char st0_tag)
+static void fprem(FPU_REG *st0_ptr, u_char st0_tag)
{
do_fprem(st0_ptr, st0_tag, RC_CHOP);
}
-static void fprem1(FPU_REG * st0_ptr, u_char st0_tag)
+static void fprem1(FPU_REG *st0_ptr, u_char st0_tag)
{
do_fprem(st0_ptr, st0_tag, RC_RND);
}
-static void fyl2xp1(FPU_REG * st0_ptr, u_char st0_tag)
+static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
{
u_char sign, sign1;
FPU_REG *st1_ptr = &st(1), a, b;
@@ -1477,7 +1477,7 @@ static void fyl2xp1(FPU_REG * st0_ptr, u_char st0_tag)
}
-static void fscale(FPU_REG * st0_ptr, u_char st0_tag)
+static void fscale(FPU_REG *st0_ptr, u_char st0_tag)
{
FPU_REG *st1_ptr = &st(1);
u_char st1_tag = FPU_gettagi(1);
diff --git a/arch/x86/math-emu/poly.h b/arch/x86/math-emu/poly.h
index f317de7d8864..168eb44c93c8 100644
--- a/arch/x86/math-emu/poly.h
+++ b/arch/x86/math-emu/poly.h
@@ -33,12 +33,12 @@ asmlinkage void polynomial_Xsig(Xsig *, const unsigned long long *x,
asmlinkage void mul32_Xsig(Xsig *, const unsigned long mult);
asmlinkage void mul64_Xsig(Xsig *, const unsigned long long *mult);
-asmlinkage void mul_Xsig_Xsig(Xsig * dest, const Xsig * mult);
+asmlinkage void mul_Xsig_Xsig(Xsig *dest, const Xsig *mult);
asmlinkage void shr_Xsig(Xsig *, const int n);
asmlinkage int round_Xsig(Xsig *);
asmlinkage int norm_Xsig(Xsig *);
-asmlinkage void div_Xsig(Xsig * x1, const Xsig * x2, const Xsig * dest);
+asmlinkage void div_Xsig(Xsig *x1, const Xsig *x2, const Xsig *dest);
/* Macro to extract the most significant 32 bits from a long long */
#define LL_MSW(x) (((unsigned long *)&x)[1])
@@ -70,7 +70,7 @@ static inline unsigned long mul_32_32(const unsigned long arg1,
}
/* Add the 12 byte Xsig x2 to Xsig dest, with no checks for overflow. */
-static inline void add_Xsig_Xsig(Xsig * dest, const Xsig * x2)
+static inline void add_Xsig_Xsig(Xsig *dest, const Xsig *x2)
{
asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
"movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
@@ -84,7 +84,7 @@ static inline void add_Xsig_Xsig(Xsig * dest, const Xsig * x2)
/* Note: the constraints in the asm statement didn't always work properly
with gcc 2.5.8. Changing from using edi to using ecx got around the
problem, but keep fingers crossed! */
-static inline void add_two_Xsig(Xsig * dest, const Xsig * x2, long int *exp)
+static inline void add_two_Xsig(Xsig *dest, const Xsig *x2, long int *exp)
{
asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
"movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
@@ -101,7 +101,7 @@ static inline void add_two_Xsig(Xsig * dest, const Xsig * x2, long int *exp)
/* Negate (subtract from 1.0) the 12 byte Xsig */
/* This is faster in a loop on my 386 than using the "neg" instruction. */
-static inline void negate_Xsig(Xsig * x)
+static inline void negate_Xsig(Xsig *x)
{
asm volatile ("movl %1,%%esi;\n"
"xorl %%ecx,%%ecx;\n"
diff --git a/arch/x86/math-emu/poly_2xm1.c b/arch/x86/math-emu/poly_2xm1.c
index d8f2be3c8383..b00e9e10cdce 100644
--- a/arch/x86/math-emu/poly_2xm1.c
+++ b/arch/x86/math-emu/poly_2xm1.c
@@ -49,7 +49,7 @@ static const Xsig *shiftterm[] = { &shiftterm0, &shiftterm1,
/*--- poly_2xm1() -----------------------------------------------------------+
| Requires st(0) which is TAG_Valid and < 1. |
+---------------------------------------------------------------------------*/
-int poly_2xm1(u_char sign, FPU_REG * arg, FPU_REG * result)
+int poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result)
{
long int exponent, shift;
unsigned long long Xll;
diff --git a/arch/x86/math-emu/poly_atan.c b/arch/x86/math-emu/poly_atan.c
index 2f4ac8143fc3..20c28e58e2d4 100644
--- a/arch/x86/math-emu/poly_atan.c
+++ b/arch/x86/math-emu/poly_atan.c
@@ -48,8 +48,8 @@ static const Xsig pi_signif = MK_XSIG(0xc90fdaa2, 0x2168c234, 0xc4c6628b);
/*--- poly_atan() -----------------------------------------------------------+
| |
+---------------------------------------------------------------------------*/
-void poly_atan(FPU_REG * st0_ptr, u_char st0_tag,
- FPU_REG * st1_ptr, u_char st1_tag)
+void poly_atan(FPU_REG *st0_ptr, u_char st0_tag,
+ FPU_REG *st1_ptr, u_char st1_tag)
{
u_char transformed, inverted, sign1, sign2;
int exponent;
diff --git a/arch/x86/math-emu/poly_l2.c b/arch/x86/math-emu/poly_l2.c
index c0102ae87511..8e2ff4b28a0a 100644
--- a/arch/x86/math-emu/poly_l2.c
+++ b/arch/x86/math-emu/poly_l2.c
@@ -23,7 +23,7 @@ static void log2_kernel(FPU_REG const *arg, u_char argsign,
/*--- poly_l2() -------------------------------------------------------------+
| Base 2 logarithm by a polynomial approximation. |
+---------------------------------------------------------------------------*/
-void poly_l2(FPU_REG * st0_ptr, FPU_REG * st1_ptr, u_char st1_sign)
+void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign)
{
long int exponent, expon, expon_expon;
Xsig accumulator, expon_accum, yaccum;
@@ -178,7 +178,7 @@ static const unsigned long leadterm = 0xb8000000;
| Base 2 logarithm by a polynomial approximation. |
| log2(x+1) |
+---------------------------------------------------------------------------*/
-static void log2_kernel(FPU_REG const *arg, u_char argsign, Xsig * accum_result,
+static void log2_kernel(FPU_REG const *arg, u_char argsign, Xsig *accum_result,
long int *expon)
{
long int exponent, adj;
diff --git a/arch/x86/math-emu/poly_sin.c b/arch/x86/math-emu/poly_sin.c
index 7273ae0c7692..b862039c728e 100644
--- a/arch/x86/math-emu/poly_sin.c
+++ b/arch/x86/math-emu/poly_sin.c
@@ -54,7 +54,7 @@ static const unsigned long long neg_terms_h[N_COEFF_NH] = {
/*--- poly_sine() -----------------------------------------------------------+
| |
+---------------------------------------------------------------------------*/
-void poly_sine(FPU_REG * st0_ptr)
+void poly_sine(FPU_REG *st0_ptr)
{
int exponent, echange;
Xsig accumulator, argSqrd, argTo4;
@@ -197,7 +197,7 @@ void poly_sine(FPU_REG * st0_ptr)
/*--- poly_cos() ------------------------------------------------------------+
| |
+---------------------------------------------------------------------------*/
-void poly_cos(FPU_REG * st0_ptr)
+void poly_cos(FPU_REG *st0_ptr)
{
FPU_REG result;
long int exponent, exp2, echange;
diff --git a/arch/x86/math-emu/poly_tan.c b/arch/x86/math-emu/poly_tan.c
index c0d181e39229..1875763e0c02 100644
--- a/arch/x86/math-emu/poly_tan.c
+++ b/arch/x86/math-emu/poly_tan.c
@@ -47,7 +47,7 @@ static const unsigned long long twothirds = 0xaaaaaaaaaaaaaaabLL;
/*--- poly_tan() ------------------------------------------------------------+
| |
+---------------------------------------------------------------------------*/
-void poly_tan(FPU_REG * st0_ptr)
+void poly_tan(FPU_REG *st0_ptr)
{
long int exponent;
int invert;
diff --git a/arch/x86/math-emu/reg_convert.c b/arch/x86/math-emu/reg_convert.c
index afd31b31000d..108060779977 100644
--- a/arch/x86/math-emu/reg_convert.c
+++ b/arch/x86/math-emu/reg_convert.c
@@ -13,7 +13,7 @@
#include "exception.h"
#include "fpu_emu.h"
-int FPU_to_exp16(FPU_REG const *a, FPU_REG * x)
+int FPU_to_exp16(FPU_REG const *a, FPU_REG *x)
{
int sign = getsign(a);
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
index 0b2ca8dc2988..799d4af5be66 100644
--- a/arch/x86/math-emu/reg_ld_str.c
+++ b/arch/x86/math-emu/reg_ld_str.c
@@ -35,7 +35,7 @@
#define SINGLE_Ebias 127
#define SINGLE_Emin (-126) /* smallest valid exponent */
-static u_char normalize_no_excep(FPU_REG * r, int exp, int sign)
+static u_char normalize_no_excep(FPU_REG *r, int exp, int sign)
{
u_char tag;
@@ -49,7 +49,7 @@ static u_char normalize_no_excep(FPU_REG * r, int exp, int sign)
return tag;
}
-int FPU_tagof(FPU_REG * ptr)
+int FPU_tagof(FPU_REG *ptr)
{
int exp;
@@ -78,7 +78,7 @@ int FPU_tagof(FPU_REG * ptr)
}
/* Get a long double from user memory */
-int FPU_load_extended(long double __user * s, int stnr)
+int FPU_load_extended(long double __user *s, int stnr)
{
FPU_REG *sti_ptr = &st(stnr);
@@ -91,7 +91,7 @@ int FPU_load_extended(long double __user * s, int stnr)
}
/* Get a double from user memory */
-int FPU_load_double(double __user * dfloat, FPU_REG * loaded_data)
+int FPU_load_double(double __user *dfloat, FPU_REG *loaded_data)
{
int exp, tag, negative;
unsigned m64, l64;
@@ -152,7 +152,7 @@ int FPU_load_double(double __user * dfloat, FPU_REG * loaded_data)
}
/* Get a float from user memory */
-int FPU_load_single(float __user * single, FPU_REG * loaded_data)
+int FPU_load_single(float __user *single, FPU_REG *loaded_data)
{
unsigned m32;
int exp, tag, negative;
@@ -206,7 +206,7 @@ int FPU_load_single(float __user * single, FPU_REG * loaded_data)
}
/* Get a long long from user memory */
-int FPU_load_int64(long long __user * _s)
+int FPU_load_int64(long long __user *_s)
{
long long s;
int sign;
@@ -236,7 +236,7 @@ int FPU_load_int64(long long __user * _s)
}
/* Get a long from user memory */
-int FPU_load_int32(long __user * _s, FPU_REG * loaded_data)
+int FPU_load_int32(long __user *_s, FPU_REG *loaded_data)
{
long s;
int negative;
@@ -265,7 +265,7 @@ int FPU_load_int32(long __user * _s, FPU_REG * loaded_data)
}
/* Get a short from user memory */
-int FPU_load_int16(short __user * _s, FPU_REG * loaded_data)
+int FPU_load_int16(short __user *_s, FPU_REG *loaded_data)
{
int s, negative;
@@ -294,7 +294,7 @@ int FPU_load_int16(short __user * _s, FPU_REG * loaded_data)
}
/* Get a packed bcd array from user memory */
-int FPU_load_bcd(u_char __user * s)
+int FPU_load_bcd(u_char __user *s)
{
FPU_REG *st0_ptr = &st(0);
int pos;
@@ -333,7 +333,7 @@ int FPU_load_bcd(u_char __user * s)
/*===========================================================================*/
/* Put a long double into user memory */
-int FPU_store_extended(FPU_REG * st0_ptr, u_char st0_tag,
+int FPU_store_extended(FPU_REG *st0_ptr, u_char st0_tag,
long double __user * d)
{
/*
@@ -375,7 +375,7 @@ int FPU_store_extended(FPU_REG * st0_ptr, u_char st0_tag,
}
/* Put a double into user memory */
-int FPU_store_double(FPU_REG * st0_ptr, u_char st0_tag, double __user * dfloat)
+int FPU_store_double(FPU_REG *st0_ptr, u_char st0_tag, double __user *dfloat)
{
unsigned long l[2];
unsigned long increment = 0; /* avoid gcc warnings */
@@ -565,7 +565,7 @@ int FPU_store_double(FPU_REG * st0_ptr, u_char st0_tag, double __user * dfloat)
}
/* Put a float into user memory */
-int FPU_store_single(FPU_REG * st0_ptr, u_char st0_tag, float __user * single)
+int FPU_store_single(FPU_REG *st0_ptr, u_char st0_tag, float __user *single)
{
long templ = 0;
unsigned long increment = 0; /* avoid gcc warnings */
@@ -754,7 +754,7 @@ int FPU_store_single(FPU_REG * st0_ptr, u_char st0_tag, float __user * single)
}
/* Put a long long into user memory */
-int FPU_store_int64(FPU_REG * st0_ptr, u_char st0_tag, long long __user * d)
+int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d)
{
FPU_REG t;
long long tll;
@@ -804,7 +804,7 @@ int FPU_store_int64(FPU_REG * st0_ptr, u_char st0_tag, long long __user * d)
}
/* Put a long into user memory */
-int FPU_store_int32(FPU_REG * st0_ptr, u_char st0_tag, long __user * d)
+int FPU_store_int32(FPU_REG *st0_ptr, u_char st0_tag, long __user *d)
{
FPU_REG t;
int precision_loss;
@@ -850,7 +850,7 @@ int FPU_store_int32(FPU_REG * st0_ptr, u_char st0_tag, long __user * d)
}
/* Put a short into user memory */
-int FPU_store_int16(FPU_REG * st0_ptr, u_char st0_tag, short __user * d)
+int FPU_store_int16(FPU_REG *st0_ptr, u_char st0_tag, short __user *d)
{
FPU_REG t;
int precision_loss;
@@ -896,7 +896,7 @@ int FPU_store_int16(FPU_REG * st0_ptr, u_char st0_tag, short __user * d)
}
/* Put a packed bcd array into user memory */
-int FPU_store_bcd(FPU_REG * st0_ptr, u_char st0_tag, u_char __user * d)
+int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
{
FPU_REG t;
unsigned long long ll;
@@ -971,7 +971,7 @@ int FPU_store_bcd(FPU_REG * st0_ptr, u_char st0_tag, u_char __user * d)
/* Overflow is signalled by a non-zero return value (in eax).
In the case of overflow, the returned significand always has the
largest possible value */
-int FPU_round_to_int(FPU_REG * r, u_char tag)
+int FPU_round_to_int(FPU_REG *r, u_char tag)
{
u_char very_big;
unsigned eax;
@@ -1028,7 +1028,7 @@ int FPU_round_to_int(FPU_REG * r, u_char tag)
/*===========================================================================*/
-u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user * s)
+u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user *s)
{
unsigned short tag_word = 0;
u_char tag;
@@ -1121,7 +1121,7 @@ u_char __user *fldenv(fpu_addr_modes addr_modes, u_char __user * s)
return s;
}
-void frstor(fpu_addr_modes addr_modes, u_char __user * data_address)
+void frstor(fpu_addr_modes addr_modes, u_char __user *data_address)
{
int i, regnr;
u_char __user *s = fldenv(addr_modes, data_address);
@@ -1144,7 +1144,7 @@ void frstor(fpu_addr_modes addr_modes, u_char __user * data_address)
}
-u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user * d)
+u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
{
if ((addr_modes.default_mode == VM86) ||
((addr_modes.default_mode == PM16)
@@ -1200,7 +1200,7 @@ u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user * d)
return d;
}
-void fsave(fpu_addr_modes addr_modes, u_char __user * data_address)
+void fsave(fpu_addr_modes addr_modes, u_char __user *data_address)
{
u_char __user *d;
int offset = (top & 7) * 10, other = 80 - offset;