Patches contributed by Eötvös Loránd University


commit e16852cfc5580b88cb327413ab8c89375f380592
Merge: bdff78707f3c 74e7ff8c50b6
Author: Ingo Molnar <mingo@elte.hu>
Date:   Tue Aug 4 13:58:28 2009 +0200

    Merge branch 'tracing/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into tracing/urgent

commit 47cab6a722d44c71c4f8224017ef548522243cf4
Author: Ingo Molnar <mingo@elte.hu>
Date:   Mon Aug 3 09:31:54 2009 +0200

    debug lockups: Improve lockup detection, fix generic arch fallback
    
    As Andrew noted, my previous patch ("debug lockups: Improve lockup
    detection") broke/removed SysRq-L support from architecture that do
    not provide a __trigger_all_cpu_backtrace implementation.
    
    Restore a fallback path and clean up the SysRq-L machinery a bit:
    
     - Rename the arch method to arch_trigger_all_cpu_backtrace()
    
     - Simplify the define
    
     - Document the method a bit - in the hope of more architectures
       adding support for it.
    
    [ The patch touches Sparc code for the rename. ]
    
    Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: "David S. Miller" <davem@davemloft.net>
    LKML-Reference: <20090802140809.7ec4bb6b.akpm@linux-foundation.org>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
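
[ Note (not part of the patch): a minimal standalone sketch of the
  opt-in convention this change introduces, consolidating the arch and
  generic sides shown in the diff below. The function/macro names are
  the kernel's; the printf scaffolding is illustrative only:

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * An architecture that supports it provides the function and
	 * defines the macro to its own name; comment these two out to
	 * see the fallback path being taken instead.
	 */
	static void arch_trigger_all_cpu_backtrace(void)
	{
		printf("arch: NMI all CPUs and dump their stacks\n");
	}
	#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

	/* Generic wrapper, as in include/linux/nmi.h after this patch: */
	#ifdef arch_trigger_all_cpu_backtrace
	static inline bool trigger_all_cpu_backtrace(void)
	{
		arch_trigger_all_cpu_backtrace();
		return true;
	}
	#else
	static inline bool trigger_all_cpu_backtrace(void)
	{
		return false;
	}
	#endif

	int main(void)
	{
		if (!trigger_all_cpu_backtrace())
			printf("no arch support: fall back to workqueue printing\n");
		return 0;
	}

  Callers such as sysrq_handle_showallcpus() check the return value and
  fall back to the old workqueue-based printing when it is false. ]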

diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 1934f2cbf513..a0b443cb3c1f 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94e7724..18d67854a1b8 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-	__trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c86e5ed4af51..e63cf7d441e1 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 static inline void localise_nmi_watchdog(void)
 {
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 1bb1ac20e9ec..db7220220d09 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -554,7 +554,7 @@ int do_nmi_callback(struct pt_regs *regs, int cpu)
 	return 0;
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	int i;
 
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 165f307f30e8..50eecfe1d724 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -223,7 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
 static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
 {
-	trigger_all_cpu_backtrace();
+	/*
+	 * Fall back to the workqueue based printing if the
+	 * backtrace printing did not succeed or the
+	 * architecture has no support for it:
+	 */
+	if (!trigger_all_cpu_backtrace()) {
+		struct pt_regs *regs = get_irq_regs();
+
+		if (regs) {
+			printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+			show_regs(regs);
+		}
+		schedule_work(&sysrq_showallcpus);
+	}
 }
 
 static struct sysrq_key_op sysrq_showallcpus_op = {
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5df097..b752e807adde 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
 
-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace();
+
+	return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+	return false;
+}
 #endif
 
 #endif

commit bcf08df3b23b3d13bf8c4ad6bd744a6ad30015fb
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sat Apr 19 12:11:10 2008 +0200

    sched: Fix cpupri build on !CONFIG_SMP
    
    This build bug:
    
     In file included from kernel/sched.c:1765:
     kernel/sched_rt.c: In function ‘has_pushable_tasks’:
     kernel/sched_rt.c:1069: error: ‘struct rt_rq’ has no member named ‘pushable_tasks’
     kernel/sched_rt.c: In function ‘pick_next_task_rt’:
     kernel/sched_rt.c:1084: error: ‘struct rq’ has no member named ‘post_schedule’
    
    Triggers because both pushable_tasks and post_schedule are
    SMP-only fields.
    
    Move has_pushable_tasks() to the SMP section and #ifdef the post_schedule use.
    
    Cc: Gregory Haskins <ghaskins@novell.com>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    LKML-Reference: <20090729150422.17691.55590.stgit@dev.haskins.net>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
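
[ Note (not part of the patch): a standalone sketch, in plain C rather
  than kernel code, of why the build only breaks with CONFIG_SMP unset
  and how the #ifdef guard added below avoids it. The struct and
  function names are stand-ins, not the kernel's actual definitions:

	#include <stdio.h>

	/* Toggle this to mimic CONFIG_SMP=y vs. CONFIG_SMP=n builds: */
	#define CONFIG_SMP 1

	struct rq_sketch {
		int nr_running;
	#ifdef CONFIG_SMP
		int post_schedule;	/* exists only on SMP, as in struct rq */
	#endif
	};

	static inline int has_pushable_tasks_sketch(struct rq_sketch *rq)
	{
		(void)rq;
		return 0;	/* stand-in; the real check inspects a plist */
	}

	static void pick_next_task_sketch(struct rq_sketch *rq)
	{
	#ifdef CONFIG_SMP
		/* Touch the SMP-only field only when it actually exists: */
		rq->post_schedule = has_pushable_tasks_sketch(rq);
	#else
		(void)rq;
	#endif
	}

	int main(void)
	{
		struct rq_sketch rq = { .nr_running = 0 };

		pick_next_task_sketch(&rq);
		printf("builds either way once the use is guarded\n");
		return 0;
	}

  Without the #ifdef around the assignment, a CONFIG_SMP-less build
  fails exactly as in the error output above, because the member is
  simply absent. ]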

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f365e66b3d49..3d4020a9ba1b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -136,6 +136,11 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 #else
 
 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -1064,11 +1069,6 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 	return p;
 }
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-	return !plist_head_empty(&rq->rt.pushable_tasks);
-}
-
 static struct task_struct *pick_next_task_rt(struct rq *rq)
 {
 	struct task_struct *p = _pick_next_task_rt(rq);
@@ -1077,11 +1077,13 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
+#ifdef CONFIG_SMP
 	/*
 	 * We detect this state here so that we can avoid taking the RQ
 	 * lock again later if there is no need to push
 	 */
 	rq->post_schedule = has_pushable_tasks(rq);
+#endif
 
 	return p;
 }

commit bbfa26229a8143889e95e0df4a9d69067ee836cd
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sun Aug 2 14:44:24 2009 +0200

    lockdep: Fix BFS build
    
    Fix:
    
      kernel/built-in.o: In function `lockdep_stats_show':
      lockdep_proc.c:(.text+0x48202): undefined reference to `max_bfs_queue_depth'
    
    max_bfs_queue_depth is only available under
    CONFIG_PROVE_LOCKING=y, so only print it in that case.
    
    Cc: Ming Lei <tom.leiming@gmail.com>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    LKML-Reference: <new-submission>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 9a1bf34d2ff6..fba81f16e346 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -411,8 +411,10 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 			max_lockdep_depth);
 	seq_printf(m, " max recursion depth:           %11u\n",
 			max_recursion_depth);
+#ifdef CONFIG_PROVE_LOCKING
 	seq_printf(m, " max bfs queue depth:           %11u\n",
 			max_bfs_queue_depth);
+#endif
 	lockdep_stats_debug_show(m);
 	seq_printf(m, " debug_locks:                   %11u\n",
 			debug_locks);

commit 8e9ed8b02490fea577b1eb1704c05bf43c891ed7
Merge: 716a42348cda 07903af152b0
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sun Aug 2 14:11:26 2009 +0200

    Merge branch 'sched/urgent' into sched/core
    
    Merge reason: avoid upcoming patch conflict.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit c1dc0b9c0c8979ce4d411caadff5c0d79dee58bc
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sun Aug 2 11:28:21 2009 +0200

    debug lockups: Improve lockup detection
    
    When debugging a recent lockup bug I found various deficiencies
    in how our current lockup detection helpers work:
    
     - SysRq-L is not very efficient as it uses a workqueue, hence
       it cannot punch through hard lockups and cannot see through
       most soft lockups either.
    
     - The SysRq-L code depends on the NMI watchdog - which is off
       by default.
    
     - We don't print backtraces from the RCU code's built-in
       'RCU state machine is stuck' debug code. This debug
       code tends to be one of the first (and only) mechanisms
       that show that a lockup has occurred.
    
    This patch changes the code so that we:
    
     - Trigger the NMI backtrace code from SysRq-L instead of using
       a workqueue (which cannot punch through hard lockups)
    
     - Trigger print-all-CPU-backtraces from the RCU lockup detection
       code
    
    Also decouple the backtrace printing code from the NMI watchdog:
    
     - Don't use variable-size cpumasks (they might not be initialized
       and are a bit more fragile anyway)
    
     - Trigger an NMI immediately via an IPI, instead of waiting
       for the NMI tick to occur. This is a lot faster and can
       produce more relevant backtraces. It will also work if the
       NMI watchdog is disabled.
    
     - Don't print the 'dazed and confused' message when we print
       a backtrace from the NMI
    
     - Do a show_regs() plus a dump_stack() to get maximum info
       out of the dump. Worst-case we get two stacktraces - which
       is not a big deal. Sometimes, if register content is
       corrupted, the precise stack walker in show_regs() won't
       give us a full backtrace - in this case dump_stack() will
       do it.
    
    Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    LKML-Reference: <new-submission>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index b3025b43b63a..1bb1ac20e9ec 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -39,7 +39,7 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
 
-static cpumask_var_t backtrace_mask;
+static cpumask_t backtrace_mask __read_mostly;
 
 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void)
 	if (!prev_nmi_count)
 		goto error;
 
-	alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 	}
 
 	/* We can be called before check_nmi_watchdog, hence NULL check. */
-	if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
+	if (cpumask_test_cpu(cpu, &backtrace_mask)) {
 		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
 		spin_lock(&lock);
 		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
+		show_regs(regs);
 		dump_stack();
 		spin_unlock(&lock);
-		cpumask_clear_cpu(cpu, backtrace_mask);
+		cpumask_clear_cpu(cpu, &backtrace_mask);
+
+		rc = 1;
 	}
 
 	/* Could check oops_in_progress here too, but it's safer not to */
@@ -556,10 +558,14 @@ void __trigger_all_cpu_backtrace(void)
 {
 	int i;
 
-	cpumask_copy(backtrace_mask, cpu_online_mask);
+	cpumask_copy(&backtrace_mask, cpu_online_mask);
+
+	printk(KERN_INFO "sending NMI to all CPUs:\n");
+	apic->send_IPI_all(NMI_VECTOR);
+
 	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
 	for (i = 0; i < 10 * 1000; i++) {
-		if (cpumask_empty(backtrace_mask))
+		if (cpumask_empty(&backtrace_mask))
 			break;
 		mdelay(1);
 	}
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5d7a02f63e1c..165f307f30e8 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -24,6 +24,7 @@
 #include <linux/sysrq.h>
 #include <linux/kbd_kern.h>
 #include <linux/proc_fs.h>
+#include <linux/nmi.h>
 #include <linux/quotaops.h>
 #include <linux/perf_counter.h>
 #include <linux/kernel.h>
@@ -222,12 +223,7 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
 static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
 {
-	struct pt_regs *regs = get_irq_regs();
-	if (regs) {
-		printk(KERN_INFO "CPU%d:\n", smp_processor_id());
-		show_regs(regs);
-	}
-	schedule_work(&sysrq_showallcpus);
+	trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sysrq_showallcpus_op = {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7717b95c2027..9c5fa9fc57ec 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -35,6 +35,7 @@
 #include <linux/rcupdate.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/nmi.h>
 #include <asm/atomic.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
@@ -469,6 +470,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	}
 	printk(" (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
+	trigger_all_cpu_backtrace();
+
 	force_quiescent_state(rsp, 0);  /* Kick them all. */
 }
 
@@ -479,12 +482,14 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 	printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
 			smp_processor_id(), jiffies - rsp->gp_start);
-	dump_stack();
+	trigger_all_cpu_backtrace();
+
 	spin_lock_irqsave(&rnp->lock, flags);
 	if ((long)(jiffies - rsp->jiffies_stall) >= 0)
 		rsp->jiffies_stall =
 			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
 	spin_unlock_irqrestore(&rnp->lock, flags);
+
 	set_need_resched();  /* kick ourselves to get things going. */
 }
 

commit 2d1b6949d2c855f195de0f5146625015ecca3944
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sat Aug 1 13:15:36 2009 +0200

    perf_counter tools: Fix link errors with older toolchains
    
    On older distros (F8 for example) the perf build could fail
    with missing symbols such as:
    
        LINK perf
    /usr/lib/gcc/x86_64-redhat-linux/4.3.2/../../../../lib64/libbfd.a(bfd.o): In function `bfd_demangle':
    (.text+0x2b3): undefined reference to `cplus_demangle'
    /usr/lib/gcc/x86_64-redhat-linux/4.3.2/../../../../lib64/libbfd.a(bfd.o): In function `bfd_demangle':
    
    Link in -liberty too.
    
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Mike Galbraith <efault@gmx.de>
    Cc: Paul Mackerras <paulus@samba.org>
    Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
    Cc: Frederic Weisbecker <fweisbec@gmail.com>
    LKML-Reference: <new-submission>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
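
[ Note (not part of the patch): the unresolved symbol comes from
  libbfd's bfd_demangle() calling cplus_demangle(), which lives in
  libiberty rather than libbfd. A minimal standalone check, assuming a
  binutils-dev/libiberty install; the prototype and option values are
  written here on the assumption that they match libiberty's
  demangle.h:

	#include <stdio.h>
	#include <stdlib.h>

	/* Prototype as declared in libiberty's demangle.h (assumed): */
	extern char *cplus_demangle(const char *mangled, int options);

	/* Assumed to equal DMGL_PARAMS | DMGL_ANSI from demangle.h: */
	#define DEMANGLE_OPTS (1 | 2)

	int main(void)
	{
		const char *mangled = "_ZN3foo3barEv";
		char *plain = cplus_demangle(mangled, DEMANGLE_OPTS);

		printf("%s -> %s\n", mangled, plain ? plain : "(not demangled)");
		free(plain);
		return 0;
	}

  Linking this with only -lbfd (or nothing at all) leaves
  cplus_demangle unresolved, just like the perf link above; adding
  -liberty resolves it, which is what the one-line Makefile change
  below does. ]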

diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index a5e9b876ca09..4b20fa47c3ab 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -345,7 +345,7 @@ BUILTIN_OBJS += builtin-stat.o
 BUILTIN_OBJS += builtin-top.o
 
 PERFLIBS = $(LIB_FILE)
-EXTLIBS = -lbfd
+EXTLIBS = -lbfd -liberty
 
 #
 # Platform specific tweaks

commit 5304d5fc74a269cc6c3e70f9713684ca729abdf0
Merge: 54d35f29f492 78af08d90b8f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sat Jul 18 15:50:22 2009 +0200

    Merge branch 'linus' into sched/core
    
    Merge reason: the branch had an old upstream base (-rc1-ish); we
                  also merge to avoid a conflict.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit 45bceffc3013bda7d2ebc7802e9b153b674e2d44
Merge: 6f2f3cf00ee3 78af08d90b8f
Author: Ingo Molnar <mingo@elte.hu>
Date:   Sat Jul 18 12:19:57 2009 +0200

    Merge branch 'linus' into tracing/core
    
    Merge reason: tracing/core was on an older, pre-rc1 base.
    
    Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit f39d1b9792881ce4eb982ec8cc65258bf95674b5
Author: Ingo Molnar <mingo@elte.hu>
Date:   Fri Jul 10 21:38:02 2009 +0200

    dma-debug: Fix the overlap() function to be correct and readable
    
    Linus noticed how unclean and buggy the overlap() function is:
    
     - It uses convoluted (and bug-causing) positive checks for
       range overlap - instead of using a more natural negative
       check.
    
     - Even the positive checks are buggy: a positive intersection
       check has four natural cases while we checked only for three,
       missing the (addr < start && addr2 == end) case for example.
    
     - The variables are mis-named, making it non-obvious how the
       check was done.
    
     - It needlessly uses u64 instead of unsigned long. Since these
       are kernel memory pointers and we explicitly exclude highmem
       ranges anyway, we cannot overflow 32 bits. (And on 64-bit it
       doesn't matter anyway.)
    
    All in all, this function needs a total revamp. I used Linus's
    suggestions minus the paranoid checks (we cannot really overflow,
    because if totally bad DMA ranges get passed in, far more things
    break in the system than just DMA debugging). I also fixed a
    few other small details I noticed.
    
    Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Joerg Roedel <joerg.roedel@amd.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
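
[ Note (not part of the patch): the half-open interval logic used in
  the new overlap() below can be sanity-checked in isolation. A
  standalone sketch in plain C; the helper name and the test values
  are made up for illustration:

	#include <assert.h>
	#include <stdio.h>

	/*
	 * Same predicate as the new overlap(), on plain integers:
	 * [a1, b1) and [a2, b2) intersect unless one range ends at or
	 * before the point where the other starts.
	 */
	static int ranges_overlap(unsigned long a1, unsigned long b1,
				  unsigned long a2, unsigned long b2)
	{
		return !(b1 <= a2 || a1 >= b2);
	}

	int main(void)
	{
		/* Protected region, a stand-in for e.g. [_text, _etext): */
		unsigned long start = 100, end = 200;

		assert( ranges_overlap(150, 160, start, end)); /* fully inside */
		assert( ranges_overlap( 90, 110, start, end)); /* overlaps the head */
		assert( ranges_overlap(190, 210, start, end)); /* overlaps the tail */
		assert( ranges_overlap( 50, 250, start, end)); /* spans the region */
		assert( ranges_overlap( 50, 200, start, end)); /* the case the old code missed */
		assert(!ranges_overlap( 10,  50, start, end)); /* entirely before */
		assert(!ranges_overlap(250, 300, start, end)); /* entirely after */
		assert(!ranges_overlap( 50, 100, start, end)); /* only touches the start */

		printf("all overlap cases behave as expected\n");
		return 0;
	}

  The old positive checks tested three "is an endpoint inside?" cases
  and could miss legitimate intersections; the single negative check
  covers all of them at once. ]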

diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index c9187fed0b93..65b0d99b6d0a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -856,22 +856,21 @@ static void check_for_stack(struct device *dev, void *addr)
 				"stack [addr=%p]\n", addr);
 }
 
-static inline bool overlap(void *addr, u64 size, void *start, void *end)
+static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
 {
-	void *addr2 = (char *)addr + size;
+	unsigned long a1 = (unsigned long)addr;
+	unsigned long b1 = a1 + len;
+	unsigned long a2 = (unsigned long)start;
+	unsigned long b2 = (unsigned long)end;
 
-	return ((addr >= start && addr < end) ||
-		(addr2 >= start && addr2 < end) ||
-		((addr < start) && (addr2 > end)));
+	return !(b1 <= a2 || a1 >= b2);
 }
 
-static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
+static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, size, _text, _etext) ||
-	    overlap(addr, size, __start_rodata, __end_rodata))
-		err_printk(dev, NULL, "DMA-API: device driver maps "
-				"memory from kernel text or rodata "
-				"[addr=%p] [size=%llu]\n", addr, size);
+	if (overlap(addr, len, _text, _etext) ||
+	    overlap(addr, len, __start_rodata, __end_rodata))
+		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }
 
 static void check_sync(struct device *dev,
@@ -969,7 +968,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 		entry->type = dma_debug_single;
 
 	if (!PageHighMem(page)) {
-		void *addr = ((char *)page_address(page)) + offset;
+		void *addr = page_address(page) + offset;
+
 		check_for_stack(dev, addr);
 		check_for_illegal_area(dev, addr, size);
 	}