[SPARC64]: Add kretprobe support.
author David S. Miller <davem@huronp11.davemloft.net>
Sat, 9 Feb 2008 11:40:55 +0000 (03:40 -0800)
committer David S. Miller <davem@davemloft.net>
Sat, 9 Feb 2008 11:42:22 +0000 (03:42 -0800)
Passes the smoke tests, at least; the powerpc implementation was used
as a guide.

Signed-off-by: David S. Miller <davem@davemloft.net>
arch/sparc64/kernel/kprobes.c
include/asm-sparc64/kprobes.h

index d94f901d321e91423e25f26fee5ccf3f57907c8f..34fc3ddd50026dbbc802ded84b7329382f76c17f 100644 (file)
@@ -480,8 +480,117 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        return 0;
 }
 
-/* architecture specific initialization */
-int arch_init_kprobes(void)
+/* Called with kretprobe_lock held.  The value stored in the return
+ * address register is actually 2 instructions before where the
+ * callee will return to.  Sequences usually look something like this:
+ *
+ *             call    some_function   <--- return register points here
+ *              nop                    <--- call delay slot
+ *             whatever                <--- where callee returns to
+ *
+ * To keep trampoline_probe_handler logic simpler, we normalize the
+ * value kept in ri->ret_addr so we don't need to keep adjusting it
+ * back and forth.
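+ *
+ * Worked example (addresses illustrative): if the call sits at
+ * 0x1000, the return register holds 0x1000 while the callee really
+ * returns to 0x1008, so ri->ret_addr is recorded as the register
+ * value + 8; conversely, stashing kretprobe_trampoline - 8 in the
+ * register makes the callee's "jmpl %reg + 8" style return land
+ * exactly on the trampoline.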
+ */
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+                                     struct pt_regs *regs)
+{
+       ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);
+
+       /* Replace the return addr with trampoline addr */
+       regs->u_regs[UREG_RETPC] =
+               ((unsigned long)kretprobe_trampoline) - 8;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct kretprobe_instance *ri = NULL;
+       struct hlist_head *head, empty_rp;
+       struct hlist_node *node, *tmp;
+       unsigned long flags, orig_ret_address = 0;
+       unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+       INIT_HLIST_HEAD(&empty_rp);
+       spin_lock_irqsave(&kretprobe_lock, flags);
+       head = kretprobe_inst_table_head(current);
+
+       /*
+        * It is possible to have multiple instances associated with a given
+        * task either because multiple functions in the call path have
+        * a return probe installed on them, and/or more than one return
+        * probe was registered for a target function.
+        *
+        * We can handle this because:
+        *     - instances are always inserted at the head of the list
+        *     - when multiple return probes are registered for the same
+        *       function, the first instance's ret_addr will point to the
+        *       real return address, and all the rest will point to
+        *       kretprobe_trampoline
+        */
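+       /*
+        * For example, with return probes on both foo() and bar(), where
+        * foo() calls bar(): this task's list has bar()'s instance at the
+        * head and foo()'s below it, so the trampoline hit for bar()
+        * consumes only the head entry and leaves foo()'s for later.
+        */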
+       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+
+               if (ri->rp && ri->rp->handler)
+                       ri->rp->handler(ri, regs);
+
+               orig_ret_address = (unsigned long)ri->ret_addr;
+               recycle_rp_inst(ri, &empty_rp);
+
+               if (orig_ret_address != trampoline_address)
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+       }
+
+       kretprobe_assert(ri, orig_ret_address, trampoline_address);
+       regs->tpc = orig_ret_address;
+       regs->tnpc = orig_ret_address + 4;
+
+       reset_current_kprobe();
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
+       preempt_enable_no_resched();
+
+       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+               hlist_del(&ri->hlist);
+               kfree(ri);
+       }
+       /*
+        * By returning a non-zero value, we are telling
+        * kprobe_handler() that we don't want the post_handler
+        * to run (and have re-enabled preemption)
+        */
+       return 1;
+}
+
+void kretprobe_trampoline_holder(void)
+{
+       asm volatile(".global kretprobe_trampoline\n"
+                    "kretprobe_trampoline:\n"
+                    "\tnop\n"
+                    "\tnop\n");
+}
+
+static struct kprobe trampoline_p = {
+       .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+       .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
 {
+       return register_kprobe(&trampoline_p);
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+       if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+               return 1;
+
        return 0;
 }
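
To see this machinery from the user side, here is a minimal sketch of a
module that registers a return probe. The probed symbol ("do_fork") and
the module boilerplate are illustrative assumptions, not part of this
commit; any probeable function would do:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/kprobes.h>

    /* Runs when the probed function returns; by this point
     * trampoline_probe_handler has recovered the real return address.
     */
    static int ret_handler(struct kretprobe_instance *ri,
                           struct pt_regs *regs)
    {
            printk(KERN_INFO "probed function returned to %p\n",
                   ri->ret_addr);
            return 0;
    }

    static struct kretprobe my_kretprobe = {
            .handler        = ret_handler,
            .kp.symbol_name = "do_fork",    /* assumed target symbol */
            .maxactive      = 16,           /* concurrent instance cap */
    };

    static int __init kret_init(void)
    {
            return register_kretprobe(&my_kretprobe);
    }

    static void __exit kret_exit(void)
    {
            unregister_kretprobe(&my_kretprobe);
    }

    module_init(kret_init);
    module_exit(kret_exit);
    MODULE_LICENSE("GPL");

On sparc64, loading this drives arch_prepare_kretprobe at each entry to
the probed function and trampoline_probe_handler when it returns.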
index 5020eaf67c292d10c34831cb71ce5ec421a6e8fd..7237dd87663ec4857f9ae957c54acad25831745e 100644 (file)
@@ -14,11 +14,15 @@ typedef u32 kprobe_opcode_t;
 
 #define arch_remove_kprobe(p)  do {} while (0)
 
+#define ARCH_SUPPORTS_KRETPROBES
+
 #define flush_insn_slot(p)             \
 do {   flushi(&(p)->ainsn.insn[0]);    \
        flushi(&(p)->ainsn.insn[1]);    \
 } while (0)
 
+void kretprobe_trampoline(void);
+
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
        /* copy of the original instruction */