
[RFC][PATCH] Atomically insert/remove probes


Hi All,
 On many architectures it is not possible to insert or remove a probe
atomically, because the size of the instruction to be patched can exceed
the atomic write width of the architecture. Hence there is always a race:
while cpuN is inserting or removing a probe at some address, cpuM might be
fetching or executing the instruction at that same address. If cpuM sees
the old instruction there is no issue, but if it fetches the partially
updated instruction the result is unpredictable, since what cpuM ends up
executing could be anything from a random instruction to an illegal one.
(IA64, for example, groups instructions into 16-byte bundles, wider than
any single atomic store.)
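
To make the hazard concrete, here is a minimal hypothetical sketch (not
part of the patch): a 16-byte instruction image replaced by two plain
8-byte stores can be observed half-updated by a concurrently fetching CPU:

	/*
	 * Hypothetical illustration only.  Nothing orders the two
	 * stores against a concurrent instruction fetch, so cpuM can
	 * see a torn image: half new bytes, half old.
	 */
	static void unsafe_patch_bundle(u64 *image, u64 new_lo, u64 new_hi)
	{
		image[0] = new_lo;	/* cpuM may fetch right here */
		image[1] = new_hi;
	}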

The attached patch is an attempt at a solution: before patching the
original instruction, all CPUs except the patching one are parked in a
known spin loop, and once the instruction has been patched they are all
released to resume their work.
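
In outline, the arming path then becomes (using the names introduced in
the patch below):

	struct kprobe_arm_info arm_info;

	pre_arm_kprobe(&arm_info);	/* park every other CPU */
	arch_arm_kprobe(p);		/* patch with no other fetchers */
	post_arm_kprobe(&arm_info);	/* publish and release the CPUs */

Note that pre_arm_kprobe() issues the cross-CPU call with wait == 0: the
handlers deliberately do not return until post_arm_kprobe() releases them,
so waiting for them to finish would deadlock.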

The attached patch has been tested only on IA64.
Please provide your feedback/comments.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>

Index: linux-2.6.12-rc4/kernel/kprobes.c
===================================================================
--- linux-2.6.12-rc4.orig/kernel/kprobes.c	2005-05-22 23:02:58.144841429 -0700
+++ linux-2.6.12-rc4/kernel/kprobes.c	2005-05-22 23:03:05.176091343 -0700
@@ -300,36 +300,82 @@
 	return ret;
 }
 
+static void call_function_kprobes(void *data)
+{
+	struct kprobe_arm_info *arm_info = data;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	/* Spin with interrupts off until the patching CPU signals done. */
+	while (!atomic_read(&arm_info->done_inst_patching))
+		cpu_relax();
+	mb();
+	atomic_inc(&arm_info->total_cpu);
+	local_irq_restore(flags);
+}
+
+void pre_arm_kprobe(struct kprobe_arm_info *arm_info)
+{
+	int cpus = num_online_cpus() - 1;
+
+	/* Nothing to do on a uniprocessor system. */
+	if (!cpus)
+		return;
+
+	atomic_set(&arm_info->done_inst_patching, 0);
+	atomic_set(&arm_info->total_cpu, 0);
+
+	/* Don't wait for the IPI handlers; they spin until released. */
+	smp_call_function(call_function_kprobes, (void *)arm_info, 0, 0);
+}
+
+void post_arm_kprobe(struct kprobe_arm_info *arm_info)
+{
+	int cpus = num_online_cpus() - 1;
+
+	if (!cpus)
+		return;
+	/* Make the patched instruction visible, then release the CPUs. */
+	mb();
+	atomic_set(&arm_info->done_inst_patching, 1);
+	while (atomic_read(&arm_info->total_cpu) != cpus)
+		cpu_relax();
+}
+
 /* kprobe removal house-keeping routines */
-static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
+static inline void cleanup_kprobe(struct kprobe *p)
 {
+	struct kprobe_arm_info arm_info;
+
+	pre_arm_kprobe(&arm_info);
 	arch_disarm_kprobe(p);
+	post_arm_kprobe(&arm_info);
 	hlist_del(&p->hlist);
-	spin_unlock_irqrestore(&kprobe_lock, flags);
+	spin_unlock(&kprobe_lock);
 	arch_remove_kprobe(p);
 }
 
 static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
-		struct kprobe *p, unsigned long flags)
+		struct kprobe *p)
 {
 	list_del(&p->list);
 	if (list_empty(&old_p->list)) {
-		cleanup_kprobe(old_p, flags);
+		cleanup_kprobe(old_p);
 		kfree(old_p);
 	} else
-		spin_unlock_irqrestore(&kprobe_lock, flags);
+		spin_unlock(&kprobe_lock);
 }
 
 int register_kprobe(struct kprobe *p)
 {
 	int ret = 0;
-	unsigned long flags = 0;
 	struct kprobe *old_p;
+	struct kprobe_arm_info arm_info;
 
 	if ((ret = arch_prepare_kprobe(p)) != 0) {
 		goto rm_kprobe;
 	}
-	spin_lock_irqsave(&kprobe_lock, flags);
+	spin_lock(&kprobe_lock);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
@@ -341,10 +387,11 @@
 	hlist_add_head(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
+	pre_arm_kprobe(&arm_info);
   	arch_arm_kprobe(p);
-
+	post_arm_kprobe(&arm_info);
 out:
-	spin_unlock_irqrestore(&kprobe_lock, flags);
+	spin_unlock(&kprobe_lock);
 rm_kprobe:
 	if (ret == -EEXIST)
 		arch_remove_kprobe(p);
@@ -353,18 +400,17 @@
 
 void unregister_kprobe(struct kprobe *p)
 {
-	unsigned long flags;
 	struct kprobe *old_p;
 
-	spin_lock_irqsave(&kprobe_lock, flags);
+	spin_lock(&kprobe_lock);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		if (old_p->pre_handler == aggr_pre_handler)
-			cleanup_aggr_kprobe(old_p, p, flags);
+			cleanup_aggr_kprobe(old_p, p);
 		else
-			cleanup_kprobe(p, flags);
+			cleanup_kprobe(p);
 	} else
-		spin_unlock_irqrestore(&kprobe_lock, flags);
+		spin_unlock(&kprobe_lock);
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
Index: linux-2.6.12-rc4/include/linux/kprobes.h
===================================================================
--- linux-2.6.12-rc4.orig/include/linux/kprobes.h	2005-05-22 23:02:58.145817991 -0700
+++ linux-2.6.12-rc4/include/linux/kprobes.h	2005-05-22 23:03:05.177067905 -0700
@@ -153,6 +153,10 @@
 };
 
 #ifdef CONFIG_KPROBES
+struct kprobe_arm_info {
+	atomic_t done_inst_patching;	/* set once patching is complete */
+	atomic_t total_cpu;		/* parked CPUs that have resumed */
+};
 /* Locks kprobe: irq must be disabled */
 void lock_kprobes(void);
 void unlock_kprobes(void);


