This is the mail archive of the systemtap@sourceware.org mailing list for the systemtap project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

RE: [Patch 2/3][Djprobe] Djprobe update for linux-2.6.14-mm1


>>-----Original Message-----
>>From: systemtap-owner@sourceware.org [mailto:systemtap-owner@sourceware.org]
>>On Behalf Of Masami Hiramatsu
>>Sent: 2005年11月8日 21:26
>>To: systemtap@sources.redhat.com
>>Cc: Satoshi Oshima; Yumiko Sugita; Hideo Aoki
>>Subject: [Patch 2/3][Djprobe] Djprobe update for linux-2.6.14-mm1
>>
>>Hi,
>>
>>This patch is the architecture-independent part of djprobe.
>>+static inline
>>+    struct djprobe_instance *__create_djprobe_instance(struct djprobe *djp,
>>+						       void *addr, int size)
>>+{
>>+	struct djprobe_instance *djpi;
>>+	/* allocate a new instance */
>>+	djpi = kcalloc(1, sizeof(struct djprobe_instance), GFP_ATOMIC);
>>+	if (djpi == NULL) {
>>+		goto out;
>>+	}
>>+	/* allocate stub */
>>+	djpi->stub.insn = __get_insn_slot(&djprobe_insn_pages);
>>+	if (djpi->stub.insn == NULL) {
[YM] If we reach this point, djpi->plist has not been initialized, so __free_djprobe_instance => hlist_del will cause a panic. How about moving INIT_LIST_HEAD(&djpi->plist) to just after the kcalloc?




>>+		__free_djprobe_instance(djpi);
>>+		djpi = NULL;
>>+		goto out;
>>+	}
>>+
>>+	/* attach */
>>+	djp->inst = djpi;
>>+	INIT_LIST_HEAD(&djpi->plist);
>>+	list_add_rcu(&djp->plist, &djpi->plist);
>>+	djpi->kp.addr = addr;
>>+	djpi->kp.pre_handler = djprobe_pre_handler;
>>+	arch_prepare_djprobe_instance(djpi, size);
>>+
>>+	INIT_HLIST_NODE(&djpi->hlist);
>>+	hlist_add_head(&djpi->hlist,
>>&djprobe_inst_table[hash_djprobe(addr)]);
>>+      out:
>>+	return djpi;
>>+}
>>+
>>+static struct djprobe_instance *__kprobes __get_djprobe_instance(void
>>*addr,
>>+								 int size)
>>+{
>>+	struct djprobe_instance *djpi;
>>+	struct hlist_node *node;
>>+	unsigned long idx, eidx;
>>+
>>+	idx = hash_djprobe(addr - ARCH_STUB_INSN_MAX);
>>+	eidx = ((hash_djprobe(addr + size) + 1) & DJPROBE_TABLE_MASK);
>>+	do {
>>+		hlist_for_each_entry(djpi, node, &djprobe_inst_table[idx],
>>+				     hlist) {
>>+			if (((long)addr <
>>+			     (long)djpi->kp.addr + DJPI_ARCH_SIZE(djpi))
>>+			    && ((long)djpi->kp.addr < (long)addr + size)) {
>>+				return djpi;
>>+			}
>>+		}
>>+		idx = ((idx + 1) & DJPROBE_TABLE_MASK);
>>+	}while (idx != eidx);
>>+
>>+	return NULL;
>>+}
>>+
>>+struct djprobe_instance *__kprobes get_djprobe_instance(void *addr, int
>>size)
>>+{
>>+	struct djprobe_instance *djpi;
>>+	down(&djprobe_mutex);
>>+	djpi = __get_djprobe_instance(addr, size);
>>+	up(&djprobe_mutex);
>>+	return djpi;
>>+}
>>+
>>+/* This work function invoked while djprobe_mutex is locked. */
>>+static void __kprobes __work_check_safety(void *data)
>>+{
>>+	if (atomic_dec_and_test(&djprobe_count)) {
>>+		wake_up_all(&djprobe_wqh);
>>+	}
>>+}
>>+
>>+static void __kprobes __check_safety(void)
>>+{
>>+	int cpu;
>>+	struct work_struct *wk;
>>+	lock_cpu_hotplug();
>>+	atomic_set(&djprobe_count, num_online_cpus() - 1);
>>+	for_each_online_cpu(cpu) {
>>+		if (cpu == smp_processor_id())
>>+			continue;
>>+		wk = &per_cpu(djprobe_works, cpu);
>>+		INIT_WORK(wk, __work_check_safety, NULL);
>>+		schedule_delayed_work_on(cpu, wk, 0);
>>+	}
>>+	wait_event(djprobe_wqh, (atomic_read(&djprobe_count) == 0));
>>+	unlock_cpu_hotplug();
>>+}
>>+
>>+int __kprobes register_djprobe(struct djprobe *djp, void *addr, int size)
>>+{
>>+	struct djprobe_instance *djpi;
>>+	struct kprobe *kp;
>>+	int ret = 0, i;
>>+
>>+	BUG_ON(in_interrupt());
>>+
>>+	if (size > ARCH_STUB_INSN_MAX || size < ARCH_STUB_INSN_MIN)
>>+		return -EINVAL;
>>+
>>+	if ((ret = in_kprobes_functions((unsigned long)addr)) != 0)
>>+		return ret;
>>+
>>+	down(&djprobe_mutex);
>>+	INIT_LIST_HEAD(&djp->plist);
>>+	/* check confliction with other djprobes */
>>+	djpi = __get_djprobe_instance(addr, size);
>>+	if (djpi) {
>>+		if (djpi->kp.addr == addr) {
>>+			djp->inst = djpi;	/* add to another instance */
>>+			list_add_rcu(&djp->plist, &djpi->plist);
>>+		} else {
>>+			ret = -EEXIST;	/* other djprobes were inserted */
>>+		}
>>+		goto out;
>>+	}
>>+	djpi = __create_djprobe_instance(djp, addr, size);
>>+	if (djpi == NULL) {
>>+		ret = -ENOMEM;
>>+		goto out;
>>+	}
>>+
>>+	/* check confliction with kprobes */
>>+	for (i = 0; i < size; i++) {
>>+		kp = get_kprobe((void *)((long)addr + i));
[YM] There is a race between get_kprobe and register_kprobe because kprobe_lock is not held here. Could register_kprobe check whether the address falls within the JTPR of a registered djprobe? I think djprobe and kprobes could share the same spinlock, namely kprobe_lock.



>>+		if (kp != NULL) {
>>+			ret = -EEXIST;	/* a kprobes were inserted */
>>+			goto fail;
>>+		}
>>+	}
>>+	ret = register_kprobe(&djpi->kp);
>>+	if (ret < 0) {
>>+       fail:
>>+		djpi->kp.addr = NULL;
>>+		djp->inst = NULL;
>>+		list_del_rcu(&djp->plist);
>>+		__free_djprobe_instance(djpi);
>>+	} else {
>>+		__check_safety();
>>+		arch_install_djprobe_instance(djpi);
>>+	}
>>+       out:
>>+	up(&djprobe_mutex);
>>+	return ret;


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]