On 07/09, Oleg Nesterov wrote: > > Currently build_probe_list() builds the list of all uprobes attached > to the given inode, and the caller should filter out those that don't > fall into the [start,end) range; this is sub-optimal. > > This patch turns find_least_offset_node() into find_node_in_range() > which returns the first node inside the [min,max] range, and changes > build_probe_list() to use this node as a starting point for rb_prev() > and rb_next() to find all other nodes the caller needs. The resulting > list is no longer sorted but we do not care. > > This can speed up both build_probe_list() and the callers, but there > is another reason to introduce find_node_in_range(). It can be used > to figure out whether the given vma has uprobes or not; this will be > needed soon.
I guess it is not easy to read this patch without applying, I am sending
the code to simplify the review.
----------------------------------------------------------------------
/*
 * Return any rb-tree node whose uprobe matches @inode and whose offset
 * lies within [@min, @max] (both inclusive), or NULL if there is none.
 * The tree is ordered by (inode, offset) — the inode pointers themselves
 * are compared first, then the offsets — so the walk descends on inode
 * until it hits the matching-inode subtree, then on offset.
 * Called under uprobes_treelock (see build_probe_list()).
 */
static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			/* Same inode: narrow by offset until one lands in range. */
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;	/* min <= u->offset <= max: found */
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 *
 * [start, end) in the vma is translated into the corresponding range of
 * file offsets [min, max] (max inclusive, hence the "- 1").  Any node in
 * that range found by find_node_in_range() is used as a pivot; rb_prev()
 * and rb_next() then sweep out all remaining in-range nodes on either
 * side.  The resulting list is NOT sorted by offset.  A reference is
 * taken on every uprobe placed on @head.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	unsigned long flags;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	/* file offset of 'start' inside the mapped file */
	min = ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + start - vma->vm_start;
	max = min + (end - start) - 1;

	spin_lock_irqsave(&uprobes_treelock, flags);
	n = find_node_in_range(inode, min, max);
	if (n) {
		/* leftward sweep: includes the pivot node n itself */
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);	/* pin uprobe while on the list */
		}
		/* rightward sweep: rb_next() first, so the pivot is not re-added */
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
	}
	spin_unlock_irqrestore(&uprobes_treelock, flags);
}
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/