diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index 330d32d84485b953944adf2b99e2e627b77dab0a..980cb7a74e358bc4298ffcd98104829f504605ba 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -53,14 +53,14 @@ TRACE_EVENT(qdisc_reset,
 	TP_ARGS(q),
 
 	TP_STRUCT__entry(
-		__string(	dev,	qdisc_dev(q)	)
-		__string(	kind,	q->ops->id	)
-		__field(	u32,	parent		)
-		__field(	u32,	handle		)
+		__string(	dev,	qdisc_dev(q)->name	)
+		__string(	kind,	q->ops->id		)
+		__field(	u32,	parent			)
+		__field(	u32,	handle			)
 	),
 
 	TP_fast_assign(
-		__assign_str(dev, qdisc_dev(q));
+		__assign_str(dev, qdisc_dev(q)->name);
 		__assign_str(kind, q->ops->id);
 		__entry->parent = q->parent;
 		__entry->handle = q->handle;
@@ -78,14 +78,14 @@ TRACE_EVENT(qdisc_destroy,
 	TP_ARGS(q),
 
 	TP_STRUCT__entry(
-		__string(	dev,	qdisc_dev(q)	)
-		__string(	kind,	q->ops->id	)
-		__field(	u32,	parent		)
-		__field(	u32,	handle		)
+		__string(	dev,	qdisc_dev(q) ? qdisc_dev(q)->name : "(null)"	)
+		__string(	kind,	q->ops->id	)
+		__field(	u32,	parent		)
+		__field(	u32,	handle		)
 	),
 
 	TP_fast_assign(
-		__assign_str(dev, qdisc_dev(q));
+		__assign_str(dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)");
 		__assign_str(kind, q->ops->id);
 		__entry->parent = q->parent;
 		__entry->handle = q->handle;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9df7e52e0a32f425c6fd816d438d2de9f2761f8f..41bcf69d8b563b0314932b87a6412cfe7f31fe76 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -545,17 +545,15 @@ static void do_unoptimize_kprobes(void)
 	/* See comment in do_optimize_kprobes() */
 	lockdep_assert_cpus_held();
 
-	/* Unoptimization must be done anytime */
-	if (list_empty(&unoptimizing_list))
-		return;
+	if (!list_empty(&unoptimizing_list))
+		arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 
-	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
-	/* Loop free_list for disarming */
+	/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		/* Switching from detour code to origin */
 		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
-		/* Disarm probes if marked disabled */
-		if (kprobe_disabled(&op->kp))
+		/* Disarm probes if marked disabled and not gone */
+		if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
 			arch_disarm_kprobe(&op->kp);
 		if (kprobe_unused(&op->kp)) {
 			/*
@@ -784,14 +782,13 @@ static void kill_optimized_kprobe(struct kprobe *p)
 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 
 	if (kprobe_unused(p)) {
-		/* Enqueue if it is unused */
-		list_add(&op->list, &freeing_list);
 		/*
-		 * Remove unused probes from the hash list. After waiting
-		 * for synchronization, this probe is reclaimed.
-		 * (reclaiming is done by do_free_cleaned_kprobes().)
+		 * Unused kprobe is on unoptimizing or freeing list. We move it
+		 * to freeing_list and let the kprobe_optimizer() remove it from
+		 * the kprobe hash list and free it.
 		 */
-		hlist_del_rcu(&op->kp.hlist);
+		if (optprobe_queued_unopt(op))
+			list_move(&op->list, &freeing_list);
 	}
 
 	/* Don't touch the code, because it is already freed. */
@@ -1646,10 +1643,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	jump_label_lock();
 	preempt_disable();
 
-	/* Ensure it is not in reserved area nor out of text */
-	if (!(core_kernel_text((unsigned long) p->addr) ||
-	    is_module_text_address((unsigned long) p->addr)) ||
-	    in_gate_area_no_mm((unsigned long) p->addr) ||
+	/* Ensure the address is in a text area, and find a module if exists. */
+	*probed_mod = NULL;
+	if (!core_kernel_text((unsigned long) p->addr)) {
+		*probed_mod = __module_text_address((unsigned long) p->addr);
+		if (!(*probed_mod)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+	/* Ensure it is not in reserved area. */
+	if (in_gate_area_no_mm((unsigned long) p->addr) ||
 	    within_kprobe_blacklist((unsigned long) p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr) ||
 	    static_call_text_reserved(p->addr, p->addr) ||
@@ -1660,8 +1664,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
 		goto out;
 	}
 
-	/* Check if are we probing a module */
-	*probed_mod = __module_text_address((unsigned long) p->addr);
+	/* Get module refcount and reject __init functions for loaded modules. */
 	if (*probed_mod) {
 		/*
 		 * We must hold a refcount of the probed module while updating
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 7493f4b0d702e86a1972cd0c7586f1164a38dc73..0f0f4d2862e71e1cfa6670b3362d378536947649 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2754,7 +2754,8 @@ early_param("keep_bootcon", keep_bootcon_setup);
  * Care need to be taken with consoles that are statically
  * enabled such as netconsole
  */
-static int try_enable_new_console(struct console *newcon, bool user_specified)
+static int try_enable_preferred_console(struct console *newcon,
+					bool user_specified)
 {
 	struct console_cmdline *c;
 	int i, err;
@@ -2802,6 +2803,23 @@ static int try_enable_new_console(struct console *newcon, bool user_specified)
 	return -ENOENT;
 }
 
+/* Try to enable the console unconditionally */
+static void try_enable_default_console(struct console *newcon)
+{
+	if (newcon->index < 0)
+		newcon->index = 0;
+
+	if (newcon->setup && newcon->setup(newcon, NULL) != 0)
+		return;
+
+	newcon->flags |= CON_ENABLED;
+
+	if (newcon->device) {
+		newcon->flags |= CON_CONSDEV;
+		has_preferred_console = true;
+	}
+}
+
 /*
  * The console driver calls this routine during kernel initialization
  * to register the console printing procedure with printk() and to
@@ -2858,25 +2876,15 @@ void register_console(struct console *newcon)
 	 *	didn't select a console we take the first one
 	 *	that registers here.
 	 */
-	if (!has_preferred_console) {
-		if (newcon->index < 0)
-			newcon->index = 0;
-		if (newcon->setup == NULL ||
-		    newcon->setup(newcon, NULL) == 0) {
-			newcon->flags |= CON_ENABLED;
-			if (newcon->device) {
-				newcon->flags |= CON_CONSDEV;
-				has_preferred_console = true;
-			}
-		}
-	}
+	if (!has_preferred_console)
+		try_enable_default_console(newcon);
 
 	/* See if this console matches one we selected on the command line */
-	err = try_enable_new_console(newcon, true);
+	err = try_enable_preferred_console(newcon, true);
 
 	/* If not, try to match against the platform default(s) */
 	if (err == -ENOENT)
-		err = try_enable_new_console(newcon, false);
+		err = try_enable_preferred_console(newcon, false);
 
 	/* printk() messages are not printed to the Braille console. */
 	if (err || newcon->flags & CON_BRL)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a90870ac7ad8c45a7ee57420ae2f8f4698b1e02b..9b0a35ca147958dcfd66fbe06dc34777f4410dcb 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -416,7 +416,6 @@ struct rb_irq_work {
 	struct irq_work			work;
 	wait_queue_head_t		waiters;
 	wait_queue_head_t		full_waiters;
-	long				wait_index;
 	bool				waiters_pending;
 	bool				full_waiters_pending;
 	bool				wakeup_full;
@@ -833,8 +832,19 @@ static void rb_wake_up_waiters(struct irq_work *work)
 	wake_up_all(&rbwork->waiters);
 	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
+		/* Only cpu_buffer sets the above flags */
+		struct ring_buffer_per_cpu *cpu_buffer =
+			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
+
+		/* Called from interrupt context */
+		raw_spin_lock(&cpu_buffer->reader_lock);
 		rbwork->wakeup_full = false;
 		rbwork->full_waiters_pending = false;
+
+		/* Waking up all waiters, they will reset the shortest full */
+		cpu_buffer->shortest_full = 0;
+		raw_spin_unlock(&cpu_buffer->reader_lock);
+
 		wake_up_all(&rbwork->full_waiters);
 	}
 }
 
@@ -863,14 +873,41 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 		rbwork = &cpu_buffer->irq_work;
 	}
 
-	rbwork->wait_index++;
-	/* make sure the waiters see the new index */
-	smp_wmb();
-
 	/* This can be called in any context */
 	irq_work_queue(&rbwork->work);
 }
 
+static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	bool ret = false;
+
+	/* Reads of all CPUs always waits for any data */
+	if (cpu == RING_BUFFER_ALL_CPUS)
+		return !ring_buffer_empty(buffer);
+
+	cpu_buffer = buffer->buffers[cpu];
+
+	if (!ring_buffer_empty_cpu(buffer, cpu)) {
+		unsigned long flags;
+		bool pagebusy;
+
+		if (!full)
+			return true;
+
+		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+		ret = !pagebusy && full_hit(buffer, cpu, full);
+
+		if (!ret && (!cpu_buffer->shortest_full ||
+			     cpu_buffer->shortest_full > full)) {
+			cpu_buffer->shortest_full = full;
+		}
+		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	}
+	return ret;
+}
+
 /**
  * ring_buffer_wait - wait for input to the ring buffer
  * @buffer: buffer to wait on
@@ -886,7 +923,6 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	DEFINE_WAIT(wait);
 	struct rb_irq_work *work;
-	long wait_index;
 	int ret = 0;
 
 	/*
@@ -905,81 +941,54 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 		work = &cpu_buffer->irq_work;
 	}
 
-	wait_index = READ_ONCE(work->wait_index);
-
-	while (true) {
-		if (full)
-			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
-		else
-			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
-
-		/*
-		 * The events can happen in critical sections where
-		 * checking a work queue can cause deadlocks.
-		 * After adding a task to the queue, this flag is set
-		 * only to notify events to try to wake up the queue
-		 * using irq_work.
-		 *
-		 * We don't clear it even if the buffer is no longer
-		 * empty. The flag only causes the next event to run
-		 * irq_work to do the work queue wake up. The worse
-		 * that can happen if we race with !trace_empty() is that
-		 * an event will cause an irq_work to try to wake up
-		 * an empty queue.
-		 *
-		 * There's no reason to protect this flag either, as
-		 * the work queue and irq_work logic will do the necessary
-		 * synchronization for the wake ups. The only thing
-		 * that is necessary is that the wake up happens after
-		 * a task has been queued. It's OK for spurious wake ups.
-		 */
-		if (full)
-			work->full_waiters_pending = true;
-		else
-			work->waiters_pending = true;
-
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-
-		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
-			break;
-
-		if (cpu != RING_BUFFER_ALL_CPUS &&
-		    !ring_buffer_empty_cpu(buffer, cpu)) {
-			unsigned long flags;
-			bool pagebusy;
-			bool done;
-
-			if (!full)
-				break;
-
-			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
-			done = !pagebusy && full_hit(buffer, cpu, full);
+	if (full)
+		prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+	else
+		prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 
-			if (!cpu_buffer->shortest_full ||
-			    cpu_buffer->shortest_full > full)
-				cpu_buffer->shortest_full = full;
-			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-			if (done)
-				break;
-		}
+	/*
+	 * The events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the queue, this flag is set
+	 * only to notify events to try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worse
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. It's OK for spurious wake ups.
+ */ + if (full) + work->full_waiters_pending = true; + else + work->waiters_pending = true; - schedule(); + if (rb_watermark_hit(buffer, cpu, full)) + goto out; - /* Make sure to see the new wait index */ - smp_rmb(); - if (wait_index != work->wait_index) - break; + if (signal_pending(current)) { + ret = -EINTR; + goto out; } + schedule(); + out: if (full) finish_wait(&work->full_waiters, &wait); else finish_wait(&work->waiters, &wait); + if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current)) + ret = -EINTR; + return ret; } @@ -1002,30 +1011,51 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table, int full) { struct ring_buffer_per_cpu *cpu_buffer; - struct rb_irq_work *work; + struct rb_irq_work *rbwork; if (cpu == RING_BUFFER_ALL_CPUS) { - work = &buffer->irq_work; + rbwork = &buffer->irq_work; full = 0; } else { if (!cpumask_test_cpu(cpu, buffer->cpumask)) - return -EINVAL; + return EPOLLERR; cpu_buffer = buffer->buffers[cpu]; - work = &cpu_buffer->irq_work; + rbwork = &cpu_buffer->irq_work; } if (full) { - poll_wait(filp, &work->full_waiters, poll_table); - work->full_waiters_pending = true; + unsigned long flags; + + poll_wait(filp, &rbwork->full_waiters, poll_table); + + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); if (!cpu_buffer->shortest_full || cpu_buffer->shortest_full > full) cpu_buffer->shortest_full = full; - } else { - poll_wait(filp, &work->waiters, poll_table); - work->waiters_pending = true; + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); + if (full_hit(buffer, cpu, full)) + return EPOLLIN | EPOLLRDNORM; + /* + * Only allow full_waiters_pending update to be seen after + * the shortest_full is set. If the writer sees the + * full_waiters_pending flag set, it will compare the + * amount in the ring buffer to shortest_full. If the amount + * in the ring buffer is greater than the shortest_full + * percent, it will call the irq_work handler to wake up + * this list. The irq_handler will reset shortest_full + * back to zero. That's done under the reader_lock, but + * the below smp_mb() makes sure that the update to + * full_waiters_pending doesn't leak up into the above. + */ + smp_mb(); + rbwork->full_waiters_pending = true; + return 0; } + poll_wait(filp, &rbwork->waiters, poll_table); + rbwork->waiters_pending = true; + /* * There's a tight race between setting the waiters_pending and * checking if the ring buffer is empty. Once the waiters_pending bit @@ -1041,9 +1071,6 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, */ smp_mb(); - if (full) - return full_hit(buffer, cpu, full) ? 
-
 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 		return EPOLLIN | EPOLLRDNORM;
@@ -3430,6 +3457,12 @@ rb_reserve_next_event(struct trace_buffer *buffer,
 	int nr_loops = 0;
 	int add_ts_default;
 
+	/* ring buffer does cmpxchg, make sure it is safe in NMI context */
+	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
+	    (unlikely(in_nmi()))) {
+		return NULL;
+	}
+
 	rb_start_commit(cpu_buffer);
 
 	/* The commit page can not change after this */
@@ -4164,7 +4197,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	cpu_buffer = iter->cpu_buffer;
 	reader = cpu_buffer->reader_page;
 	head_page = cpu_buffer->head_page;
-	commit_page = cpu_buffer->commit_page;
+	commit_page = READ_ONCE(cpu_buffer->commit_page);
 	commit_ts = commit_page->page->time_stamp;
 
 	/*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b6ee2f07a4c9d59d5ed803329147e752b7b74d07..3a5c874a40ae20c909d87f42b08d5bf62ec4e6a9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -39,6 +39,7 @@
 #include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
+#include <linux/kmemleak.h>
 #include <linux/poll.h>
 #include <linux/nmi.h>
 #include <linux/fs.h>
@@ -2225,10 +2226,14 @@ struct saved_cmdlines_buffer {
 	unsigned *map_cmdline_to_pid;
 	unsigned cmdline_num;
 	int cmdline_idx;
-	char *saved_cmdlines;
+	char saved_cmdlines[];
 };
 static struct saved_cmdlines_buffer *savedcmd;
 
+/* Holds the size of a cmdline and pid element */
+#define SAVED_CMDLINE_MAP_ELEMENT_SIZE(s)			\
+	(TASK_COMM_LEN + sizeof((s)->map_cmdline_to_pid[0]))
+
 static inline char *get_saved_cmdlines(int idx)
 {
 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
@@ -2239,47 +2244,54 @@ static inline void set_cmdline(int idx, const char *cmdline)
 	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
 }
 
-static int allocate_cmdlines_buffer(unsigned int val,
-				    struct saved_cmdlines_buffer *s)
+static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
 {
-	s->map_cmdline_to_pid = kmalloc_array(val,
-					      sizeof(*s->map_cmdline_to_pid),
-					      GFP_KERNEL);
-	if (!s->map_cmdline_to_pid)
-		return -ENOMEM;
+	int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
 
-	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
-	if (!s->saved_cmdlines) {
-		kfree(s->map_cmdline_to_pid);
-		return -ENOMEM;
-	}
+	kmemleak_free(s);
+	free_pages((unsigned long)s, order);
+}
 
-	s->cmdline_idx = 0;
+static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
+{
+	struct saved_cmdlines_buffer *s;
+	struct page *page;
+	int orig_size, size;
+	int order;
+
+	/* Figure out how much is needed to hold the given number of cmdlines */
+	orig_size = sizeof(*s) + val * SAVED_CMDLINE_MAP_ELEMENT_SIZE(s);
+	order = get_order(orig_size);
+	size = 1 << (order + PAGE_SHIFT);
+	page = alloc_pages(GFP_KERNEL, order);
+	if (!page)
+		return NULL;
+
+	s = page_address(page);
+	kmemleak_alloc(s, size, 1, GFP_KERNEL);
+	memset(s, 0, sizeof(*s));
+
+	/* Round up to actual allocation */
+	val = (size - sizeof(*s)) / SAVED_CMDLINE_MAP_ELEMENT_SIZE(s);
 	s->cmdline_num = val;
+
+	/* Place map_cmdline_to_pid array right after saved_cmdlines */
+	s->map_cmdline_to_pid = (unsigned *)&s->saved_cmdlines[val * TASK_COMM_LEN];
+
+	s->cmdline_idx = 0;
 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
 	       sizeof(s->map_pid_to_cmdline));
 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
 	       val * sizeof(*s->map_cmdline_to_pid));
 
-	return 0;
+	return s;
 }
 
 static int trace_create_savedcmd(void)
 {
-	int ret;
+	savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
 
-	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
-	if (!savedcmd)
-		return -ENOMEM;
-
-	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
-	if (ret < 0) {
-		kfree(savedcmd);
-		savedcmd = NULL;
-		return -ENOMEM;
-	}
-
-	return 0;
+	return savedcmd ? 0 : -ENOMEM;
 }
 
 int is_tracing_stopped(void)
@@ -5617,26 +5629,14 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
-{
-	kfree(s->saved_cmdlines);
-	kfree(s->map_cmdline_to_pid);
-	kfree(s);
-}
-
 static int tracing_resize_saved_cmdlines(unsigned int val)
 {
 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
 
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	s = allocate_cmdlines_buffer(val);
 	if (!s)
 		return -ENOMEM;
 
-	if (allocate_cmdlines_buffer(val, s) < 0) {
-		kfree(s);
-		return -ENOMEM;
-	}
-
 	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
 	savedcmd_temp = savedcmd;
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 51a9d1185033b981d4227a765ed00c31d9cda4ae..d47641f9740bcc537b414761662f7e44a1580964 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -574,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
 		}
 
 		memcpy(elt->key, key, map->key_size);
-		entry->val = elt;
+		/*
+		 * Ensure the initialization is visible and
+		 * publish the elt.
+		 */
+		smp_wmb();
+		WRITE_ONCE(entry->val, elt);
 		atomic64_inc(&map->hits);
 
 		return entry->val;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 71bdc167a9ee72a6883ce9f13d7d1b064f8dd31a..b008af57bb9fa602c220e54c1bc37fdd19fa40cf 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -219,10 +219,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
 	return obj;
 }
 
-/*
- * Allocate a new object. If the pool is empty, switch off the debugger.
- * Must be called with interrupts disabled.
- */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
 {
@@ -555,31 +551,73 @@ static void debug_object_is_on_stack(void *addr, int onstack)
 	WARN_ON(1);
 }
 
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+						const struct debug_obj_descr *descr,
+						bool onstack, bool alloc_ifstatic)
+{
+	struct debug_obj *obj = lookup_object(addr, b);
+	enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+	if (likely(obj))
+		return obj;
+
+	/*
+	 * debug_object_init() unconditionally allocates untracked
+	 * objects. It does not matter whether it is a static object or
+	 * not.
+	 *
+	 * debug_object_assert_init() and debug_object_activate() allow
+	 * allocation only if the descriptor callback confirms that the
+	 * object is static and considered initialized. For non-static
+	 * objects the allocation needs to be done from the fixup callback.
+	 */
+	if (unlikely(alloc_ifstatic)) {
+		if (!descr->is_static_object || !descr->is_static_object(addr))
+			return ERR_PTR(-ENOENT);
+		/* Statically allocated objects are considered initialized */
+		state = ODEBUG_STATE_INIT;
+	}
+
+	obj = alloc_object(addr, b, descr);
+	if (likely(obj)) {
+		obj->state = state;
+		debug_object_is_on_stack(addr, onstack);
+		return obj;
+	}
+
+	/* Out of memory. Do the cleanup outside of the locked region */
+	debug_objects_enabled = 0;
+	return NULL;
+}
+
+static void debug_objects_fill_pool(void)
+{
+	/*
+	 * On RT enabled kernels the pool refill must happen in preemptible
+	 * context:
+	 */
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+		fill_pool();
+}
+
 static void
 __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
 {
-	enum debug_obj_state state;
-	bool check_stack = false;
+	struct debug_obj *obj, o;
 	struct debug_bucket *db;
-	struct debug_obj *obj;
 	unsigned long flags;
 
-	fill_pool();
+	debug_objects_fill_pool();
 
 	db = get_bucket((unsigned long) addr);
 
 	raw_spin_lock_irqsave(&db->lock, flags);
 
-	obj = lookup_object(addr, db);
-	if (!obj) {
-		obj = alloc_object(addr, db, descr);
-		if (!obj) {
-			debug_objects_enabled = 0;
-			raw_spin_unlock_irqrestore(&db->lock, flags);
-			debug_objects_oom();
-			return;
-		}
-		check_stack = true;
+	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+	if (unlikely(!obj)) {
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_objects_oom();
+		return;
 	}
 
 	switch (obj->state) {
@@ -587,26 +625,18 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
 	case ODEBUG_STATE_INIT:
 	case ODEBUG_STATE_INACTIVE:
 		obj->state = ODEBUG_STATE_INIT;
-		break;
-
-	case ODEBUG_STATE_ACTIVE:
-		state = obj->state;
-		raw_spin_unlock_irqrestore(&db->lock, flags);
-		debug_print_object(obj, "init");
-		debug_object_fixup(descr->fixup_init, addr, state);
-		return;
-
-	case ODEBUG_STATE_DESTROYED:
 		raw_spin_unlock_irqrestore(&db->lock, flags);
-		debug_print_object(obj, "init");
 		return;
 	default:
 		break;
 	}
 
+	o = *obj;
 	raw_spin_unlock_irqrestore(&db->lock, flags);
-	if (check_stack)
-		debug_object_is_on_stack(addr, onstack);
+	debug_print_object(&o, "init");
+
+	if (o.state == ODEBUG_STATE_ACTIVE)
+		debug_object_fixup(descr->fixup_init, addr, o.state);
 }
 
 /**
@@ -646,74 +676,53 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
  */
 int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 {
-	enum debug_obj_state state;
+	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
-	int ret;
-	struct debug_obj o = { .object = addr,
-			       .state = ODEBUG_STATE_NOTAVAILABLE,
-			       .descr = descr };
 
 	if (!debug_objects_enabled)
 		return 0;
 
+	debug_objects_fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	raw_spin_lock_irqsave(&db->lock, flags);
 
-	obj = lookup_object(addr, db);
-	if (obj) {
-		bool print_object = false;
-
+	obj = lookup_object_or_alloc(addr, db, descr, false, true);
+	if (unlikely(!obj)) {
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_objects_oom();
+		return 0;
+	} else if (likely(!IS_ERR(obj))) {
 		switch (obj->state) {
-		case ODEBUG_STATE_INIT:
-		case ODEBUG_STATE_INACTIVE:
-			obj->state = ODEBUG_STATE_ACTIVE;
-			ret = 0;
-			break;
-
 		case ODEBUG_STATE_ACTIVE:
-			state = obj->state;
-			raw_spin_unlock_irqrestore(&db->lock, flags);
-			debug_print_object(obj, "activate");
-			ret = debug_object_fixup(descr->fixup_activate, addr, state);
-			return ret ? 0 : -EINVAL;
-
 		case ODEBUG_STATE_DESTROYED:
-			print_object = true;
-			ret = -EINVAL;
+			o = *obj;
 			break;
+		case ODEBUG_STATE_INIT:
+		case ODEBUG_STATE_INACTIVE:
+			obj->state = ODEBUG_STATE_ACTIVE;
+			fallthrough;
 		default:
-			ret = 0;
-			break;
+			raw_spin_unlock_irqrestore(&db->lock, flags);
+			return 0;
 		}
-		raw_spin_unlock_irqrestore(&db->lock, flags);
-		if (print_object)
-			debug_print_object(obj, "activate");
-		return ret;
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
+	debug_print_object(&o, "activate");
 
-	/*
-	 * We are here when a static object is activated. We
-	 * let the type specific code confirm whether this is
-	 * true or not. if true, we just make sure that the
-	 * static object is tracked in the object tracker. If
-	 * not, this must be a bug, so we try to fix it up.
-	 */
-	if (descr->is_static_object && descr->is_static_object(addr)) {
-		/* track this static object */
-		debug_object_init(addr, descr);
-		debug_object_activate(addr, descr);
-	} else {
-		debug_print_object(&o, "activate");
-		ret = debug_object_fixup(descr->fixup_activate, addr,
-					 ODEBUG_STATE_NOTAVAILABLE);
-		return ret ? 0 : -EINVAL;
+	switch (o.state) {
+	case ODEBUG_STATE_ACTIVE:
+	case ODEBUG_STATE_NOTAVAILABLE:
+		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
+			return 0;
+		fallthrough;
+	default:
+		return -EINVAL;
 	}
-	return 0;
 }
 EXPORT_SYMBOL_GPL(debug_object_activate);
 
@@ -724,10 +733,10 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
  */
 void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
 {
+	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
-	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -739,33 +748,24 @@ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
 	obj = lookup_object(addr, db);
 	if (obj) {
 		switch (obj->state) {
+		case ODEBUG_STATE_DESTROYED:
+			break;
 		case ODEBUG_STATE_INIT:
 		case ODEBUG_STATE_INACTIVE:
 		case ODEBUG_STATE_ACTIVE:
-			if (!obj->astate)
-				obj->state = ODEBUG_STATE_INACTIVE;
-			else
-				print_object = true;
-			break;
-
-		case ODEBUG_STATE_DESTROYED:
-			print_object = true;
-			break;
+			if (obj->astate)
+				break;
+			obj->state = ODEBUG_STATE_INACTIVE;
+			fallthrough;
 		default:
-			break;
+			raw_spin_unlock_irqrestore(&db->lock, flags);
+			return;
 		}
+		o = *obj;
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
-	if (!obj) {
-		struct debug_obj o = { .object = addr,
-				       .state = ODEBUG_STATE_NOTAVAILABLE,
-				       .descr = descr };
-
-		debug_print_object(&o, "deactivate");
-	} else if (print_object) {
-		debug_print_object(obj, "deactivate");
-	}
+	debug_print_object(&o, "deactivate");
 }
 EXPORT_SYMBOL_GPL(debug_object_deactivate);
 
@@ -776,11 +776,9 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
  */
 void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
 {
-	enum debug_obj_state state;
+	struct debug_obj *obj, o;
 	struct debug_bucket *db;
-	struct debug_obj *obj;
 	unsigned long flags;
-	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -790,32 +788,31 @@ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
 	raw_spin_lock_irqsave(&db->lock, flags);
 
 	obj = lookup_object(addr, db);
-	if (!obj)
-		goto out_unlock;
+	if (!obj) {
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
 
 	switch (obj->state) {
+	case ODEBUG_STATE_ACTIVE:
+	case ODEBUG_STATE_DESTROYED:
+		break;
 	case ODEBUG_STATE_NONE:
 	case ODEBUG_STATE_INIT:
 	case ODEBUG_STATE_INACTIVE:
 		obj->state = ODEBUG_STATE_DESTROYED;
-		break;
-	case ODEBUG_STATE_ACTIVE:
-		state = obj->state;
+		fallthrough;
+	default:
 		raw_spin_unlock_irqrestore(&db->lock, flags);
-		debug_print_object(obj, "destroy");
-		debug_object_fixup(descr->fixup_destroy, addr, state);
 		return;
-
-	case ODEBUG_STATE_DESTROYED:
-		print_object = true;
-		break;
-	default:
-		break;
 	}
-out_unlock:
+
+	o = *obj;
 	raw_spin_unlock_irqrestore(&db->lock, flags);
-	if (print_object)
-		debug_print_object(obj, "destroy");
+	debug_print_object(&o, "destroy");
+
+	if (o.state == ODEBUG_STATE_ACTIVE)
+		debug_object_fixup(descr->fixup_destroy, addr, o.state);
 }
 EXPORT_SYMBOL_GPL(debug_object_destroy);
 
@@ -826,9 +823,8 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
  */
 void debug_object_free(void *addr, const struct debug_obj_descr *descr)
 {
-	enum debug_obj_state state;
+	struct debug_obj *obj, o;
 	struct debug_bucket *db;
-	struct debug_obj *obj;
 	unsigned long flags;
 
 	if (!debug_objects_enabled)
@@ -839,24 +835,26 @@ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
 	raw_spin_lock_irqsave(&db->lock, flags);
 
 	obj = lookup_object(addr, db);
-	if (!obj)
-		goto out_unlock;
+	if (!obj) {
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
 
 	switch (obj->state) {
 	case ODEBUG_STATE_ACTIVE:
-		state = obj->state;
-		raw_spin_unlock_irqrestore(&db->lock, flags);
-		debug_print_object(obj, "free");
-		debug_object_fixup(descr->fixup_free, addr, state);
-		return;
+		break;
 	default:
 		hlist_del(&obj->node);
 		raw_spin_unlock_irqrestore(&db->lock, flags);
 		free_object(obj);
 		return;
 	}
-out_unlock:
+
+	o = *obj;
 	raw_spin_unlock_irqrestore(&db->lock, flags);
+	debug_print_object(&o, "free");
+
+	debug_object_fixup(descr->fixup_free, addr, o.state);
 }
 EXPORT_SYMBOL_GPL(debug_object_free);
 
@@ -867,6 +865,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
  */
 void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
 {
+	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
@@ -874,34 +873,25 @@ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
 	if (!debug_objects_enabled)
 		return;
 
+	debug_objects_fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	raw_spin_lock_irqsave(&db->lock, flags);
+	obj = lookup_object_or_alloc(addr, db, descr, false, true);
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (likely(!IS_ERR_OR_NULL(obj)))
+		return;
 
-	obj = lookup_object(addr, db);
+	/* If NULL the allocation has hit OOM */
 	if (!obj) {
-		struct debug_obj o = { .object = addr,
-				       .state = ODEBUG_STATE_NOTAVAILABLE,
-				       .descr = descr };
-
-		raw_spin_unlock_irqrestore(&db->lock, flags);
-		/*
-		 * Maybe the object is static, and we let the type specific
-		 * code confirm. Track this static object if true, else invoke
-		 * fixup.
-		 */
-		if (descr->is_static_object && descr->is_static_object(addr)) {
-			/* Track this static object */
-			debug_object_init(addr, descr);
-		} else {
-			debug_print_object(&o, "assert_init");
-			debug_object_fixup(descr->fixup_assert_init, addr,
-					   ODEBUG_STATE_NOTAVAILABLE);
-		}
+		debug_objects_oom();
 		return;
 	}
 
-	raw_spin_unlock_irqrestore(&db->lock, flags);
+	/* Object is neither tracked nor static. It's not initialized. */
+	debug_print_object(&o, "assert_init");
+	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
 }
 EXPORT_SYMBOL_GPL(debug_object_assert_init);
 
@@ -916,10 +906,10 @@ void
 debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
 			  unsigned int expect, unsigned int next)
 {
+	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
-	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -932,28 +922,19 @@ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
 	if (obj) {
 		switch (obj->state) {
 		case ODEBUG_STATE_ACTIVE:
-			if (obj->astate == expect)
-				obj->astate = next;
-			else
-				print_object = true;
-			break;
-
+			if (obj->astate != expect)
+				break;
+			obj->astate = next;
+			raw_spin_unlock_irqrestore(&db->lock, flags);
+			return;
 		default:
-			print_object = true;
 			break;
 		}
+		o = *obj;
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
-	if (!obj) {
-		struct debug_obj o = { .object = addr,
-				       .state = ODEBUG_STATE_NOTAVAILABLE,
-				       .descr = descr };
-
-		debug_print_object(&o, "active_state");
-	} else if (print_object) {
-		debug_print_object(obj, "active_state");
-	}
+	debug_print_object(&o, "active_state");
 }
 EXPORT_SYMBOL_GPL(debug_object_active_state);
 
@@ -961,12 +942,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
-	const struct debug_obj_descr *descr;
-	enum debug_obj_state state;
+	int cnt, objs_checked = 0;
+	struct debug_obj *obj, o;
 	struct debug_bucket *db;
 	struct hlist_node *tmp;
-	struct debug_obj *obj;
-	int cnt, objs_checked = 0;
 
 	saddr = (unsigned long) address;
 	eaddr = saddr + size;
@@ -988,12 +967,10 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 
 		switch (obj->state) {
 		case ODEBUG_STATE_ACTIVE:
-			descr = obj->descr;
-			state = obj->state;
+			o = *obj;
 			raw_spin_unlock_irqrestore(&db->lock, flags);
-			debug_print_object(obj, "free");
-			debug_object_fixup(descr->fixup_free,
-					   (void *) oaddr, state);
+			debug_print_object(&o, "free");
+			debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
 			goto repeat;
 		default:
 			hlist_del(&obj->node);
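
Note on the check_kprobe_address_safe() hunk: the reordered check first classifies the probed address (core kernel text, module text, or neither) and only then applies the reserved-area tests. The following user-space sketch illustrates that classification shape only; the address ranges, names, and return codes are invented for the example and are not kernel APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct range { unsigned long start, end; };

/* Hypothetical stand-ins for core_kernel_text()/__module_text_address() */
static const struct range core_text = { 0x1000, 0x2000 };
static const struct range mod_text  = { 0x8000, 0x9000 };

static bool in_range(const struct range *r, unsigned long a)
{
	return a >= r->start && a < r->end;
}

/* Returns 0 and sets *mod if the address is probeable text */
static int classify_addr(unsigned long addr, const struct range **mod)
{
	*mod = NULL;
	if (!in_range(&core_text, addr)) {
		/* Not core kernel text: it must be module text */
		if (!in_range(&mod_text, addr))
			return -1;	/* the kernel returns -EINVAL here */
		*mod = &mod_text;	/* caller must then pin the module */
	}
	return 0;
}

int main(void)
{
	const struct range *mod;

	printf("0x1800 -> %d (module: %s)\n", classify_addr(0x1800, &mod), mod ? "yes" : "no");
	printf("0x8800 -> %d (module: %s)\n", classify_addr(0x8800, &mod), mod ? "yes" : "no");
	printf("0x5000 -> %d\n", classify_addr(0x5000, &mod));
	return 0;
}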
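Note on the ring_buffer_poll_wait() hunk: the fix depends on the waiter publishing shortest_full before full_waiters_pending, while the writer reads the flag before the threshold. A rough user-space analogue, with C11 seq_cst atomics standing in for the kernel's reader_lock plus smp_mb(), and all names invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int shortest_full;		/* lowest fill percentage any waiter needs */
static atomic_bool full_waiters_pending;

/* Waiter side: record the threshold, then advertise it (cf. the poll path) */
static void waiter_arm(int full)
{
	int cur = atomic_load(&shortest_full);

	if (!cur || cur > full)
		atomic_store(&shortest_full, full);
	/* seq_cst store orders the flag after the threshold update */
	atomic_store(&full_waiters_pending, true);
}

/* Writer side: flag first, threshold second (cf. rb_wake_up_waiters()) */
static bool writer_should_wake(int percent_filled)
{
	if (!atomic_load(&full_waiters_pending))
		return false;
	return percent_filled >= atomic_load(&shortest_full);
}

int main(void)
{
	waiter_arm(50);
	printf("wake at 75%%: %s\n", writer_should_wake(75) ? "yes" : "no");
	return 0;
}

If the two stores in waiter_arm() were reordered, the writer could consume a stale (zero) threshold and skip a wakeup, which is the race the smp_mb() in the patch closes.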
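Note on the allocate_cmdlines_buffer() hunk: the request is rounded up to a power-of-two page block and the slack is converted back into extra cmdline slots. A sketch of that arithmetic, where PAGE_SHIFT, TASK_COMM_LEN, the header size, and get_order() are simplified stand-ins for the kernel definitions:

#include <stdio.h>

#define PAGE_SHIFT	12
#define TASK_COMM_LEN	16

/* One element = one saved comm plus one map_cmdline_to_pid entry */
#define ELEMENT_SIZE	(TASK_COMM_LEN + sizeof(unsigned int))

/* Approximation of the fixed part of struct saved_cmdlines_buffer */
#define HEADER_SIZE	((32768 + 1) * sizeof(unsigned int) + 16)

/* User-space version of the kernel's get_order() */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned int val = 128;	/* cf. SAVED_CMDLINES_DEFAULT */
	unsigned long orig_size = HEADER_SIZE + val * ELEMENT_SIZE;
	int order = get_order(orig_size);
	unsigned long size = 1UL << (order + PAGE_SHIFT);

	/* Round the capacity up to what the allocation really holds */
	val = (size - HEADER_SIZE) / ELEMENT_SIZE;

	printf("order=%d size=%lu usable cmdlines=%u\n", order, size, val);
	return 0;
}

With these assumed sizes the 128-entry request lands in an order-6 block and yields several thousand usable slots, which is exactly the "wasted memory" the patch reclaims.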
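Note on the __tracing_map_insert() hunk: the smp_wmb()/WRITE_ONCE() pair makes the element's initialization visible before the pointer that publishes it. A minimal user-space analogue using C11 release/acquire atomics (names invented, not kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct elt {
	char key[16];
	int val;
};

static _Atomic(struct elt *) slot;	/* hash-table slot being published */

static void publisher(struct elt *e)
{
	strcpy(e->key, "cpu0");
	e->val = 42;
	/* Release: all stores above are visible before the pointer is */
	atomic_store_explicit(&slot, e, memory_order_release);
}

static void reader(void)
{
	/* Acquire pairs with the release store in publisher() */
	struct elt *e = atomic_load_explicit(&slot, memory_order_acquire);

	if (e)	/* either NULL or a fully initialized element */
		printf("%s=%d\n", e->key, e->val);
}

int main(void)
{
	static struct elt e;

	publisher(&e);
	reader();
	return 0;
}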
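Note on the debugobjects hunks: a recurring pattern in the rework is "snapshot under the lock, report after unlock" — the object is copied to a stack variable (o = *obj) so the slow print/fixup path never runs with the bucket lock held or dereferences an object that may change. A pthread-based sketch of the same discipline, with all names invented:

#include <pthread.h>
#include <stdio.h>

struct tracked {
	int state;
	const char *name;
};

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracked the_obj = { .state = 1, .name = "timer" };

static void report(const struct tracked *o)
{
	/* May sleep, log, or re-enter the tracker: must run unlocked */
	printf("object %s in unexpected state %d\n", o->name, o->state);
}

void check_object(void)
{
	struct tracked o;

	pthread_mutex_lock(&bucket_lock);
	if (the_obj.state != 0) {
		o = the_obj;		/* stable stack copy */
		pthread_mutex_unlock(&bucket_lock);
		report(&o);		/* uses the snapshot, not the live object */
		return;
	}
	pthread_mutex_unlock(&bucket_lock);
}

int main(void)
{
	check_object();
	return 0;
}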