Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
drivers/net/can/pch_can.c
  ae64438be1 ("can: dev: fix skb drop check")
  1dd1b521be ("can: remove obsolete PCH CAN driver")
https://lore.kernel.org/all/20221110102509.1f7d63cc@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -1027,12 +1027,17 @@ out:
  */
 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
 {
+	void *new_arr;
+
 	if (!new_n || old_n == new_n)
 		goto out;
 
-	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
-	if (!arr)
+	new_arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
+	if (!new_arr) {
+		kfree(arr);
 		return NULL;
+	}
+	arr = new_arr;
 
 	if (new_n > old_n)
 		memset(arr + old_n * size, 0, (new_n - old_n) * size);
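The fix above is the classic leak-safe reallocation idiom: keep the old pointer until the new allocation is known to have succeeded, and free it explicitly on failure (krealloc_array() additionally checks new_n * size for overflow, which a plain multiply does not). A minimal userspace sketch of the same idiom, with realloc(3) standing in for krealloc_array(); grow_array() and all names here are illustrative, not kernel code:

#include <stdlib.h>
#include <string.h>

/*
 * Leak-safe growth of 'arr' from old_n to new_n elements of 'size'
 * bytes: the old pointer survives until realloc() is known to have
 * succeeded, mirroring the realloc_array() fix above.
 */
static void *grow_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	void *new_arr;

	if (!new_n || old_n == new_n)
		return arr;

	new_arr = realloc(arr, new_n * size);
	if (!new_arr) {
		free(arr);	/* old block is still valid; don't leak it */
		return NULL;
	}

	if (new_n > old_n)	/* zero the newly appended tail */
		memset((char *)new_arr + old_n * size, 0, (new_n - old_n) * size);

	return new_arr;
}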
@@ -6618,8 +6623,12 @@ static int release_reference(struct bpf_verifier_env *env,
 		return err;
 
 	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
-		if (reg->ref_obj_id == ref_obj_id)
-			__mark_reg_unknown(env, reg);
+		if (reg->ref_obj_id == ref_obj_id) {
+			if (!env->allow_ptr_leaks)
+				__mark_reg_not_init(env, reg);
+			else
+				__mark_reg_unknown(env, reg);
+		}
 	}));
 
 	return 0;
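This hunk is in kernel/bpf/verifier.c: when a referenced object is released, every register still aliasing it must be invalidated, and the fix makes the invalidation depend on privilege. __mark_reg_unknown() leaves the stale pointer value observable as a scalar, which amounts to a pointer leak for unprivileged programs, so those now get "not init" instead. A hedged sketch of the same alias-scrubbing pattern in plain C; the types and release_aliases() helper are hypothetical, not the verifier's API:

/*
 * Hypothetical sketch, not verifier code: after releasing a resource,
 * scrub every slot that still aliases it. Trusted callers may keep the
 * stale value as an opaque scalar; untrusted callers must lose it.
 */
enum slot_state { SLOT_NOT_INIT, SLOT_SCALAR, SLOT_PTR };

struct slot {
	enum slot_state state;
	unsigned long val;
	int ref_id;	/* which tracked resource this slot aliases, 0 = none */
};

static void release_aliases(struct slot *slots, int n, int ref_id, int trusted)
{
	int i;

	for (i = 0; i < n; i++) {
		if (slots[i].ref_id != ref_id)
			continue;
		slots[i].state = trusted ? SLOT_SCALAR : SLOT_NOT_INIT;
		if (!trusted)
			slots[i].val = 0;	/* don't leak the old pointer */
		slots[i].ref_id = 0;
	}
}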
@@ -295,11 +295,11 @@ static int test_init(struct kunit *test)
 {
 	/* Most test cases want 2 distinct CPUs. */
 	if (num_online_cpus() < 2)
-		return -EINVAL;
+		kunit_skip(test, "not enough cpus");
 
 	/* Want the system to not use breakpoints elsewhere. */
 	if (hw_breakpoint_is_used())
-		return -EBUSY;
+		kunit_skip(test, "hw breakpoint already in use");
 
 	return 0;
 }
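The hw_breakpoint test change swaps error returns for kunit_skip(), which records the test as skipped rather than failed and aborts it; that is the appropriate outcome when the environment (CPU count, an already-claimed breakpoint) rather than the code is at fault. A minimal sketch of the same pattern in a hypothetical KUnit suite init:

#include <kunit/test.h>
#include <linux/cpumask.h>

/*
 * Hypothetical suite init: environmental shortfalls are a skip, not a
 * failure. kunit_skip() records the reason and aborts the test.
 */
static int my_suite_test_init(struct kunit *test)
{
	if (num_online_cpus() < 2)
		kunit_skip(test, "not enough cpus");

	return 0;
}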
@@ -2429,8 +2429,11 @@ int enable_kprobe(struct kprobe *kp)
 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
 		p->flags &= ~KPROBE_FLAG_DISABLED;
 		ret = arm_kprobe(p);
-		if (ret)
+		if (ret) {
 			p->flags |= KPROBE_FLAG_DISABLED;
+			if (p != kp)
+				kp->flags |= KPROBE_FLAG_DISABLED;
+		}
 	}
 out:
 	mutex_unlock(&kprobe_mutex);
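The kprobes.c fix covers aggregated probes: when several kprobes share an address, 'p' is the internal aggregate probe, so on arm_kprobe() failure the DISABLED flag must be restored on the caller's 'kp' as well, not just on 'p'. A brief sketch of the public API this protects; the target symbol is an arbitrary example and error handling is abbreviated:

#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %pS\n", (void *)instruction_pointer(regs));
	return 0;
}

static struct kprobe my_kp = {
	.symbol_name = "kernel_clone",	/* arbitrary example target */
	.pre_handler = my_pre_handler,
};

static int __init my_probe_init(void)
{
	int ret = register_kprobe(&my_kp);

	if (ret)
		return ret;
	disable_kprobe(&my_kp);		/* park the probe */
	ret = enable_kprobe(&my_kp);	/* re-arm; on failure the flags now
					 * stay consistent on both the
					 * aggregate and this kprobe */
	if (ret)
		unregister_kprobe(&my_kp);
	return ret;
}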
@@ -141,6 +141,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
 		return -E2BIG;
 
 	fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
+	if (!fp->rethook)
+		return -ENOMEM;
 	for (i = 0; i < size; i++) {
 		struct fprobe_rethook_node *node;
 
@@ -301,7 +303,8 @@ int unregister_fprobe(struct fprobe *fp)
 {
 	int ret;
 
-	if (!fp || fp->ops.func != fprobe_handler)
+	if (!fp || (fp->ops.saved_func != fprobe_handler &&
+		    fp->ops.saved_func != fprobe_kprobe_handler))
 		return -EINVAL;
 
 	/*
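Two fprobe.c fixes here: rethook_alloc() can fail and its result was used unchecked, and unregister_fprobe() must recognize both handlers a registered fprobe may carry (fprobe_kprobe_handler is used when a kprobe coexists on the same function), checked via ops.saved_func since ftrace may redirect ops.func itself. A sketch of basic fprobe usage; the handler signature is per this kernel era (v6.1) and the target symbol is arbitrary:

#include <linux/fprobe.h>

/* Handler signature as of this kernel era (v6.1). */
static void my_entry_handler(struct fprobe *fp, unsigned long entry_ip,
			     struct pt_regs *regs)
{
	pr_info("entered %pS\n", (void *)entry_ip);
}

static struct fprobe my_fp = {
	.entry_handler = my_entry_handler,
	/* no .exit_handler, so no rethook is allocated */
};

static int __init my_fprobe_init(void)
{
	/* filter = symbol(s) to attach to, notfilter = NULL */
	return register_fprobe(&my_fp, "vfs_read", NULL);
}

static void __exit my_fprobe_exit(void)
{
	unregister_fprobe(&my_fp);	/* validated via ops.saved_func */
}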
@@ -3028,18 +3028,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
-	if (!command || !ftrace_enabled) {
-		/*
-		 * If these are dynamic or per_cpu ops, they still
-		 * need their data freed. Since, function tracing is
-		 * not currently active, we can just free them
-		 * without synchronizing all CPUs.
-		 */
-		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-			goto free_ops;
-
-		return 0;
-	}
+	if (!command || !ftrace_enabled)
+		goto out;
 
 	/*
 	 * If the ops uses a trampoline, then it needs to be
@@ -3076,6 +3066,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	removed_ops = NULL;
 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
 
+ out:
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
@@ -3103,7 +3094,6 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		if (IS_ENABLED(CONFIG_PREEMPTION))
 			synchronize_rcu_tasks();
 
- free_ops:
 		ftrace_trampoline_free(ops);
 	}
 
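The ftrace_shutdown() rework fixes a use-after-free: dynamic ops were freed via the early-return path without waiting for in-flight callbacks, so the early return now funnels through the 'out:' label, where the RCU synchronization runs before ftrace_trampoline_free(). For context, a sketch of how a dynamic ftrace_ops comes to exist; callback signature per this kernel era, target symbol arbitrary:

#include <linux/ftrace.h>
#include <linux/string.h>

/* Callback signature per this kernel era. */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *ops,
				  struct ftrace_regs *fregs)
{
	/* runs on every traced entry of the filtered functions */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
	/* FTRACE_OPS_FL_DYNAMIC is set internally for ops like this one;
	 * such ops must not be freed until all CPUs are done with them,
	 * which is what the goto-out rework above guarantees. */
};

static int __init my_tracer_init(void)
{
	int ret = ftrace_set_filter(&my_ops, "vfs_read",
				    strlen("vfs_read"), 0);

	if (ret)
		return ret;
	return register_ftrace_function(&my_ops);
}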
@@ -100,20 +100,20 @@ static int __init test_gen_kprobe_cmd(void)
 					 KPROBE_GEN_TEST_FUNC,
 					 KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1);
 	if (ret)
-		goto free;
+		goto out;
 
 	/* Use kprobe_event_add_fields to add the rest of the fields */
 
 	ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3);
 	if (ret)
-		goto free;
+		goto out;
 
 	/*
 	 * This actually creates the event.
 	 */
 	ret = kprobe_event_gen_cmd_end(&cmd);
 	if (ret)
-		goto free;
+		goto out;
 
 	/*
 	 * Now get the gen_kprobe_test event file. We need to prevent
@@ -136,13 +136,11 @@ static int __init test_gen_kprobe_cmd(void)
 		goto delete;
 	}
 out:
+	kfree(buf);
 	return ret;
 delete:
 	/* We got an error after creating the event, delete it */
 	ret = kprobe_event_delete("gen_kprobe_test");
-free:
-	kfree(buf);
-
 	goto out;
 }
 
@@ -170,14 +168,14 @@ static int __init test_gen_kretprobe_cmd(void)
 					      KPROBE_GEN_TEST_FUNC,
 					      "$retval");
 	if (ret)
-		goto free;
+		goto out;
 
 	/*
 	 * This actually creates the event.
 	 */
 	ret = kretprobe_event_gen_cmd_end(&cmd);
 	if (ret)
-		goto free;
+		goto out;
 
 	/*
 	 * Now get the gen_kretprobe_test event file. We need to
@@ -201,13 +199,11 @@ static int __init test_gen_kretprobe_cmd(void)
 		goto delete;
 	}
 out:
+	kfree(buf);
 	return ret;
 delete:
 	/* We got an error after creating the event, delete it */
 	ret = kprobe_event_delete("gen_kretprobe_test");
-free:
-	kfree(buf);
-
 	goto out;
 }
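Both kprobe_event_gen_test.c hunks fix a leak of 'buf' on the early-exit paths by folding the 'free:' label into 'out:', so every path, success or failure, frees the buffer exactly once. The shape of the restored idiom, sketched with hypothetical step_one()/step_two()/undo_step_one() helpers:

#include <linux/slab.h>

/* Hypothetical helpers standing in for the generated-command steps. */
static int step_one(char *buf);
static int step_two(char *buf);
static void undo_step_one(void);

static int do_setup(void)
{
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = step_one(buf);
	if (ret)
		goto out;
	ret = step_two(buf);
	if (ret)
		goto undo;
out:
	kfree(buf);	/* single owner of 'buf', freed on every path */
	return ret;
undo:
	undo_step_one();
	goto out;
}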
@@ -937,6 +937,9 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct rb_irq_work *rbwork;
 
+	if (!buffer)
+		return;
+
 	if (cpu == RING_BUFFER_ALL_CPUS) {
 
 		/* Wake up individual ones too. One level recursion */
@@ -945,7 +948,15 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 
 		rbwork = &buffer->irq_work;
 	} else {
+		if (WARN_ON_ONCE(!buffer->buffers))
+			return;
+		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+			return;
+
 		cpu_buffer = buffer->buffers[cpu];
+		/* The CPU buffer may not have been initialized yet */
+		if (!cpu_buffer)
+			return;
 		rbwork = &cpu_buffer->irq_work;
 	}
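The ring_buffer.c guards account for ring_buffer_wake_waiters() being reachable while the buffer is mid-creation or mid-teardown: the buffer itself, the per-CPU array, the CPU index, and the individual slot are each validated before being dereferenced. The same defensive shape, sketched with hypothetical types:

#include <linux/wait.h>

/* Hypothetical types mirroring the buffer/per-CPU layout. */
struct my_cpu_part {
	wait_queue_head_t waiters;
};

struct my_buffer {
	int nr_parts;
	struct my_cpu_part **parts;	/* slots may be allocated lazily */
};

static void my_wake_waiters(struct my_buffer *buf, int cpu)
{
	struct my_cpu_part *part;

	if (!buf || !buf->parts)	/* buffer absent or mid-creation */
		return;
	if (cpu < 0 || cpu >= buf->nr_parts)
		return;

	part = buf->parts[cpu];
	if (!part)			/* this slot not initialized yet */
		return;

	wake_up_all(&part->waiters);
}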