diff --git a/fs/proc/base.c b/fs/proc/base.c
index a45d4d640f01..56b1c4f1e8c0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -95,6 +95,7 @@
 #include
 #include
 #include
+#include
 
 #include "internal.h"
 #include "fd.h"
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 73dae64bfc9c..9bad7a16dc61 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -947,10 +947,12 @@ static void __tasklet_schedule_common(struct tasklet_struct *t,
 	 * is locked before adding it to the list.
 	 */
 	if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 		if (test_and_set_bit(TASKLET_STATE_CHAINED, &t->state)) {
 			tasklet_unlock(t);
 			return;
 		}
+#endif
 		t->next = NULL;
 		*head->tail = t;
 		head->tail = &(t->next);
@@ -1044,7 +1046,11 @@ static void tasklet_action_common(struct softirq_action *a,
 again:
 		t->func(t->data);
 
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 		while (cmpxchg(&t->state, TASKLET_STATEF_RC, 0) != TASKLET_STATEF_RC) {
+#else
+		while (!tasklet_tryunlock(t)) {
+#endif
 			/*
 			 * If it got disabled meanwhile, bail out:
 			 */
diff --git a/localversion-rt b/localversion-rt
index 3165a8781ff5..51b05e9abe6f 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt54
+-rt55
diff --git a/mm/slub.c b/mm/slub.c
index d243c6ef7fc9..a9473bbb1338 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2341,9 +2341,6 @@ static void flush_all(struct kmem_cache *s)
 	for_each_online_cpu(cpu) {
 		struct slub_free_list *f;
 
-		if (!has_cpu_slab(cpu, s))
-			continue;
-
 		f = &per_cpu(slub_free_list, cpu);
 		raw_spin_lock_irq(&f->lock);
 		list_splice_init(&f->list, &tofree);