diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 448bbef474564..fc53e0ad56d90 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -35,10 +35,8 @@ static inline void local_bh_enable(void)
 
 #ifdef CONFIG_PREEMPT_RT
 extern bool local_bh_blocked(void);
-extern void softirq_preempt(void);
 #else
 static inline bool local_bh_blocked(void) { return false; }
-static inline void softirq_preempt(void) { }
 #endif
 
 #endif /* _LINUX_BH_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04dbfb7cda334..2016534bbc533 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1838,7 +1838,6 @@ static inline int dl_task_check_affinity(struct task_struct *p, const struct cpu
 }
 #endif
 
-extern bool task_is_pi_boosted(const struct task_struct *p);
 extern int yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d7af5c21c94a8..91d250a0e039b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7533,21 +7533,6 @@ static inline void preempt_dynamic_init(void) { }
 
 #endif /* CONFIG_PREEMPT_DYNAMIC */
 
-/*
- * task_is_pi_boosted - Check if task has been PI boosted.
- * @p: Task to check.
- *
- * Return true if task is subject to priority inheritance.
- */
-bool task_is_pi_boosted(const struct task_struct *p)
-{
-	int prio = p->prio;
-
-	if (!rt_prio(prio))
-		return false;
-	return prio != p->normal_prio;
-}
-
 int io_schedule_prepare(void)
 {
 	int old_iowait = current->in_iowait;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a825cdc1f02b3..172c588de5427 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2176,11 +2176,8 @@ static int rto_next_cpu(struct root_domain *rd)
 
 		rd->rto_cpu = cpu;
 
-		if (cpu < nr_cpu_ids) {
-			if (!has_pushable_tasks(cpu_rq(cpu)))
-				continue;
+		if (cpu < nr_cpu_ids)
 			return cpu;
-		}
 
 		rd->rto_cpu = -1;
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c251f1bf75c5e..0052bd4d9ec1d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -248,19 +248,6 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
-void softirq_preempt(void)
-{
-	if (WARN_ON_ONCE(!preemptible()))
-		return;
-
-	if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
-		return;
-
-	__local_bh_enable(SOFTIRQ_OFFSET, true);
-	/* preemption point */
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
-}
-
 /*
  * Invoked from ksoftirqd_run() outside of the interrupt disabled section
  * to acquire the per CPU local lock for reentrancy protection.
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index aef6ca700c991..79f0dc73ac436 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1564,16 +1564,9 @@ static void timer_sync_wait_running(struct timer_base *base)
 	__releases(&base->lock) __releases(&base->expiry_lock)
 	__acquires(&base->expiry_lock) __acquires(&base->lock)
 {
-	bool need_preempt;
-
-	need_preempt = task_is_pi_boosted(current);
-	if (need_preempt || atomic_read(&base->timer_waiters)) {
+	if (atomic_read(&base->timer_waiters)) {
 		raw_spin_unlock_irq(&base->lock);
 		spin_unlock(&base->expiry_lock);
-
-		if (need_preempt)
-			softirq_preempt();
-
 		spin_lock(&base->expiry_lock);
 		raw_spin_lock_irq(&base->lock);
 	}
diff --git a/localversion-rt b/localversion-rt
index 6f206be67cd28..c3054d08a1129 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt1
+-rt2