author     Jiri Kosina <jkosina@suse.cz>   2017-11-19 22:03:37 +0100
committer  Jiri Kosina <jkosina@suse.cz>   2017-11-19 22:03:37 +0100
commit     826fdc0aefb9c141139b2aa05e4c4d85bd836905 (patch)
tree       1155d75d3d990c71ec424791e64d2b93cfc68067
parent     c32d23d2580c309f6167db1a1b15fbcb5cfb0f6e (diff)
parent     78b0ff739f3b985027a8390081f866fb1570da25 (diff)
Merge remote-tracking branch 'origin/users/mgorman/SLE15/for-next' into SLE15
-rw-r--r--  arch/x86/entry/common.c          |  4
-rw-r--r--  arch/x86/kernel/smpboot.c        |  2
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c    | 20
-rw-r--r--  include/linux/lockdep.h          | 15
-rw-r--r--  kernel/events/core.c             | 12
-rw-r--r--  kernel/irq_work.c                |  2
-rw-r--r--  kernel/rcu/tree.c                |  4
-rw-r--r--  kernel/sched/clock.c             |  2
-rw-r--r--  kernel/sched/cputime.c           |  3
-rw-r--r--  kernel/sched/idle.c              |  3
-rw-r--r--  kernel/smp.c                     |  2
-rw-r--r--  kernel/softirq.c                 | 10
-rw-r--r--  kernel/time/hrtimer.c            |  4
-rw-r--r--  kernel/time/posix-cpu-timers.c   |  6
-rw-r--r--  kernel/time/tick-sched.c         |  5
-rw-r--r--  kernel/workqueue.c               |  2
-rw-r--r--  net/core/netpoll.c               |  2

17 files changed, 64 insertions(+), 34 deletions(-)
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index cdefcfdd9e63..58e639190aab 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -183,9 +183,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
 	struct thread_info *ti = current_thread_info();
 	u32 cached_flags;
 
-	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
-		local_irq_disable();
-
+	lockdep_assert_irqs_disabled();
 	lockdep_sys_exit();
 
 	cached_flags = READ_ONCE(ti->flags);
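
One behavioural detail worth noting in this hunk: the old CONFIG_PROVE_LOCKING check repaired the state by calling local_irq_disable() after warning, whereas lockdep_assert_irqs_disabled() only reports and changes nothing. A minimal userspace sketch of that difference (the boolean below is a stand-in for the tracked IRQ state; illustration only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;	/* stand-in for the tracked IRQ state */

/* Old style: warn, then force the expected state anyway. */
static void check_and_repair(void)
{
	if (irqs_enabled) {
		fprintf(stderr, "WARN: IRQs enabled on the exit path\n");
		irqs_enabled = false;	/* the old local_irq_disable() fallback */
	}
}

/* New style: report only; the offending caller gets fixed instead. */
static void assert_disabled(void)
{
	if (irqs_enabled)
		fprintf(stderr, "WARN: IRQs not disabled as expected\n");
}

int main(void)
{
	check_and_repair();	/* warns and flips the state */
	irqs_enabled = true;
	assert_disabled();	/* warns but leaves the state alone */
	return 0;
}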
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 4899684fe181..4949ab5b8f34 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1097,7 +1097,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	unsigned long flags;
 	int err;
 
-	WARN_ON(irqs_disabled());
+	lockdep_assert_irqs_enabled();
 
 	pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 3f0ce2ae35ee..d2e2963f3ede 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -200,10 +200,26 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
 {
 	struct pcc_cpu *pcc_cpu_data;
 	struct cpufreq_freqs freqs;
+	static u32 limit = 0;
+
 	u16 status;
 	u32 input_buffer;
 	int cpu;
 
+	if (!limit) {
+		u32 f_min = policy->cpuinfo.min_freq / 1000;
+		u32 f_max = policy->cpuinfo.max_freq / 1000;
+		limit = (f_max - f_min) * f_min;
+		limit /= f_max;
+		limit *= 1000;
+		limit += f_min * 1000;
+		pr_debug("pcc-cpufreq: setting deadband limit to %u kHz\n",
+			 limit);
+	}
+
+	if (target_freq < limit)
+		target_freq = policy->min;
+
 	cpu = policy->cpu;
 	pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
@@ -214,6 +230,10 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
+
+	if (freqs.new == freqs.old)
+		return 0;
+
 	cpufreq_freq_transition_begin(policy, &freqs);
 
 	spin_lock(&pcc_lock);
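
To make the new deadband arithmetic concrete, the following standalone sketch replays the driver's u32 integer math for a hypothetical 1.2-2.8 GHz CPU (both frequencies are invented example values, not taken from this patch). Any target below the computed limit is clamped to policy->min, and the new freqs.new == freqs.old early return then skips the costly PCC transition altogether:

#include <stdio.h>

int main(void)
{
	/* Stand-ins for policy->cpuinfo.{min,max}_freq, in kHz (example values). */
	unsigned int min_freq = 1200000;
	unsigned int max_freq = 2800000;

	unsigned int f_min = min_freq / 1000;		/* 1200 MHz */
	unsigned int f_max = max_freq / 1000;		/* 2800 MHz */

	unsigned int limit = (f_max - f_min) * f_min;	/* 1600 * 1200 = 1920000 */
	limit /= f_max;					/* 1920000 / 2800 = 685 (truncating) */
	limit *= 1000;					/* back to kHz: 685000 */
	limit += f_min * 1000;				/* 685000 + 1200000 = 1885000 kHz */

	/* Requests below this collapse to policy->min. */
	printf("deadband limit = %u kHz\n", limit);
	return 0;
}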
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fffe49f188e6..0125a04ed343 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -586,9 +586,24 @@ do { \
 	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
 	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
 } while (0)
+
+#define lockdep_assert_irqs_enabled()	do {				\
+		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
+			  !current->hardirqs_enabled,			\
+			  "IRQs not enabled as expected\n");		\
+	} while (0)
+
+#define lockdep_assert_irqs_disabled()	do {				\
+		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
+			  current->hardirqs_enabled,			\
+			  "IRQs not disabled as expected\n");		\
+	} while (0)
+
 #else
 # define might_lock(lock) do { } while (0)
 # define might_lock_read(lock) do { } while (0)
+# define lockdep_assert_irqs_enabled() do { } while (0)
+# define lockdep_assert_irqs_disabled() do { } while (0)
 #endif
 
 #ifdef CONFIG_LOCKDEP
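
A rough userspace analogue of the two new assertions may help; the variables below are simplified stand-ins for debug_locks, current->lockdep_recursion and current->hardirqs_enabled (illustration only, not the kernel implementation). It shows the two properties the macros rely on: the check is gated on lockdep state being trustworthy, and WARN_ONCE-style latching means each call site warns at most once:

#include <stdbool.h>
#include <stdio.h>

static bool debug_locks = true;		/* lockdep state still trustworthy */
static int  lockdep_recursion;		/* nonzero while inside lockdep itself */
static bool hardirqs_enabled = true;	/* software-tracked IRQ state */

/* Each expansion gets its own once-latch, like WARN_ONCE. */
#define warn_once(cond, msg)				\
	do {						\
		static bool warned;			\
		if (!warned && (cond)) {		\
			warned = true;			\
			fprintf(stderr, "%s", msg);	\
		}					\
	} while (0)

#define assert_irqs_enabled()					\
	warn_once(debug_locks && !lockdep_recursion &&		\
		  !hardirqs_enabled, "IRQs not enabled as expected\n")

#define assert_irqs_disabled()					\
	warn_once(debug_locks && !lockdep_recursion &&		\
		  hardirqs_enabled, "IRQs not disabled as expected\n")

static void demo(void)
{
	assert_irqs_disabled();		/* single call site, single latch */
}

int main(void)
{
	demo();			/* warns: tracked state says IRQs are on */
	demo();			/* silent: this call site already warned */
	hardirqs_enabled = false;
	assert_irqs_enabled();	/* warns: tracked state now says IRQs are off */
	return 0;
}

Unlike an unconditional WARN_ON(!irqs_disabled()), which reads the hardware flag on every call, the lockdep variant compiles away entirely when CONFIG_LOCKDEP is off, which is why the conversions in the rest of this merge are safe in hot paths.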
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 78de4fed1890..ffbdeedf77b2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -209,7 +209,7 @@ static int event_function(void *info)
 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
 	int ret = 0;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	perf_ctx_lock(cpuctx, task_ctx);
 	/*
@@ -306,7 +306,7 @@ static void event_function_local(struct perf_event *event, event_f func, void *d
 	struct task_struct *task = READ_ONCE(ctx->task);
 	struct perf_event_context *task_ctx = NULL;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	if (task) {
 		if (task == TASK_TOMBSTONE)
@@ -1008,7 +1008,7 @@ static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 	struct perf_cpu_context *cpuctx;
 	int rotations = 0;
 
-	WARN_ON(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
 	rotations = perf_rotate_context(cpuctx);
@@ -1095,7 +1095,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx)
 {
 	struct list_head *head = this_cpu_ptr(&active_ctx_list);
 
-	WARN_ON(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	WARN_ON(!list_empty(&ctx->active_ctx_list));
@@ -1104,7 +1104,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx)
 static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
 {
-	WARN_ON(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	WARN_ON(list_empty(&ctx->active_ctx_list));
@@ -3477,7 +3477,7 @@ void perf_event_task_tick(void)
 	struct perf_event_context *ctx, *tmp;
 	int throttled;
 
-	WARN_ON(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	__this_cpu_inc(perf_throttled_seq);
 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index bcf107ce0854..899579657a0a 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -188,7 +188,7 @@ void irq_work_tick(void)
  */
 void irq_work_sync(struct irq_work *work)
 {
-	WARN_ON_ONCE(irqs_disabled());
+	lockdep_assert_irqs_enabled();
 
 	while (work->flags & IRQ_WORK_BUSY)
 		cpu_relax();
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6354641c2f8f..41b052849555 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -899,7 +899,7 @@ void rcu_irq_exit(void)
 {
 	struct rcu_dynticks *rdtp;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 		     rdtp->dynticks_nesting < 1);
@@ -1031,7 +1031,7 @@ void rcu_irq_enter(void)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
+	lockdep_assert_irqs_disabled();
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting++;
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index ca0f8fc945c6..e086babe6c61 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -388,7 +388,7 @@ void sched_clock_tick(void)
 	if (unlikely(!sched_clock_running))
 		return;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	scd = this_scd();
 	__scd_stamp(scd);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ad275b25e3f9..57507c85bd2c 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -259,8 +259,7 @@ static inline u64 account_other_time(u64 max)
 {
 	u64 accounted;
 
-	/* Shall be converted to a lockdep-enabled lightweight check */
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	accounted = steal_account_process_time(max);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ef63adce0c9c..44c3b46a57ca 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -209,6 +209,7 @@ exit_idle:
  */
 static void do_idle(void)
 {
+	int cpu = smp_processor_id();
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
 	 *
@@ -225,7 +226,7 @@ static void do_idle(void)
 		check_pgt_cache();
 		rmb();
 
-		if (cpu_is_offline(smp_processor_id())) {
+		if (cpu_is_offline(cpu)) {
 			cpuhp_report_idle_dead();
 			arch_cpu_idle_dead();
 		}
diff --git a/kernel/smp.c b/kernel/smp.c
index 81cfca9b4cc3..b69dff67a09b 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -213,7 +213,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	call_single_data_t *csd, *csd_next;
 	static bool warned;
 
-	WARN_ON(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	head = this_cpu_ptr(&call_single_queue);
 	entry = llist_del_all(head);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 4e09821f9d9e..662f7b1b7a78 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__local_bh_disable_ip);
 
 static void __local_bh_enable(unsigned int cnt)
 {
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 		trace_softirqs_on(_RET_IP_);
@@ -158,7 +158,8 @@ EXPORT_SYMBOL(_local_bh_enable);
 
 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 {
-	WARN_ON_ONCE(in_irq() || irqs_disabled());
+	WARN_ON_ONCE(in_irq());
+	lockdep_assert_irqs_enabled();
 #ifdef CONFIG_TRACE_IRQFLAGS
 	local_irq_disable();
 #endif
@@ -396,9 +397,8 @@ void irq_exit(void)
 #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
 	local_irq_disable();
 #else
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 #endif
-
 	account_irq_exit_time(current);
 	preempt_count_sub(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
@@ -488,7 +488,7 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
-	BUG_ON(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	t->next = __this_cpu_read(tasklet_hi_vec.head);
 	__this_cpu_write(tasklet_hi_vec.head, t);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index ac053bb5296e..82a06d25e644 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -757,9 +757,7 @@ void clock_was_set(void)
  */
 void hrtimers_resume(void)
 {
-	WARN_ONCE(!irqs_disabled(),
-		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
-
+	lockdep_assert_irqs_disabled();
 	/* Retrigger on the local CPU */
 	retrigger_next_event(NULL);
 	/* And schedule a retrigger for all others */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index d2a1e6dd0291..29e695f174f2 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -590,7 +590,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	/*
 	 * Disarm any old timer after extracting its expiry time.
 	 */
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
@@ -1027,7 +1027,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 	arm_timer(timer);
 	unlock_task_sighand(p, &flags);
@@ -1122,7 +1122,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	struct k_itimer *timer, *next;
 	unsigned long flags;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	/*
 	 * The fast path checks that there are no expired thread or thread
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f74c7cfccaa..0bafeaa6caca 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -192,7 +192,7 @@ static bool check_tick_dependency(atomic_t *dep)
 
 static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
 {
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	if (unlikely(!cpu_online(cpu)))
 		return false;
@@ -944,8 +944,7 @@ void tick_nohz_idle_enter(void)
 {
 	struct tick_sched *ts;
 
-	WARN_ON_ONCE(irqs_disabled());
-
+	lockdep_assert_irqs_enabled();
 	/*
 	 * Update the idle state in the scheduler domain hierarchy
 	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7146ea70a62d..1b8cdd661a7f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1375,7 +1375,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 * queued or lose PENDING. Grabbing PENDING and queueing should
 	 * happen with IRQ disabled.
 	 */
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	debug_work_activate(work);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a43fab2ec475..9ed46effeca1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -334,7 +334,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	npinfo = rcu_dereference_bh(np->dev->npinfo);
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {