Diffstat (limited to 'kernel')
-rw-r--r--   kernel/irq/migration.c   14
-rw-r--r--   kernel/perf_event.c       7
-rw-r--r--   kernel/sched_fair.c      13
-rw-r--r--   kernel/sched_rt.c         2
-rw-r--r--   kernel/sys.c              3
5 files changed, 23 insertions, 16 deletions
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 1d254194048..441fd629ff0 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -56,6 +56,7 @@ void move_masked_irq(int irq)
void move_native_irq(int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
+ bool masked;
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
return;
@@ -63,8 +64,15 @@ void move_native_irq(int irq)
if (unlikely(desc->status & IRQ_DISABLED))
return;
- desc->irq_data.chip->irq_mask(&desc->irq_data);
+ /*
+ * Be careful vs. already masked interrupts. If this is a
+ * threaded interrupt with ONESHOT set, we can end up with an
+ * interrupt storm.
+ */
+ masked = desc->status & IRQ_MASKED;
+ if (!masked)
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
move_masked_irq(irq);
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ if (!masked)
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
-
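Note on the irq/migration.c hunk above: the fix records whether the line was already masked before the move and only masks/unmasks when it was not, so a threaded handler with ONESHOT set, which keeps the line masked until its thread completes, is not unmasked early and does not trigger an interrupt storm. Below is a minimal user-space sketch of the same pattern; it is illustrative only, and all names in it (fake_irq, STATE_MASKED, move_irq) are hypothetical, not kernel APIs.

/* Illustrative sketch only (not kernel code): conditionally mask/unmask
 * around a move, preserving a pre-existing masked state. */
#include <stdbool.h>
#include <stdio.h>

#define STATE_MASKED 0x1

struct fake_irq {
	unsigned int status;
};

static void mask(struct fake_irq *d)    { d->status |= STATE_MASKED;  puts("mask");   }
static void unmask(struct fake_irq *d)  { d->status &= ~STATE_MASKED; puts("unmask"); }
static void do_move(struct fake_irq *d) { (void)d; puts("move"); }

static void move_irq(struct fake_irq *d)
{
	/* Remember whether the line was masked before we touched it. */
	bool masked = d->status & STATE_MASKED;

	if (!masked)
		mask(d);
	do_move(d);
	/* Only unmask if we masked it here; a line kept masked by a
	 * oneshot-threaded handler must stay masked. */
	if (!masked)
		unmask(d);
}

int main(void)
{
	struct fake_irq already_masked = { .status = STATE_MASKED };
	struct fake_irq unmasked_line  = { .status = 0 };

	move_irq(&already_masked); /* prints only "move" */
	move_irq(&unmasked_line);  /* prints "mask", "move", "unmask" */
	return 0;
}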
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 852ae8c6650..999835b6112 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1901,11 +1901,12 @@ static void __perf_event_read(void *info)
return;
raw_spin_lock(&ctx->lock);
- update_context_time(ctx);
+ if (ctx->is_active)
+ update_context_time(ctx);
update_event_times(event);
+ if (event->state == PERF_EVENT_STATE_ACTIVE)
+ event->pmu->read(event);
raw_spin_unlock(&ctx->lock);
-
- event->pmu->read(event);
}
static inline u64 perf_event_count(struct perf_event *event)
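Note on the perf_event.c hunk above: the PMU read is moved under ctx->lock and both the context-time update and the read are now skipped unless the context/event is still active, so the read cannot race with the event being scheduled out after the lock is dropped. A rough user-space analogue of sampling a value under the same lock that guards its active state is sketched below; the counter struct and field names are hypothetical, not the perf implementation.

/* Illustrative sketch only (not kernel code): read a value under the
 * lock that also guards its state, and only while it is active. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct counter {
	pthread_mutex_t lock;
	bool active;
	uint64_t value;
};

static uint64_t counter_read(struct counter *c)
{
	uint64_t v;

	pthread_mutex_lock(&c->lock);
	/* Skip the backing read when the counter is not active, mirroring
	 * the state checks added in __perf_event_read(). */
	v = c->active ? c->value : 0;
	pthread_mutex_unlock(&c->lock);
	return v;
}

int main(void)
{
	struct counter c = { PTHREAD_MUTEX_INITIALIZER, true, 42 };

	printf("%llu\n", (unsigned long long)counter_read(&c));
	return 0;
}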
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 354769979c0..0c26e2df450 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -722,10 +722,10 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
u64 now, delta;
unsigned long load = cfs_rq->load.weight;
- if (!cfs_rq)
+ if (cfs_rq->tg == &root_task_group)
return;
- now = rq_of(cfs_rq)->clock;
+ now = rq_of(cfs_rq)->clock_task;
delta = now - cfs_rq->load_stamp;
/* truncate load history at 4 idle periods */
@@ -830,9 +830,6 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
struct sched_entity *se;
long shares;
- if (!cfs_rq)
- return;
-
tg = cfs_rq->tg;
se = tg->se[cpu_of(rq_of(cfs_rq))];
if (!se)
@@ -1432,7 +1429,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
- unsigned long this_load, load;
+ s64 this_load, load;
int idx, this_cpu, prev_cpu;
unsigned long tl_per_task;
struct task_group *tg;
@@ -1471,8 +1468,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
* Otherwise check if either cpus are near enough in load to allow this
* task to be woken on this_cpu.
*/
- if (this_load) {
- unsigned long this_eff_load, prev_eff_load;
+ if (this_load > 0) {
+ s64 this_eff_load, prev_eff_load;
this_eff_load = 100;
this_eff_load *= power_of(prev_cpu);
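Note on the wake_affine() hunk above: the load terms become s64 and the test becomes "this_load > 0", which suggests the load sums can go negative (for example via effective_load()). With unsigned arithmetic a negative adjustment wraps to a huge positive value, so the old "if (this_load)" test would still fire. A standalone illustration of that wraparound, not kernel code:

/* Illustrative sketch only: unsigned vs signed handling of a negative
 * load adjustment. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t u_load = 100;
	int64_t  s_load = 100;
	int64_t  delta  = -150;    /* e.g. a negative effective_load() result */

	u_load += (uint64_t)delta; /* wraps to a huge positive number */
	s_load += delta;           /* becomes -50 */

	printf("unsigned: %" PRIu64 " (nonzero, so the old check fires)\n", u_load);
	printf("signed:   %" PRId64 " (<= 0, so the new check does not)\n", s_load);
	return 0;
}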
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec747ca..ad6267714c8 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -625,7 +625,7 @@ static void update_curr_rt(struct rq *rq)
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
u64 delta_exec;
- if (!task_has_rt_policy(curr))
+ if (curr->sched_class != &rt_sched_class)
return;
delta_exec = rq->clock_task - curr->se.exec_start;
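Note on the sched_rt.c hunk above: the accounting check now keys off the scheduling class the task is actually running under rather than its policy field, presumably because the two can briefly disagree (for example during a policy change). A small sketch of that idea follows; the structs and names (sched_class_stub, task, update_curr) are hypothetical stand-ins, not the kernel types.

/* Illustrative sketch only: dispatch accounting on the class pointer,
 * not the policy value, since they can disagree mid-transition. */
#include <stdio.h>

struct sched_class_stub { const char *name; };

static const struct sched_class_stub rt_class   = { "rt"   };
static const struct sched_class_stub fair_class = { "fair" };

struct task {
	int policy;                           /* 0 = normal, 1 = rt */
	const struct sched_class_stub *class; /* class the task runs under */
};

static void update_curr(const struct task *t)
{
	/* Account runtime for the class the task is currently on. */
	if (t->class != &rt_class)
		return;
	printf("charging rt runtime (policy field says %d)\n", t->policy);
}

int main(void)
{
	/* Mid-transition: policy already set to normal, but the task is
	 * still on the rt class until the switch completes. */
	struct task t = { .policy = 0, .class = &rt_class };

	update_curr(&t); /* still accounted as rt, as intended */
	(void)fair_class;
	return 0;
}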
diff --git a/kernel/sys.c b/kernel/sys.c
index 31b71a276b4..18da702ec81 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1385,7 +1385,8 @@ static int check_prlimit_permission(struct task_struct *task)
const struct cred *cred = current_cred(), *tcred;
tcred = __task_cred(task);
- if ((cred->uid != tcred->euid ||
+ if (current != task &&
+ (cred->uid != tcred->euid ||
cred->uid != tcred->suid ||
cred->uid != tcred->uid ||
cred->gid != tcred->egid ||