author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-02 19:53:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-02 19:53:30 -0700
commit     f317ff9eed763e99bd226a447f93d42509434f43 (patch)
tree       d77edfcdb1e7c32c30327f50dce04bcff8ec1531 /drivers/net/phy/phy.c
parent     13cc56013842a847a0f6ff805d9ed9181e753ef8 (diff)
parent     a85f1a41f020bc2c97611060bcfae6f48a1db28d (diff)
Merge branch 'for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue changes from Tejun Heo:
 "Surprisingly, Lai and I didn't break too many things implementing
  custom pools and stuff last time around and there aren't any
  follow-up changes necessary at this point.

  The only change in this pull request is Viresh's patches to make some
  per-cpu workqueues to behave as unbound workqueues dependent on a boot
  param whose default can be configured via a config option.

  This leads to higher processing overhead / lower bandwidth as more
  work items are bounced across CPUs; however, it can lead to noticeable
  powersave in certain configurations - ~10% w/ idlish constant workload
  on a big.LITTLE configuration according to Viresh.

  This is because per-cpu workqueues interfere with how the scheduler
  perceives whether or not each CPU is idle by forcing pinned tasks on
  them, which makes the scheduler's power-aware scheduling decisions
  less effective.

  Its effectiveness is likely less pronounced on homogenous
  configurations and this type of optimization can probably be made
  automatic; however, the changes are pretty minimal and the affected
  workqueues are clearly marked, so it's an easy gain for some
  configurations for the time being with pretty unintrusive changes."

* 'for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  fbcon: queue work on power efficient wq
  block: queue work on power efficient wq
  PHYLIB: queue work on system_power_efficient_wq
  workqueue: Add system wide power_efficient workqueues
  workqueues: Introduce new flag WQ_POWER_EFFICIENT for power oriented workqueues
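As a rough illustration of the facility this pull adds, the sketch below shows the two ways a driver can opt in: queueing work on the new system-wide system_power_efficient_wq (which is what the phy.c diff further down does), or allocating its own workqueue with the new WQ_POWER_EFFICIENT flag. This is a minimal, hypothetical example, not code from this merge; example_wq, example_dwork and example_work_fn are made-up names. It assumes the behaviour described above: with CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y, or workqueue.power_efficient=1 on the kernel command line, such workqueues behave as unbound, otherwise they keep ordinary per-cpu behaviour.

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical illustration only; not part of this commit. */
static struct workqueue_struct *example_wq;	/* made-up driver workqueue */
static struct delayed_work example_dwork;	/* made-up periodic work item */

static void example_work_fn(struct work_struct *work)
{
	/* latency-tolerant housekeeping would run here */
}

static int __init example_init(void)
{
	INIT_DELAYED_WORK(&example_dwork, example_work_fn);

	/* Option 1: queue on the shared power-efficient workqueue. */
	queue_delayed_work(system_power_efficient_wq, &example_dwork, HZ);

	/* Option 2: a dedicated workqueue marked WQ_POWER_EFFICIENT. */
	example_wq = alloc_workqueue("example_wq", WQ_POWER_EFFICIENT, 0);
	if (!example_wq) {
		cancel_delayed_work_sync(&example_dwork);
		return -ENOMEM;
	}

	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&example_dwork);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Only work items moved onto these marked workqueues change behaviour; everything else keeps its per-cpu placement, which is why the changelog above describes the series as unintrusive.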
Diffstat (limited to 'drivers/net/phy/phy.c')
-rw-r--r--  drivers/net/phy/phy.c  9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 38f0b312ff8..663d2d0448b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -439,7 +439,7 @@ void phy_start_machine(struct phy_device *phydev,
 {
 	phydev->adjust_state = handler;
 
-	schedule_delayed_work(&phydev->state_queue, HZ);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
 }
 
 /**
@@ -500,7 +500,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 	disable_irq_nosync(irq);
 	atomic_inc(&phydev->irq_disable);
 
-	schedule_work(&phydev->phy_queue);
+	queue_work(system_power_efficient_wq, &phydev->phy_queue);
 
 	return IRQ_HANDLED;
 }
@@ -655,7 +655,7 @@ static void phy_change(struct work_struct *work)
 
 	/* reschedule state queue work to run as soon as possible */
 	cancel_delayed_work_sync(&phydev->state_queue);
-	schedule_delayed_work(&phydev->state_queue, 0);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 
 	return;
@@ -918,7 +918,8 @@ void phy_state_machine(struct work_struct *work)
 	if (err < 0)
 		phy_error(phydev);
 
-	schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+			PHY_STATE_TIME * HZ);
 }
 
 static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,