Diffstat (limited to 'drivers/clocksource/sh_mtu2.c')
-rw-r--r--	drivers/clocksource/sh_mtu2.c	| 488
1 file changed, 344 insertions(+), 144 deletions(-)
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 4aac9ee0d0c..188d4e092ef 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -11,37 +11,48 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/err.h>
 #include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
 #include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/ioport.h>
 #include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/clockchips.h>
-#include <linux/sh_timer.h>
-#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
+#include <linux/sh_timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct sh_mtu2_device;
+
+struct sh_mtu2_channel {
+	struct sh_mtu2_device *mtu;
+	unsigned int index;
+
+	void __iomem *base;
+	int irq;
+
+	struct clock_event_device ced;
+};
+
+struct sh_mtu2_device {
+	struct platform_device *pdev;
 
-struct sh_mtu2_priv {
 	void __iomem *mapbase;
 	struct clk *clk;
-	struct irqaction irqaction;
-	struct platform_device *pdev;
-	unsigned long rate;
-	unsigned long periodic;
-	struct clock_event_device ced;
+
+	struct sh_mtu2_channel *channels;
+	unsigned int num_channels;
+
+	bool legacy;
+	bool has_clockevent;
 };
 
 static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
@@ -55,6 +66,88 @@ static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
 #define TCNT 5 /* channel register */
 #define TGR  6 /* channel register */
 
+#define TCR_CCLR_NONE		(0 << 5)
+#define TCR_CCLR_TGRA		(1 << 5)
+#define TCR_CCLR_TGRB		(2 << 5)
+#define TCR_CCLR_SYNC		(3 << 5)
+#define TCR_CCLR_TGRC		(5 << 5)
+#define TCR_CCLR_TGRD		(6 << 5)
+#define TCR_CCLR_MASK		(7 << 5)
+#define TCR_CKEG_RISING		(0 << 3)
+#define TCR_CKEG_FALLING	(1 << 3)
+#define TCR_CKEG_BOTH		(2 << 3)
+#define TCR_CKEG_MASK		(3 << 3)
+/* Values 4 to 7 are channel-dependent */
+#define TCR_TPSC_P1		(0 << 0)
+#define TCR_TPSC_P4		(1 << 0)
+#define TCR_TPSC_P16		(2 << 0)
+#define TCR_TPSC_P64		(3 << 0)
+#define TCR_TPSC_CH0_TCLKA	(4 << 0)
+#define TCR_TPSC_CH0_TCLKB	(5 << 0)
+#define TCR_TPSC_CH0_TCLKC	(6 << 0)
+#define TCR_TPSC_CH0_TCLKD	(7 << 0)
+#define TCR_TPSC_CH1_TCLKA	(4 << 0)
+#define TCR_TPSC_CH1_TCLKB	(5 << 0)
+#define TCR_TPSC_CH1_P256	(6 << 0)
+#define TCR_TPSC_CH1_TCNT2	(7 << 0)
+#define TCR_TPSC_CH2_TCLKA	(4 << 0)
+#define TCR_TPSC_CH2_TCLKB	(5 << 0)
+#define TCR_TPSC_CH2_TCLKC	(6 << 0)
+#define TCR_TPSC_CH2_P1024	(7 << 0)
+#define TCR_TPSC_CH34_P256	(4 << 0)
+#define TCR_TPSC_CH34_P1024	(5 << 0)
+#define TCR_TPSC_CH34_TCLKA	(6 << 0)
+#define TCR_TPSC_CH34_TCLKB	(7 << 0)
+#define TCR_TPSC_MASK		(7 << 0)
+
+#define TMDR_BFE		(1 << 6)
+#define TMDR_BFB		(1 << 5)
+#define TMDR_BFA		(1 << 4)
+#define TMDR_MD_NORMAL		(0 << 0)
+#define TMDR_MD_PWM_1		(2 << 0)
+#define TMDR_MD_PWM_2		(3 << 0)
+#define TMDR_MD_PHASE_1		(4 << 0)
+#define TMDR_MD_PHASE_2		(5 << 0)
+#define TMDR_MD_PHASE_3		(6 << 0)
+#define TMDR_MD_PHASE_4		(7 << 0)
+#define TMDR_MD_PWM_SYNC	(8 << 0)
+#define TMDR_MD_PWM_COMP_CREST	(13 << 0)
+#define TMDR_MD_PWM_COMP_TROUGH	(14 << 0)
+#define TMDR_MD_PWM_COMP_BOTH	(15 << 0)
+#define TMDR_MD_MASK		(15 << 0)
+
+#define TIOC_IOCH(n)		((n) << 4)
+#define TIOC_IOCL(n)		((n) << 0)
+#define TIOR_OC_RETAIN		(0 << 0)
+#define TIOR_OC_0_CLEAR		(1 << 0)
+#define TIOR_OC_0_SET		(2 << 0)
+#define TIOR_OC_0_TOGGLE	(3 << 0)
+#define TIOR_OC_1_CLEAR		(5 << 0)
+#define TIOR_OC_1_SET		(6 << 0)
+#define TIOR_OC_1_TOGGLE	(7 << 0)
+#define TIOR_IC_RISING		(8 << 0)
+#define TIOR_IC_FALLING		(9 << 0)
+#define TIOR_IC_BOTH		(10 << 0)
+#define TIOR_IC_TCNT		(12 << 0)
+#define TIOR_MASK		(15 << 0)
+
+#define TIER_TTGE		(1 << 7)
+#define TIER_TTGE2		(1 << 6)
+#define TIER_TCIEU		(1 << 5)
+#define TIER_TCIEV		(1 << 4)
+#define TIER_TGIED		(1 << 3)
+#define TIER_TGIEC		(1 << 2)
+#define TIER_TGIEB		(1 << 1)
+#define TIER_TGIEA		(1 << 0)
+
+#define TSR_TCFD		(1 << 7)
+#define TSR_TCFU		(1 << 5)
+#define TSR_TCFV		(1 << 4)
+#define TSR_TGFD		(1 << 3)
+#define TSR_TGFC		(1 << 2)
+#define TSR_TGFB		(1 << 1)
+#define TSR_TGFA		(1 << 0)
+
 static unsigned long mtu2_reg_offs[] = {
 	[TCR] = 0,
 	[TMDR] = 1,
@@ -65,135 +158,143 @@ static unsigned long mtu2_reg_offs[] = {
 	[TGR] = 8,
 };
 
-static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
+static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-	void __iomem *base = p->mapbase;
 	unsigned long offs;
 
-	if (reg_nr == TSTR)
-		return ioread8(base + cfg->channel_offset);
+	if (reg_nr == TSTR) {
+		if (ch->mtu->legacy)
+			return ioread8(ch->mtu->mapbase);
+		else
+			return ioread8(ch->mtu->mapbase + 0x280);
+	}
 
 	offs = mtu2_reg_offs[reg_nr];
 
 	if ((reg_nr == TCNT) || (reg_nr == TGR))
-		return ioread16(base + offs);
+		return ioread16(ch->base + offs);
 	else
-		return ioread8(base + offs);
+		return ioread8(ch->base + offs);
 }
 
-static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
+static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
 				unsigned long value)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-	void __iomem *base = p->mapbase;
 	unsigned long offs;
 
 	if (reg_nr == TSTR) {
-		iowrite8(value, base + cfg->channel_offset);
-		return;
+		if (ch->mtu->legacy)
+			return iowrite8(value, ch->mtu->mapbase);
+		else
+			return iowrite8(value, ch->mtu->mapbase + 0x280);
 	}
 
 	offs = mtu2_reg_offs[reg_nr];
 
 	if ((reg_nr == TCNT) || (reg_nr == TGR))
-		iowrite16(value, base + offs);
+		iowrite16(value, ch->base + offs);
 	else
-		iowrite8(value, base + offs);
+		iowrite8(value, ch->base + offs);
 }
 
-static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
+static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
 	raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
-	value = sh_mtu2_read(p, TSTR);
+	value = sh_mtu2_read(ch, TSTR);
 
 	if (start)
-		value |= 1 << cfg->timer_bit;
+		value |= 1 << ch->index;
 	else
-		value &= ~(1 << cfg->timer_bit);
+		value &= ~(1 << ch->index);
 
-	sh_mtu2_write(p, TSTR, value);
+	sh_mtu2_write(ch, TSTR, value);
 	raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
 }
 
-static int sh_mtu2_enable(struct sh_mtu2_priv *p)
+static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
 {
+	unsigned long periodic;
+	unsigned long rate;
 	int ret;
 
-	pm_runtime_get_sync(&p->pdev->dev);
-	dev_pm_syscore_device(&p->pdev->dev, true);
+	pm_runtime_get_sync(&ch->mtu->pdev->dev);
+	dev_pm_syscore_device(&ch->mtu->pdev->dev, true);
 
 	/* enable clock */
-	ret = clk_enable(p->clk);
+	ret = clk_enable(ch->mtu->clk);
 	if (ret) {
-		dev_err(&p->pdev->dev, "cannot enable clock\n");
+		dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
+			ch->index);
 		return ret;
 	}
 
 	/* make sure channel is disabled */
-	sh_mtu2_start_stop_ch(p, 0);
-
-	p->rate = clk_get_rate(p->clk) / 64;
-	p->periodic = (p->rate + HZ/2) / HZ;
-
-	/* "Periodic Counter Operation" */
-	sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
-	sh_mtu2_write(p, TIOR, 0);
-	sh_mtu2_write(p, TGR, p->periodic);
-	sh_mtu2_write(p, TCNT, 0);
-	sh_mtu2_write(p, TMDR, 0);
-	sh_mtu2_write(p, TIER, 0x01);
+	sh_mtu2_start_stop_ch(ch, 0);
+
+	rate = clk_get_rate(ch->mtu->clk) / 64;
+	periodic = (rate + HZ/2) / HZ;
+
+	/*
+	 * "Periodic Counter Operation"
+	 * Clear on TGRA compare match, divide clock by 64.
	 */
+	sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
+	sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
+		      TIOC_IOCL(TIOR_OC_0_CLEAR));
+	sh_mtu2_write(ch, TGR, periodic);
+	sh_mtu2_write(ch, TCNT, 0);
+	sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
+	sh_mtu2_write(ch, TIER, TIER_TGIEA);
 
 	/* enable channel */
-	sh_mtu2_start_stop_ch(p, 1);
+	sh_mtu2_start_stop_ch(ch, 1);
 
 	return 0;
 }
 
-static void sh_mtu2_disable(struct sh_mtu2_priv *p)
+static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
 {
 	/* disable channel */
-	sh_mtu2_start_stop_ch(p, 0);
+	sh_mtu2_start_stop_ch(ch, 0);
 
 	/* stop clock */
-	clk_disable(p->clk);
+	clk_disable(ch->mtu->clk);
 
-	dev_pm_syscore_device(&p->pdev->dev, false);
-	pm_runtime_put(&p->pdev->dev);
+	dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
+	pm_runtime_put(&ch->mtu->pdev->dev);
 }
 
 static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
 {
-	struct sh_mtu2_priv *p = dev_id;
+	struct sh_mtu2_channel *ch = dev_id;
 
 	/* acknowledge interrupt */
-	sh_mtu2_read(p, TSR);
-	sh_mtu2_write(p, TSR, 0xfe);
+	sh_mtu2_read(ch, TSR);
+	sh_mtu2_write(ch, TSR, ~TSR_TGFA);
 
 	/* notify clockevent layer */
-	p->ced.event_handler(&p->ced);
+	ch->ced.event_handler(&ch->ced);
 
 	return IRQ_HANDLED;
 }
 
-static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
+static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
 {
-	return container_of(ced, struct sh_mtu2_priv, ced);
+	return container_of(ced, struct sh_mtu2_channel, ced);
 }
 
 static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
 				    struct clock_event_device *ced)
 {
-	struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
+	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
 	int disabled = 0;
 
 	/* deal with old setting first */
 	switch (ced->mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		sh_mtu2_disable(p);
+		sh_mtu2_disable(ch);
 		disabled = 1;
 		break;
 	default:
@@ -202,12 +303,13 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		dev_info(&p->pdev->dev, "used for periodic clock events\n");
-		sh_mtu2_enable(p);
+		dev_info(&ch->mtu->pdev->dev,
+			 "ch%u: used for periodic clock events\n", ch->index);
+		sh_mtu2_enable(ch);
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
 		if (!disabled)
-			sh_mtu2_disable(p);
+			sh_mtu2_disable(ch);
 		break;
 	case CLOCK_EVT_MODE_SHUTDOWN:
 	default:
@@ -217,114 +319,207 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
 
 static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
 {
-	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev);
+	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
 }
 
 static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
 {
-	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev);
+	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
 }
 
-static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
-				       char *name, unsigned long rating)
+static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
+					const char *name)
 {
-	struct clock_event_device *ced = &p->ced;
+	struct clock_event_device *ced = &ch->ced;
 	int ret;
 
-	memset(ced, 0, sizeof(*ced));
-
 	ced->name = name;
 	ced->features = CLOCK_EVT_FEAT_PERIODIC;
-	ced->rating = rating;
-	ced->cpumask = cpumask_of(0);
+	ced->rating = 200;
+	ced->cpumask = cpu_possible_mask;
 	ced->set_mode = sh_mtu2_clock_event_mode;
 	ced->suspend = sh_mtu2_clock_event_suspend;
 	ced->resume = sh_mtu2_clock_event_resume;
 
-	dev_info(&p->pdev->dev, "used for clock events\n");
+	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
+		 ch->index);
 	clockevents_register_device(ced);
 
-	ret = setup_irq(p->irqaction.irq, &p->irqaction);
+	ret = request_irq(ch->irq, sh_mtu2_interrupt,
+			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+			  dev_name(&ch->mtu->pdev->dev), ch);
 	if (ret) {
-		dev_err(&p->pdev->dev, "failed to request irq %d\n",
-			p->irqaction.irq);
+		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
+			ch->index, ch->irq);
 		return;
 	}
 }
 
-static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
-			    unsigned long clockevent_rating)
+static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name,
+			    bool clockevent)
 {
-	if (clockevent_rating)
-		sh_mtu2_register_clockevent(p, name, clockevent_rating);
+	if (clockevent) {
+		ch->mtu->has_clockevent = true;
+		sh_mtu2_register_clockevent(ch, name);
+	}
 
 	return 0;
 }
 
-static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
+static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
+				 struct sh_mtu2_device *mtu)
 {
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
-	struct resource *res;
-	int irq, ret;
-	ret = -ENXIO;
+	static const unsigned int channel_offsets[] = {
+		0x300, 0x380, 0x000,
+	};
+	bool clockevent;
+
+	ch->mtu = mtu;
+
+	if (mtu->legacy) {
+		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
+
+		clockevent = cfg->clockevent_rating != 0;
+
+		ch->irq = platform_get_irq(mtu->pdev, 0);
+		ch->base = mtu->mapbase - cfg->channel_offset;
+		ch->index = cfg->timer_bit;
+	} else {
+		char name[6];
 
-	memset(p, 0, sizeof(*p));
-	p->pdev = pdev;
+		clockevent = true;
 
-	if (!cfg) {
-		dev_err(&p->pdev->dev, "missing platform data\n");
-		goto err0;
+		sprintf(name, "tgi%ua", index);
+		ch->irq = platform_get_irq_byname(mtu->pdev, name);
+		ch->base = mtu->mapbase + channel_offsets[index];
+		ch->index = index;
 	}
 
-	platform_set_drvdata(pdev, p);
+	if (ch->irq < 0) {
+		/* Skip channels with no declared interrupt. */
+		if (!mtu->legacy)
+			return 0;
+
+		dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n",
+			ch->index);
+		return ch->irq;
+	}
+
+	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent);
+}
+
+static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
+{
+	struct resource *res;
 
-	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
+	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
 	if (!res) {
-		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
-		goto err0;
+		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
+		return -ENXIO;
 	}
 
-	irq = platform_get_irq(p->pdev, 0);
-	if (irq < 0) {
-		dev_err(&p->pdev->dev, "failed to get irq\n");
-		goto err0;
+	mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
+	if (mtu->mapbase == NULL)
+		return -ENXIO;
+
+	/*
+	 * In legacy platform device configuration (with one device per channel)
+	 * the resource points to the channel base address.
+	 */
+	if (mtu->legacy) {
+		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
+		mtu->mapbase += cfg->channel_offset;
 	}
 
-	/* map memory, let mapbase point to our channel */
-	p->mapbase = ioremap_nocache(res->start, resource_size(res));
-	if (p->mapbase == NULL) {
-		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
-		goto err0;
+	return 0;
+}
+
+static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu)
+{
+	if (mtu->legacy) {
+		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data;
+		mtu->mapbase -= cfg->channel_offset;
 	}
 
-	/* setup data for setup_irq() (too early for request_irq()) */
-	p->irqaction.name = dev_name(&p->pdev->dev);
-	p->irqaction.handler = sh_mtu2_interrupt;
-	p->irqaction.dev_id = p;
-	p->irqaction.irq = irq;
-	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
-			     IRQF_IRQPOLL  | IRQF_NOBALANCING;
-
-	/* get hold of clock */
-	p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
-	if (IS_ERR(p->clk)) {
-		dev_err(&p->pdev->dev, "cannot get clock\n");
-		ret = PTR_ERR(p->clk);
-		goto err1;
+	iounmap(mtu->mapbase);
+}
+
+static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
+			 struct platform_device *pdev)
+{
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
+	const struct platform_device_id *id = pdev->id_entry;
+	unsigned int i;
+	int ret;
+
+	mtu->pdev = pdev;
+	mtu->legacy = id->driver_data;
+
+	if (mtu->legacy && !cfg) {
+		dev_err(&mtu->pdev->dev, "missing platform data\n");
+		return -ENXIO;
+	}
+
+	/* Get hold of clock. */
+	mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck");
+	if (IS_ERR(mtu->clk)) {
+		dev_err(&mtu->pdev->dev, "cannot get clock\n");
+		return PTR_ERR(mtu->clk);
+	}
+
+	ret = clk_prepare(mtu->clk);
+	if (ret < 0)
+		goto err_clk_put;
+
+	/* Map the memory resource. */
+	ret = sh_mtu2_map_memory(mtu);
+	if (ret < 0) {
+		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
+		goto err_clk_unprepare;
+	}
+
+	/* Allocate and setup the channels. */
+	if (mtu->legacy)
+		mtu->num_channels = 1;
+	else
+		mtu->num_channels = 3;
+
+	mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
+				GFP_KERNEL);
+	if (mtu->channels == NULL) {
+		ret = -ENOMEM;
+		goto err_unmap;
 	}
 
-	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
-				cfg->clockevent_rating);
- err1:
-	iounmap(p->mapbase);
- err0:
+	if (mtu->legacy) {
+		ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu);
+		if (ret < 0)
+			goto err_unmap;
+	} else {
+		for (i = 0; i < mtu->num_channels; ++i) {
+			ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
+			if (ret < 0)
+				goto err_unmap;
+		}
+	}
+
+	platform_set_drvdata(pdev, mtu);
+
+	return 0;
+
+err_unmap:
+	kfree(mtu->channels);
+	sh_mtu2_unmap_memory(mtu);
+err_clk_unprepare:
+	clk_unprepare(mtu->clk);
+err_clk_put:
+	clk_put(mtu->clk);
 	return ret;
 }
 
 static int sh_mtu2_probe(struct platform_device *pdev)
 {
-	struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
+	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
 	int ret;
 
 	if (!is_early_platform_device(pdev)) {
@@ -332,21 +527,18 @@ static int sh_mtu2_probe(struct platform_device *pdev)
 		pm_runtime_enable(&pdev->dev);
 	}
 
-	if (p) {
+	if (mtu) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
 		goto out;
 	}
 
-	p = kmalloc(sizeof(*p), GFP_KERNEL);
-	if (p == NULL) {
-		dev_err(&pdev->dev, "failed to allocate driver data\n");
+	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
+	if (mtu == NULL)
 		return -ENOMEM;
-	}
 
-	ret = sh_mtu2_setup(p, pdev);
+	ret = sh_mtu2_setup(mtu, pdev);
 	if (ret) {
-		kfree(p);
-		platform_set_drvdata(pdev, NULL);
+		kfree(mtu);
 		pm_runtime_idle(&pdev->dev);
 		return ret;
 	}
@@ -354,7 +546,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
 		return 0;
 
  out:
-	if (cfg->clockevent_rating)
+	if (mtu->has_clockevent)
 		pm_runtime_irq_safe(&pdev->dev);
 	else
 		pm_runtime_idle(&pdev->dev);
@@ -367,12 +559,20 @@ static int sh_mtu2_remove(struct platform_device *pdev)
 	return -EBUSY; /* cannot unregister clockevent */
 }
 
+static const struct platform_device_id sh_mtu2_id_table[] = {
+	{ "sh_mtu2", 1 },
+	{ "sh-mtu2", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
+
 static struct platform_driver sh_mtu2_device_driver = {
 	.probe		= sh_mtu2_probe,
 	.remove		= sh_mtu2_remove,
 	.driver		= {
 		.name	= "sh_mtu2",
-	}
+	},
+	.id_table	= sh_mtu2_id_table,
 };
 
 static int __init sh_mtu2_init(void)
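For context, a minimal board-file sketch (not part of this diff) of how a non-legacy "sh-mtu2" platform device could be declared so that sh_mtu2_setup_channel() can look up the per-channel TGRA interrupts by the "tgi0a"/"tgi1a"/"tgi2a" names used above. The MMIO base address and IRQ numbers below are placeholders, and the board would additionally need a clock lookup providing the "fck" clock that sh_mtu2_setup() requests.

/*
 * Hypothetical board code: the MMIO base and IRQ numbers are placeholders,
 * not values taken from this diff.
 */
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource mtu2_resources[] = {
	DEFINE_RES_MEM(0xfcff0000, 0x400),	/* MTU2 register block */
	DEFINE_RES_IRQ_NAMED(179, "tgi0a"),	/* channel 0 TGRA match */
	DEFINE_RES_IRQ_NAMED(186, "tgi1a"),	/* channel 1 TGRA match */
	DEFINE_RES_IRQ_NAMED(192, "tgi2a"),	/* channel 2 TGRA match */
};

static struct platform_device mtu2_device = {
	.name		= "sh-mtu2",	/* matches the non-legacy id_table entry */
	.id		= -1,
	.resource	= mtu2_resources,
	.num_resources	= ARRAY_SIZE(mtu2_resources),
};

Registering this with platform_device_register(&mtu2_device) during board setup would let the driver probe all three channels from one device, while the old "sh_mtu2" name keeps the legacy one-device-per-channel path working.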
