Diffstat (limited to 'drivers/clocksource/sh_tmu.c')
-rw-r--r--  drivers/clocksource/sh_tmu.c | 543
1 file changed, 346 insertions(+), 197 deletions(-)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 78b8dae4962..6bd17a8f3dd 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -11,35 +11,41 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/err.h>
 #include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
 #include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/ioport.h>
 #include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/sh_timer.h>
-#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
+#include <linux/sh_timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+enum sh_tmu_model {
+	SH_TMU_LEGACY,
+	SH_TMU,
+	SH_TMU_SH3,
+};
+
+struct sh_tmu_device;
+
+struct sh_tmu_channel {
+	struct sh_tmu_device *tmu;
+	unsigned int index;
+
+	void __iomem *base;
+	int irq;
 
-struct sh_tmu_priv {
-	void __iomem *mapbase;
-	struct clk *clk;
-	struct irqaction irqaction;
-	struct platform_device *pdev;
 	unsigned long rate;
 	unsigned long periodic;
 	struct clock_event_device ced;
@@ -48,6 +54,21 @@ struct sh_tmu_priv {
 	unsigned int enable_count;
 };
 
+struct sh_tmu_device {
+	struct platform_device *pdev;
+
+	void __iomem *mapbase;
+	struct clk *clk;
+
+	enum sh_tmu_model model;
+
+	struct sh_tmu_channel *channels;
+	unsigned int num_channels;
+
+	bool has_clockevent;
+	bool has_clocksource;
+};
+
 static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
 
 #define TSTR -1 /* shared register */
@@ -55,189 +76,208 @@ static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
 #define TCNT 1 /* channel register */
 #define TCR 2 /* channel register */
 
-static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
+#define TCR_UNF			(1 << 8)
+#define TCR_UNIE		(1 << 5)
+#define TCR_TPSC_CLK4		(0 << 0)
+#define TCR_TPSC_CLK16		(1 << 0)
+#define TCR_TPSC_CLK64		(2 << 0)
+#define TCR_TPSC_CLK256		(3 << 0)
+#define TCR_TPSC_CLK1024	(4 << 0)
+#define TCR_TPSC_MASK		(7 << 0)
+
+static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-	void __iomem *base = p->mapbase;
 	unsigned long offs;
 
-	if (reg_nr == TSTR)
-		return ioread8(base - cfg->channel_offset);
+	if (reg_nr == TSTR) {
+		switch (ch->tmu->model) {
+		case SH_TMU_LEGACY:
+			return ioread8(ch->tmu->mapbase);
+		case SH_TMU_SH3:
+			return ioread8(ch->tmu->mapbase + 2);
+		case SH_TMU:
+			return ioread8(ch->tmu->mapbase + 4);
+		}
+	}
 
 	offs = reg_nr << 2;
 
 	if (reg_nr == TCR)
-		return ioread16(base + offs);
+		return ioread16(ch->base + offs);
 	else
-		return ioread32(base + offs);
+		return ioread32(ch->base + offs);
 }
 
-static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
+static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
 				unsigned long value)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-	void __iomem *base = p->mapbase;
 	unsigned long offs;
 
 	if (reg_nr == TSTR) {
-		iowrite8(value, base - cfg->channel_offset);
-		return;
+		switch (ch->tmu->model) {
+		case SH_TMU_LEGACY:
+			return iowrite8(value, ch->tmu->mapbase);
+		case SH_TMU_SH3:
+			return iowrite8(value, ch->tmu->mapbase + 2);
+		case SH_TMU:
+			return iowrite8(value, ch->tmu->mapbase + 4);
+		}
	}
 
 	offs = reg_nr << 2;
 
 	if (reg_nr == TCR)
-		iowrite16(value, base + offs);
+		iowrite16(value, ch->base + offs);
 	else
-		iowrite32(value, base + offs);
+		iowrite32(value, ch->base + offs);
 }
 
-static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
+static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	unsigned long flags, value;
 
 	/* start stop register shared by multiple timer channels */
 	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
-	value = sh_tmu_read(p, TSTR);
+	value = sh_tmu_read(ch, TSTR);
 
 	if (start)
-		value |= 1 << cfg->timer_bit;
+		value |= 1 << ch->index;
 	else
-		value &= ~(1 << cfg->timer_bit);
+		value &= ~(1 << ch->index);
 
-	sh_tmu_write(p, TSTR, value);
+	sh_tmu_write(ch, TSTR, value);
 	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
 }
 
-static int __sh_tmu_enable(struct sh_tmu_priv *p)
+static int __sh_tmu_enable(struct sh_tmu_channel *ch)
 {
 	int ret;
 
 	/* enable clock */
-	ret = clk_enable(p->clk);
+	ret = clk_enable(ch->tmu->clk);
 	if (ret) {
-		dev_err(&p->pdev->dev, "cannot enable clock\n");
+		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
+			ch->index);
 		return ret;
 	}
 
 	/* make sure channel is disabled */
-	sh_tmu_start_stop_ch(p, 0);
+	sh_tmu_start_stop_ch(ch, 0);
 
 	/* maximum timeout */
-	sh_tmu_write(p, TCOR, 0xffffffff);
-	sh_tmu_write(p, TCNT, 0xffffffff);
+	sh_tmu_write(ch, TCOR, 0xffffffff);
+	sh_tmu_write(ch, TCNT, 0xffffffff);
 
 	/* configure channel to parent clock / 4, irq off */
-	p->rate = clk_get_rate(p->clk) / 4;
-	sh_tmu_write(p, TCR, 0x0000);
+	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
+	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
 
 	/* enable channel */
-	sh_tmu_start_stop_ch(p, 1);
+	sh_tmu_start_stop_ch(ch, 1);
 
 	return 0;
 }
 
-static int sh_tmu_enable(struct sh_tmu_priv *p)
+static int sh_tmu_enable(struct sh_tmu_channel *ch)
 {
-	if (p->enable_count++ > 0)
+	if (ch->enable_count++ > 0)
 		return 0;
 
-	pm_runtime_get_sync(&p->pdev->dev);
-	dev_pm_syscore_device(&p->pdev->dev, true);
+	pm_runtime_get_sync(&ch->tmu->pdev->dev);
+	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
 
-	return __sh_tmu_enable(p);
+	return __sh_tmu_enable(ch);
 }
 
-static void __sh_tmu_disable(struct sh_tmu_priv *p)
+static void __sh_tmu_disable(struct sh_tmu_channel *ch)
 {
 	/* disable channel */
-	sh_tmu_start_stop_ch(p, 0);
+	sh_tmu_start_stop_ch(ch, 0);
 
 	/* disable interrupts in TMU block */
-	sh_tmu_write(p, TCR, 0x0000);
+	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
 
 	/* stop clock */
-	clk_disable(p->clk);
+	clk_disable(ch->tmu->clk);
 }
 
-static void sh_tmu_disable(struct sh_tmu_priv *p)
+static void sh_tmu_disable(struct sh_tmu_channel *ch)
 {
-	if (WARN_ON(p->enable_count == 0))
+	if (WARN_ON(ch->enable_count == 0))
 		return;
 
-	if (--p->enable_count > 0)
+	if (--ch->enable_count > 0)
 		return;
 
-	__sh_tmu_disable(p);
+	__sh_tmu_disable(ch);
 
-	dev_pm_syscore_device(&p->pdev->dev, false);
-	pm_runtime_put(&p->pdev->dev);
+	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
+	pm_runtime_put(&ch->tmu->pdev->dev);
 }
 
-static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
+static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
 			    int periodic)
 {
 	/* stop timer */
-	sh_tmu_start_stop_ch(p, 0);
+	sh_tmu_start_stop_ch(ch, 0);
 
 	/* acknowledge interrupt */
-	sh_tmu_read(p, TCR);
+	sh_tmu_read(ch, TCR);
 
 	/* enable interrupt */
-	sh_tmu_write(p, TCR, 0x0020);
+	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
 
 	/* reload delta value in case of periodic timer */
 	if (periodic)
-		sh_tmu_write(p, TCOR, delta);
+		sh_tmu_write(ch, TCOR, delta);
 	else
-		sh_tmu_write(p, TCOR, 0xffffffff);
+		sh_tmu_write(ch, TCOR, 0xffffffff);
 
-	sh_tmu_write(p, TCNT, delta);
+	sh_tmu_write(ch, TCNT, delta);
 
 	/* start timer */
-	sh_tmu_start_stop_ch(p, 1);
+	sh_tmu_start_stop_ch(ch, 1);
 }
 
 static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
 {
-	struct sh_tmu_priv *p = dev_id;
+	struct sh_tmu_channel *ch = dev_id;
 
 	/* disable or acknowledge interrupt */
-	if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
-		sh_tmu_write(p, TCR, 0x0000);
+	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
+		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
 	else
-		sh_tmu_write(p, TCR, 0x0020);
+		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
 
 	/* notify clockevent layer */
-	p->ced.event_handler(&p->ced);
+	ch->ced.event_handler(&ch->ced);
 	return IRQ_HANDLED;
 }
 
-static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
+static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
 {
-	return container_of(cs, struct sh_tmu_priv, cs);
+	return container_of(cs, struct sh_tmu_channel, cs);
 }
 
 static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
 {
-	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
 
-	return sh_tmu_read(p, TCNT) ^ 0xffffffff;
+	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
 }
 
 static int sh_tmu_clocksource_enable(struct clocksource *cs)
 {
-	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
 	int ret;
 
-	if (WARN_ON(p->cs_enabled))
+	if (WARN_ON(ch->cs_enabled))
 		return 0;
 
-	ret = sh_tmu_enable(p);
+	ret = sh_tmu_enable(ch);
 	if (!ret) {
-		__clocksource_updatefreq_hz(cs, p->rate);
-		p->cs_enabled = true;
+		__clocksource_updatefreq_hz(cs, ch->rate);
+		ch->cs_enabled = true;
 	}
 
 	return ret;
@@ -245,48 +285,48 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)
 
 static void sh_tmu_clocksource_disable(struct clocksource *cs)
 {
-	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
 
-	if (WARN_ON(!p->cs_enabled))
+	if (WARN_ON(!ch->cs_enabled))
 		return;
 
-	sh_tmu_disable(p);
-	p->cs_enabled = false;
+	sh_tmu_disable(ch);
+	ch->cs_enabled = false;
 }
 
 static void sh_tmu_clocksource_suspend(struct clocksource *cs)
 {
-	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
 
-	if (!p->cs_enabled)
+	if (!ch->cs_enabled)
 		return;
 
-	if (--p->enable_count == 0) {
-		__sh_tmu_disable(p);
-		pm_genpd_syscore_poweroff(&p->pdev->dev);
+	if (--ch->enable_count == 0) {
+		__sh_tmu_disable(ch);
+		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
 	}
 }
 
 static void sh_tmu_clocksource_resume(struct clocksource *cs)
 {
-	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
+	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
 
-	if (!p->cs_enabled)
+	if (!ch->cs_enabled)
 		return;
 
-	if (p->enable_count++ == 0) {
-		pm_genpd_syscore_poweron(&p->pdev->dev);
-		__sh_tmu_enable(p);
+	if (ch->enable_count++ == 0) {
+		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
+		__sh_tmu_enable(ch);
 	}
 }
 
-static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
-				       char *name, unsigned long rating)
+static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
+				       const char *name)
 {
-	struct clocksource *cs = &p->cs;
+	struct clocksource *cs = &ch->cs;
 
 	cs->name = name;
-	cs->rating = rating;
+	cs->rating = 200;
 	cs->read = sh_tmu_clocksource_read;
 	cs->enable = sh_tmu_clocksource_enable;
 	cs->disable = sh_tmu_clocksource_disable;
@@ -295,43 +335,44 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
 	cs->mask = CLOCKSOURCE_MASK(32);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
-	dev_info(&p->pdev->dev, "used as clock source\n");
+	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
+		 ch->index);
 
 	/* Register with dummy 1 Hz value, gets updated in ->enable() */
 	clocksource_register_hz(cs, 1);
 	return 0;
 }
 
-static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
+static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
 {
-	return container_of(ced, struct sh_tmu_priv, ced);
+	return container_of(ced, struct sh_tmu_channel, ced);
 }
 
-static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
+static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
 {
-	struct clock_event_device *ced = &p->ced;
+	struct clock_event_device *ced = &ch->ced;
 
-	sh_tmu_enable(p);
+	sh_tmu_enable(ch);
 
-	clockevents_config(ced, p->rate);
+	clockevents_config(ced, ch->rate);
 
 	if (periodic) {
-		p->periodic = (p->rate + HZ/2) / HZ;
-		sh_tmu_set_next(p, p->periodic, 1);
+		ch->periodic = (ch->rate + HZ/2) / HZ;
+		sh_tmu_set_next(ch, ch->periodic, 1);
 	}
 }
 
 static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
 				    struct clock_event_device *ced)
 {
-	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
+	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
 	int disabled = 0;
 
 	/* deal with old setting first */
 	switch (ced->mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
 	case CLOCK_EVT_MODE_ONESHOT:
-		sh_tmu_disable(p);
+		sh_tmu_disable(ch);
 		disabled = 1;
 		break;
 	default:
@@ -340,16 +381,18 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		dev_info(&p->pdev->dev, "used for periodic clock events\n");
-		sh_tmu_clock_event_start(p, 1);
+		dev_info(&ch->tmu->pdev->dev,
+			 "ch%u: used for periodic clock events\n", ch->index);
+		sh_tmu_clock_event_start(ch, 1);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
-		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
-		sh_tmu_clock_event_start(p, 0);
+		dev_info(&ch->tmu->pdev->dev,
+			 "ch%u: used for oneshot clock events\n", ch->index);
+		sh_tmu_clock_event_start(ch, 0);
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
 		if (!disabled)
-			sh_tmu_disable(p);
+			sh_tmu_disable(ch);
 		break;
 	case CLOCK_EVT_MODE_SHUTDOWN:
 	default:
@@ -360,134 +403,234 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
 static int sh_tmu_clock_event_next(unsigned long delta,
 				   struct clock_event_device *ced)
 {
-	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
+	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
 
 	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
 
 	/* program new delta value */
-	sh_tmu_set_next(p, delta, 0);
+	sh_tmu_set_next(ch, delta, 0);
 	return 0;
 }
 
 static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
 {
-	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev);
+	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
 }
 
 static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
 {
-	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev);
+	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
 }
 
-static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
-				       char *name, unsigned long rating)
+static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
+				       const char *name)
 {
-	struct clock_event_device *ced = &p->ced;
+	struct clock_event_device *ced = &ch->ced;
 	int ret;
 
-	memset(ced, 0, sizeof(*ced));
-
 	ced->name = name;
 	ced->features = CLOCK_EVT_FEAT_PERIODIC;
 	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
-	ced->rating = rating;
+	ced->rating = 200;
 	ced->cpumask = cpumask_of(0);
 	ced->set_next_event = sh_tmu_clock_event_next;
 	ced->set_mode = sh_tmu_clock_event_mode;
 	ced->suspend = sh_tmu_clock_event_suspend;
 	ced->resume = sh_tmu_clock_event_resume;
 
-	dev_info(&p->pdev->dev, "used for clock events\n");
+	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
+		 ch->index);
 
 	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
 
-	ret = setup_irq(p->irqaction.irq, &p->irqaction);
+	ret = request_irq(ch->irq, sh_tmu_interrupt,
+			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+			  dev_name(&ch->tmu->pdev->dev), ch);
 	if (ret) {
-		dev_err(&p->pdev->dev, "failed to request irq %d\n",
-			p->irqaction.irq);
+		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
+			ch->index, ch->irq);
 		return;
 	}
 }
 
-static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
-		    unsigned long clockevent_rating,
-		    unsigned long clocksource_rating)
+static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
+			   bool clockevent, bool clocksource)
 {
-	if (clockevent_rating)
-		sh_tmu_register_clockevent(p, name, clockevent_rating);
-	else if (clocksource_rating)
-		sh_tmu_register_clocksource(p, name, clocksource_rating);
+	if (clockevent) {
+		ch->tmu->has_clockevent = true;
+		sh_tmu_register_clockevent(ch, name);
+	} else if (clocksource) {
+		ch->tmu->has_clocksource = true;
+		sh_tmu_register_clocksource(ch, name);
+	}
 
 	return 0;
 }
 
-static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
+static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
+				bool clockevent, bool clocksource,
+				struct sh_tmu_device *tmu)
+{
+	/* Skip unused channels. */
+	if (!clockevent && !clocksource)
+		return 0;
+
+	ch->tmu = tmu;
+
+	if (tmu->model == SH_TMU_LEGACY) {
+		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
+
+		/*
+		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
+		 * channel registers blocks at base + 2 + 12 * index, while all
+		 * other variants map them at base + 4 + 12 * index. We can
+		 * compute the index by just dividing by 12, the 2 bytes or 4
+		 * bytes offset being hidden by the integer division.
+		 */
+		ch->index = cfg->channel_offset / 12;
+		ch->base = tmu->mapbase + cfg->channel_offset;
+	} else {
+		ch->index = index;
+
+		if (tmu->model == SH_TMU_SH3)
+			ch->base = tmu->mapbase + 4 + ch->index * 12;
+		else
+			ch->base = tmu->mapbase + 8 + ch->index * 12;
+	}
+
+	ch->irq = platform_get_irq(tmu->pdev, index);
+	if (ch->irq < 0) {
+		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
+			ch->index);
+		return ch->irq;
+	}
+
+	ch->cs_enabled = false;
+	ch->enable_count = 0;
+
+	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
+			       clockevent, clocksource);
+}
+
+static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
 {
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	struct resource *res;
-	int irq, ret;
-	ret = -ENXIO;
 
-	memset(p, 0, sizeof(*p));
-	p->pdev = pdev;
+	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
+		return -ENXIO;
+	}
+
+	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
+	if (tmu->mapbase == NULL)
+		return -ENXIO;
+
+	/*
+	 * In legacy platform device configuration (with one device per channel)
+	 * the resource points to the channel base address.
+	 */
+	if (tmu->model == SH_TMU_LEGACY) {
+		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
+		tmu->mapbase -= cfg->channel_offset;
+	}
+
+	return 0;
+}
+
+static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
+{
+	if (tmu->model == SH_TMU_LEGACY) {
+		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
+		tmu->mapbase += cfg->channel_offset;
+	}
+
+	iounmap(tmu->mapbase);
+}
+
+static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
+{
+	struct sh_timer_config *cfg = pdev->dev.platform_data;
+	const struct platform_device_id *id = pdev->id_entry;
+	unsigned int i;
+	int ret;
 
 	if (!cfg) {
-		dev_err(&p->pdev->dev, "missing platform data\n");
-		goto err0;
+		dev_err(&tmu->pdev->dev, "missing platform data\n");
+		return -ENXIO;
 	}
 
-	platform_set_drvdata(pdev, p);
+	tmu->pdev = pdev;
+	tmu->model = id->driver_data;
 
-	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
-		goto err0;
+	/* Get hold of clock. */
+	tmu->clk = clk_get(&tmu->pdev->dev,
+			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
+	if (IS_ERR(tmu->clk)) {
+		dev_err(&tmu->pdev->dev, "cannot get clock\n");
+		return PTR_ERR(tmu->clk);
 	}
 
-	irq = platform_get_irq(p->pdev, 0);
-	if (irq < 0) {
-		dev_err(&p->pdev->dev, "failed to get irq\n");
-		goto err0;
+	ret = clk_prepare(tmu->clk);
+	if (ret < 0)
+		goto err_clk_put;
+
+	/* Map the memory resource. */
+	ret = sh_tmu_map_memory(tmu);
+	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
+		goto err_clk_unprepare;
 	}
 
-	/* map memory, let mapbase point to our channel */
-	p->mapbase = ioremap_nocache(res->start, resource_size(res));
-	if (p->mapbase == NULL) {
-		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
-		goto err0;
+	/* Allocate and setup the channels. */
+	if (tmu->model == SH_TMU_LEGACY)
+		tmu->num_channels = 1;
+	else
+		tmu->num_channels = hweight8(cfg->channels_mask);
+
+	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
+				GFP_KERNEL);
+	if (tmu->channels == NULL) {
+		ret = -ENOMEM;
+		goto err_unmap;
 	}
 
-	/* setup data for setup_irq() (too early for request_irq()) */
-	p->irqaction.name = dev_name(&p->pdev->dev);
-	p->irqaction.handler = sh_tmu_interrupt;
-	p->irqaction.dev_id = p;
-	p->irqaction.irq = irq;
-	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
-			     IRQF_IRQPOLL  | IRQF_NOBALANCING;
-
-	/* get hold of clock */
-	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
-	if (IS_ERR(p->clk)) {
-		dev_err(&p->pdev->dev, "cannot get clock\n");
-		ret = PTR_ERR(p->clk);
-		goto err1;
+	if (tmu->model == SH_TMU_LEGACY) {
+		ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
+					   cfg->clockevent_rating != 0,
+					   cfg->clocksource_rating != 0, tmu);
+		if (ret < 0)
+			goto err_unmap;
+	} else {
+		/*
+		 * Use the first channel as a clock event device and the second
+		 * channel as a clock source.
+		 */
+		for (i = 0; i < tmu->num_channels; ++i) {
+			ret = sh_tmu_channel_setup(&tmu->channels[i], i,
+						   i == 0, i == 1, tmu);
+			if (ret < 0)
+				goto err_unmap;
		}
 	}
 
-	p->cs_enabled = false;
-	p->enable_count = 0;
-
-	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
-			       cfg->clockevent_rating,
-			       cfg->clocksource_rating);
- err1:
-	iounmap(p->mapbase);
- err0:
+
+	platform_set_drvdata(pdev, tmu);
+
+	return 0;
+
+err_unmap:
+	kfree(tmu->channels);
+	sh_tmu_unmap_memory(tmu);
+err_clk_unprepare:
+	clk_unprepare(tmu->clk);
+err_clk_put:
+	clk_put(tmu->clk);
 	return ret;
 }
 
 static int sh_tmu_probe(struct platform_device *pdev)
 {
-	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
+	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
 	int ret;
 
 	if (!is_early_platform_device(pdev)) {
@@ -495,21 +638,18 @@ static int sh_tmu_probe(struct platform_device *pdev)
 		pm_runtime_enable(&pdev->dev);
 	}
 
-	if (p) {
+	if (tmu) {
 		dev_info(&pdev->dev, "kept as earlytimer\n");
 		goto out;
 	}
 
-	p = kmalloc(sizeof(*p), GFP_KERNEL);
-	if (p == NULL) {
-		dev_err(&pdev->dev, "failed to allocate driver data\n");
+	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
+	if (tmu == NULL)
 		return -ENOMEM;
-	}
 
-	ret = sh_tmu_setup(p, pdev);
+	ret = sh_tmu_setup(tmu, pdev);
 	if (ret) {
-		kfree(p);
-		platform_set_drvdata(pdev, NULL);
+		kfree(tmu);
 		pm_runtime_idle(&pdev->dev);
 		return ret;
 	}
@@ -517,7 +657,7 @@ static int sh_tmu_probe(struct platform_device *pdev)
 		return 0;
 
  out:
-	if (cfg->clockevent_rating || cfg->clocksource_rating)
+	if (tmu->has_clockevent || tmu->has_clocksource)
 		pm_runtime_irq_safe(&pdev->dev);
 	else
 		pm_runtime_idle(&pdev->dev);
@@ -530,12 +670,21 @@ static int sh_tmu_remove(struct platform_device *pdev)
 	return -EBUSY; /* cannot unregister clockevent and clocksource */
 }
 
+static const struct platform_device_id sh_tmu_id_table[] = {
+	{ "sh_tmu", SH_TMU_LEGACY },
+	{ "sh-tmu", SH_TMU },
+	{ "sh-tmu-sh3", SH_TMU_SH3 },
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
+
 static struct platform_driver sh_tmu_device_driver = {
 	.probe		= sh_tmu_probe,
 	.remove		= sh_tmu_remove,
 	.driver		= {
 		.name	= "sh_tmu",
-	}
+	},
+	.id_table	= sh_tmu_id_table,
 };
 
 static int __init sh_tmu_init(void)
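The new TCR_* macros replace the bare magic numbers previously written to TCR: 0x0000 for "clock/4, interrupt off" and 0x0020 for "clock/4, underflow interrupt on". The equivalence is easy to sanity-check outside the kernel; a standalone sketch (plain userspace C, macro values copied from the diff, everything else illustrative):

    #include <stdio.h>

    #define TCR_UNIE      (1 << 5)  /* underflow interrupt enable */
    #define TCR_TPSC_CLK4 (0 << 0)  /* prescaler: input clock / 4 */

    int main(void)
    {
            printf("0x%04x\n", TCR_TPSC_CLK4);             /* prints 0x0000 */
            printf("0x%04x\n", TCR_UNIE | TCR_TPSC_CLK4);  /* prints 0x0020 */
            return 0;
    }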
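The clocksource read path is untouched by the restructuring but worth a note: TCNT is a 32-bit down-counter (it is loaded with 0xffffffff in __sh_tmu_enable() and counts toward underflow), so sh_tmu_clocksource_read() XORs the raw value with 0xffffffff to hand the clocksource core a monotonically increasing count. A minimal sketch of that identity (userspace C, not kernel code; up_count() is an illustrative name):

    #include <assert.h>
    #include <stdint.h>

    /* Present a down-counter loaded with 0xffffffff as an up-counter,
     * mirroring what sh_tmu_clocksource_read() does with TCNT. */
    static uint32_t up_count(uint32_t tcnt)
    {
            return tcnt ^ 0xffffffffU;
    }

    int main(void)
    {
            assert(up_count(0xffffffffU) == 0);            /* just loaded */
            assert(up_count(0xfffffffeU) == 1);            /* one tick later */
            assert(up_count(0x00000000U) == 0xffffffffU);  /* about to underflow */
            return 0;
    }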
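Finally, the legacy path in sh_tmu_channel_setup() recovers the channel index as cfg->channel_offset / 12, as the comment in the diff explains: SH3-class parts place the per-channel register blocks at base + 2 + 12 * index, the other variants at base + 4 + 12 * index, so integer division by 12 discards the 2- or 4-byte lead-in either way. A quick check of that arithmetic (userspace C; legacy_index() is an illustrative name):

    #include <assert.h>

    static unsigned int legacy_index(unsigned int channel_offset)
    {
            return channel_offset / 12;  /* 2- or 4-byte offset truncated away */
    }

    int main(void)
    {
            /* SH3 variant: blocks at base + 2 + 12 * index */
            assert(legacy_index(2 + 12 * 0) == 0);
            assert(legacy_index(2 + 12 * 2) == 2);
            /* other variants: blocks at base + 4 + 12 * index */
            assert(legacy_index(4 + 12 * 0) == 0);
            assert(legacy_index(4 + 12 * 1) == 1);
            return 0;
    }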
