diff options
Diffstat (limited to 'drivers/clocksource')
40 files changed, 3800 insertions, 1061 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 41c69469ce2..065131cbfcc 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -26,6 +26,7 @@ config DW_APB_TIMER_OF  config ARMADA_370_XP_TIMER  	bool +	select CLKSRC_OF  config ORION_TIMER  	select CLKSRC_OF @@ -33,6 +34,11 @@ config ORION_TIMER  	bool  config SUN4I_TIMER +	select CLKSRC_MMIO +	bool + +config SUN5I_HSTIMER +	select CLKSRC_MMIO  	bool  config VT8500_TIMER @@ -70,10 +76,35 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK  	help  	  Use the always on PRCMU Timer as sched_clock +config CLKSRC_EFM32 +	bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 +	depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) +	select CLKSRC_MMIO +	default ARCH_EFM32 +	help +	  Support to use the timers of EFM32 SoCs as clock source and clock +	  event device. +  config ARM_ARCH_TIMER  	bool  	select CLKSRC_OF if OF +config ARM_ARCH_TIMER_EVTSTREAM +	bool "Support for ARM architected timer event stream generation" +	default y if ARM_ARCH_TIMER +	depends on ARM_ARCH_TIMER +	help +	  This option enables support for event stream generation based on +	  the ARM architected timer. It is used for waking up CPUs executing +	  the wfe instruction at a frequency represented as a power-of-2 +	  divisor of the clock rate. +	  The main use of the event stream is wfe-based timeouts of userspace +	  locking implementations. It might also be useful for imposing timeout +	  on wfe to safeguard against any programming errors in case an expected +	  event is not generated. +	  This must be disabled for hardware validation purposes to detect any +	  hardware anomalies of missing events. +  config ARM_GLOBAL_TIMER  	bool  	select CLKSRC_OF if OF @@ -105,7 +136,74 @@ config CLKSRC_SAMSUNG_PWM  	  for all devicetree enabled platforms. This driver will be  	  needed only on systems that do not have the Exynos MCT available. 
+config FSL_FTM_TIMER +	bool +	help +	  Support for Freescale FlexTimer Module (FTM) timer. +  config VF_PIT_TIMER  	bool  	help  	  Support for Period Interrupt Timer on Freescale Vybrid Family SoCs. + +config SYS_SUPPORTS_SH_CMT +        bool + +config SYS_SUPPORTS_SH_MTU2 +        bool + +config SYS_SUPPORTS_SH_TMU +        bool + +config SYS_SUPPORTS_EM_STI +        bool + +config SH_TIMER_CMT +	bool "Renesas CMT timer driver" if COMPILE_TEST +	depends on GENERIC_CLOCKEVENTS +	default SYS_SUPPORTS_SH_CMT +	help +	  This enables build of a clocksource and clockevent driver for +	  the Compare Match Timer (CMT) hardware available in 16/32/48-bit +	  variants on a wide range of Mobile and Automotive SoCs from Renesas. + +config SH_TIMER_MTU2 +	bool "Renesas MTU2 timer driver" if COMPILE_TEST +	depends on GENERIC_CLOCKEVENTS +	default SYS_SUPPORTS_SH_MTU2 +	help +	  This enables build of a clockevent driver for the Multi-Function +	  Timer Pulse Unit 2 (TMU2) hardware available on SoCs from Renesas. +	  This hardware comes with 16 bit-timer registers. + +config SH_TIMER_TMU +	bool "Renesas TMU timer driver" if COMPILE_TEST +	depends on GENERIC_CLOCKEVENTS +	default SYS_SUPPORTS_SH_TMU +	help +	  This enables build of a clocksource and clockevent driver for +	  the 32-bit Timer Unit (TMU) hardware available on a wide range +	  SoCs from Renesas. + +config EM_TIMER_STI +	bool "Renesas STI timer driver" if COMPILE_TEST +	depends on GENERIC_CLOCKEVENTS +	default SYS_SUPPORTS_EM_STI +	help +	  This enables build of a clocksource and clockevent driver for +	  the 48-bit System Timer (STI) hardware available on a SoCs +	  such as EMEV2 from former NEC Electronics. 
+ +config CLKSRC_QCOM +	bool + +config CLKSRC_VERSATILE +	bool "ARM Versatile (Express) reference platforms clock source" +	depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET +	select CLKSRC_OF +	default y if MFD_VEXPRESS_SYSREG +	help +	  This option enables clock source based on free running +	  counter available in the "System Registers" block of +	  ARM Versatile, RealView and Versatile Express reference +	  platforms. diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 704d6d342ad..800b1303c23 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -1,6 +1,5 @@  obj-$(CONFIG_CLKSRC_OF)	+= clksrc-of.o  obj-$(CONFIG_ATMEL_TCB_CLKSRC)	+= tcb_clksrc.o -obj-$(CONFIG_X86_CYCLONE_TIMER)	+= cyclone.o  obj-$(CONFIG_X86_PM_TIMER)	+= acpi_pm.o  obj-$(CONFIG_SCx200HR_TIMER)	+= scx200_hrt.o  obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC)	+= cs5535-clockevt.o @@ -21,17 +20,24 @@ obj-$(CONFIG_ARCH_MARCO)	+= timer-marco.o  obj-$(CONFIG_ARCH_MOXART)	+= moxart_timer.o  obj-$(CONFIG_ARCH_MXS)		+= mxs_timer.o  obj-$(CONFIG_ARCH_PRIMA2)	+= timer-prima2.o +obj-$(CONFIG_ARCH_U300)		+= timer-u300.o  obj-$(CONFIG_SUN4I_TIMER)	+= sun4i_timer.o +obj-$(CONFIG_SUN5I_HSTIMER)	+= timer-sun5i.o  obj-$(CONFIG_ARCH_TEGRA)	+= tegra20_timer.o  obj-$(CONFIG_VT8500_TIMER)	+= vt8500_timer.o  obj-$(CONFIG_ARCH_NSPIRE)	+= zevio-timer.o -obj-$(CONFIG_ARCH_BCM)		+= bcm_kona_timer.o +obj-$(CONFIG_ARCH_BCM_MOBILE)	+= bcm_kona_timer.o  obj-$(CONFIG_CADENCE_TTC_TIMER)	+= cadence_ttc_timer.o +obj-$(CONFIG_CLKSRC_EFM32)	+= time-efm32.o  obj-$(CONFIG_CLKSRC_EXYNOS_MCT)	+= exynos_mct.o  obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)	+= samsung_pwm_timer.o +obj-$(CONFIG_FSL_FTM_TIMER)	+= fsl_ftm_timer.o  obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o +obj-$(CONFIG_CLKSRC_QCOM)	+= qcom-timer.o  obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o  obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o  obj-$(CONFIG_CLKSRC_METAG_GENERIC)	+= metag_generic.o  
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST)	+= dummy_timer.o +obj-$(CONFIG_ARCH_KEYSTONE)		+= timer-keystone.o +obj-$(CONFIG_CLKSRC_VERSATILE)		+= versatile.o diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fbd9ccd5e11..5163ec13429 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -13,12 +13,14 @@  #include <linux/device.h>  #include <linux/smp.h>  #include <linux/cpu.h> +#include <linux/cpu_pm.h>  #include <linux/clockchips.h>  #include <linux/interrupt.h>  #include <linux/of_irq.h>  #include <linux/of_address.h>  #include <linux/io.h>  #include <linux/slab.h> +#include <linux/sched_clock.h>  #include <asm/arch_timer.h>  #include <asm/virt.h> @@ -64,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];  static struct clock_event_device __percpu *arch_timer_evt;  static bool arch_timer_use_virtual = true; +static bool arch_timer_c3stop;  static bool arch_timer_mem_use_virtual;  /* @@ -261,7 +264,8 @@ static void __arch_timer_setup(unsigned type,  	clk->features = CLOCK_EVT_FEAT_ONESHOT;  	if (type == ARCH_CP15_TIMER) { -		clk->features |= CLOCK_EVT_FEAT_C3STOP; +		if (arch_timer_c3stop) +			clk->features |= CLOCK_EVT_FEAT_C3STOP;  		clk->name = "arch_sys_timer";  		clk->rating = 450;  		clk->cpumask = cpumask_of(smp_processor_id()); @@ -275,6 +279,7 @@ static void __arch_timer_setup(unsigned type,  			clk->set_next_event = arch_timer_set_next_event_phys;  		}  	} else { +		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;  		clk->name = "arch_mem_timer";  		clk->rating = 400;  		clk->cpumask = cpu_all_mask; @@ -294,6 +299,19 @@ static void __arch_timer_setup(unsigned type,  	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);  } +static void arch_timer_configure_evtstream(void) +{ +	int evt_stream_div, pos; + +	/* Find the closest power of two to the divisor */ +	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; +	pos = fls(evt_stream_div); +	if (pos > 1 && 
!(evt_stream_div & (1 << (pos - 2)))) +		pos--; +	/* enable event stream */ +	arch_timer_evtstrm_enable(min(pos, 15)); +} +  static int arch_timer_setup(struct clock_event_device *clk)  {  	__arch_timer_setup(ARCH_CP15_TIMER, clk); @@ -307,6 +325,8 @@ static int arch_timer_setup(struct clock_event_device *clk)  	}  	arch_counter_set_user_access(); +	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM)) +		arch_timer_configure_evtstream();  	return 0;  } @@ -389,7 +409,7 @@ static struct clocksource clocksource_counter = {  	.rating	= 400,  	.read	= arch_counter_read,  	.mask	= CLOCKSOURCE_MASK(56), -	.flags	= CLOCK_SOURCE_IS_CONTINUOUS, +	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,  };  static struct cyclecounter cyclecounter = { @@ -419,6 +439,9 @@ static void __init arch_counter_register(unsigned type)  	cyclecounter.mult = clocksource_counter.mult;  	cyclecounter.shift = clocksource_counter.shift;  	timecounter_init(&timecounter, &cyclecounter, start_count); + +	/* 56 bits minimum, so we assume worst case rollover */ +	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);  }  static void arch_timer_stop(struct clock_event_device *clk) @@ -460,6 +483,33 @@ static struct notifier_block arch_timer_cpu_nb = {  	.notifier_call = arch_timer_cpu_notify,  }; +#ifdef CONFIG_CPU_PM +static unsigned int saved_cntkctl; +static int arch_timer_cpu_pm_notify(struct notifier_block *self, +				    unsigned long action, void *hcpu) +{ +	if (action == CPU_PM_ENTER) +		saved_cntkctl = arch_timer_get_cntkctl(); +	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) +		arch_timer_set_cntkctl(saved_cntkctl); +	return NOTIFY_OK; +} + +static struct notifier_block arch_timer_cpu_pm_notifier = { +	.notifier_call = arch_timer_cpu_pm_notify, +}; + +static int __init arch_timer_cpu_pm_init(void) +{ +	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); +} +#else +static int __init arch_timer_cpu_pm_init(void) +{ +	return 0; +} +#endif +  
static int __init arch_timer_register(void)  {  	int err; @@ -499,11 +549,17 @@ static int __init arch_timer_register(void)  	if (err)  		goto out_free_irq; +	err = arch_timer_cpu_pm_init(); +	if (err) +		goto out_unreg_notify; +  	/* Immediately configure the timer on the boot CPU */  	arch_timer_setup(this_cpu_ptr(arch_timer_evt));  	return 0; +out_unreg_notify: +	unregister_cpu_notifier(&arch_timer_cpu_nb);  out_free_irq:  	if (arch_timer_use_virtual)  		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt); @@ -611,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)  		}  	} +	arch_timer_c3stop = !of_property_read_bool(np, "always-on"); +  	arch_timer_register();  	arch_timer_common_init();  } diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index b66c1f36066..60e5a170c4d 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -169,7 +169,8 @@ static int gt_clockevents_init(struct clock_event_device *clk)  	int cpu = smp_processor_id();  	clk->name = "arm_global_timer"; -	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; +	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | +		CLOCK_EVT_FEAT_PERCPU;  	clk->set_mode = gt_clockevent_set_mode;  	clk->set_next_event = gt_clockevent_set_next_event;  	clk->cpumask = cpumask_of(cpu); @@ -201,7 +202,7 @@ static struct clocksource gt_clocksource = {  };  #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK -static u32 notrace gt_sched_clock_read(void) +static u64 notrace gt_sched_clock_read(void)  {  	return gt_counter_read();  } @@ -216,7 +217,7 @@ static void __init gt_clocksource_init(void)  	writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);  #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK -	setup_sched_clock(gt_sched_clock_read, 32, gt_clk_rate); +	sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);  #endif  	clocksource_register_hz(>_clocksource, gt_clk_rate);  } 
@@ -245,11 +246,12 @@ static void __init global_timer_of_register(struct device_node *np)  	int err = 0;  	/* -	 * In r2p0 the comparators for each processor with the global timer +	 * In A9 r2p0 the comparators for each processor with the global timer  	 * fire when the timer value is greater than or equal to. In previous  	 * revisions the comparators fired when the timer value was equal to.  	 */ -	if ((read_cpuid_id() & 0xf0000f) < 0x200000) { +	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9 +	    && (read_cpuid_id() & 0xf0000f) < 0x200000) {  		pr_warn("global-timer: non support for this cpu version.\n");  		return;  	} diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 07ea7ce900d..26ed331b1aa 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -49,7 +49,7 @@ struct bcm2835_timer {  static void __iomem *system_clock __read_mostly; -static u32 notrace bcm2835_sched_read(void) +static u64 notrace bcm2835_sched_read(void)  {  	return readl_relaxed(system_clock);  } @@ -110,7 +110,7 @@ static void __init bcm2835_timer_init(struct device_node *node)  		panic("Can't read clock-frequency");  	system_clock = base + REG_COUNTER_LO; -	setup_sched_clock(bcm2835_sched_read, 32, freq); +	sched_clock_register(bcm2835_sched_read, 32, freq);  	clocksource_mmio_init(base + REG_COUNTER_LO, node->name,  		freq, 300, 32, clocksource_mmio_readl_up); diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 0d7d8c3ed6b..0595dc6c453 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c @@ -17,6 +17,7 @@  #include <linux/jiffies.h>  #include <linux/clockchips.h>  #include <linux/types.h> +#include <linux/clk.h>  #include <linux/io.h>  #include <asm/mach/time.h> @@ -98,30 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)  	return;  } -static const struct of_device_id bcm_timer_ids[] 
__initconst = { -	{.compatible = "brcm,kona-timer"}, -	{.compatible = "bcm,kona-timer"}, /* deprecated name */ -	{}, -}; - -static void __init kona_timers_init(struct device_node *node) -{ -	u32 freq; - -	if (!of_property_read_u32(node, "clock-frequency", &freq)) -		arch_timer_rate = freq; -	else -		panic("clock-frequency not set in the .dts file"); - -	/* Setup IRQ numbers */ -	timers.tmr_irq = irq_of_parse_and_map(node, 0); - -	/* Setup IO addresses */ -	timers.tmr_regs = of_iomap(node, 0); - -	kona_timer_disable_and_clear(timers.tmr_regs); -} -  static int kona_timer_set_next_event(unsigned long clc,  				  struct clock_event_device *unused)  { @@ -196,7 +173,34 @@ static struct irqaction kona_timer_irq = {  static void __init kona_timer_init(struct device_node *node)  { -	kona_timers_init(node); +	u32 freq; +	struct clk *external_clk; + +	if (!of_device_is_available(node)) { +		pr_info("Kona Timer v1 marked as disabled in device tree\n"); +		return; +	} + +	external_clk = of_clk_get_by_name(node, NULL); + +	if (!IS_ERR(external_clk)) { +		arch_timer_rate = clk_get_rate(external_clk); +		clk_prepare_enable(external_clk); +	} else if (!of_property_read_u32(node, "clock-frequency", &freq)) { +		arch_timer_rate = freq; +	} else { +		pr_err("Kona Timer v1 unable to determine clock-frequency"); +		return; +	} + +	/* Setup IRQ numbers */ +	timers.tmr_irq = irq_of_parse_and_map(node, 0); + +	/* Setup IO addresses */ +	timers.tmr_regs = of_iomap(node, 0); + +	kona_timer_disable_and_clear(timers.tmr_regs); +  	kona_timer_clockevents_init();  	setup_irq(timers.tmr_irq, &kona_timer_irq);  	kona_timer_set_next_event((arch_timer_rate / HZ), NULL); diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index b2bb3a4bc20..7a08811df9a 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -16,6 +16,7 @@   */  #include <linux/clk.h> +#include <linux/clk-provider.h>  #include 
<linux/interrupt.h>  #include <linux/clockchips.h>  #include <linux/of_address.h> @@ -52,6 +53,8 @@  #define TTC_CNT_CNTRL_DISABLE_MASK	0x1  #define TTC_CLK_CNTRL_CSRC_MASK		(1 << 5)	/* clock source */ +#define TTC_CLK_CNTRL_PSV_MASK		0x1e +#define TTC_CLK_CNTRL_PSV_SHIFT		1  /*   * Setup the timers to use pre-scaling, using a fixed value for now that will @@ -63,15 +66,19 @@  #define CLK_CNTRL_PRESCALE_EN	1  #define CNT_CNTRL_RESET		(1 << 4) +#define MAX_F_ERR 50 +  /**   * struct ttc_timer - This definition defines local timer structure   *   * @base_addr:	Base address of timer + * @freq:	Timer input clock frequency   * @clk:	Associated clock source   * @clk_rate_change_nb	Notifier block for clock rate changes   */  struct ttc_timer {  	void __iomem *base_addr; +	unsigned long freq;  	struct clk *clk;  	struct notifier_block clk_rate_change_nb;  }; @@ -80,6 +87,8 @@ struct ttc_timer {  		container_of(x, struct ttc_timer, clk_rate_change_nb)  struct ttc_timer_clocksource { +	u32			scale_clk_ctrl_reg_old; +	u32			scale_clk_ctrl_reg_new;  	struct ttc_timer	ttc;  	struct clocksource	cs;  }; @@ -109,11 +118,11 @@ static void ttc_set_interval(struct ttc_timer *timer,  	u32 ctrl_reg;  	/* Disable the counter, set the counter value  and re-enable counter */ -	ctrl_reg = __raw_readl(timer->base_addr + TTC_CNT_CNTRL_OFFSET); +	ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);  	ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; -	__raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); +	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); -	__raw_writel(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET); +	writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET);  	/*  	 * Reset the counter (0x10) so that it starts from 0, one-shot @@ -121,7 +130,7 @@ static void ttc_set_interval(struct ttc_timer *timer,  	 */  	ctrl_reg |= CNT_CNTRL_RESET;  	ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; -	__raw_writel(ctrl_reg, timer->base_addr + 
TTC_CNT_CNTRL_OFFSET); +	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);  }  /** @@ -138,7 +147,7 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)  	struct ttc_timer *timer = &ttce->ttc;  	/* Acknowledge the interrupt and call event handler */ -	__raw_readl(timer->base_addr + TTC_ISR_OFFSET); +	readl_relaxed(timer->base_addr + TTC_ISR_OFFSET);  	ttce->ce.event_handler(&ttce->ce); @@ -154,13 +163,13 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs)  {  	struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; -	return (cycle_t)__raw_readl(timer->base_addr + +	return (cycle_t)readl_relaxed(timer->base_addr +  				TTC_COUNT_VAL_OFFSET);  } -static u32 notrace ttc_sched_clock_read(void) +static u64 notrace ttc_sched_clock_read(void)  { -	return __raw_readl(ttc_sched_clock_val_reg); +	return readl_relaxed(ttc_sched_clock_val_reg);  }  /** @@ -196,24 +205,23 @@ static void ttc_set_mode(enum clock_event_mode mode,  	switch (mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		ttc_set_interval(timer, -				DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk), -					PRESCALE * HZ)); +		ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq, +						PRESCALE * HZ));  		break;  	case CLOCK_EVT_MODE_ONESHOT:  	case CLOCK_EVT_MODE_UNUSED:  	case CLOCK_EVT_MODE_SHUTDOWN: -		ctrl_reg = __raw_readl(timer->base_addr + +		ctrl_reg = readl_relaxed(timer->base_addr +  					TTC_CNT_CNTRL_OFFSET);  		ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; -		__raw_writel(ctrl_reg, +		writel_relaxed(ctrl_reg,  				timer->base_addr + TTC_CNT_CNTRL_OFFSET);  		break;  	case CLOCK_EVT_MODE_RESUME: -		ctrl_reg = __raw_readl(timer->base_addr + +		ctrl_reg = readl_relaxed(timer->base_addr +  					TTC_CNT_CNTRL_OFFSET);  		ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; -		__raw_writel(ctrl_reg, +		writel_relaxed(ctrl_reg,  				timer->base_addr + TTC_CNT_CNTRL_OFFSET);  		break;  	} @@ -228,32 +236,89 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,  			
struct ttc_timer_clocksource, ttc);  	switch (event) { -	case POST_RATE_CHANGE: +	case PRE_RATE_CHANGE: +	{ +		u32 psv; +		unsigned long factor, rate_low, rate_high; + +		if (ndata->new_rate > ndata->old_rate) { +			factor = DIV_ROUND_CLOSEST(ndata->new_rate, +					ndata->old_rate); +			rate_low = ndata->old_rate; +			rate_high = ndata->new_rate; +		} else { +			factor = DIV_ROUND_CLOSEST(ndata->old_rate, +					ndata->new_rate); +			rate_low = ndata->new_rate; +			rate_high = ndata->old_rate; +		} + +		if (!is_power_of_2(factor)) +				return NOTIFY_BAD; + +		if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR) +			return NOTIFY_BAD; + +		factor = __ilog2_u32(factor); +  		/* -		 * Do whatever is necessary to maintain a proper time base -		 * -		 * I cannot find a way to adjust the currently used clocksource -		 * to the new frequency. __clocksource_updatefreq_hz() sounds -		 * good, but does not work. Not sure what's that missing. -		 * -		 * This approach works, but triggers two clocksource switches. -		 * The first after unregister to clocksource jiffies. And -		 * another one after the register to the newly registered timer. -		 * -		 * Alternatively we could 'waste' another HW timer to ping pong -		 * between clock sources. That would also use one register and -		 * one unregister call, but only trigger one clocksource switch -		 * for the cost of another HW timer used by the OS. +		 * store timer clock ctrl register so we can restore it in case +		 * of an abort.  		 */ -		clocksource_unregister(&ttccs->cs); -		clocksource_register_hz(&ttccs->cs, -				ndata->new_rate / PRESCALE); -		/* fall through */ -	case PRE_RATE_CHANGE: +		ttccs->scale_clk_ctrl_reg_old = +			readl_relaxed(ttccs->ttc.base_addr + +			TTC_CLK_CNTRL_OFFSET); + +		psv = (ttccs->scale_clk_ctrl_reg_old & +				TTC_CLK_CNTRL_PSV_MASK) >> +				TTC_CLK_CNTRL_PSV_SHIFT; +		if (ndata->new_rate < ndata->old_rate) +			psv -= factor; +		else +			psv += factor; + +		/* prescaler within legal range? 
*/ +		if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT)) +			return NOTIFY_BAD; + +		ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old & +			~TTC_CLK_CNTRL_PSV_MASK; +		ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT; + + +		/* scale down: adjust divider in post-change notification */ +		if (ndata->new_rate < ndata->old_rate) +			return NOTIFY_DONE; + +		/* scale up: adjust divider now - before frequency change */ +		writel_relaxed(ttccs->scale_clk_ctrl_reg_new, +			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); +		break; +	} +	case POST_RATE_CHANGE: +		/* scale up: pre-change notification did the adjustment */ +		if (ndata->new_rate > ndata->old_rate) +			return NOTIFY_OK; + +		/* scale down: adjust divider now - after frequency change */ +		writel_relaxed(ttccs->scale_clk_ctrl_reg_new, +			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); +		break; +  	case ABORT_RATE_CHANGE: +		/* we have to undo the adjustment in case we scale up */ +		if (ndata->new_rate < ndata->old_rate) +			return NOTIFY_OK; + +		/* restore original register value */ +		writel_relaxed(ttccs->scale_clk_ctrl_reg_old, +			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); +		/* fall through */  	default:  		return NOTIFY_DONE;  	} + +	return NOTIFY_DONE;  }  static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base) @@ -273,6 +338,8 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)  		return;  	} +	ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk); +  	ttccs->ttc.clk_rate_change_nb.notifier_call =  		ttc_rate_change_clocksource_cb;  	ttccs->ttc.clk_rate_change_nb.next = NULL; @@ -292,22 +359,20 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)  	 * with no interrupt and it rolls over at 0xFFFF. Pre-scale  	 * it by 32 also. Let it start running now.  	 
*/ -	__raw_writel(0x0,  ttccs->ttc.base_addr + TTC_IER_OFFSET); -	__raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, +	writel_relaxed(0x0,  ttccs->ttc.base_addr + TTC_IER_OFFSET); +	writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,  		     ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); -	__raw_writel(CNT_CNTRL_RESET, +	writel_relaxed(CNT_CNTRL_RESET,  		     ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); -	err = clocksource_register_hz(&ttccs->cs, -			clk_get_rate(ttccs->ttc.clk) / PRESCALE); +	err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);  	if (WARN_ON(err)) {  		kfree(ttccs);  		return;  	}  	ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; -	setup_sched_clock(ttc_sched_clock_read, 16, -			clk_get_rate(ttccs->ttc.clk) / PRESCALE); +	sched_clock_register(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE);  }  static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, @@ -320,22 +385,12 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,  	switch (event) {  	case POST_RATE_CHANGE: -	{ -		unsigned long flags; +		/* update cached frequency */ +		ttc->freq = ndata->new_rate; -		/* -		 * clockevents_update_freq should be called with IRQ disabled on -		 * the CPU the timer provides events for. The timer we use is -		 * common to both CPUs, not sure if we need to run on both -		 * cores. 
-		 */ -		local_irq_save(flags); -		clockevents_update_freq(&ttcce->ce, -				ndata->new_rate / PRESCALE); -		local_irq_restore(flags); +		clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);  		/* fall through */ -	}  	case PRE_RATE_CHANGE:  	case ABORT_RATE_CHANGE:  	default: @@ -367,6 +422,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,  	if (clk_notifier_register(ttcce->ttc.clk,  				&ttcce->ttc.clk_rate_change_nb))  		pr_warn("Unable to register clock notifier.\n"); +	ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);  	ttcce->ttc.base_addr = base;  	ttcce->ce.name = "ttc_clockevent"; @@ -382,21 +438,20 @@ static void __init ttc_setup_clockevent(struct clk *clk,  	 * is prescaled by 32 using the interval interrupt. Leave it  	 * disabled for now.  	 */ -	__raw_writel(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); -	__raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, +	writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); +	writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,  		     ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); -	__raw_writel(0x1,  ttcce->ttc.base_addr + TTC_IER_OFFSET); +	writel_relaxed(0x1,  ttcce->ttc.base_addr + TTC_IER_OFFSET);  	err = request_irq(irq, ttc_clock_event_interrupt, -			  IRQF_DISABLED | IRQF_TIMER, -			  ttcce->ce.name, ttcce); +			  IRQF_TIMER, ttcce->ce.name, ttcce);  	if (WARN_ON(err)) {  		kfree(ttcce);  		return;  	}  	clockevents_config_and_register(&ttcce->ce, -			clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe); +			ttcce->ttc.freq / PRESCALE, 1, 0xfffe);  }  /** @@ -435,7 +490,7 @@ static void __init ttc_timer_init(struct device_node *timer)  		BUG();  	} -	clksel = __raw_readl(timer_baseaddr + TTC_CLK_CNTRL_OFFSET); +	clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET);  	clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);  	clk_cs = of_clk_get(timer, clksel);  	if (IS_ERR(clk_cs)) { @@ -443,7 +498,7 @@ static void __init ttc_timer_init(struct device_node 
*timer)  		BUG();  	} -	clksel = __raw_readl(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); +	clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);  	clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);  	clk_ce = of_clk_get(timer, clksel);  	if (IS_ERR(clk_ce)) { diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c index a9fd4ad2567..b375106844d 100644 --- a/drivers/clocksource/clksrc-dbx500-prcmu.c +++ b/drivers/clocksource/clksrc-dbx500-prcmu.c @@ -53,7 +53,7 @@ static struct clocksource clocksource_dbx500_prcmu = {  #ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK -static u32 notrace dbx500_prcmu_sched_clock_read(void) +static u64 notrace dbx500_prcmu_sched_clock_read(void)  {  	if (unlikely(!clksrc_dbx500_timer_base))  		return 0; @@ -81,8 +81,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)  		       clksrc_dbx500_timer_base + PRCMU_TIMER_REF);  	}  #ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK -	setup_sched_clock(dbx500_prcmu_sched_clock_read, -			 32, RATE_32K); +	sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);  #endif  	clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);  } diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index 37f5325bec9..0093a8e49e1 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c @@ -27,10 +27,17 @@ void __init clocksource_of_init(void)  {  	struct device_node *np;  	const struct of_device_id *match; -	clocksource_of_init_fn init_func; +	of_init_fn_1 init_func; +	unsigned clocksources = 0;  	for_each_matching_node_and_match(np, __clksrc_of_table, &match) { +		if (!of_device_is_available(np)) +			continue; +  		init_func = match->data;  		init_func(np); +		clocksources++;  	} +	if (!clocksources) +		pr_crit("%s: no matching clocksources found\n", __func__);  } diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index ea210482dd2..db210529089 100644 
--- a/drivers/clocksource/cs5535-clockevt.c +++ b/drivers/clocksource/cs5535-clockevt.c @@ -131,7 +131,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)  static struct irqaction mfgptirq  = {  	.handler = mfgpt_tick, -	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED, +	.flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,  	.name = DRV_NAME,  }; diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c deleted file mode 100644 index 9e0998f2288..00000000000 --- a/drivers/clocksource/cyclone.c +++ /dev/null @@ -1,113 +0,0 @@ -#include <linux/clocksource.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/timex.h> -#include <linux/init.h> - -#include <asm/pgtable.h> -#include <asm/io.h> - -#include <asm/mach_timer.h> - -#define CYCLONE_CBAR_ADDR	0xFEB00CD0	/* base address ptr */ -#define CYCLONE_PMCC_OFFSET	0x51A0		/* offset to control register */ -#define CYCLONE_MPCS_OFFSET	0x51A8		/* offset to select register */ -#define CYCLONE_MPMC_OFFSET	0x51D0		/* offset to count register */ -#define CYCLONE_TIMER_FREQ	99780000	/* 100Mhz, but not really */ -#define CYCLONE_TIMER_MASK	CLOCKSOURCE_MASK(32) /* 32 bit mask */ - -int use_cyclone = 0; -static void __iomem *cyclone_ptr; - -static cycle_t read_cyclone(struct clocksource *cs) -{ -	return (cycle_t)readl(cyclone_ptr); -} - -static struct clocksource clocksource_cyclone = { -	.name		= "cyclone", -	.rating		= 250, -	.read		= read_cyclone, -	.mask		= CYCLONE_TIMER_MASK, -	.flags		= CLOCK_SOURCE_IS_CONTINUOUS, -}; - -static int __init init_cyclone_clocksource(void) -{ -	unsigned long base;	/* saved value from CBAR */ -	unsigned long offset; -	u32 __iomem* volatile cyclone_timer;	/* Cyclone MPMC0 register */ -	u32 __iomem* reg; -	int i; - -	/* make sure we're on a summit box: */ -	if (!use_cyclone) -		return -ENODEV; - -	printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n"); - -	/* find base address: */ -	offset = CYCLONE_CBAR_ADDR; -	reg = 
ioremap_nocache(offset, sizeof(reg)); -	if (!reg) { -		printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n"); -		return -ENODEV; -	} -	/* even on 64bit systems, this is only 32bits: */ -	base = readl(reg); -	iounmap(reg); -	if (!base) { -		printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n"); -		return -ENODEV; -	} - -	/* setup PMCC: */ -	offset = base + CYCLONE_PMCC_OFFSET; -	reg = ioremap_nocache(offset, sizeof(reg)); -	if (!reg) { -		printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n"); -		return -ENODEV; -	} -	writel(0x00000001,reg); -	iounmap(reg); - -	/* setup MPCS: */ -	offset = base + CYCLONE_MPCS_OFFSET; -	reg = ioremap_nocache(offset, sizeof(reg)); -	if (!reg) { -		printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n"); -		return -ENODEV; -	} -	writel(0x00000001,reg); -	iounmap(reg); - -	/* map in cyclone_timer: */ -	offset = base + CYCLONE_MPMC_OFFSET; -	cyclone_timer = ioremap_nocache(offset, sizeof(u64)); -	if (!cyclone_timer) { -		printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n"); -		return -ENODEV; -	} - -	/* quick test to make sure its ticking: */ -	for (i = 0; i < 3; i++){ -		u32 old = readl(cyclone_timer); -		int stall = 100; - -		while (stall--) -			barrier(); - -		if (readl(cyclone_timer) == old) { -			printk(KERN_ERR "Summit chipset: Counter not counting! 
DISABLED\n"); -			iounmap(cyclone_timer); -			cyclone_timer = NULL; -			return -ENODEV; -		} -	} -	cyclone_ptr = cyclone_timer; - -	return clocksource_register_hz(&clocksource_cyclone, -					CYCLONE_TIMER_FREQ); -} - -arch_initcall(init_cyclone_clocksource); diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c index b3eb582d6a6..ad357254172 100644 --- a/drivers/clocksource/dummy_timer.c +++ b/drivers/clocksource/dummy_timer.c @@ -56,14 +56,19 @@ static struct notifier_block dummy_timer_cpu_nb = {  static int __init dummy_timer_register(void)  { -	int err = register_cpu_notifier(&dummy_timer_cpu_nb); +	int err = 0; + +	cpu_notifier_register_begin(); +	err = __register_cpu_notifier(&dummy_timer_cpu_nb);  	if (err) -		return err; +		goto out;  	/* We won't get a call on the boot CPU, so register immediately */  	if (num_possible_cpus() > 1)  		dummy_timer_setup(); -	return 0; +out: +	cpu_notifier_register_done(); +	return err;  }  early_initcall(dummy_timer_register); diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index e54ca1062d8..f3656a6b038 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -243,8 +243,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,  	dw_ced->irqaction.dev_id	= &dw_ced->ced;  	dw_ced->irqaction.irq		= irq;  	dw_ced->irqaction.flags		= IRQF_TIMER | IRQF_IRQPOLL | -					  IRQF_NOBALANCING | -					  IRQF_DISABLED; +					  IRQF_NOBALANCING;  	dw_ced->eoi = apbt_eoi;  	err = setup_irq(irq, &dw_ced->irqaction); diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c index 4cbae4f762b..d305fb08976 100644 --- a/drivers/clocksource/dw_apb_timer_of.c +++ b/drivers/clocksource/dw_apb_timer_of.c @@ -23,7 +23,7 @@  #include <linux/clk.h>  #include <linux/sched_clock.h> -static void timer_get_base_and_rate(struct device_node *np, +static void __init timer_get_base_and_rate(struct device_node *np, 
 				    void __iomem **base, u32 *rate)  {  	struct clk *timer_clk; @@ -55,11 +55,11 @@ static void timer_get_base_and_rate(struct device_node *np,  try_clock_freq:  	if (of_property_read_u32(np, "clock-freq", rate) && -		of_property_read_u32(np, "clock-frequency", rate)) +	    of_property_read_u32(np, "clock-frequency", rate))  		panic("No clock nor clock-frequency property for %s", np->name);  } -static void add_clockevent(struct device_node *event_timer) +static void __init add_clockevent(struct device_node *event_timer)  {  	void __iomem *iobase;  	struct dw_apb_clock_event_device *ced; @@ -82,7 +82,7 @@ static void add_clockevent(struct device_node *event_timer)  static void __iomem *sched_io_base;  static u32 sched_rate; -static void add_clocksource(struct device_node *source_timer) +static void __init add_clocksource(struct device_node *source_timer)  {  	void __iomem *iobase;  	struct dw_apb_clocksource *cs; @@ -106,18 +106,17 @@ static void add_clocksource(struct device_node *source_timer)  	sched_rate = rate;  } -static u32 read_sched_clock(void) +static u64 notrace read_sched_clock(void)  { -	return __raw_readl(sched_io_base); +	return ~__raw_readl(sched_io_base);  }  static const struct of_device_id sptimer_ids[] __initconst = {  	{ .compatible = "picochip,pc3x2-rtc" }, -	{ .compatible = "snps,dw-apb-timer-sp" },  	{ /* Sentinel */ },  }; -static void init_sched_clock(void) +static void __init init_sched_clock(void)  {  	struct device_node *sched_timer; @@ -128,7 +127,7 @@ static void init_sched_clock(void)  		of_node_put(sched_timer);  	} -	setup_sched_clock(read_sched_clock, 32, sched_rate); +	sched_clock_register(read_sched_clock, 32, sched_rate);  }  static int num_called; @@ -138,12 +137,10 @@ static void __init dw_apb_timer_init(struct device_node *timer)  	case 0:  		pr_debug("%s: found clockevent timer\n", __func__);  		add_clockevent(timer); -		of_node_put(timer);  		break;  	case 1:  		pr_debug("%s: found clocksource timer\n", __func__);  		
add_clocksource(timer); -		of_node_put(timer);  		init_sched_clock();  		break;  	default: @@ -153,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)  	num_called++;  }  CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); -CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init); diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index b9c81b7c3a3..d0a7bd66b8b 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -78,7 +78,7 @@ static int em_sti_enable(struct em_sti_priv *p)  	int ret;  	/* enable clock */ -	ret = clk_enable(p->clk); +	ret = clk_prepare_enable(p->clk);  	if (ret) {  		dev_err(&p->pdev->dev, "cannot enable clock\n");  		return ret; @@ -107,7 +107,7 @@ static void em_sti_disable(struct em_sti_priv *p)  	em_sti_write(p, STI_INTENCLR, 3);  	/* stop clock */ -	clk_disable(p->clk); +	clk_disable_unprepare(p->clk);  }  static cycle_t em_sti_count(struct em_sti_priv *p) @@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)  	ced->name = dev_name(&p->pdev->dev);  	ced->features = CLOCK_EVT_FEAT_ONESHOT;  	ced->rating = 200; -	ced->cpumask = cpumask_of(0); +	ced->cpumask = cpu_possible_mask;  	ced->set_next_event = em_sti_clock_event_next;  	ced->set_mode = em_sti_clock_event_mode; @@ -318,10 +318,8 @@ static int em_sti_probe(struct platform_device *pdev)  	int irq;  	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); -	if (p == NULL) { -		dev_err(&pdev->dev, "failed to allocate driver data\n"); +	if (p == NULL)  		return -ENOMEM; -	}  	p->pdev = pdev;  	platform_set_drvdata(pdev, p); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 
5b34768f4d7..ab51bf20a3e 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -24,8 +24,7 @@  #include <linux/of_irq.h>  #include <linux/of_address.h>  #include <linux/clocksource.h> - -#include <asm/mach/time.h> +#include <linux/sched_clock.h>  #define EXYNOS4_MCTREG(x)		(x)  #define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100) @@ -71,6 +70,10 @@ enum {  	MCT_L1_IRQ,  	MCT_L2_IRQ,  	MCT_L3_IRQ, +	MCT_L4_IRQ, +	MCT_L5_IRQ, +	MCT_L6_IRQ, +	MCT_L7_IRQ,  	MCT_NR_IRQS,  }; @@ -150,19 +153,16 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)  }  /* Clocksource handling */ -static void exynos4_mct_frc_start(u32 hi, u32 lo) +static void exynos4_mct_frc_start(void)  {  	u32 reg; -	exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L); -	exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U); -  	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);  	reg |= MCT_G_TCON_START;  	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);  } -static cycle_t exynos4_frc_read(struct clocksource *cs) +static cycle_t notrace _exynos4_frc_read(void)  {  	unsigned int lo, hi;  	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); @@ -176,9 +176,14 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)  	return ((cycle_t)hi << 32) | lo;  } +static cycle_t exynos4_frc_read(struct clocksource *cs) +{ +	return _exynos4_frc_read(); +} +  static void exynos4_frc_resume(struct clocksource *cs)  { -	exynos4_mct_frc_start(0, 0); +	exynos4_mct_frc_start();  }  struct clocksource mct_frc = { @@ -190,12 +195,30 @@ struct clocksource mct_frc = {  	.resume		= exynos4_frc_resume,  }; +static u64 notrace exynos4_read_sched_clock(void) +{ +	return _exynos4_frc_read(); +} + +static struct delay_timer exynos4_delay_timer; + +static cycles_t exynos4_read_current_timer(void) +{ +	return _exynos4_frc_read(); +} +  static void __init exynos4_clocksource_init(void)  { -	exynos4_mct_frc_start(0, 0); +	exynos4_mct_frc_start(); + +	exynos4_delay_timer.read_current_timer = 
&exynos4_read_current_timer; +	exynos4_delay_timer.freq = clk_rate; +	register_current_timer_delay(&exynos4_delay_timer);  	if (clocksource_register_hz(&mct_frc, clk_rate))  		panic("%s: can't register clocksource\n", mct_frc.name); + +	sched_clock_register(exynos4_read_sched_clock, 64, clk_rate);  }  static void exynos4_mct_comp0_stop(void) @@ -406,7 +429,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)  	mevt = container_of(evt, struct mct_clock_event_device, evt);  	mevt->base = EXYNOS4_MCT_L_BASE(cpu); -	sprintf(mevt->name, "mct_tick%d", cpu); +	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);  	evt->name = mevt->name;  	evt->cpumask = cpumask_of(cpu); @@ -414,8 +437,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)  	evt->set_mode = exynos4_tick_set_mode;  	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;  	evt->rating = 450; -	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1), -					0xf, 0x7fffffff);  	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); @@ -428,10 +449,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)  				evt->irq);  			return -EIO;  		} -		irq_set_affinity(evt->irq, cpumask_of(cpu)); +		irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));  	} else {  		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);  	} +	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1), +					0xf, 0x7fffffff);  	return 0;  } @@ -500,6 +523,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem  					 &percpu_mct_tick);  		WARN(err, "MCT: can't request IRQ %d (%d)\n",  		     mct_irqs[MCT_L0_IRQ], err); +	} else { +		irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));  	}  	err = register_cpu_notifier(&exynos4_mct_cpu_nb); diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c new file mode 100644 index 00000000000..454227d4f89 --- /dev/null +++ 
b/drivers/clocksource/fsl_ftm_timer.c @@ -0,0 +1,367 @@ +/* + * Freescale FlexTimer Module (FTM) timer driver. + * + * Copyright 2014 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> +#include <linux/slab.h> + +#define FTM_SC		0x00 +#define FTM_SC_CLK_SHIFT	3 +#define FTM_SC_CLK_MASK	(0x3 << FTM_SC_CLK_SHIFT) +#define FTM_SC_CLK(c)	((c) << FTM_SC_CLK_SHIFT) +#define FTM_SC_PS_MASK	0x7 +#define FTM_SC_TOIE	BIT(6) +#define FTM_SC_TOF	BIT(7) + +#define FTM_CNT		0x04 +#define FTM_MOD		0x08 +#define FTM_CNTIN	0x4C + +#define FTM_PS_MAX	7 + +struct ftm_clock_device { +	void __iomem *clksrc_base; +	void __iomem *clkevt_base; +	unsigned long periodic_cyc; +	unsigned long ps; +	bool big_endian; +}; + +static struct ftm_clock_device *priv; + +static inline u32 ftm_readl(void __iomem *addr) +{ +	if (priv->big_endian) +		return ioread32be(addr); +	else +		return ioread32(addr); +} + +static inline void ftm_writel(u32 val, void __iomem *addr) +{ +	if (priv->big_endian) +		iowrite32be(val, addr); +	else +		iowrite32(val, addr); +} + +static inline void ftm_counter_enable(void __iomem *base) +{ +	u32 val; + +	/* select and enable counter clock source */ +	val = ftm_readl(base + FTM_SC); +	val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); +	val |= priv->ps | FTM_SC_CLK(1); +	ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_counter_disable(void __iomem *base) +{ +	u32 val; + +	/* disable counter clock source */ +	val = ftm_readl(base + FTM_SC); +	val &= ~(FTM_SC_PS_MASK | 
FTM_SC_CLK_MASK); +	ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_irq_acknowledge(void __iomem *base) +{ +	u32 val; + +	val = ftm_readl(base + FTM_SC); +	val &= ~FTM_SC_TOF; +	ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_irq_enable(void __iomem *base) +{ +	u32 val; + +	val = ftm_readl(base + FTM_SC); +	val |= FTM_SC_TOIE; +	ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_irq_disable(void __iomem *base) +{ +	u32 val; + +	val = ftm_readl(base + FTM_SC); +	val &= ~FTM_SC_TOIE; +	ftm_writel(val, base + FTM_SC); +} + +static inline void ftm_reset_counter(void __iomem *base) +{ +	/* +	 * The CNT register contains the FTM counter value. +	 * Reset clears the CNT register. Writing any value to COUNT +	 * updates the counter with its initial value, CNTIN. +	 */ +	ftm_writel(0x00, base + FTM_CNT); +} + +static u64 ftm_read_sched_clock(void) +{ +	return ftm_readl(priv->clksrc_base + FTM_CNT); +} + +static int ftm_set_next_event(unsigned long delta, +				struct clock_event_device *unused) +{ +	/* +	 * The CNNIN and MOD are all double buffer registers, writing +	 * to the MOD register latches the value into a buffer. The MOD +	 * register is updated with the value of its write buffer with +	 * the following scenario: +	 * a, the counter source clock is diabled. +	 */ +	ftm_counter_disable(priv->clkevt_base); + +	/* Force the value of CNTIN to be loaded into the FTM counter */ +	ftm_reset_counter(priv->clkevt_base); + +	/* +	 * The counter increments until the value of MOD is reached, +	 * at which point the counter is reloaded with the value of CNTIN. +	 * The TOF (the overflow flag) bit is set when the FTM counter +	 * changes from MOD to CNTIN. So we should using the delta - 1. 
+	 */ +	ftm_writel(delta - 1, priv->clkevt_base + FTM_MOD); + +	ftm_counter_enable(priv->clkevt_base); + +	ftm_irq_enable(priv->clkevt_base); + +	return 0; +} + +static void ftm_set_mode(enum clock_event_mode mode, +				struct clock_event_device *evt) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		ftm_set_next_event(priv->periodic_cyc, evt); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +		ftm_counter_disable(priv->clkevt_base); +		break; +	default: +		return; +	} +} + +static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	ftm_irq_acknowledge(priv->clkevt_base); + +	if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT)) { +		ftm_irq_disable(priv->clkevt_base); +		ftm_counter_disable(priv->clkevt_base); +	} + +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static struct clock_event_device ftm_clockevent = { +	.name		= "Freescale ftm timer", +	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +	.set_mode	= ftm_set_mode, +	.set_next_event	= ftm_set_next_event, +	.rating		= 300, +}; + +static struct irqaction ftm_timer_irq = { +	.name		= "Freescale ftm timer", +	.flags		= IRQF_TIMER | IRQF_IRQPOLL, +	.handler	= ftm_evt_interrupt, +	.dev_id		= &ftm_clockevent, +}; + +static int __init ftm_clockevent_init(unsigned long freq, int irq) +{ +	int err; + +	ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); +	ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); + +	ftm_reset_counter(priv->clkevt_base); + +	err = setup_irq(irq, &ftm_timer_irq); +	if (err) { +		pr_err("ftm: setup irq failed: %d\n", err); +		return err; +	} + +	ftm_clockevent.cpumask = cpumask_of(0); +	ftm_clockevent.irq = irq; + +	clockevents_config_and_register(&ftm_clockevent, +					freq / (1 << priv->ps), +					1, 0xffff); + +	ftm_counter_enable(priv->clkevt_base); + +	return 0; +} + +static int __init ftm_clocksource_init(unsigned long freq) +{ +	int err; + +	ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); +	ftm_writel(~0UL, 
priv->clksrc_base + FTM_MOD); + +	ftm_reset_counter(priv->clksrc_base); + +	sched_clock_register(ftm_read_sched_clock, 16, freq / (1 << priv->ps)); +	err = clocksource_mmio_init(priv->clksrc_base + FTM_CNT, "fsl-ftm", +				    freq / (1 << priv->ps), 300, 16, +				    clocksource_mmio_readl_up); +	if (err) { +		pr_err("ftm: init clock source mmio failed: %d\n", err); +		return err; +	} + +	ftm_counter_enable(priv->clksrc_base); + +	return 0; +} + +static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, +				 char *ftm_name) +{ +	struct clk *clk; +	int err; + +	clk = of_clk_get_by_name(np, cnt_name); +	if (IS_ERR(clk)) { +		pr_err("ftm: Cannot get \"%s\": %ld\n", cnt_name, PTR_ERR(clk)); +		return PTR_ERR(clk); +	} +	err = clk_prepare_enable(clk); +	if (err) { +		pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n", +			cnt_name, err); +		return err; +	} + +	clk = of_clk_get_by_name(np, ftm_name); +	if (IS_ERR(clk)) { +		pr_err("ftm: Cannot get \"%s\": %ld\n", ftm_name, PTR_ERR(clk)); +		return PTR_ERR(clk); +	} +	err = clk_prepare_enable(clk); +	if (err) +		pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n", +			ftm_name, err); + +	return clk_get_rate(clk); +} + +static unsigned long __init ftm_clk_init(struct device_node *np) +{ +	unsigned long freq; + +	freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); +	if (freq <= 0) +		return 0; + +	freq = __ftm_clk_init(np, "ftm-src-counter-en", "ftm-src"); +	if (freq <= 0) +		return 0; + +	return freq; +} + +static int __init ftm_calc_closest_round_cyc(unsigned long freq) +{ +	priv->ps = 0; + +	/* The counter register is only using the lower 16 bits, and +	 * if the 'freq' value is to big here, then the periodic_cyc +	 * may exceed 0xFFFF. 
+	 */ +	do { +		priv->periodic_cyc = DIV_ROUND_CLOSEST(freq, +						HZ * (1 << priv->ps++)); +	} while (priv->periodic_cyc > 0xFFFF); + +	if (priv->ps > FTM_PS_MAX) { +		pr_err("ftm: the prescaler is %lu > %d\n", +				priv->ps, FTM_PS_MAX); +		return -EINVAL; +	} + +	return 0; +} + +static void __init ftm_timer_init(struct device_node *np) +{ +	unsigned long freq; +	int irq; + +	priv = kzalloc(sizeof(*priv), GFP_KERNEL); +	if (!priv) +		return; + +	priv->clkevt_base = of_iomap(np, 0); +	if (!priv->clkevt_base) { +		pr_err("ftm: unable to map event timer registers\n"); +		goto err; +	} + +	priv->clksrc_base = of_iomap(np, 1); +	if (!priv->clksrc_base) { +		pr_err("ftm: unable to map source timer registers\n"); +		goto err; +	} + +	irq = irq_of_parse_and_map(np, 0); +	if (irq <= 0) { +		pr_err("ftm: unable to get IRQ from DT, %d\n", irq); +		goto err; +	} + +	priv->big_endian = of_property_read_bool(np, "big-endian"); + +	freq = ftm_clk_init(np); +	if (!freq) +		goto err; + +	if (ftm_calc_closest_round_cyc(freq)) +		goto err; + +	if (ftm_clocksource_init(freq)) +		goto err; + +	if (ftm_clockevent_init(freq, irq)) +		goto err; + +	return; + +err: +	kfree(priv); +} +CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init); diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c index c0e25125a55..1593ade2a81 100644 --- a/drivers/clocksource/mmio.c +++ b/drivers/clocksource/mmio.c @@ -22,22 +22,22 @@ static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)  cycle_t clocksource_mmio_readl_up(struct clocksource *c)  { -	return readl_relaxed(to_mmio_clksrc(c)->reg); +	return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg);  }  cycle_t clocksource_mmio_readl_down(struct clocksource *c)  { -	return ~readl_relaxed(to_mmio_clksrc(c)->reg); +	return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;  }  cycle_t clocksource_mmio_readw_up(struct clocksource *c)  { -	return readw_relaxed(to_mmio_clksrc(c)->reg); +	return 
(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg);  }  cycle_t clocksource_mmio_readw_down(struct clocksource *c)  { -	return ~(unsigned)readw_relaxed(to_mmio_clksrc(c)->reg); +	return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;  }  /** diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c index 0f5e65f74dc..445b68a01dc 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c @@ -222,7 +222,7 @@ static struct clocksource clocksource_mxs = {  	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,  }; -static u32 notrace mxs_read_sched_clock_v2(void) +static u64 notrace mxs_read_sched_clock_v2(void)  {  	return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));  } @@ -236,7 +236,7 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)  	else {  		clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),  			"mxs_timer", c, 200, 32, clocksource_mmio_readl_down); -		setup_sched_clock(mxs_read_sched_clock_v2, 32, c); +		sched_clock_register(mxs_read_sched_clock_v2, 32, c);  	}  	return 0; diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c index 1b74bea1238..a709cfa49d8 100644 --- a/drivers/clocksource/nomadik-mtu.c +++ b/drivers/clocksource/nomadik-mtu.c @@ -20,7 +20,6 @@  #include <linux/jiffies.h>  #include <linux/delay.h>  #include <linux/err.h> -#include <linux/platform_data/clocksource-nomadik-mtu.h>  #include <linux/sched_clock.h>  #include <asm/mach/time.h> @@ -76,7 +75,7 @@ static struct delay_timer mtu_delay_timer;   * local implementation which uses the clocksource to get some   * better resolution when scheduling the kernel.   
*/ -static u32 notrace nomadik_read_sched_clock(void) +static u64 notrace nomadik_read_sched_clock(void)  {  	if (unlikely(!mtu_base))  		return 0; @@ -103,7 +102,7 @@ static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)  	return 0;  } -void nmdk_clkevt_reset(void) +static void nmdk_clkevt_reset(void)  {  	if (clkevt_periodic) {  		/* Timer: configure load and background-load, and fire it up */ @@ -144,7 +143,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,  	}  } -void nmdk_clksrc_reset(void) +static void nmdk_clksrc_reset(void)  {  	/* Disable */  	writel(0, mtu_base + MTU_CR(0)); @@ -187,13 +186,13 @@ static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)  static struct irqaction nmdk_timer_irq = {  	.name		= "Nomadik Timer Tick", -	.flags		= IRQF_DISABLED | IRQF_TIMER, +	.flags		= IRQF_TIMER,  	.handler	= nmdk_timer_interrupt,  	.dev_id		= &nmdk_clkevt,  }; -static void __init __nmdk_timer_init(void __iomem *base, int irq, -				     struct clk *pclk, struct clk *clk) +static void __init nmdk_timer_init(void __iomem *base, int irq, +				   struct clk *pclk, struct clk *clk)  {  	unsigned long rate; @@ -231,7 +230,7 @@ static void __init __nmdk_timer_init(void __iomem *base, int irq,  		       "mtu_0");  #ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK -	setup_sched_clock(nomadik_read_sched_clock, 32, rate); +	sched_clock_register(nomadik_read_sched_clock, 32, rate);  #endif  	/* Timer 1 is used for events, register irq and clockevents */ @@ -245,18 +244,6 @@ static void __init __nmdk_timer_init(void __iomem *base, int irq,  	register_current_timer_delay(&mtu_delay_timer);  } -void __init nmdk_timer_init(void __iomem *base, int irq) -{ -	struct clk *clk0, *pclk0; - -	pclk0 = clk_get_sys("mtu0", "apb_pclk"); -	BUG_ON(IS_ERR(pclk0)); -	clk0 = clk_get_sys("mtu0", NULL); -	BUG_ON(IS_ERR(clk0)); - -	__nmdk_timer_init(base, irq, pclk0, clk0); -} -  static void __init nmdk_timer_of_init(struct device_node *node)  {  	struct clk 
*pclk; @@ -280,7 +267,7 @@ static void __init nmdk_timer_of_init(struct device_node *node)  	if (irq <= 0)  		panic("Can't parse IRQ"); -	__nmdk_timer_init(base, irq, pclk, clk); +	nmdk_timer_init(base, irq, pclk, clk);  }  CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",  		       nmdk_timer_of_init); diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c new file mode 100644 index 00000000000..8d115db1e65 --- /dev/null +++ b/drivers/clocksource/qcom-timer.c @@ -0,0 +1,343 @@ +/* + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> + +#include <asm/delay.h> + +#define TIMER_MATCH_VAL			0x0000 +#define TIMER_COUNT_VAL			0x0004 +#define TIMER_ENABLE			0x0008 +#define TIMER_ENABLE_CLR_ON_MATCH_EN	BIT(1) +#define TIMER_ENABLE_EN			BIT(0) +#define TIMER_CLEAR			0x000C +#define DGT_CLK_CTL			0x10 +#define DGT_CLK_CTL_DIV_4		0x3 +#define TIMER_STS_GPT0_CLR_PEND		BIT(10) + +#define GPT_HZ 32768 + +#define MSM_DGT_SHIFT 5 + +static void __iomem *event_base; +static void __iomem *sts_base; + +static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; +	/* Stop the timer tick */ +	if (evt->mode == CLOCK_EVT_MODE_ONESHOT) { +		u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); +		ctrl &= ~TIMER_ENABLE_EN; +		writel_relaxed(ctrl, event_base + TIMER_ENABLE); +	} +	evt->event_handler(evt); +	return IRQ_HANDLED; +} + +static int msm_timer_set_next_event(unsigned long cycles, +				    struct clock_event_device *evt) +{ +	u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); + +	ctrl &= ~TIMER_ENABLE_EN; +	writel_relaxed(ctrl, event_base + TIMER_ENABLE); + +	writel_relaxed(ctrl, event_base + TIMER_CLEAR); +	writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); + +	if (sts_base) +		while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND) +			cpu_relax(); + +	writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); +	return 0; +} + +static void msm_timer_set_mode(enum clock_event_mode mode, +			      struct clock_event_device *evt) +{ +	u32 ctrl; + +	ctrl = readl_relaxed(event_base + TIMER_ENABLE); +	ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN); + +	switch (mode) { +	case CLOCK_EVT_MODE_RESUME: +	case CLOCK_EVT_MODE_PERIODIC: +		
break; +	case CLOCK_EVT_MODE_ONESHOT: +		/* Timer is enabled in set_next_event */ +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +		break; +	} +	writel_relaxed(ctrl, event_base + TIMER_ENABLE); +} + +static struct clock_event_device __percpu *msm_evt; + +static void __iomem *source_base; + +static notrace cycle_t msm_read_timer_count(struct clocksource *cs) +{ +	return readl_relaxed(source_base + TIMER_COUNT_VAL); +} + +static struct clocksource msm_clocksource = { +	.name	= "dg_timer", +	.rating	= 300, +	.read	= msm_read_timer_count, +	.mask	= CLOCKSOURCE_MASK(32), +	.flags	= CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int msm_timer_irq; +static int msm_timer_has_ppi; + +static int msm_local_timer_setup(struct clock_event_device *evt) +{ +	int cpu = smp_processor_id(); +	int err; + +	evt->irq = msm_timer_irq; +	evt->name = "msm_timer"; +	evt->features = CLOCK_EVT_FEAT_ONESHOT; +	evt->rating = 200; +	evt->set_mode = msm_timer_set_mode; +	evt->set_next_event = msm_timer_set_next_event; +	evt->cpumask = cpumask_of(cpu); + +	clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff); + +	if (msm_timer_has_ppi) { +		enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING); +	} else { +		err = request_irq(evt->irq, msm_timer_interrupt, +				IRQF_TIMER | IRQF_NOBALANCING | +				IRQF_TRIGGER_RISING, "gp_timer", evt); +		if (err) +			pr_err("request_irq failed\n"); +	} + +	return 0; +} + +static void msm_local_timer_stop(struct clock_event_device *evt) +{ +	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); +	disable_percpu_irq(evt->irq); +} + +static int msm_timer_cpu_notify(struct notifier_block *self, +					   unsigned long action, void *hcpu) +{ +	/* +	 * Grab cpu pointer in each case to avoid spurious +	 * preemptible warnings +	 */ +	switch (action & ~CPU_TASKS_FROZEN) { +	case CPU_STARTING: +		msm_local_timer_setup(this_cpu_ptr(msm_evt)); +		break; +	case CPU_DYING: +		msm_local_timer_stop(this_cpu_ptr(msm_evt)); +		break; +	} + +	return NOTIFY_OK; +} + 
+static struct notifier_block msm_timer_cpu_nb = { +	.notifier_call = msm_timer_cpu_notify, +}; + +static u64 notrace msm_sched_clock_read(void) +{ +	return msm_clocksource.read(&msm_clocksource); +} + +static unsigned long msm_read_current_timer(void) +{ +	return msm_clocksource.read(&msm_clocksource); +} + +static struct delay_timer msm_delay_timer = { +	.read_current_timer = msm_read_current_timer, +}; + +static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, +				  bool percpu) +{ +	struct clocksource *cs = &msm_clocksource; +	int res = 0; + +	msm_timer_irq = irq; +	msm_timer_has_ppi = percpu; + +	msm_evt = alloc_percpu(struct clock_event_device); +	if (!msm_evt) { +		pr_err("memory allocation failed for clockevents\n"); +		goto err; +	} + +	if (percpu) +		res = request_percpu_irq(irq, msm_timer_interrupt, +					 "gp_timer", msm_evt); + +	if (res) { +		pr_err("request_percpu_irq failed\n"); +	} else { +		res = register_cpu_notifier(&msm_timer_cpu_nb); +		if (res) { +			free_percpu_irq(irq, msm_evt); +			goto err; +		} + +		/* Immediately configure the timer on the boot CPU */ +		msm_local_timer_setup(__this_cpu_ptr(msm_evt)); +	} + +err: +	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE); +	res = clocksource_register_hz(cs, dgt_hz); +	if (res) +		pr_err("clocksource_register failed\n"); +	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz); +	msm_delay_timer.freq = dgt_hz; +	register_current_timer_delay(&msm_delay_timer); +} + +#ifdef CONFIG_ARCH_QCOM +static void __init msm_dt_timer_init(struct device_node *np) +{ +	u32 freq; +	int irq; +	struct resource res; +	u32 percpu_offset; +	void __iomem *base; +	void __iomem *cpu0_base; + +	base = of_iomap(np, 0); +	if (!base) { +		pr_err("Failed to map event base\n"); +		return; +	} + +	/* We use GPT0 for the clockevent */ +	irq = irq_of_parse_and_map(np, 1); +	if (irq <= 0) { +		pr_err("Can't get irq\n"); +		return; +	} + +	/* We use CPU0's DGT for the clocksource */ +	if 
(of_property_read_u32(np, "cpu-offset", &percpu_offset)) +		percpu_offset = 0; + +	if (of_address_to_resource(np, 0, &res)) { +		pr_err("Failed to parse DGT resource\n"); +		return; +	} + +	cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res)); +	if (!cpu0_base) { +		pr_err("Failed to map source base\n"); +		return; +	} + +	if (of_property_read_u32(np, "clock-frequency", &freq)) { +		pr_err("Unknown frequency\n"); +		return; +	} + +	event_base = base + 0x4; +	sts_base = base + 0x88; +	source_base = cpu0_base + 0x24; +	freq /= 4; +	writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL); + +	msm_timer_init(freq, 32, irq, !!percpu_offset); +} +CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); +CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); +#else + +static int __init msm_timer_map(phys_addr_t addr, u32 event, u32 source, +				u32 sts) +{ +	void __iomem *base; + +	base = ioremap(addr, SZ_256); +	if (!base) { +		pr_err("Failed to map timer base\n"); +		return -ENOMEM; +	} +	event_base = base + event; +	source_base = base + source; +	if (sts) +		sts_base = base + sts; + +	return 0; +} + +static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs) +{ +	/* +	 * Shift timer count down by a constant due to unreliable lower bits +	 * on some targets. 
+	 */ +	return msm_read_timer_count(cs) >> MSM_DGT_SHIFT; +} + +void __init msm7x01_timer_init(void) +{ +	struct clocksource *cs = &msm_clocksource; + +	if (msm_timer_map(0xc0100000, 0x0, 0x10, 0x0)) +		return; +	cs->read = msm_read_timer_count_shift; +	cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)); +	/* 600 KHz */ +	msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7, +			false); +} + +void __init msm7x30_timer_init(void) +{ +	if (msm_timer_map(0xc0100000, 0x4, 0x24, 0x80)) +		return; +	msm_timer_init(24576000 / 4, 32, 1, false); +} + +void __init qsd8x50_timer_init(void) +{ +	if (msm_timer_map(0xAC100000, 0x0, 0x10, 0x34)) +		return; +	msm_timer_init(19200000 / 4, 32, 7, false); +} +#endif diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c index ab29476ee5f..5645cfc90c4 100644 --- a/drivers/clocksource/samsung_pwm_timer.c +++ b/drivers/clocksource/samsung_pwm_timer.c @@ -264,7 +264,7 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)  static struct irqaction samsung_clock_event_irq = {  	.name		= "samsung_time_irq", -	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, +	.flags		= IRQF_TIMER | IRQF_IRQPOLL,  	.handler	= samsung_clock_event_isr,  	.dev_id		= &time_event_device,  }; @@ -331,7 +331,7 @@ static struct clocksource samsung_clocksource = {   * this wraps around for now, since it is just a relative time   * stamp. (Inspired by U300 implementation.)   
*/ -static u32 notrace samsung_read_sched_clock(void) +static u64 notrace samsung_read_sched_clock(void)  {  	return samsung_clocksource_read(NULL);  } @@ -357,7 +357,7 @@ static void __init samsung_clocksource_init(void)  	else  		pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14; -	setup_sched_clock(samsung_read_sched_clock, +	sched_clock_register(samsung_read_sched_clock,  						pwm.variant.bits, clock_rate);  	samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits); diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 0965e9848b3..dfa780396b9 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -11,40 +11,93 @@   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/delay.h> +#include <linux/err.h>  #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h>  #include <linux/interrupt.h> -#include <linux/ioport.h>  #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h>  #include <linux/irq.h> -#include <linux/err.h> -#include <linux/delay.h> -#include <linux/clocksource.h> -#include <linux/clockchips.h> -#include <linux/sh_timer.h> -#include <linux/slab.h>  #include <linux/module.h> +#include <linux/platform_device.h>  #include <linux/pm_domain.h>  #include <linux/pm_runtime.h> +#include <linux/sh_timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +struct sh_cmt_device; + +/* + * The CMT comes in 5 different identified flavours, depending not only on the + * SoC but also on the particular 
instance. The following table lists the main + * characteristics of those flavours. + * + *			16B	32B	32B-F	48B	48B-2 + * ----------------------------------------------------------------------------- + * Channels		2	1/4	1	6	2/8 + * Control Width	16	16	16	16	32 + * Counter Width	16	32	32	32/48	32/48 + * Shared Start/Stop	Y	Y	Y	Y	N + * + * The 48-bit gen2 version has a per-channel start/stop register located in the + * channel registers block. All other versions have a shared start/stop register + * located in the global space. + * + * Channels are indexed from 0 to N-1 in the documentation. The channel index + * infers the start/stop bit position in the control register and the channel + * registers block address. Some CMT instances have a subset of channels + * available, in which case the index in the documentation doesn't match the + * "real" index as implemented in hardware. This is for instance the case with + * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0 + * in the documentation but using start/stop bit 5 and having its registers + * block at 0x60. + * + * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit + * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable. 
+ */ + +enum sh_cmt_model { +	SH_CMT_16BIT, +	SH_CMT_32BIT, +	SH_CMT_32BIT_FAST, +	SH_CMT_48BIT, +	SH_CMT_48BIT_GEN2, +}; + +struct sh_cmt_info { +	enum sh_cmt_model model; -struct sh_cmt_priv { -	void __iomem *mapbase; -	void __iomem *mapbase_str; -	struct clk *clk;  	unsigned long width; /* 16 or 32 bit version of hardware block */  	unsigned long overflow_bit;  	unsigned long clear_bits; -	struct irqaction irqaction; -	struct platform_device *pdev; +	/* callbacks for CMSTR and CMCSR access */ +	unsigned long (*read_control)(void __iomem *base, unsigned long offs); +	void (*write_control)(void __iomem *base, unsigned long offs, +			      unsigned long value); + +	/* callbacks for CMCNT and CMCOR access */ +	unsigned long (*read_count)(void __iomem *base, unsigned long offs); +	void (*write_count)(void __iomem *base, unsigned long offs, +			    unsigned long value); +}; + +struct sh_cmt_channel { +	struct sh_cmt_device *cmt; + +	unsigned int index;	/* Index in the documentation */ +	unsigned int hwidx;	/* Real hardware index */ + +	void __iomem *iostart; +	void __iomem *ioctrl; + +	unsigned int timer_bit;  	unsigned long flags;  	unsigned long match_value;  	unsigned long next_match_value; @@ -55,38 +108,52 @@ struct sh_cmt_priv {  	struct clocksource cs;  	unsigned long total_cycles;  	bool cs_enabled; +}; -	/* callbacks for CMSTR and CMCSR access */ -	unsigned long (*read_control)(void __iomem *base, unsigned long offs); -	void (*write_control)(void __iomem *base, unsigned long offs, -			      unsigned long value); +struct sh_cmt_device { +	struct platform_device *pdev; -	/* callbacks for CMCNT and CMCOR access */ -	unsigned long (*read_count)(void __iomem *base, unsigned long offs); -	void (*write_count)(void __iomem *base, unsigned long offs, -			    unsigned long value); +	const struct sh_cmt_info *info; +	bool legacy; + +	void __iomem *mapbase_ch; +	void __iomem *mapbase; +	struct clk *clk; + +	struct sh_cmt_channel *channels; +	unsigned int num_channels; + 
+	bool has_clockevent; +	bool has_clocksource;  }; -/* Examples of supported CMT timer register layouts and I/O access widths: - * - * "16-bit counter and 16-bit control" as found on sh7263: - * CMSTR 0xfffec000 16-bit - * CMCSR 0xfffec002 16-bit - * CMCNT 0xfffec004 16-bit - * CMCOR 0xfffec006 16-bit - * - * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740: - * CMSTR 0xffca0000 16-bit - * CMCSR 0xffca0060 16-bit - * CMCNT 0xffca0064 32-bit - * CMCOR 0xffca0068 32-bit - * - * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790: - * CMSTR 0xffca0500 32-bit - * CMCSR 0xffca0510 32-bit - * CMCNT 0xffca0514 32-bit - * CMCOR 0xffca0518 32-bit - */ +#define SH_CMT16_CMCSR_CMF		(1 << 7) +#define SH_CMT16_CMCSR_CMIE		(1 << 6) +#define SH_CMT16_CMCSR_CKS8		(0 << 0) +#define SH_CMT16_CMCSR_CKS32		(1 << 0) +#define SH_CMT16_CMCSR_CKS128		(2 << 0) +#define SH_CMT16_CMCSR_CKS512		(3 << 0) +#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0) + +#define SH_CMT32_CMCSR_CMF		(1 << 15) +#define SH_CMT32_CMCSR_OVF		(1 << 14) +#define SH_CMT32_CMCSR_WRFLG		(1 << 13) +#define SH_CMT32_CMCSR_STTF		(1 << 12) +#define SH_CMT32_CMCSR_STPF		(1 << 11) +#define SH_CMT32_CMCSR_SSIE		(1 << 10) +#define SH_CMT32_CMCSR_CMS		(1 << 9) +#define SH_CMT32_CMCSR_CMM		(1 << 8) +#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7) +#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4) +#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4) +#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4) +#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4) +#define SH_CMT32_CMCSR_DBGIVD		(1 << 3) +#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0) +#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0) +#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0) +#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0) +#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)  static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)  { @@ -110,64 +177,123 @@ static void sh_cmt_write32(void __iomem *base, unsigned long offs,  	iowrite32(value, base + (offs << 2));  } +static const struct 
sh_cmt_info sh_cmt_info[] = { +	[SH_CMT_16BIT] = { +		.model = SH_CMT_16BIT, +		.width = 16, +		.overflow_bit = SH_CMT16_CMCSR_CMF, +		.clear_bits = ~SH_CMT16_CMCSR_CMF, +		.read_control = sh_cmt_read16, +		.write_control = sh_cmt_write16, +		.read_count = sh_cmt_read16, +		.write_count = sh_cmt_write16, +	}, +	[SH_CMT_32BIT] = { +		.model = SH_CMT_32BIT, +		.width = 32, +		.overflow_bit = SH_CMT32_CMCSR_CMF, +		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), +		.read_control = sh_cmt_read16, +		.write_control = sh_cmt_write16, +		.read_count = sh_cmt_read32, +		.write_count = sh_cmt_write32, +	}, +	[SH_CMT_32BIT_FAST] = { +		.model = SH_CMT_32BIT_FAST, +		.width = 32, +		.overflow_bit = SH_CMT32_CMCSR_CMF, +		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), +		.read_control = sh_cmt_read16, +		.write_control = sh_cmt_write16, +		.read_count = sh_cmt_read32, +		.write_count = sh_cmt_write32, +	}, +	[SH_CMT_48BIT] = { +		.model = SH_CMT_48BIT, +		.width = 32, +		.overflow_bit = SH_CMT32_CMCSR_CMF, +		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), +		.read_control = sh_cmt_read32, +		.write_control = sh_cmt_write32, +		.read_count = sh_cmt_read32, +		.write_count = sh_cmt_write32, +	}, +	[SH_CMT_48BIT_GEN2] = { +		.model = SH_CMT_48BIT_GEN2, +		.width = 32, +		.overflow_bit = SH_CMT32_CMCSR_CMF, +		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), +		.read_control = sh_cmt_read32, +		.write_control = sh_cmt_write32, +		.read_count = sh_cmt_read32, +		.write_count = sh_cmt_write32, +	}, +}; +  #define CMCSR 0 /* channel register */  #define CMCNT 1 /* channel register */  #define CMCOR 2 /* channel register */ -static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p) +static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)  { -	return p->read_control(p->mapbase_str, 0); +	if (ch->iostart) +		return ch->cmt->info->read_control(ch->iostart, 0); +	else +		return 
ch->cmt->info->read_control(ch->cmt->mapbase, 0);  } -static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p) +static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, +				      unsigned long value)  { -	return p->read_control(p->mapbase, CMCSR); +	if (ch->iostart) +		ch->cmt->info->write_control(ch->iostart, 0, value); +	else +		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);  } -static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p) +static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)  { -	return p->read_count(p->mapbase, CMCNT); +	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);  } -static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p, +static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,  				      unsigned long value)  { -	p->write_control(p->mapbase_str, 0, value); +	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);  } -static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p, -				      unsigned long value) +static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)  { -	p->write_control(p->mapbase, CMCSR, value); +	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);  } -static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p, +static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,  				      unsigned long value)  { -	p->write_count(p->mapbase, CMCNT, value); +	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);  } -static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p, +static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,  				      unsigned long value)  { -	p->write_count(p->mapbase, CMCOR, value); +	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);  } -static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p, +static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,  					int *has_wrapped)  {  	unsigned long v1, v2, v3;  	int o1, o2; -	o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit; +	o1 = 
sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;  	/* Make sure the timer value is stable. Stolen from acpi_pm.c */  	do {  		o2 = o1; -		v1 = sh_cmt_read_cmcnt(p); -		v2 = sh_cmt_read_cmcnt(p); -		v3 = sh_cmt_read_cmcnt(p); -		o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit; +		v1 = sh_cmt_read_cmcnt(ch); +		v2 = sh_cmt_read_cmcnt(ch); +		v3 = sh_cmt_read_cmcnt(ch); +		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;  	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)  			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2))); @@ -177,52 +303,56 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,  static DEFINE_RAW_SPINLOCK(sh_cmt_lock); -static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) +static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data;  	unsigned long flags, value;  	/* start stop register shared by multiple timer channels */  	raw_spin_lock_irqsave(&sh_cmt_lock, flags); -	value = sh_cmt_read_cmstr(p); +	value = sh_cmt_read_cmstr(ch);  	if (start) -		value |= 1 << cfg->timer_bit; +		value |= 1 << ch->timer_bit;  	else -		value &= ~(1 << cfg->timer_bit); +		value &= ~(1 << ch->timer_bit); -	sh_cmt_write_cmstr(p, value); +	sh_cmt_write_cmstr(ch, value);  	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);  } -static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) +static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)  {  	int k, ret; -	pm_runtime_get_sync(&p->pdev->dev); -	dev_pm_syscore_device(&p->pdev->dev, true); +	pm_runtime_get_sync(&ch->cmt->pdev->dev); +	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);  	/* enable clock */ -	ret = clk_enable(p->clk); +	ret = clk_enable(ch->cmt->clk);  	if (ret) { -		dev_err(&p->pdev->dev, "cannot enable clock\n"); +		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n", +			ch->index);  		goto err0;  	}  	/* make sure channel is disabled */ -	
sh_cmt_start_stop_ch(p, 0); +	sh_cmt_start_stop_ch(ch, 0);  	/* configure channel, periodic mode and maximum timeout */ -	if (p->width == 16) { -		*rate = clk_get_rate(p->clk) / 512; -		sh_cmt_write_cmcsr(p, 0x43); +	if (ch->cmt->info->width == 16) { +		*rate = clk_get_rate(ch->cmt->clk) / 512; +		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE | +				   SH_CMT16_CMCSR_CKS512);  	} else { -		*rate = clk_get_rate(p->clk) / 8; -		sh_cmt_write_cmcsr(p, 0x01a4); +		*rate = clk_get_rate(ch->cmt->clk) / 8; +		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM | +				   SH_CMT32_CMCSR_CMTOUT_IE | +				   SH_CMT32_CMCSR_CMR_IRQ | +				   SH_CMT32_CMCSR_CKS_RCLK8);  	} -	sh_cmt_write_cmcor(p, 0xffffffff); -	sh_cmt_write_cmcnt(p, 0); +	sh_cmt_write_cmcor(ch, 0xffffffff); +	sh_cmt_write_cmcnt(ch, 0);  	/*  	 * According to the sh73a0 user's manual, as CMCNT can be operated @@ -236,41 +366,42 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)  	 * take RCLKx2 at maximum.  	 */  	for (k = 0; k < 100; k++) { -		if (!sh_cmt_read_cmcnt(p)) +		if (!sh_cmt_read_cmcnt(ch))  			break;  		udelay(1);  	} -	if (sh_cmt_read_cmcnt(p)) { -		dev_err(&p->pdev->dev, "cannot clear CMCNT\n"); +	if (sh_cmt_read_cmcnt(ch)) { +		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", +			ch->index);  		ret = -ETIMEDOUT;  		goto err1;  	}  	/* enable channel */ -	sh_cmt_start_stop_ch(p, 1); +	sh_cmt_start_stop_ch(ch, 1);  	return 0;   err1:  	/* stop clock */ -	clk_disable(p->clk); +	clk_disable(ch->cmt->clk);   err0:  	return ret;  } -static void sh_cmt_disable(struct sh_cmt_priv *p) +static void sh_cmt_disable(struct sh_cmt_channel *ch)  {  	/* disable channel */ -	sh_cmt_start_stop_ch(p, 0); +	sh_cmt_start_stop_ch(ch, 0);  	/* disable interrupts in CMT block */ -	sh_cmt_write_cmcsr(p, 0); +	sh_cmt_write_cmcsr(ch, 0);  	/* stop clock */ -	clk_disable(p->clk); +	clk_disable(ch->cmt->clk); -	dev_pm_syscore_device(&p->pdev->dev, false); -	pm_runtime_put(&p->pdev->dev); +	
dev_pm_syscore_device(&ch->cmt->pdev->dev, false); +	pm_runtime_put(&ch->cmt->pdev->dev);  }  /* private flags */ @@ -280,24 +411,24 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)  #define FLAG_SKIPEVENT (1 << 3)  #define FLAG_IRQCONTEXT (1 << 4) -static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, +static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,  					      int absolute)  {  	unsigned long new_match; -	unsigned long value = p->next_match_value; +	unsigned long value = ch->next_match_value;  	unsigned long delay = 0;  	unsigned long now = 0;  	int has_wrapped; -	now = sh_cmt_get_counter(p, &has_wrapped); -	p->flags |= FLAG_REPROGRAM; /* force reprogram */ +	now = sh_cmt_get_counter(ch, &has_wrapped); +	ch->flags |= FLAG_REPROGRAM; /* force reprogram */  	if (has_wrapped) {  		/* we're competing with the interrupt handler.  		 *  -> let the interrupt handler reprogram the timer.  		 *  -> interrupt number two handles the event.  		 */ -		p->flags |= FLAG_SKIPEVENT; +		ch->flags |= FLAG_SKIPEVENT;  		return;  	} @@ -309,20 +440,20 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,  		 * but don't save the new match value yet.  		 */  		new_match = now + value + delay; -		if (new_match > p->max_match_value) -			new_match = p->max_match_value; +		if (new_match > ch->max_match_value) +			new_match = ch->max_match_value; -		sh_cmt_write_cmcor(p, new_match); +		sh_cmt_write_cmcor(ch, new_match); -		now = sh_cmt_get_counter(p, &has_wrapped); -		if (has_wrapped && (new_match > p->match_value)) { +		now = sh_cmt_get_counter(ch, &has_wrapped); +		if (has_wrapped && (new_match > ch->match_value)) {  			/* we are changing to a greater match value,  			 * so this wrap must be caused by the counter  			 * matching the old value.  			 * -> first interrupt reprograms the timer.  			 * -> interrupt number two handles the event.  			 
*/ -			p->flags |= FLAG_SKIPEVENT; +			ch->flags |= FLAG_SKIPEVENT;  			break;  		} @@ -333,7 +464,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,  			 * -> save programmed match value.  			 * -> let isr handle the event.  			 */ -			p->match_value = new_match; +			ch->match_value = new_match;  			break;  		} @@ -344,7 +475,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,  			 * -> save programmed match value.  			 * -> let isr handle the event.  			 */ -			p->match_value = new_match; +			ch->match_value = new_match;  			break;  		} @@ -360,138 +491,141 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,  			delay = 1;  		if (!delay) -			dev_warn(&p->pdev->dev, "too long delay\n"); +			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n", +				 ch->index);  	} while (delay);  } -static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) +static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)  { -	if (delta > p->max_match_value) -		dev_warn(&p->pdev->dev, "delta out of range\n"); +	if (delta > ch->max_match_value) +		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n", +			 ch->index); -	p->next_match_value = delta; -	sh_cmt_clock_event_program_verify(p, 0); +	ch->next_match_value = delta; +	sh_cmt_clock_event_program_verify(ch, 0);  } -static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) +static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)  {  	unsigned long flags; -	raw_spin_lock_irqsave(&p->lock, flags); -	__sh_cmt_set_next(p, delta); -	raw_spin_unlock_irqrestore(&p->lock, flags); +	raw_spin_lock_irqsave(&ch->lock, flags); +	__sh_cmt_set_next(ch, delta); +	raw_spin_unlock_irqrestore(&ch->lock, flags);  }  static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)  { -	struct sh_cmt_priv *p = dev_id; +	struct sh_cmt_channel *ch = dev_id;  	/* clear flags */ -	sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & 
p->clear_bits); +	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & +			   ch->cmt->info->clear_bits);  	/* update clock source counter to begin with if enabled  	 * the wrap flag should be cleared by the timer specific  	 * isr before we end up here.  	 */ -	if (p->flags & FLAG_CLOCKSOURCE) -		p->total_cycles += p->match_value + 1; +	if (ch->flags & FLAG_CLOCKSOURCE) +		ch->total_cycles += ch->match_value + 1; -	if (!(p->flags & FLAG_REPROGRAM)) -		p->next_match_value = p->max_match_value; +	if (!(ch->flags & FLAG_REPROGRAM)) +		ch->next_match_value = ch->max_match_value; -	p->flags |= FLAG_IRQCONTEXT; +	ch->flags |= FLAG_IRQCONTEXT; -	if (p->flags & FLAG_CLOCKEVENT) { -		if (!(p->flags & FLAG_SKIPEVENT)) { -			if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) { -				p->next_match_value = p->max_match_value; -				p->flags |= FLAG_REPROGRAM; +	if (ch->flags & FLAG_CLOCKEVENT) { +		if (!(ch->flags & FLAG_SKIPEVENT)) { +			if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) { +				ch->next_match_value = ch->max_match_value; +				ch->flags |= FLAG_REPROGRAM;  			} -			p->ced.event_handler(&p->ced); +			ch->ced.event_handler(&ch->ced);  		}  	} -	p->flags &= ~FLAG_SKIPEVENT; +	ch->flags &= ~FLAG_SKIPEVENT; -	if (p->flags & FLAG_REPROGRAM) { -		p->flags &= ~FLAG_REPROGRAM; -		sh_cmt_clock_event_program_verify(p, 1); +	if (ch->flags & FLAG_REPROGRAM) { +		ch->flags &= ~FLAG_REPROGRAM; +		sh_cmt_clock_event_program_verify(ch, 1); -		if (p->flags & FLAG_CLOCKEVENT) -			if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) -			    || (p->match_value == p->next_match_value)) -				p->flags &= ~FLAG_REPROGRAM; +		if (ch->flags & FLAG_CLOCKEVENT) +			if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) +			    || (ch->match_value == ch->next_match_value)) +				ch->flags &= ~FLAG_REPROGRAM;  	} -	p->flags &= ~FLAG_IRQCONTEXT; +	ch->flags &= ~FLAG_IRQCONTEXT;  	return IRQ_HANDLED;  } -static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) +static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned 
long flag)  {  	int ret = 0;  	unsigned long flags; -	raw_spin_lock_irqsave(&p->lock, flags); +	raw_spin_lock_irqsave(&ch->lock, flags); -	if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) -		ret = sh_cmt_enable(p, &p->rate); +	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) +		ret = sh_cmt_enable(ch, &ch->rate);  	if (ret)  		goto out; -	p->flags |= flag; +	ch->flags |= flag;  	/* setup timeout if no clockevent */ -	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) -		__sh_cmt_set_next(p, p->max_match_value); +	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT))) +		__sh_cmt_set_next(ch, ch->max_match_value);   out: -	raw_spin_unlock_irqrestore(&p->lock, flags); +	raw_spin_unlock_irqrestore(&ch->lock, flags);  	return ret;  } -static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) +static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)  {  	unsigned long flags;  	unsigned long f; -	raw_spin_lock_irqsave(&p->lock, flags); +	raw_spin_lock_irqsave(&ch->lock, flags); -	f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); -	p->flags &= ~flag; +	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); +	ch->flags &= ~flag; -	if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) -		sh_cmt_disable(p); +	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) +		sh_cmt_disable(ch);  	/* adjust the timeout to maximum if only clocksource left */ -	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) -		__sh_cmt_set_next(p, p->max_match_value); +	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE)) +		__sh_cmt_set_next(ch, ch->max_match_value); -	raw_spin_unlock_irqrestore(&p->lock, flags); +	raw_spin_unlock_irqrestore(&ch->lock, flags);  } -static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) +static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)  { -	return container_of(cs, struct sh_cmt_priv, cs); +	return container_of(cs, struct 
sh_cmt_channel, cs);  }  static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)  { -	struct sh_cmt_priv *p = cs_to_sh_cmt(cs); +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);  	unsigned long flags, raw;  	unsigned long value;  	int has_wrapped; -	raw_spin_lock_irqsave(&p->lock, flags); -	value = p->total_cycles; -	raw = sh_cmt_get_counter(p, &has_wrapped); +	raw_spin_lock_irqsave(&ch->lock, flags); +	value = ch->total_cycles; +	raw = sh_cmt_get_counter(ch, &has_wrapped);  	if (unlikely(has_wrapped)) -		raw += p->match_value + 1; -	raw_spin_unlock_irqrestore(&p->lock, flags); +		raw += ch->match_value + 1; +	raw_spin_unlock_irqrestore(&ch->lock, flags);  	return value + raw;  } @@ -499,53 +633,53 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)  static int sh_cmt_clocksource_enable(struct clocksource *cs)  {  	int ret; -	struct sh_cmt_priv *p = cs_to_sh_cmt(cs); +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); -	WARN_ON(p->cs_enabled); +	WARN_ON(ch->cs_enabled); -	p->total_cycles = 0; +	ch->total_cycles = 0; -	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); +	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);  	if (!ret) { -		__clocksource_updatefreq_hz(cs, p->rate); -		p->cs_enabled = true; +		__clocksource_updatefreq_hz(cs, ch->rate); +		ch->cs_enabled = true;  	}  	return ret;  }  static void sh_cmt_clocksource_disable(struct clocksource *cs)  { -	struct sh_cmt_priv *p = cs_to_sh_cmt(cs); +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); -	WARN_ON(!p->cs_enabled); +	WARN_ON(!ch->cs_enabled); -	sh_cmt_stop(p, FLAG_CLOCKSOURCE); -	p->cs_enabled = false; +	sh_cmt_stop(ch, FLAG_CLOCKSOURCE); +	ch->cs_enabled = false;  }  static void sh_cmt_clocksource_suspend(struct clocksource *cs)  { -	struct sh_cmt_priv *p = cs_to_sh_cmt(cs); +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); -	sh_cmt_stop(p, FLAG_CLOCKSOURCE); -	pm_genpd_syscore_poweroff(&p->pdev->dev); +	sh_cmt_stop(ch, FLAG_CLOCKSOURCE); +	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);  }  static void 
sh_cmt_clocksource_resume(struct clocksource *cs)  { -	struct sh_cmt_priv *p = cs_to_sh_cmt(cs); +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); -	pm_genpd_syscore_poweron(&p->pdev->dev); -	sh_cmt_start(p, FLAG_CLOCKSOURCE); +	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); +	sh_cmt_start(ch, FLAG_CLOCKSOURCE);  } -static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, -				       char *name, unsigned long rating) +static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch, +				       const char *name)  { -	struct clocksource *cs = &p->cs; +	struct clocksource *cs = &ch->cs;  	cs->name = name; -	cs->rating = rating; +	cs->rating = 125;  	cs->read = sh_cmt_clocksource_read;  	cs->enable = sh_cmt_clocksource_enable;  	cs->disable = sh_cmt_clocksource_disable; @@ -554,47 +688,48 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,  	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);  	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; -	dev_info(&p->pdev->dev, "used as clock source\n"); +	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n", +		 ch->index);  	/* Register with dummy 1 Hz value, gets updated in ->enable() */  	clocksource_register_hz(cs, 1);  	return 0;  } -static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) +static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)  { -	return container_of(ced, struct sh_cmt_priv, ced); +	return container_of(ced, struct sh_cmt_channel, ced);  } -static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) +static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)  { -	struct clock_event_device *ced = &p->ced; +	struct clock_event_device *ced = &ch->ced; -	sh_cmt_start(p, FLAG_CLOCKEVENT); +	sh_cmt_start(ch, FLAG_CLOCKEVENT);  	/* TODO: calculate good shift from rate and counter bit width */  	ced->shift = 32; -	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); -	ced->max_delta_ns = 
clockevent_delta2ns(p->max_match_value, ced); +	ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift); +	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);  	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);  	if (periodic) -		sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1); +		sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1);  	else -		sh_cmt_set_next(p, p->max_match_value); +		sh_cmt_set_next(ch, ch->max_match_value);  }  static void sh_cmt_clock_event_mode(enum clock_event_mode mode,  				    struct clock_event_device *ced)  { -	struct sh_cmt_priv *p = ced_to_sh_cmt(ced); +	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);  	/* deal with old setting first */  	switch (ced->mode) {  	case CLOCK_EVT_MODE_PERIODIC:  	case CLOCK_EVT_MODE_ONESHOT: -		sh_cmt_stop(p, FLAG_CLOCKEVENT); +		sh_cmt_stop(ch, FLAG_CLOCKEVENT);  		break;  	default:  		break; @@ -602,16 +737,18 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,  	switch (mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		dev_info(&p->pdev->dev, "used for periodic clock events\n"); -		sh_cmt_clock_event_start(p, 1); +		dev_info(&ch->cmt->pdev->dev, +			 "ch%u: used for periodic clock events\n", ch->index); +		sh_cmt_clock_event_start(ch, 1);  		break;  	case CLOCK_EVT_MODE_ONESHOT: -		dev_info(&p->pdev->dev, "used for oneshot clock events\n"); -		sh_cmt_clock_event_start(p, 0); +		dev_info(&ch->cmt->pdev->dev, +			 "ch%u: used for oneshot clock events\n", ch->index); +		sh_cmt_clock_event_start(ch, 0);  		break;  	case CLOCK_EVT_MODE_SHUTDOWN:  	case CLOCK_EVT_MODE_UNUSED: -		sh_cmt_stop(p, FLAG_CLOCKEVENT); +		sh_cmt_stop(ch, FLAG_CLOCKEVENT);  		break;  	default:  		break; @@ -621,185 +758,341 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,  static int sh_cmt_clock_event_next(unsigned long delta,  				   struct clock_event_device *ced)  { -	struct sh_cmt_priv *p = ced_to_sh_cmt(ced); +	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);  	BUG_ON(ced->mode != 
CLOCK_EVT_MODE_ONESHOT); -	if (likely(p->flags & FLAG_IRQCONTEXT)) -		p->next_match_value = delta - 1; +	if (likely(ch->flags & FLAG_IRQCONTEXT)) +		ch->next_match_value = delta - 1;  	else -		sh_cmt_set_next(p, delta - 1); +		sh_cmt_set_next(ch, delta - 1);  	return 0;  }  static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)  { -	pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev); +	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); + +	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); +	clk_unprepare(ch->cmt->clk);  }  static void sh_cmt_clock_event_resume(struct clock_event_device *ced)  { -	pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev); +	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); + +	clk_prepare(ch->cmt->clk); +	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);  } -static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, -				       char *name, unsigned long rating) +static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch, +				      const char *name)  { -	struct clock_event_device *ced = &p->ced; +	struct clock_event_device *ced = &ch->ced; +	int irq; +	int ret; -	memset(ced, 0, sizeof(*ced)); +	irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 
0 : ch->index); +	if (irq < 0) { +		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", +			ch->index); +		return irq; +	} + +	ret = request_irq(irq, sh_cmt_interrupt, +			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, +			  dev_name(&ch->cmt->pdev->dev), ch); +	if (ret) { +		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n", +			ch->index, irq); +		return ret; +	}  	ced->name = name;  	ced->features = CLOCK_EVT_FEAT_PERIODIC;  	ced->features |= CLOCK_EVT_FEAT_ONESHOT; -	ced->rating = rating; -	ced->cpumask = cpumask_of(0); +	ced->rating = 125; +	ced->cpumask = cpu_possible_mask;  	ced->set_next_event = sh_cmt_clock_event_next;  	ced->set_mode = sh_cmt_clock_event_mode;  	ced->suspend = sh_cmt_clock_event_suspend;  	ced->resume = sh_cmt_clock_event_resume; -	dev_info(&p->pdev->dev, "used for clock events\n"); +	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n", +		 ch->index);  	clockevents_register_device(ced); + +	return 0;  } -static int sh_cmt_register(struct sh_cmt_priv *p, char *name, -			   unsigned long clockevent_rating, -			   unsigned long clocksource_rating) +static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name, +			   bool clockevent, bool clocksource)  { -	if (clockevent_rating) -		sh_cmt_register_clockevent(p, name, clockevent_rating); +	int ret; -	if (clocksource_rating) -		sh_cmt_register_clocksource(p, name, clocksource_rating); +	if (clockevent) { +		ch->cmt->has_clockevent = true; +		ret = sh_cmt_register_clockevent(ch, name); +		if (ret < 0) +			return ret; +	} + +	if (clocksource) { +		ch->cmt->has_clocksource = true; +		sh_cmt_register_clocksource(ch, name); +	}  	return 0;  } -static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) +static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, +				unsigned int hwidx, bool clockevent, +				bool clocksource, struct sh_cmt_device *cmt)  { -	struct sh_timer_config *cfg = pdev->dev.platform_data; -	struct 
resource *res, *res2; -	int irq, ret; -	ret = -ENXIO; +	int ret; -	memset(p, 0, sizeof(*p)); -	p->pdev = pdev; +	/* Skip unused channels. */ +	if (!clockevent && !clocksource) +		return 0; -	if (!cfg) { -		dev_err(&p->pdev->dev, "missing platform data\n"); -		goto err0; +	ch->cmt = cmt; +	ch->index = index; +	ch->hwidx = hwidx; + +	/* +	 * Compute the address of the channel control register block. For the +	 * timers with a per-channel start/stop register, compute its address +	 * as well. +	 * +	 * For legacy configuration the address has been mapped explicitly. +	 */ +	if (cmt->legacy) { +		ch->ioctrl = cmt->mapbase_ch; +	} else { +		switch (cmt->info->model) { +		case SH_CMT_16BIT: +			ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; +			break; +		case SH_CMT_32BIT: +		case SH_CMT_48BIT: +			ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; +			break; +		case SH_CMT_32BIT_FAST: +			/* +			 * The 32-bit "fast" timer has a single channel at hwidx +			 * 5 but is located at offset 0x40 instead of 0x60 for +			 * some reason. +			 */ +			ch->ioctrl = cmt->mapbase + 0x40; +			break; +		case SH_CMT_48BIT_GEN2: +			ch->iostart = cmt->mapbase + ch->hwidx * 0x100; +			ch->ioctrl = ch->iostart + 0x10; +			break; +		}  	} -	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); -	if (!res) { -		dev_err(&p->pdev->dev, "failed to get I/O memory\n"); -		goto err0; +	if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) +		ch->max_match_value = ~0; +	else +		ch->max_match_value = (1 << cmt->info->width) - 1; + +	ch->match_value = ch->max_match_value; +	raw_spin_lock_init(&ch->lock); + +	if (cmt->legacy) { +		ch->timer_bit = ch->hwidx; +	} else { +		ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 +			      ? 
0 : ch->hwidx;  	} -	/* optional resource for the shared timer start/stop register */ -	res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1); +	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), +			      clockevent, clocksource); +	if (ret) { +		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n", +			ch->index); +		return ret; +	} +	ch->cs_enabled = false; -	irq = platform_get_irq(p->pdev, 0); -	if (irq < 0) { -		dev_err(&p->pdev->dev, "failed to get irq\n"); -		goto err0; +	return 0; +} + +static int sh_cmt_map_memory(struct sh_cmt_device *cmt) +{ +	struct resource *mem; + +	mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); +	if (!mem) { +		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); +		return -ENXIO;  	} -	/* map memory, let mapbase point to our channel */ -	p->mapbase = ioremap_nocache(res->start, resource_size(res)); -	if (p->mapbase == NULL) { -		dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); -		goto err0; +	cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem)); +	if (cmt->mapbase == NULL) { +		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); +		return -ENXIO;  	} -	/* map second resource for CMSTR */ -	p->mapbase_str = ioremap_nocache(res2 ? res2->start : -					 res->start - cfg->channel_offset, -					 res2 ? 
resource_size(res2) : 2); -	if (p->mapbase_str == NULL) { -		dev_err(&p->pdev->dev, "failed to remap I/O second memory\n"); -		goto err1; +	return 0; +} + +static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt) +{ +	struct sh_timer_config *cfg = cmt->pdev->dev.platform_data; +	struct resource *res, *res2; + +	/* map memory, let mapbase_ch point to our channel */ +	res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); +	if (!res) { +		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); +		return -ENXIO;  	} -	/* request irq using setup_irq() (too early for request_irq()) */ -	p->irqaction.name = dev_name(&p->pdev->dev); -	p->irqaction.handler = sh_cmt_interrupt; -	p->irqaction.dev_id = p; -	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ -			     IRQF_IRQPOLL  | IRQF_NOBALANCING; - -	/* get hold of clock */ -	p->clk = clk_get(&p->pdev->dev, "cmt_fck"); -	if (IS_ERR(p->clk)) { -		dev_err(&p->pdev->dev, "cannot get clock\n"); -		ret = PTR_ERR(p->clk); -		goto err2; +	cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res)); +	if (cmt->mapbase_ch == NULL) { +		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); +		return -ENXIO;  	} -	if (res2 && (resource_size(res2) == 4)) { -		/* assume both CMSTR and CMCSR to be 32-bit */ -		p->read_control = sh_cmt_read32; -		p->write_control = sh_cmt_write32; -	} else { -		p->read_control = sh_cmt_read16; -		p->write_control = sh_cmt_write16; +	/* optional resource for the shared timer start/stop register */ +	res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1); + +	/* map second resource for CMSTR */ +	cmt->mapbase = ioremap_nocache(res2 ? res2->start : +				       res->start - cfg->channel_offset, +				       res2 ? 
resource_size(res2) : 2); +	if (cmt->mapbase == NULL) { +		dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n"); +		iounmap(cmt->mapbase_ch); +		return -ENXIO;  	} -	if (resource_size(res) == 6) { -		p->width = 16; -		p->read_count = sh_cmt_read16; -		p->write_count = sh_cmt_write16; -		p->overflow_bit = 0x80; -		p->clear_bits = ~0x80; -	} else { -		p->width = 32; -		p->read_count = sh_cmt_read32; -		p->write_count = sh_cmt_write32; -		p->overflow_bit = 0x8000; -		p->clear_bits = ~0xc000; +	/* identify the model based on the resources */ +	if (resource_size(res) == 6) +		cmt->info = &sh_cmt_info[SH_CMT_16BIT]; +	else if (res2 && (resource_size(res2) == 4)) +		cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2]; +	else +		cmt->info = &sh_cmt_info[SH_CMT_32BIT]; + +	return 0; +} + +static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt) +{ +	iounmap(cmt->mapbase); +	if (cmt->mapbase_ch) +		iounmap(cmt->mapbase_ch); +} + +static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) +{ +	struct sh_timer_config *cfg = pdev->dev.platform_data; +	const struct platform_device_id *id = pdev->id_entry; +	unsigned int hw_channels; +	int ret; + +	memset(cmt, 0, sizeof(*cmt)); +	cmt->pdev = pdev; + +	if (!cfg) { +		dev_err(&cmt->pdev->dev, "missing platform data\n"); +		return -ENXIO;  	} -	if (p->width == (sizeof(p->max_match_value) * 8)) -		p->max_match_value = ~0; +	cmt->info = (const struct sh_cmt_info *)id->driver_data; +	cmt->legacy = cmt->info ? false : true; + +	/* Get hold of clock. */ +	cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck"); +	if (IS_ERR(cmt->clk)) { +		dev_err(&cmt->pdev->dev, "cannot get clock\n"); +		return PTR_ERR(cmt->clk); +	} + +	ret = clk_prepare(cmt->clk); +	if (ret < 0) +		goto err_clk_put; + +	/* +	 * Map the memory resource(s). We need to support both the legacy +	 * platform device configuration (with one device per channel) and the +	 * new version (with multiple channels per device). 
+	 */ +	if (cmt->legacy) +		ret = sh_cmt_map_memory_legacy(cmt);  	else -		p->max_match_value = (1 << p->width) - 1; +		ret = sh_cmt_map_memory(cmt); -	p->match_value = p->max_match_value; -	raw_spin_lock_init(&p->lock); +	if (ret < 0) +		goto err_clk_unprepare; -	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev), -			      cfg->clockevent_rating, -			      cfg->clocksource_rating); -	if (ret) { -		dev_err(&p->pdev->dev, "registration failed\n"); -		goto err3; +	/* Allocate and setup the channels. */ +	if (cmt->legacy) { +		cmt->num_channels = 1; +		hw_channels = 0; +	} else { +		cmt->num_channels = hweight8(cfg->channels_mask); +		hw_channels = cfg->channels_mask;  	} -	p->cs_enabled = false; -	ret = setup_irq(irq, &p->irqaction); -	if (ret) { -		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); -		goto err3; +	cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), +				GFP_KERNEL); +	if (cmt->channels == NULL) { +		ret = -ENOMEM; +		goto err_unmap;  	} -	platform_set_drvdata(pdev, p); +	if (cmt->legacy) { +		ret = sh_cmt_setup_channel(&cmt->channels[0], +					   cfg->timer_bit, cfg->timer_bit, +					   cfg->clockevent_rating != 0, +					   cfg->clocksource_rating != 0, cmt); +		if (ret < 0) +			goto err_unmap; +	} else { +		unsigned int mask = hw_channels; +		unsigned int i; + +		/* +		 * Use the first channel as a clock event device and the second +		 * channel as a clock source. If only one channel is available +		 * use it for both. 
+		 */ +		for (i = 0; i < cmt->num_channels; ++i) { +			unsigned int hwidx = ffs(mask) - 1; +			bool clocksource = i == 1 || cmt->num_channels == 1; +			bool clockevent = i == 0; + +			ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx, +						   clockevent, clocksource, +						   cmt); +			if (ret < 0) +				goto err_unmap; + +			mask &= ~(1 << hwidx); +		} +	} + +	platform_set_drvdata(pdev, cmt);  	return 0; -err3: -	clk_put(p->clk); -err2: -	iounmap(p->mapbase_str); -err1: -	iounmap(p->mapbase); -err0: + +err_unmap: +	kfree(cmt->channels); +	sh_cmt_unmap_memory(cmt); +err_clk_unprepare: +	clk_unprepare(cmt->clk); +err_clk_put: +	clk_put(cmt->clk);  	return ret;  }  static int sh_cmt_probe(struct platform_device *pdev)  { -	struct sh_cmt_priv *p = platform_get_drvdata(pdev); -	struct sh_timer_config *cfg = pdev->dev.platform_data; +	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);  	int ret;  	if (!is_early_platform_device(pdev)) { @@ -807,20 +1100,18 @@ static int sh_cmt_probe(struct platform_device *pdev)  		pm_runtime_enable(&pdev->dev);  	} -	if (p) { +	if (cmt) {  		dev_info(&pdev->dev, "kept as earlytimer\n");  		goto out;  	} -	p = kmalloc(sizeof(*p), GFP_KERNEL); -	if (p == NULL) { -		dev_err(&pdev->dev, "failed to allocate driver data\n"); +	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL); +	if (cmt == NULL)  		return -ENOMEM; -	} -	ret = sh_cmt_setup(p, pdev); +	ret = sh_cmt_setup(cmt, pdev);  	if (ret) { -		kfree(p); +		kfree(cmt);  		pm_runtime_idle(&pdev->dev);  		return ret;  	} @@ -828,7 +1119,7 @@ static int sh_cmt_probe(struct platform_device *pdev)  		return 0;   out: -	if (cfg->clockevent_rating || cfg->clocksource_rating) +	if (cmt->has_clockevent || cmt->has_clocksource)  		pm_runtime_irq_safe(&pdev->dev);  	else  		pm_runtime_idle(&pdev->dev); @@ -841,12 +1132,24 @@ static int sh_cmt_remove(struct platform_device *pdev)  	return -EBUSY; /* cannot unregister clockevent and clocksource */  } +static const struct platform_device_id 
sh_cmt_id_table[] = { +	{ "sh_cmt", 0 }, +	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, +	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, +	{ "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, +	{ "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, +	{ "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, +	{ } +}; +MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); +  static struct platform_driver sh_cmt_device_driver = {  	.probe		= sh_cmt_probe,  	.remove		= sh_cmt_remove,  	.driver		= {  		.name	= "sh_cmt", -	} +	}, +	.id_table	= sh_cmt_id_table,  };  static int __init sh_cmt_init(void) diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 4aac9ee0d0c..188d4e092ef 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -11,37 +11,48 @@   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/err.h>  #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h>  #include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/delay.h>  #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h>  #include <linux/irq.h> -#include <linux/err.h> -#include <linux/clockchips.h> -#include <linux/sh_timer.h> -#include <linux/slab.h>  #include <linux/module.h> +#include <linux/platform_device.h>  #include <linux/pm_domain.h>  #include <linux/pm_runtime.h> +#include <linux/sh_timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +struct sh_mtu2_device; + +struct sh_mtu2_channel { +	struct sh_mtu2_device *mtu; +	unsigned int index; + +	void __iomem *base; +	int irq; + +	struct clock_event_device ced; +}; + +struct sh_mtu2_device { +	struct platform_device *pdev; -struct sh_mtu2_priv {  	void __iomem *mapbase;  	struct clk *clk; -	struct irqaction irqaction; -	struct platform_device *pdev; -	unsigned long rate; -	unsigned long periodic; -	struct clock_event_device ced; + +	struct sh_mtu2_channel *channels; +	unsigned int num_channels; + +	bool legacy; +	bool has_clockevent;  };  static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); @@ -55,6 +66,88 @@ static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);  #define TCNT 5 /* channel register */  #define TGR  6 /* channel register */ +#define TCR_CCLR_NONE		(0 << 5) +#define TCR_CCLR_TGRA		(1 << 5) +#define TCR_CCLR_TGRB		(2 << 5) +#define TCR_CCLR_SYNC		(3 << 5) +#define TCR_CCLR_TGRC		(5 << 5) +#define TCR_CCLR_TGRD		(6 << 5) +#define TCR_CCLR_MASK		(7 << 5) +#define TCR_CKEG_RISING		(0 << 3) +#define TCR_CKEG_FALLING	(1 << 3) +#define TCR_CKEG_BOTH		(2 << 3) +#define 
TCR_CKEG_MASK		(3 << 3) +/* Values 4 to 7 are channel-dependent */ +#define TCR_TPSC_P1		(0 << 0) +#define TCR_TPSC_P4		(1 << 0) +#define TCR_TPSC_P16		(2 << 0) +#define TCR_TPSC_P64		(3 << 0) +#define TCR_TPSC_CH0_TCLKA	(4 << 0) +#define TCR_TPSC_CH0_TCLKB	(5 << 0) +#define TCR_TPSC_CH0_TCLKC	(6 << 0) +#define TCR_TPSC_CH0_TCLKD	(7 << 0) +#define TCR_TPSC_CH1_TCLKA	(4 << 0) +#define TCR_TPSC_CH1_TCLKB	(5 << 0) +#define TCR_TPSC_CH1_P256	(6 << 0) +#define TCR_TPSC_CH1_TCNT2	(7 << 0) +#define TCR_TPSC_CH2_TCLKA	(4 << 0) +#define TCR_TPSC_CH2_TCLKB	(5 << 0) +#define TCR_TPSC_CH2_TCLKC	(6 << 0) +#define TCR_TPSC_CH2_P1024	(7 << 0) +#define TCR_TPSC_CH34_P256	(4 << 0) +#define TCR_TPSC_CH34_P1024	(5 << 0) +#define TCR_TPSC_CH34_TCLKA	(6 << 0) +#define TCR_TPSC_CH34_TCLKB	(7 << 0) +#define TCR_TPSC_MASK		(7 << 0) + +#define TMDR_BFE		(1 << 6) +#define TMDR_BFB		(1 << 5) +#define TMDR_BFA		(1 << 4) +#define TMDR_MD_NORMAL		(0 << 0) +#define TMDR_MD_PWM_1		(2 << 0) +#define TMDR_MD_PWM_2		(3 << 0) +#define TMDR_MD_PHASE_1		(4 << 0) +#define TMDR_MD_PHASE_2		(5 << 0) +#define TMDR_MD_PHASE_3		(6 << 0) +#define TMDR_MD_PHASE_4		(7 << 0) +#define TMDR_MD_PWM_SYNC	(8 << 0) +#define TMDR_MD_PWM_COMP_CREST	(13 << 0) +#define TMDR_MD_PWM_COMP_TROUGH	(14 << 0) +#define TMDR_MD_PWM_COMP_BOTH	(15 << 0) +#define TMDR_MD_MASK		(15 << 0) + +#define TIOC_IOCH(n)		((n) << 4) +#define TIOC_IOCL(n)		((n) << 0) +#define TIOR_OC_RETAIN		(0 << 0) +#define TIOR_OC_0_CLEAR		(1 << 0) +#define TIOR_OC_0_SET		(2 << 0) +#define TIOR_OC_0_TOGGLE	(3 << 0) +#define TIOR_OC_1_CLEAR		(5 << 0) +#define TIOR_OC_1_SET		(6 << 0) +#define TIOR_OC_1_TOGGLE	(7 << 0) +#define TIOR_IC_RISING		(8 << 0) +#define TIOR_IC_FALLING		(9 << 0) +#define TIOR_IC_BOTH		(10 << 0) +#define TIOR_IC_TCNT		(12 << 0) +#define TIOR_MASK		(15 << 0) + +#define TIER_TTGE		(1 << 7) +#define TIER_TTGE2		(1 << 6) +#define TIER_TCIEU		(1 << 5) +#define TIER_TCIEV		(1 << 4) +#define TIER_TGIED		(1 << 3) +#define TIER_TGIEC		(1 << 2) 
+#define TIER_TGIEB		(1 << 1) +#define TIER_TGIEA		(1 << 0) + +#define TSR_TCFD		(1 << 7) +#define TSR_TCFU		(1 << 5) +#define TSR_TCFV		(1 << 4) +#define TSR_TGFD		(1 << 3) +#define TSR_TGFC		(1 << 2) +#define TSR_TGFB		(1 << 1) +#define TSR_TGFA		(1 << 0) +  static unsigned long mtu2_reg_offs[] = {  	[TCR] = 0,  	[TMDR] = 1, @@ -65,135 +158,143 @@ static unsigned long mtu2_reg_offs[] = {  	[TGR] = 8,  }; -static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr) +static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase;  	unsigned long offs; -	if (reg_nr == TSTR) -		return ioread8(base + cfg->channel_offset); +	if (reg_nr == TSTR) { +		if (ch->mtu->legacy) +			return ioread8(ch->mtu->mapbase); +		else +			return ioread8(ch->mtu->mapbase + 0x280); +	}  	offs = mtu2_reg_offs[reg_nr];  	if ((reg_nr == TCNT) || (reg_nr == TGR)) -		return ioread16(base + offs); +		return ioread16(ch->base + offs);  	else -		return ioread8(base + offs); +		return ioread8(ch->base + offs);  } -static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr, +static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,  				unsigned long value)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase;  	unsigned long offs;  	if (reg_nr == TSTR) { -		iowrite8(value, base + cfg->channel_offset); -		return; +		if (ch->mtu->legacy) +			return iowrite8(value, ch->mtu->mapbase); +		else +			return iowrite8(value, ch->mtu->mapbase + 0x280);  	}  	offs = mtu2_reg_offs[reg_nr];  	if ((reg_nr == TCNT) || (reg_nr == TGR)) -		iowrite16(value, base + offs); +		iowrite16(value, ch->base + offs);  	else -		iowrite8(value, base + offs); +		iowrite8(value, ch->base + offs);  } -static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) +static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int 
start)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data;  	unsigned long flags, value;  	/* start stop register shared by multiple timer channels */  	raw_spin_lock_irqsave(&sh_mtu2_lock, flags); -	value = sh_mtu2_read(p, TSTR); +	value = sh_mtu2_read(ch, TSTR);  	if (start) -		value |= 1 << cfg->timer_bit; +		value |= 1 << ch->index;  	else -		value &= ~(1 << cfg->timer_bit); +		value &= ~(1 << ch->index); -	sh_mtu2_write(p, TSTR, value); +	sh_mtu2_write(ch, TSTR, value);  	raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);  } -static int sh_mtu2_enable(struct sh_mtu2_priv *p) +static int sh_mtu2_enable(struct sh_mtu2_channel *ch)  { +	unsigned long periodic; +	unsigned long rate;  	int ret; -	pm_runtime_get_sync(&p->pdev->dev); -	dev_pm_syscore_device(&p->pdev->dev, true); +	pm_runtime_get_sync(&ch->mtu->pdev->dev); +	dev_pm_syscore_device(&ch->mtu->pdev->dev, true);  	/* enable clock */ -	ret = clk_enable(p->clk); +	ret = clk_enable(ch->mtu->clk);  	if (ret) { -		dev_err(&p->pdev->dev, "cannot enable clock\n"); +		dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n", +			ch->index);  		return ret;  	}  	/* make sure channel is disabled */ -	sh_mtu2_start_stop_ch(p, 0); - -	p->rate = clk_get_rate(p->clk) / 64; -	p->periodic = (p->rate + HZ/2) / HZ; - -	/* "Periodic Counter Operation" */ -	sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */ -	sh_mtu2_write(p, TIOR, 0); -	sh_mtu2_write(p, TGR, p->periodic); -	sh_mtu2_write(p, TCNT, 0); -	sh_mtu2_write(p, TMDR, 0); -	sh_mtu2_write(p, TIER, 0x01); +	sh_mtu2_start_stop_ch(ch, 0); + +	rate = clk_get_rate(ch->mtu->clk) / 64; +	periodic = (rate + HZ/2) / HZ; + +	/* +	 * "Periodic Counter Operation" +	 * Clear on TGRA compare match, divide clock by 64. 
+	 */ +	sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64); +	sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) | +		      TIOC_IOCL(TIOR_OC_0_CLEAR)); +	sh_mtu2_write(ch, TGR, periodic); +	sh_mtu2_write(ch, TCNT, 0); +	sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL); +	sh_mtu2_write(ch, TIER, TIER_TGIEA);  	/* enable channel */ -	sh_mtu2_start_stop_ch(p, 1); +	sh_mtu2_start_stop_ch(ch, 1);  	return 0;  } -static void sh_mtu2_disable(struct sh_mtu2_priv *p) +static void sh_mtu2_disable(struct sh_mtu2_channel *ch)  {  	/* disable channel */ -	sh_mtu2_start_stop_ch(p, 0); +	sh_mtu2_start_stop_ch(ch, 0);  	/* stop clock */ -	clk_disable(p->clk); +	clk_disable(ch->mtu->clk); -	dev_pm_syscore_device(&p->pdev->dev, false); -	pm_runtime_put(&p->pdev->dev); +	dev_pm_syscore_device(&ch->mtu->pdev->dev, false); +	pm_runtime_put(&ch->mtu->pdev->dev);  }  static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)  { -	struct sh_mtu2_priv *p = dev_id; +	struct sh_mtu2_channel *ch = dev_id;  	/* acknowledge interrupt */ -	sh_mtu2_read(p, TSR); -	sh_mtu2_write(p, TSR, 0xfe); +	sh_mtu2_read(ch, TSR); +	sh_mtu2_write(ch, TSR, ~TSR_TGFA);  	/* notify clockevent layer */ -	p->ced.event_handler(&p->ced); +	ch->ced.event_handler(&ch->ced);  	return IRQ_HANDLED;  } -static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced) +static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)  { -	return container_of(ced, struct sh_mtu2_priv, ced); +	return container_of(ced, struct sh_mtu2_channel, ced);  }  static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,  				    struct clock_event_device *ced)  { -	struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced); +	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);  	int disabled = 0;  	/* deal with old setting first */  	switch (ced->mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		sh_mtu2_disable(p); +		sh_mtu2_disable(ch);  		disabled = 1;  		break;  	default: @@ -202,12 +303,13 @@ static void 
sh_mtu2_clock_event_mode(enum clock_event_mode mode,  	switch (mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		dev_info(&p->pdev->dev, "used for periodic clock events\n"); -		sh_mtu2_enable(p); +		dev_info(&ch->mtu->pdev->dev, +			 "ch%u: used for periodic clock events\n", ch->index); +		sh_mtu2_enable(ch);  		break;  	case CLOCK_EVT_MODE_UNUSED:  		if (!disabled) -			sh_mtu2_disable(p); +			sh_mtu2_disable(ch);  		break;  	case CLOCK_EVT_MODE_SHUTDOWN:  	default: @@ -217,114 +319,207 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,  static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)  { -	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev); +	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);  }  static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)  { -	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev); +	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);  } -static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, -				       char *name, unsigned long rating) +static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, +					const char *name)  { -	struct clock_event_device *ced = &p->ced; +	struct clock_event_device *ced = &ch->ced;  	int ret; -	memset(ced, 0, sizeof(*ced)); -  	ced->name = name;  	ced->features = CLOCK_EVT_FEAT_PERIODIC; -	ced->rating = rating; -	ced->cpumask = cpumask_of(0); +	ced->rating = 200; +	ced->cpumask = cpu_possible_mask;  	ced->set_mode = sh_mtu2_clock_event_mode;  	ced->suspend = sh_mtu2_clock_event_suspend;  	ced->resume = sh_mtu2_clock_event_resume; -	dev_info(&p->pdev->dev, "used for clock events\n"); +	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", +		 ch->index);  	clockevents_register_device(ced); -	ret = setup_irq(p->irqaction.irq, &p->irqaction); +	ret = request_irq(ch->irq, sh_mtu2_interrupt, +			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, +			  dev_name(&ch->mtu->pdev->dev), ch);  	if (ret) { -		
dev_err(&p->pdev->dev, "failed to request irq %d\n", -			p->irqaction.irq); +		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n", +			ch->index, ch->irq);  		return;  	}  } -static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name, -			    unsigned long clockevent_rating) +static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name, +			    bool clockevent)  { -	if (clockevent_rating) -		sh_mtu2_register_clockevent(p, name, clockevent_rating); +	if (clockevent) { +		ch->mtu->has_clockevent = true; +		sh_mtu2_register_clockevent(ch, name); +	}  	return 0;  } -static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) +static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index, +				 struct sh_mtu2_device *mtu)  { -	struct sh_timer_config *cfg = pdev->dev.platform_data; -	struct resource *res; -	int irq, ret; -	ret = -ENXIO; +	static const unsigned int channel_offsets[] = { +		0x300, 0x380, 0x000, +	}; +	bool clockevent; + +	ch->mtu = mtu; + +	if (mtu->legacy) { +		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; + +		clockevent = cfg->clockevent_rating != 0; + +		ch->irq = platform_get_irq(mtu->pdev, 0); +		ch->base = mtu->mapbase - cfg->channel_offset; +		ch->index = cfg->timer_bit; +	} else { +		char name[6]; -	memset(p, 0, sizeof(*p)); -	p->pdev = pdev; +		clockevent = true; -	if (!cfg) { -		dev_err(&p->pdev->dev, "missing platform data\n"); -		goto err0; +		sprintf(name, "tgi%ua", index); +		ch->irq = platform_get_irq_byname(mtu->pdev, name); +		ch->base = mtu->mapbase + channel_offsets[index]; +		ch->index = index;  	} -	platform_set_drvdata(pdev, p); +	if (ch->irq < 0) { +		/* Skip channels with no declared interrupt. 
*/ +		if (!mtu->legacy) +			return 0; + +		dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n", +			ch->index); +		return ch->irq; +	} + +	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent); +} + +static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) +{ +	struct resource *res; -	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); +	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);  	if (!res) { -		dev_err(&p->pdev->dev, "failed to get I/O memory\n"); -		goto err0; +		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n"); +		return -ENXIO;  	} -	irq = platform_get_irq(p->pdev, 0); -	if (irq < 0) { -		dev_err(&p->pdev->dev, "failed to get irq\n"); -		goto err0; +	mtu->mapbase = ioremap_nocache(res->start, resource_size(res)); +	if (mtu->mapbase == NULL) +		return -ENXIO; + +	/* +	 * In legacy platform device configuration (with one device per channel) +	 * the resource points to the channel base address. +	 */ +	if (mtu->legacy) { +		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; +		mtu->mapbase += cfg->channel_offset;  	} -	/* map memory, let mapbase point to our channel */ -	p->mapbase = ioremap_nocache(res->start, resource_size(res)); -	if (p->mapbase == NULL) { -		dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); -		goto err0; +	return 0; +} + +static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu) +{ +	if (mtu->legacy) { +		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; +		mtu->mapbase -= cfg->channel_offset;  	} -	/* setup data for setup_irq() (too early for request_irq()) */ -	p->irqaction.name = dev_name(&p->pdev->dev); -	p->irqaction.handler = sh_mtu2_interrupt; -	p->irqaction.dev_id = p; -	p->irqaction.irq = irq; -	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ -			     IRQF_IRQPOLL  | IRQF_NOBALANCING; - -	/* get hold of clock */ -	p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); -	if (IS_ERR(p->clk)) { -		dev_err(&p->pdev->dev, "cannot get clock\n"); -		ret = PTR_ERR(p->clk); -		
goto err1; +	iounmap(mtu->mapbase); +} + +static int sh_mtu2_setup(struct sh_mtu2_device *mtu, +			 struct platform_device *pdev) +{ +	struct sh_timer_config *cfg = pdev->dev.platform_data; +	const struct platform_device_id *id = pdev->id_entry; +	unsigned int i; +	int ret; + +	mtu->pdev = pdev; +	mtu->legacy = id->driver_data; + +	if (mtu->legacy && !cfg) { +		dev_err(&mtu->pdev->dev, "missing platform data\n"); +		return -ENXIO; +	} + +	/* Get hold of clock. */ +	mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck"); +	if (IS_ERR(mtu->clk)) { +		dev_err(&mtu->pdev->dev, "cannot get clock\n"); +		return PTR_ERR(mtu->clk); +	} + +	ret = clk_prepare(mtu->clk); +	if (ret < 0) +		goto err_clk_put; + +	/* Map the memory resource. */ +	ret = sh_mtu2_map_memory(mtu); +	if (ret < 0) { +		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n"); +		goto err_clk_unprepare; +	} + +	/* Allocate and setup the channels. */ +	if (mtu->legacy) +		mtu->num_channels = 1; +	else +		mtu->num_channels = 3; + +	mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, +				GFP_KERNEL); +	if (mtu->channels == NULL) { +		ret = -ENOMEM; +		goto err_unmap;  	} -	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), -				cfg->clockevent_rating); - err1: -	iounmap(p->mapbase); - err0: +	if (mtu->legacy) { +		ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu); +		if (ret < 0) +			goto err_unmap; +	} else { +		for (i = 0; i < mtu->num_channels; ++i) { +			ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu); +			if (ret < 0) +				goto err_unmap; +		} +	} + +	platform_set_drvdata(pdev, mtu); + +	return 0; + +err_unmap: +	kfree(mtu->channels); +	sh_mtu2_unmap_memory(mtu); +err_clk_unprepare: +	clk_unprepare(mtu->clk); +err_clk_put: +	clk_put(mtu->clk);  	return ret;  }  static int sh_mtu2_probe(struct platform_device *pdev)  { -	struct sh_mtu2_priv *p = platform_get_drvdata(pdev); -	struct sh_timer_config *cfg = pdev->dev.platform_data; +	struct 
sh_mtu2_device *mtu = platform_get_drvdata(pdev);  	int ret;  	if (!is_early_platform_device(pdev)) { @@ -332,21 +527,18 @@ static int sh_mtu2_probe(struct platform_device *pdev)  		pm_runtime_enable(&pdev->dev);  	} -	if (p) { +	if (mtu) {  		dev_info(&pdev->dev, "kept as earlytimer\n");  		goto out;  	} -	p = kmalloc(sizeof(*p), GFP_KERNEL); -	if (p == NULL) { -		dev_err(&pdev->dev, "failed to allocate driver data\n"); +	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL); +	if (mtu == NULL)  		return -ENOMEM; -	} -	ret = sh_mtu2_setup(p, pdev); +	ret = sh_mtu2_setup(mtu, pdev);  	if (ret) { -		kfree(p); -		platform_set_drvdata(pdev, NULL); +		kfree(mtu);  		pm_runtime_idle(&pdev->dev);  		return ret;  	} @@ -354,7 +546,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)  		return 0;   out: -	if (cfg->clockevent_rating) +	if (mtu->has_clockevent)  		pm_runtime_irq_safe(&pdev->dev);  	else  		pm_runtime_idle(&pdev->dev); @@ -367,12 +559,20 @@ static int sh_mtu2_remove(struct platform_device *pdev)  	return -EBUSY; /* cannot unregister clockevent */  } +static const struct platform_device_id sh_mtu2_id_table[] = { +	{ "sh_mtu2", 1 }, +	{ "sh-mtu2", 0 }, +	{ }, +}; +MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table); +  static struct platform_driver sh_mtu2_device_driver = {  	.probe		= sh_mtu2_probe,  	.remove		= sh_mtu2_remove,  	.driver		= {  		.name	= "sh_mtu2", -	} +	}, +	.id_table	= sh_mtu2_id_table,  };  static int __init sh_mtu2_init(void) diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 78b8dae4962..6bd17a8f3dd 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -11,35 +11,41 @@   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/delay.h> +#include <linux/err.h>  #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h>  #include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/delay.h>  #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h>  #include <linux/irq.h> -#include <linux/err.h> -#include <linux/clocksource.h> -#include <linux/clockchips.h> -#include <linux/sh_timer.h> -#include <linux/slab.h>  #include <linux/module.h> +#include <linux/platform_device.h>  #include <linux/pm_domain.h>  #include <linux/pm_runtime.h> +#include <linux/sh_timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +enum sh_tmu_model { +	SH_TMU_LEGACY, +	SH_TMU, +	SH_TMU_SH3, +}; + +struct sh_tmu_device; + +struct sh_tmu_channel { +	struct sh_tmu_device *tmu; +	unsigned int index; + +	void __iomem *base; +	int irq; -struct sh_tmu_priv { -	void __iomem *mapbase; -	struct clk *clk; -	struct irqaction irqaction; -	struct platform_device *pdev;  	unsigned long rate;  	unsigned long periodic;  	struct clock_event_device ced; @@ -48,6 +54,21 @@ struct sh_tmu_priv {  	unsigned int enable_count;  }; +struct sh_tmu_device { +	struct platform_device *pdev; + +	void __iomem *mapbase; +	struct clk *clk; + +	enum sh_tmu_model model; + +	struct sh_tmu_channel *channels; +	unsigned int num_channels; + +	bool has_clockevent; +	bool has_clocksource; +}; +  static DEFINE_RAW_SPINLOCK(sh_tmu_lock);  #define TSTR -1 /* shared register */ @@ -55,189 +76,208 @@ static DEFINE_RAW_SPINLOCK(sh_tmu_lock);  #define TCNT 1 /* channel register */  #define TCR 2 /* channel register */ -static inline unsigned long sh_tmu_read(struct sh_tmu_priv 
*p, int reg_nr) +#define TCR_UNF			(1 << 8) +#define TCR_UNIE		(1 << 5) +#define TCR_TPSC_CLK4		(0 << 0) +#define TCR_TPSC_CLK16		(1 << 0) +#define TCR_TPSC_CLK64		(2 << 0) +#define TCR_TPSC_CLK256		(3 << 0) +#define TCR_TPSC_CLK1024	(4 << 0) +#define TCR_TPSC_MASK		(7 << 0) + +static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase;  	unsigned long offs; -	if (reg_nr == TSTR) -		return ioread8(base - cfg->channel_offset); +	if (reg_nr == TSTR) { +		switch (ch->tmu->model) { +		case SH_TMU_LEGACY: +			return ioread8(ch->tmu->mapbase); +		case SH_TMU_SH3: +			return ioread8(ch->tmu->mapbase + 2); +		case SH_TMU: +			return ioread8(ch->tmu->mapbase + 4); +		} +	}  	offs = reg_nr << 2;  	if (reg_nr == TCR) -		return ioread16(base + offs); +		return ioread16(ch->base + offs);  	else -		return ioread32(base + offs); +		return ioread32(ch->base + offs);  } -static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr, +static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,  				unsigned long value)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase;  	unsigned long offs;  	if (reg_nr == TSTR) { -		iowrite8(value, base - cfg->channel_offset); -		return; +		switch (ch->tmu->model) { +		case SH_TMU_LEGACY: +			return iowrite8(value, ch->tmu->mapbase); +		case SH_TMU_SH3: +			return iowrite8(value, ch->tmu->mapbase + 2); +		case SH_TMU: +			return iowrite8(value, ch->tmu->mapbase + 4); +		}  	}  	offs = reg_nr << 2;  	if (reg_nr == TCR) -		iowrite16(value, base + offs); +		iowrite16(value, ch->base + offs);  	else -		iowrite32(value, base + offs); +		iowrite32(value, ch->base + offs);  } -static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) +static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data;  	
unsigned long flags, value;  	/* start stop register shared by multiple timer channels */  	raw_spin_lock_irqsave(&sh_tmu_lock, flags); -	value = sh_tmu_read(p, TSTR); +	value = sh_tmu_read(ch, TSTR);  	if (start) -		value |= 1 << cfg->timer_bit; +		value |= 1 << ch->index;  	else -		value &= ~(1 << cfg->timer_bit); +		value &= ~(1 << ch->index); -	sh_tmu_write(p, TSTR, value); +	sh_tmu_write(ch, TSTR, value);  	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);  } -static int __sh_tmu_enable(struct sh_tmu_priv *p) +static int __sh_tmu_enable(struct sh_tmu_channel *ch)  {  	int ret;  	/* enable clock */ -	ret = clk_enable(p->clk); +	ret = clk_enable(ch->tmu->clk);  	if (ret) { -		dev_err(&p->pdev->dev, "cannot enable clock\n"); +		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n", +			ch->index);  		return ret;  	}  	/* make sure channel is disabled */ -	sh_tmu_start_stop_ch(p, 0); +	sh_tmu_start_stop_ch(ch, 0);  	/* maximum timeout */ -	sh_tmu_write(p, TCOR, 0xffffffff); -	sh_tmu_write(p, TCNT, 0xffffffff); +	sh_tmu_write(ch, TCOR, 0xffffffff); +	sh_tmu_write(ch, TCNT, 0xffffffff);  	/* configure channel to parent clock / 4, irq off */ -	p->rate = clk_get_rate(p->clk) / 4; -	sh_tmu_write(p, TCR, 0x0000); +	ch->rate = clk_get_rate(ch->tmu->clk) / 4; +	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);  	/* enable channel */ -	sh_tmu_start_stop_ch(p, 1); +	sh_tmu_start_stop_ch(ch, 1);  	return 0;  } -static int sh_tmu_enable(struct sh_tmu_priv *p) +static int sh_tmu_enable(struct sh_tmu_channel *ch)  { -	if (p->enable_count++ > 0) +	if (ch->enable_count++ > 0)  		return 0; -	pm_runtime_get_sync(&p->pdev->dev); -	dev_pm_syscore_device(&p->pdev->dev, true); +	pm_runtime_get_sync(&ch->tmu->pdev->dev); +	dev_pm_syscore_device(&ch->tmu->pdev->dev, true); -	return __sh_tmu_enable(p); +	return __sh_tmu_enable(ch);  } -static void __sh_tmu_disable(struct sh_tmu_priv *p) +static void __sh_tmu_disable(struct sh_tmu_channel *ch)  {  	/* disable channel */ -	sh_tmu_start_stop_ch(p, 
0); +	sh_tmu_start_stop_ch(ch, 0);  	/* disable interrupts in TMU block */ -	sh_tmu_write(p, TCR, 0x0000); +	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);  	/* stop clock */ -	clk_disable(p->clk); +	clk_disable(ch->tmu->clk);  } -static void sh_tmu_disable(struct sh_tmu_priv *p) +static void sh_tmu_disable(struct sh_tmu_channel *ch)  { -	if (WARN_ON(p->enable_count == 0)) +	if (WARN_ON(ch->enable_count == 0))  		return; -	if (--p->enable_count > 0) +	if (--ch->enable_count > 0)  		return; -	__sh_tmu_disable(p); +	__sh_tmu_disable(ch); -	dev_pm_syscore_device(&p->pdev->dev, false); -	pm_runtime_put(&p->pdev->dev); +	dev_pm_syscore_device(&ch->tmu->pdev->dev, false); +	pm_runtime_put(&ch->tmu->pdev->dev);  } -static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, +static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,  			    int periodic)  {  	/* stop timer */ -	sh_tmu_start_stop_ch(p, 0); +	sh_tmu_start_stop_ch(ch, 0);  	/* acknowledge interrupt */ -	sh_tmu_read(p, TCR); +	sh_tmu_read(ch, TCR);  	/* enable interrupt */ -	sh_tmu_write(p, TCR, 0x0020); +	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);  	/* reload delta value in case of periodic timer */  	if (periodic) -		sh_tmu_write(p, TCOR, delta); +		sh_tmu_write(ch, TCOR, delta);  	else -		sh_tmu_write(p, TCOR, 0xffffffff); +		sh_tmu_write(ch, TCOR, 0xffffffff); -	sh_tmu_write(p, TCNT, delta); +	sh_tmu_write(ch, TCNT, delta);  	/* start timer */ -	sh_tmu_start_stop_ch(p, 1); +	sh_tmu_start_stop_ch(ch, 1);  }  static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)  { -	struct sh_tmu_priv *p = dev_id; +	struct sh_tmu_channel *ch = dev_id;  	/* disable or acknowledge interrupt */ -	if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) -		sh_tmu_write(p, TCR, 0x0000); +	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) +		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);  	else -		sh_tmu_write(p, TCR, 0x0020); +		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);  	/* notify clockevent layer */ -	
p->ced.event_handler(&p->ced); +	ch->ced.event_handler(&ch->ced);  	return IRQ_HANDLED;  } -static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs) +static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)  { -	return container_of(cs, struct sh_tmu_priv, cs); +	return container_of(cs, struct sh_tmu_channel, cs);  }  static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)  { -	struct sh_tmu_priv *p = cs_to_sh_tmu(cs); +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); -	return sh_tmu_read(p, TCNT) ^ 0xffffffff; +	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;  }  static int sh_tmu_clocksource_enable(struct clocksource *cs)  { -	struct sh_tmu_priv *p = cs_to_sh_tmu(cs); +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);  	int ret; -	if (WARN_ON(p->cs_enabled)) +	if (WARN_ON(ch->cs_enabled))  		return 0; -	ret = sh_tmu_enable(p); +	ret = sh_tmu_enable(ch);  	if (!ret) { -		__clocksource_updatefreq_hz(cs, p->rate); -		p->cs_enabled = true; +		__clocksource_updatefreq_hz(cs, ch->rate); +		ch->cs_enabled = true;  	}  	return ret; @@ -245,48 +285,48 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs)  static void sh_tmu_clocksource_disable(struct clocksource *cs)  { -	struct sh_tmu_priv *p = cs_to_sh_tmu(cs); +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); -	if (WARN_ON(!p->cs_enabled)) +	if (WARN_ON(!ch->cs_enabled))  		return; -	sh_tmu_disable(p); -	p->cs_enabled = false; +	sh_tmu_disable(ch); +	ch->cs_enabled = false;  }  static void sh_tmu_clocksource_suspend(struct clocksource *cs)  { -	struct sh_tmu_priv *p = cs_to_sh_tmu(cs); +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); -	if (!p->cs_enabled) +	if (!ch->cs_enabled)  		return; -	if (--p->enable_count == 0) { -		__sh_tmu_disable(p); -		pm_genpd_syscore_poweroff(&p->pdev->dev); +	if (--ch->enable_count == 0) { +		__sh_tmu_disable(ch); +		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);  	}  }  static void sh_tmu_clocksource_resume(struct clocksource *cs)  { -	struct sh_tmu_priv *p = 
cs_to_sh_tmu(cs); +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); -	if (!p->cs_enabled) +	if (!ch->cs_enabled)  		return; -	if (p->enable_count++ == 0) { -		pm_genpd_syscore_poweron(&p->pdev->dev); -		__sh_tmu_enable(p); +	if (ch->enable_count++ == 0) { +		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev); +		__sh_tmu_enable(ch);  	}  } -static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, -				       char *name, unsigned long rating) +static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch, +				       const char *name)  { -	struct clocksource *cs = &p->cs; +	struct clocksource *cs = &ch->cs;  	cs->name = name; -	cs->rating = rating; +	cs->rating = 200;  	cs->read = sh_tmu_clocksource_read;  	cs->enable = sh_tmu_clocksource_enable;  	cs->disable = sh_tmu_clocksource_disable; @@ -295,43 +335,44 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,  	cs->mask = CLOCKSOURCE_MASK(32);  	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; -	dev_info(&p->pdev->dev, "used as clock source\n"); +	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n", +		 ch->index);  	/* Register with dummy 1 Hz value, gets updated in ->enable() */  	clocksource_register_hz(cs, 1);  	return 0;  } -static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced) +static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)  { -	return container_of(ced, struct sh_tmu_priv, ced); +	return container_of(ced, struct sh_tmu_channel, ced);  } -static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic) +static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)  { -	struct clock_event_device *ced = &p->ced; +	struct clock_event_device *ced = &ch->ced; -	sh_tmu_enable(p); +	sh_tmu_enable(ch); -	clockevents_config(ced, p->rate); +	clockevents_config(ced, ch->rate);  	if (periodic) { -		p->periodic = (p->rate + HZ/2) / HZ; -		sh_tmu_set_next(p, p->periodic, 1); +		ch->periodic = (ch->rate + HZ/2) / HZ; +		sh_tmu_set_next(ch, 
ch->periodic, 1);  	}  }  static void sh_tmu_clock_event_mode(enum clock_event_mode mode,  				    struct clock_event_device *ced)  { -	struct sh_tmu_priv *p = ced_to_sh_tmu(ced); +	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);  	int disabled = 0;  	/* deal with old setting first */  	switch (ced->mode) {  	case CLOCK_EVT_MODE_PERIODIC:  	case CLOCK_EVT_MODE_ONESHOT: -		sh_tmu_disable(p); +		sh_tmu_disable(ch);  		disabled = 1;  		break;  	default: @@ -340,16 +381,18 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,  	switch (mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		dev_info(&p->pdev->dev, "used for periodic clock events\n"); -		sh_tmu_clock_event_start(p, 1); +		dev_info(&ch->tmu->pdev->dev, +			 "ch%u: used for periodic clock events\n", ch->index); +		sh_tmu_clock_event_start(ch, 1);  		break;  	case CLOCK_EVT_MODE_ONESHOT: -		dev_info(&p->pdev->dev, "used for oneshot clock events\n"); -		sh_tmu_clock_event_start(p, 0); +		dev_info(&ch->tmu->pdev->dev, +			 "ch%u: used for oneshot clock events\n", ch->index); +		sh_tmu_clock_event_start(ch, 0);  		break;  	case CLOCK_EVT_MODE_UNUSED:  		if (!disabled) -			sh_tmu_disable(p); +			sh_tmu_disable(ch);  		break;  	case CLOCK_EVT_MODE_SHUTDOWN:  	default: @@ -360,134 +403,234 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,  static int sh_tmu_clock_event_next(unsigned long delta,  				   struct clock_event_device *ced)  { -	struct sh_tmu_priv *p = ced_to_sh_tmu(ced); +	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);  	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);  	/* program new delta value */ -	sh_tmu_set_next(p, delta, 0); +	sh_tmu_set_next(ch, delta, 0);  	return 0;  }  static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)  { -	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev); +	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);  }  static void sh_tmu_clock_event_resume(struct clock_event_device *ced)  { -	
pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev); +	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);  } -static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, -				       char *name, unsigned long rating) +static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch, +				       const char *name)  { -	struct clock_event_device *ced = &p->ced; +	struct clock_event_device *ced = &ch->ced;  	int ret; -	memset(ced, 0, sizeof(*ced)); -  	ced->name = name;  	ced->features = CLOCK_EVT_FEAT_PERIODIC;  	ced->features |= CLOCK_EVT_FEAT_ONESHOT; -	ced->rating = rating; +	ced->rating = 200;  	ced->cpumask = cpumask_of(0);  	ced->set_next_event = sh_tmu_clock_event_next;  	ced->set_mode = sh_tmu_clock_event_mode;  	ced->suspend = sh_tmu_clock_event_suspend;  	ced->resume = sh_tmu_clock_event_resume; -	dev_info(&p->pdev->dev, "used for clock events\n"); +	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n", +		 ch->index);  	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); -	ret = setup_irq(p->irqaction.irq, &p->irqaction); +	ret = request_irq(ch->irq, sh_tmu_interrupt, +			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, +			  dev_name(&ch->tmu->pdev->dev), ch);  	if (ret) { -		dev_err(&p->pdev->dev, "failed to request irq %d\n", -			p->irqaction.irq); +		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n", +			ch->index, ch->irq);  		return;  	}  } -static int sh_tmu_register(struct sh_tmu_priv *p, char *name, -		    unsigned long clockevent_rating, -		    unsigned long clocksource_rating) +static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name, +			   bool clockevent, bool clocksource)  { -	if (clockevent_rating) -		sh_tmu_register_clockevent(p, name, clockevent_rating); -	else if (clocksource_rating) -		sh_tmu_register_clocksource(p, name, clocksource_rating); +	if (clockevent) { +		ch->tmu->has_clockevent = true; +		sh_tmu_register_clockevent(ch, name); +	} else if (clocksource) { +		
ch->tmu->has_clocksource = true; +		sh_tmu_register_clocksource(ch, name); +	}  	return 0;  } -static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) +static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index, +				bool clockevent, bool clocksource, +				struct sh_tmu_device *tmu) +{ +	/* Skip unused channels. */ +	if (!clockevent && !clocksource) +		return 0; + +	ch->tmu = tmu; + +	if (tmu->model == SH_TMU_LEGACY) { +		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; + +		/* +		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps +		 * channel registers blocks at base + 2 + 12 * index, while all +		 * other variants map them at base + 4 + 12 * index. We can +		 * compute the index by just dividing by 12, the 2 bytes or 4 +		 * bytes offset being hidden by the integer division. +		 */ +		ch->index = cfg->channel_offset / 12; +		ch->base = tmu->mapbase + cfg->channel_offset; +	} else { +		ch->index = index; + +		if (tmu->model == SH_TMU_SH3) +			ch->base = tmu->mapbase + 4 + ch->index * 12; +		else +			ch->base = tmu->mapbase + 8 + ch->index * 12; +	} + +	ch->irq = platform_get_irq(tmu->pdev, index); +	if (ch->irq < 0) { +		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n", +			ch->index); +		return ch->irq; +	} + +	ch->cs_enabled = false; +	ch->enable_count = 0; + +	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev), +			       clockevent, clocksource); +} + +static int sh_tmu_map_memory(struct sh_tmu_device *tmu)  { -	struct sh_timer_config *cfg = pdev->dev.platform_data;  	struct resource *res; -	int irq, ret; -	ret = -ENXIO; -	memset(p, 0, sizeof(*p)); -	p->pdev = pdev; +	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0); +	if (!res) { +		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n"); +		return -ENXIO; +	} + +	tmu->mapbase = ioremap_nocache(res->start, resource_size(res)); +	if (tmu->mapbase == NULL) +		return -ENXIO; + +	/* +	 * In legacy platform device configuration (with one 
device per channel) +	 * the resource points to the channel base address. +	 */ +	if (tmu->model == SH_TMU_LEGACY) { +		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; +		tmu->mapbase -= cfg->channel_offset; +	} + +	return 0; +} + +static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu) +{ +	if (tmu->model == SH_TMU_LEGACY) { +		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; +		tmu->mapbase += cfg->channel_offset; +	} + +	iounmap(tmu->mapbase); +} + +static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) +{ +	struct sh_timer_config *cfg = pdev->dev.platform_data; +	const struct platform_device_id *id = pdev->id_entry; +	unsigned int i; +	int ret;  	if (!cfg) { -		dev_err(&p->pdev->dev, "missing platform data\n"); -		goto err0; +		dev_err(&tmu->pdev->dev, "missing platform data\n"); +		return -ENXIO;  	} -	platform_set_drvdata(pdev, p); +	tmu->pdev = pdev; +	tmu->model = id->driver_data; -	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); -	if (!res) { -		dev_err(&p->pdev->dev, "failed to get I/O memory\n"); -		goto err0; +	/* Get hold of clock. */ +	tmu->clk = clk_get(&tmu->pdev->dev, +			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck"); +	if (IS_ERR(tmu->clk)) { +		dev_err(&tmu->pdev->dev, "cannot get clock\n"); +		return PTR_ERR(tmu->clk);  	} -	irq = platform_get_irq(p->pdev, 0); -	if (irq < 0) { -		dev_err(&p->pdev->dev, "failed to get irq\n"); -		goto err0; +	ret = clk_prepare(tmu->clk); +	if (ret < 0) +		goto err_clk_put; + +	/* Map the memory resource. */ +	ret = sh_tmu_map_memory(tmu); +	if (ret < 0) { +		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n"); +		goto err_clk_unprepare;  	} -	/* map memory, let mapbase point to our channel */ -	p->mapbase = ioremap_nocache(res->start, resource_size(res)); -	if (p->mapbase == NULL) { -		dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); -		goto err0; +	/* Allocate and setup the channels. 
*/ +	if (tmu->model == SH_TMU_LEGACY) +		tmu->num_channels = 1; +	else +		tmu->num_channels = hweight8(cfg->channels_mask); + +	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, +				GFP_KERNEL); +	if (tmu->channels == NULL) { +		ret = -ENOMEM; +		goto err_unmap;  	} -	/* setup data for setup_irq() (too early for request_irq()) */ -	p->irqaction.name = dev_name(&p->pdev->dev); -	p->irqaction.handler = sh_tmu_interrupt; -	p->irqaction.dev_id = p; -	p->irqaction.irq = irq; -	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ -			     IRQF_IRQPOLL  | IRQF_NOBALANCING; - -	/* get hold of clock */ -	p->clk = clk_get(&p->pdev->dev, "tmu_fck"); -	if (IS_ERR(p->clk)) { -		dev_err(&p->pdev->dev, "cannot get clock\n"); -		ret = PTR_ERR(p->clk); -		goto err1; +	if (tmu->model == SH_TMU_LEGACY) { +		ret = sh_tmu_channel_setup(&tmu->channels[0], 0, +					   cfg->clockevent_rating != 0, +					   cfg->clocksource_rating != 0, tmu); +		if (ret < 0) +			goto err_unmap; +	} else { +		/* +		 * Use the first channel as a clock event device and the second +		 * channel as a clock source. 
+		 */ +		for (i = 0; i < tmu->num_channels; ++i) { +			ret = sh_tmu_channel_setup(&tmu->channels[i], i, +						   i == 0, i == 1, tmu); +			if (ret < 0) +				goto err_unmap; +		}  	} -	p->cs_enabled = false; -	p->enable_count = 0; - -	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), -			       cfg->clockevent_rating, -			       cfg->clocksource_rating); - err1: -	iounmap(p->mapbase); - err0: + +	platform_set_drvdata(pdev, tmu); + +	return 0; + +err_unmap: +	kfree(tmu->channels); +	sh_tmu_unmap_memory(tmu); +err_clk_unprepare: +	clk_unprepare(tmu->clk); +err_clk_put: +	clk_put(tmu->clk);  	return ret;  }  static int sh_tmu_probe(struct platform_device *pdev)  { -	struct sh_tmu_priv *p = platform_get_drvdata(pdev); -	struct sh_timer_config *cfg = pdev->dev.platform_data; +	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);  	int ret;  	if (!is_early_platform_device(pdev)) { @@ -495,21 +638,18 @@ static int sh_tmu_probe(struct platform_device *pdev)  		pm_runtime_enable(&pdev->dev);  	} -	if (p) { +	if (tmu) {  		dev_info(&pdev->dev, "kept as earlytimer\n");  		goto out;  	} -	p = kmalloc(sizeof(*p), GFP_KERNEL); -	if (p == NULL) { -		dev_err(&pdev->dev, "failed to allocate driver data\n"); +	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL); +	if (tmu == NULL)  		return -ENOMEM; -	} -	ret = sh_tmu_setup(p, pdev); +	ret = sh_tmu_setup(tmu, pdev);  	if (ret) { -		kfree(p); -		platform_set_drvdata(pdev, NULL); +		kfree(tmu);  		pm_runtime_idle(&pdev->dev);  		return ret;  	} @@ -517,7 +657,7 @@ static int sh_tmu_probe(struct platform_device *pdev)  		return 0;   out: -	if (cfg->clockevent_rating || cfg->clocksource_rating) +	if (tmu->has_clockevent || tmu->has_clocksource)  		pm_runtime_irq_safe(&pdev->dev);  	else  		pm_runtime_idle(&pdev->dev); @@ -530,12 +670,21 @@ static int sh_tmu_remove(struct platform_device *pdev)  	return -EBUSY; /* cannot unregister clockevent and clocksource */  } +static const struct platform_device_id sh_tmu_id_table[] = { +	{ 
"sh_tmu", SH_TMU_LEGACY }, +	{ "sh-tmu", SH_TMU }, +	{ "sh-tmu-sh3", SH_TMU_SH3 }, +	{ } +}; +MODULE_DEVICE_TABLE(platform, sh_tmu_id_table); +  static struct platform_driver sh_tmu_device_driver = {  	.probe		= sh_tmu_probe,  	.remove		= sh_tmu_remove,  	.driver		= {  		.name	= "sh_tmu", -	} +	}, +	.id_table	= sh_tmu_id_table,  };  static int __init sh_tmu_init(void) diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index 8ead0258740..efb17c3ee12 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -37,6 +37,8 @@  #define TIMER_INTVAL_REG(val)	(0x10 * (val) + 0x14)  #define TIMER_CNTVAL_REG(val)	(0x10 * (val) + 0x18) +#define TIMER_SYNC_TICKS	3 +  static void __iomem *timer_base;  static u32 ticks_per_jiffy; @@ -50,7 +52,7 @@ static void sun4i_clkevt_sync(void)  {  	u32 old = readl(timer_base + TIMER_CNTVAL_REG(1)); -	while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < 3) +	while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < TIMER_SYNC_TICKS)  		cpu_relax();  } @@ -104,7 +106,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,  				   struct clock_event_device *unused)  {  	sun4i_clkevt_time_stop(0); -	sun4i_clkevt_time_setup(0, evt); +	sun4i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS);  	sun4i_clkevt_time_start(0, false);  	return 0; @@ -112,7 +114,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,  static struct clock_event_device sun4i_clockevent = {  	.name = "sun4i_tick", -	.rating = 300, +	.rating = 350,  	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,  	.set_mode = sun4i_clkevt_mode,  	.set_next_event = sun4i_clkevt_next_event, @@ -131,12 +133,12 @@ static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)  static struct irqaction sun4i_timer_irq = {  	.name = "sun4i_timer0", -	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, +	.flags = IRQF_TIMER | IRQF_IRQPOLL,  	.handler = sun4i_timer_interrupt,  	.dev_id = &sun4i_clockevent,  }; 
-static u32 sun4i_timer_sched_read(void) +static u64 notrace sun4i_timer_sched_read(void)  {  	return ~readl(timer_base + TIMER_CNTVAL_REG(1));  } @@ -168,15 +170,18 @@ static void __init sun4i_timer_init(struct device_node *node)  	       TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),  	       timer_base + TIMER_CTL_REG(1)); -	setup_sched_clock(sun4i_timer_sched_read, 32, rate); +	sched_clock_register(sun4i_timer_sched_read, 32, rate);  	clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, -			      rate, 300, 32, clocksource_mmio_readl_down); +			      rate, 350, 32, clocksource_mmio_readl_down);  	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);  	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),  	       timer_base + TIMER_CTL_REG(0)); +	/* Make sure timer is stopped before playing with interrupts */ +	sun4i_clkevt_time_stop(0); +  	ret = setup_irq(irq, &sun4i_timer_irq);  	if (ret)  		pr_warn("failed to setup irq %d\n", irq); @@ -185,10 +190,11 @@ static void __init sun4i_timer_init(struct device_node *node)  	val = readl(timer_base + TIMER_IRQ_EN_REG);  	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); -	sun4i_clockevent.cpumask = cpumask_of(0); +	sun4i_clockevent.cpumask = cpu_possible_mask; +	sun4i_clockevent.irq = irq; -	clockevents_config_and_register(&sun4i_clockevent, rate, 0x1, -					0xffffffff); +	clockevents_config_and_register(&sun4i_clockevent, rate, +					TIMER_SYNC_TICKS, 0xffffffff);  } -CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer", +CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",  		       sun4i_timer_init); diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 8a6187225dd..a8d7ea14f18 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -180,15 +180,22 @@ static irqreturn_t ch2_irq(int irq, void *handle)  static struct irqaction tc_irqaction = {  	.name		= "tc_clkevt", -	.flags		= IRQF_TIMER | IRQF_DISABLED, +	.flags		= IRQF_TIMER,  	
.handler	= ch2_irq,  }; -static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) +static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)  { +	int ret;  	struct clk *t2_clk = tc->clk[2];  	int irq = tc->irq[2]; +	/* try to enable t2 clk to avoid future errors in mode change */ +	ret = clk_prepare_enable(t2_clk); +	if (ret) +		return ret; +	clk_disable(t2_clk); +  	clkevt.regs = tc->regs;  	clkevt.clk = t2_clk;  	tc_irqaction.dev_id = &clkevt; @@ -197,16 +204,21 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)  	clkevt.clkevt.cpumask = cpumask_of(0); +	ret = setup_irq(irq, &tc_irqaction); +	if (ret) +		return ret; +  	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); -	setup_irq(irq, &tc_irqaction); +	return ret;  }  #else /* !CONFIG_GENERIC_CLOCKEVENTS */ -static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) +static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)  {  	/* NOTHING */ +	return 0;  }  #endif @@ -265,6 +277,7 @@ static int __init tcb_clksrc_init(void)  	int best_divisor_idx = -1;  	int clk32k_divisor_idx = -1;  	int i; +	int ret;  	tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);  	if (!tc) { @@ -275,7 +288,11 @@ static int __init tcb_clksrc_init(void)  	pdev = tc->pdev;  	t0_clk = tc->clk[0]; -	clk_enable(t0_clk); +	ret = clk_prepare_enable(t0_clk); +	if (ret) { +		pr_debug("can't enable T0 clk\n"); +		goto err_free_tc; +	}  	/* How fast will we be counting?  Pick something over 5 MHz.  */  	rate = (u32) clk_get_rate(t0_clk); @@ -313,17 +330,39 @@ static int __init tcb_clksrc_init(void)  		/* tclib will give us three clocks no matter what the  		 * underlying platform supports.  		 
*/ -		clk_enable(tc->clk[1]); +		ret = clk_prepare_enable(tc->clk[1]); +		if (ret) { +			pr_debug("can't enable T1 clk\n"); +			goto err_disable_t0; +		}  		/* setup both channel 0 & 1 */  		tcb_setup_dual_chan(tc, best_divisor_idx);  	}  	/* and away we go! */ -	clocksource_register_hz(&clksrc, divided_rate); +	ret = clocksource_register_hz(&clksrc, divided_rate); +	if (ret) +		goto err_disable_t1;  	/* channel 2:  periodic and oneshot timer support */ -	setup_clkevents(tc, clk32k_divisor_idx); +	ret = setup_clkevents(tc, clk32k_divisor_idx); +	if (ret) +		goto err_unregister_clksrc;  	return 0; + +err_unregister_clksrc: +	clocksource_unregister(&clksrc); + +err_disable_t1: +	if (!tc->tcb_config || tc->tcb_config->counter_width != 32) +		clk_disable_unprepare(tc->clk[1]); + +err_disable_t0: +	clk_disable_unprepare(t0_clk); + +err_free_tc: +	atmel_tc_free(tc); +	return ret;  }  arch_initcall(tcb_clksrc_init); diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index 93961703b88..d1869f02051 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c @@ -98,7 +98,7 @@ static struct clock_event_device tegra_clockevent = {  	.set_mode	= tegra_timer_set_mode,  }; -static u32 notrace tegra_read_sched_clock(void) +static u64 notrace tegra_read_sched_clock(void)  {  	return timer_readl(TIMERUS_CNTR_1US);  } @@ -149,7 +149,7 @@ static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)  static struct irqaction tegra_timer_irq = {  	.name		= "timer0", -	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH, +	.flags		= IRQF_TIMER | IRQF_TRIGGER_HIGH,  	.handler	= tegra_timer_interrupt,  	.dev_id		= &tegra_clockevent,  }; @@ -181,8 +181,6 @@ static void __init tegra20_init_timer(struct device_node *np)  		rate = clk_get_rate(clk);  	} -	of_node_put(np); -  	switch (rate) {  	case 12000000:  		timer_writel(0x000b, TIMERUS_USEC_CFG); @@ -200,7 +198,7 @@ static void __init tegra20_init_timer(struct 
device_node *np)  		WARN(1, "Unknown clock rate");  	} -	setup_sched_clock(tegra_read_sched_clock, 32, 1000000); +	sched_clock_register(tegra_read_sched_clock, 32, 1000000);  	if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,  		"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) { @@ -241,8 +239,6 @@ static void __init tegra20_init_rtc(struct device_node *np)  	else  		clk_prepare_enable(clk); -	of_node_put(np); -  	register_persistent_clock(NULL, tegra_read_persistent_clock);  }  CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc); diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 0198504ef6b..0451e62fac7 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -76,6 +76,7 @@  static void __iomem *timer_base, *local_base;  static unsigned int timer_clk;  static bool timer25Mhz = true; +static u32 enable_mask;  /*   * Number of timer ticks per jiffy. @@ -84,19 +85,13 @@ static u32 ticks_per_jiffy;  static struct clock_event_device __percpu *armada_370_xp_evt; -static void timer_ctrl_clrset(u32 clr, u32 set) -{ -	writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set, -		timer_base + TIMER_CTRL_OFF); -} -  static void local_timer_ctrl_clrset(u32 clr, u32 set)  {  	writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,  		local_base + TIMER_CTRL_OFF);  } -static u32 notrace armada_370_xp_read_sched_clock(void) +static u64 notrace armada_370_xp_read_sched_clock(void)  {  	return ~readl(timer_base + TIMER0_VAL_OFF);  } @@ -121,8 +116,7 @@ armada_370_xp_clkevt_next_event(unsigned long delta,  	/*  	 * Enable the timer.  	 */ -	local_timer_ctrl_clrset(TIMER0_RELOAD_EN, -				TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); +	local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask);  	return 0;  } @@ -141,9 +135,7 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,  		/*  		 * Enable timer.  		 
*/ -		local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | -					   TIMER0_EN | -					   TIMER0_DIV(TIMER_DIVIDER_SHIFT)); +		local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);  	} else {  		/*  		 * Disable timer. @@ -240,11 +232,14 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)  	WARN_ON(!timer_base);  	local_base = of_iomap(np, 1); -	if (timer25Mhz) +	if (timer25Mhz) {  		set = TIMER0_25MHZ;		 -	else +		enable_mask = TIMER0_EN; +	} else {  		clr = TIMER0_25MHZ; -	timer_ctrl_clrset(clr, set); +		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT); +	} +	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);  	local_timer_ctrl_clrset(clr, set);  	/* @@ -256,19 +251,20 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)  	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;  	/* -	 * Set scale and timer for sched_clock. -	 */ -	setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk); - -	/*  	 * Setup free-running clocksource timer (interrupts  	 * disabled).  	 */  	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);  	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); -	timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | -			     TIMER0_DIV(TIMER_DIVIDER_SHIFT)); +	atomic_io_modify(timer_base + TIMER_CTRL_OFF, +		TIMER0_RELOAD_EN | enable_mask, +		TIMER0_RELOAD_EN | enable_mask); + +	/* +	 * Set scale and timer for sched_clock. 
+	 */ +	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);  	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,  			      "armada_370_xp_clocksource", diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c new file mode 100644 index 00000000000..bba62f9deef --- /dev/null +++ b/drivers/clocksource/time-efm32.c @@ -0,0 +1,276 @@ +/* + * Copyright (C) 2013 Pengutronix + * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ + +#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/clk.h> + +#define TIMERn_CTRL			0x00 +#define TIMERn_CTRL_PRESC(val)			(((val) & 0xf) << 24) +#define TIMERn_CTRL_PRESC_1024			TIMERn_CTRL_PRESC(10) +#define TIMERn_CTRL_CLKSEL(val)			(((val) & 0x3) << 16) +#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK	TIMERn_CTRL_CLKSEL(0) +#define TIMERn_CTRL_OSMEN			0x00000010 +#define TIMERn_CTRL_MODE(val)			(((val) & 0x3) <<  0) +#define TIMERn_CTRL_MODE_UP			TIMERn_CTRL_MODE(0) +#define TIMERn_CTRL_MODE_DOWN			TIMERn_CTRL_MODE(1) + +#define TIMERn_CMD			0x04 +#define TIMERn_CMD_START			0x00000001 +#define TIMERn_CMD_STOP				0x00000002 + +#define TIMERn_IEN			0x0c +#define TIMERn_IF			0x10 +#define TIMERn_IFS			0x14 +#define TIMERn_IFC			0x18 +#define TIMERn_IRQ_UF				0x00000002 + +#define TIMERn_TOP			0x1c +#define TIMERn_CNT			0x24 + +struct efm32_clock_event_ddata { +	struct clock_event_device evtdev; +	void __iomem *base; +	unsigned periodic_top; +}; + +static void efm32_clock_event_set_mode(enum clock_event_mode mode, +				       struct clock_event_device *evtdev) +{ +	struct 
efm32_clock_event_ddata *ddata = +		container_of(evtdev, struct efm32_clock_event_ddata, evtdev); + +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); +		writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP); +		writel_relaxed(TIMERn_CTRL_PRESC_1024 | +			       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | +			       TIMERn_CTRL_MODE_DOWN, +			       ddata->base + TIMERn_CTRL); +		writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD); +		break; + +	case CLOCK_EVT_MODE_ONESHOT: +		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); +		writel_relaxed(TIMERn_CTRL_PRESC_1024 | +			       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | +			       TIMERn_CTRL_OSMEN | +			       TIMERn_CTRL_MODE_DOWN, +			       ddata->base + TIMERn_CTRL); +		break; + +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); +		break; + +	case CLOCK_EVT_MODE_RESUME: +		break; +	} +} + +static int efm32_clock_event_set_next_event(unsigned long evt, +					    struct clock_event_device *evtdev) +{ +	struct efm32_clock_event_ddata *ddata = +		container_of(evtdev, struct efm32_clock_event_ddata, evtdev); + +	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD); +	writel_relaxed(evt, ddata->base + TIMERn_CNT); +	writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD); + +	return 0; +} + +static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id) +{ +	struct efm32_clock_event_ddata *ddata = dev_id; + +	writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC); + +	ddata->evtdev.event_handler(&ddata->evtdev); + +	return IRQ_HANDLED; +} + +static struct efm32_clock_event_ddata clock_event_ddata = { +	.evtdev = { +		.name = "efm32 clockevent", +		.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_MODE_PERIODIC, +		.set_mode = efm32_clock_event_set_mode, +		.set_next_event = efm32_clock_event_set_next_event, +		.rating = 200, +	}, +}; + +static struct irqaction 
efm32_clock_event_irq = { +	.name = "efm32 clockevent", +	.flags = IRQF_TIMER, +	.handler = efm32_clock_event_handler, +	.dev_id = &clock_event_ddata, +}; + +static int __init efm32_clocksource_init(struct device_node *np) +{ +	struct clk *clk; +	void __iomem *base; +	unsigned long rate; +	int ret; + +	clk = of_clk_get(np, 0); +	if (IS_ERR(clk)) { +		ret = PTR_ERR(clk); +		pr_err("failed to get clock for clocksource (%d)\n", ret); +		goto err_clk_get; +	} + +	ret = clk_prepare_enable(clk); +	if (ret) { +		pr_err("failed to enable timer clock for clocksource (%d)\n", +		       ret); +		goto err_clk_enable; +	} +	rate = clk_get_rate(clk); + +	base = of_iomap(np, 0); +	if (!base) { +		ret = -EADDRNOTAVAIL; +		pr_err("failed to map registers for clocksource\n"); +		goto err_iomap; +	} + +	writel_relaxed(TIMERn_CTRL_PRESC_1024 | +		       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK | +		       TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL); +	writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD); + +	ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer", +				    DIV_ROUND_CLOSEST(rate, 1024), 200, 16, +				    clocksource_mmio_readl_up); +	if (ret) { +		pr_err("failed to init clocksource (%d)\n", ret); +		goto err_clocksource_init; +	} + +	return 0; + +err_clocksource_init: + +	iounmap(base); +err_iomap: + +	clk_disable_unprepare(clk); +err_clk_enable: + +	clk_put(clk); +err_clk_get: + +	return ret; +} + +static int __init efm32_clockevent_init(struct device_node *np) +{ +	struct clk *clk; +	void __iomem *base; +	unsigned long rate; +	int irq; +	int ret; + +	clk = of_clk_get(np, 0); +	if (IS_ERR(clk)) { +		ret = PTR_ERR(clk); +		pr_err("failed to get clock for clockevent (%d)\n", ret); +		goto err_clk_get; +	} + +	ret = clk_prepare_enable(clk); +	if (ret) { +		pr_err("failed to enable timer clock for clockevent (%d)\n", +		       ret); +		goto err_clk_enable; +	} +	rate = clk_get_rate(clk); + +	base = of_iomap(np, 0); +	if (!base) { +		ret = -EADDRNOTAVAIL; +		pr_err("failed to 
map registers for clockevent\n"); +		goto err_iomap; +	} + +	irq = irq_of_parse_and_map(np, 0); +	if (!irq) { +		ret = -ENOENT; +		pr_err("failed to get irq for clockevent\n"); +		goto err_get_irq; +	} + +	writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN); + +	clock_event_ddata.base = base; +	clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); + +	setup_irq(irq, &efm32_clock_event_irq); + +	clockevents_config_and_register(&clock_event_ddata.evtdev, +					DIV_ROUND_CLOSEST(rate, 1024), +					0xf, 0xffff); + +	return 0; + +err_get_irq: + +	iounmap(base); +err_iomap: + +	clk_disable_unprepare(clk); +err_clk_enable: + +	clk_put(clk); +err_clk_get: + +	return ret; +} + +/* + * This function asserts that we have exactly one clocksource and one + * clock_event_device in the end. + */ +static void __init efm32_timer_init(struct device_node *np) +{ +	static int has_clocksource, has_clockevent; +	int ret; + +	if (!has_clocksource) { +		ret = efm32_clocksource_init(np); +		if (!ret) { +			has_clocksource = 1; +			return; +		} +	} + +	if (!has_clockevent) { +		ret = efm32_clockevent_init(np); +		if (!ret) { +			has_clockevent = 1; +			return; +		} +	} +} +CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init); +CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init); diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c index 9c7f018a67c..0b3ce0399c5 100644 --- a/drivers/clocksource/time-orion.c +++ b/drivers/clocksource/time-orion.c @@ -35,25 +35,11 @@  #define ORION_ONESHOT_MAX	0xfffffffe  static void __iomem *timer_base; -static DEFINE_SPINLOCK(timer_ctrl_lock); - -/* - * Thread-safe access to TIMER_CTRL register - * (shared with watchdog timer) - */ -void orion_timer_ctrl_clrset(u32 clr, u32 set) -{ -	spin_lock(&timer_ctrl_lock); -	writel((readl(timer_base + TIMER_CTRL) & ~clr) | set, -		timer_base + TIMER_CTRL); -	spin_unlock(&timer_ctrl_lock); -} -EXPORT_SYMBOL(orion_timer_ctrl_clrset);  /*   * 
Free-running clocksource handling.   */ -static u32 notrace orion_read_sched_clock(void) +static u64 notrace orion_read_sched_clock(void)  {  	return ~readl(timer_base + TIMER0_VAL);  } @@ -68,7 +54,8 @@ static int orion_clkevt_next_event(unsigned long delta,  {  	/* setup and enable one-shot timer */  	writel(delta, timer_base + TIMER1_VAL); -	orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN); +	atomic_io_modify(timer_base + TIMER_CTRL, +		TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN);  	return 0;  } @@ -80,10 +67,13 @@ static void orion_clkevt_mode(enum clock_event_mode mode,  		/* setup and enable periodic timer at 1/HZ intervals */  		writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);  		writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); -		orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN); +		atomic_io_modify(timer_base + TIMER_CTRL, +			TIMER1_RELOAD_EN | TIMER1_EN, +			TIMER1_RELOAD_EN | TIMER1_EN);  	} else {  		/* disable timer */ -		orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0); +		atomic_io_modify(timer_base + TIMER_CTRL, +			TIMER1_RELOAD_EN | TIMER1_EN, 0);  	}  } @@ -131,11 +121,13 @@ static void __init orion_timer_init(struct device_node *np)  	/* setup timer0 as free-running clocksource */  	writel(~0, timer_base + TIMER0_VAL);  	writel(~0, timer_base + TIMER0_RELOAD); -	orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN); +	atomic_io_modify(timer_base + TIMER_CTRL, +		TIMER0_RELOAD_EN | TIMER0_EN, +		TIMER0_RELOAD_EN | TIMER0_EN);  	clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",  			      clk_get_rate(clk), 300, 32,  			      clocksource_mmio_readl_down); -	setup_sched_clock(orion_read_sched_clock, 32, clk_get_rate(clk)); +	sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));  	/* setup timer1 as clockevent timer */  	if (setup_irq(irq, &orion_clkevt_irq)) diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c new file mode 100644 index 
00000000000..0250354f7e5 --- /dev/null +++ b/drivers/clocksource/timer-keystone.c @@ -0,0 +1,241 @@ +/* + * Keystone broadcast clock-event + * + * Copyright 2013 Texas Instruments, Inc. + * + * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define TIMER_NAME			"timer-keystone" + +/* Timer register offsets */ +#define TIM12				0x10 +#define TIM34				0x14 +#define PRD12				0x18 +#define PRD34				0x1c +#define TCR				0x20 +#define TGCR				0x24 +#define INTCTLSTAT			0x44 + +/* Timer register bitfields */ +#define TCR_ENAMODE_MASK		0xC0 +#define TCR_ENAMODE_ONESHOT_MASK	0x40 +#define TCR_ENAMODE_PERIODIC_MASK	0x80 + +#define TGCR_TIM_UNRESET_MASK		0x03 +#define INTCTLSTAT_ENINT_MASK		0x01 + +/** + * struct keystone_timer: holds timer's data + * @base: timer memory base address + * @hz_period: cycles per HZ period + * @event_dev: event device based on timer + */ +static struct keystone_timer { +	void __iomem *base; +	unsigned long hz_period; +	struct clock_event_device event_dev; +} timer; + +static inline u32 keystone_timer_readl(unsigned long rg) +{ +	return readl_relaxed(timer.base + rg); +} + +static inline void keystone_timer_writel(u32 val, unsigned long rg) +{ +	writel_relaxed(val, timer.base + rg); +} + +/** + * keystone_timer_barrier: write memory barrier + * use explicit barrier to avoid using readl/writel non relaxed function + * variants, because in our case non relaxed variants hide the true places + * where barrier is needed. 
+ */ +static inline void keystone_timer_barrier(void) +{ +	__iowmb(); +} + +/** + * keystone_timer_config: configures timer to work in oneshot/periodic modes. + * @ mode: mode to configure + * @ period: cycles number to configure for + */ +static int keystone_timer_config(u64 period, enum clock_event_mode mode) +{ +	u32 tcr; +	u32 off; + +	tcr = keystone_timer_readl(TCR); +	off = tcr & ~(TCR_ENAMODE_MASK); + +	/* set enable mode */ +	switch (mode) { +	case CLOCK_EVT_MODE_ONESHOT: +		tcr |= TCR_ENAMODE_ONESHOT_MASK; +		break; +	case CLOCK_EVT_MODE_PERIODIC: +		tcr |= TCR_ENAMODE_PERIODIC_MASK; +		break; +	default: +		return -1; +	} + +	/* disable timer */ +	keystone_timer_writel(off, TCR); +	/* here we have to be sure the timer has been disabled */ +	keystone_timer_barrier(); + +	/* reset counter to zero, set new period */ +	keystone_timer_writel(0, TIM12); +	keystone_timer_writel(0, TIM34); +	keystone_timer_writel(period & 0xffffffff, PRD12); +	keystone_timer_writel(period >> 32, PRD34); + +	/* +	 * enable timer +	 * here we have to be sure that CNTLO, CNTHI, PRDLO, PRDHI registers +	 * have been written. 
+	 */ +	keystone_timer_barrier(); +	keystone_timer_writel(tcr, TCR); +	return 0; +} + +static void keystone_timer_disable(void) +{ +	u32 tcr; + +	tcr = keystone_timer_readl(TCR); + +	/* disable timer */ +	tcr &= ~(TCR_ENAMODE_MASK); +	keystone_timer_writel(tcr, TCR); +} + +static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	evt->event_handler(evt); +	return IRQ_HANDLED; +} + +static int keystone_set_next_event(unsigned long cycles, +				  struct clock_event_device *evt) +{ +	return keystone_timer_config(cycles, evt->mode); +} + +static void keystone_set_mode(enum clock_event_mode mode, +			     struct clock_event_device *evt) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC); +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_ONESHOT: +		keystone_timer_disable(); +		break; +	default: +		break; +	} +} + +static void __init keystone_timer_init(struct device_node *np) +{ +	struct clock_event_device *event_dev = &timer.event_dev; +	unsigned long rate; +	struct clk *clk; +	int irq, error; + +	irq  = irq_of_parse_and_map(np, 0); +	if (irq == NO_IRQ) { +		pr_err("%s: failed to map interrupts\n", __func__); +		return; +	} + +	timer.base = of_iomap(np, 0); +	if (!timer.base) { +		pr_err("%s: failed to map registers\n", __func__); +		return; +	} + +	clk = of_clk_get(np, 0); +	if (IS_ERR(clk)) { +		pr_err("%s: failed to get clock\n", __func__); +		iounmap(timer.base); +		return; +	} + +	error = clk_prepare_enable(clk); +	if (error) { +		pr_err("%s: failed to enable clock\n", __func__); +		goto err; +	} + +	rate = clk_get_rate(clk); + +	/* disable, use internal clock source */ +	keystone_timer_writel(0, TCR); +	/* here we have to be sure the timer has been disabled */ +	keystone_timer_barrier(); + +	/* reset timer as 64-bit, no pre-scaler, plus features are disabled */ +	keystone_timer_writel(0, TGCR); 
+ +	/* unreset timer */ +	keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR); + +	/* init counter to zero */ +	keystone_timer_writel(0, TIM12); +	keystone_timer_writel(0, TIM34); + +	timer.hz_period = DIV_ROUND_UP(rate, HZ); + +	/* enable timer interrupts */ +	keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT); + +	error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER, +			    TIMER_NAME, event_dev); +	if (error) { +		pr_err("%s: failed to setup irq\n", __func__); +		goto err; +	} + +	/* setup clockevent */ +	event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; +	event_dev->set_next_event = keystone_set_next_event; +	event_dev->set_mode = keystone_set_mode; +	event_dev->cpumask = cpu_all_mask; +	event_dev->owner = THIS_MODULE; +	event_dev->name = TIMER_NAME; +	event_dev->irq = irq; + +	clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX); + +	pr_info("keystone timer clock @%lu Hz\n", rate); +	return; +err: +	clk_put(clk); +	iounmap(timer.base); +} + +CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer", +					keystone_timer_init); diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c index 09a17d9a659..dbd30398222 100644 --- a/drivers/clocksource/timer-marco.c +++ b/drivers/clocksource/timer-marco.c @@ -19,7 +19,8 @@  #include <linux/of_irq.h>  #include <linux/of_address.h>  #include <linux/sched_clock.h> -#include <asm/mach/time.h> + +#define MARCO_CLOCK_FREQ 1000000  #define SIRFSOC_TIMER_32COUNTER_0_CTRL			0x0000  #define SIRFSOC_TIMER_32COUNTER_1_CTRL			0x0004 @@ -191,14 +192,14 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)  	ce->rating = 200;  	ce->set_mode = sirfsoc_timer_set_mode;  	ce->set_next_event = sirfsoc_timer_set_next_event; -	clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60); +	clockevents_calc_mult_shift(ce, MARCO_CLOCK_FREQ, 60);  	ce->max_delta_ns = clockevent_delta2ns(-2, ce);  	ce->min_delta_ns = clockevent_delta2ns(2, ce);  	ce->cpumask = 
cpumask_of(cpu);  	action->dev_id = ce;  	BUG_ON(setup_irq(ce->irq, action)); -	irq_set_affinity(action->irq, cpumask_of(cpu)); +	irq_force_affinity(action->irq, cpumask_of(cpu));  	clockevents_register_device(ce);  	return 0; @@ -251,23 +252,21 @@ static void __init sirfsoc_clockevent_init(void)  }  /* initialize the kernel jiffy timer source */ -static void __init sirfsoc_marco_timer_init(void) +static void __init sirfsoc_marco_timer_init(struct device_node *np)  {  	unsigned long rate;  	u32 timer_div;  	struct clk *clk; -	/* timer's input clock is io clock */ -	clk = clk_get_sys("io", NULL); - +	clk = of_clk_get(np, 0);  	BUG_ON(IS_ERR(clk));  	rate = clk_get_rate(clk); -	BUG_ON(rate < CLOCK_TICK_RATE); -	BUG_ON(rate % CLOCK_TICK_RATE); +	BUG_ON(rate < MARCO_CLOCK_FREQ); +	BUG_ON(rate % MARCO_CLOCK_FREQ);  	/* Initialize the timer dividers */ -	timer_div = rate / CLOCK_TICK_RATE - 1; +	timer_div = rate / MARCO_CLOCK_FREQ - 1;  	writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);  	writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL);  	writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL); @@ -283,7 +282,7 @@ static void __init sirfsoc_marco_timer_init(void)  	/* Clear all interrupts */  	writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS); -	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE)); +	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, MARCO_CLOCK_FREQ));  	sirfsoc_clockevent_init();  } @@ -302,6 +301,6 @@ static void __init sirfsoc_of_timer_init(struct device_node *np)  	if (!sirfsoc_timer1_irq.irq)  		panic("No irq passed for timer1 via DT\n"); -	sirfsoc_marco_timer_init(); +	sirfsoc_marco_timer_init(np);  }  CLOCKSOURCE_OF_DECLARE(sirfsoc_marco_timer, "sirf,marco-tick", sirfsoc_of_timer_init ); diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c index ef3cfb269d8..a722aac7ac0 100644 
--- a/drivers/clocksource/timer-prima2.c +++ b/drivers/clocksource/timer-prima2.c @@ -21,6 +21,8 @@  #include <linux/sched_clock.h>  #include <asm/mach/time.h> +#define PRIMA2_CLOCK_FREQ 1000000 +  #define SIRFSOC_TIMER_COUNTER_LO	0x0000  #define SIRFSOC_TIMER_COUNTER_HI	0x0004  #define SIRFSOC_TIMER_MATCH_0		0x0008 @@ -59,7 +61,8 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)  {  	struct clock_event_device *ce = dev_id; -	WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) & BIT(0))); +	WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) & +		BIT(0)));  	/* clear timer0 interrupt */  	writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); @@ -75,9 +78,11 @@ static cycle_t sirfsoc_timer_read(struct clocksource *cs)  	u64 cycles;  	/* latch the 64-bit timer counter */ -	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); +	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, +		sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);  	cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_HI); -	cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO); +	cycles = (cycles << 32) | +		readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);  	return cycles;  } @@ -87,11 +92,13 @@ static int sirfsoc_timer_set_next_event(unsigned long delta,  {  	unsigned long now, next; -	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); +	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, +		sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);  	now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);  	next = now + delta;  	writel_relaxed(next, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0); -	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); +	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, +		sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);  	now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO);  	return next - now > 
delta ? -ETIME : 0; @@ -106,10 +113,12 @@ static void sirfsoc_timer_set_mode(enum clock_event_mode mode,  		WARN_ON(1);  		break;  	case CLOCK_EVT_MODE_ONESHOT: -		writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); +		writel_relaxed(val | BIT(0), +			sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);  		break;  	case CLOCK_EVT_MODE_SHUTDOWN: -		writel_relaxed(val & ~BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); +		writel_relaxed(val & ~BIT(0), +			sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);  		break;  	case CLOCK_EVT_MODE_UNUSED:  	case CLOCK_EVT_MODE_RESUME: @@ -121,10 +130,13 @@ static void sirfsoc_clocksource_suspend(struct clocksource *cs)  {  	int i; -	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); +	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, +		sirfsoc_timer_base + SIRFSOC_TIMER_LATCH);  	for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++) -		sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); +		sirfsoc_timer_reg_val[i] = +			readl_relaxed(sirfsoc_timer_base + +				sirfsoc_timer_reg_list[i]);  }  static void sirfsoc_clocksource_resume(struct clocksource *cs) @@ -132,10 +144,13 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs)  	int i;  	for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++) -		writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); +		writel_relaxed(sirfsoc_timer_reg_val[i], +			sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); -	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); -	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); +	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], +		sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); +	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], +		sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);  }  static struct 
clock_event_device sirfsoc_clockevent = { @@ -165,15 +180,15 @@ static struct irqaction sirfsoc_timer_irq = {  };  /* Overwrite weak default sched_clock with more precise one */ -static u32 notrace sirfsoc_read_sched_clock(void) +static u64 notrace sirfsoc_read_sched_clock(void)  { -	return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff); +	return sirfsoc_timer_read(NULL);  }  static void __init sirfsoc_clockevent_init(void)  {  	sirfsoc_clockevent.cpumask = cpumask_of(0); -	clockevents_config_and_register(&sirfsoc_clockevent, CLOCK_TICK_RATE, +	clockevents_config_and_register(&sirfsoc_clockevent, PRIMA2_CLOCK_FREQ,  					2, -2);  } @@ -183,15 +198,12 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)  	unsigned long rate;  	struct clk *clk; -	/* timer's input clock is io clock */ -	clk = clk_get_sys("io", NULL); - +	clk = of_clk_get(np, 0);  	BUG_ON(IS_ERR(clk)); -  	rate = clk_get_rate(clk); -	BUG_ON(rate < CLOCK_TICK_RATE); -	BUG_ON(rate % CLOCK_TICK_RATE); +	BUG_ON(rate < PRIMA2_CLOCK_FREQ); +	BUG_ON(rate % PRIMA2_CLOCK_FREQ);  	sirfsoc_timer_base = of_iomap(np, 0);  	if (!sirfsoc_timer_base) @@ -199,17 +211,20 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)  	sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); -	writel_relaxed(rate / CLOCK_TICK_RATE / 2 - 1, sirfsoc_timer_base + SIRFSOC_TIMER_DIV); +	writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1, +		sirfsoc_timer_base + SIRFSOC_TIMER_DIV);  	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);  	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);  	writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); -	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE)); +	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, +				       PRIMA2_CLOCK_FREQ)); -	setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE); +	sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);  	
BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));  	sirfsoc_clockevent_init();  } -CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, "sirf,prima2-tick", sirfsoc_prima2_timer_init); +CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, +	"sirf,prima2-tick", sirfsoc_prima2_timer_init); diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c new file mode 100644 index 00000000000..02268448dc8 --- /dev/null +++ b/drivers/clocksource/timer-sun5i.c @@ -0,0 +1,198 @@ +/* + * Allwinner SoCs hstimer driver. + * + * Copyright (C) 2013 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2.  This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/reset.h> +#include <linux/sched_clock.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define TIMER_IRQ_EN_REG		0x00 +#define TIMER_IRQ_EN(val)			BIT(val) +#define TIMER_IRQ_ST_REG		0x04 +#define TIMER_CTL_REG(val)		(0x20 * (val) + 0x10) +#define TIMER_CTL_ENABLE			BIT(0) +#define TIMER_CTL_RELOAD			BIT(1) +#define TIMER_CTL_CLK_PRES(val)			(((val) & 0x7) << 4) +#define TIMER_CTL_ONESHOT			BIT(7) +#define TIMER_INTVAL_LO_REG(val)	(0x20 * (val) + 0x14) +#define TIMER_INTVAL_HI_REG(val)	(0x20 * (val) + 0x18) +#define TIMER_CNTVAL_LO_REG(val)	(0x20 * (val) + 0x1c) +#define TIMER_CNTVAL_HI_REG(val)	(0x20 * (val) + 0x20) + +#define TIMER_SYNC_TICKS	3 + +static void __iomem *timer_base; +static u32 ticks_per_jiffy; + +/* + * When we disable a timer, we need to wait at least for 2 cycles of + * the timer source clock. 
We will use for that the clocksource timer + * that is already setup and runs at the same frequency than the other + * timers, and we never will be disabled. + */ +static void sun5i_clkevt_sync(void) +{ +	u32 old = readl(timer_base + TIMER_CNTVAL_LO_REG(1)); + +	while ((old - readl(timer_base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS) +		cpu_relax(); +} + +static void sun5i_clkevt_time_stop(u8 timer) +{ +	u32 val = readl(timer_base + TIMER_CTL_REG(timer)); +	writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer)); + +	sun5i_clkevt_sync(); +} + +static void sun5i_clkevt_time_setup(u8 timer, u32 delay) +{ +	writel(delay, timer_base + TIMER_INTVAL_LO_REG(timer)); +} + +static void sun5i_clkevt_time_start(u8 timer, bool periodic) +{ +	u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + +	if (periodic) +		val &= ~TIMER_CTL_ONESHOT; +	else +		val |= TIMER_CTL_ONESHOT; + +	writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, +	       timer_base + TIMER_CTL_REG(timer)); +} + +static void sun5i_clkevt_mode(enum clock_event_mode mode, +			      struct clock_event_device *clk) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		sun5i_clkevt_time_stop(0); +		sun5i_clkevt_time_setup(0, ticks_per_jiffy); +		sun5i_clkevt_time_start(0, true); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +		sun5i_clkevt_time_stop(0); +		sun5i_clkevt_time_start(0, false); +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	default: +		sun5i_clkevt_time_stop(0); +		break; +	} +} + +static int sun5i_clkevt_next_event(unsigned long evt, +				   struct clock_event_device *unused) +{ +	sun5i_clkevt_time_stop(0); +	sun5i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS); +	sun5i_clkevt_time_start(0, false); + +	return 0; +} + +static struct clock_event_device sun5i_clockevent = { +	.name = "sun5i_tick", +	.rating = 340, +	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +	.set_mode = sun5i_clkevt_mode, +	.set_next_event = sun5i_clkevt_next_event, +}; + + +static 
irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = (struct clock_event_device *)dev_id; + +	writel(0x1, timer_base + TIMER_IRQ_ST_REG); +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static struct irqaction sun5i_timer_irq = { +	.name = "sun5i_timer0", +	.flags = IRQF_TIMER | IRQF_IRQPOLL, +	.handler = sun5i_timer_interrupt, +	.dev_id = &sun5i_clockevent, +}; + +static u64 sun5i_timer_sched_read(void) +{ +	return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1)); +} + +static void __init sun5i_timer_init(struct device_node *node) +{ +	struct reset_control *rstc; +	unsigned long rate; +	struct clk *clk; +	int ret, irq; +	u32 val; + +	timer_base = of_iomap(node, 0); +	if (!timer_base) +		panic("Can't map registers"); + +	irq = irq_of_parse_and_map(node, 0); +	if (irq <= 0) +		panic("Can't parse IRQ"); + +	clk = of_clk_get(node, 0); +	if (IS_ERR(clk)) +		panic("Can't get timer clock"); +	clk_prepare_enable(clk); +	rate = clk_get_rate(clk); + +	rstc = of_reset_control_get(node, NULL); +	if (!IS_ERR(rstc)) +		reset_control_deassert(rstc); + +	writel(~0, timer_base + TIMER_INTVAL_LO_REG(1)); +	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, +	       timer_base + TIMER_CTL_REG(1)); + +	sched_clock_register(sun5i_timer_sched_read, 32, rate); +	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, +			      rate, 340, 32, clocksource_mmio_readl_down); + +	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); + +	ret = setup_irq(irq, &sun5i_timer_irq); +	if (ret) +		pr_warn("failed to setup irq %d\n", irq); + +	/* Enable timer0 interrupt */ +	val = readl(timer_base + TIMER_IRQ_EN_REG); +	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); + +	sun5i_clockevent.cpumask = cpu_possible_mask; +	sun5i_clockevent.irq = irq; + +	clockevents_config_and_register(&sun5i_clockevent, rate, +					TIMER_SYNC_TICKS, 0xffffffff); +} +CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", +		       sun5i_timer_init); 
+CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer", +		       sun5i_timer_init); diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c new file mode 100644 index 00000000000..5dcf756970e --- /dev/null +++ b/drivers/clocksource/timer-u300.c @@ -0,0 +1,447 @@ +/* + * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * Timer COH 901 328, runs the OS timer interrupt. + * Author: Linus Walleij <linus.walleij@stericsson.com> + */ +#include <linux/interrupt.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/types.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> + +/* Generic stuff */ +#include <asm/mach/map.h> +#include <asm/mach/time.h> + +/* + * APP side special timer registers + * This timer contains four timers which can fire an interrupt each. 
+ * OS (operating system) timer @ 32768 Hz + * DD (device driver) timer @ 1 kHz + * GP1 (general purpose 1) timer @ 1MHz + * GP2 (general purpose 2) timer @ 1MHz + */ + +/* Reset OS Timer 32bit (-/W) */ +#define U300_TIMER_APP_ROST					(0x0000) +#define U300_TIMER_APP_ROST_TIMER_RESET				(0x00000000) +/* Enable OS Timer 32bit (-/W) */ +#define U300_TIMER_APP_EOST					(0x0004) +#define U300_TIMER_APP_EOST_TIMER_ENABLE			(0x00000000) +/* Disable OS Timer 32bit (-/W) */ +#define U300_TIMER_APP_DOST					(0x0008) +#define U300_TIMER_APP_DOST_TIMER_DISABLE			(0x00000000) +/* OS Timer Mode Register 32bit (-/W) */ +#define U300_TIMER_APP_SOSTM					(0x000c) +#define U300_TIMER_APP_SOSTM_MODE_CONTINUOUS			(0x00000000) +#define U300_TIMER_APP_SOSTM_MODE_ONE_SHOT			(0x00000001) +/* OS Timer Status Register 32bit (R/-) */ +#define U300_TIMER_APP_OSTS					(0x0010) +#define U300_TIMER_APP_OSTS_TIMER_STATE_MASK			(0x0000000F) +#define U300_TIMER_APP_OSTS_TIMER_STATE_IDLE			(0x00000001) +#define U300_TIMER_APP_OSTS_TIMER_STATE_ACTIVE			(0x00000002) +#define U300_TIMER_APP_OSTS_ENABLE_IND				(0x00000010) +#define U300_TIMER_APP_OSTS_MODE_MASK				(0x00000020) +#define U300_TIMER_APP_OSTS_MODE_CONTINUOUS			(0x00000000) +#define U300_TIMER_APP_OSTS_MODE_ONE_SHOT			(0x00000020) +#define U300_TIMER_APP_OSTS_IRQ_ENABLED_IND			(0x00000040) +#define U300_TIMER_APP_OSTS_IRQ_PENDING_IND			(0x00000080) +/* OS Timer Current Count Register 32bit (R/-) */ +#define U300_TIMER_APP_OSTCC					(0x0014) +/* OS Timer Terminal Count Register 32bit (R/W) */ +#define U300_TIMER_APP_OSTTC					(0x0018) +/* OS Timer Interrupt Enable Register 32bit (-/W) */ +#define U300_TIMER_APP_OSTIE					(0x001c) +#define U300_TIMER_APP_OSTIE_IRQ_DISABLE			(0x00000000) +#define U300_TIMER_APP_OSTIE_IRQ_ENABLE				(0x00000001) +/* OS Timer Interrupt Acknowledge Register 32bit (-/W) */ +#define U300_TIMER_APP_OSTIA					(0x0020) +#define U300_TIMER_APP_OSTIA_IRQ_ACK				(0x00000080) + +/* Reset DD Timer 32bit (-/W) */ +#define 
/*
 * U300 APP timer register map.
 *
 * All offsets are relative to u300_timer_base (the of_iomap()'d APP timer
 * block).  The block contains four timers: "OS" and "DD" (legacy timers
 * designed for Symbian, disabled at init), and the two general purpose
 * timers GP1 (used as clockevent) and GP2 (used as clocksource/sched_clock).
 * Each timer has the same register layout: reset, enable, disable, mode,
 * status, current count, terminal count, IRQ enable and IRQ acknowledge.
 * The (-/W) / (R/-) / (R/W) annotations give the access mode of each
 * register as per the hardware documentation.
 */

/* Reset DD Timer 32bit (-/W) */
#define U300_TIMER_APP_RDDT					(0x0040)
#define U300_TIMER_APP_RDDT_TIMER_RESET				(0x00000000)
/* Enable DD Timer 32bit (-/W) */
#define U300_TIMER_APP_EDDT					(0x0044)
#define U300_TIMER_APP_EDDT_TIMER_ENABLE			(0x00000000)
/* Disable DD Timer 32bit (-/W) */
#define U300_TIMER_APP_DDDT					(0x0048)
#define U300_TIMER_APP_DDDT_TIMER_DISABLE			(0x00000000)
/* DD Timer Mode Register 32bit (-/W) */
#define U300_TIMER_APP_SDDTM					(0x004c)
#define U300_TIMER_APP_SDDTM_MODE_CONTINUOUS			(0x00000000)
#define U300_TIMER_APP_SDDTM_MODE_ONE_SHOT			(0x00000001)
/* DD Timer Status Register 32bit (R/-) */
#define U300_TIMER_APP_DDTS					(0x0050)
#define U300_TIMER_APP_DDTS_TIMER_STATE_MASK			(0x0000000F)
#define U300_TIMER_APP_DDTS_TIMER_STATE_IDLE			(0x00000001)
#define U300_TIMER_APP_DDTS_TIMER_STATE_ACTIVE			(0x00000002)
#define U300_TIMER_APP_DDTS_ENABLE_IND				(0x00000010)
#define U300_TIMER_APP_DDTS_MODE_MASK				(0x00000020)
#define U300_TIMER_APP_DDTS_MODE_CONTINUOUS			(0x00000000)
#define U300_TIMER_APP_DDTS_MODE_ONE_SHOT			(0x00000020)
#define U300_TIMER_APP_DDTS_IRQ_ENABLED_IND			(0x00000040)
#define U300_TIMER_APP_DDTS_IRQ_PENDING_IND			(0x00000080)
/* DD Timer Current Count Register 32bit (R/-) */
#define U300_TIMER_APP_DDTCC					(0x0054)
/* DD Timer Terminal Count Register 32bit (R/W) */
#define U300_TIMER_APP_DDTTC					(0x0058)
/* DD Timer Interrupt Enable Register 32bit (-/W) */
#define U300_TIMER_APP_DDTIE					(0x005c)
#define U300_TIMER_APP_DDTIE_IRQ_DISABLE			(0x00000000)
#define U300_TIMER_APP_DDTIE_IRQ_ENABLE				(0x00000001)
/* DD Timer Interrupt Acknowledge Register 32bit (-/W) */
#define U300_TIMER_APP_DDTIA					(0x0060)
#define U300_TIMER_APP_DDTIA_IRQ_ACK				(0x00000080)

/* GP1 timer: used as the clockevent device, see u300_clockevent_data. */

/* Reset GP1 Timer 32bit (-/W) */
#define U300_TIMER_APP_RGPT1					(0x0080)
#define U300_TIMER_APP_RGPT1_TIMER_RESET			(0x00000000)
/* Enable GP1 Timer 32bit (-/W) */
#define U300_TIMER_APP_EGPT1					(0x0084)
#define U300_TIMER_APP_EGPT1_TIMER_ENABLE			(0x00000000)
/* Disable GP1 Timer 32bit (-/W) */
#define U300_TIMER_APP_DGPT1					(0x0088)
#define U300_TIMER_APP_DGPT1_TIMER_DISABLE			(0x00000000)
/* GP1 Timer Mode Register 32bit (-/W) */
#define U300_TIMER_APP_SGPT1M					(0x008c)
#define U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS			(0x00000000)
#define U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT			(0x00000001)
/* GP1 Timer Status Register 32bit (R/-) */
#define U300_TIMER_APP_GPT1S					(0x0090)
#define U300_TIMER_APP_GPT1S_TIMER_STATE_MASK			(0x0000000F)
#define U300_TIMER_APP_GPT1S_TIMER_STATE_IDLE			(0x00000001)
#define U300_TIMER_APP_GPT1S_TIMER_STATE_ACTIVE			(0x00000002)
#define U300_TIMER_APP_GPT1S_ENABLE_IND				(0x00000010)
#define U300_TIMER_APP_GPT1S_MODE_MASK				(0x00000020)
#define U300_TIMER_APP_GPT1S_MODE_CONTINUOUS			(0x00000000)
#define U300_TIMER_APP_GPT1S_MODE_ONE_SHOT			(0x00000020)
#define U300_TIMER_APP_GPT1S_IRQ_ENABLED_IND			(0x00000040)
#define U300_TIMER_APP_GPT1S_IRQ_PENDING_IND			(0x00000080)
/* GP1 Timer Current Count Register 32bit (R/-) */
#define U300_TIMER_APP_GPT1CC					(0x0094)
/* GP1 Timer Terminal Count Register 32bit (R/W) */
#define U300_TIMER_APP_GPT1TC					(0x0098)
/* GP1 Timer Interrupt Enable Register 32bit (-/W) */
#define U300_TIMER_APP_GPT1IE					(0x009c)
#define U300_TIMER_APP_GPT1IE_IRQ_DISABLE			(0x00000000)
#define U300_TIMER_APP_GPT1IE_IRQ_ENABLE			(0x00000001)
/* GP1 Timer Interrupt Acknowledge Register 32bit (-/W) */
#define U300_TIMER_APP_GPT1IA					(0x00a0)
#define U300_TIMER_APP_GPT1IA_IRQ_ACK				(0x00000080)

/* GP2 timer: used as free-running clocksource, sched_clock and delay timer. */

/* Reset GP2 Timer 32bit (-/W) */
#define U300_TIMER_APP_RGPT2					(0x00c0)
#define U300_TIMER_APP_RGPT2_TIMER_RESET			(0x00000000)
/* Enable GP2 Timer 32bit (-/W) */
#define U300_TIMER_APP_EGPT2					(0x00c4)
#define U300_TIMER_APP_EGPT2_TIMER_ENABLE			(0x00000000)
/* Disable GP2 Timer 32bit (-/W) */
#define U300_TIMER_APP_DGPT2					(0x00c8)
#define U300_TIMER_APP_DGPT2_TIMER_DISABLE			(0x00000000)
/* GP2 Timer Mode Register 32bit (-/W) */
#define U300_TIMER_APP_SGPT2M					(0x00cc)
#define U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS			(0x00000000)
#define U300_TIMER_APP_SGPT2M_MODE_ONE_SHOT			(0x00000001)
/* GP2 Timer Status Register 32bit (R/-) */
#define U300_TIMER_APP_GPT2S					(0x00d0)
#define U300_TIMER_APP_GPT2S_TIMER_STATE_MASK			(0x0000000F)
#define U300_TIMER_APP_GPT2S_TIMER_STATE_IDLE			(0x00000001)
#define U300_TIMER_APP_GPT2S_TIMER_STATE_ACTIVE			(0x00000002)
#define U300_TIMER_APP_GPT2S_ENABLE_IND				(0x00000010)
#define U300_TIMER_APP_GPT2S_MODE_MASK				(0x00000020)
#define U300_TIMER_APP_GPT2S_MODE_CONTINUOUS			(0x00000000)
#define U300_TIMER_APP_GPT2S_MODE_ONE_SHOT			(0x00000020)
#define U300_TIMER_APP_GPT2S_IRQ_ENABLED_IND			(0x00000040)
#define U300_TIMER_APP_GPT2S_IRQ_PENDING_IND			(0x00000080)
/* GP2 Timer Current Count Register 32bit (R/-) */
#define U300_TIMER_APP_GPT2CC					(0x00d4)
/* GP2 Timer Terminal Count Register 32bit (R/W) */
#define U300_TIMER_APP_GPT2TC					(0x00d8)
/* GP2 Timer Interrupt Enable Register 32bit (-/W) */
#define U300_TIMER_APP_GPT2IE					(0x00dc)
#define U300_TIMER_APP_GPT2IE_IRQ_DISABLE			(0x00000000)
#define U300_TIMER_APP_GPT2IE_IRQ_ENABLE			(0x00000001)
/* GP2 Timer Interrupt Acknowledge Register 32bit (-/W) */
#define U300_TIMER_APP_GPT2IA					(0x00e0)
#define U300_TIMER_APP_GPT2IA_IRQ_ACK				(0x00000080)

/* Clock request control register - all four timers */
#define U300_TIMER_APP_CRC					(0x100)
#define U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE			(0x00000001)

/* Virtual base of the APP timer block, mapped in u300_timer_init_of(). */
static void __iomem *u300_timer_base;

/*
 * Clockevent bookkeeping: the clock_event_device itself plus the number
 * of timer ticks per jiffy, precomputed from the timer clock rate at init
 * time so the periodic mode setup does not have to divide on every switch.
 */
struct u300_clockevent_data {
	struct clock_event_device cevd;
	unsigned ticks_per_jiffy;
};

/*
 * The u300_set_mode() function is always called first, if we
 * have oneshot timer active, the oneshot scheduling function
 * u300_set_next_event() is called immediately after.
 */
/*
 * u300_set_mode() - clockevent mode switch for the GP1 timer.
 * @mode: requested clockevent mode
 * @evt:  the clock_event_device embedded in u300_clockevent_data
 *
 * Always called before u300_set_next_event() when oneshot mode is
 * activated.  Every mode change follows the same hardware sequence:
 * mask the IRQ, stop the timer, reprogram terminal count and mode,
 * then unmask and restart.
 */
static void u300_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt)
{
	struct u300_clockevent_data *cevdata =
		container_of(evt, struct u300_clockevent_data, cevd);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* Disable interrupts on GPT1 */
		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
		       u300_timer_base + U300_TIMER_APP_GPT1IE);
		/* Disable GP1 while we're reprogramming it. */
		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
		       u300_timer_base + U300_TIMER_APP_DGPT1);
		/*
		 * Set the periodic mode to a certain number of ticks per
		 * jiffy.
		 */
		writel(cevdata->ticks_per_jiffy,
		       u300_timer_base + U300_TIMER_APP_GPT1TC);
		/*
		 * Set continuous mode, so the timer keeps triggering
		 * interrupts.
		 */
		writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
		       u300_timer_base + U300_TIMER_APP_SGPT1M);
		/* Enable timer interrupts */
		writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
		       u300_timer_base + U300_TIMER_APP_GPT1IE);
		/* Then enable the OS timer again */
		writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
		       u300_timer_base + U300_TIMER_APP_EGPT1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* Just break; here? */
		/*
		 * The actual event will be programmed by the next event hook,
		 * so we just set a dummy value somewhere at the end of the
		 * universe here.
		 */
		/* Disable interrupts on GPT1 */
		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
		       u300_timer_base + U300_TIMER_APP_GPT1IE);
		/* Disable GP1 while we're reprogramming it. */
		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
		       u300_timer_base + U300_TIMER_APP_DGPT1);
		/*
		 * Expire far in the future, u300_set_next_event() will be
		 * called soon...
		 */
		writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
		/* We run one shot per tick here! */
		writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
		       u300_timer_base + U300_TIMER_APP_SGPT1M);
		/* Enable interrupts for this timer */
		writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
		       u300_timer_base + U300_TIMER_APP_GPT1IE);
		/* Enable timer */
		writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
		       u300_timer_base + U300_TIMER_APP_EGPT1);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Disable interrupts on GP1 */
		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
		       u300_timer_base + U300_TIMER_APP_GPT1IE);
		/* Disable GP1 */
		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
		       u300_timer_base + U300_TIMER_APP_DGPT1);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Ignore this call */
		break;
	}
}

/*
 * The app timer in one shot mode obviously has to be reprogrammed
 * in EXACTLY this sequence to work properly. Do NOT try to e.g. replace
 * the interrupt disable + timer disable commands with a reset command,
 * it will fail miserably. Apparently (and I found this the hard way)
 * the timer is very sensitive to the instruction order, though you don't
 * get that impression from the data sheet.
 *
 * Returns 0 (this hardware programming sequence cannot fail).
 */
static int u300_set_next_event(unsigned long cycles,
			       struct clock_event_device *evt)
{
	/* Disable interrupts on GPT1 */
	writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
	       u300_timer_base + U300_TIMER_APP_GPT1IE);
	/* Disable GP1 while we're reprogramming it. */
	writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
	       u300_timer_base + U300_TIMER_APP_DGPT1);
	/* Reset the General Purpose timer 1. */
	writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
	       u300_timer_base + U300_TIMER_APP_RGPT1);
	/* IRQ in n * cycles */
	writel(cycles, u300_timer_base + U300_TIMER_APP_GPT1TC);
	/*
	 * We run one shot per tick here! (This is necessary to reconfigure,
	 * the timer will tilt if you don't!)
	 */
	writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
	       u300_timer_base + U300_TIMER_APP_SGPT1M);
	/* Enable timer interrupts */
	writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
	       u300_timer_base + U300_TIMER_APP_GPT1IE);
	/* Then enable the OS timer again */
	writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
	       u300_timer_base + U300_TIMER_APP_EGPT1);
	return 0;
}

static struct u300_clockevent_data u300_clockevent_data = {
	/* Use general purpose timer 1 as clock event */
	.cevd = {
		.name		= "GPT1",
		/* Reasonably fast and accurate clock event */
		.rating		= 300,
		.features	= CLOCK_EVT_FEAT_PERIODIC |
			CLOCK_EVT_FEAT_ONESHOT,
		.set_next_event	= u300_set_next_event,
		.set_mode	= u300_set_mode,
	},
};

/*
 * Clock event timer interrupt handler: acknowledge the GP1 IRQ in
 * hardware first, then hand the tick to the clockevent framework.
 */
static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &u300_clockevent_data.cevd;
	/* ACK/Clear timer IRQ for the APP GPT1 Timer */

	writel(U300_TIMER_APP_GPT1IA_IRQ_ACK,
		u300_timer_base + U300_TIMER_APP_GPT1IA);
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction u300_timer_irq = {
	.name		= "U300 Timer Tick",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= u300_timer_interrupt,
};

/*
 * Override the global weak sched_clock symbol with this
 * local implementation which uses the clocksource to get some
 * better resolution when scheduling the kernel. We accept that
 * this wraps around for now, since it is just a relative time
 * stamp. (Inspired by OMAP implementation.)
 */

static u64 notrace u300_read_sched_clock(void)
{
	return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
}

/* Read hook for the timer-based delay loop: raw GP2 counter value. */
static unsigned long u300_read_current_timer(void)
{
	return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
}

/* Registered with register_current_timer_delay() once the rate is known. */
static struct delay_timer u300_delay_timer;

/*
 * This sets up the system timers, clock source and clock event.
 */
/*
 * u300_timer_init_of() - set up the U300 system timers from device tree.
 * @np: the "stericsson,u300-apptimer" device tree node
 *
 * Maps the timer block, grabs clock and IRQ, then wires up GP1 as the
 * clockevent and GP2 as free-running clocksource/sched_clock/delay timer.
 * Panics on missing resources: without a system timer the kernel cannot
 * run, so there is no sane error path here.
 */
static void __init u300_timer_init_of(struct device_node *np)
{
	unsigned int irq;
	struct clk *clk;
	unsigned long rate;

	u300_timer_base = of_iomap(np, 0);
	if (!u300_timer_base)
		panic("could not ioremap system timer\n");

	/* Get the IRQ for the GP1 timer */
	irq = irq_of_parse_and_map(np, 2);
	if (!irq)
		panic("no IRQ for system timer\n");

	pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);

	/* Clock the interrupt controller */
	clk = of_clk_get(np, 0);
	BUG_ON(IS_ERR(clk));
	clk_prepare_enable(clk);
	rate = clk_get_rate(clk);

	/* Precompute the periodic-mode reload value from the clock rate. */
	u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);

	sched_clock_register(u300_read_sched_clock, 32, rate);

	u300_delay_timer.read_current_timer = &u300_read_current_timer;
	u300_delay_timer.freq = rate;
	register_current_timer_delay(&u300_delay_timer);

	/*
	 * Disable the "OS" and "DD" timers - these are designed for Symbian!
	 * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
	 */
	writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
		u300_timer_base + U300_TIMER_APP_CRC);
	writel(U300_TIMER_APP_ROST_TIMER_RESET,
		u300_timer_base + U300_TIMER_APP_ROST);
	writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
		u300_timer_base + U300_TIMER_APP_DOST);
	writel(U300_TIMER_APP_RDDT_TIMER_RESET,
		u300_timer_base + U300_TIMER_APP_RDDT);
	writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
		u300_timer_base + U300_TIMER_APP_DDDT);

	/* Reset the General Purpose timer 1. */
	writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
		u300_timer_base + U300_TIMER_APP_RGPT1);

	/* Set up the IRQ handler */
	setup_irq(irq, &u300_timer_irq);

	/* Reset the General Purpose timer 2 */
	writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
		u300_timer_base + U300_TIMER_APP_RGPT2);
	/* Set this timer to run around forever */
	writel(0xFFFFFFFFU, u300_timer_base + U300_TIMER_APP_GPT2TC);
	/* Set continuous mode so it wraps around */
	writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
	       u300_timer_base + U300_TIMER_APP_SGPT2M);
	/* Disable timer interrupts */
	writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
		u300_timer_base + U300_TIMER_APP_GPT2IE);
	/* Then enable the GP2 timer to use as a free running us counter */
	writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
		u300_timer_base + U300_TIMER_APP_EGPT2);

	/* Use general purpose timer 2 as clock source */
	if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
			"GPT2", rate, 300, 32, clocksource_mmio_readl_up))
		pr_err("timer: failed to initialize U300 clock source\n");

	/* Configure and register the clockevent */
	clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
					1, 0xffffffff);

	/*
	 * TODO: init and register the rest of the timers too, they can be
	 * used by hrtimers!
	 */
}

CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
		       u300_timer_init_of);
See the + * GNU General Public License for more details. + * + * Copyright (C) 2014 ARM Limited + */ + +#include <linux/clocksource.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/sched_clock.h> + +#define SYS_24MHZ 0x05c + +static void __iomem *versatile_sys_24mhz; + +static u64 notrace versatile_sys_24mhz_read(void) +{ +	return readl(versatile_sys_24mhz); +} + +static void __init versatile_sched_clock_init(struct device_node *node) +{ +	void __iomem *base = of_iomap(node, 0); + +	if (!base) +		return; + +	versatile_sys_24mhz = base + SYS_24MHZ; + +	sched_clock_register(versatile_sys_24mhz_read, 32, 24000000); +} +CLOCKSOURCE_OF_DECLARE(versatile, "arm,vexpress-sysreg", +		       versatile_sched_clock_init); diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c index 587e0202a70..a918bc481c5 100644 --- a/drivers/clocksource/vf_pit_timer.c +++ b/drivers/clocksource/vf_pit_timer.c @@ -52,9 +52,9 @@ static inline void pit_irq_acknowledge(void)  	__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);  } -static unsigned int pit_read_sched_clock(void) +static u64 pit_read_sched_clock(void)  { -	return __raw_readl(clksrc_base + PITCVAL); +	return ~__raw_readl(clksrc_base + PITCVAL);  }  static int __init pit_clocksource_init(unsigned long rate) @@ -64,7 +64,7 @@ static int __init pit_clocksource_init(unsigned long rate)  	__raw_writel(~0UL, clksrc_base + PITLDVAL);  	__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL); -	setup_sched_clock(pit_read_sched_clock, 32, rate); +	sched_clock_register(pit_read_sched_clock, 32, rate);  	return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,  			300, 32, clocksource_mmio_readl_down);  } diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c index 64f553f04fa..1098ed3b9b8 100644 --- a/drivers/clocksource/vt8500_timer.c +++ b/drivers/clocksource/vt8500_timer.c @@ -124,7 +124,7 @@ static irqreturn_t vt8500_timer_interrupt(int irq, void 
*dev_id)  static struct irqaction irq = {  	.name    = "vt8500_timer", -	.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, +	.flags   = IRQF_TIMER | IRQF_IRQPOLL,  	.handler = vt8500_timer_interrupt,  	.dev_id  = &clockevent,  }; @@ -137,14 +137,12 @@ static void __init vt8500_timer_init(struct device_node *np)  	if (!regbase) {  		pr_err("%s: Missing iobase description in Device Tree\n",  								__func__); -		of_node_put(np);  		return;  	}  	timer_irq = irq_of_parse_and_map(np, 0);  	if (!timer_irq) {  		pr_err("%s: Missing irq description in Device Tree\n",  								__func__); -		of_node_put(np);  		return;  	} diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c index ca81809d159..7ce442148c3 100644 --- a/drivers/clocksource/zevio-timer.c +++ b/drivers/clocksource/zevio-timer.c @@ -212,4 +212,9 @@ error_free:  	return ret;  } -CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add); +static void __init zevio_timer_init(struct device_node *node) +{ +	BUG_ON(zevio_timer_add(node)); +} + +CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);  | 
