Diffstat (limited to 'drivers/clocksource')
45 files changed, 11275 insertions, 828 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig new file mode 100644 index 00000000000..065131cbfcc --- /dev/null +++ b/drivers/clocksource/Kconfig @@ -0,0 +1,209 @@ +config CLKSRC_OF +	bool + +config CLKSRC_I8253 +	bool + +config CLKEVT_I8253 +	bool + +config I8253_LOCK +	bool + +config CLKBLD_I8253 +	def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK + +config CLKSRC_MMIO +	bool + +config DW_APB_TIMER +	bool + +config DW_APB_TIMER_OF +	bool +	select DW_APB_TIMER +	select CLKSRC_OF + +config ARMADA_370_XP_TIMER +	bool +	select CLKSRC_OF + +config ORION_TIMER +	select CLKSRC_OF +	select CLKSRC_MMIO +	bool + +config SUN4I_TIMER +	select CLKSRC_MMIO +	bool + +config SUN5I_HSTIMER +	select CLKSRC_MMIO +	bool + +config VT8500_TIMER +	bool + +config CADENCE_TTC_TIMER +	bool + +config CLKSRC_NOMADIK_MTU +	bool +	depends on (ARCH_NOMADIK || ARCH_U8500) +	select CLKSRC_MMIO +	help +	  Support for the Multi Timer Unit. The MTU provides access +	  to multiple interrupt generating programmable +	  32-bit free running decrementing counters. + +config CLKSRC_NOMADIK_MTU_SCHED_CLOCK +	bool +	depends on CLKSRC_NOMADIK_MTU +	help +	  Use the Multi Timer Unit as the sched_clock. + +config CLKSRC_DBX500_PRCMU +	bool "Clocksource PRCMU Timer" +	depends on UX500_SOC_DB8500 +	default y +	help +	  Use the always-on PRCMU Timer as clocksource + +config CLKSRC_DBX500_PRCMU_SCHED_CLOCK +	bool "Clocksource PRCMU Timer sched_clock" +	depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK) +	default y +	help +	  Use the always-on PRCMU Timer as sched_clock + +config CLKSRC_EFM32 +	bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32 +	depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) +	select CLKSRC_MMIO +	default ARCH_EFM32 +	help +	  Support to use the timers of EFM32 SoCs as clock source and clock +	  event device. + +config ARM_ARCH_TIMER +	bool +	select CLKSRC_OF if OF + +config ARM_ARCH_TIMER_EVTSTREAM +	bool "Support for ARM architected timer event stream generation" +	default y if ARM_ARCH_TIMER +	depends on ARM_ARCH_TIMER +	help +	  This option enables support for event stream generation based on +	  the ARM architected timer. It is used for waking up CPUs executing +	  the wfe instruction at a frequency represented as a power-of-2 +	  divisor of the clock rate. +	  The main use of the event stream is wfe-based timeouts of userspace +	  locking implementations. It might also be useful for imposing a timeout +	  on wfe to safeguard against any programming errors in case an expected +	  event is not generated. +	  This must be disabled for hardware validation purposes to detect any +	  hardware anomalies of missing events. + +config ARM_GLOBAL_TIMER +	bool +	select CLKSRC_OF if OF +	help +	  This option enables support for the ARM global timer unit. + +config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK +	bool +	depends on ARM_GLOBAL_TIMER +	default y +	help +	 Use the ARM global timer clock source as sched_clock. + +config CLKSRC_METAG_GENERIC +	def_bool y if METAG +	help +	  This option enables support for the Meta per-thread timers. + +config CLKSRC_EXYNOS_MCT +	def_bool y if ARCH_EXYNOS +	help +	  Support for the Multi Core Timer controller on Exynos SoCs. + +config CLKSRC_SAMSUNG_PWM +	bool +	help +	  This is a clocksource driver for the PWM timer found in +	  Samsung S3C, S5P and Exynos SoCs; on devicetree-enabled platforms +	  it replaces an earlier driver. It will be needed only on systems +	  that do not have the Exynos MCT available.
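Several of the entries above exist only to pull in the shared CLKSRC_MMIO helper. A driver that selects it registers a free-running, memory-mapped counter with a single call; the minimal sketch below shows the shape of that call. The register offset, rate and function names prefixed "demo" are hypothetical placeholders; clocksource_mmio_init() and clocksource_mmio_readl_up() are the helper's real entry points, used for example by the bcm2835 driver later in this patch.

#include <linux/clocksource.h>
#include <linux/io.h>

/* Hypothetical device: a 32-bit up-counter at offset 0x04, ticking at 1MHz. */
#define DEMO_COUNTER_OFFSET	0x04
#define DEMO_COUNTER_HZ		1000000

static int __init demo_clocksource_init(void __iomem *base)
{
	/*
	 * clocksource_mmio_init(base, name, hz, rating, bits, read) wraps
	 * the counter register in a struct clocksource and registers it;
	 * clocksource_mmio_readl_up() is the stock read callback for a
	 * 32-bit up-counting register.
	 */
	return clocksource_mmio_init(base + DEMO_COUNTER_OFFSET, "demo_counter",
				     DEMO_COUNTER_HZ, 300, 32,
				     clocksource_mmio_readl_up);
}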
+ +config FSL_FTM_TIMER +	bool +	help +	  Support for the Freescale FlexTimer Module (FTM) timer. + +config VF_PIT_TIMER +	bool +	help +	  Support for the Period Interrupt Timer on Freescale Vybrid Family SoCs. + +config SYS_SUPPORTS_SH_CMT +        bool + +config SYS_SUPPORTS_SH_MTU2 +        bool + +config SYS_SUPPORTS_SH_TMU +        bool + +config SYS_SUPPORTS_EM_STI +        bool + +config SH_TIMER_CMT +	bool "Renesas CMT timer driver" if COMPILE_TEST +	depends on GENERIC_CLOCKEVENTS +	default SYS_SUPPORTS_SH_CMT +	help +	  This enables build of a clocksource and clockevent driver for +	  the Compare Match Timer (CMT) hardware available in 16/32/48-bit +	  variants on a wide range of Mobile and Automotive SoCs from Renesas. + +config SH_TIMER_MTU2 +	bool "Renesas MTU2 timer driver" if COMPILE_TEST +	depends on GENERIC_CLOCKEVENTS +	default SYS_SUPPORTS_SH_MTU2 +	help +	  This enables build of a clockevent driver for the Multi-Function +	  Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas. +	  This hardware comes with 16-bit timer registers. + +config SH_TIMER_TMU +	bool "Renesas TMU timer driver" if COMPILE_TEST +	depends on GENERIC_CLOCKEVENTS +	default SYS_SUPPORTS_SH_TMU +	help +	  This enables build of a clocksource and clockevent driver for +	  the 32-bit Timer Unit (TMU) hardware available on a wide range +	  of SoCs from Renesas. + +config EM_TIMER_STI +	bool "Renesas STI timer driver" if COMPILE_TEST +	depends on GENERIC_CLOCKEVENTS +	default SYS_SUPPORTS_EM_STI +	help +	  This enables build of a clocksource and clockevent driver for +	  the 48-bit System Timer (STI) hardware available on SoCs +	  such as EMEV2 from the former NEC Electronics. + +config CLKSRC_QCOM +	bool + +config CLKSRC_VERSATILE +	bool "ARM Versatile (Express) reference platforms clock source" +	depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET +	select CLKSRC_OF +	default y if MFD_VEXPRESS_SYSREG +	help +	  This option enables a clock source based on the free running +	  counter available in the "System Registers" block of +	  ARM Versatile, RealView and Versatile Express reference +	  platforms.
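The ARM_ARCH_TIMER_EVTSTREAM help text above only says the event-stream frequency is a power-of-2 divisor of the clock rate; the rounding itself is done by arch_timer_configure_evtstream() in the arm_arch_timer.c hunk further down. Below is a standalone sketch of that rounding, under assumed numbers: a 50MHz counter and the 10kHz event-stream target (the value the kernel uses for ARCH_TIMER_EVT_STREAM_FREQ).

#include <stdio.h>

/* fls() as in the kernel: 1-based index of the highest set bit, 0 for 0. */
static int fls32(unsigned int x)
{
	int pos = 0;

	while (x) {
		pos++;
		x >>= 1;
	}
	return pos;
}

int main(void)
{
	unsigned int rate = 50000000;		/* assumed counter frequency */
	unsigned int target = 10000;		/* 10kHz event-stream target */
	unsigned int div = rate / target;	/* ideal divisor: 5000 */
	int pos = fls32(div);			/* 13, since 4096 <= 5000 < 8192 */

	/*
	 * Same test as arch_timer_configure_evtstream(): if the bit just
	 * below the top bit is clear, the value sits in the lower half of
	 * [2^(pos-1), 2^pos), so the lower power of two is closer.
	 */
	if (pos > 1 && !(div & (1u << (pos - 2))))
		pos--;

	/* 5000 has bit 11 clear, so pos drops from 13 to 12: 2^12 = 4096. */
	printf("ideal divisor %u rounds to %u\n", div, 1u << pos);
	return 0;
}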
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index be61ece6330..800b1303c23 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -1,8 +1,43 @@ +obj-$(CONFIG_CLKSRC_OF)	+= clksrc-of.o  obj-$(CONFIG_ATMEL_TCB_CLKSRC)	+= tcb_clksrc.o -obj-$(CONFIG_X86_CYCLONE_TIMER)	+= cyclone.o  obj-$(CONFIG_X86_PM_TIMER)	+= acpi_pm.o  obj-$(CONFIG_SCx200HR_TIMER)	+= scx200_hrt.o  obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC)	+= cs5535-clockevt.o  obj-$(CONFIG_SH_TIMER_CMT)	+= sh_cmt.o  obj-$(CONFIG_SH_TIMER_MTU2)	+= sh_mtu2.o  obj-$(CONFIG_SH_TIMER_TMU)	+= sh_tmu.o +obj-$(CONFIG_EM_TIMER_STI)	+= em_sti.o +obj-$(CONFIG_CLKBLD_I8253)	+= i8253.o +obj-$(CONFIG_CLKSRC_MMIO)	+= mmio.o +obj-$(CONFIG_DW_APB_TIMER)	+= dw_apb_timer.o +obj-$(CONFIG_DW_APB_TIMER_OF)	+= dw_apb_timer_of.o +obj-$(CONFIG_CLKSRC_NOMADIK_MTU)	+= nomadik-mtu.o +obj-$(CONFIG_CLKSRC_DBX500_PRCMU)	+= clksrc-dbx500-prcmu.o +obj-$(CONFIG_ARMADA_370_XP_TIMER)	+= time-armada-370-xp.o +obj-$(CONFIG_ORION_TIMER)	+= time-orion.o +obj-$(CONFIG_ARCH_BCM2835)	+= bcm2835_timer.o +obj-$(CONFIG_ARCH_MARCO)	+= timer-marco.o +obj-$(CONFIG_ARCH_MOXART)	+= moxart_timer.o +obj-$(CONFIG_ARCH_MXS)		+= mxs_timer.o +obj-$(CONFIG_ARCH_PRIMA2)	+= timer-prima2.o +obj-$(CONFIG_ARCH_U300)		+= timer-u300.o +obj-$(CONFIG_SUN4I_TIMER)	+= sun4i_timer.o +obj-$(CONFIG_SUN5I_HSTIMER)	+= timer-sun5i.o +obj-$(CONFIG_ARCH_TEGRA)	+= tegra20_timer.o +obj-$(CONFIG_VT8500_TIMER)	+= vt8500_timer.o +obj-$(CONFIG_ARCH_NSPIRE)	+= zevio-timer.o +obj-$(CONFIG_ARCH_BCM_MOBILE)	+= bcm_kona_timer.o +obj-$(CONFIG_CADENCE_TTC_TIMER)	+= cadence_ttc_timer.o +obj-$(CONFIG_CLKSRC_EFM32)	+= time-efm32.o +obj-$(CONFIG_CLKSRC_EXYNOS_MCT)	+= exynos_mct.o +obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)	+= samsung_pwm_timer.o +obj-$(CONFIG_FSL_FTM_TIMER)	+= fsl_ftm_timer.o +obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o +obj-$(CONFIG_CLKSRC_QCOM)	+= qcom-timer.o + +obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o +obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o +obj-$(CONFIG_CLKSRC_METAG_GENERIC)	+= metag_generic.o +obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST)	+= dummy_timer.o +obj-$(CONFIG_ARCH_KEYSTONE)		+= timer-keystone.o +obj-$(CONFIG_CLKSRC_VERSATILE)		+= versatile.o diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index cfb0f527841..6eab8898567 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -73,7 +73,7 @@ static struct clocksource clocksource_acpi_pm = {  #ifdef CONFIG_PCI -static int __devinitdata acpi_pm_good; +static int acpi_pm_good;  static int __init acpi_pm_good_setup(char *__str)  {  	acpi_pm_good = 1; @@ -102,7 +102,7 @@ static inline void acpi_pm_need_workaround(void)   * incorrect when read). As a result, the ACPI free running count up   * timer specification is violated due to erroneous reads.   
*/ -static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev) +static void acpi_pm_check_blacklist(struct pci_dev *dev)  {  	if (acpi_pm_good)  		return; @@ -120,7 +120,7 @@ static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)  DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,  			acpi_pm_check_blacklist); -static void __devinit acpi_pm_check_graylist(struct pci_dev *dev) +static void acpi_pm_check_graylist(struct pci_dev *dev)  {  	if (acpi_pm_good)  		return; @@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,  #ifndef CONFIG_X86_64  #include <asm/mach_timer.h>  #define PMTMR_EXPECTED_RATE \ -  ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10)) +  ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (PIT_TICK_RATE>>10))  /*   * Some boards have the PMTMR running way too fast. We check   * the PMTMR rate against PIT channel 2 to catch these cases. @@ -200,19 +200,23 @@ static int __init init_acpi_pm_clocksource(void)  			if ((value2 < value1) && ((value2) < 0xFFF))  				break;  			printk(KERN_INFO "PM-Timer had inconsistent results:" -			       " 0x%#llx, 0x%#llx - aborting.\n", +			       " %#llx, %#llx - aborting.\n",  			       value1, value2); +			pmtmr_ioport = 0;  			return -EINVAL;  		}  		if (i == ACPI_PM_READ_CHECKS) {  			printk(KERN_INFO "PM-Timer failed consistency check " -			       " (0x%#llx) - aborting.\n", value1); +			       " (%#llx) - aborting.\n", value1); +			pmtmr_ioport = 0;  			return -ENODEV;  		}  	} -	if (verify_pmtmr_rate() != 0) +	if (verify_pmtmr_rate() != 0){ +		pmtmr_ioport = 0;  		return -ENODEV; +	}  	return clocksource_register_hz(&clocksource_acpi_pm,  						PMTMR_TICKS_PER_SEC); @@ -229,16 +233,15 @@ fs_initcall(init_acpi_pm_clocksource);   */  static int __init parse_pmtmr(char *arg)  { -	unsigned long base; +	unsigned int base; +	int ret; -	if (strict_strtoul(arg, 16, &base)) -		return -EINVAL; -#ifdef CONFIG_X86_64 -	if (base > UINT_MAX) -		return -ERANGE; -#endif -	printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n", -	       pmtmr_ioport, base); +	ret = kstrtouint(arg, 16, &base); +	if (ret) +		return ret; + +	pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport, +		base);  	pmtmr_ioport = base;  	return 1; diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c new file mode 100644 index 00000000000..5163ec13429 --- /dev/null +++ b/drivers/clocksource/arm_arch_timer.c @@ -0,0 +1,742 @@ +/* + *  linux/drivers/clocksource/arm_arch_timer.c + * + *  Copyright (C) 2011 ARM Ltd. + *  All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/smp.h> +#include <linux/cpu.h> +#include <linux/cpu_pm.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/sched_clock.h> + +#include <asm/arch_timer.h> +#include <asm/virt.h> + +#include <clocksource/arm_arch_timer.h> + +#define CNTTIDR		0x08 +#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4)) + +#define CNTVCT_LO	0x08 +#define CNTVCT_HI	0x0c +#define CNTFRQ		0x10 +#define CNTP_TVAL	0x28 +#define CNTP_CTL	0x2c +#define CNTV_TVAL	0x38 +#define CNTV_CTL	0x3c + +#define ARCH_CP15_TIMER	BIT(0) +#define ARCH_MEM_TIMER	BIT(1) +static unsigned arch_timers_present __initdata; + +static void __iomem *arch_counter_base; + +struct arch_timer { +	void __iomem *base; +	struct clock_event_device evt; +}; + +#define to_arch_timer(e) container_of(e, struct arch_timer, evt) + +static u32 arch_timer_rate; + +enum ppi_nr { +	PHYS_SECURE_PPI, +	PHYS_NONSECURE_PPI, +	VIRT_PPI, +	HYP_PPI, +	MAX_TIMER_PPI +}; + +static int arch_timer_ppi[MAX_TIMER_PPI]; + +static struct clock_event_device __percpu *arch_timer_evt; + +static bool arch_timer_use_virtual = true; +static bool arch_timer_c3stop; +static bool arch_timer_mem_use_virtual; + +/* + * Architected system timer support. + */ + +static __always_inline +void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, +			  struct clock_event_device *clk) +{ +	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { +		struct arch_timer *timer = to_arch_timer(clk); +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			writel_relaxed(val, timer->base + CNTP_CTL); +			break; +		case ARCH_TIMER_REG_TVAL: +			writel_relaxed(val, timer->base + CNTP_TVAL); +			break; +		} +	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { +		struct arch_timer *timer = to_arch_timer(clk); +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			writel_relaxed(val, timer->base + CNTV_CTL); +			break; +		case ARCH_TIMER_REG_TVAL: +			writel_relaxed(val, timer->base + CNTV_TVAL); +			break; +		} +	} else { +		arch_timer_reg_write_cp15(access, reg, val); +	} +} + +static __always_inline +u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, +			struct clock_event_device *clk) +{ +	u32 val; + +	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { +		struct arch_timer *timer = to_arch_timer(clk); +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			val = readl_relaxed(timer->base + CNTP_CTL); +			break; +		case ARCH_TIMER_REG_TVAL: +			val = readl_relaxed(timer->base + CNTP_TVAL); +			break; +		} +	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { +		struct arch_timer *timer = to_arch_timer(clk); +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			val = readl_relaxed(timer->base + CNTV_CTL); +			break; +		case ARCH_TIMER_REG_TVAL: +			val = readl_relaxed(timer->base + CNTV_TVAL); +			break; +		} +	} else { +		val = arch_timer_reg_read_cp15(access, reg); +	} + +	return val; +} + +static __always_inline irqreturn_t timer_handler(const int access, +					struct clock_event_device *evt) +{ +	unsigned long ctrl; + +	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt); +	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { +		ctrl |= ARCH_TIMER_CTRL_IT_MASK; +		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt); +		evt->event_handler(evt); +		return IRQ_HANDLED; +	} + +	return IRQ_NONE; +} + +static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) +{ +	struct 
clock_event_device *evt = dev_id; + +	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); +} + +static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); +} + +static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt); +} + +static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt); +} + +static __always_inline void timer_set_mode(const int access, int mode, +				  struct clock_event_device *clk) +{ +	unsigned long ctrl; +	switch (mode) { +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); +		ctrl &= ~ARCH_TIMER_CTRL_ENABLE; +		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); +		break; +	default: +		break; +	} +} + +static void arch_timer_set_mode_virt(enum clock_event_mode mode, +				     struct clock_event_device *clk) +{ +	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk); +} + +static void arch_timer_set_mode_phys(enum clock_event_mode mode, +				     struct clock_event_device *clk) +{ +	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk); +} + +static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode, +					 struct clock_event_device *clk) +{ +	timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk); +} + +static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode, +					 struct clock_event_device *clk) +{ +	timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk); +} + +static __always_inline void set_next_event(const int access, unsigned long evt, +					   struct clock_event_device *clk) +{ +	unsigned long ctrl; +	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); +	ctrl |= ARCH_TIMER_CTRL_ENABLE; +	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; +	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk); +	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); +} + +static int arch_timer_set_next_event_virt(unsigned long evt, +					  struct clock_event_device *clk) +{ +	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); +	return 0; +} + +static int arch_timer_set_next_event_phys(unsigned long evt, +					  struct clock_event_device *clk) +{ +	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); +	return 0; +} + +static int arch_timer_set_next_event_virt_mem(unsigned long evt, +					      struct clock_event_device *clk) +{ +	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk); +	return 0; +} + +static int arch_timer_set_next_event_phys_mem(unsigned long evt, +					      struct clock_event_device *clk) +{ +	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk); +	return 0; +} + +static void __arch_timer_setup(unsigned type, +			       struct clock_event_device *clk) +{ +	clk->features = CLOCK_EVT_FEAT_ONESHOT; + +	if (type == ARCH_CP15_TIMER) { +		if (arch_timer_c3stop) +			clk->features |= CLOCK_EVT_FEAT_C3STOP; +		clk->name = "arch_sys_timer"; +		clk->rating = 450; +		clk->cpumask = cpumask_of(smp_processor_id()); +		if (arch_timer_use_virtual) { +			clk->irq = arch_timer_ppi[VIRT_PPI]; +			clk->set_mode = arch_timer_set_mode_virt; +			clk->set_next_event = arch_timer_set_next_event_virt; +		} else { +			clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; +			clk->set_mode = arch_timer_set_mode_phys; +			clk->set_next_event = 
arch_timer_set_next_event_phys; +		} +	} else { +		clk->features |= CLOCK_EVT_FEAT_DYNIRQ; +		clk->name = "arch_mem_timer"; +		clk->rating = 400; +		clk->cpumask = cpu_all_mask; +		if (arch_timer_mem_use_virtual) { +			clk->set_mode = arch_timer_set_mode_virt_mem; +			clk->set_next_event = +				arch_timer_set_next_event_virt_mem; +		} else { +			clk->set_mode = arch_timer_set_mode_phys_mem; +			clk->set_next_event = +				arch_timer_set_next_event_phys_mem; +		} +	} + +	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk); + +	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); +} + +static void arch_timer_configure_evtstream(void) +{ +	int evt_stream_div, pos; + +	/* Find the closest power of two to the divisor */ +	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; +	pos = fls(evt_stream_div); +	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2)))) +		pos--; +	/* enable event stream */ +	arch_timer_evtstrm_enable(min(pos, 15)); +} + +static int arch_timer_setup(struct clock_event_device *clk) +{ +	__arch_timer_setup(ARCH_CP15_TIMER, clk); + +	if (arch_timer_use_virtual) +		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); +	else { +		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0); +		if (arch_timer_ppi[PHYS_NONSECURE_PPI]) +			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); +	} + +	arch_counter_set_user_access(); +	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM)) +		arch_timer_configure_evtstream(); + +	return 0; +} + +static void +arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np) +{ +	/* Who has more than one independent system counter? */ +	if (arch_timer_rate) +		return; + +	/* Try to determine the frequency from the device tree or CNTFRQ */ +	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) { +		if (cntbase) +			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ); +		else +			arch_timer_rate = arch_timer_get_cntfrq(); +	} + +	/* Check the timer frequency. */ +	if (arch_timer_rate == 0) +		pr_warn("Architected timer frequency not available\n"); +} + +static void arch_timer_banner(unsigned type) +{ +	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", +		     type & ARCH_CP15_TIMER ? "cp15" : "", +		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  " and " : "", +		     type & ARCH_MEM_TIMER ? "mmio" : "", +		     (unsigned long)arch_timer_rate / 1000000, +		     (unsigned long)(arch_timer_rate / 10000) % 100, +		     type & ARCH_CP15_TIMER ? +			arch_timer_use_virtual ? "virt" : "phys" : +			"", +		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  "/" : "", +		     type & ARCH_MEM_TIMER ? +			arch_timer_mem_use_virtual ? "virt" : "phys" : +			""); +} + +u32 arch_timer_get_rate(void) +{ +	return arch_timer_rate; +} + +static u64 arch_counter_get_cntvct_mem(void) +{ +	u32 vct_lo, vct_hi, tmp_hi; + +	do { +		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); +		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); +		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); +	} while (vct_hi != tmp_hi); + +	return ((u64) vct_hi << 32) | vct_lo; +} + +/* + * Default to cp15 based access because arm64 uses this function for + * sched_clock() before DT is probed and the cp15 method is guaranteed + * to exist on arm64. arm doesn't use this before DT is probed so even + * if we don't have the cp15 accessors we won't have a problem. 
+ */ +u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; + +static cycle_t arch_counter_read(struct clocksource *cs) +{ +	return arch_timer_read_counter(); +} + +static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) +{ +	return arch_timer_read_counter(); +} + +static struct clocksource clocksource_counter = { +	.name	= "arch_sys_counter", +	.rating	= 400, +	.read	= arch_counter_read, +	.mask	= CLOCKSOURCE_MASK(56), +	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP, +}; + +static struct cyclecounter cyclecounter = { +	.read	= arch_counter_read_cc, +	.mask	= CLOCKSOURCE_MASK(56), +}; + +static struct timecounter timecounter; + +struct timecounter *arch_timer_get_timecounter(void) +{ +	return &timecounter; +} + +static void __init arch_counter_register(unsigned type) +{ +	u64 start_count; + +	/* Register the CP15 based counter if we have one */ +	if (type & ARCH_CP15_TIMER) +		arch_timer_read_counter = arch_counter_get_cntvct; +	else +		arch_timer_read_counter = arch_counter_get_cntvct_mem; + +	start_count = arch_timer_read_counter(); +	clocksource_register_hz(&clocksource_counter, arch_timer_rate); +	cyclecounter.mult = clocksource_counter.mult; +	cyclecounter.shift = clocksource_counter.shift; +	timecounter_init(&timecounter, &cyclecounter, start_count); + +	/* 56 bits minimum, so we assume worst case rollover */ +	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate); +} + +static void arch_timer_stop(struct clock_event_device *clk) +{ +	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", +		 clk->irq, smp_processor_id()); + +	if (arch_timer_use_virtual) +		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]); +	else { +		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]); +		if (arch_timer_ppi[PHYS_NONSECURE_PPI]) +			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); +	} + +	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); +} + +static int arch_timer_cpu_notify(struct notifier_block *self, +					   unsigned long action, void *hcpu) +{ +	/* +	 * Grab cpu pointer in each case to avoid spurious +	 * preemptible warnings +	 */ +	switch (action & ~CPU_TASKS_FROZEN) { +	case CPU_STARTING: +		arch_timer_setup(this_cpu_ptr(arch_timer_evt)); +		break; +	case CPU_DYING: +		arch_timer_stop(this_cpu_ptr(arch_timer_evt)); +		break; +	} + +	return NOTIFY_OK; +} + +static struct notifier_block arch_timer_cpu_nb = { +	.notifier_call = arch_timer_cpu_notify, +}; + +#ifdef CONFIG_CPU_PM +static unsigned int saved_cntkctl; +static int arch_timer_cpu_pm_notify(struct notifier_block *self, +				    unsigned long action, void *hcpu) +{ +	if (action == CPU_PM_ENTER) +		saved_cntkctl = arch_timer_get_cntkctl(); +	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) +		arch_timer_set_cntkctl(saved_cntkctl); +	return NOTIFY_OK; +} + +static struct notifier_block arch_timer_cpu_pm_notifier = { +	.notifier_call = arch_timer_cpu_pm_notify, +}; + +static int __init arch_timer_cpu_pm_init(void) +{ +	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); +} +#else +static int __init arch_timer_cpu_pm_init(void) +{ +	return 0; +} +#endif + +static int __init arch_timer_register(void) +{ +	int err; +	int ppi; + +	arch_timer_evt = alloc_percpu(struct clock_event_device); +	if (!arch_timer_evt) { +		err = -ENOMEM; +		goto out; +	} + +	if (arch_timer_use_virtual) { +		ppi = arch_timer_ppi[VIRT_PPI]; +		err = request_percpu_irq(ppi, arch_timer_handler_virt, +					 "arch_timer", arch_timer_evt); +	} else { +		ppi = arch_timer_ppi[PHYS_SECURE_PPI]; +		err 
= request_percpu_irq(ppi, arch_timer_handler_phys, +					 "arch_timer", arch_timer_evt); +		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) { +			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI]; +			err = request_percpu_irq(ppi, arch_timer_handler_phys, +						 "arch_timer", arch_timer_evt); +			if (err) +				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], +						arch_timer_evt); +		} +	} + +	if (err) { +		pr_err("arch_timer: can't register interrupt %d (%d)\n", +		       ppi, err); +		goto out_free; +	} + +	err = register_cpu_notifier(&arch_timer_cpu_nb); +	if (err) +		goto out_free_irq; + +	err = arch_timer_cpu_pm_init(); +	if (err) +		goto out_unreg_notify; + +	/* Immediately configure the timer on the boot CPU */ +	arch_timer_setup(this_cpu_ptr(arch_timer_evt)); + +	return 0; + +out_unreg_notify: +	unregister_cpu_notifier(&arch_timer_cpu_nb); +out_free_irq: +	if (arch_timer_use_virtual) +		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt); +	else { +		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], +				arch_timer_evt); +		if (arch_timer_ppi[PHYS_NONSECURE_PPI]) +			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], +					arch_timer_evt); +	} + +out_free: +	free_percpu(arch_timer_evt); +out: +	return err; +} + +static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) +{ +	int ret; +	irq_handler_t func; +	struct arch_timer *t; + +	t = kzalloc(sizeof(*t), GFP_KERNEL); +	if (!t) +		return -ENOMEM; + +	t->base = base; +	t->evt.irq = irq; +	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt); + +	if (arch_timer_mem_use_virtual) +		func = arch_timer_handler_virt_mem; +	else +		func = arch_timer_handler_phys_mem; + +	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt); +	if (ret) { +		pr_err("arch_timer: Failed to request mem timer irq\n"); +		kfree(t); +	} + +	return ret; +} + +static const struct of_device_id arch_timer_of_match[] __initconst = { +	{ .compatible   = "arm,armv7-timer",    }, +	{ .compatible   = "arm,armv8-timer",    }, +	{}, +}; + +static const struct of_device_id arch_timer_mem_of_match[] __initconst = { +	{ .compatible   = "arm,armv7-timer-mem", }, +	{}, +}; + +static void __init arch_timer_common_init(void) +{ +	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER; + +	/* Wait until both nodes are probed if we have two timers */ +	if ((arch_timers_present & mask) != mask) { +		if (of_find_matching_node(NULL, arch_timer_mem_of_match) && +				!(arch_timers_present & ARCH_MEM_TIMER)) +			return; +		if (of_find_matching_node(NULL, arch_timer_of_match) && +				!(arch_timers_present & ARCH_CP15_TIMER)) +			return; +	} + +	arch_timer_banner(arch_timers_present); +	arch_counter_register(arch_timers_present); +	arch_timer_arch_init(); +} + +static void __init arch_timer_init(struct device_node *np) +{ +	int i; + +	if (arch_timers_present & ARCH_CP15_TIMER) { +		pr_warn("arch_timer: multiple nodes in dt, skipping\n"); +		return; +	} + +	arch_timers_present |= ARCH_CP15_TIMER; +	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) +		arch_timer_ppi[i] = irq_of_parse_and_map(np, i); +	arch_timer_detect_rate(NULL, np); + +	/* +	 * If HYP mode is available, we know that the physical timer +	 * has been configured to be accessible from PL1. Use it, so +	 * that a guest can use the virtual timer instead. +	 * +	 * If no interrupt provided for virtual timer, we'll have to +	 * stick to the physical timer. It'd better be accessible... 
+	 */ +	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) { +		arch_timer_use_virtual = false; + +		if (!arch_timer_ppi[PHYS_SECURE_PPI] || +		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) { +			pr_warn("arch_timer: No interrupt available, giving up\n"); +			return; +		} +	} + +	arch_timer_c3stop = !of_property_read_bool(np, "always-on"); + +	arch_timer_register(); +	arch_timer_common_init(); +} +CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init); +CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init); + +static void __init arch_timer_mem_init(struct device_node *np) +{ +	struct device_node *frame, *best_frame = NULL; +	void __iomem *cntctlbase, *base; +	unsigned int irq; +	u32 cnttidr; + +	arch_timers_present |= ARCH_MEM_TIMER; +	cntctlbase = of_iomap(np, 0); +	if (!cntctlbase) { +		pr_err("arch_timer: Can't find CNTCTLBase\n"); +		return; +	} + +	cnttidr = readl_relaxed(cntctlbase + CNTTIDR); +	iounmap(cntctlbase); + +	/* +	 * Try to find a virtual capable frame. Otherwise fall back to a +	 * physical capable frame. +	 */ +	for_each_available_child_of_node(np, frame) { +		int n; + +		if (of_property_read_u32(frame, "frame-number", &n)) { +			pr_err("arch_timer: Missing frame-number\n"); +			of_node_put(best_frame); +			of_node_put(frame); +			return; +		} + +		if (cnttidr & CNTTIDR_VIRT(n)) { +			of_node_put(best_frame); +			best_frame = frame; +			arch_timer_mem_use_virtual = true; +			break; +		} +		of_node_put(best_frame); +		best_frame = of_node_get(frame); +	} + +	base = arch_counter_base = of_iomap(best_frame, 0); +	if (!base) { +		pr_err("arch_timer: Can't map frame's registers\n"); +		of_node_put(best_frame); +		return; +	} + +	if (arch_timer_mem_use_virtual) +		irq = irq_of_parse_and_map(best_frame, 1); +	else +		irq = irq_of_parse_and_map(best_frame, 0); +	of_node_put(best_frame); +	if (!irq) { +		pr_err("arch_timer: Frame missing %s irq", +		       arch_timer_mem_use_virtual ? "virt" : "phys"); +		return; +	} + +	arch_timer_detect_rate(base, np); +	arch_timer_mem_register(base, irq); +	arch_timer_common_init(); +} +CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", +		       arch_timer_mem_init); diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c new file mode 100644 index 00000000000..60e5a170c4d --- /dev/null +++ b/drivers/clocksource/arm_global_timer.c @@ -0,0 +1,323 @@ +/* + * drivers/clocksource/arm_global_timer.c + * + * Copyright (C) 2013 STMicroelectronics (R&D) Limited. + * Author: Stuart Menefy <stuart.menefy@st.com> + * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/sched_clock.h> + +#include <asm/cputype.h> + +#define GT_COUNTER0	0x00 +#define GT_COUNTER1	0x04 + +#define GT_CONTROL	0x08 +#define GT_CONTROL_TIMER_ENABLE		BIT(0)  /* this bit is NOT banked */ +#define GT_CONTROL_COMP_ENABLE		BIT(1)	/* banked */ +#define GT_CONTROL_IRQ_ENABLE		BIT(2)	/* banked */ +#define GT_CONTROL_AUTO_INC		BIT(3)	/* banked */ + +#define GT_INT_STATUS	0x0c +#define GT_INT_STATUS_EVENT_FLAG	BIT(0) + +#define GT_COMP0	0x10 +#define GT_COMP1	0x14 +#define GT_AUTO_INC	0x18 + +/* + * We are expecting to be clocked by the ARM peripheral clock. + * + * Note: it is assumed we are using a prescaler value of zero, so this is + * the units for all operations. + */ +static void __iomem *gt_base; +static unsigned long gt_clk_rate; +static int gt_ppi; +static struct clock_event_device __percpu *gt_evt; + +/* + * To get the value from the Global Timer Counter register proceed as follows: + * 1. Read the upper 32-bit timer counter register + * 2. Read the lower 32-bit timer counter register + * 3. Read the upper 32-bit timer counter register again. If the value is + *  different to the 32-bit upper value read previously, go back to step 2. + *  Otherwise the 64-bit timer counter value is correct. + */ +static u64 gt_counter_read(void) +{ +	u64 counter; +	u32 lower; +	u32 upper, old_upper; + +	upper = readl_relaxed(gt_base + GT_COUNTER1); +	do { +		old_upper = upper; +		lower = readl_relaxed(gt_base + GT_COUNTER0); +		upper = readl_relaxed(gt_base + GT_COUNTER1); +	} while (upper != old_upper); + +	counter = upper; +	counter <<= 32; +	counter |= lower; +	return counter; +} + +/** + * To ensure that updates to comparator value register do not set the + * Interrupt Status Register proceed as follows: + * 1. Clear the Comp Enable bit in the Timer Control Register. + * 2. Write the lower 32-bit Comparator Value Register. + * 3. Write the upper 32-bit Comparator Value Register. + * 4. Set the Comp Enable bit and, if necessary, the IRQ enable bit. 
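+ * (Step 1 matters because the two 32-bit writes in steps 2 and 3 are
+ * not atomic: with the comparator still enabled, a half-updated 64-bit
+ * compare value could transiently match the counter and raise a
+ * spurious event.)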
+ */ +static void gt_compare_set(unsigned long delta, int periodic) +{ +	u64 counter = gt_counter_read(); +	unsigned long ctrl; + +	counter += delta; +	ctrl = GT_CONTROL_TIMER_ENABLE; +	writel(ctrl, gt_base + GT_CONTROL); +	writel(lower_32_bits(counter), gt_base + GT_COMP0); +	writel(upper_32_bits(counter), gt_base + GT_COMP1); + +	if (periodic) { +		writel(delta, gt_base + GT_AUTO_INC); +		ctrl |= GT_CONTROL_AUTO_INC; +	} + +	ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE; +	writel(ctrl, gt_base + GT_CONTROL); +} + +static void gt_clockevent_set_mode(enum clock_event_mode mode, +				   struct clock_event_device *clk) +{ +	unsigned long ctrl; + +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +		ctrl = readl(gt_base + GT_CONTROL); +		ctrl &= ~(GT_CONTROL_COMP_ENABLE | +				GT_CONTROL_IRQ_ENABLE | GT_CONTROL_AUTO_INC); +		writel(ctrl, gt_base + GT_CONTROL); +		break; +	default: +		break; +	} +} + +static int gt_clockevent_set_next_event(unsigned long evt, +					struct clock_event_device *unused) +{ +	gt_compare_set(evt, 0); +	return 0; +} + +static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	if (!(readl_relaxed(gt_base + GT_INT_STATUS) & +				GT_INT_STATUS_EVENT_FLAG)) +		return IRQ_NONE; + +	/** +	 * ERRATA 740657( Global Timer can send 2 interrupts for +	 * the same event in single-shot mode) +	 * Workaround: +	 *	Either disable single-shot mode. +	 *	Or +	 *	Modify the Interrupt Handler to avoid the +	 *	offending sequence. This is achieved by clearing +	 *	the Global Timer flag _after_ having incremented +	 *	the Comparator register	value to a higher value. 
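+	 * (The handler below takes the second option: gt_compare_set() with
+	 * ULONG_MAX moves the comparator far beyond the counter before the
+	 * event flag is cleared, so the event just handled cannot re-raise
+	 * the interrupt.)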
+	 */ +	if (evt->mode == CLOCK_EVT_MODE_ONESHOT) +		gt_compare_set(ULONG_MAX, 0); + +	writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS); +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static int gt_clockevents_init(struct clock_event_device *clk) +{ +	int cpu = smp_processor_id(); + +	clk->name = "arm_global_timer"; +	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | +		CLOCK_EVT_FEAT_PERCPU; +	clk->set_mode = gt_clockevent_set_mode; +	clk->set_next_event = gt_clockevent_set_next_event; +	clk->cpumask = cpumask_of(cpu); +	clk->rating = 300; +	clk->irq = gt_ppi; +	clockevents_config_and_register(clk, gt_clk_rate, +					1, 0xffffffff); +	enable_percpu_irq(clk->irq, IRQ_TYPE_NONE); +	return 0; +} + +static void gt_clockevents_stop(struct clock_event_device *clk) +{ +	gt_clockevent_set_mode(CLOCK_EVT_MODE_UNUSED, clk); +	disable_percpu_irq(clk->irq); +} + +static cycle_t gt_clocksource_read(struct clocksource *cs) +{ +	return gt_counter_read(); +} + +static struct clocksource gt_clocksource = { +	.name	= "arm_global_timer", +	.rating	= 300, +	.read	= gt_clocksource_read, +	.mask	= CLOCKSOURCE_MASK(64), +	.flags	= CLOCK_SOURCE_IS_CONTINUOUS, +}; + +#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK +static u64 notrace gt_sched_clock_read(void) +{ +	return gt_counter_read(); +} +#endif + +static void __init gt_clocksource_init(void) +{ +	writel(0, gt_base + GT_CONTROL); +	writel(0, gt_base + GT_COUNTER0); +	writel(0, gt_base + GT_COUNTER1); +	/* enables timer on all the cores */ +	writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL); + +#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK +	sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate); +#endif +	clocksource_register_hz(>_clocksource, gt_clk_rate); +} + +static int gt_cpu_notify(struct notifier_block *self, unsigned long action, +			 void *hcpu) +{ +	switch (action & ~CPU_TASKS_FROZEN) { +	case CPU_STARTING: +		gt_clockevents_init(this_cpu_ptr(gt_evt)); +		break; +	case CPU_DYING: +		gt_clockevents_stop(this_cpu_ptr(gt_evt)); +		break; +	} + +	return NOTIFY_OK; +} +static struct notifier_block gt_cpu_nb = { +	.notifier_call = gt_cpu_notify, +}; + +static void __init global_timer_of_register(struct device_node *np) +{ +	struct clk *gt_clk; +	int err = 0; + +	/* +	 * In A9 r2p0 the comparators for each processor with the global timer +	 * fire when the timer value is greater than or equal to. In previous +	 * revisions the comparators fired when the timer value was equal to. 
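+	 * ("Equal to" above means equal to the comparator value. The check
+	 * below masks the MIDR variant field, bits [23:20], and revision
+	 * field, bits [3:0]: 0x200000 is variant 2, revision 0, i.e. r2p0,
+	 * so older silicon is rejected.)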
+	 */ +	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9 +	    && (read_cpuid_id() & 0xf0000f) < 0x200000) { +		pr_warn("global-timer: non support for this cpu version.\n"); +		return; +	} + +	gt_ppi = irq_of_parse_and_map(np, 0); +	if (!gt_ppi) { +		pr_warn("global-timer: unable to parse irq\n"); +		return; +	} + +	gt_base = of_iomap(np, 0); +	if (!gt_base) { +		pr_warn("global-timer: invalid base address\n"); +		return; +	} + +	gt_clk = of_clk_get(np, 0); +	if (!IS_ERR(gt_clk)) { +		err = clk_prepare_enable(gt_clk); +		if (err) +			goto out_unmap; +	} else { +		pr_warn("global-timer: clk not found\n"); +		err = -EINVAL; +		goto out_unmap; +	} + +	gt_clk_rate = clk_get_rate(gt_clk); +	gt_evt = alloc_percpu(struct clock_event_device); +	if (!gt_evt) { +		pr_warn("global-timer: can't allocate memory\n"); +		err = -ENOMEM; +		goto out_clk; +	} + +	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt, +				 "gt", gt_evt); +	if (err) { +		pr_warn("global-timer: can't register interrupt %d (%d)\n", +			gt_ppi, err); +		goto out_free; +	} + +	err = register_cpu_notifier(>_cpu_nb); +	if (err) { +		pr_warn("global-timer: unable to register cpu notifier.\n"); +		goto out_irq; +	} + +	/* Immediately configure the timer on the boot CPU */ +	gt_clocksource_init(); +	gt_clockevents_init(this_cpu_ptr(gt_evt)); + +	return; + +out_irq: +	free_percpu_irq(gt_ppi, gt_evt); +out_free: +	free_percpu(gt_evt); +out_clk: +	clk_disable_unprepare(gt_clk); +out_unmap: +	iounmap(gt_base); +	WARN(err, "ARM Global timer register failed (%d)\n", err); +} + +/* Only tested on r2p2 and r3p0  */ +CLOCKSOURCE_OF_DECLARE(arm_gt, "arm,cortex-a9-global-timer", +			global_timer_of_register); diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c new file mode 100644 index 00000000000..26ed331b1aa --- /dev/null +++ b/drivers/clocksource/bcm2835_timer.c @@ -0,0 +1,148 @@ +/* + * Copyright 2012 Simon Arlott + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA + */ + +#include <linux/bitops.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/interrupt.h> +#include <linux/irqreturn.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/sched_clock.h> + +#include <asm/irq.h> + +#define REG_CONTROL	0x00 +#define REG_COUNTER_LO	0x04 +#define REG_COUNTER_HI	0x08 +#define REG_COMPARE(n)	(0x0c + (n) * 4) +#define MAX_TIMER	3 +#define DEFAULT_TIMER	3 + +struct bcm2835_timer { +	void __iomem *control; +	void __iomem *compare; +	int match_mask; +	struct clock_event_device evt; +	struct irqaction act; +}; + +static void __iomem *system_clock __read_mostly; + +static u64 notrace bcm2835_sched_read(void) +{ +	return readl_relaxed(system_clock); +} + +static void bcm2835_time_set_mode(enum clock_event_mode mode, +	struct clock_event_device *evt_dev) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_ONESHOT: +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_RESUME: +		break; +	default: +		WARN(1, "%s: unhandled event mode %d\n", __func__, mode); +		break; +	} +} + +static int bcm2835_time_set_next_event(unsigned long event, +	struct clock_event_device *evt_dev) +{ +	struct bcm2835_timer *timer = container_of(evt_dev, +		struct bcm2835_timer, evt); +	writel_relaxed(readl_relaxed(system_clock) + event, +		timer->compare); +	return 0; +} + +static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id) +{ +	struct bcm2835_timer *timer = dev_id; +	void (*event_handler)(struct clock_event_device *); +	if (readl_relaxed(timer->control) & timer->match_mask) { +		writel_relaxed(timer->match_mask, timer->control); + +		event_handler = ACCESS_ONCE(timer->evt.event_handler); +		if (event_handler) +			event_handler(&timer->evt); +		return IRQ_HANDLED; +	} else { +		return IRQ_NONE; +	} +} + +static void __init bcm2835_timer_init(struct device_node *node) +{ +	void __iomem *base; +	u32 freq; +	int irq; +	struct bcm2835_timer *timer; + +	base = of_iomap(node, 0); +	if (!base) +		panic("Can't remap registers"); + +	if (of_property_read_u32(node, "clock-frequency", &freq)) +		panic("Can't read clock-frequency"); + +	system_clock = base + REG_COUNTER_LO; +	sched_clock_register(bcm2835_sched_read, 32, freq); + +	clocksource_mmio_init(base + REG_COUNTER_LO, node->name, +		freq, 300, 32, clocksource_mmio_readl_up); + +	irq = irq_of_parse_and_map(node, DEFAULT_TIMER); +	if (irq <= 0) +		panic("Can't parse IRQ"); + +	timer = kzalloc(sizeof(*timer), GFP_KERNEL); +	if (!timer) +		panic("Can't allocate timer struct\n"); + +	timer->control = base + REG_CONTROL; +	timer->compare = base + REG_COMPARE(DEFAULT_TIMER); +	timer->match_mask = BIT(DEFAULT_TIMER); +	timer->evt.name = node->name; +	timer->evt.rating = 300; +	timer->evt.features = CLOCK_EVT_FEAT_ONESHOT; +	timer->evt.set_mode = bcm2835_time_set_mode; +	timer->evt.set_next_event = bcm2835_time_set_next_event; +	timer->evt.cpumask = cpumask_of(0); +	timer->act.name = node->name; +	timer->act.flags = IRQF_TIMER | IRQF_SHARED; +	timer->act.dev_id = timer; +	timer->act.handler = bcm2835_time_interrupt; + +	if (setup_irq(irq, &timer->act)) +		panic("Can't set up timer IRQ\n"); + +	
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); + +	pr_info("bcm2835: system timer (irq = %d)\n", irq); +} +CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer", +			bcm2835_timer_init); diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c new file mode 100644 index 00000000000..0595dc6c453 --- /dev/null +++ b/drivers/clocksource/bcm_kona_timer.c @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2012 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + */ + +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/clockchips.h> +#include <linux/types.h> +#include <linux/clk.h> + +#include <linux/io.h> +#include <asm/mach/time.h> + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + + +#define KONA_GPTIMER_STCS_OFFSET			0x00000000 +#define KONA_GPTIMER_STCLO_OFFSET			0x00000004 +#define KONA_GPTIMER_STCHI_OFFSET			0x00000008 +#define KONA_GPTIMER_STCM0_OFFSET			0x0000000C + +#define KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT		0 +#define KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT		4 + +struct kona_bcm_timers { +	int tmr_irq; +	void __iomem *tmr_regs; +}; + +static struct kona_bcm_timers timers; + +static u32 arch_timer_rate; + +/* + * We use the peripheral timers for system tick, the cpu global timer for + * profile tick + */ +static void kona_timer_disable_and_clear(void __iomem *base) +{ +	uint32_t reg; + +	/* +	 * clear and disable interrupts +	 * We are using compare/match register 0 for our system interrupts +	 */ +	reg = readl(base + KONA_GPTIMER_STCS_OFFSET); + +	/* Clear compare (0) interrupt */ +	reg |= 1 << KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT; +	/* disable compare */ +	reg &= ~(1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT); + +	writel(reg, base + KONA_GPTIMER_STCS_OFFSET); + +} + +static void +kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) +{ +	void __iomem *base = IOMEM(timer_base); +	int loop_limit = 4; + +	/* +	 * Read 64-bit free running counter +	 * 1. Read hi-word +	 * 2. Read low-word +	 * 3. Read hi-word again +	 * 4.1 +	 *      if new hi-word is not equal to previously read hi-word, then +	 *      start from #1 +	 * 4.2 +	 *      if new hi-word is equal to previously read hi-word then stop. +	 */ + +	while (--loop_limit) { +		*msw = readl(base + KONA_GPTIMER_STCHI_OFFSET); +		*lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET); +		if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET)) +			break; +	} +	if (!loop_limit) { +		pr_err("bcm_kona_timer: getting counter failed.\n"); +		pr_err(" Timer will be impacted\n"); +	} + +	return; +} + +static int kona_timer_set_next_event(unsigned long clc, +				  struct clock_event_device *unused) +{ +	/* +	 * timer (0) is disabled by the timer interrupt already +	 * so, here we reload the next event value and re-enable +	 * the timer. +	 * +	 * This way, we are potentially losing the time between +	 * timer-interrupt->set_next_event. CPU local timers, when +	 * they come in should get rid of skew. 
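+	 * (That is, the compare value is programmed relative to the counter
+	 * value read now, not relative to the previous expiry, so any
+	 * interrupt-handling latency stretches the interval slightly.)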
+	 */ + +	uint32_t lsw, msw; +	uint32_t reg; + +	kona_timer_get_counter(timers.tmr_regs, &msw, &lsw); + +	/* Load the "next" event tick value */ +	writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET); + +	/* Enable compare */ +	reg = readl(timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET); +	reg |= (1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT); +	writel(reg, timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET); + +	return 0; +} + +static void kona_timer_set_mode(enum clock_event_mode mode, +			     struct clock_event_device *unused) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_ONESHOT: +		/* the default mode is one-shot; nothing to do */ +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	default: +		kona_timer_disable_and_clear(timers.tmr_regs); +	} +} + +static struct clock_event_device kona_clockevent_timer = { +	.name = "timer 1", +	.features = CLOCK_EVT_FEAT_ONESHOT, +	.set_next_event = kona_timer_set_next_event, +	.set_mode = kona_timer_set_mode +}; + +static void __init kona_timer_clockevents_init(void) +{ +	kona_clockevent_timer.cpumask = cpumask_of(0); +	clockevents_config_and_register(&kona_clockevent_timer, +		arch_timer_rate, 6, 0xffffffff); +} + +static irqreturn_t kona_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = &kona_clockevent_timer; + +	kona_timer_disable_and_clear(timers.tmr_regs); +	evt->event_handler(evt); +	return IRQ_HANDLED; +} + +static struct irqaction kona_timer_irq = { +	.name = "Kona Timer Tick", +	.flags = IRQF_TIMER, +	.handler = kona_timer_interrupt, +}; + +static void __init kona_timer_init(struct device_node *node) +{ +	u32 freq; +	struct clk *external_clk; + +	if (!of_device_is_available(node)) { +		pr_info("Kona Timer v1 marked as disabled in device tree\n"); +		return; +	} + +	external_clk = of_clk_get_by_name(node, NULL); + +	if (!IS_ERR(external_clk)) { +		arch_timer_rate = clk_get_rate(external_clk); +		clk_prepare_enable(external_clk); +	} else if (!of_property_read_u32(node, "clock-frequency", &freq)) { +		arch_timer_rate = freq; +	} else { +		pr_err("Kona Timer v1 unable to determine clock-frequency"); +		return; +	} + +	/* Setup IRQ numbers */ +	timers.tmr_irq = irq_of_parse_and_map(node, 0); + +	/* Setup IO addresses */ +	timers.tmr_regs = of_iomap(node, 0); + +	kona_timer_disable_and_clear(timers.tmr_regs); + +	kona_timer_clockevents_init(); +	setup_irq(timers.tmr_irq, &kona_timer_irq); +	kona_timer_set_next_event((arch_timer_rate / HZ), NULL); +} + +CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init); +/* + * bcm,kona-timer is deprecated in favor of brcm,kona-timer; + * it is kept here for driver compatibility + */ +CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init); diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c new file mode 100644 index 00000000000..7a08811df9a --- /dev/null +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -0,0 +1,515 @@ +/* + * This file contains the driver for the Cadence Triple Timer Counter Rev 06 + * + *  Copyright (C) 2011-2013 Xilinx + * + * based on arch/mips/kernel/time.c timer driver + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/interrupt.h> +#include <linux/clockchips.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/slab.h> +#include <linux/sched_clock.h> + +/* + * This driver configures two of the 16-bit count-up timers as follows: + * + * T1: Timer 1, clocksource for generic timekeeping + * T2: Timer 2, clockevent source for hrtimers + * T3: Timer 3, <unused> + * + * The input frequency to the timer module for emulation is 2.5MHz which is + * common to all the timer channels (T1, T2, and T3). With the pre-scaler of + * 2048 used here (see PRESCALE below), the timers are clocked at about + * 1.22KHz (819.2 us resolution). + * + * The input frequency to the timer module in silicon is configurable and + * obtained from device tree. A pre-scaler of 2048 is used. + */ + +/* + * Timer Register Offset Definitions of Timer 1, Increment base address by 4 + * and use same offsets for Timer 2 + */ +#define TTC_CLK_CNTRL_OFFSET		0x00 /* Clock Control Reg, RW */ +#define TTC_CNT_CNTRL_OFFSET		0x0C /* Counter Control Reg, RW */ +#define TTC_COUNT_VAL_OFFSET		0x18 /* Counter Value Reg, RO */ +#define TTC_INTR_VAL_OFFSET		0x24 /* Interval Count Reg, RW */ +#define TTC_ISR_OFFSET		0x54 /* Interrupt Status Reg, RO */ +#define TTC_IER_OFFSET		0x60 /* Interrupt Enable Reg, RW */ + +#define TTC_CNT_CNTRL_DISABLE_MASK	0x1 + +#define TTC_CLK_CNTRL_CSRC_MASK		(1 << 5)	/* clock source */ +#define TTC_CLK_CNTRL_PSV_MASK		0x1e +#define TTC_CLK_CNTRL_PSV_SHIFT		1 + +/* + * Setup the timers to use pre-scaling, using a fixed value for now that will + * work across most input frequencies, but it may need to be more dynamic + */ +#define PRESCALE_EXPONENT	11	/* 2 ^ PRESCALE_EXPONENT = PRESCALE */ +#define PRESCALE		2048	/* The exponent must match this */ +#define CLK_CNTRL_PRESCALE	((PRESCALE_EXPONENT - 1) << 1) +#define CLK_CNTRL_PRESCALE_EN	1 +#define CNT_CNTRL_RESET		(1 << 4) + +#define MAX_F_ERR 50 + +/** + * struct ttc_timer - Local timer structure + * + * @base_addr:	Base address of timer + * @freq:	Timer input clock frequency + * @clk:	Associated clock source + * @clk_rate_change_nb:	Notifier block for clock rate changes + */ +struct ttc_timer { +	void __iomem *base_addr; +	unsigned long freq; +	struct clk *clk; +	struct notifier_block clk_rate_change_nb; +}; + +#define to_ttc_timer(x) \ +		container_of(x, struct ttc_timer, clk_rate_change_nb) + +struct ttc_timer_clocksource { +	u32			scale_clk_ctrl_reg_old; +	u32			scale_clk_ctrl_reg_new; +	struct ttc_timer	ttc; +	struct clocksource	cs; +}; + +#define to_ttc_timer_clksrc(x) \ +		container_of(x, struct ttc_timer_clocksource, cs) + +struct ttc_timer_clockevent { +	struct ttc_timer		ttc; +	struct clock_event_device	ce; +}; + +#define to_ttc_timer_clkevent(x) \ +		container_of(x, struct ttc_timer_clockevent, ce) + +static void __iomem *ttc_sched_clock_val_reg; + +/** + * ttc_set_interval - Set the timer interval value + * + * @timer:	Pointer to the timer instance + * @cycles:	Timer interval ticks + **/ +static void ttc_set_interval(struct ttc_timer *timer, +					unsigned long cycles) +{ +	u32 ctrl_reg; + +	/* Disable the counter, set the counter value and re-enable counter */ +	ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); +	ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; +	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); + +	writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET); + +	/* +	 * Reset the counter
(0x10) so that it starts from 0, one-shot +	 * mode makes this needed for timing to be right. +	 */ +	ctrl_reg |= CNT_CNTRL_RESET; +	ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; +	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); +} + +/** + * ttc_clock_event_interrupt - Clock event timer interrupt handler + * + * @irq:	IRQ number of the Timer + * @dev_id:	void pointer to the ttc_timer instance + * + * returns: Always IRQ_HANDLED - success + **/ +static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id) +{ +	struct ttc_timer_clockevent *ttce = dev_id; +	struct ttc_timer *timer = &ttce->ttc; + +	/* Acknowledge the interrupt and call event handler */ +	readl_relaxed(timer->base_addr + TTC_ISR_OFFSET); + +	ttce->ce.event_handler(&ttce->ce); + +	return IRQ_HANDLED; +} + +/** + * __ttc_clocksource_read - Reads the timer counter register + * + * returns: Current timer counter register value + **/ +static cycle_t __ttc_clocksource_read(struct clocksource *cs) +{ +	struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; + +	return (cycle_t)readl_relaxed(timer->base_addr + +				TTC_COUNT_VAL_OFFSET); +} + +static u64 notrace ttc_sched_clock_read(void) +{ +	return readl_relaxed(ttc_sched_clock_val_reg); +} + +/** + * ttc_set_next_event - Sets the time interval for next event + * + * @cycles:	Timer interval ticks + * @evt:	Address of clock event instance + * + * returns: Always 0 - success + **/ +static int ttc_set_next_event(unsigned long cycles, +					struct clock_event_device *evt) +{ +	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); +	struct ttc_timer *timer = &ttce->ttc; + +	ttc_set_interval(timer, cycles); +	return 0; +} + +/** + * ttc_set_mode - Sets the mode of timer + * + * @mode:	Mode to be set + * @evt:	Address of clock event instance + **/ +static void ttc_set_mode(enum clock_event_mode mode, +					struct clock_event_device *evt) +{ +	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt); +	struct ttc_timer *timer = &ttce->ttc; +	u32 ctrl_reg; + +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq, +						PRESCALE * HZ)); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +		ctrl_reg = readl_relaxed(timer->base_addr + +					TTC_CNT_CNTRL_OFFSET); +		ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; +		writel_relaxed(ctrl_reg, +				timer->base_addr + TTC_CNT_CNTRL_OFFSET); +		break; +	case CLOCK_EVT_MODE_RESUME: +		ctrl_reg = readl_relaxed(timer->base_addr + +					TTC_CNT_CNTRL_OFFSET); +		ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; +		writel_relaxed(ctrl_reg, +				timer->base_addr + TTC_CNT_CNTRL_OFFSET); +		break; +	} +} + +static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, +		unsigned long event, void *data) +{ +	struct clk_notifier_data *ndata = data; +	struct ttc_timer *ttc = to_ttc_timer(nb); +	struct ttc_timer_clocksource *ttccs = container_of(ttc, +			struct ttc_timer_clocksource, ttc); + +	switch (event) { +	case PRE_RATE_CHANGE: +	{ +		u32 psv; +		unsigned long factor, rate_low, rate_high; + +		if (ndata->new_rate > ndata->old_rate) { +			factor = DIV_ROUND_CLOSEST(ndata->new_rate, +					ndata->old_rate); +			rate_low = ndata->old_rate; +			rate_high = ndata->new_rate; +		} else { +			factor = DIV_ROUND_CLOSEST(ndata->old_rate, +					ndata->new_rate); +			rate_low = ndata->new_rate; +			rate_high = ndata->old_rate; +		} + +		if (!is_power_of_2(factor)) +				return NOTIFY_BAD; + +		if (abs(rate_high - (factor * 
rate_low)) > MAX_F_ERR)
+			return NOTIFY_BAD;
+
+		factor = __ilog2_u32(factor);
+
+		/*
+		 * Store the timer clock ctrl register so we can restore it in
+		 * case of an abort.
+		 */
+		ttccs->scale_clk_ctrl_reg_old =
+			readl_relaxed(ttccs->ttc.base_addr +
+			TTC_CLK_CNTRL_OFFSET);
+
+		psv = (ttccs->scale_clk_ctrl_reg_old &
+				TTC_CLK_CNTRL_PSV_MASK) >>
+				TTC_CLK_CNTRL_PSV_SHIFT;
+		if (ndata->new_rate < ndata->old_rate)
+			psv -= factor;
+		else
+			psv += factor;
+
+		/* prescaler within legal range? */
+		if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
+			return NOTIFY_BAD;
+
+		ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old &
+			~TTC_CLK_CNTRL_PSV_MASK;
+		ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT;
+
+		/* scale down: adjust divider in post-change notification */
+		if (ndata->new_rate < ndata->old_rate)
+			return NOTIFY_DONE;
+
+		/* scale up: adjust divider now - before frequency change */
+		writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
+			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+		break;
+	}
+	case POST_RATE_CHANGE:
+		/* scale up: pre-change notification did the adjustment */
+		if (ndata->new_rate > ndata->old_rate)
+			return NOTIFY_OK;
+
+		/* scale down: adjust divider now - after frequency change */
+		writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
+			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+		break;
+
+	case ABORT_RATE_CHANGE:
+		/* we only have to undo the adjustment if we scaled up */
+		if (ndata->new_rate < ndata->old_rate)
+			return NOTIFY_OK;
+
+		/* restore original register value */
+		writel_relaxed(ttccs->scale_clk_ctrl_reg_old,
+			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+		/* fall through */
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
+{
+	struct ttc_timer_clocksource *ttccs;
+	int err;
+
+	ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
+	if (WARN_ON(!ttccs))
+		return;
+
+	ttccs->ttc.clk = clk;
+
+	err = clk_prepare_enable(ttccs->ttc.clk);
+	if (WARN_ON(err)) {
+		kfree(ttccs);
+		return;
+	}
+
+	ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
+
+	ttccs->ttc.clk_rate_change_nb.notifier_call =
+		ttc_rate_change_clocksource_cb;
+	ttccs->ttc.clk_rate_change_nb.next = NULL;
+	if (clk_notifier_register(ttccs->ttc.clk,
+				&ttccs->ttc.clk_rate_change_nb))
+		pr_warn("Unable to register clock notifier.\n");
+
+	ttccs->ttc.base_addr = base;
+	ttccs->cs.name = "ttc_clocksource";
+	ttccs->cs.rating = 200;
+	ttccs->cs.read = __ttc_clocksource_read;
+	ttccs->cs.mask = CLOCKSOURCE_MASK(16);
+	ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+	/*
+	 * Set up the clocksource counter as an incrementing counter with
+	 * no interrupt; it rolls over at 0xFFFF. Pre-scale it by 2048
+	 * (PRESCALE) and let it start running now. 
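+	 * As a worked example (with an illustrative input rate, not one
+	 * taken from this driver): PRESCALE_EXPONENT = 11 gives a divider
+	 * of 2 ^ 11 = 2048, so a 100 MHz input clock ticks the 16-bit
+	 * counter at ~48.8 kHz and wraps it roughly every 1.34 seconds.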
+	 */
+	writel_relaxed(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
+	writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+		     ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+	writel_relaxed(CNT_CNTRL_RESET,
+		     ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
+
+	err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
+	if (WARN_ON(err)) {
+		kfree(ttccs);
+		return;
+	}
+
+	ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
+	sched_clock_register(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE);
+}
+
+static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct clk_notifier_data *ndata = data;
+	struct ttc_timer *ttc = to_ttc_timer(nb);
+	struct ttc_timer_clockevent *ttcce = container_of(ttc,
+			struct ttc_timer_clockevent, ttc);
+
+	switch (event) {
+	case POST_RATE_CHANGE:
+		/* update cached frequency */
+		ttc->freq = ndata->new_rate;
+
+		clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);
+
+		/* fall through */
+	case PRE_RATE_CHANGE:
+	case ABORT_RATE_CHANGE:
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static void __init ttc_setup_clockevent(struct clk *clk,
+						void __iomem *base, u32 irq)
+{
+	struct ttc_timer_clockevent *ttcce;
+	int err;
+
+	ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
+	if (WARN_ON(!ttcce))
+		return;
+
+	ttcce->ttc.clk = clk;
+
+	err = clk_prepare_enable(ttcce->ttc.clk);
+	if (WARN_ON(err)) {
+		kfree(ttcce);
+		return;
+	}
+
+	ttcce->ttc.clk_rate_change_nb.notifier_call =
+		ttc_rate_change_clockevent_cb;
+	ttcce->ttc.clk_rate_change_nb.next = NULL;
+	if (clk_notifier_register(ttcce->ttc.clk,
+				&ttcce->ttc.clk_rate_change_nb))
+		pr_warn("Unable to register clock notifier.\n");
+	ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
+
+	ttcce->ttc.base_addr = base;
+	ttcce->ce.name = "ttc_clockevent";
+	ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	ttcce->ce.set_next_event = ttc_set_next_event;
+	ttcce->ce.set_mode = ttc_set_mode;
+	ttcce->ce.rating = 200;
+	ttcce->ce.irq = irq;
+	ttcce->ce.cpumask = cpu_possible_mask;
+
+	/*
+	 * Set up the clock event timer as an interval timer which is
+	 * pre-scaled by 2048 (PRESCALE), using the interval interrupt.
+	 * Leave it disabled for now.
+	 */
+	writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
+	writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
+		     ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+	writel_relaxed(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
+
+	err = request_irq(irq, ttc_clock_event_interrupt,
+			  IRQF_TIMER, ttcce->ce.name, ttcce);
+	if (WARN_ON(err)) {
+		kfree(ttcce);
+		return;
+	}
+
+	clockevents_config_and_register(&ttcce->ce,
+			ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
+}
+
+/**
+ * ttc_timer_init - Initialize the timer
+ *
+ * Initializes the timer hardware and registers the clocksource and clockevent
+ * timers with the Linux kernel timer framework
+ */
+static void __init ttc_timer_init(struct device_node *timer)
+{
+	unsigned int irq;
+	void __iomem *timer_baseaddr;
+	struct clk *clk_cs, *clk_ce;
+	static int initialized;
+	int clksel;
+
+	if (initialized)
+		return;
+
+	initialized = 1;
+
+	/*
+	 * Get the 1st Triple Timer Counter (TTC) block from the device tree
+	 * and use it. 
Note that the event timer uses the interrupt and it's the
+	 * 2nd TTC, hence the irq_of_parse_and_map(,1)
+	 */
+	timer_baseaddr = of_iomap(timer, 0);
+	if (!timer_baseaddr) {
+		pr_err("ERROR: invalid timer base address\n");
+		BUG();
+	}
+
+	irq = irq_of_parse_and_map(timer, 1);
+	if (irq <= 0) {
+		pr_err("ERROR: invalid interrupt number\n");
+		BUG();
+	}
+
+	clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET);
+	clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
+	clk_cs = of_clk_get(timer, clksel);
+	if (IS_ERR(clk_cs)) {
+		pr_err("ERROR: timer input clock not found\n");
+		BUG();
+	}
+
+	clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
+	clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
+	clk_ce = of_clk_get(timer, clksel);
+	if (IS_ERR(clk_ce)) {
+		pr_err("ERROR: timer input clock not found\n");
+		BUG();
+	}
+
+	ttc_setup_clocksource(clk_cs, timer_baseaddr);
+	ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+
+	pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+}
+
+CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
new file mode 100644
index 00000000000..b375106844d
--- /dev/null
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson
+ * Author: Sundar Iyer for ST-Ericsson
+ * sched_clock implementation is based on:
+ * plat-nomadik/timer.c Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * DBx500-PRCMU Timer
+ * The PRCMU has 5 timers which are available in an always-on
+ * power domain.  We use Timer 4 as our always-on clock
+ * source on DB8500.
+ */
+#include <linux/clockchips.h>
+#include <linux/clksrc-dbx500-prcmu.h>
+#include <linux/sched_clock.h>
+
+#define RATE_32K		32768
+
+#define TIMER_MODE_CONTINOUS	0x1
+#define TIMER_DOWNCOUNT_VAL	0xffffffff
+
+#define PRCMU_TIMER_REF		0
+#define PRCMU_TIMER_DOWNCOUNT	0x4
+#define PRCMU_TIMER_MODE	0x8
+
+#define SCHED_CLOCK_MIN_WRAP 131072 /* 2^32 / 32768 */
+
+static void __iomem *clksrc_dbx500_timer_base;
+
+static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
+{
+	void __iomem *base = clksrc_dbx500_timer_base;
+	u32 count, count2;
+
+	do {
+		count = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT);
+		count2 = readl_relaxed(base + PRCMU_TIMER_DOWNCOUNT);
+	} while (count2 != count);
+
+	/* Invert because the timer is a decrementing counter */
+	return ~count;
+}
+
+static struct clocksource clocksource_dbx500_prcmu = {
+	.name		= "dbx500-prcmu-timer",
+	.rating		= 300,
+	.read		= clksrc_dbx500_prcmu_read,
+	.mask		= CLOCKSOURCE_MASK(32),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
+
+static u64 notrace dbx500_prcmu_sched_clock_read(void)
+{
+	if (unlikely(!clksrc_dbx500_timer_base))
+		return 0;
+
+	return clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
+}
+
+#endif
+
+void __init clksrc_dbx500_prcmu_init(void __iomem *base)
+{
+	clksrc_dbx500_timer_base = base;
+
+	/*
+	 * The A9 subsystem expects the timer to be configured as
+	 * a continuously looping timer.
+	 * The PRCMU should configure it, but if for some reason
+	 * it doesn't, we do it here. 
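+	 * (The clocksource read path inverts the down-counter, so the
+	 * timekeeping core still sees an increasing cycle count.)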
+	 */ +	if (readl(clksrc_dbx500_timer_base + PRCMU_TIMER_MODE) != +	    TIMER_MODE_CONTINOUS) { +		writel(TIMER_MODE_CONTINOUS, +		       clksrc_dbx500_timer_base + PRCMU_TIMER_MODE); +		writel(TIMER_DOWNCOUNT_VAL, +		       clksrc_dbx500_timer_base + PRCMU_TIMER_REF); +	} +#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK +	sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K); +#endif +	clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K); +} diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c new file mode 100644 index 00000000000..0093a8e49e1 --- /dev/null +++ b/drivers/clocksource/clksrc-of.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program.  If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/init.h> +#include <linux/of.h> +#include <linux/clocksource.h> + +extern struct of_device_id __clksrc_of_table[]; + +static const struct of_device_id __clksrc_of_table_sentinel +	__used __section(__clksrc_of_table_end); + +void __init clocksource_of_init(void) +{ +	struct device_node *np; +	const struct of_device_id *match; +	of_init_fn_1 init_func; +	unsigned clocksources = 0; + +	for_each_matching_node_and_match(np, __clksrc_of_table, &match) { +		if (!of_device_is_available(np)) +			continue; + +		init_func = match->data; +		init_func(np); +		clocksources++; +	} +	if (!clocksources) +		pr_crit("%s: no matching clocksources found\n", __func__); +} diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index b7dab32ce63..db210529089 100644 --- a/drivers/clocksource/cs5535-clockevt.c +++ b/drivers/clocksource/cs5535-clockevt.c @@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock;  #define MFGPT_PERIODIC (MFGPT_HZ / HZ)  /* - * The MFPGT timers on the CS5536 provide us with suitable timers to use + * The MFGPT timers on the CS5536 provide us with suitable timers to use   * as clock event sources - not as good as a HPET or APIC, but certainly   * better than the PIT.  This isn't a general purpose MFGPT driver, but   * a simplified one designed specifically to act as a clock event source. 
@@ -100,8 +100,6 @@ static struct clock_event_device cs5535_clockevent = {  	.set_mode = mfgpt_set_mode,  	.set_next_event = mfgpt_next_event,  	.rating = 250, -	.cpumask = cpu_all_mask, -	.shift = 32  };  static irqreturn_t mfgpt_tick(int irq, void *dev_id) @@ -133,7 +131,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)  static struct irqaction mfgptirq  = {  	.handler = mfgpt_tick, -	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, +	.flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,  	.name = DRV_NAME,  }; @@ -145,7 +143,7 @@ static int __init cs5535_mfgpt_init(void)  	timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);  	if (!timer) { -		printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); +		printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");  		return -ENODEV;  	}  	cs5535_event_clock = timer; @@ -170,17 +168,11 @@ static int __init cs5535_mfgpt_init(void)  	cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);  	/* Set up the clock event */ -	cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, -			cs5535_clockevent.shift); -	cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF, -			&cs5535_clockevent); -	cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE, -			&cs5535_clockevent); -  	printk(KERN_INFO DRV_NAME  		": Registering MFGPT timer as a clock event, using IRQ %d\n",  		timer_irq); -	clockevents_register_device(&cs5535_clockevent); +	clockevents_config_and_register(&cs5535_clockevent, MFGPT_HZ, +					0xF, 0xFFFE);  	return 0; diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c deleted file mode 100644 index 64e528e8bfa..00000000000 --- a/drivers/clocksource/cyclone.c +++ /dev/null @@ -1,119 +0,0 @@ -#include <linux/clocksource.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/timex.h> -#include <linux/init.h> - -#include <asm/pgtable.h> -#include <asm/io.h> - -#include <asm/mach_timer.h> - -#define CYCLONE_CBAR_ADDR	0xFEB00CD0	/* base address ptr */ -#define CYCLONE_PMCC_OFFSET	0x51A0		/* offset to control register */ -#define CYCLONE_MPCS_OFFSET	0x51A8		/* offset to select register */ -#define CYCLONE_MPMC_OFFSET	0x51D0		/* offset to count register */ -#define CYCLONE_TIMER_FREQ	99780000	/* 100Mhz, but not really */ -#define CYCLONE_TIMER_MASK	CLOCKSOURCE_MASK(32) /* 32 bit mask */ - -int use_cyclone = 0; -static void __iomem *cyclone_ptr; - -static cycle_t read_cyclone(struct clocksource *cs) -{ -	return (cycle_t)readl(cyclone_ptr); -} - -static struct clocksource clocksource_cyclone = { -	.name		= "cyclone", -	.rating		= 250, -	.read		= read_cyclone, -	.mask		= CYCLONE_TIMER_MASK, -	.mult		= 10, -	.shift		= 0, -	.flags		= CLOCK_SOURCE_IS_CONTINUOUS, -}; - -static int __init init_cyclone_clocksource(void) -{ -	unsigned long base;	/* saved value from CBAR */ -	unsigned long offset; -	u32 __iomem* volatile cyclone_timer;	/* Cyclone MPMC0 register */ -	u32 __iomem* reg; -	int i; - -	/* make sure we're on a summit box: */ -	if (!use_cyclone) -		return -ENODEV; - -	printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n"); - -	/* find base address: */ -	offset = CYCLONE_CBAR_ADDR; -	reg = ioremap_nocache(offset, sizeof(reg)); -	if (!reg) { -		printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n"); -		return -ENODEV; -	} -	/* even on 64bit systems, this is only 32bits: */ -	base = readl(reg); -	if (!base) { -		printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n"); -		return -ENODEV; -	} -	iounmap(reg); - -	/* 
setup PMCC: */ -	offset = base + CYCLONE_PMCC_OFFSET; -	reg = ioremap_nocache(offset, sizeof(reg)); -	if (!reg) { -		printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n"); -		return -ENODEV; -	} -	writel(0x00000001,reg); -	iounmap(reg); - -	/* setup MPCS: */ -	offset = base + CYCLONE_MPCS_OFFSET; -	reg = ioremap_nocache(offset, sizeof(reg)); -	if (!reg) { -		printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n"); -		return -ENODEV; -	} -	writel(0x00000001,reg); -	iounmap(reg); - -	/* map in cyclone_timer: */ -	offset = base + CYCLONE_MPMC_OFFSET; -	cyclone_timer = ioremap_nocache(offset, sizeof(u64)); -	if (!cyclone_timer) { -		printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n"); -		return -ENODEV; -	} - -	/* quick test to make sure its ticking: */ -	for (i = 0; i < 3; i++){ -		u32 old = readl(cyclone_timer); -		int stall = 100; - -		while (stall--) -			barrier(); - -		if (readl(cyclone_timer) == old) { -			printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n"); -			iounmap(cyclone_timer); -			cyclone_timer = NULL; -			return -ENODEV; -		} -	} -	cyclone_ptr = cyclone_timer; - -	/* sort out mult/shift values: */ -	clocksource_cyclone.shift = 22; -	clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, -						clocksource_cyclone.shift); - -	return clocksource_register(&clocksource_cyclone); -} - -arch_initcall(init_cyclone_clocksource); diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c new file mode 100644 index 00000000000..ad357254172 --- /dev/null +++ b/drivers/clocksource/dummy_timer.c @@ -0,0 +1,74 @@ +/* + *  linux/drivers/clocksource/dummy_timer.c + * + *  Copyright (C) 2013 ARM Ltd. + *  All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/init.h> +#include <linux/percpu.h> +#include <linux/cpumask.h> + +static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt); + +static void dummy_timer_set_mode(enum clock_event_mode mode, +			   struct clock_event_device *evt) +{ +	/* +	 * Core clockevents code will call this when exchanging timer devices. +	 * We don't need to do anything here. 
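+	 * (The device advertises CLOCK_EVT_FEAT_DUMMY, so the core never
+	 * expects it to touch real hardware.)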
+	 */ +} + +static void dummy_timer_setup(void) +{ +	int cpu = smp_processor_id(); +	struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt); + +	evt->name	= "dummy_timer"; +	evt->features	= CLOCK_EVT_FEAT_PERIODIC | +			  CLOCK_EVT_FEAT_ONESHOT | +			  CLOCK_EVT_FEAT_DUMMY; +	evt->rating	= 100; +	evt->set_mode	= dummy_timer_set_mode; +	evt->cpumask	= cpumask_of(cpu); + +	clockevents_register_device(evt); +} + +static int dummy_timer_cpu_notify(struct notifier_block *self, +				      unsigned long action, void *hcpu) +{ +	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) +		dummy_timer_setup(); + +	return NOTIFY_OK; +} + +static struct notifier_block dummy_timer_cpu_nb = { +	.notifier_call = dummy_timer_cpu_notify, +}; + +static int __init dummy_timer_register(void) +{ +	int err = 0; + +	cpu_notifier_register_begin(); +	err = __register_cpu_notifier(&dummy_timer_cpu_nb); +	if (err) +		goto out; + +	/* We won't get a call on the boot CPU, so register immediately */ +	if (num_possible_cpus() > 1) +		dummy_timer_setup(); + +out: +	cpu_notifier_register_done(); +	return err; +} +early_initcall(dummy_timer_register); diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c new file mode 100644 index 00000000000..f3656a6b038 --- /dev/null +++ b/drivers/clocksource/dw_apb_timer.c @@ -0,0 +1,388 @@ +/* + * (C) Copyright 2009 Intel Corporation + * Author: Jacob Pan (jacob.jun.pan@intel.com) + * + * Shared with ARM platforms, Jamie Iles, Picochip 2011 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Support for the Synopsys DesignWare APB Timers. + */ +#include <linux/dw_apb_timer.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/slab.h> + +#define APBT_MIN_PERIOD			4 +#define APBT_MIN_DELTA_USEC		200 + +#define APBTMR_N_LOAD_COUNT		0x00 +#define APBTMR_N_CURRENT_VALUE		0x04 +#define APBTMR_N_CONTROL		0x08 +#define APBTMR_N_EOI			0x0c +#define APBTMR_N_INT_STATUS		0x10 + +#define APBTMRS_INT_STATUS		0xa0 +#define APBTMRS_EOI			0xa4 +#define APBTMRS_RAW_INT_STATUS		0xa8 +#define APBTMRS_COMP_VERSION		0xac + +#define APBTMR_CONTROL_ENABLE		(1 << 0) +/* 1: periodic, 0:free running. */ +#define APBTMR_CONTROL_MODE_PERIODIC	(1 << 1) +#define APBTMR_CONTROL_INT		(1 << 2) + +static inline struct dw_apb_clock_event_device * +ced_to_dw_apb_ced(struct clock_event_device *evt) +{ +	return container_of(evt, struct dw_apb_clock_event_device, ced); +} + +static inline struct dw_apb_clocksource * +clocksource_to_dw_apb_clocksource(struct clocksource *cs) +{ +	return container_of(cs, struct dw_apb_clocksource, cs); +} + +static unsigned long apbt_readl(struct dw_apb_timer *timer, unsigned long offs) +{ +	return readl(timer->base + offs); +} + +static void apbt_writel(struct dw_apb_timer *timer, unsigned long val, +		 unsigned long offs) +{ +	writel(val, timer->base + offs); +} + +static void apbt_disable_int(struct dw_apb_timer *timer) +{ +	unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL); + +	ctrl |= APBTMR_CONTROL_INT; +	apbt_writel(timer, ctrl, APBTMR_N_CONTROL); +} + +/** + * dw_apb_clockevent_pause() - stop the clock_event_device from running + * + * @dw_ced:	The APB clock to stop generating events. 
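+ *
+ * Pairs with dw_apb_clockevent_resume(), which re-enables the timer IRQ.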
+ */ +void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced) +{ +	disable_irq(dw_ced->timer.irq); +	apbt_disable_int(&dw_ced->timer); +} + +static void apbt_eoi(struct dw_apb_timer *timer) +{ +	apbt_readl(timer, APBTMR_N_EOI); +} + +static irqreturn_t dw_apb_clockevent_irq(int irq, void *data) +{ +	struct clock_event_device *evt = data; +	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); + +	if (!evt->event_handler) { +		pr_info("Spurious APBT timer interrupt %d", irq); +		return IRQ_NONE; +	} + +	if (dw_ced->eoi) +		dw_ced->eoi(&dw_ced->timer); + +	evt->event_handler(evt); +	return IRQ_HANDLED; +} + +static void apbt_enable_int(struct dw_apb_timer *timer) +{ +	unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL); +	/* clear pending intr */ +	apbt_readl(timer, APBTMR_N_EOI); +	ctrl &= ~APBTMR_CONTROL_INT; +	apbt_writel(timer, ctrl, APBTMR_N_CONTROL); +} + +static void apbt_set_mode(enum clock_event_mode mode, +			  struct clock_event_device *evt) +{ +	unsigned long ctrl; +	unsigned long period; +	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt); + +	pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask), +		 mode); + +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		period = DIV_ROUND_UP(dw_ced->timer.freq, HZ); +		ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); +		ctrl |= APBTMR_CONTROL_MODE_PERIODIC; +		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); +		/* +		 * DW APB p. 46, have to disable timer before load counter, +		 * may cause sync problem. +		 */ +		ctrl &= ~APBTMR_CONTROL_ENABLE; +		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); +		udelay(1); +		pr_debug("Setting clock period %lu for HZ %d\n", period, HZ); +		apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT); +		ctrl |= APBTMR_CONTROL_ENABLE; +		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); +		break; + +	case CLOCK_EVT_MODE_ONESHOT: +		ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL); +		/* +		 * set free running mode, this mode will let timer reload max +		 * timeout which will give time (3min on 25MHz clock) to rearm +		 * the next event, therefore emulate the one-shot mode. +		 */ +		ctrl &= ~APBTMR_CONTROL_ENABLE; +		ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; + +		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); +		/* write again to set free running mode */ +		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL); + +		/* +		 * DW APB p. 46, load counter with all 1s before starting free +		 * running mode. 
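+		 * (Here "all 1s" is 0xffffffff, the maximum 32-bit count; the
+		 * timer then counts down from that value.)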
+		 */
+		apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
+		ctrl &= ~APBTMR_CONTROL_INT;
+		ctrl |= APBTMR_CONTROL_ENABLE;
+		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+		ctrl &= ~APBTMR_CONTROL_ENABLE;
+		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+		break;
+
+	case CLOCK_EVT_MODE_RESUME:
+		apbt_enable_int(&dw_ced->timer);
+		break;
+	}
+}
+
+static int apbt_next_event(unsigned long delta,
+			   struct clock_event_device *evt)
+{
+	unsigned long ctrl;
+	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+
+	/* Disable timer */
+	ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+	ctrl &= ~APBTMR_CONTROL_ENABLE;
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+	/* write new count */
+	apbt_writel(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT);
+	ctrl |= APBTMR_CONTROL_ENABLE;
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+
+	return 0;
+}
+
+/**
+ * dw_apb_clockevent_init() - use an APB timer as a clock_event_device
+ *
+ * @cpu:	The CPU the events will be targeted at.
+ * @name:	The name used for the timer and the IRQ for it.
+ * @rating:	The rating to give the timer.
+ * @base:	I/O base for the timer registers.
+ * @irq:	The interrupt number to use for the timer.
+ * @freq:	The frequency that the timer counts at.
+ *
+ * This creates a clock_event_device for use with the generic clock layer
+ * but does not start and register it.  This should be done with
+ * dw_apb_clockevent_register() as the next step.  If this is the first time
+ * it has been called for a timer then the IRQ will be requested; if not, the
+ * IRQ will simply be re-enabled, which allows CPU hotplug to avoid repeatedly
+ * requesting and releasing the IRQ.
+ */
+struct dw_apb_clock_event_device *
+dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
+		       void __iomem *base, int irq, unsigned long freq)
+{
+	struct dw_apb_clock_event_device *dw_ced =
+		kzalloc(sizeof(*dw_ced), GFP_KERNEL);
+	int err;
+
+	if (!dw_ced)
+		return NULL;
+
+	dw_ced->timer.base = base;
+	dw_ced->timer.irq = irq;
+	dw_ced->timer.freq = freq;
+
+	clockevents_calc_mult_shift(&dw_ced->ced, freq, APBT_MIN_PERIOD);
+	dw_ced->ced.max_delta_ns = clockevent_delta2ns(0x7fffffff,
+						       &dw_ced->ced);
+	dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
+	dw_ced->ced.cpumask = cpumask_of(cpu);
+	dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	dw_ced->ced.set_mode = apbt_set_mode;
+	dw_ced->ced.set_next_event = apbt_next_event;
+	dw_ced->ced.irq = dw_ced->timer.irq;
+	dw_ced->ced.rating = rating;
+	dw_ced->ced.name = name;
+
+	dw_ced->irqaction.name		= dw_ced->ced.name;
+	dw_ced->irqaction.handler	= dw_apb_clockevent_irq;
+	dw_ced->irqaction.dev_id	= &dw_ced->ced;
+	dw_ced->irqaction.irq		= irq;
+	dw_ced->irqaction.flags		= IRQF_TIMER | IRQF_IRQPOLL |
+					  IRQF_NOBALANCING;
+
+	dw_ced->eoi = apbt_eoi;
+	err = setup_irq(irq, &dw_ced->irqaction);
+	if (err) {
+		pr_err("failed to request timer irq\n");
+		kfree(dw_ced);
+		dw_ced = NULL;
+	}
+
+	return dw_ced;
+}
+
+/**
+ * dw_apb_clockevent_resume() - resume a clock that has been paused.
+ *
+ * @dw_ced:	The APB clock to resume.
+ */
+void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced)
+{
+	enable_irq(dw_ced->timer.irq);
+}
+
+/**
+ * dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ. 
+ * + * @dw_ced:	The APB clock to stop generating the events. + */ +void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced) +{ +	free_irq(dw_ced->timer.irq, &dw_ced->ced); +} + +/** + * dw_apb_clockevent_register() - register the clock with the generic layer + * + * @dw_ced:	The APB clock to register as a clock_event_device. + */ +void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced) +{ +	apbt_writel(&dw_ced->timer, 0, APBTMR_N_CONTROL); +	clockevents_register_device(&dw_ced->ced); +	apbt_enable_int(&dw_ced->timer); +} + +/** + * dw_apb_clocksource_start() - start the clocksource counting. + * + * @dw_cs:	The clocksource to start. + * + * This is used to start the clocksource before registration and can be used + * to enable calibration of timers. + */ +void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs) +{ +	/* +	 * start count down from 0xffff_ffff. this is done by toggling the +	 * enable bit then load initial load count to ~0. +	 */ +	unsigned long ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL); + +	ctrl &= ~APBTMR_CONTROL_ENABLE; +	apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL); +	apbt_writel(&dw_cs->timer, ~0, APBTMR_N_LOAD_COUNT); +	/* enable, mask interrupt */ +	ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC; +	ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT); +	apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL); +	/* read it once to get cached counter value initialized */ +	dw_apb_clocksource_read(dw_cs); +} + +static cycle_t __apbt_read_clocksource(struct clocksource *cs) +{ +	unsigned long current_count; +	struct dw_apb_clocksource *dw_cs = +		clocksource_to_dw_apb_clocksource(cs); + +	current_count = apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); + +	return (cycle_t)~current_count; +} + +static void apbt_restart_clocksource(struct clocksource *cs) +{ +	struct dw_apb_clocksource *dw_cs = +		clocksource_to_dw_apb_clocksource(cs); + +	dw_apb_clocksource_start(dw_cs); +} + +/** + * dw_apb_clocksource_init() - use an APB timer as a clocksource. + * + * @rating:	The rating to give the clocksource. + * @name:	The name for the clocksource. + * @base:	The I/O base for the timer registers. + * @freq:	The frequency that the timer counts at. + * + * This creates a clocksource using an APB timer but does not yet register it + * with the clocksource system.  This should be done with + * dw_apb_clocksource_register() as the next step. + */ +struct dw_apb_clocksource * +dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, +			unsigned long freq) +{ +	struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL); + +	if (!dw_cs) +		return NULL; + +	dw_cs->timer.base = base; +	dw_cs->timer.freq = freq; +	dw_cs->cs.name = name; +	dw_cs->cs.rating = rating; +	dw_cs->cs.read = __apbt_read_clocksource; +	dw_cs->cs.mask = CLOCKSOURCE_MASK(32); +	dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; +	dw_cs->cs.resume = apbt_restart_clocksource; + +	return dw_cs; +} + +/** + * dw_apb_clocksource_register() - register the APB clocksource. + * + * @dw_cs:	The clocksource to register. + */ +void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs) +{ +	clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq); +} + +/** + * dw_apb_clocksource_read() - read the current value of a clocksource. + * + * @dw_cs:	The clocksource to read. 
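+ *
+ * The register value is inverted because the APB timer counts down
+ * while the clocksource core expects an increasing cycle count.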
+ */
+cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
+{
+	return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
+}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
new file mode 100644
index 00000000000..d305fb08976
--- /dev/null
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012 Altera Corporation
+ * Copyright (c) 2011 Picochip Ltd., Jamie Iles
+ *
+ * Modified from mach-picoxcell/time.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/dw_apb_timer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/sched_clock.h>
+
+static void __init timer_get_base_and_rate(struct device_node *np,
+				    void __iomem **base, u32 *rate)
+{
+	struct clk *timer_clk;
+	struct clk *pclk;
+
+	*base = of_iomap(np, 0);
+
+	if (!*base)
+		panic("Unable to map regs for %s", np->name);
+
+	/*
+	 * Not all implementations use a peripheral clock, so don't panic
+	 * if it's not present
+	 */
+	pclk = of_clk_get_by_name(np, "pclk");
+	if (!IS_ERR(pclk))
+		if (clk_prepare_enable(pclk))
+			pr_warn("pclk for %s is present, but could not be activated\n",
+				np->name);
+
+	timer_clk = of_clk_get_by_name(np, "timer");
+	if (IS_ERR(timer_clk))
+		goto try_clock_freq;
+
+	if (!clk_prepare_enable(timer_clk)) {
+		*rate = clk_get_rate(timer_clk);
+		return;
+	}
+
+try_clock_freq:
+	if (of_property_read_u32(np, "clock-freq", rate) &&
+	    of_property_read_u32(np, "clock-frequency", rate))
+		panic("No clock nor clock-frequency property for %s", np->name);
+}
+
+static void __init add_clockevent(struct device_node *event_timer)
+{
+	void __iomem *iobase;
+	struct dw_apb_clock_event_device *ced;
+	u32 irq, rate;
+
+	irq = irq_of_parse_and_map(event_timer, 0);
+	if (irq == 0)
+		panic("No IRQ for clock event timer");
+
+	timer_get_base_and_rate(event_timer, &iobase, &rate);
+
+	ced = dw_apb_clockevent_init(0, event_timer->name, 300, iobase, irq,
+				     rate);
+	if (!ced)
+		panic("Unable to initialise clockevent device");
+
+	dw_apb_clockevent_register(ced);
+}
+
+static void __iomem *sched_io_base;
+static u32 sched_rate;
+
+static void __init add_clocksource(struct device_node *source_timer)
+{
+	void __iomem *iobase;
+	struct dw_apb_clocksource *cs;
+	u32 rate;
+
+	timer_get_base_and_rate(source_timer, &iobase, &rate);
+
+	cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate);
+	if (!cs)
+		panic("Unable to initialise clocksource device");
+
+	dw_apb_clocksource_start(cs);
+	dw_apb_clocksource_register(cs);
+
+	/*
+	 * Fall back to using the clocksource as sched_clock if no separate
+	 * timer is found. sched_io_base then points to the current_value
+	 * register of the clocksource timer. 
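+	 * (current_value is APBTMR_N_CURRENT_VALUE at offset 0x04, hence
+	 * the "+ 0x04" below; read_sched_clock() inverts the value since
+	 * the timer counts down.)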
+	 */ +	sched_io_base = iobase + 0x04; +	sched_rate = rate; +} + +static u64 notrace read_sched_clock(void) +{ +	return ~__raw_readl(sched_io_base); +} + +static const struct of_device_id sptimer_ids[] __initconst = { +	{ .compatible = "picochip,pc3x2-rtc" }, +	{ /* Sentinel */ }, +}; + +static void __init init_sched_clock(void) +{ +	struct device_node *sched_timer; + +	sched_timer = of_find_matching_node(NULL, sptimer_ids); +	if (sched_timer) { +		timer_get_base_and_rate(sched_timer, &sched_io_base, +					&sched_rate); +		of_node_put(sched_timer); +	} + +	sched_clock_register(read_sched_clock, 32, sched_rate); +} + +static int num_called; +static void __init dw_apb_timer_init(struct device_node *timer) +{ +	switch (num_called) { +	case 0: +		pr_debug("%s: found clockevent timer\n", __func__); +		add_clockevent(timer); +		break; +	case 1: +		pr_debug("%s: found clocksource timer\n", __func__); +		add_clocksource(timer); +		init_sched_clock(); +		break; +	default: +		break; +	} + +	num_called++; +} +CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init); +CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init); diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c new file mode 100644 index 00000000000..d0a7bd66b8b --- /dev/null +++ b/drivers/clocksource/em_sti.c @@ -0,0 +1,394 @@ +/* + * Emma Mobile Timer Support - STI + * + *  Copyright (C) 2012 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA + */ + +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/slab.h> +#include <linux/module.h> + +enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR }; + +struct em_sti_priv { +	void __iomem *base; +	struct clk *clk; +	struct platform_device *pdev; +	unsigned int active[USER_NR]; +	unsigned long rate; +	raw_spinlock_t lock; +	struct clock_event_device ced; +	struct clocksource cs; +}; + +#define STI_CONTROL 0x00 +#define STI_COMPA_H 0x10 +#define STI_COMPA_L 0x14 +#define STI_COMPB_H 0x18 +#define STI_COMPB_L 0x1c +#define STI_COUNT_H 0x20 +#define STI_COUNT_L 0x24 +#define STI_COUNT_RAW_H 0x28 +#define STI_COUNT_RAW_L 0x2c +#define STI_SET_H 0x30 +#define STI_SET_L 0x34 +#define STI_INTSTATUS 0x40 +#define STI_INTRAWSTATUS 0x44 +#define STI_INTENSET 0x48 +#define STI_INTENCLR 0x4c +#define STI_INTFFCLR 0x50 + +static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs) +{ +	return ioread32(p->base + offs); +} + +static inline void em_sti_write(struct em_sti_priv *p, int offs, +				unsigned long value) +{ +	iowrite32(value, p->base + offs); +} + +static int em_sti_enable(struct em_sti_priv *p) +{ +	int ret; + +	/* enable clock */ +	ret = clk_prepare_enable(p->clk); +	if (ret) { +		dev_err(&p->pdev->dev, "cannot enable clock\n"); +		return ret; +	} + +	/* configure channel, periodic mode and maximum timeout */ +	p->rate = clk_get_rate(p->clk); + +	/* reset the counter */ +	em_sti_write(p, STI_SET_H, 0x40000000); +	em_sti_write(p, STI_SET_L, 0x00000000); + +	/* mask and clear pending interrupts */ +	em_sti_write(p, STI_INTENCLR, 3); +	em_sti_write(p, STI_INTFFCLR, 3); + +	/* enable updates of counter registers */ +	em_sti_write(p, STI_CONTROL, 1); + +	return 0; +} + +static void em_sti_disable(struct em_sti_priv *p) +{ +	/* mask interrupts */ +	em_sti_write(p, STI_INTENCLR, 3); + +	/* stop clock */ +	clk_disable_unprepare(p->clk); +} + +static cycle_t em_sti_count(struct em_sti_priv *p) +{ +	cycle_t ticks; +	unsigned long flags; + +	/* the STI hardware buffers the 48-bit count, but to +	 * break it out into two 32-bit access the registers +	 * must be accessed in a certain order. +	 * Always read STI_COUNT_H before STI_COUNT_L. 
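+	 * The raw spinlock also keeps the two-part read atomic with
+	 * respect to em_sti_set_next() on the same device.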
+	 */ +	raw_spin_lock_irqsave(&p->lock, flags); +	ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; +	ticks |= em_sti_read(p, STI_COUNT_L); +	raw_spin_unlock_irqrestore(&p->lock, flags); + +	return ticks; +} + +static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) +{ +	unsigned long flags; + +	raw_spin_lock_irqsave(&p->lock, flags); + +	/* mask compare A interrupt */ +	em_sti_write(p, STI_INTENCLR, 1); + +	/* update compare A value */ +	em_sti_write(p, STI_COMPA_H, next >> 32); +	em_sti_write(p, STI_COMPA_L, next & 0xffffffff); + +	/* clear compare A interrupt source */ +	em_sti_write(p, STI_INTFFCLR, 1); + +	/* unmask compare A interrupt */ +	em_sti_write(p, STI_INTENSET, 1); + +	raw_spin_unlock_irqrestore(&p->lock, flags); + +	return next; +} + +static irqreturn_t em_sti_interrupt(int irq, void *dev_id) +{ +	struct em_sti_priv *p = dev_id; + +	p->ced.event_handler(&p->ced); +	return IRQ_HANDLED; +} + +static int em_sti_start(struct em_sti_priv *p, unsigned int user) +{ +	unsigned long flags; +	int used_before; +	int ret = 0; + +	raw_spin_lock_irqsave(&p->lock, flags); +	used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; +	if (!used_before) +		ret = em_sti_enable(p); + +	if (!ret) +		p->active[user] = 1; +	raw_spin_unlock_irqrestore(&p->lock, flags); + +	return ret; +} + +static void em_sti_stop(struct em_sti_priv *p, unsigned int user) +{ +	unsigned long flags; +	int used_before, used_after; + +	raw_spin_lock_irqsave(&p->lock, flags); +	used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; +	p->active[user] = 0; +	used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; + +	if (used_before && !used_after) +		em_sti_disable(p); +	raw_spin_unlock_irqrestore(&p->lock, flags); +} + +static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) +{ +	return container_of(cs, struct em_sti_priv, cs); +} + +static cycle_t em_sti_clocksource_read(struct clocksource *cs) +{ +	return em_sti_count(cs_to_em_sti(cs)); +} + +static int em_sti_clocksource_enable(struct clocksource *cs) +{ +	int ret; +	struct em_sti_priv *p = cs_to_em_sti(cs); + +	ret = em_sti_start(p, USER_CLOCKSOURCE); +	if (!ret) +		__clocksource_updatefreq_hz(cs, p->rate); +	return ret; +} + +static void em_sti_clocksource_disable(struct clocksource *cs) +{ +	em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE); +} + +static void em_sti_clocksource_resume(struct clocksource *cs) +{ +	em_sti_clocksource_enable(cs); +} + +static int em_sti_register_clocksource(struct em_sti_priv *p) +{ +	struct clocksource *cs = &p->cs; + +	memset(cs, 0, sizeof(*cs)); +	cs->name = dev_name(&p->pdev->dev); +	cs->rating = 200; +	cs->read = em_sti_clocksource_read; +	cs->enable = em_sti_clocksource_enable; +	cs->disable = em_sti_clocksource_disable; +	cs->suspend = em_sti_clocksource_disable; +	cs->resume = em_sti_clocksource_resume; +	cs->mask = CLOCKSOURCE_MASK(48); +	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; + +	dev_info(&p->pdev->dev, "used as clock source\n"); + +	/* Register with dummy 1 Hz value, gets updated in ->enable() */ +	clocksource_register_hz(cs, 1); +	return 0; +} + +static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced) +{ +	return container_of(ced, struct em_sti_priv, ced); +} + +static void em_sti_clock_event_mode(enum clock_event_mode mode, +				    struct clock_event_device *ced) +{ +	struct em_sti_priv *p = ced_to_em_sti(ced); + +	/* deal with old setting first */ +	switch (ced->mode) { +	case CLOCK_EVT_MODE_ONESHOT: +		em_sti_stop(p, 
USER_CLOCKEVENT); +		break; +	default: +		break; +	} + +	switch (mode) { +	case CLOCK_EVT_MODE_ONESHOT: +		dev_info(&p->pdev->dev, "used for oneshot clock events\n"); +		em_sti_start(p, USER_CLOCKEVENT); +		clockevents_config(&p->ced, p->rate); +		break; +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_UNUSED: +		em_sti_stop(p, USER_CLOCKEVENT); +		break; +	default: +		break; +	} +} + +static int em_sti_clock_event_next(unsigned long delta, +				   struct clock_event_device *ced) +{ +	struct em_sti_priv *p = ced_to_em_sti(ced); +	cycle_t next; +	int safe; + +	next = em_sti_set_next(p, em_sti_count(p) + delta); +	safe = em_sti_count(p) < (next - 1); + +	return !safe; +} + +static void em_sti_register_clockevent(struct em_sti_priv *p) +{ +	struct clock_event_device *ced = &p->ced; + +	memset(ced, 0, sizeof(*ced)); +	ced->name = dev_name(&p->pdev->dev); +	ced->features = CLOCK_EVT_FEAT_ONESHOT; +	ced->rating = 200; +	ced->cpumask = cpu_possible_mask; +	ced->set_next_event = em_sti_clock_event_next; +	ced->set_mode = em_sti_clock_event_mode; + +	dev_info(&p->pdev->dev, "used for clock events\n"); + +	/* Register with dummy 1 Hz value, gets updated in ->set_mode() */ +	clockevents_config_and_register(ced, 1, 2, 0xffffffff); +} + +static int em_sti_probe(struct platform_device *pdev) +{ +	struct em_sti_priv *p; +	struct resource *res; +	int irq; + +	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); +	if (p == NULL) +		return -ENOMEM; + +	p->pdev = pdev; +	platform_set_drvdata(pdev, p); + +	irq = platform_get_irq(pdev, 0); +	if (irq < 0) { +		dev_err(&pdev->dev, "failed to get irq\n"); +		return -EINVAL; +	} + +	/* map memory, let base point to the STI instance */ +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	p->base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(p->base)) +		return PTR_ERR(p->base); + +	/* get hold of clock */ +	p->clk = devm_clk_get(&pdev->dev, "sclk"); +	if (IS_ERR(p->clk)) { +		dev_err(&pdev->dev, "cannot get clock\n"); +		return PTR_ERR(p->clk); +	} + +	if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt, +			     IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, +			     dev_name(&pdev->dev), p)) { +		dev_err(&pdev->dev, "failed to request low IRQ\n"); +		return -ENOENT; +	} + +	raw_spin_lock_init(&p->lock); +	em_sti_register_clockevent(p); +	em_sti_register_clocksource(p); +	return 0; +} + +static int em_sti_remove(struct platform_device *pdev) +{ +	return -EBUSY; /* cannot unregister clockevent and clocksource */ +} + +static const struct of_device_id em_sti_dt_ids[] = { +	{ .compatible = "renesas,em-sti", }, +	{}, +}; +MODULE_DEVICE_TABLE(of, em_sti_dt_ids); + +static struct platform_driver em_sti_device_driver = { +	.probe		= em_sti_probe, +	.remove		= em_sti_remove, +	.driver		= { +		.name	= "em_sti", +		.of_match_table = em_sti_dt_ids, +	} +}; + +static int __init em_sti_init(void) +{ +	return platform_driver_register(&em_sti_device_driver); +} + +static void __exit em_sti_exit(void) +{ +	platform_driver_unregister(&em_sti_device_driver); +} + +subsys_initcall(em_sti_init); +module_exit(em_sti_exit); + +MODULE_AUTHOR("Magnus Damm"); +MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c new file mode 100644 index 00000000000..ab51bf20a3e --- /dev/null +++ b/drivers/clocksource/exynos_mct.c @@ -0,0 +1,592 @@ +/* linux/arch/arm/mach-exynos4/mct.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
+ *		http://www.samsung.com + * + * EXYNOS4 MCT(Multi-Core Timer) support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/percpu.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/clocksource.h> +#include <linux/sched_clock.h> + +#define EXYNOS4_MCTREG(x)		(x) +#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100) +#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104) +#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110) +#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200) +#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204) +#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208) +#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240) +#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244) +#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248) +#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C) +#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300) +#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * x)) +#define EXYNOS4_MCT_L_MASK		(0xffffff00) + +#define MCT_L_TCNTB_OFFSET		(0x00) +#define MCT_L_ICNTB_OFFSET		(0x08) +#define MCT_L_TCON_OFFSET		(0x20) +#define MCT_L_INT_CSTAT_OFFSET		(0x30) +#define MCT_L_INT_ENB_OFFSET		(0x34) +#define MCT_L_WSTAT_OFFSET		(0x40) +#define MCT_G_TCON_START		(1 << 8) +#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1) +#define MCT_G_TCON_COMP0_ENABLE		(1 << 0) +#define MCT_L_TCON_INTERVAL_MODE	(1 << 2) +#define MCT_L_TCON_INT_START		(1 << 1) +#define MCT_L_TCON_TIMER_START		(1 << 0) + +#define TICK_BASE_CNT	1 + +enum { +	MCT_INT_SPI, +	MCT_INT_PPI +}; + +enum { +	MCT_G0_IRQ, +	MCT_G1_IRQ, +	MCT_G2_IRQ, +	MCT_G3_IRQ, +	MCT_L0_IRQ, +	MCT_L1_IRQ, +	MCT_L2_IRQ, +	MCT_L3_IRQ, +	MCT_L4_IRQ, +	MCT_L5_IRQ, +	MCT_L6_IRQ, +	MCT_L7_IRQ, +	MCT_NR_IRQS, +}; + +static void __iomem *reg_base; +static unsigned long clk_rate; +static unsigned int mct_int_type; +static int mct_irqs[MCT_NR_IRQS]; + +struct mct_clock_event_device { +	struct clock_event_device evt; +	unsigned long base; +	char name[10]; +}; + +static void exynos4_mct_write(unsigned int value, unsigned long offset) +{ +	unsigned long stat_addr; +	u32 mask; +	u32 i; + +	__raw_writel(value, reg_base + offset); + +	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { +		stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; +		switch (offset & EXYNOS4_MCT_L_MASK) { +		case MCT_L_TCON_OFFSET: +			mask = 1 << 3;		/* L_TCON write status */ +			break; +		case MCT_L_ICNTB_OFFSET: +			mask = 1 << 1;		/* L_ICNTB write status */ +			break; +		case MCT_L_TCNTB_OFFSET: +			mask = 1 << 0;		/* L_TCNTB write status */ +			break; +		default: +			return; +		} +	} else { +		switch (offset) { +		case EXYNOS4_MCT_G_TCON: +			stat_addr = EXYNOS4_MCT_G_WSTAT; +			mask = 1 << 16;		/* G_TCON write status */ +			break; +		case EXYNOS4_MCT_G_COMP0_L: +			stat_addr = EXYNOS4_MCT_G_WSTAT; +			mask = 1 << 0;		/* G_COMP0_L write status */ +			break; +		case EXYNOS4_MCT_G_COMP0_U: +			stat_addr = EXYNOS4_MCT_G_WSTAT; +			mask = 1 << 1;		/* G_COMP0_U write status */ +			break; +		case EXYNOS4_MCT_G_COMP0_ADD_INCR: +			stat_addr = EXYNOS4_MCT_G_WSTAT; +			mask = 1 << 2;		/* G_COMP0_ADD_INCR 
w status */ +			break; +		case EXYNOS4_MCT_G_CNT_L: +			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT; +			mask = 1 << 0;		/* G_CNT_L write status */ +			break; +		case EXYNOS4_MCT_G_CNT_U: +			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT; +			mask = 1 << 1;		/* G_CNT_U write status */ +			break; +		default: +			return; +		} +	} + +	/* Wait maximum 1 ms until written values are applied */ +	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++) +		if (__raw_readl(reg_base + stat_addr) & mask) { +			__raw_writel(mask, reg_base + stat_addr); +			return; +		} + +	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset); +} + +/* Clocksource handling */ +static void exynos4_mct_frc_start(void) +{ +	u32 reg; + +	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); +	reg |= MCT_G_TCON_START; +	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); +} + +static cycle_t notrace _exynos4_frc_read(void) +{ +	unsigned int lo, hi; +	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); + +	do { +		hi = hi2; +		lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L); +		hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); +	} while (hi != hi2); + +	return ((cycle_t)hi << 32) | lo; +} + +static cycle_t exynos4_frc_read(struct clocksource *cs) +{ +	return _exynos4_frc_read(); +} + +static void exynos4_frc_resume(struct clocksource *cs) +{ +	exynos4_mct_frc_start(); +} + +struct clocksource mct_frc = { +	.name		= "mct-frc", +	.rating		= 400, +	.read		= exynos4_frc_read, +	.mask		= CLOCKSOURCE_MASK(64), +	.flags		= CLOCK_SOURCE_IS_CONTINUOUS, +	.resume		= exynos4_frc_resume, +}; + +static u64 notrace exynos4_read_sched_clock(void) +{ +	return _exynos4_frc_read(); +} + +static struct delay_timer exynos4_delay_timer; + +static cycles_t exynos4_read_current_timer(void) +{ +	return _exynos4_frc_read(); +} + +static void __init exynos4_clocksource_init(void) +{ +	exynos4_mct_frc_start(); + +	exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer; +	exynos4_delay_timer.freq = clk_rate; +	register_current_timer_delay(&exynos4_delay_timer); + +	if (clocksource_register_hz(&mct_frc, clk_rate)) +		panic("%s: can't register clocksource\n", mct_frc.name); + +	sched_clock_register(exynos4_read_sched_clock, 64, clk_rate); +} + +static void exynos4_mct_comp0_stop(void) +{ +	unsigned int tcon; + +	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); +	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC); + +	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); +	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB); +} + +static void exynos4_mct_comp0_start(enum clock_event_mode mode, +				    unsigned long cycles) +{ +	unsigned int tcon; +	cycle_t comp_cycle; + +	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); + +	if (mode == CLOCK_EVT_MODE_PERIODIC) { +		tcon |= MCT_G_TCON_COMP0_AUTO_INC; +		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR); +	} + +	comp_cycle = exynos4_frc_read(&mct_frc) + cycles; +	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L); +	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U); + +	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB); + +	tcon |= MCT_G_TCON_COMP0_ENABLE; +	exynos4_mct_write(tcon , EXYNOS4_MCT_G_TCON); +} + +static int exynos4_comp_set_next_event(unsigned long cycles, +				       struct clock_event_device *evt) +{ +	exynos4_mct_comp0_start(evt->mode, cycles); + +	return 0; +} + +static void exynos4_comp_set_mode(enum clock_event_mode mode, +				  struct clock_event_device *evt) +{ +	unsigned long cycles_per_jiffy; +	exynos4_mct_comp0_stop(); + +	switch (mode) { +	case 
CLOCK_EVT_MODE_PERIODIC: +		cycles_per_jiffy = +			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); +		exynos4_mct_comp0_start(mode, cycles_per_jiffy); +		break; + +	case CLOCK_EVT_MODE_ONESHOT: +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_RESUME: +		break; +	} +} + +static struct clock_event_device mct_comp_device = { +	.name		= "mct-comp", +	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +	.rating		= 250, +	.set_next_event	= exynos4_comp_set_next_event, +	.set_mode	= exynos4_comp_set_mode, +}; + +static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT); + +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static struct irqaction mct_comp_event_irq = { +	.name		= "mct_comp_irq", +	.flags		= IRQF_TIMER | IRQF_IRQPOLL, +	.handler	= exynos4_mct_comp_isr, +	.dev_id		= &mct_comp_device, +}; + +static void exynos4_clockevent_init(void) +{ +	mct_comp_device.cpumask = cpumask_of(0); +	clockevents_config_and_register(&mct_comp_device, clk_rate, +					0xf, 0xffffffff); +	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); +} + +static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); + +/* Clock event handling */ +static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt) +{ +	unsigned long tmp; +	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START; +	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET; + +	tmp = __raw_readl(reg_base + offset); +	if (tmp & mask) { +		tmp &= ~mask; +		exynos4_mct_write(tmp, offset); +	} +} + +static void exynos4_mct_tick_start(unsigned long cycles, +				   struct mct_clock_event_device *mevt) +{ +	unsigned long tmp; + +	exynos4_mct_tick_stop(mevt); + +	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */ + +	/* update interrupt count buffer */ +	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET); + +	/* enable MCT tick interrupt */ +	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET); + +	tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET); +	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START | +	       MCT_L_TCON_INTERVAL_MODE; +	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); +} + +static int exynos4_tick_set_next_event(unsigned long cycles, +				       struct clock_event_device *evt) +{ +	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); + +	exynos4_mct_tick_start(cycles, mevt); + +	return 0; +} + +static inline void exynos4_tick_set_mode(enum clock_event_mode mode, +					 struct clock_event_device *evt) +{ +	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); +	unsigned long cycles_per_jiffy; + +	exynos4_mct_tick_stop(mevt); + +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		cycles_per_jiffy = +			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); +		exynos4_mct_tick_start(cycles_per_jiffy, mevt); +		break; + +	case CLOCK_EVT_MODE_ONESHOT: +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_RESUME: +		break; +	} +} + +static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) +{ +	struct clock_event_device *evt = &mevt->evt; + +	/* +	 * This is for supporting oneshot mode. +	 * Mct would generate interrupt periodically +	 * without explicit stopping. 
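+	 * In one-shot mode the timer must therefore be stopped here
+	 * before the pending interrupt status is cleared below.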
+	 */ +	if (evt->mode != CLOCK_EVT_MODE_PERIODIC) +		exynos4_mct_tick_stop(mevt); + +	/* Clear the MCT tick interrupt */ +	if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) { +		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); +		return 1; +	} else { +		return 0; +	} +} + +static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) +{ +	struct mct_clock_event_device *mevt = dev_id; +	struct clock_event_device *evt = &mevt->evt; + +	exynos4_mct_tick_clear(mevt); + +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static int exynos4_local_timer_setup(struct clock_event_device *evt) +{ +	struct mct_clock_event_device *mevt; +	unsigned int cpu = smp_processor_id(); + +	mevt = container_of(evt, struct mct_clock_event_device, evt); + +	mevt->base = EXYNOS4_MCT_L_BASE(cpu); +	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); + +	evt->name = mevt->name; +	evt->cpumask = cpumask_of(cpu); +	evt->set_next_event = exynos4_tick_set_next_event; +	evt->set_mode = exynos4_tick_set_mode; +	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; +	evt->rating = 450; + +	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); + +	if (mct_int_type == MCT_INT_SPI) { +		evt->irq = mct_irqs[MCT_L0_IRQ + cpu]; +		if (request_irq(evt->irq, exynos4_mct_tick_isr, +				IRQF_TIMER | IRQF_NOBALANCING, +				evt->name, mevt)) { +			pr_err("exynos-mct: cannot register IRQ %d\n", +				evt->irq); +			return -EIO; +		} +		irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu)); +	} else { +		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); +	} +	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1), +					0xf, 0x7fffffff); + +	return 0; +} + +static void exynos4_local_timer_stop(struct clock_event_device *evt) +{ +	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); +	if (mct_int_type == MCT_INT_SPI) +		free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick)); +	else +		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); +} + +static int exynos4_mct_cpu_notify(struct notifier_block *self, +					   unsigned long action, void *hcpu) +{ +	struct mct_clock_event_device *mevt; + +	/* +	 * Grab cpu pointer in each case to avoid spurious +	 * preemptible warnings +	 */ +	switch (action & ~CPU_TASKS_FROZEN) { +	case CPU_STARTING: +		mevt = this_cpu_ptr(&percpu_mct_tick); +		exynos4_local_timer_setup(&mevt->evt); +		break; +	case CPU_DYING: +		mevt = this_cpu_ptr(&percpu_mct_tick); +		exynos4_local_timer_stop(&mevt->evt); +		break; +	} + +	return NOTIFY_OK; +} + +static struct notifier_block exynos4_mct_cpu_nb = { +	.notifier_call = exynos4_mct_cpu_notify, +}; + +static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) +{ +	int err; +	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); +	struct clk *mct_clk, *tick_clk; + +	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : +				clk_get(NULL, "fin_pll"); +	if (IS_ERR(tick_clk)) +		panic("%s: unable to determine tick clock rate\n", __func__); +	clk_rate = clk_get_rate(tick_clk); + +	mct_clk = np ? 
of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct"); +	if (IS_ERR(mct_clk)) +		panic("%s: unable to retrieve mct clock instance\n", __func__); +	clk_prepare_enable(mct_clk); + +	reg_base = base; +	if (!reg_base) +		panic("%s: unable to ioremap mct address space\n", __func__); + +	if (mct_int_type == MCT_INT_PPI) { + +		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ], +					 exynos4_mct_tick_isr, "MCT", +					 &percpu_mct_tick); +		WARN(err, "MCT: can't request IRQ %d (%d)\n", +		     mct_irqs[MCT_L0_IRQ], err); +	} else { +		irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0)); +	} + +	err = register_cpu_notifier(&exynos4_mct_cpu_nb); +	if (err) +		goto out_irq; + +	/* Immediately configure the timer on the boot CPU */ +	exynos4_local_timer_setup(&mevt->evt); +	return; + +out_irq: +	free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); +} + +void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1) +{ +	mct_irqs[MCT_G0_IRQ] = irq_g0; +	mct_irqs[MCT_L0_IRQ] = irq_l0; +	mct_irqs[MCT_L1_IRQ] = irq_l1; +	mct_int_type = MCT_INT_SPI; + +	exynos4_timer_resources(NULL, base); +	exynos4_clocksource_init(); +	exynos4_clockevent_init(); +} + +static void __init mct_init_dt(struct device_node *np, unsigned int int_type) +{ +	u32 nr_irqs, i; + +	mct_int_type = int_type; + +	/* This driver uses only one global timer interrupt */ +	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); + +	/* +	 * Find out the number of local irqs specified. The local +	 * timer irqs are specified after the four global timer +	 * irqs are specified. +	 */ +#ifdef CONFIG_OF +	nr_irqs = of_irq_count(np); +#else +	nr_irqs = 0; +#endif +	for (i = MCT_L0_IRQ; i < nr_irqs; i++) +		mct_irqs[i] = irq_of_parse_and_map(np, i); + +	exynos4_timer_resources(np, of_iomap(np, 0)); +	exynos4_clocksource_init(); +	exynos4_clockevent_init(); +} + + +static void __init mct_init_spi(struct device_node *np) +{ +	return mct_init_dt(np, MCT_INT_SPI); +} + +static void __init mct_init_ppi(struct device_node *np) +{ +	return mct_init_dt(np, MCT_INT_PPI); +} +CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi); +CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi); diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c new file mode 100644 index 00000000000..454227d4f89 --- /dev/null +++ b/drivers/clocksource/fsl_ftm_timer.c @@ -0,0 +1,367 @@ +/* + * Freescale FlexTimer Module (FTM) timer driver. + * + * Copyright 2014 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define FTM_SC		0x00
+#define FTM_SC_CLK_SHIFT	3
+#define FTM_SC_CLK_MASK	(0x3 << FTM_SC_CLK_SHIFT)
+#define FTM_SC_CLK(c)	((c) << FTM_SC_CLK_SHIFT)
+#define FTM_SC_PS_MASK	0x7
+#define FTM_SC_TOIE	BIT(6)
+#define FTM_SC_TOF	BIT(7)
+
+#define FTM_CNT		0x04
+#define FTM_MOD		0x08
+#define FTM_CNTIN	0x4C
+
+#define FTM_PS_MAX	7
+
+struct ftm_clock_device {
+	void __iomem *clksrc_base;
+	void __iomem *clkevt_base;
+	unsigned long periodic_cyc;
+	unsigned long ps;
+	bool big_endian;
+};
+
+static struct ftm_clock_device *priv;
+
+static inline u32 ftm_readl(void __iomem *addr)
+{
+	if (priv->big_endian)
+		return ioread32be(addr);
+	else
+		return ioread32(addr);
+}
+
+static inline void ftm_writel(u32 val, void __iomem *addr)
+{
+	if (priv->big_endian)
+		iowrite32be(val, addr);
+	else
+		iowrite32(val, addr);
+}
+
+static inline void ftm_counter_enable(void __iomem *base)
+{
+	u32 val;
+
+	/* select and enable counter clock source */
+	val = ftm_readl(base + FTM_SC);
+	val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
+	val |= priv->ps | FTM_SC_CLK(1);
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_counter_disable(void __iomem *base)
+{
+	u32 val;
+
+	/* disable counter clock source */
+	val = ftm_readl(base + FTM_SC);
+	val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK);
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_irq_acknowledge(void __iomem *base)
+{
+	u32 val;
+
+	val = ftm_readl(base + FTM_SC);
+	val &= ~FTM_SC_TOF;
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_irq_enable(void __iomem *base)
+{
+	u32 val;
+
+	val = ftm_readl(base + FTM_SC);
+	val |= FTM_SC_TOIE;
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_irq_disable(void __iomem *base)
+{
+	u32 val;
+
+	val = ftm_readl(base + FTM_SC);
+	val &= ~FTM_SC_TOIE;
+	ftm_writel(val, base + FTM_SC);
+}
+
+static inline void ftm_reset_counter(void __iomem *base)
+{
+	/*
+	 * The CNT register contains the FTM counter value.
+	 * Reset clears the CNT register. Writing any value to CNT
+	 * updates the counter with its initial value, CNTIN.
+	 */
+	ftm_writel(0x00, base + FTM_CNT);
+}
+
+static u64 ftm_read_sched_clock(void)
+{
+	return ftm_readl(priv->clksrc_base + FTM_CNT);
+}
+
+static int ftm_set_next_event(unsigned long delta,
+				struct clock_event_device *unused)
+{
+	/*
+	 * The CNTIN and MOD registers are both double-buffered: writing
+	 * to the MOD register latches the value into a buffer. The MOD
+	 * register is updated with the value of its write buffer in the
+	 * following scenario:
+	 * (a) the counter source clock is disabled.
+	 */
+	ftm_counter_disable(priv->clkevt_base);
+
+	/* Force the value of CNTIN to be loaded into the FTM counter */
+	ftm_reset_counter(priv->clkevt_base);
+
+	/*
+	 * The counter increments until the value of MOD is reached,
+	 * at which point the counter is reloaded with the value of CNTIN.
+	 * The TOF (the overflow flag) bit is set when the FTM counter
+	 * changes from MOD to CNTIN. So we should use delta - 1.
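+	 * For example, with CNTIN = 0 and a requested delta of N cycles,
+	 * writing MOD = N - 1 lets the counter step through 0 .. N - 1 and
+	 * raise TOF on the wrap back to CNTIN, i.e. after exactly N input
+	 * clocks.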
+	 */ +	ftm_writel(delta - 1, priv->clkevt_base + FTM_MOD); + +	ftm_counter_enable(priv->clkevt_base); + +	ftm_irq_enable(priv->clkevt_base); + +	return 0; +} + +static void ftm_set_mode(enum clock_event_mode mode, +				struct clock_event_device *evt) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		ftm_set_next_event(priv->periodic_cyc, evt); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +		ftm_counter_disable(priv->clkevt_base); +		break; +	default: +		return; +	} +} + +static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	ftm_irq_acknowledge(priv->clkevt_base); + +	if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT)) { +		ftm_irq_disable(priv->clkevt_base); +		ftm_counter_disable(priv->clkevt_base); +	} + +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static struct clock_event_device ftm_clockevent = { +	.name		= "Freescale ftm timer", +	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +	.set_mode	= ftm_set_mode, +	.set_next_event	= ftm_set_next_event, +	.rating		= 300, +}; + +static struct irqaction ftm_timer_irq = { +	.name		= "Freescale ftm timer", +	.flags		= IRQF_TIMER | IRQF_IRQPOLL, +	.handler	= ftm_evt_interrupt, +	.dev_id		= &ftm_clockevent, +}; + +static int __init ftm_clockevent_init(unsigned long freq, int irq) +{ +	int err; + +	ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); +	ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); + +	ftm_reset_counter(priv->clkevt_base); + +	err = setup_irq(irq, &ftm_timer_irq); +	if (err) { +		pr_err("ftm: setup irq failed: %d\n", err); +		return err; +	} + +	ftm_clockevent.cpumask = cpumask_of(0); +	ftm_clockevent.irq = irq; + +	clockevents_config_and_register(&ftm_clockevent, +					freq / (1 << priv->ps), +					1, 0xffff); + +	ftm_counter_enable(priv->clkevt_base); + +	return 0; +} + +static int __init ftm_clocksource_init(unsigned long freq) +{ +	int err; + +	ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); +	ftm_writel(~0UL, priv->clksrc_base + FTM_MOD); + +	ftm_reset_counter(priv->clksrc_base); + +	sched_clock_register(ftm_read_sched_clock, 16, freq / (1 << priv->ps)); +	err = clocksource_mmio_init(priv->clksrc_base + FTM_CNT, "fsl-ftm", +				    freq / (1 << priv->ps), 300, 16, +				    clocksource_mmio_readl_up); +	if (err) { +		pr_err("ftm: init clock source mmio failed: %d\n", err); +		return err; +	} + +	ftm_counter_enable(priv->clksrc_base); + +	return 0; +} + +static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, +				 char *ftm_name) +{ +	struct clk *clk; +	int err; + +	clk = of_clk_get_by_name(np, cnt_name); +	if (IS_ERR(clk)) { +		pr_err("ftm: Cannot get \"%s\": %ld\n", cnt_name, PTR_ERR(clk)); +		return PTR_ERR(clk); +	} +	err = clk_prepare_enable(clk); +	if (err) { +		pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n", +			cnt_name, err); +		return err; +	} + +	clk = of_clk_get_by_name(np, ftm_name); +	if (IS_ERR(clk)) { +		pr_err("ftm: Cannot get \"%s\": %ld\n", ftm_name, PTR_ERR(clk)); +		return PTR_ERR(clk); +	} +	err = clk_prepare_enable(clk); +	if (err) +		pr_err("ftm: clock failed to prepare+enable \"%s\": %d\n", +			ftm_name, err); + +	return clk_get_rate(clk); +} + +static unsigned long __init ftm_clk_init(struct device_node *np) +{ +	unsigned long freq; + +	freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); +	if (freq <= 0) +		return 0; + +	freq = __ftm_clk_init(np, "ftm-src-counter-en", "ftm-src"); +	if (freq <= 0) +		return 0; + +	return freq; +} + +static int __init ftm_calc_closest_round_cyc(unsigned long 
freq)
+{
+	priv->ps = 0;
+
+	/*
+	 * The counter register uses only the lower 16 bits, and if the
+	 * 'freq' value is too big here, then periodic_cyc may exceed
+	 * 0xFFFF.
+	 */
+	do {
+		priv->periodic_cyc = DIV_ROUND_CLOSEST(freq,
+						HZ * (1 << priv->ps++));
+	} while (priv->periodic_cyc > 0xFFFF);
+
+	if (priv->ps > FTM_PS_MAX) {
+		pr_err("ftm: the prescaler is %lu > %d\n",
+				priv->ps, FTM_PS_MAX);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void __init ftm_timer_init(struct device_node *np)
+{
+	unsigned long freq;
+	int irq;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return;
+
+	priv->clkevt_base = of_iomap(np, 0);
+	if (!priv->clkevt_base) {
+		pr_err("ftm: unable to map event timer registers\n");
+		goto err;
+	}
+
+	priv->clksrc_base = of_iomap(np, 1);
+	if (!priv->clksrc_base) {
+		pr_err("ftm: unable to map source timer registers\n");
+		goto err;
+	}
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq <= 0) {
+		pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
+		goto err;
+	}
+
+	priv->big_endian = of_property_read_bool(np, "big-endian");
+
+	freq = ftm_clk_init(np);
+	if (!freq)
+		goto err;
+
+	if (ftm_calc_closest_round_cyc(freq))
+		goto err;
+
+	if (ftm_clocksource_init(freq))
+		goto err;
+
+	if (ftm_clockevent_init(freq, irq))
+		goto err;
+
+	return;
+
+err:
+	kfree(priv);
+}
+CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
new file mode 100644
index 00000000000..14ee3efcc40
--- /dev/null
+++ b/drivers/clocksource/i8253.c
@@ -0,0 +1,186 @@
+/*
+ * i8253 PIT clocksource
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/timex.h>
+#include <linux/module.h>
+#include <linux/i8253.h>
+#include <linux/smp.h>
+
+/*
+ * Protects access to I/O ports
+ *
+ * 0040-0043 : timer0, i8253 / i8254
+ * 0061-0061 : NMI Control Register which contains two speaker control bits.
+ */
+DEFINE_RAW_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
+
+#ifdef CONFIG_CLKSRC_I8253
+/*
+ * Since the PIT overflows every tick, it's not very useful
+ * to just read by itself. So use jiffies to emulate a free
+ * running counter:
+ */
+static cycle_t i8253_read(struct clocksource *cs)
+{
+	static int old_count;
+	static u32 old_jifs;
+	unsigned long flags;
+	int count;
+	u32 jifs;
+
+	raw_spin_lock_irqsave(&i8253_lock, flags);
+	/*
+	 * Although our caller may have the read side of jiffies_lock,
+	 * this is now a seqlock, and we are cheating in this routine
+	 * by having side effects on state that we cannot undo if
+	 * there is a collision on the seqlock and our caller has to
+	 * retry.  (Namely, old_jifs and old_count.)  So we must treat
+	 * jiffies as volatile despite the lock.  We read jiffies
+	 * before latching the timer count to guarantee that although
+	 * the jiffies value might be older than the count (that is,
+	 * the counter may underflow between the last point where
+	 * jiffies was incremented and the point where we latch the
+	 * count), it cannot be newer.
+	 */
+	jifs = jiffies;
+	outb_p(0x00, PIT_MODE);	/* latch the count ASAP */
+	count = inb_p(PIT_CH0);	/* read the latched count */
+	count |= inb_p(PIT_CH0) << 8;
+
+	/* VIA686a test code...
reset the latch if count > max + 1 */ +	if (count > PIT_LATCH) { +		outb_p(0x34, PIT_MODE); +		outb_p(PIT_LATCH & 0xff, PIT_CH0); +		outb_p(PIT_LATCH >> 8, PIT_CH0); +		count = PIT_LATCH - 1; +	} + +	/* +	 * It's possible for count to appear to go the wrong way for a +	 * couple of reasons: +	 * +	 *  1. The timer counter underflows, but we haven't handled the +	 *     resulting interrupt and incremented jiffies yet. +	 *  2. Hardware problem with the timer, not giving us continuous time, +	 *     the counter does small "jumps" upwards on some Pentium systems, +	 *     (see c't 95/10 page 335 for Neptun bug.) +	 * +	 * Previous attempts to handle these cases intelligently were +	 * buggy, so we just do the simple thing now. +	 */ +	if (count > old_count && jifs == old_jifs) +		count = old_count; + +	old_count = count; +	old_jifs = jifs; + +	raw_spin_unlock_irqrestore(&i8253_lock, flags); + +	count = (PIT_LATCH - 1) - count; + +	return (cycle_t)(jifs * PIT_LATCH) + count; +} + +static struct clocksource i8253_cs = { +	.name		= "pit", +	.rating		= 110, +	.read		= i8253_read, +	.mask		= CLOCKSOURCE_MASK(32), +}; + +int __init clocksource_i8253_init(void) +{ +	return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE); +} +#endif + +#ifdef CONFIG_CLKEVT_I8253 +/* + * Initialize the PIT timer. + * + * This is also called after resume to bring the PIT into operation again. + */ +static void init_pit_timer(enum clock_event_mode mode, +			   struct clock_event_device *evt) +{ +	raw_spin_lock(&i8253_lock); + +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		/* binary, mode 2, LSB/MSB, ch 0 */ +		outb_p(0x34, PIT_MODE); +		outb_p(PIT_LATCH & 0xff , PIT_CH0);	/* LSB */ +		outb_p(PIT_LATCH >> 8 , PIT_CH0);		/* MSB */ +		break; + +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_UNUSED: +		if (evt->mode == CLOCK_EVT_MODE_PERIODIC || +		    evt->mode == CLOCK_EVT_MODE_ONESHOT) { +			outb_p(0x30, PIT_MODE); +			outb_p(0, PIT_CH0); +			outb_p(0, PIT_CH0); +		} +		break; + +	case CLOCK_EVT_MODE_ONESHOT: +		/* One shot setup */ +		outb_p(0x38, PIT_MODE); +		break; + +	case CLOCK_EVT_MODE_RESUME: +		/* Nothing to do here */ +		break; +	} +	raw_spin_unlock(&i8253_lock); +} + +/* + * Program the next event in oneshot mode + * + * Delta is given in PIT ticks + */ +static int pit_next_event(unsigned long delta, struct clock_event_device *evt) +{ +	raw_spin_lock(&i8253_lock); +	outb_p(delta & 0xff , PIT_CH0);	/* LSB */ +	outb_p(delta >> 8 , PIT_CH0);		/* MSB */ +	raw_spin_unlock(&i8253_lock); + +	return 0; +} + +/* + * On UP the PIT can serve all of the possible timer functions. On SMP systems + * it can be solely used for the global tick. + */ +struct clock_event_device i8253_clockevent = { +	.name		= "pit", +	.features	= CLOCK_EVT_FEAT_PERIODIC, +	.set_mode	= init_pit_timer, +	.set_next_event = pit_next_event, +}; + +/* + * Initialize the conversion factor and the min/max deltas of the clock event + * structure and register the clock event source with the framework. + */ +void __init clockevent_i8253_init(bool oneshot) +{ +	if (oneshot) +		i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT; +	/* +	 * Start pit with the boot cpu mask. x86 might make it global +	 * when it is used as broadcast device later. 
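+	 *
+	 * The limits handed to clockevents_config_and_register() below
+	 * are in PIT ticks: a minimum of 0xF and a maximum of 0x7FFF,
+	 * i.e. roughly half of the 16-bit counter range.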
+	 */ +	i8253_clockevent.cpumask = cpumask_of(smp_processor_id()); + +	clockevents_config_and_register(&i8253_clockevent, PIT_TICK_RATE, +					0xF, 0x7FFF); +} +#endif diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c new file mode 100644 index 00000000000..9e4db41abe3 --- /dev/null +++ b/drivers/clocksource/metag_generic.c @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2005-2013 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program.  If not, see <http://www.gnu.org/licenses/>. + * + * + * Support for Meta per-thread timers. + * + * Meta hardware threads have 2 timers. The background timer (TXTIMER) is used + * as a free-running time base (hz clocksource), and the interrupt timer + * (TXTIMERI) is used for the timer interrupt (clock event). Both counters + * traditionally count at approximately 1MHz. + */ + +#include <clocksource/metag_generic.h> +#include <linux/cpu.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/time.h> +#include <linux/init.h> +#include <linux/proc_fs.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> + +#include <asm/clock.h> +#include <asm/hwthread.h> +#include <asm/core_reg.h> +#include <asm/metag_mem.h> +#include <asm/tbx.h> + +#define HARDWARE_FREQ		1000000	/* 1MHz */ +#define HARDWARE_DIV		1	/* divide by 1 = 1MHz clock */ +#define HARDWARE_TO_NS_SHIFT	10	/* convert ticks to ns */ + +static unsigned int hwtimer_freq = HARDWARE_FREQ; +static DEFINE_PER_CPU(struct clock_event_device, local_clockevent); +static DEFINE_PER_CPU(char [11], local_clockevent_name); + +static int metag_timer_set_next_event(unsigned long delta, +				      struct clock_event_device *dev) +{ +	__core_reg_set(TXTIMERI, -delta); +	return 0; +} + +static void metag_timer_set_mode(enum clock_event_mode mode, +				 struct clock_event_device *evt) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_ONESHOT: +	case CLOCK_EVT_MODE_RESUME: +		break; + +	case CLOCK_EVT_MODE_SHUTDOWN: +		/* We should disable the IRQ here */ +		break; + +	case CLOCK_EVT_MODE_PERIODIC: +	case CLOCK_EVT_MODE_UNUSED: +		WARN_ON(1); +		break; +	}; +} + +static cycle_t metag_clocksource_read(struct clocksource *cs) +{ +	return __core_reg_get(TXTIMER); +} + +static struct clocksource clocksource_metag = { +	.name = "META", +	.rating = 200, +	.mask = CLOCKSOURCE_MASK(32), +	.read = metag_clocksource_read, +	.flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static irqreturn_t metag_timer_interrupt(int irq, void *dummy) +{ +	struct clock_event_device *evt = &__get_cpu_var(local_clockevent); + +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static struct irqaction metag_timer_irq = { +	.name = "META core timer", +	.handler = metag_timer_interrupt, +	.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU, +}; + +unsigned long long sched_clock(void) +{ +	unsigned long long ticks = __core_reg_get(TXTIMER); +	return ticks << HARDWARE_TO_NS_SHIFT; +} + +static void 
arch_timer_setup(unsigned int cpu)
+{
+	unsigned int txdivtime;
+	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
+	char *name = per_cpu(local_clockevent_name, cpu);
+
+	txdivtime = __core_reg_get(TXDIVTIME);
+
+	txdivtime &= ~TXDIVTIME_DIV_BITS;
+	txdivtime |= (HARDWARE_DIV & TXDIVTIME_DIV_BITS);
+
+	__core_reg_set(TXDIVTIME, txdivtime);
+
+	sprintf(name, "META %d", cpu);
+	clk->name = name;
+	clk->features = CLOCK_EVT_FEAT_ONESHOT;
+
+	clk->rating = 200;
+	clk->shift = 12;
+	clk->irq = tbisig_map(TBID_SIGNUM_TRT);
+	clk->set_mode = metag_timer_set_mode;
+	clk->set_next_event = metag_timer_set_next_event;
+
+	clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift);
+	clk->max_delta_ns = clockevent_delta2ns(0x7fffffff, clk);
+	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+	clk->cpumask = cpumask_of(cpu);
+
+	clockevents_register_device(clk);
+
+	/*
+	 * For all non-boot CPUs we need to synchronize our free
+	 * running clock (TXTIMER) with the boot CPU's clock.
+	 *
+	 * While this won't be accurate, it should be close enough.
+	 */
+	if (cpu) {
+		unsigned int thread0 = cpu_2_hwthread_id[0];
+		unsigned long val;
+
+		val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
+		__core_reg_set(TXTIMER, val);
+	}
+}
+
+static int arch_timer_cpu_notify(struct notifier_block *self,
+					   unsigned long action, void *hcpu)
+{
+	int cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		arch_timer_setup(cpu);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_nb = {
+	.notifier_call = arch_timer_cpu_notify,
+};
+
+int __init metag_generic_timer_init(void)
+{
+	/*
+	 * On Meta 2 SoCs, the actual frequency of the timer is based on the
+	 * Meta core clock speed divided by an integer, so it is only
+	 * approximately 1MHz. Calculating the real frequency here drastically
+	 * reduces clock skew on these SoCs.
+	 */
+#ifdef CONFIG_METAG_META21
+	hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
+#endif
+	pr_info("Timer frequency: %u Hz\n", hwtimer_freq);
+
+	clocksource_register_hz(&clocksource_metag, hwtimer_freq);
+
+	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
+
+	/* Configure timer on boot CPU */
+	arch_timer_setup(smp_processor_id());
+
+	/* Hook cpu boot to configure other CPU's timers */
+	register_cpu_notifier(&arch_timer_cpu_nb);
+
+	return 0;
+}
diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c
new file mode 100644
index 00000000000..1593ade2a81
--- /dev/null
+++ b/drivers/clocksource/mmio.c
@@ -0,0 +1,73 @@
+/*
+ * Generic MMIO clocksource support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ +#include <linux/clocksource.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> + +struct clocksource_mmio { +	void __iomem *reg; +	struct clocksource clksrc; +}; + +static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c) +{ +	return container_of(c, struct clocksource_mmio, clksrc); +} + +cycle_t clocksource_mmio_readl_up(struct clocksource *c) +{ +	return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg); +} + +cycle_t clocksource_mmio_readl_down(struct clocksource *c) +{ +	return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; +} + +cycle_t clocksource_mmio_readw_up(struct clocksource *c) +{ +	return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg); +} + +cycle_t clocksource_mmio_readw_down(struct clocksource *c) +{ +	return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; +} + +/** + * clocksource_mmio_init - Initialize a simple mmio based clocksource + * @base:	Virtual address of the clock readout register + * @name:	Name of the clocksource + * @hz:		Frequency of the clocksource in Hz + * @rating:	Rating of the clocksource + * @bits:	Number of valid bits + * @read:	One of clocksource_mmio_read*() above + */ +int __init clocksource_mmio_init(void __iomem *base, const char *name, +	unsigned long hz, int rating, unsigned bits, +	cycle_t (*read)(struct clocksource *)) +{ +	struct clocksource_mmio *cs; + +	if (bits > 32 || bits < 16) +		return -EINVAL; + +	cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL); +	if (!cs) +		return -ENOMEM; + +	cs->reg = base; +	cs->clksrc.name = name; +	cs->clksrc.rating = rating; +	cs->clksrc.read = read; +	cs->clksrc.mask = CLOCKSOURCE_MASK(bits); +	cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + +	return clocksource_register_hz(&cs->clksrc, hz); +} diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c new file mode 100644 index 00000000000..5eb2c35932b --- /dev/null +++ b/drivers/clocksource/moxart_timer.c @@ -0,0 +1,165 @@ +/* + * MOXA ART SoCs timer handling. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2.  This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/io.h> +#include <linux/clocksource.h> +#include <linux/bitops.h> + +#define TIMER1_BASE		0x00 +#define TIMER2_BASE		0x10 +#define TIMER3_BASE		0x20 + +#define REG_COUNT		0x0 /* writable */ +#define REG_LOAD		0x4 +#define REG_MATCH1		0x8 +#define REG_MATCH2		0xC + +#define TIMER_CR		0x30 +#define TIMER_INTR_STATE	0x34 +#define TIMER_INTR_MASK		0x38 + +/* + * TIMER_CR flags: + * + * TIMEREG_CR_*_CLOCK	0: PCLK, 1: EXT1CLK + * TIMEREG_CR_*_INT	overflow interrupt enable bit + */ +#define TIMEREG_CR_1_ENABLE	BIT(0) +#define TIMEREG_CR_1_CLOCK	BIT(1) +#define TIMEREG_CR_1_INT	BIT(2) +#define TIMEREG_CR_2_ENABLE	BIT(3) +#define TIMEREG_CR_2_CLOCK	BIT(4) +#define TIMEREG_CR_2_INT	BIT(5) +#define TIMEREG_CR_3_ENABLE	BIT(6) +#define TIMEREG_CR_3_CLOCK	BIT(7) +#define TIMEREG_CR_3_INT	BIT(8) +#define TIMEREG_CR_COUNT_UP	BIT(9) + +#define TIMER1_ENABLE		(TIMEREG_CR_2_ENABLE | TIMEREG_CR_1_ENABLE) +#define TIMER1_DISABLE		(TIMEREG_CR_2_ENABLE) + +static void __iomem *base; +static unsigned int clock_count_per_tick; + +static void moxart_clkevt_mode(enum clock_event_mode mode, +			       struct clock_event_device *clk) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_RESUME: +	case CLOCK_EVT_MODE_ONESHOT: +		writel(TIMER1_DISABLE, base + TIMER_CR); +		writel(~0, base + TIMER1_BASE + REG_LOAD); +		break; +	case CLOCK_EVT_MODE_PERIODIC: +		writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD); +		writel(TIMER1_ENABLE, base + TIMER_CR); +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	default: +		writel(TIMER1_DISABLE, base + TIMER_CR); +		break; +	} +} + +static int moxart_clkevt_next_event(unsigned long cycles, +				    struct clock_event_device *unused) +{ +	u32 u; + +	writel(TIMER1_DISABLE, base + TIMER_CR); + +	u = readl(base + TIMER1_BASE + REG_COUNT) - cycles; +	writel(u, base + TIMER1_BASE + REG_MATCH1); + +	writel(TIMER1_ENABLE, base + TIMER_CR); + +	return 0; +} + +static struct clock_event_device moxart_clockevent = { +	.name		= "moxart_timer", +	.rating		= 200, +	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +	.set_mode	= moxart_clkevt_mode, +	.set_next_event	= moxart_clkevt_next_event, +}; + +static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; +	evt->event_handler(evt); +	return IRQ_HANDLED; +} + +static struct irqaction moxart_timer_irq = { +	.name		= "moxart-timer", +	.flags		= IRQF_TIMER, +	.handler	= moxart_timer_interrupt, +	.dev_id		= &moxart_clockevent, +}; + +static void __init moxart_timer_init(struct device_node *node) +{ +	int ret, irq; +	unsigned long pclk; +	struct clk *clk; + +	base = of_iomap(node, 0); +	if (!base) +		panic("%s: of_iomap failed\n", node->full_name); + +	irq = irq_of_parse_and_map(node, 0); +	if (irq <= 0) +		panic("%s: irq_of_parse_and_map failed\n", node->full_name); + +	ret = setup_irq(irq, &moxart_timer_irq); +	if (ret) +		panic("%s: setup_irq failed\n", node->full_name); + +	clk = of_clk_get(node, 0); +	if (IS_ERR(clk)) +		panic("%s: of_clk_get failed\n", node->full_name); + +	pclk = clk_get_rate(clk); + +	if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT, +				  "moxart_timer", pclk, 200, 32, +				  clocksource_mmio_readl_down)) +		panic("%s: clocksource_mmio_init failed\n", node->full_name); + +	
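+	/*
+	 * One clockevent period is one jiffy worth of PCLK cycles; with a
+	 * hypothetical 50 MHz APB clock and HZ=100 this works out to
+	 * 500000 counts per tick.
+	 */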
+	clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
+
+	writel(~0, base + TIMER2_BASE + REG_LOAD);
+	writel(TIMEREG_CR_2_ENABLE, base + TIMER_CR);
+
+	moxart_clockevent.cpumask = cpumask_of(0);
+	moxart_clockevent.irq = irq;
+
+	/*
+	 * documentation is not publicly available:
+	 * min_delta / max_delta obtained by trial-and-error,
+	 * max_delta 0xfffffffe should be ok because count
+	 * register size is u32
+	 */
+	clockevents_config_and_register(&moxart_clockevent, pclk,
+					0x4, 0xfffffffe);
+}
+CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
new file mode 100644
index 00000000000..445b68a01dc
--- /dev/null
+++ b/drivers/clocksource/mxs_timer.c
@@ -0,0 +1,304 @@
+/*
+ *  Copyright (C) 2000-2001 Deep Blue Solutions
+ *  Copyright (C) 2002 Shane Nay (shane@minirl.com)
+ *  Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
+ *  Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
+ *  Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/stmp_device.h>
+#include <linux/sched_clock.h>
+
+#include <asm/mach/time.h>
+
+/*
+ * There are 2 versions of the timrot on Freescale MXS-based SoCs.
+ * The v1 on MX23 has only a 16-bit counter, while v2 on MX28
+ * extends the counter to 32 bits.
+ *
+ * The implementation uses two timers, one for clock_event and
+ * another for clocksource. MX28 uses timrot 0 and 1, while MX23
+ * uses 0 and 2.
+ */
+
+#define MX23_TIMROT_VERSION_OFFSET	0x0a0
+#define MX28_TIMROT_VERSION_OFFSET	0x120
+#define BP_TIMROT_MAJOR_VERSION		24
+#define BV_TIMROT_VERSION_1		0x01
+#define BV_TIMROT_VERSION_2		0x02
+#define timrot_is_v1()	(timrot_major_version == BV_TIMROT_VERSION_1)
+
+/*
+ * There are 4 registers for each timrotv2 instance, and 2 registers
+ * for each timrotv1. So the 0x40 address step in the macros below
+ * spans one timrotv2 instance but two timrotv1 instances.
+ *
+ * As a result, HW_TIMROT_XXXn(1) yields the address of timrot1
+ * on MX28 but timrot2 on MX23.
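+ * Concretely, HW_TIMROT_TIMCTRLn(1) is 0x20 + 1 * 0x40 = 0x60, which is
+ * the second v2 instance (timrot1) on MX28 but, with two v1 instances
+ * packed per 0x40, the third v1 timer (timrot2) on MX23.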
+ */
+/* common between v1 and v2 */
+#define HW_TIMROT_ROTCTRL		0x00
+#define HW_TIMROT_TIMCTRLn(n)		(0x20 + (n) * 0x40)
+/* v1 only */
+#define HW_TIMROT_TIMCOUNTn(n)		(0x30 + (n) * 0x40)
+/* v2 only */
+#define HW_TIMROT_RUNNING_COUNTn(n)	(0x30 + (n) * 0x40)
+#define HW_TIMROT_FIXED_COUNTn(n)	(0x40 + (n) * 0x40)
+
+#define BM_TIMROT_TIMCTRLn_RELOAD	(1 << 6)
+#define BM_TIMROT_TIMCTRLn_UPDATE	(1 << 7)
+#define BM_TIMROT_TIMCTRLn_IRQ_EN	(1 << 14)
+#define BM_TIMROT_TIMCTRLn_IRQ		(1 << 15)
+#define BP_TIMROT_TIMCTRLn_SELECT	0
+#define BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL		0x8
+#define BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL		0xb
+#define BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS	0xf
+
+static struct clock_event_device mxs_clockevent_device;
+static enum clock_event_mode mxs_clockevent_mode = CLOCK_EVT_MODE_UNUSED;
+
+static void __iomem *mxs_timrot_base;
+static u32 timrot_major_version;
+
+static inline void timrot_irq_disable(void)
+{
+	__raw_writel(BM_TIMROT_TIMCTRLn_IRQ_EN, mxs_timrot_base +
+		     HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
+}
+
+static inline void timrot_irq_enable(void)
+{
+	__raw_writel(BM_TIMROT_TIMCTRLn_IRQ_EN, mxs_timrot_base +
+		     HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_SET);
+}
+
+static void timrot_irq_acknowledge(void)
+{
+	__raw_writel(BM_TIMROT_TIMCTRLn_IRQ, mxs_timrot_base +
+		     HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
+}
+
+static cycle_t timrotv1_get_cycles(struct clocksource *cs)
+{
+	return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1))
+			& 0xffff0000) >> 16);
+}
+
+static int timrotv1_set_next_event(unsigned long evt,
+					struct clock_event_device *dev)
+{
+	/* timrot decrements the count */
+	__raw_writel(evt, mxs_timrot_base + HW_TIMROT_TIMCOUNTn(0));
+
+	return 0;
+}
+
+static int timrotv2_set_next_event(unsigned long evt,
+					struct clock_event_device *dev)
+{
+	/* timrot decrements the count */
+	__raw_writel(evt, mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(0));
+
+	return 0;
+}
+
+static irqreturn_t mxs_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	timrot_irq_acknowledge();
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction mxs_timer_irq = {
+	.name		= "MXS Timer Tick",
+	.dev_id		= &mxs_clockevent_device,
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
+	.handler	= mxs_timer_interrupt,
+};
+
+#ifdef DEBUG
+static const char * const clock_event_mode_label[] = {
+	[CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
+	[CLOCK_EVT_MODE_ONESHOT]  = "CLOCK_EVT_MODE_ONESHOT",
+	[CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
+	[CLOCK_EVT_MODE_UNUSED]   = "CLOCK_EVT_MODE_UNUSED"
+};
+#endif /* DEBUG */
+
+static void mxs_set_mode(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	/* Disable interrupt in timer module */
+	timrot_irq_disable();
+
+	if (mode != mxs_clockevent_mode) {
+		/* Set event time into the furthest future */
+		if (timrot_is_v1())
+			__raw_writel(0xffff,
+				mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
+		else
+			__raw_writel(0xffffffff,
+				mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
+
+		/* Clear pending interrupt */
+		timrot_irq_acknowledge();
+	}
+
+#ifdef DEBUG
+	pr_info("%s: changing mode from %s to %s\n", __func__,
+		clock_event_mode_label[mxs_clockevent_mode],
+		clock_event_mode_label[mode]);
+#endif /* DEBUG */
+
+	/* Remember timer mode */
+	mxs_clockevent_mode = mode;
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		pr_err("%s: Periodic mode is not implemented\n", __func__);
+		break;
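+	/*
+	 * One-shot is the only mode the timrot clockevent implements: the
+	 * next expiry is armed by timrotv{1,2}_set_next_event() and the
+	 * interrupt, masked at the top of this function, is unmasked again
+	 * in the ONESHOT case below.
+	 */
+	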
case CLOCK_EVT_MODE_ONESHOT: +		timrot_irq_enable(); +		break; +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_RESUME: +		/* Left event sources disabled, no more interrupts appear */ +		break; +	} +} + +static struct clock_event_device mxs_clockevent_device = { +	.name		= "mxs_timrot", +	.features	= CLOCK_EVT_FEAT_ONESHOT, +	.set_mode	= mxs_set_mode, +	.set_next_event	= timrotv2_set_next_event, +	.rating		= 200, +}; + +static int __init mxs_clockevent_init(struct clk *timer_clk) +{ +	if (timrot_is_v1()) +		mxs_clockevent_device.set_next_event = timrotv1_set_next_event; +	mxs_clockevent_device.cpumask = cpumask_of(0); +	clockevents_config_and_register(&mxs_clockevent_device, +					clk_get_rate(timer_clk), +					timrot_is_v1() ? 0xf : 0x2, +					timrot_is_v1() ? 0xfffe : 0xfffffffe); + +	return 0; +} + +static struct clocksource clocksource_mxs = { +	.name		= "mxs_timer", +	.rating		= 200, +	.read		= timrotv1_get_cycles, +	.mask		= CLOCKSOURCE_MASK(16), +	.flags		= CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static u64 notrace mxs_read_sched_clock_v2(void) +{ +	return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1)); +} + +static int __init mxs_clocksource_init(struct clk *timer_clk) +{ +	unsigned int c = clk_get_rate(timer_clk); + +	if (timrot_is_v1()) +		clocksource_register_hz(&clocksource_mxs, c); +	else { +		clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1), +			"mxs_timer", c, 200, 32, clocksource_mmio_readl_down); +		sched_clock_register(mxs_read_sched_clock_v2, 32, c); +	} + +	return 0; +} + +static void __init mxs_timer_init(struct device_node *np) +{ +	struct clk *timer_clk; +	int irq; + +	mxs_timrot_base = of_iomap(np, 0); +	WARN_ON(!mxs_timrot_base); + +	timer_clk = of_clk_get(np, 0); +	if (IS_ERR(timer_clk)) { +		pr_err("%s: failed to get clk\n", __func__); +		return; +	} + +	clk_prepare_enable(timer_clk); + +	/* +	 * Initialize timers to a known state +	 */ +	stmp_reset_block(mxs_timrot_base + HW_TIMROT_ROTCTRL); + +	/* get timrot version */ +	timrot_major_version = __raw_readl(mxs_timrot_base + +			(of_device_is_compatible(np, "fsl,imx23-timrot") ? +						MX23_TIMROT_VERSION_OFFSET : +						MX28_TIMROT_VERSION_OFFSET)); +	timrot_major_version >>= BP_TIMROT_MAJOR_VERSION; + +	/* one for clock_event */ +	__raw_writel((timrot_is_v1() ? +			BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL : +			BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) | +			BM_TIMROT_TIMCTRLn_UPDATE | +			BM_TIMROT_TIMCTRLn_IRQ_EN, +			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0)); + +	/* another for clocksource */ +	__raw_writel((timrot_is_v1() ? 
+			BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
+			BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) |
+			BM_TIMROT_TIMCTRLn_RELOAD,
+			mxs_timrot_base + HW_TIMROT_TIMCTRLn(1));
+
+	/* set clocksource timer fixed count to the maximum */
+	if (timrot_is_v1())
+		__raw_writel(0xffff,
+			mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
+	else
+		__raw_writel(0xffffffff,
+			mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
+
+	/* initialize and register the timer with the framework */
+	mxs_clocksource_init(timer_clk);
+	mxs_clockevent_init(timer_clk);
+
+	/* Make irqs happen */
+	irq = irq_of_parse_and_map(np, 0);
+	setup_irq(irq, &mxs_timer_irq);
+}
+CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
new file mode 100644
index 00000000000..a709cfa49d8
--- /dev/null
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2008 STMicroelectronics
+ * Copyright (C) 2010 Alessandro Rubini
+ * Copyright (C) 2010 Linus Walleij for ST-Ericsson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/sched_clock.h>
+#include <asm/mach/time.h>
+
+/*
+ * The MTU device hosts four different counters, with 4 sets of
+ * registers. These are the register names.
+ */
+
+#define MTU_IMSC	0x00	/* Interrupt mask set/clear */
+#define MTU_RIS		0x04	/* Raw interrupt status */
+#define MTU_MIS		0x08	/* Masked interrupt status */
+#define MTU_ICR		0x0C	/* Interrupt clear register */
+
+/* per-timer registers take 0..3 as argument */
+#define MTU_LR(x)	(0x10 + 0x10 * (x) + 0x00)	/* Load value */
+#define MTU_VAL(x)	(0x10 + 0x10 * (x) + 0x04)	/* Current value */
+#define MTU_CR(x)	(0x10 + 0x10 * (x) + 0x08)	/* Control reg */
+#define MTU_BGLR(x)	(0x10 + 0x10 * (x) + 0x0c)	/* At next overflow */
+
+/* bits for the control register */
+#define MTU_CRn_ENA		0x80
+#define MTU_CRn_PERIODIC	0x40	/* if 0 = free-running */
+#define MTU_CRn_PRESCALE_MASK	0x0c
+#define MTU_CRn_PRESCALE_1		0x00
+#define MTU_CRn_PRESCALE_16		0x04
+#define MTU_CRn_PRESCALE_256		0x08
+#define MTU_CRn_32BITS		0x02
+#define MTU_CRn_ONESHOT		0x01	/* if 0 = wraps reloading from BGLR*/
+
+/* Other registers are usual amba/primecell registers, currently not used */
+#define MTU_ITCR	0xff0
+#define MTU_ITOP	0xff4
+
+#define MTU_PERIPH_ID0	0xfe0
+#define MTU_PERIPH_ID1	0xfe4
+#define MTU_PERIPH_ID2	0xfe8
+#define MTU_PERIPH_ID3	0xfeC
+
+#define MTU_PCELL0	0xff0
+#define MTU_PCELL1	0xff4
+#define MTU_PCELL2	0xff8
+#define MTU_PCELL3	0xffC
+
+static void __iomem *mtu_base;
+static bool clkevt_periodic;
+static u32 clk_prescale;
+static u32 nmdk_cycle;		/* write-once */
+static struct delay_timer mtu_delay_timer;
+
+#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
+/*
+ * Override the global weak sched_clock symbol with this
+ * local implementation which uses the clocksource to get some
+ * better resolution when scheduling the kernel.
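+ * Note that the MTU counts down, which is why the readout is negated
+ * below to obtain a monotonically increasing value.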
+ */
+static u64 notrace nomadik_read_sched_clock(void)
+{
+	if (unlikely(!mtu_base))
+		return 0;
+
+	return -readl(mtu_base + MTU_VAL(0));
+}
+#endif
+
+static unsigned long nmdk_timer_read_current_timer(void)
+{
+	return ~readl_relaxed(mtu_base + MTU_VAL(0));
+}
+
+/* Clockevent device: use one-shot mode */
+static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
+{
+	writel(1 << 1, mtu_base + MTU_IMSC);
+	writel(evt, mtu_base + MTU_LR(1));
+	/* Load highest value, enable device, enable interrupts */
+	writel(MTU_CRn_ONESHOT | clk_prescale |
+	       MTU_CRn_32BITS | MTU_CRn_ENA,
+	       mtu_base + MTU_CR(1));
+
+	return 0;
+}
+
+static void nmdk_clkevt_reset(void)
+{
+	if (clkevt_periodic) {
+		/* Timer: configure load and background-load, and fire it up */
+		writel(nmdk_cycle, mtu_base + MTU_LR(1));
+		writel(nmdk_cycle, mtu_base + MTU_BGLR(1));
+
+		writel(MTU_CRn_PERIODIC | clk_prescale |
+		       MTU_CRn_32BITS | MTU_CRn_ENA,
+		       mtu_base + MTU_CR(1));
+		writel(1 << 1, mtu_base + MTU_IMSC);
+	} else {
+		/* Generate an interrupt to start the clockevent again */
+		(void) nmdk_clkevt_next(nmdk_cycle, NULL);
+	}
+}
+
+static void nmdk_clkevt_mode(enum clock_event_mode mode,
+			     struct clock_event_device *dev)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		clkevt_periodic = true;
+		nmdk_clkevt_reset();
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		clkevt_periodic = false;
+		break;
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_UNUSED:
+		writel(0, mtu_base + MTU_IMSC);
+		/* disable timer */
+		writel(0, mtu_base + MTU_CR(1));
+		/* load some high default value */
+		writel(0xffffffff, mtu_base + MTU_LR(1));
+		break;
+	case CLOCK_EVT_MODE_RESUME:
+		break;
+	}
+}
+
+static void nmdk_clksrc_reset(void)
+{
+	/* Disable */
+	writel(0, mtu_base + MTU_CR(0));
+
+	/* ClockSource: configure load and background-load, and fire it up */
+	writel(nmdk_cycle, mtu_base + MTU_LR(0));
+	writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
+
+	writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
+	       mtu_base + MTU_CR(0));
+}
+
+static void nmdk_clkevt_resume(struct clock_event_device *cedev)
+{
+	nmdk_clkevt_reset();
+	nmdk_clksrc_reset();
+}
+
+static struct clock_event_device nmdk_clkevt = {
+	.name		= "mtu_1",
+	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
+	                  CLOCK_EVT_FEAT_DYNIRQ,
+	.rating		= 200,
+	.set_mode	= nmdk_clkevt_mode,
+	.set_next_event	= nmdk_clkevt_next,
+	.resume		= nmdk_clkevt_resume,
+};
+
+/*
+ * IRQ Handler for timer 1 of the MTU block.
+ */
+static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evdev = dev_id;
+
+	writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
+	evdev->event_handler(evdev);
+	return IRQ_HANDLED;
+}
+
+static struct irqaction nmdk_timer_irq = {
+	.name		= "Nomadik Timer Tick",
+	.flags		= IRQF_TIMER,
+	.handler	= nmdk_timer_interrupt,
+	.dev_id		= &nmdk_clkevt,
+};
+
+static void __init nmdk_timer_init(void __iomem *base, int irq,
+				   struct clk *pclk, struct clk *clk)
+{
+	unsigned long rate;
+
+	mtu_base = base;
+
+	BUG_ON(clk_prepare_enable(pclk));
+	BUG_ON(clk_prepare_enable(clk));
+
+	/*
+	 * Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
+	 * for ux500.
+	 * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
+	 * At 32 MHz, the timer (with its 32-bit counter) can be programmed
+	 * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer
+	 * by 16 gives too low a timer resolution.
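+	 * As a rough check: 133 MHz / 16 is still ~8.3 MHz, so the 32-bit
+	 * counter wraps only every ~516 s, while 2.4 MHz / 16 = 150 kHz
+	 * would make short one-shot deltas unacceptably coarse, hence the
+	 * 32 MHz cut-off below.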
+	 */ +	rate = clk_get_rate(clk); +	if (rate > 32000000) { +		rate /= 16; +		clk_prescale = MTU_CRn_PRESCALE_16; +	} else { +		clk_prescale = MTU_CRn_PRESCALE_1; +	} + +	/* Cycles for periodic mode */ +	nmdk_cycle = DIV_ROUND_CLOSEST(rate, HZ); + + +	/* Timer 0 is the free running clocksource */ +	nmdk_clksrc_reset(); + +	if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0", +			rate, 200, 32, clocksource_mmio_readl_down)) +		pr_err("timer: failed to initialize clock source %s\n", +		       "mtu_0"); + +#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK +	sched_clock_register(nomadik_read_sched_clock, 32, rate); +#endif + +	/* Timer 1 is used for events, register irq and clockevents */ +	setup_irq(irq, &nmdk_timer_irq); +	nmdk_clkevt.cpumask = cpumask_of(0); +	nmdk_clkevt.irq = irq; +	clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU); + +	mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer; +	mtu_delay_timer.freq = rate; +	register_current_timer_delay(&mtu_delay_timer); +} + +static void __init nmdk_timer_of_init(struct device_node *node) +{ +	struct clk *pclk; +	struct clk *clk; +	void __iomem *base; +	int irq; + +	base = of_iomap(node, 0); +	if (!base) +		panic("Can't remap registers"); + +	pclk = of_clk_get_by_name(node, "apb_pclk"); +	if (IS_ERR(pclk)) +		panic("could not get apb_pclk"); + +	clk = of_clk_get_by_name(node, "timclk"); +	if (IS_ERR(clk)) +		panic("could not get timclk"); + +	irq = irq_of_parse_and_map(node, 0); +	if (irq <= 0) +		panic("Can't parse IRQ"); + +	nmdk_timer_init(base, irq, pclk, clk); +} +CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu", +		       nmdk_timer_of_init); diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c new file mode 100644 index 00000000000..8d115db1e65 --- /dev/null +++ b/drivers/clocksource/qcom-timer.c @@ -0,0 +1,343 @@ +/* + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> + +#include <asm/delay.h> + +#define TIMER_MATCH_VAL			0x0000 +#define TIMER_COUNT_VAL			0x0004 +#define TIMER_ENABLE			0x0008 +#define TIMER_ENABLE_CLR_ON_MATCH_EN	BIT(1) +#define TIMER_ENABLE_EN			BIT(0) +#define TIMER_CLEAR			0x000C +#define DGT_CLK_CTL			0x10 +#define DGT_CLK_CTL_DIV_4		0x3 +#define TIMER_STS_GPT0_CLR_PEND		BIT(10) + +#define GPT_HZ 32768 + +#define MSM_DGT_SHIFT 5 + +static void __iomem *event_base; +static void __iomem *sts_base; + +static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; +	/* Stop the timer tick */ +	if (evt->mode == CLOCK_EVT_MODE_ONESHOT) { +		u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); +		ctrl &= ~TIMER_ENABLE_EN; +		writel_relaxed(ctrl, event_base + TIMER_ENABLE); +	} +	evt->event_handler(evt); +	return IRQ_HANDLED; +} + +static int msm_timer_set_next_event(unsigned long cycles, +				    struct clock_event_device *evt) +{ +	u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); + +	ctrl &= ~TIMER_ENABLE_EN; +	writel_relaxed(ctrl, event_base + TIMER_ENABLE); + +	writel_relaxed(ctrl, event_base + TIMER_CLEAR); +	writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); + +	if (sts_base) +		while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND) +			cpu_relax(); + +	writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); +	return 0; +} + +static void msm_timer_set_mode(enum clock_event_mode mode, +			      struct clock_event_device *evt) +{ +	u32 ctrl; + +	ctrl = readl_relaxed(event_base + TIMER_ENABLE); +	ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN); + +	switch (mode) { +	case CLOCK_EVT_MODE_RESUME: +	case CLOCK_EVT_MODE_PERIODIC: +		break; +	case CLOCK_EVT_MODE_ONESHOT: +		/* Timer is enabled in set_next_event */ +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +		break; +	} +	writel_relaxed(ctrl, event_base + TIMER_ENABLE); +} + +static struct clock_event_device __percpu *msm_evt; + +static void __iomem *source_base; + +static notrace cycle_t msm_read_timer_count(struct clocksource *cs) +{ +	return readl_relaxed(source_base + TIMER_COUNT_VAL); +} + +static struct clocksource msm_clocksource = { +	.name	= "dg_timer", +	.rating	= 300, +	.read	= msm_read_timer_count, +	.mask	= CLOCKSOURCE_MASK(32), +	.flags	= CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int msm_timer_irq; +static int msm_timer_has_ppi; + +static int msm_local_timer_setup(struct clock_event_device *evt) +{ +	int cpu = smp_processor_id(); +	int err; + +	evt->irq = msm_timer_irq; +	evt->name = "msm_timer"; +	evt->features = CLOCK_EVT_FEAT_ONESHOT; +	evt->rating = 200; +	evt->set_mode = msm_timer_set_mode; +	evt->set_next_event = msm_timer_set_next_event; +	evt->cpumask = cpumask_of(cpu); + +	clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff); + +	if (msm_timer_has_ppi) { +		enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING); +	} else { +		err = request_irq(evt->irq, msm_timer_interrupt, +				IRQF_TIMER | IRQF_NOBALANCING | +				IRQF_TRIGGER_RISING, "gp_timer", evt); +		if (err) +			pr_err("request_irq failed\n"); +	} + +	return 0; +} + +static void msm_local_timer_stop(struct clock_event_device *evt) +{ +	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); +	
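+	/*
+	 * CLOCK_EVT_MODE_UNUSED goes through msm_timer_set_mode(), which
+	 * clears TIMER_ENABLE_EN, so the hardware is already quiesced when
+	 * the interrupt is torn down below.
+	 */
+	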
disable_percpu_irq(evt->irq); +} + +static int msm_timer_cpu_notify(struct notifier_block *self, +					   unsigned long action, void *hcpu) +{ +	/* +	 * Grab cpu pointer in each case to avoid spurious +	 * preemptible warnings +	 */ +	switch (action & ~CPU_TASKS_FROZEN) { +	case CPU_STARTING: +		msm_local_timer_setup(this_cpu_ptr(msm_evt)); +		break; +	case CPU_DYING: +		msm_local_timer_stop(this_cpu_ptr(msm_evt)); +		break; +	} + +	return NOTIFY_OK; +} + +static struct notifier_block msm_timer_cpu_nb = { +	.notifier_call = msm_timer_cpu_notify, +}; + +static u64 notrace msm_sched_clock_read(void) +{ +	return msm_clocksource.read(&msm_clocksource); +} + +static unsigned long msm_read_current_timer(void) +{ +	return msm_clocksource.read(&msm_clocksource); +} + +static struct delay_timer msm_delay_timer = { +	.read_current_timer = msm_read_current_timer, +}; + +static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, +				  bool percpu) +{ +	struct clocksource *cs = &msm_clocksource; +	int res = 0; + +	msm_timer_irq = irq; +	msm_timer_has_ppi = percpu; + +	msm_evt = alloc_percpu(struct clock_event_device); +	if (!msm_evt) { +		pr_err("memory allocation failed for clockevents\n"); +		goto err; +	} + +	if (percpu) +		res = request_percpu_irq(irq, msm_timer_interrupt, +					 "gp_timer", msm_evt); + +	if (res) { +		pr_err("request_percpu_irq failed\n"); +	} else { +		res = register_cpu_notifier(&msm_timer_cpu_nb); +		if (res) { +			free_percpu_irq(irq, msm_evt); +			goto err; +		} + +		/* Immediately configure the timer on the boot CPU */ +		msm_local_timer_setup(__this_cpu_ptr(msm_evt)); +	} + +err: +	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE); +	res = clocksource_register_hz(cs, dgt_hz); +	if (res) +		pr_err("clocksource_register failed\n"); +	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz); +	msm_delay_timer.freq = dgt_hz; +	register_current_timer_delay(&msm_delay_timer); +} + +#ifdef CONFIG_ARCH_QCOM +static void __init msm_dt_timer_init(struct device_node *np) +{ +	u32 freq; +	int irq; +	struct resource res; +	u32 percpu_offset; +	void __iomem *base; +	void __iomem *cpu0_base; + +	base = of_iomap(np, 0); +	if (!base) { +		pr_err("Failed to map event base\n"); +		return; +	} + +	/* We use GPT0 for the clockevent */ +	irq = irq_of_parse_and_map(np, 1); +	if (irq <= 0) { +		pr_err("Can't get irq\n"); +		return; +	} + +	/* We use CPU0's DGT for the clocksource */ +	if (of_property_read_u32(np, "cpu-offset", &percpu_offset)) +		percpu_offset = 0; + +	if (of_address_to_resource(np, 0, &res)) { +		pr_err("Failed to parse DGT resource\n"); +		return; +	} + +	cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res)); +	if (!cpu0_base) { +		pr_err("Failed to map source base\n"); +		return; +	} + +	if (of_property_read_u32(np, "clock-frequency", &freq)) { +		pr_err("Unknown frequency\n"); +		return; +	} + +	event_base = base + 0x4; +	sts_base = base + 0x88; +	source_base = cpu0_base + 0x24; +	freq /= 4; +	writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL); + +	msm_timer_init(freq, 32, irq, !!percpu_offset); +} +CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); +CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); +#else + +static int __init msm_timer_map(phys_addr_t addr, u32 event, u32 source, +				u32 sts) +{ +	void __iomem *base; + +	base = ioremap(addr, SZ_256); +	if (!base) { +		pr_err("Failed to map timer base\n"); +		return -ENOMEM; +	} +	event_base = base + event; +	source_base = base 
+ source; +	if (sts) +		sts_base = base + sts; + +	return 0; +} + +static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs) +{ +	/* +	 * Shift timer count down by a constant due to unreliable lower bits +	 * on some targets. +	 */ +	return msm_read_timer_count(cs) >> MSM_DGT_SHIFT; +} + +void __init msm7x01_timer_init(void) +{ +	struct clocksource *cs = &msm_clocksource; + +	if (msm_timer_map(0xc0100000, 0x0, 0x10, 0x0)) +		return; +	cs->read = msm_read_timer_count_shift; +	cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)); +	/* 600 KHz */ +	msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7, +			false); +} + +void __init msm7x30_timer_init(void) +{ +	if (msm_timer_map(0xc0100000, 0x4, 0x24, 0x80)) +		return; +	msm_timer_init(24576000 / 4, 32, 1, false); +} + +void __init qsd8x50_timer_init(void) +{ +	if (msm_timer_map(0xAC100000, 0x0, 0x10, 0x34)) +		return; +	msm_timer_init(19200000 / 4, 32, 7, false); +} +#endif diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c new file mode 100644 index 00000000000..5645cfc90c4 --- /dev/null +++ b/drivers/clocksource/samsung_pwm_timer.c @@ -0,0 +1,508 @@ +/* + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + *		http://www.samsung.com/ + * + * samsung - Common hr-timer support (s3c and s5p) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/sched_clock.h> + +#include <clocksource/samsung_pwm.h> + + +/* + * Clocksource driver + */ + +#define REG_TCFG0			0x00 +#define REG_TCFG1			0x04 +#define REG_TCON			0x08 +#define REG_TINT_CSTAT			0x44 + +#define REG_TCNTB(chan)			(0x0c + 12 * (chan)) +#define REG_TCMPB(chan)			(0x10 + 12 * (chan)) + +#define TCFG0_PRESCALER_MASK		0xff +#define TCFG0_PRESCALER1_SHIFT		8 + +#define TCFG1_SHIFT(x)	  		((x) * 4) +#define TCFG1_MUX_MASK	  		0xf + +/* + * Each channel occupies 4 bits in TCON register, but there is a gap of 4 + * bits (one channel) after channel 0, so channels have different numbering + * when accessing TCON register. + * + * In addition, the location of autoreload bit for channel 4 (TCON channel 5) + * in its set of bits is 2 as opposed to 3 for other channels. + */ +#define TCON_START(chan)		(1 << (4 * (chan) + 0)) +#define TCON_MANUALUPDATE(chan)		(1 << (4 * (chan) + 1)) +#define TCON_INVERT(chan)		(1 << (4 * (chan) + 2)) +#define _TCON_AUTORELOAD(chan)		(1 << (4 * (chan) + 3)) +#define _TCON_AUTORELOAD4(chan)		(1 << (4 * (chan) + 2)) +#define TCON_AUTORELOAD(chan)		\ +	((chan < 5) ? 
_TCON_AUTORELOAD(chan) : _TCON_AUTORELOAD4(chan)) + +DEFINE_SPINLOCK(samsung_pwm_lock); +EXPORT_SYMBOL(samsung_pwm_lock); + +struct samsung_pwm_clocksource { +	void __iomem *base; +	void __iomem *source_reg; +	unsigned int irq[SAMSUNG_PWM_NUM]; +	struct samsung_pwm_variant variant; + +	struct clk *timerclk; + +	unsigned int event_id; +	unsigned int source_id; +	unsigned int tcnt_max; +	unsigned int tscaler_div; +	unsigned int tdiv; + +	unsigned long clock_count_per_tick; +}; + +static struct samsung_pwm_clocksource pwm; + +static void samsung_timer_set_prescale(unsigned int channel, u16 prescale) +{ +	unsigned long flags; +	u8 shift = 0; +	u32 reg; + +	if (channel >= 2) +		shift = TCFG0_PRESCALER1_SHIFT; + +	spin_lock_irqsave(&samsung_pwm_lock, flags); + +	reg = readl(pwm.base + REG_TCFG0); +	reg &= ~(TCFG0_PRESCALER_MASK << shift); +	reg |= (prescale - 1) << shift; +	writel(reg, pwm.base + REG_TCFG0); + +	spin_unlock_irqrestore(&samsung_pwm_lock, flags); +} + +static void samsung_timer_set_divisor(unsigned int channel, u8 divisor) +{ +	u8 shift = TCFG1_SHIFT(channel); +	unsigned long flags; +	u32 reg; +	u8 bits; + +	bits = (fls(divisor) - 1) - pwm.variant.div_base; + +	spin_lock_irqsave(&samsung_pwm_lock, flags); + +	reg = readl(pwm.base + REG_TCFG1); +	reg &= ~(TCFG1_MUX_MASK << shift); +	reg |= bits << shift; +	writel(reg, pwm.base + REG_TCFG1); + +	spin_unlock_irqrestore(&samsung_pwm_lock, flags); +} + +static void samsung_time_stop(unsigned int channel) +{ +	unsigned long tcon; +	unsigned long flags; + +	if (channel > 0) +		++channel; + +	spin_lock_irqsave(&samsung_pwm_lock, flags); + +	tcon = __raw_readl(pwm.base + REG_TCON); +	tcon &= ~TCON_START(channel); +	__raw_writel(tcon, pwm.base + REG_TCON); + +	spin_unlock_irqrestore(&samsung_pwm_lock, flags); +} + +static void samsung_time_setup(unsigned int channel, unsigned long tcnt) +{ +	unsigned long tcon; +	unsigned long flags; +	unsigned int tcon_chan = channel; + +	if (tcon_chan > 0) +		++tcon_chan; + +	spin_lock_irqsave(&samsung_pwm_lock, flags); + +	tcon = __raw_readl(pwm.base + REG_TCON); + +	tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan)); +	tcon |= TCON_MANUALUPDATE(tcon_chan); + +	__raw_writel(tcnt, pwm.base + REG_TCNTB(channel)); +	__raw_writel(tcnt, pwm.base + REG_TCMPB(channel)); +	__raw_writel(tcon, pwm.base + REG_TCON); + +	spin_unlock_irqrestore(&samsung_pwm_lock, flags); +} + +static void samsung_time_start(unsigned int channel, bool periodic) +{ +	unsigned long tcon; +	unsigned long flags; + +	if (channel > 0) +		++channel; + +	spin_lock_irqsave(&samsung_pwm_lock, flags); + +	tcon = __raw_readl(pwm.base + REG_TCON); + +	tcon &= ~TCON_MANUALUPDATE(channel); +	tcon |= TCON_START(channel); + +	if (periodic) +		tcon |= TCON_AUTORELOAD(channel); +	else +		tcon &= ~TCON_AUTORELOAD(channel); + +	__raw_writel(tcon, pwm.base + REG_TCON); + +	spin_unlock_irqrestore(&samsung_pwm_lock, flags); +} + +static int samsung_set_next_event(unsigned long cycles, +				struct clock_event_device *evt) +{ +	/* +	 * This check is needed to account for internal rounding +	 * errors inside clockevents core, which might result in +	 * passing cycles = 0, which in turn would not generate any +	 * timer interrupt and hang the system. +	 * +	 * Another solution would be to set up the clockevent device +	 * with min_delta = 2, but this would unnecessarily increase +	 * the minimum sleep period. 
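+	 *
+	 * A rough illustration (the clock rate is hypothetical): with a
+	 * 1 MHz timer input, a 500 ns request converts to 0 cycles, and
+	 * clamping it to 1 cycle makes the timer fire after 1 us instead
+	 * of never firing.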
+	 */ +	if (!cycles) +		cycles = 1; + +	samsung_time_setup(pwm.event_id, cycles); +	samsung_time_start(pwm.event_id, false); + +	return 0; +} + +static void samsung_set_mode(enum clock_event_mode mode, +				struct clock_event_device *evt) +{ +	samsung_time_stop(pwm.event_id); + +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1); +		samsung_time_start(pwm.event_id, true); +		break; + +	case CLOCK_EVT_MODE_ONESHOT: +		break; + +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_RESUME: +		break; +	} +} + +static void samsung_clockevent_resume(struct clock_event_device *cev) +{ +	samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div); +	samsung_timer_set_divisor(pwm.event_id, pwm.tdiv); + +	if (pwm.variant.has_tint_cstat) { +		u32 mask = (1 << pwm.event_id); +		writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT); +	} +} + +static struct clock_event_device time_event_device = { +	.name		= "samsung_event_timer", +	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +	.rating		= 200, +	.set_next_event	= samsung_set_next_event, +	.set_mode	= samsung_set_mode, +	.resume		= samsung_clockevent_resume, +}; + +static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	if (pwm.variant.has_tint_cstat) { +		u32 mask = (1 << pwm.event_id); +		writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT); +	} + +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static struct irqaction samsung_clock_event_irq = { +	.name		= "samsung_time_irq", +	.flags		= IRQF_TIMER | IRQF_IRQPOLL, +	.handler	= samsung_clock_event_isr, +	.dev_id		= &time_event_device, +}; + +static void __init samsung_clockevent_init(void) +{ +	unsigned long pclk; +	unsigned long clock_rate; +	unsigned int irq_number; + +	pclk = clk_get_rate(pwm.timerclk); + +	samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div); +	samsung_timer_set_divisor(pwm.event_id, pwm.tdiv); + +	clock_rate = pclk / (pwm.tscaler_div * pwm.tdiv); +	pwm.clock_count_per_tick = clock_rate / HZ; + +	time_event_device.cpumask = cpumask_of(0); +	clockevents_config_and_register(&time_event_device, +						clock_rate, 1, pwm.tcnt_max); + +	irq_number = pwm.irq[pwm.event_id]; +	setup_irq(irq_number, &samsung_clock_event_irq); + +	if (pwm.variant.has_tint_cstat) { +		u32 mask = (1 << pwm.event_id); +		writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT); +	} +} + +static void samsung_clocksource_suspend(struct clocksource *cs) +{ +	samsung_time_stop(pwm.source_id); +} + +static void samsung_clocksource_resume(struct clocksource *cs) +{ +	samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div); +	samsung_timer_set_divisor(pwm.source_id, pwm.tdiv); + +	samsung_time_setup(pwm.source_id, pwm.tcnt_max); +	samsung_time_start(pwm.source_id, true); +} + +static cycle_t samsung_clocksource_read(struct clocksource *c) +{ +	return ~readl_relaxed(pwm.source_reg); +} + +static struct clocksource samsung_clocksource = { +	.name		= "samsung_clocksource_timer", +	.rating		= 250, +	.read		= samsung_clocksource_read, +	.suspend	= samsung_clocksource_suspend, +	.resume		= samsung_clocksource_resume, +	.flags		= CLOCK_SOURCE_IS_CONTINUOUS, +}; + +/* + * Override the global weak sched_clock symbol with this + * local implementation which uses the clocksource to get some + * better resolution when scheduling the kernel. We accept that + * this wraps around for now, since it is just a relative time + * stamp. 
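+ * As a ballpark figure (the rate is hypothetical), a 32-bit counter
+ * clocked at 24 MHz wraps after 2^32 / 24000000 ~= 179 seconds, which
+ * is acceptable for a relative timestamp as long as the sched_clock
+ * core refreshes its epoch within that window.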
(Inspired by U300 implementation.) + */ +static u64 notrace samsung_read_sched_clock(void) +{ +	return samsung_clocksource_read(NULL); +} + +static void __init samsung_clocksource_init(void) +{ +	unsigned long pclk; +	unsigned long clock_rate; +	int ret; + +	pclk = clk_get_rate(pwm.timerclk); + +	samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div); +	samsung_timer_set_divisor(pwm.source_id, pwm.tdiv); + +	clock_rate = pclk / (pwm.tscaler_div * pwm.tdiv); + +	samsung_time_setup(pwm.source_id, pwm.tcnt_max); +	samsung_time_start(pwm.source_id, true); + +	if (pwm.source_id == 4) +		pwm.source_reg = pwm.base + 0x40; +	else +		pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14; + +	sched_clock_register(samsung_read_sched_clock, +						pwm.variant.bits, clock_rate); + +	samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits); +	ret = clocksource_register_hz(&samsung_clocksource, clock_rate); +	if (ret) +		panic("samsung_clocksource_timer: can't register clocksource\n"); +} + +static void __init samsung_timer_resources(void) +{ +	clk_prepare_enable(pwm.timerclk); + +	pwm.tcnt_max = (1UL << pwm.variant.bits) - 1; +	if (pwm.variant.bits == 16) { +		pwm.tscaler_div = 25; +		pwm.tdiv = 2; +	} else { +		pwm.tscaler_div = 2; +		pwm.tdiv = 1; +	} +} + +/* + * PWM master driver + */ +static void __init _samsung_pwm_clocksource_init(void) +{ +	u8 mask; +	int channel; + +	mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1); +	channel = fls(mask) - 1; +	if (channel < 0) +		panic("failed to find PWM channel for clocksource"); +	pwm.source_id = channel; + +	mask &= ~(1 << channel); +	channel = fls(mask) - 1; +	if (channel < 0) +		panic("failed to find PWM channel for clock event"); +	pwm.event_id = channel; + +	samsung_timer_resources(); +	samsung_clockevent_init(); +	samsung_clocksource_init(); +} + +void __init samsung_pwm_clocksource_init(void __iomem *base, +			unsigned int *irqs, struct samsung_pwm_variant *variant) +{ +	pwm.base = base; +	memcpy(&pwm.variant, variant, sizeof(pwm.variant)); +	memcpy(pwm.irq, irqs, SAMSUNG_PWM_NUM * sizeof(*irqs)); + +	pwm.timerclk = clk_get(NULL, "timers"); +	if (IS_ERR(pwm.timerclk)) +		panic("failed to get timers clock for timer"); + +	_samsung_pwm_clocksource_init(); +} + +#ifdef CONFIG_CLKSRC_OF +static void __init samsung_pwm_alloc(struct device_node *np, +				     const struct samsung_pwm_variant *variant) +{ +	struct property *prop; +	const __be32 *cur; +	u32 val; +	int i; + +	memcpy(&pwm.variant, variant, sizeof(pwm.variant)); +	for (i = 0; i < SAMSUNG_PWM_NUM; ++i) +		pwm.irq[i] = irq_of_parse_and_map(np, i); + +	of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) { +		if (val >= SAMSUNG_PWM_NUM) { +			pr_warning("%s: invalid channel index in samsung,pwm-outputs property\n", +								__func__); +			continue; +		} +		pwm.variant.output_mask |= 1 << val; +	} + +	pwm.base = of_iomap(np, 0); +	if (!pwm.base) { +		pr_err("%s: failed to map PWM registers\n", __func__); +		return; +	} + +	pwm.timerclk = of_clk_get_by_name(np, "timers"); +	if (IS_ERR(pwm.timerclk)) +		panic("failed to get timers clock for timer"); + +	_samsung_pwm_clocksource_init(); +} + +static const struct samsung_pwm_variant s3c24xx_variant = { +	.bits		= 16, +	.div_base	= 1, +	.has_tint_cstat	= false, +	.tclk_mask	= (1 << 4), +}; + +static void __init s3c2410_pwm_clocksource_init(struct device_node *np) +{ +	samsung_pwm_alloc(np, &s3c24xx_variant); +} +CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init); + +static 
const struct samsung_pwm_variant s3c64xx_variant = { +	.bits		= 32, +	.div_base	= 0, +	.has_tint_cstat	= true, +	.tclk_mask	= (1 << 7) | (1 << 6) | (1 << 5), +}; + +static void __init s3c64xx_pwm_clocksource_init(struct device_node *np) +{ +	samsung_pwm_alloc(np, &s3c64xx_variant); +} +CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init); + +static const struct samsung_pwm_variant s5p64x0_variant = { +	.bits		= 32, +	.div_base	= 0, +	.has_tint_cstat	= true, +	.tclk_mask	= 0, +}; + +static void __init s5p64x0_pwm_clocksource_init(struct device_node *np) +{ +	samsung_pwm_alloc(np, &s5p64x0_variant); +} +CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init); + +static const struct samsung_pwm_variant s5p_variant = { +	.bits		= 32, +	.div_base	= 0, +	.has_tint_cstat	= true, +	.tclk_mask	= (1 << 5), +}; + +static void __init s5p_pwm_clocksource_init(struct device_node *np) +{ +	samsung_pwm_alloc(np, &s5p_variant); +} +CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init); +#endif diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c index 27f4d9637b6..64f9e829443 100644 --- a/drivers/clocksource/scx200_hrt.c +++ b/drivers/clocksource/scx200_hrt.c @@ -49,9 +49,6 @@ static cycle_t read_hrt(struct clocksource *cs)  	return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);  } -#define HRT_SHIFT_1	22 -#define HRT_SHIFT_27	26 -  static struct clocksource cs_hrt = {  	.name		= "scx200_hrt",  	.rating		= 250, @@ -63,6 +60,7 @@ static struct clocksource cs_hrt = {  static int __init init_hrt_clocksource(void)  { +	u32 freq;  	/* Make sure scx200 has initialized the configuration block */  	if (!scx200_cb_present())  		return -ENODEV; @@ -71,7 +69,7 @@ static int __init init_hrt_clocksource(void)  	if (!request_region(scx200_cb_base + SCx200_TIMER_OFFSET,  			    SCx200_TIMER_SIZE,  			    "NatSemi SCx200 High-Resolution Timer")) { -		printk(KERN_WARNING NAME ": unable to lock timer region\n"); +		pr_warn("unable to lock timer region\n");  		return -ENODEV;  	} @@ -79,19 +77,13 @@ static int __init init_hrt_clocksource(void)  	outb(HR_TMEN | (mhz27 ? HR_TMCLKSEL : 0),  	     scx200_cb_base + SCx200_TMCNFG_OFFSET); -	if (mhz27) { -		cs_hrt.shift = HRT_SHIFT_27; -		cs_hrt.mult = clocksource_hz2mult((HRT_FREQ + ppm) * 27, -						  cs_hrt.shift); -	} else { -		cs_hrt.shift = HRT_SHIFT_1; -		cs_hrt.mult = clocksource_hz2mult(HRT_FREQ + ppm, -						  cs_hrt.shift); -	} -	printk(KERN_INFO "enabling scx200 high-res timer (%s MHz +%d ppm)\n", -		mhz27 ? "27":"1", ppm); +	freq = (HRT_FREQ + ppm); +	if (mhz27) +		freq *= 27; + +	pr_info("enabling scx200 high-res timer (%s MHz +%d ppm)\n", mhz27 ? "27":"1", ppm); -	return clocksource_register(&cs_hrt); +	return clocksource_register_hz(&cs_hrt, freq);  }  module_init(init_hrt_clocksource); diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index d68d3aa1814..dfa780396b9 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -11,117 +11,289 @@   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/delay.h> +#include <linux/err.h>  #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h>  #include <linux/interrupt.h> -#include <linux/ioport.h>  #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h>  #include <linux/irq.h> -#include <linux/err.h> -#include <linux/clocksource.h> -#include <linux/clockchips.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h>  #include <linux/sh_timer.h>  #include <linux/slab.h> +#include <linux/spinlock.h> + +struct sh_cmt_device; + +/* + * The CMT comes in 5 different identified flavours, depending not only on the + * SoC but also on the particular instance. The following table lists the main + * characteristics of those flavours. + * + *			16B	32B	32B-F	48B	48B-2 + * ----------------------------------------------------------------------------- + * Channels		2	1/4	1	6	2/8 + * Control Width	16	16	16	16	32 + * Counter Width	16	32	32	32/48	32/48 + * Shared Start/Stop	Y	Y	Y	Y	N + * + * The 48-bit gen2 version has a per-channel start/stop register located in the + * channel registers block. All other versions have a shared start/stop register + * located in the global space. + * + * Channels are indexed from 0 to N-1 in the documentation. The channel index + * infers the start/stop bit position in the control register and the channel + * registers block address. Some CMT instances have a subset of channels + * available, in which case the index in the documentation doesn't match the + * "real" index as implemented in hardware. This is for instance the case with + * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0 + * in the documentation but using start/stop bit 5 and having its registers + * block at 0x60. + * + * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit + * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable. 
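+ *
+ * As a made-up example of the index/hwidx split: a device declaring
+ * channels_mask = 0x60 exposes hardware channels 5 and 6, which the
+ * driver numbers index 0 (used as the clock event) and index 1 (used
+ * as the clock source) in the setup loop below.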
+ */ + +enum sh_cmt_model { +	SH_CMT_16BIT, +	SH_CMT_32BIT, +	SH_CMT_32BIT_FAST, +	SH_CMT_48BIT, +	SH_CMT_48BIT_GEN2, +}; + +struct sh_cmt_info { +	enum sh_cmt_model model; -struct sh_cmt_priv { -	void __iomem *mapbase; -	struct clk *clk;  	unsigned long width; /* 16 or 32 bit version of hardware block */  	unsigned long overflow_bit;  	unsigned long clear_bits; -	struct irqaction irqaction; -	struct platform_device *pdev; +	/* callbacks for CMSTR and CMCSR access */ +	unsigned long (*read_control)(void __iomem *base, unsigned long offs); +	void (*write_control)(void __iomem *base, unsigned long offs, +			      unsigned long value); + +	/* callbacks for CMCNT and CMCOR access */ +	unsigned long (*read_count)(void __iomem *base, unsigned long offs); +	void (*write_count)(void __iomem *base, unsigned long offs, +			    unsigned long value); +}; + +struct sh_cmt_channel { +	struct sh_cmt_device *cmt; + +	unsigned int index;	/* Index in the documentation */ +	unsigned int hwidx;	/* Real hardware index */ + +	void __iomem *iostart; +	void __iomem *ioctrl; + +	unsigned int timer_bit;  	unsigned long flags;  	unsigned long match_value;  	unsigned long next_match_value;  	unsigned long max_match_value;  	unsigned long rate; -	spinlock_t lock; +	raw_spinlock_t lock;  	struct clock_event_device ced;  	struct clocksource cs;  	unsigned long total_cycles; +	bool cs_enabled;  }; -static DEFINE_SPINLOCK(sh_cmt_lock); +struct sh_cmt_device { +	struct platform_device *pdev; + +	const struct sh_cmt_info *info; +	bool legacy; + +	void __iomem *mapbase_ch; +	void __iomem *mapbase; +	struct clk *clk; + +	struct sh_cmt_channel *channels; +	unsigned int num_channels; + +	bool has_clockevent; +	bool has_clocksource; +}; + +#define SH_CMT16_CMCSR_CMF		(1 << 7) +#define SH_CMT16_CMCSR_CMIE		(1 << 6) +#define SH_CMT16_CMCSR_CKS8		(0 << 0) +#define SH_CMT16_CMCSR_CKS32		(1 << 0) +#define SH_CMT16_CMCSR_CKS128		(2 << 0) +#define SH_CMT16_CMCSR_CKS512		(3 << 0) +#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0) + +#define SH_CMT32_CMCSR_CMF		(1 << 15) +#define SH_CMT32_CMCSR_OVF		(1 << 14) +#define SH_CMT32_CMCSR_WRFLG		(1 << 13) +#define SH_CMT32_CMCSR_STTF		(1 << 12) +#define SH_CMT32_CMCSR_STPF		(1 << 11) +#define SH_CMT32_CMCSR_SSIE		(1 << 10) +#define SH_CMT32_CMCSR_CMS		(1 << 9) +#define SH_CMT32_CMCSR_CMM		(1 << 8) +#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7) +#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4) +#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4) +#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4) +#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4) +#define SH_CMT32_CMCSR_DBGIVD		(1 << 3) +#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0) +#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0) +#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0) +#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0) +#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0) + +static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs) +{ +	return ioread16(base + (offs << 1)); +} + +static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs) +{ +	return ioread32(base + (offs << 2)); +} + +static void sh_cmt_write16(void __iomem *base, unsigned long offs, +			   unsigned long value) +{ +	iowrite16(value, base + (offs << 1)); +} + +static void sh_cmt_write32(void __iomem *base, unsigned long offs, +			   unsigned long value) +{ +	iowrite32(value, base + (offs << 2)); +} + +static const struct sh_cmt_info sh_cmt_info[] = { +	[SH_CMT_16BIT] = { +		.model = SH_CMT_16BIT, +		.width = 16, +		.overflow_bit = SH_CMT16_CMCSR_CMF, +		.clear_bits = ~SH_CMT16_CMCSR_CMF, +		.read_control = 
sh_cmt_read16, +		.write_control = sh_cmt_write16, +		.read_count = sh_cmt_read16, +		.write_count = sh_cmt_write16, +	}, +	[SH_CMT_32BIT] = { +		.model = SH_CMT_32BIT, +		.width = 32, +		.overflow_bit = SH_CMT32_CMCSR_CMF, +		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), +		.read_control = sh_cmt_read16, +		.write_control = sh_cmt_write16, +		.read_count = sh_cmt_read32, +		.write_count = sh_cmt_write32, +	}, +	[SH_CMT_32BIT_FAST] = { +		.model = SH_CMT_32BIT_FAST, +		.width = 32, +		.overflow_bit = SH_CMT32_CMCSR_CMF, +		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), +		.read_control = sh_cmt_read16, +		.write_control = sh_cmt_write16, +		.read_count = sh_cmt_read32, +		.write_count = sh_cmt_write32, +	}, +	[SH_CMT_48BIT] = { +		.model = SH_CMT_48BIT, +		.width = 32, +		.overflow_bit = SH_CMT32_CMCSR_CMF, +		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), +		.read_control = sh_cmt_read32, +		.write_control = sh_cmt_write32, +		.read_count = sh_cmt_read32, +		.write_count = sh_cmt_write32, +	}, +	[SH_CMT_48BIT_GEN2] = { +		.model = SH_CMT_48BIT_GEN2, +		.width = 32, +		.overflow_bit = SH_CMT32_CMCSR_CMF, +		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), +		.read_control = sh_cmt_read32, +		.write_control = sh_cmt_write32, +		.read_count = sh_cmt_read32, +		.write_count = sh_cmt_write32, +	}, +}; -#define CMSTR -1 /* shared register */  #define CMCSR 0 /* channel register */  #define CMCNT 1 /* channel register */  #define CMCOR 2 /* channel register */ -static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr) +static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase; -	unsigned long offs; - -	if (reg_nr == CMSTR) { -		offs = 0; -		base -= cfg->channel_offset; -	} else -		offs = reg_nr; +	if (ch->iostart) +		return ch->cmt->info->read_control(ch->iostart, 0); +	else +		return ch->cmt->info->read_control(ch->cmt->mapbase, 0); +} -	if (p->width == 16) -		offs <<= 1; -	else { -		offs <<= 2; -		if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) -			return ioread32(base + offs); -	} +static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, +				      unsigned long value) +{ +	if (ch->iostart) +		ch->cmt->info->write_control(ch->iostart, 0, value); +	else +		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); +} -	return ioread16(base + offs); +static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) +{ +	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);  } -static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr, -				unsigned long value) +static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, +				      unsigned long value)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase; -	unsigned long offs; +	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); +} -	if (reg_nr == CMSTR) { -		offs = 0; -		base -= cfg->channel_offset; -	} else -		offs = reg_nr; +static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) +{ +	return ch->cmt->info->read_count(ch->ioctrl, CMCNT); +} -	if (p->width == 16) -		offs <<= 1; -	else { -		offs <<= 2; -		if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) { -			iowrite32(value, base + offs); -			return; -		} -	} +static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, +				      unsigned long value) +{ +	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); +} -	iowrite16(value, base + 
offs); +static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, +				      unsigned long value) +{ +	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);  } -static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p, +static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,  					int *has_wrapped)  {  	unsigned long v1, v2, v3;  	int o1, o2; -	o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit; +	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;  	/* Make sure the timer value is stable. Stolen from acpi_pm.c */  	do {  		o2 = o1; -		v1 = sh_cmt_read(p, CMCNT); -		v2 = sh_cmt_read(p, CMCNT); -		v3 = sh_cmt_read(p, CMCNT); -		o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit; +		v1 = sh_cmt_read_cmcnt(ch); +		v2 = sh_cmt_read_cmcnt(ch); +		v3 = sh_cmt_read_cmcnt(ch); +		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;  	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)  			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2))); @@ -129,66 +301,107 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,  	return v2;  } +static DEFINE_RAW_SPINLOCK(sh_cmt_lock); -static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) +static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data;  	unsigned long flags, value;  	/* start stop register shared by multiple timer channels */ -	spin_lock_irqsave(&sh_cmt_lock, flags); -	value = sh_cmt_read(p, CMSTR); +	raw_spin_lock_irqsave(&sh_cmt_lock, flags); +	value = sh_cmt_read_cmstr(ch);  	if (start) -		value |= 1 << cfg->timer_bit; +		value |= 1 << ch->timer_bit;  	else -		value &= ~(1 << cfg->timer_bit); +		value &= ~(1 << ch->timer_bit); -	sh_cmt_write(p, CMSTR, value); -	spin_unlock_irqrestore(&sh_cmt_lock, flags); +	sh_cmt_write_cmstr(ch, value); +	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);  } -static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) +static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)  { -	int ret; +	int k, ret; + +	pm_runtime_get_sync(&ch->cmt->pdev->dev); +	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);  	/* enable clock */ -	ret = clk_enable(p->clk); +	ret = clk_enable(ch->cmt->clk);  	if (ret) { -		dev_err(&p->pdev->dev, "cannot enable clock\n"); -		return ret; +		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n", +			ch->index); +		goto err0;  	}  	/* make sure channel is disabled */ -	sh_cmt_start_stop_ch(p, 0); +	sh_cmt_start_stop_ch(ch, 0);  	/* configure channel, periodic mode and maximum timeout */ -	if (p->width == 16) { -		*rate = clk_get_rate(p->clk) / 512; -		sh_cmt_write(p, CMCSR, 0x43); +	if (ch->cmt->info->width == 16) { +		*rate = clk_get_rate(ch->cmt->clk) / 512; +		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE | +				   SH_CMT16_CMCSR_CKS512);  	} else { -		*rate = clk_get_rate(p->clk) / 8; -		sh_cmt_write(p, CMCSR, 0x01a4); +		*rate = clk_get_rate(ch->cmt->clk) / 8; +		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM | +				   SH_CMT32_CMCSR_CMTOUT_IE | +				   SH_CMT32_CMCSR_CMR_IRQ | +				   SH_CMT32_CMCSR_CKS_RCLK8);  	} -	sh_cmt_write(p, CMCOR, 0xffffffff); -	sh_cmt_write(p, CMCNT, 0); +	sh_cmt_write_cmcor(ch, 0xffffffff); +	sh_cmt_write_cmcnt(ch, 0); + +	/* +	 * According to the sh73a0 user's manual, as CMCNT can be operated +	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on +	 * modifying CMCNT register; two RCLK cycles are necessary before +	 * this register is either read or any modification of the value +	 * it holds is reflected in 
the LSI's actual operation. +	 * +	 * While at it, we're supposed to clear out the CMCNT as of this +	 * moment, so make sure it's processed properly here.  This will +	 * take RCLKx2 at maximum. +	 */ +	for (k = 0; k < 100; k++) { +		if (!sh_cmt_read_cmcnt(ch)) +			break; +		udelay(1); +	} + +	if (sh_cmt_read_cmcnt(ch)) { +		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", +			ch->index); +		ret = -ETIMEDOUT; +		goto err1; +	}  	/* enable channel */ -	sh_cmt_start_stop_ch(p, 1); +	sh_cmt_start_stop_ch(ch, 1);  	return 0; + err1: +	/* stop clock */ +	clk_disable(ch->cmt->clk); + + err0: +	return ret;  } -static void sh_cmt_disable(struct sh_cmt_priv *p) +static void sh_cmt_disable(struct sh_cmt_channel *ch)  {  	/* disable channel */ -	sh_cmt_start_stop_ch(p, 0); +	sh_cmt_start_stop_ch(ch, 0);  	/* disable interrupts in CMT block */ -	sh_cmt_write(p, CMCSR, 0); +	sh_cmt_write_cmcsr(ch, 0);  	/* stop clock */ -	clk_disable(p->clk); +	clk_disable(ch->cmt->clk); + +	dev_pm_syscore_device(&ch->cmt->pdev->dev, false); +	pm_runtime_put(&ch->cmt->pdev->dev);  }  /* private flags */ @@ -198,24 +411,24 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)  #define FLAG_SKIPEVENT (1 << 3)  #define FLAG_IRQCONTEXT (1 << 4) -static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, +static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,  					      int absolute)  {  	unsigned long new_match; -	unsigned long value = p->next_match_value; +	unsigned long value = ch->next_match_value;  	unsigned long delay = 0;  	unsigned long now = 0;  	int has_wrapped; -	now = sh_cmt_get_counter(p, &has_wrapped); -	p->flags |= FLAG_REPROGRAM; /* force reprogram */ +	now = sh_cmt_get_counter(ch, &has_wrapped); +	ch->flags |= FLAG_REPROGRAM; /* force reprogram */  	if (has_wrapped) {  		/* we're competing with the interrupt handler.  		 *  -> let the interrupt handler reprogram the timer.  		 *  -> interrupt number two handles the event.  		 */ -		p->flags |= FLAG_SKIPEVENT; +		ch->flags |= FLAG_SKIPEVENT;  		return;  	} @@ -227,20 +440,20 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,  		 * but don't save the new match value yet.  		 */  		new_match = now + value + delay; -		if (new_match > p->max_match_value) -			new_match = p->max_match_value; +		if (new_match > ch->max_match_value) +			new_match = ch->max_match_value; -		sh_cmt_write(p, CMCOR, new_match); +		sh_cmt_write_cmcor(ch, new_match); -		now = sh_cmt_get_counter(p, &has_wrapped); -		if (has_wrapped && (new_match > p->match_value)) { +		now = sh_cmt_get_counter(ch, &has_wrapped); +		if (has_wrapped && (new_match > ch->match_value)) {  			/* we are changing to a greater match value,  			 * so this wrap must be caused by the counter  			 * matching the old value.  			 * -> first interrupt reprograms the timer.  			 * -> interrupt number two handles the event.  			 */ -			p->flags |= FLAG_SKIPEVENT; +			ch->flags |= FLAG_SKIPEVENT;  			break;  		} @@ -251,7 +464,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,  			 * -> save programmed match value.  			 * -> let isr handle the event.  			 */ -			p->match_value = new_match; +			ch->match_value = new_match;  			break;  		} @@ -262,7 +475,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,  			 * -> save programmed match value.  			 * -> let isr handle the event.  			 
*/ -			p->match_value = new_match; +			ch->match_value = new_match;  			break;  		} @@ -278,221 +491,245 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,  			delay = 1;  		if (!delay) -			dev_warn(&p->pdev->dev, "too long delay\n"); +			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n", +				 ch->index);  	} while (delay);  } -static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) +static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)  { -	unsigned long flags; +	if (delta > ch->max_match_value) +		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n", +			 ch->index); -	if (delta > p->max_match_value) -		dev_warn(&p->pdev->dev, "delta out of range\n"); +	ch->next_match_value = delta; +	sh_cmt_clock_event_program_verify(ch, 0); +} + +static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) +{ +	unsigned long flags; -	spin_lock_irqsave(&p->lock, flags); -	p->next_match_value = delta; -	sh_cmt_clock_event_program_verify(p, 0); -	spin_unlock_irqrestore(&p->lock, flags); +	raw_spin_lock_irqsave(&ch->lock, flags); +	__sh_cmt_set_next(ch, delta); +	raw_spin_unlock_irqrestore(&ch->lock, flags);  }  static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)  { -	struct sh_cmt_priv *p = dev_id; +	struct sh_cmt_channel *ch = dev_id;  	/* clear flags */ -	sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits); +	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & +			   ch->cmt->info->clear_bits);  	/* update clock source counter to begin with if enabled  	 * the wrap flag should be cleared by the timer specific  	 * isr before we end up here.  	 */ -	if (p->flags & FLAG_CLOCKSOURCE) -		p->total_cycles += p->match_value + 1; +	if (ch->flags & FLAG_CLOCKSOURCE) +		ch->total_cycles += ch->match_value + 1; -	if (!(p->flags & FLAG_REPROGRAM)) -		p->next_match_value = p->max_match_value; +	if (!(ch->flags & FLAG_REPROGRAM)) +		ch->next_match_value = ch->max_match_value; -	p->flags |= FLAG_IRQCONTEXT; +	ch->flags |= FLAG_IRQCONTEXT; -	if (p->flags & FLAG_CLOCKEVENT) { -		if (!(p->flags & FLAG_SKIPEVENT)) { -			if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) { -				p->next_match_value = p->max_match_value; -				p->flags |= FLAG_REPROGRAM; +	if (ch->flags & FLAG_CLOCKEVENT) { +		if (!(ch->flags & FLAG_SKIPEVENT)) { +			if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) { +				ch->next_match_value = ch->max_match_value; +				ch->flags |= FLAG_REPROGRAM;  			} -			p->ced.event_handler(&p->ced); +			ch->ced.event_handler(&ch->ced);  		}  	} -	p->flags &= ~FLAG_SKIPEVENT; +	ch->flags &= ~FLAG_SKIPEVENT; -	if (p->flags & FLAG_REPROGRAM) { -		p->flags &= ~FLAG_REPROGRAM; -		sh_cmt_clock_event_program_verify(p, 1); +	if (ch->flags & FLAG_REPROGRAM) { +		ch->flags &= ~FLAG_REPROGRAM; +		sh_cmt_clock_event_program_verify(ch, 1); -		if (p->flags & FLAG_CLOCKEVENT) -			if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) -			    || (p->match_value == p->next_match_value)) -				p->flags &= ~FLAG_REPROGRAM; +		if (ch->flags & FLAG_CLOCKEVENT) +			if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) +			    || (ch->match_value == ch->next_match_value)) +				ch->flags &= ~FLAG_REPROGRAM;  	} -	p->flags &= ~FLAG_IRQCONTEXT; +	ch->flags &= ~FLAG_IRQCONTEXT;  	return IRQ_HANDLED;  } -static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) +static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)  {  	int ret = 0;  	unsigned long flags; -	spin_lock_irqsave(&p->lock, flags); +	raw_spin_lock_irqsave(&ch->lock, flags); -	if (!(p->flags & 
(FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) -		ret = sh_cmt_enable(p, &p->rate); +	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) +		ret = sh_cmt_enable(ch, &ch->rate);  	if (ret)  		goto out; -	p->flags |= flag; +	ch->flags |= flag;  	/* setup timeout if no clockevent */ -	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) -		sh_cmt_set_next(p, p->max_match_value); +	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT))) +		__sh_cmt_set_next(ch, ch->max_match_value);   out: -	spin_unlock_irqrestore(&p->lock, flags); +	raw_spin_unlock_irqrestore(&ch->lock, flags);  	return ret;  } -static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) +static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)  {  	unsigned long flags;  	unsigned long f; -	spin_lock_irqsave(&p->lock, flags); +	raw_spin_lock_irqsave(&ch->lock, flags); -	f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); -	p->flags &= ~flag; +	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); +	ch->flags &= ~flag; -	if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) -		sh_cmt_disable(p); +	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) +		sh_cmt_disable(ch);  	/* adjust the timeout to maximum if only clocksource left */ -	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) -		sh_cmt_set_next(p, p->max_match_value); +	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE)) +		__sh_cmt_set_next(ch, ch->max_match_value); -	spin_unlock_irqrestore(&p->lock, flags); +	raw_spin_unlock_irqrestore(&ch->lock, flags);  } -static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) +static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)  { -	return container_of(cs, struct sh_cmt_priv, cs); +	return container_of(cs, struct sh_cmt_channel, cs);  }  static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)  { -	struct sh_cmt_priv *p = cs_to_sh_cmt(cs); +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);  	unsigned long flags, raw;  	unsigned long value;  	int has_wrapped; -	spin_lock_irqsave(&p->lock, flags); -	value = p->total_cycles; -	raw = sh_cmt_get_counter(p, &has_wrapped); +	raw_spin_lock_irqsave(&ch->lock, flags); +	value = ch->total_cycles; +	raw = sh_cmt_get_counter(ch, &has_wrapped);  	if (unlikely(has_wrapped)) -		raw += p->match_value + 1; -	spin_unlock_irqrestore(&p->lock, flags); +		raw += ch->match_value + 1; +	raw_spin_unlock_irqrestore(&ch->lock, flags);  	return value + raw;  }  static int sh_cmt_clocksource_enable(struct clocksource *cs)  { -	struct sh_cmt_priv *p = cs_to_sh_cmt(cs); +	int ret; +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); + +	WARN_ON(ch->cs_enabled); -	p->total_cycles = 0; +	ch->total_cycles = 0; -	return sh_cmt_start(p, FLAG_CLOCKSOURCE); +	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE); +	if (!ret) { +		__clocksource_updatefreq_hz(cs, ch->rate); +		ch->cs_enabled = true; +	} +	return ret;  }  static void sh_cmt_clocksource_disable(struct clocksource *cs)  { -	sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); + +	WARN_ON(!ch->cs_enabled); + +	sh_cmt_stop(ch, FLAG_CLOCKSOURCE); +	ch->cs_enabled = false; +} + +static void sh_cmt_clocksource_suspend(struct clocksource *cs) +{ +	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); + +	sh_cmt_stop(ch, FLAG_CLOCKSOURCE); +	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);  }  static void sh_cmt_clocksource_resume(struct clocksource *cs)  { -	sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); +	struct sh_cmt_channel 
*ch = cs_to_sh_cmt(cs); + +	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); +	sh_cmt_start(ch, FLAG_CLOCKSOURCE);  } -static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, -				       char *name, unsigned long rating) +static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch, +				       const char *name)  { -	struct clocksource *cs = &p->cs; +	struct clocksource *cs = &ch->cs;  	cs->name = name; -	cs->rating = rating; +	cs->rating = 125;  	cs->read = sh_cmt_clocksource_read;  	cs->enable = sh_cmt_clocksource_enable;  	cs->disable = sh_cmt_clocksource_disable; -	cs->suspend = sh_cmt_clocksource_disable; +	cs->suspend = sh_cmt_clocksource_suspend;  	cs->resume = sh_cmt_clocksource_resume;  	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);  	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; -	/* clk_get_rate() needs an enabled clock */ -	clk_enable(p->clk); -	p->rate = clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8); -	clk_disable(p->clk); - -	/* TODO: calculate good shift from rate and counter bit width */ -	cs->shift = 0; -	cs->mult = clocksource_hz2mult(p->rate, cs->shift); - -	dev_info(&p->pdev->dev, "used as clock source\n"); - -	clocksource_register(cs); +	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n", +		 ch->index); +	/* Register with dummy 1 Hz value, gets updated in ->enable() */ +	clocksource_register_hz(cs, 1);  	return 0;  } -static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) +static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)  { -	return container_of(ced, struct sh_cmt_priv, ced); +	return container_of(ced, struct sh_cmt_channel, ced);  } -static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) +static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)  { -	struct clock_event_device *ced = &p->ced; +	struct clock_event_device *ced = &ch->ced; -	sh_cmt_start(p, FLAG_CLOCKEVENT); +	sh_cmt_start(ch, FLAG_CLOCKEVENT);  	/* TODO: calculate good shift from rate and counter bit width */  	ced->shift = 32; -	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); -	ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced); +	ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift); +	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);  	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);  	if (periodic) -		sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1); +		sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1);  	else -		sh_cmt_set_next(p, p->max_match_value); +		sh_cmt_set_next(ch, ch->max_match_value);  }  static void sh_cmt_clock_event_mode(enum clock_event_mode mode,  				    struct clock_event_device *ced)  { -	struct sh_cmt_priv *p = ced_to_sh_cmt(ced); +	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);  	/* deal with old setting first */  	switch (ced->mode) {  	case CLOCK_EVT_MODE_PERIODIC:  	case CLOCK_EVT_MODE_ONESHOT: -		sh_cmt_stop(p, FLAG_CLOCKEVENT); +		sh_cmt_stop(ch, FLAG_CLOCKEVENT);  		break;  	default:  		break; @@ -500,16 +737,18 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,  	switch (mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		dev_info(&p->pdev->dev, "used for periodic clock events\n"); -		sh_cmt_clock_event_start(p, 1); +		dev_info(&ch->cmt->pdev->dev, +			 "ch%u: used for periodic clock events\n", ch->index); +		sh_cmt_clock_event_start(ch, 1);  		break;  	case CLOCK_EVT_MODE_ONESHOT: -		dev_info(&p->pdev->dev, "used for oneshot clock events\n"); -		sh_cmt_clock_event_start(p, 0); +		dev_info(&ch->cmt->pdev->dev, +		
	 "ch%u: used for oneshot clock events\n", ch->index); +		sh_cmt_clock_event_start(ch, 0);  		break;  	case CLOCK_EVT_MODE_SHUTDOWN:  	case CLOCK_EVT_MODE_UNUSED: -		sh_cmt_stop(p, FLAG_CLOCKEVENT); +		sh_cmt_stop(ch, FLAG_CLOCKEVENT);  		break;  	default:  		break; @@ -519,175 +758,398 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,  static int sh_cmt_clock_event_next(unsigned long delta,  				   struct clock_event_device *ced)  { -	struct sh_cmt_priv *p = ced_to_sh_cmt(ced); +	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);  	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); -	if (likely(p->flags & FLAG_IRQCONTEXT)) -		p->next_match_value = delta - 1; +	if (likely(ch->flags & FLAG_IRQCONTEXT)) +		ch->next_match_value = delta - 1;  	else -		sh_cmt_set_next(p, delta - 1); +		sh_cmt_set_next(ch, delta - 1);  	return 0;  } -static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, -				       char *name, unsigned long rating) +static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)  { -	struct clock_event_device *ced = &p->ced; +	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); -	memset(ced, 0, sizeof(*ced)); +	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); +	clk_unprepare(ch->cmt->clk); +} + +static void sh_cmt_clock_event_resume(struct clock_event_device *ced) +{ +	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); + +	clk_prepare(ch->cmt->clk); +	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); +} + +static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch, +				      const char *name) +{ +	struct clock_event_device *ced = &ch->ced; +	int irq; +	int ret; + +	irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index); +	if (irq < 0) { +		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", +			ch->index); +		return irq; +	} + +	ret = request_irq(irq, sh_cmt_interrupt, +			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, +			  dev_name(&ch->cmt->pdev->dev), ch); +	if (ret) { +		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n", +			ch->index, irq); +		return ret; +	}  	ced->name = name;  	ced->features = CLOCK_EVT_FEAT_PERIODIC;  	ced->features |= CLOCK_EVT_FEAT_ONESHOT; -	ced->rating = rating; -	ced->cpumask = cpumask_of(0); +	ced->rating = 125; +	ced->cpumask = cpu_possible_mask;  	ced->set_next_event = sh_cmt_clock_event_next;  	ced->set_mode = sh_cmt_clock_event_mode; +	ced->suspend = sh_cmt_clock_event_suspend; +	ced->resume = sh_cmt_clock_event_resume; -	dev_info(&p->pdev->dev, "used for clock events\n"); +	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n", +		 ch->index);  	clockevents_register_device(ced); + +	return 0; +} + +static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name, +			   bool clockevent, bool clocksource) +{ +	int ret; + +	if (clockevent) { +		ch->cmt->has_clockevent = true; +		ret = sh_cmt_register_clockevent(ch, name); +		if (ret < 0) +			return ret; +	} + +	if (clocksource) { +		ch->cmt->has_clocksource = true; +		sh_cmt_register_clocksource(ch, name); +	} + +	return 0;  } -static int sh_cmt_register(struct sh_cmt_priv *p, char *name, -			   unsigned long clockevent_rating, -			   unsigned long clocksource_rating) +static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, +				unsigned int hwidx, bool clockevent, +				bool clocksource, struct sh_cmt_device *cmt)  { -	if (p->width == (sizeof(p->max_match_value) * 8)) -		p->max_match_value = ~0; +	int ret; + +	/* Skip unused channels. 
*/ +	if (!clockevent && !clocksource) +		return 0; + +	ch->cmt = cmt; +	ch->index = index; +	ch->hwidx = hwidx; + +	/* +	 * Compute the address of the channel control register block. For the +	 * timers with a per-channel start/stop register, compute its address +	 * as well. +	 * +	 * For legacy configuration the address has been mapped explicitly. +	 */ +	if (cmt->legacy) { +		ch->ioctrl = cmt->mapbase_ch; +	} else { +		switch (cmt->info->model) { +		case SH_CMT_16BIT: +			ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; +			break; +		case SH_CMT_32BIT: +		case SH_CMT_48BIT: +			ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; +			break; +		case SH_CMT_32BIT_FAST: +			/* +			 * The 32-bit "fast" timer has a single channel at hwidx +			 * 5 but is located at offset 0x40 instead of 0x60 for +			 * some reason. +			 */ +			ch->ioctrl = cmt->mapbase + 0x40; +			break; +		case SH_CMT_48BIT_GEN2: +			ch->iostart = cmt->mapbase + ch->hwidx * 0x100; +			ch->ioctrl = ch->iostart + 0x10; +			break; +		} +	} + +	if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) +		ch->max_match_value = ~0;  	else -		p->max_match_value = (1 << p->width) - 1; +		ch->max_match_value = (1 << cmt->info->width) - 1; -	p->match_value = p->max_match_value; -	spin_lock_init(&p->lock); +	ch->match_value = ch->max_match_value; +	raw_spin_lock_init(&ch->lock); -	if (clockevent_rating) -		sh_cmt_register_clockevent(p, name, clockevent_rating); +	if (cmt->legacy) { +		ch->timer_bit = ch->hwidx; +	} else { +		ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 +			      ? 0 : ch->hwidx; +	} -	if (clocksource_rating) -		sh_cmt_register_clocksource(p, name, clocksource_rating); +	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), +			      clockevent, clocksource); +	if (ret) { +		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n", +			ch->index); +		return ret; +	} +	ch->cs_enabled = false;  	return 0;  } -static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) +static int sh_cmt_map_memory(struct sh_cmt_device *cmt)  { -	struct sh_timer_config *cfg = pdev->dev.platform_data; -	struct resource *res; -	int irq, ret; -	ret = -ENXIO; +	struct resource *mem; -	memset(p, 0, sizeof(*p)); -	p->pdev = pdev; +	mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); +	if (!mem) { +		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); +		return -ENXIO; +	} -	if (!cfg) { -		dev_err(&p->pdev->dev, "missing platform data\n"); -		goto err0; +	cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem)); +	if (cmt->mapbase == NULL) { +		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); +		return -ENXIO;  	} -	platform_set_drvdata(pdev, p); +	return 0; +} -	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); +static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt) +{ +	struct sh_timer_config *cfg = cmt->pdev->dev.platform_data; +	struct resource *res, *res2; + +	/* map memory, let mapbase_ch point to our channel */ +	res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);  	if (!res) { -		dev_err(&p->pdev->dev, "failed to get I/O memory\n"); -		goto err0; +		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); +		return -ENXIO;  	} -	irq = platform_get_irq(p->pdev, 0); -	if (irq < 0) { -		dev_err(&p->pdev->dev, "failed to get irq\n"); -		goto err0; +	cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res)); +	if (cmt->mapbase_ch == NULL) { +		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); +		return -ENXIO;  	} -	/* map memory, let mapbase point to our 
channel */ -	p->mapbase = ioremap_nocache(res->start, resource_size(res)); -	if (p->mapbase == NULL) { -		dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); -		goto err0; +	/* optional resource for the shared timer start/stop register */ +	res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1); + +	/* map second resource for CMSTR */ +	cmt->mapbase = ioremap_nocache(res2 ? res2->start : +				       res->start - cfg->channel_offset, +				       res2 ? resource_size(res2) : 2); +	if (cmt->mapbase == NULL) { +		dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n"); +		iounmap(cmt->mapbase_ch); +		return -ENXIO;  	} -	/* request irq using setup_irq() (too early for request_irq()) */ -	p->irqaction.name = dev_name(&p->pdev->dev); -	p->irqaction.handler = sh_cmt_interrupt; -	p->irqaction.dev_id = p; -	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ -			     IRQF_IRQPOLL  | IRQF_NOBALANCING; - -	/* get hold of clock */ -	p->clk = clk_get(&p->pdev->dev, "cmt_fck"); -	if (IS_ERR(p->clk)) { -		dev_err(&p->pdev->dev, "cannot get clock\n"); -		ret = PTR_ERR(p->clk); -		goto err1; +	/* identify the model based on the resources */ +	if (resource_size(res) == 6) +		cmt->info = &sh_cmt_info[SH_CMT_16BIT]; +	else if (res2 && (resource_size(res2) == 4)) +		cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2]; +	else +		cmt->info = &sh_cmt_info[SH_CMT_32BIT]; + +	return 0; +} + +static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt) +{ +	iounmap(cmt->mapbase); +	if (cmt->mapbase_ch) +		iounmap(cmt->mapbase_ch); +} + +static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) +{ +	struct sh_timer_config *cfg = pdev->dev.platform_data; +	const struct platform_device_id *id = pdev->id_entry; +	unsigned int hw_channels; +	int ret; + +	memset(cmt, 0, sizeof(*cmt)); +	cmt->pdev = pdev; + +	if (!cfg) { +		dev_err(&cmt->pdev->dev, "missing platform data\n"); +		return -ENXIO;  	} -	if (resource_size(res) == 6) { -		p->width = 16; -		p->overflow_bit = 0x80; -		p->clear_bits = ~0x80; +	cmt->info = (const struct sh_cmt_info *)id->driver_data; +	cmt->legacy = cmt->info ? false : true; + +	/* Get hold of clock. */ +	cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck"); +	if (IS_ERR(cmt->clk)) { +		dev_err(&cmt->pdev->dev, "cannot get clock\n"); +		return PTR_ERR(cmt->clk); +	} + +	ret = clk_prepare(cmt->clk); +	if (ret < 0) +		goto err_clk_put; + +	/* +	 * Map the memory resource(s). We need to support both the legacy +	 * platform device configuration (with one device per channel) and the +	 * new version (with multiple channels per device). +	 */ +	if (cmt->legacy) +		ret = sh_cmt_map_memory_legacy(cmt); +	else +		ret = sh_cmt_map_memory(cmt); + +	if (ret < 0) +		goto err_clk_unprepare; + +	/* Allocate and setup the channels. 
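+	 * For non-legacy devices channels_mask is a bitmask of the wired
+	 * hardware channels, so e.g. hweight8(0x3f) yields six channels;
+	 * legacy platform data always describes exactly one channel.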
*/ +	if (cmt->legacy) { +		cmt->num_channels = 1; +		hw_channels = 0;  	} else { -		p->width = 32; -		p->overflow_bit = 0x8000; -		p->clear_bits = ~0xc000; +		cmt->num_channels = hweight8(cfg->channels_mask); +		hw_channels = cfg->channels_mask;  	} -	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev), -			      cfg->clockevent_rating, -			      cfg->clocksource_rating); -	if (ret) { -		dev_err(&p->pdev->dev, "registration failed\n"); -		goto err1; +	cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), +				GFP_KERNEL); +	if (cmt->channels == NULL) { +		ret = -ENOMEM; +		goto err_unmap;  	} -	ret = setup_irq(irq, &p->irqaction); -	if (ret) { -		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); -		goto err1; +	if (cmt->legacy) { +		ret = sh_cmt_setup_channel(&cmt->channels[0], +					   cfg->timer_bit, cfg->timer_bit, +					   cfg->clockevent_rating != 0, +					   cfg->clocksource_rating != 0, cmt); +		if (ret < 0) +			goto err_unmap; +	} else { +		unsigned int mask = hw_channels; +		unsigned int i; + +		/* +		 * Use the first channel as a clock event device and the second +		 * channel as a clock source. If only one channel is available +		 * use it for both. +		 */ +		for (i = 0; i < cmt->num_channels; ++i) { +			unsigned int hwidx = ffs(mask) - 1; +			bool clocksource = i == 1 || cmt->num_channels == 1; +			bool clockevent = i == 0; + +			ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx, +						   clockevent, clocksource, +						   cmt); +			if (ret < 0) +				goto err_unmap; + +			mask &= ~(1 << hwidx); +		}  	} +	platform_set_drvdata(pdev, cmt); +  	return 0; -err1: -	iounmap(p->mapbase); -err0: +err_unmap: +	kfree(cmt->channels); +	sh_cmt_unmap_memory(cmt); +err_clk_unprepare: +	clk_unprepare(cmt->clk); +err_clk_put: +	clk_put(cmt->clk);  	return ret;  } -static int __devinit sh_cmt_probe(struct platform_device *pdev) +static int sh_cmt_probe(struct platform_device *pdev)  { -	struct sh_cmt_priv *p = platform_get_drvdata(pdev); +	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);  	int ret; -	if (p) { +	if (!is_early_platform_device(pdev)) { +		pm_runtime_set_active(&pdev->dev); +		pm_runtime_enable(&pdev->dev); +	} + +	if (cmt) {  		dev_info(&pdev->dev, "kept as earlytimer\n"); -		return 0; +		goto out;  	} -	p = kmalloc(sizeof(*p), GFP_KERNEL); -	if (p == NULL) { -		dev_err(&pdev->dev, "failed to allocate driver data\n"); +	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL); +	if (cmt == NULL)  		return -ENOMEM; -	} -	ret = sh_cmt_setup(p, pdev); +	ret = sh_cmt_setup(cmt, pdev);  	if (ret) { -		kfree(p); -		platform_set_drvdata(pdev, NULL); +		kfree(cmt); +		pm_runtime_idle(&pdev->dev); +		return ret;  	} -	return ret; +	if (is_early_platform_device(pdev)) +		return 0; + + out: +	if (cmt->has_clockevent || cmt->has_clocksource) +		pm_runtime_irq_safe(&pdev->dev); +	else +		pm_runtime_idle(&pdev->dev); + +	return 0;  } -static int __devexit sh_cmt_remove(struct platform_device *pdev) +static int sh_cmt_remove(struct platform_device *pdev)  {  	return -EBUSY; /* cannot unregister clockevent and clocksource */  } +static const struct platform_device_id sh_cmt_id_table[] = { +	{ "sh_cmt", 0 }, +	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, +	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, +	{ "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, +	{ "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, +	{ "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, +	{ } +}; 
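+
+/*
+ * A platform selects the CMT flavour purely by device name. As a
+ * sketch (the board code below is hypothetical):
+ *
+ *	static struct platform_device_info cmt_info = {
+ *		.name	= "sh-cmt-48-gen2",
+ *		.id	= 0,
+ *	};
+ *
+ * The matching entry in sh_cmt_id_table above then hands the
+ * corresponding sh_cmt_info descriptor to the driver through
+ * id->driver_data, while the bare "sh_cmt" name keeps the legacy
+ * probe path alive.
+ */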
+MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); +  static struct platform_driver sh_cmt_device_driver = {  	.probe		= sh_cmt_probe, -	.remove		= __devexit_p(sh_cmt_remove), +	.remove		= sh_cmt_remove,  	.driver		= {  		.name	= "sh_cmt", -	} +	}, +	.id_table	= sh_cmt_id_table,  };  static int __init sh_cmt_init(void) @@ -701,7 +1163,7 @@ static void __exit sh_cmt_exit(void)  }  early_platform_init("earlytimer", &sh_cmt_device_driver); -module_init(sh_cmt_init); +subsys_initcall(sh_cmt_init);  module_exit(sh_cmt_exit);  MODULE_AUTHOR("Magnus Damm"); diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 40630cb9823..188d4e092ef 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -11,37 +11,51 @@   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/err.h>  #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h>  #include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/delay.h>  #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h>  #include <linux/irq.h> -#include <linux/err.h> -#include <linux/clockchips.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h>  #include <linux/sh_timer.h>  #include <linux/slab.h> +#include <linux/spinlock.h> + +struct sh_mtu2_device; + +struct sh_mtu2_channel { +	struct sh_mtu2_device *mtu; +	unsigned int index; + +	void __iomem *base; +	int irq; + +	struct clock_event_device ced; +}; + +struct sh_mtu2_device { +	struct platform_device *pdev; -struct sh_mtu2_priv {  	void __iomem *mapbase;  	struct clk *clk; -	struct irqaction irqaction; -	struct platform_device *pdev; -	unsigned long rate; -	unsigned long periodic; -	struct clock_event_device ced; + +	struct sh_mtu2_channel *channels; +	unsigned int num_channels; + +	bool legacy; +	bool has_clockevent;  }; -static DEFINE_SPINLOCK(sh_mtu2_lock); +static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);  #define TSTR -1 /* shared register */  #define TCR  0 /* channel register */ @@ -52,6 +66,88 @@ static DEFINE_SPINLOCK(sh_mtu2_lock);  #define TCNT 5 /* channel register */  #define TGR  6 /* channel register */ +#define TCR_CCLR_NONE		(0 << 5) +#define TCR_CCLR_TGRA		(1 << 5) +#define TCR_CCLR_TGRB		(2 << 5) +#define TCR_CCLR_SYNC		(3 << 5) +#define TCR_CCLR_TGRC		(5 << 5) +#define TCR_CCLR_TGRD		(6 << 5) +#define TCR_CCLR_MASK		(7 << 5) +#define TCR_CKEG_RISING		(0 << 3) +#define TCR_CKEG_FALLING	(1 << 3) +#define TCR_CKEG_BOTH		(2 << 3) +#define TCR_CKEG_MASK		(3 << 3) +/* Values 4 to 7 are channel-dependent */ +#define TCR_TPSC_P1		(0 << 0) +#define TCR_TPSC_P4		(1 << 0) +#define TCR_TPSC_P16		(2 << 0) +#define TCR_TPSC_P64		(3 << 0) +#define TCR_TPSC_CH0_TCLKA	(4 << 0) +#define TCR_TPSC_CH0_TCLKB	(5 << 0) +#define TCR_TPSC_CH0_TCLKC	(6 << 0) +#define TCR_TPSC_CH0_TCLKD	(7 << 0) +#define TCR_TPSC_CH1_TCLKA	(4 << 0) +#define TCR_TPSC_CH1_TCLKB	(5 << 0) +#define TCR_TPSC_CH1_P256	(6 << 0) +#define TCR_TPSC_CH1_TCNT2	(7 << 0) +#define TCR_TPSC_CH2_TCLKA	(4 << 0) +#define TCR_TPSC_CH2_TCLKB	(5 
<< 0) +#define TCR_TPSC_CH2_TCLKC	(6 << 0) +#define TCR_TPSC_CH2_P1024	(7 << 0) +#define TCR_TPSC_CH34_P256	(4 << 0) +#define TCR_TPSC_CH34_P1024	(5 << 0) +#define TCR_TPSC_CH34_TCLKA	(6 << 0) +#define TCR_TPSC_CH34_TCLKB	(7 << 0) +#define TCR_TPSC_MASK		(7 << 0) + +#define TMDR_BFE		(1 << 6) +#define TMDR_BFB		(1 << 5) +#define TMDR_BFA		(1 << 4) +#define TMDR_MD_NORMAL		(0 << 0) +#define TMDR_MD_PWM_1		(2 << 0) +#define TMDR_MD_PWM_2		(3 << 0) +#define TMDR_MD_PHASE_1		(4 << 0) +#define TMDR_MD_PHASE_2		(5 << 0) +#define TMDR_MD_PHASE_3		(6 << 0) +#define TMDR_MD_PHASE_4		(7 << 0) +#define TMDR_MD_PWM_SYNC	(8 << 0) +#define TMDR_MD_PWM_COMP_CREST	(13 << 0) +#define TMDR_MD_PWM_COMP_TROUGH	(14 << 0) +#define TMDR_MD_PWM_COMP_BOTH	(15 << 0) +#define TMDR_MD_MASK		(15 << 0) + +#define TIOC_IOCH(n)		((n) << 4) +#define TIOC_IOCL(n)		((n) << 0) +#define TIOR_OC_RETAIN		(0 << 0) +#define TIOR_OC_0_CLEAR		(1 << 0) +#define TIOR_OC_0_SET		(2 << 0) +#define TIOR_OC_0_TOGGLE	(3 << 0) +#define TIOR_OC_1_CLEAR		(5 << 0) +#define TIOR_OC_1_SET		(6 << 0) +#define TIOR_OC_1_TOGGLE	(7 << 0) +#define TIOR_IC_RISING		(8 << 0) +#define TIOR_IC_FALLING		(9 << 0) +#define TIOR_IC_BOTH		(10 << 0) +#define TIOR_IC_TCNT		(12 << 0) +#define TIOR_MASK		(15 << 0) + +#define TIER_TTGE		(1 << 7) +#define TIER_TTGE2		(1 << 6) +#define TIER_TCIEU		(1 << 5) +#define TIER_TCIEV		(1 << 4) +#define TIER_TGIED		(1 << 3) +#define TIER_TGIEC		(1 << 2) +#define TIER_TGIEB		(1 << 1) +#define TIER_TGIEA		(1 << 0) + +#define TSR_TCFD		(1 << 7) +#define TSR_TCFU		(1 << 5) +#define TSR_TCFV		(1 << 4) +#define TSR_TGFD		(1 << 3) +#define TSR_TGFC		(1 << 2) +#define TSR_TGFB		(1 << 1) +#define TSR_TGFA		(1 << 0) +  static unsigned long mtu2_reg_offs[] = {  	[TCR] = 0,  	[TMDR] = 1, @@ -62,129 +158,143 @@ static unsigned long mtu2_reg_offs[] = {  	[TGR] = 8,  }; -static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr) +static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase;  	unsigned long offs; -	if (reg_nr == TSTR) -		return ioread8(base + cfg->channel_offset); +	if (reg_nr == TSTR) { +		if (ch->mtu->legacy) +			return ioread8(ch->mtu->mapbase); +		else +			return ioread8(ch->mtu->mapbase + 0x280); +	}  	offs = mtu2_reg_offs[reg_nr];  	if ((reg_nr == TCNT) || (reg_nr == TGR)) -		return ioread16(base + offs); +		return ioread16(ch->base + offs);  	else -		return ioread8(base + offs); +		return ioread8(ch->base + offs);  } -static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr, +static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,  				unsigned long value)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase;  	unsigned long offs;  	if (reg_nr == TSTR) { -		iowrite8(value, base + cfg->channel_offset); -		return; +		if (ch->mtu->legacy) +			return iowrite8(value, ch->mtu->mapbase); +		else +			return iowrite8(value, ch->mtu->mapbase + 0x280);  	}  	offs = mtu2_reg_offs[reg_nr];  	if ((reg_nr == TCNT) || (reg_nr == TGR)) -		iowrite16(value, base + offs); +		iowrite16(value, ch->base + offs);  	else -		iowrite8(value, base + offs); +		iowrite8(value, ch->base + offs);  } -static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) +static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data;  	unsigned long flags, value;  	/* start stop 
register shared by multiple timer channels */ -	spin_lock_irqsave(&sh_mtu2_lock, flags); -	value = sh_mtu2_read(p, TSTR); +	raw_spin_lock_irqsave(&sh_mtu2_lock, flags); +	value = sh_mtu2_read(ch, TSTR);  	if (start) -		value |= 1 << cfg->timer_bit; +		value |= 1 << ch->index;  	else -		value &= ~(1 << cfg->timer_bit); +		value &= ~(1 << ch->index); -	sh_mtu2_write(p, TSTR, value); -	spin_unlock_irqrestore(&sh_mtu2_lock, flags); +	sh_mtu2_write(ch, TSTR, value); +	raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);  } -static int sh_mtu2_enable(struct sh_mtu2_priv *p) +static int sh_mtu2_enable(struct sh_mtu2_channel *ch)  { +	unsigned long periodic; +	unsigned long rate;  	int ret; +	pm_runtime_get_sync(&ch->mtu->pdev->dev); +	dev_pm_syscore_device(&ch->mtu->pdev->dev, true); +  	/* enable clock */ -	ret = clk_enable(p->clk); +	ret = clk_enable(ch->mtu->clk);  	if (ret) { -		dev_err(&p->pdev->dev, "cannot enable clock\n"); +		dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n", +			ch->index);  		return ret;  	}  	/* make sure channel is disabled */ -	sh_mtu2_start_stop_ch(p, 0); - -	p->rate = clk_get_rate(p->clk) / 64; -	p->periodic = (p->rate + HZ/2) / HZ; - -	/* "Periodic Counter Operation" */ -	sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */ -	sh_mtu2_write(p, TIOR, 0); -	sh_mtu2_write(p, TGR, p->periodic); -	sh_mtu2_write(p, TCNT, 0); -	sh_mtu2_write(p, TMDR, 0); -	sh_mtu2_write(p, TIER, 0x01); +	sh_mtu2_start_stop_ch(ch, 0); + +	rate = clk_get_rate(ch->mtu->clk) / 64; +	periodic = (rate + HZ/2) / HZ; + +	/* +	 * "Periodic Counter Operation" +	 * Clear on TGRA compare match, divide clock by 64. +	 */ +	sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64); +	sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) | +		      TIOC_IOCL(TIOR_OC_0_CLEAR)); +	sh_mtu2_write(ch, TGR, periodic); +	sh_mtu2_write(ch, TCNT, 0); +	sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL); +	sh_mtu2_write(ch, TIER, TIER_TGIEA);  	/* enable channel */ -	sh_mtu2_start_stop_ch(p, 1); +	sh_mtu2_start_stop_ch(ch, 1);  	return 0;  } -static void sh_mtu2_disable(struct sh_mtu2_priv *p) +static void sh_mtu2_disable(struct sh_mtu2_channel *ch)  {  	/* disable channel */ -	sh_mtu2_start_stop_ch(p, 0); +	sh_mtu2_start_stop_ch(ch, 0);  	/* stop clock */ -	clk_disable(p->clk); +	clk_disable(ch->mtu->clk); + +	dev_pm_syscore_device(&ch->mtu->pdev->dev, false); +	pm_runtime_put(&ch->mtu->pdev->dev);  }  static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)  { -	struct sh_mtu2_priv *p = dev_id; +	struct sh_mtu2_channel *ch = dev_id;  	/* acknowledge interrupt */ -	sh_mtu2_read(p, TSR); -	sh_mtu2_write(p, TSR, 0xfe); +	sh_mtu2_read(ch, TSR); +	sh_mtu2_write(ch, TSR, ~TSR_TGFA);  	/* notify clockevent layer */ -	p->ced.event_handler(&p->ced); +	ch->ced.event_handler(&ch->ced);  	return IRQ_HANDLED;  } -static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced) +static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)  { -	return container_of(ced, struct sh_mtu2_priv, ced); +	return container_of(ced, struct sh_mtu2_channel, ced);  }  static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,  				    struct clock_event_device *ced)  { -	struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced); +	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);  	int disabled = 0;  	/* deal with old setting first */  	switch (ced->mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		sh_mtu2_disable(p); +		sh_mtu2_disable(ch);  		disabled = 1;  		break;  	default: @@ -193,12 +303,13 @@ static void 
sh_mtu2_clock_event_mode(enum clock_event_mode mode,  	switch (mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		dev_info(&p->pdev->dev, "used for periodic clock events\n"); -		sh_mtu2_enable(p); +		dev_info(&ch->mtu->pdev->dev, +			 "ch%u: used for periodic clock events\n", ch->index); +		sh_mtu2_enable(ch);  		break;  	case CLOCK_EVT_MODE_UNUSED:  		if (!disabled) -			sh_mtu2_disable(p); +			sh_mtu2_disable(ch);  		break;  	case CLOCK_EVT_MODE_SHUTDOWN:  	default: @@ -206,135 +317,262 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,  	}  } -static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, -				       char *name, unsigned long rating) +static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)  { -	struct clock_event_device *ced = &p->ced; -	int ret; +	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev); +} + +static void sh_mtu2_clock_event_resume(struct clock_event_device *ced) +{ +	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev); +} -	memset(ced, 0, sizeof(*ced)); +static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, +					const char *name) +{ +	struct clock_event_device *ced = &ch->ced; +	int ret;  	ced->name = name;  	ced->features = CLOCK_EVT_FEAT_PERIODIC; -	ced->rating = rating; -	ced->cpumask = cpumask_of(0); +	ced->rating = 200; +	ced->cpumask = cpu_possible_mask;  	ced->set_mode = sh_mtu2_clock_event_mode; +	ced->suspend = sh_mtu2_clock_event_suspend; +	ced->resume = sh_mtu2_clock_event_resume; -	dev_info(&p->pdev->dev, "used for clock events\n"); +	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", +		 ch->index);  	clockevents_register_device(ced); -	ret = setup_irq(p->irqaction.irq, &p->irqaction); +	ret = request_irq(ch->irq, sh_mtu2_interrupt, +			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, +			  dev_name(&ch->mtu->pdev->dev), ch);  	if (ret) { -		dev_err(&p->pdev->dev, "failed to request irq %d\n", -			p->irqaction.irq); +		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n", +			ch->index, ch->irq);  		return;  	}  } -static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name, -			    unsigned long clockevent_rating) +static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name, +			    bool clockevent)  { -	if (clockevent_rating) -		sh_mtu2_register_clockevent(p, name, clockevent_rating); +	if (clockevent) { +		ch->mtu->has_clockevent = true; +		sh_mtu2_register_clockevent(ch, name); +	}  	return 0;  } -static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) +static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index, +				 struct sh_mtu2_device *mtu)  { -	struct sh_timer_config *cfg = pdev->dev.platform_data; -	struct resource *res; -	int irq, ret; -	ret = -ENXIO; +	static const unsigned int channel_offsets[] = { +		0x300, 0x380, 0x000, +	}; +	bool clockevent; + +	ch->mtu = mtu; -	memset(p, 0, sizeof(*p)); -	p->pdev = pdev; +	if (mtu->legacy) { +		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; -	if (!cfg) { -		dev_err(&p->pdev->dev, "missing platform data\n"); -		goto err0; +		clockevent = cfg->clockevent_rating != 0; + +		ch->irq = platform_get_irq(mtu->pdev, 0); +		ch->base = mtu->mapbase - cfg->channel_offset; +		ch->index = cfg->timer_bit; +	} else { +		char name[6]; + +		clockevent = true; + +		sprintf(name, "tgi%ua", index); +		ch->irq = platform_get_irq_byname(mtu->pdev, name); +		ch->base = mtu->mapbase + channel_offsets[index]; +		ch->index = index;  	} -	
platform_set_drvdata(pdev, p); +	if (ch->irq < 0) { +		/* Skip channels with no declared interrupt. */ +		if (!mtu->legacy) +			return 0; + +		dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n", +			ch->index); +		return ch->irq; +	} + +	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent); +} -	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); +static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) +{ +	struct resource *res; + +	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);  	if (!res) { -		dev_err(&p->pdev->dev, "failed to get I/O memory\n"); -		goto err0; +		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n"); +		return -ENXIO;  	} -	irq = platform_get_irq(p->pdev, 0); -	if (irq < 0) { -		dev_err(&p->pdev->dev, "failed to get irq\n"); -		goto err0; +	mtu->mapbase = ioremap_nocache(res->start, resource_size(res)); +	if (mtu->mapbase == NULL) +		return -ENXIO; + +	/* +	 * In legacy platform device configuration (with one device per channel) +	 * the resource points to the channel base address. +	 */ +	if (mtu->legacy) { +		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; +		mtu->mapbase += cfg->channel_offset;  	} -	/* map memory, let mapbase point to our channel */ -	p->mapbase = ioremap_nocache(res->start, resource_size(res)); -	if (p->mapbase == NULL) { -		dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); -		goto err0; +	return 0; +} + +static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu) +{ +	if (mtu->legacy) { +		struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; +		mtu->mapbase -= cfg->channel_offset; +	} + +	iounmap(mtu->mapbase); +} + +static int sh_mtu2_setup(struct sh_mtu2_device *mtu, +			 struct platform_device *pdev) +{ +	struct sh_timer_config *cfg = pdev->dev.platform_data; +	const struct platform_device_id *id = pdev->id_entry; +	unsigned int i; +	int ret; + +	mtu->pdev = pdev; +	mtu->legacy = id->driver_data; + +	if (mtu->legacy && !cfg) { +		dev_err(&mtu->pdev->dev, "missing platform data\n"); +		return -ENXIO;  	} -	/* setup data for setup_irq() (too early for request_irq()) */ -	p->irqaction.name = dev_name(&p->pdev->dev); -	p->irqaction.handler = sh_mtu2_interrupt; -	p->irqaction.dev_id = p; -	p->irqaction.irq = irq; -	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ -			     IRQF_IRQPOLL  | IRQF_NOBALANCING; - -	/* get hold of clock */ -	p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); -	if (IS_ERR(p->clk)) { -		dev_err(&p->pdev->dev, "cannot get clock\n"); -		ret = PTR_ERR(p->clk); -		goto err1; +	/* Get hold of clock. */ +	mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck"); +	if (IS_ERR(mtu->clk)) { +		dev_err(&mtu->pdev->dev, "cannot get clock\n"); +		return PTR_ERR(mtu->clk);  	} -	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), -				cfg->clockevent_rating); - err1: -	iounmap(p->mapbase); - err0: +	ret = clk_prepare(mtu->clk); +	if (ret < 0) +		goto err_clk_put; + +	/* Map the memory resource. */ +	ret = sh_mtu2_map_memory(mtu); +	if (ret < 0) { +		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n"); +		goto err_clk_unprepare; +	} + +	/* Allocate and setup the channels. 
*/ +	if (mtu->legacy) +		mtu->num_channels = 1; +	else +		mtu->num_channels = 3; + +	mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, +				GFP_KERNEL); +	if (mtu->channels == NULL) { +		ret = -ENOMEM; +		goto err_unmap; +	} + +	if (mtu->legacy) { +		ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu); +		if (ret < 0) +			goto err_unmap; +	} else { +		for (i = 0; i < mtu->num_channels; ++i) { +			ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu); +			if (ret < 0) +				goto err_unmap; +		} +	} + +	platform_set_drvdata(pdev, mtu); + +	return 0; + +err_unmap: +	kfree(mtu->channels); +	sh_mtu2_unmap_memory(mtu); +err_clk_unprepare: +	clk_unprepare(mtu->clk); +err_clk_put: +	clk_put(mtu->clk);  	return ret;  } -static int __devinit sh_mtu2_probe(struct platform_device *pdev) +static int sh_mtu2_probe(struct platform_device *pdev)  { -	struct sh_mtu2_priv *p = platform_get_drvdata(pdev); +	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);  	int ret; -	if (p) { +	if (!is_early_platform_device(pdev)) { +		pm_runtime_set_active(&pdev->dev); +		pm_runtime_enable(&pdev->dev); +	} + +	if (mtu) {  		dev_info(&pdev->dev, "kept as earlytimer\n"); -		return 0; +		goto out;  	} -	p = kmalloc(sizeof(*p), GFP_KERNEL); -	if (p == NULL) { -		dev_err(&pdev->dev, "failed to allocate driver data\n"); +	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL); +	if (mtu == NULL)  		return -ENOMEM; -	} -	ret = sh_mtu2_setup(p, pdev); +	ret = sh_mtu2_setup(mtu, pdev);  	if (ret) { -		kfree(p); -		platform_set_drvdata(pdev, NULL); +		kfree(mtu); +		pm_runtime_idle(&pdev->dev); +		return ret;  	} -	return ret; +	if (is_early_platform_device(pdev)) +		return 0; + + out: +	if (mtu->has_clockevent) +		pm_runtime_irq_safe(&pdev->dev); +	else +		pm_runtime_idle(&pdev->dev); + +	return 0;  } -static int __devexit sh_mtu2_remove(struct platform_device *pdev) +static int sh_mtu2_remove(struct platform_device *pdev)  {  	return -EBUSY; /* cannot unregister clockevent */  } +static const struct platform_device_id sh_mtu2_id_table[] = { +	{ "sh_mtu2", 1 }, +	{ "sh-mtu2", 0 }, +	{ }, +}; +MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table); +  static struct platform_driver sh_mtu2_device_driver = {  	.probe		= sh_mtu2_probe, -	.remove		= __devexit_p(sh_mtu2_remove), +	.remove		= sh_mtu2_remove,  	.driver		= {  		.name	= "sh_mtu2", -	} +	}, +	.id_table	= sh_mtu2_id_table,  };  static int __init sh_mtu2_init(void) @@ -348,7 +586,7 @@ static void __exit sh_mtu2_exit(void)  }  early_platform_init("earlytimer", &sh_mtu2_device_driver); -module_init(sh_mtu2_init); +subsys_initcall(sh_mtu2_init);  module_exit(sh_mtu2_exit);  MODULE_AUTHOR("Magnus Damm"); diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 36aba992306..6bd17a8f3dd 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -11,265 +11,368 @@   * but WITHOUT ANY WARRANTY; without even the implied warranty of   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/delay.h> +#include <linux/err.h>  #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h>  #include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/delay.h>  #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h>  #include <linux/irq.h> -#include <linux/err.h> -#include <linux/clocksource.h> -#include <linux/clockchips.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h>  #include <linux/sh_timer.h>  #include <linux/slab.h> +#include <linux/spinlock.h> + +enum sh_tmu_model { +	SH_TMU_LEGACY, +	SH_TMU, +	SH_TMU_SH3, +}; + +struct sh_tmu_device; + +struct sh_tmu_channel { +	struct sh_tmu_device *tmu; +	unsigned int index; + +	void __iomem *base; +	int irq; -struct sh_tmu_priv { -	void __iomem *mapbase; -	struct clk *clk; -	struct irqaction irqaction; -	struct platform_device *pdev;  	unsigned long rate;  	unsigned long periodic;  	struct clock_event_device ced;  	struct clocksource cs; +	bool cs_enabled; +	unsigned int enable_count; +}; + +struct sh_tmu_device { +	struct platform_device *pdev; + +	void __iomem *mapbase; +	struct clk *clk; + +	enum sh_tmu_model model; + +	struct sh_tmu_channel *channels; +	unsigned int num_channels; + +	bool has_clockevent; +	bool has_clocksource;  }; -static DEFINE_SPINLOCK(sh_tmu_lock); +static DEFINE_RAW_SPINLOCK(sh_tmu_lock);  #define TSTR -1 /* shared register */  #define TCOR  0 /* channel register */  #define TCNT 1 /* channel register */  #define TCR 2 /* channel register */ -static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr) +#define TCR_UNF			(1 << 8) +#define TCR_UNIE		(1 << 5) +#define TCR_TPSC_CLK4		(0 << 0) +#define TCR_TPSC_CLK16		(1 << 0) +#define TCR_TPSC_CLK64		(2 << 0) +#define TCR_TPSC_CLK256		(3 << 0) +#define TCR_TPSC_CLK1024	(4 << 0) +#define TCR_TPSC_MASK		(7 << 0) + +static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase;  	unsigned long offs; -	if (reg_nr == TSTR) -		return ioread8(base - cfg->channel_offset); +	if (reg_nr == TSTR) { +		switch (ch->tmu->model) { +		case SH_TMU_LEGACY: +			return ioread8(ch->tmu->mapbase); +		case SH_TMU_SH3: +			return ioread8(ch->tmu->mapbase + 2); +		case SH_TMU: +			return ioread8(ch->tmu->mapbase + 4); +		} +	}  	offs = reg_nr << 2;  	if (reg_nr == TCR) -		return ioread16(base + offs); +		return ioread16(ch->base + offs);  	else -		return ioread32(base + offs); +		return ioread32(ch->base + offs);  } -static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr, +static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,  				unsigned long value)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data; -	void __iomem *base = p->mapbase;  	unsigned long offs;  	if (reg_nr == TSTR) { -		iowrite8(value, base - cfg->channel_offset); -		return; +		switch (ch->tmu->model) { +		case SH_TMU_LEGACY: +			return iowrite8(value, ch->tmu->mapbase); +		case SH_TMU_SH3: +			return iowrite8(value, ch->tmu->mapbase + 2); +		case SH_TMU: +			return iowrite8(value, ch->tmu->mapbase + 4); +		}  
	}  	offs = reg_nr << 2;  	if (reg_nr == TCR) -		iowrite16(value, base + offs); +		iowrite16(value, ch->base + offs);  	else -		iowrite32(value, base + offs); +		iowrite32(value, ch->base + offs);  } -static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) +static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)  { -	struct sh_timer_config *cfg = p->pdev->dev.platform_data;  	unsigned long flags, value;  	/* start stop register shared by multiple timer channels */ -	spin_lock_irqsave(&sh_tmu_lock, flags); -	value = sh_tmu_read(p, TSTR); +	raw_spin_lock_irqsave(&sh_tmu_lock, flags); +	value = sh_tmu_read(ch, TSTR);  	if (start) -		value |= 1 << cfg->timer_bit; +		value |= 1 << ch->index;  	else -		value &= ~(1 << cfg->timer_bit); +		value &= ~(1 << ch->index); -	sh_tmu_write(p, TSTR, value); -	spin_unlock_irqrestore(&sh_tmu_lock, flags); +	sh_tmu_write(ch, TSTR, value); +	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);  } -static int sh_tmu_enable(struct sh_tmu_priv *p) +static int __sh_tmu_enable(struct sh_tmu_channel *ch)  {  	int ret;  	/* enable clock */ -	ret = clk_enable(p->clk); +	ret = clk_enable(ch->tmu->clk);  	if (ret) { -		dev_err(&p->pdev->dev, "cannot enable clock\n"); +		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n", +			ch->index);  		return ret;  	}  	/* make sure channel is disabled */ -	sh_tmu_start_stop_ch(p, 0); +	sh_tmu_start_stop_ch(ch, 0);  	/* maximum timeout */ -	sh_tmu_write(p, TCOR, 0xffffffff); -	sh_tmu_write(p, TCNT, 0xffffffff); +	sh_tmu_write(ch, TCOR, 0xffffffff); +	sh_tmu_write(ch, TCNT, 0xffffffff);  	/* configure channel to parent clock / 4, irq off */ -	p->rate = clk_get_rate(p->clk) / 4; -	sh_tmu_write(p, TCR, 0x0000); +	ch->rate = clk_get_rate(ch->tmu->clk) / 4; +	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);  	/* enable channel */ -	sh_tmu_start_stop_ch(p, 1); +	sh_tmu_start_stop_ch(ch, 1);  	return 0;  } -static void sh_tmu_disable(struct sh_tmu_priv *p) +static int sh_tmu_enable(struct sh_tmu_channel *ch) +{ +	if (ch->enable_count++ > 0) +		return 0; + +	pm_runtime_get_sync(&ch->tmu->pdev->dev); +	dev_pm_syscore_device(&ch->tmu->pdev->dev, true); + +	return __sh_tmu_enable(ch); +} + +static void __sh_tmu_disable(struct sh_tmu_channel *ch)  {  	/* disable channel */ -	sh_tmu_start_stop_ch(p, 0); +	sh_tmu_start_stop_ch(ch, 0);  	/* disable interrupts in TMU block */ -	sh_tmu_write(p, TCR, 0x0000); +	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);  	/* stop clock */ -	clk_disable(p->clk); +	clk_disable(ch->tmu->clk);  } -static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, +static void sh_tmu_disable(struct sh_tmu_channel *ch) +{ +	if (WARN_ON(ch->enable_count == 0)) +		return; + +	if (--ch->enable_count > 0) +		return; + +	__sh_tmu_disable(ch); + +	dev_pm_syscore_device(&ch->tmu->pdev->dev, false); +	pm_runtime_put(&ch->tmu->pdev->dev); +} + +static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,  			    int periodic)  {  	/* stop timer */ -	sh_tmu_start_stop_ch(p, 0); +	sh_tmu_start_stop_ch(ch, 0);  	/* acknowledge interrupt */ -	sh_tmu_read(p, TCR); +	sh_tmu_read(ch, TCR);  	/* enable interrupt */ -	sh_tmu_write(p, TCR, 0x0020); +	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);  	/* reload delta value in case of periodic timer */  	if (periodic) -		sh_tmu_write(p, TCOR, delta); +		sh_tmu_write(ch, TCOR, delta);  	else -		sh_tmu_write(p, TCOR, 0xffffffff); +		sh_tmu_write(ch, TCOR, 0xffffffff); -	sh_tmu_write(p, TCNT, delta); +	sh_tmu_write(ch, TCNT, delta);  	/* start timer */ -	
sh_tmu_start_stop_ch(p, 1); +	sh_tmu_start_stop_ch(ch, 1);  }  static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)  { -	struct sh_tmu_priv *p = dev_id; +	struct sh_tmu_channel *ch = dev_id;  	/* disable or acknowledge interrupt */ -	if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) -		sh_tmu_write(p, TCR, 0x0000); +	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) +		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);  	else -		sh_tmu_write(p, TCR, 0x0020); +		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);  	/* notify clockevent layer */ -	p->ced.event_handler(&p->ced); +	ch->ced.event_handler(&ch->ced);  	return IRQ_HANDLED;  } -static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs) +static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)  { -	return container_of(cs, struct sh_tmu_priv, cs); +	return container_of(cs, struct sh_tmu_channel, cs);  }  static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)  { -	struct sh_tmu_priv *p = cs_to_sh_tmu(cs); +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); -	return sh_tmu_read(p, TCNT) ^ 0xffffffff; +	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;  }  static int sh_tmu_clocksource_enable(struct clocksource *cs)  { -	struct sh_tmu_priv *p = cs_to_sh_tmu(cs); +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); +	int ret; + +	if (WARN_ON(ch->cs_enabled)) +		return 0; -	return sh_tmu_enable(p); +	ret = sh_tmu_enable(ch); +	if (!ret) { +		__clocksource_updatefreq_hz(cs, ch->rate); +		ch->cs_enabled = true; +	} + +	return ret;  }  static void sh_tmu_clocksource_disable(struct clocksource *cs)  { -	sh_tmu_disable(cs_to_sh_tmu(cs)); +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); + +	if (WARN_ON(!ch->cs_enabled)) +		return; + +	sh_tmu_disable(ch); +	ch->cs_enabled = false; +} + +static void sh_tmu_clocksource_suspend(struct clocksource *cs) +{ +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); + +	if (!ch->cs_enabled) +		return; + +	if (--ch->enable_count == 0) { +		__sh_tmu_disable(ch); +		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev); +	} +} + +static void sh_tmu_clocksource_resume(struct clocksource *cs) +{ +	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); + +	if (!ch->cs_enabled) +		return; + +	if (ch->enable_count++ == 0) { +		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev); +		__sh_tmu_enable(ch); +	}  } -static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, -				       char *name, unsigned long rating) +static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch, +				       const char *name)  { -	struct clocksource *cs = &p->cs; +	struct clocksource *cs = &ch->cs;  	cs->name = name; -	cs->rating = rating; +	cs->rating = 200;  	cs->read = sh_tmu_clocksource_read;  	cs->enable = sh_tmu_clocksource_enable;  	cs->disable = sh_tmu_clocksource_disable; +	cs->suspend = sh_tmu_clocksource_suspend; +	cs->resume = sh_tmu_clocksource_resume;  	cs->mask = CLOCKSOURCE_MASK(32);  	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; -	/* clk_get_rate() needs an enabled clock */ -	clk_enable(p->clk); -	/* channel will be configured at parent clock / 4 */ -	p->rate = clk_get_rate(p->clk) / 4; -	clk_disable(p->clk); -	/* TODO: calculate good shift from rate and counter bit width */ -	cs->shift = 10; -	cs->mult = clocksource_hz2mult(p->rate, cs->shift); - -	dev_info(&p->pdev->dev, "used as clock source\n"); -	clocksource_register(cs); +	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n", +		 ch->index); + +	/* Register with dummy 1 Hz value, gets updated in ->enable() */ +	clocksource_register_hz(cs, 1);  	return 0;  } -static struct sh_tmu_priv 
*ced_to_sh_tmu(struct clock_event_device *ced) +static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)  { -	return container_of(ced, struct sh_tmu_priv, ced); +	return container_of(ced, struct sh_tmu_channel, ced);  } -static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic) +static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)  { -	struct clock_event_device *ced = &p->ced; +	struct clock_event_device *ced = &ch->ced; -	sh_tmu_enable(p); +	sh_tmu_enable(ch); -	/* TODO: calculate good shift from rate and counter bit width */ - -	ced->shift = 32; -	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); -	ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced); -	ced->min_delta_ns = 5000; +	clockevents_config(ced, ch->rate);  	if (periodic) { -		p->periodic = (p->rate + HZ/2) / HZ; -		sh_tmu_set_next(p, p->periodic, 1); +		ch->periodic = (ch->rate + HZ/2) / HZ; +		sh_tmu_set_next(ch, ch->periodic, 1);  	}  }  static void sh_tmu_clock_event_mode(enum clock_event_mode mode,  				    struct clock_event_device *ced)  { -	struct sh_tmu_priv *p = ced_to_sh_tmu(ced); +	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);  	int disabled = 0;  	/* deal with old setting first */  	switch (ced->mode) {  	case CLOCK_EVT_MODE_PERIODIC:  	case CLOCK_EVT_MODE_ONESHOT: -		sh_tmu_disable(p); +		sh_tmu_disable(ch);  		disabled = 1;  		break;  	default: @@ -278,16 +381,18 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,  	switch (mode) {  	case CLOCK_EVT_MODE_PERIODIC: -		dev_info(&p->pdev->dev, "used for periodic clock events\n"); -		sh_tmu_clock_event_start(p, 1); +		dev_info(&ch->tmu->pdev->dev, +			 "ch%u: used for periodic clock events\n", ch->index); +		sh_tmu_clock_event_start(ch, 1);  		break;  	case CLOCK_EVT_MODE_ONESHOT: -		dev_info(&p->pdev->dev, "used for oneshot clock events\n"); -		sh_tmu_clock_event_start(p, 0); +		dev_info(&ch->tmu->pdev->dev, +			 "ch%u: used for oneshot clock events\n", ch->index); +		sh_tmu_clock_event_start(ch, 0);  		break;  	case CLOCK_EVT_MODE_UNUSED:  		if (!disabled) -			sh_tmu_disable(p); +			sh_tmu_disable(ch);  		break;  	case CLOCK_EVT_MODE_SHUTDOWN:  	default: @@ -298,150 +403,288 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,  static int sh_tmu_clock_event_next(unsigned long delta,  				   struct clock_event_device *ced)  { -	struct sh_tmu_priv *p = ced_to_sh_tmu(ced); +	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);  	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);  	/* program new delta value */ -	sh_tmu_set_next(p, delta, 0); +	sh_tmu_set_next(ch, delta, 0);  	return 0;  } -static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, -				       char *name, unsigned long rating) +static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)  { -	struct clock_event_device *ced = &p->ced; -	int ret; +	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev); +} + +static void sh_tmu_clock_event_resume(struct clock_event_device *ced) +{ +	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev); +} -	memset(ced, 0, sizeof(*ced)); +static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch, +				       const char *name) +{ +	struct clock_event_device *ced = &ch->ced; +	int ret;  	ced->name = name;  	ced->features = CLOCK_EVT_FEAT_PERIODIC;  	ced->features |= CLOCK_EVT_FEAT_ONESHOT; -	ced->rating = rating; +	ced->rating = 200;  	ced->cpumask = cpumask_of(0);  	ced->set_next_event = sh_tmu_clock_event_next;  	ced->set_mode = 
sh_tmu_clock_event_mode; +	ced->suspend = sh_tmu_clock_event_suspend; +	ced->resume = sh_tmu_clock_event_resume; -	dev_info(&p->pdev->dev, "used for clock events\n"); -	clockevents_register_device(ced); +	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n", +		 ch->index); -	ret = setup_irq(p->irqaction.irq, &p->irqaction); +	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); + +	ret = request_irq(ch->irq, sh_tmu_interrupt, +			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, +			  dev_name(&ch->tmu->pdev->dev), ch);  	if (ret) { -		dev_err(&p->pdev->dev, "failed to request irq %d\n", -			p->irqaction.irq); +		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n", +			ch->index, ch->irq);  		return;  	}  } -static int sh_tmu_register(struct sh_tmu_priv *p, char *name, -		    unsigned long clockevent_rating, -		    unsigned long clocksource_rating) +static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name, +			   bool clockevent, bool clocksource)  { -	if (clockevent_rating) -		sh_tmu_register_clockevent(p, name, clockevent_rating); -	else if (clocksource_rating) -		sh_tmu_register_clocksource(p, name, clocksource_rating); +	if (clockevent) { +		ch->tmu->has_clockevent = true; +		sh_tmu_register_clockevent(ch, name); +	} else if (clocksource) { +		ch->tmu->has_clocksource = true; +		sh_tmu_register_clocksource(ch, name); +	}  	return 0;  } -static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) +static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index, +				bool clockevent, bool clocksource, +				struct sh_tmu_device *tmu) +{ +	/* Skip unused channels. */ +	if (!clockevent && !clocksource) +		return 0; + +	ch->tmu = tmu; + +	if (tmu->model == SH_TMU_LEGACY) { +		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; + +		/* +		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps +		 * channel register blocks at base + 2 + 12 * index, while all +		 * other variants map them at base + 4 + 12 * index. We can +		 * compute the index by just dividing by 12, the 2-byte or +		 * 4-byte offset being hidden by the integer division. +		 */ +		ch->index = cfg->channel_offset / 12; +		ch->base = tmu->mapbase + cfg->channel_offset; +	} else { +		ch->index = index; + +		if (tmu->model == SH_TMU_SH3) +			ch->base = tmu->mapbase + 4 + ch->index * 12; +		else +			ch->base = tmu->mapbase + 8 + ch->index * 12; +	} + +	ch->irq = platform_get_irq(tmu->pdev, index); +	if (ch->irq < 0) { +		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n", +			ch->index); +		return ch->irq; +	} + +	ch->cs_enabled = false; +	ch->enable_count = 0; + +	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev), +			       clockevent, clocksource); +} + +static int sh_tmu_map_memory(struct sh_tmu_device *tmu)  { -	struct sh_timer_config *cfg = pdev->dev.platform_data;  	struct resource *res; -	int irq, ret; -	ret = -ENXIO; -	memset(p, 0, sizeof(*p)); -	p->pdev = pdev; +	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0); +	if (!res) { +		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n"); +		return -ENXIO; +	} + +	tmu->mapbase = ioremap_nocache(res->start, resource_size(res)); +	if (tmu->mapbase == NULL) +		return -ENXIO; + +	/* +	 * In legacy platform device configuration (with one device per channel) +	 * the resource points to the channel base address. 
+	 */ +	if (tmu->model == SH_TMU_LEGACY) { +		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; +		tmu->mapbase -= cfg->channel_offset; +	} + +	return 0; +} + +static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu) +{ +	if (tmu->model == SH_TMU_LEGACY) { +		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; +		tmu->mapbase += cfg->channel_offset; +	} + +	iounmap(tmu->mapbase); +} + +static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) +{ +	struct sh_timer_config *cfg = pdev->dev.platform_data; +	const struct platform_device_id *id = pdev->id_entry; +	unsigned int i; +	int ret;  	if (!cfg) { -		dev_err(&p->pdev->dev, "missing platform data\n"); -		goto err0; +		dev_err(&tmu->pdev->dev, "missing platform data\n"); +		return -ENXIO;  	} -	platform_set_drvdata(pdev, p); +	tmu->pdev = pdev; +	tmu->model = id->driver_data; -	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); -	if (!res) { -		dev_err(&p->pdev->dev, "failed to get I/O memory\n"); -		goto err0; +	/* Get hold of clock. */ +	tmu->clk = clk_get(&tmu->pdev->dev, +			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck"); +	if (IS_ERR(tmu->clk)) { +		dev_err(&tmu->pdev->dev, "cannot get clock\n"); +		return PTR_ERR(tmu->clk);  	} -	irq = platform_get_irq(p->pdev, 0); -	if (irq < 0) { -		dev_err(&p->pdev->dev, "failed to get irq\n"); -		goto err0; +	ret = clk_prepare(tmu->clk); +	if (ret < 0) +		goto err_clk_put; + +	/* Map the memory resource. */ +	ret = sh_tmu_map_memory(tmu); +	if (ret < 0) { +		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n"); +		goto err_clk_unprepare;  	} -	/* map memory, let mapbase point to our channel */ -	p->mapbase = ioremap_nocache(res->start, resource_size(res)); -	if (p->mapbase == NULL) { -		dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); -		goto err0; +	/* Allocate and setup the channels. */ +	if (tmu->model == SH_TMU_LEGACY) +		tmu->num_channels = 1; +	else +		tmu->num_channels = hweight8(cfg->channels_mask); + +	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, +				GFP_KERNEL); +	if (tmu->channels == NULL) { +		ret = -ENOMEM; +		goto err_unmap;  	} -	/* setup data for setup_irq() (too early for request_irq()) */ -	p->irqaction.name = dev_name(&p->pdev->dev); -	p->irqaction.handler = sh_tmu_interrupt; -	p->irqaction.dev_id = p; -	p->irqaction.irq = irq; -	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ -			     IRQF_IRQPOLL  | IRQF_NOBALANCING; - -	/* get hold of clock */ -	p->clk = clk_get(&p->pdev->dev, "tmu_fck"); -	if (IS_ERR(p->clk)) { -		dev_err(&p->pdev->dev, "cannot get clock\n"); -		ret = PTR_ERR(p->clk); -		goto err1; +	if (tmu->model == SH_TMU_LEGACY) { +		ret = sh_tmu_channel_setup(&tmu->channels[0], 0, +					   cfg->clockevent_rating != 0, +					   cfg->clocksource_rating != 0, tmu); +		if (ret < 0) +			goto err_unmap; +	} else { +		/* +		 * Use the first channel as a clock event device and the second +		 * channel as a clock source. 
+		 */ +		for (i = 0; i < tmu->num_channels; ++i) { +			ret = sh_tmu_channel_setup(&tmu->channels[i], i, +						   i == 0, i == 1, tmu); +			if (ret < 0) +				goto err_unmap; +		}  	} -	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), -			       cfg->clockevent_rating, -			       cfg->clocksource_rating); - err1: -	iounmap(p->mapbase); - err0: +	platform_set_drvdata(pdev, tmu); + +	return 0; + +err_unmap: +	kfree(tmu->channels); +	sh_tmu_unmap_memory(tmu); +err_clk_unprepare: +	clk_unprepare(tmu->clk); +err_clk_put: +	clk_put(tmu->clk);  	return ret;  } -static int __devinit sh_tmu_probe(struct platform_device *pdev) +static int sh_tmu_probe(struct platform_device *pdev)  { -	struct sh_tmu_priv *p = platform_get_drvdata(pdev); +	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);  	int ret; -	if (p) { +	if (!is_early_platform_device(pdev)) { +		pm_runtime_set_active(&pdev->dev); +		pm_runtime_enable(&pdev->dev); +	} + +	if (tmu) {  		dev_info(&pdev->dev, "kept as earlytimer\n"); -		return 0; +		goto out;  	} -	p = kmalloc(sizeof(*p), GFP_KERNEL); -	if (p == NULL) { -		dev_err(&pdev->dev, "failed to allocate driver data\n"); +	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL); +	if (tmu == NULL)  		return -ENOMEM; -	} -	ret = sh_tmu_setup(p, pdev); +	ret = sh_tmu_setup(tmu, pdev);  	if (ret) { -		kfree(p); -		platform_set_drvdata(pdev, NULL); +		kfree(tmu); +		pm_runtime_idle(&pdev->dev); +		return ret;  	} -	return ret; +	if (is_early_platform_device(pdev)) +		return 0; + + out: +	if (tmu->has_clockevent || tmu->has_clocksource) +		pm_runtime_irq_safe(&pdev->dev); +	else +		pm_runtime_idle(&pdev->dev); + +	return 0;  } -static int __devexit sh_tmu_remove(struct platform_device *pdev) +static int sh_tmu_remove(struct platform_device *pdev)  {  	return -EBUSY; /* cannot unregister clockevent and clocksource */  } +static const struct platform_device_id sh_tmu_id_table[] = { +	{ "sh_tmu", SH_TMU_LEGACY }, +	{ "sh-tmu", SH_TMU }, +	{ "sh-tmu-sh3", SH_TMU_SH3 }, +	{ } +}; +MODULE_DEVICE_TABLE(platform, sh_tmu_id_table); +  static struct platform_driver sh_tmu_device_driver = {  	.probe		= sh_tmu_probe, -	.remove		= __devexit_p(sh_tmu_remove), +	.remove		= sh_tmu_remove,  	.driver		= {  		.name	= "sh_tmu", -	} +	}, +	.id_table	= sh_tmu_id_table,  };  static int __init sh_tmu_init(void) @@ -455,7 +698,7 @@ static void __exit sh_tmu_exit(void)  }  early_platform_init("earlytimer", &sh_tmu_device_driver); -module_init(sh_tmu_init); +subsys_initcall(sh_tmu_init);  module_exit(sh_tmu_exit);  MODULE_AUTHOR("Magnus Damm"); diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c new file mode 100644 index 00000000000..efb17c3ee12 --- /dev/null +++ b/drivers/clocksource/sun4i_timer.c @@ -0,0 +1,200 @@ +/* + * Allwinner A1X SoCs timer handling. + * + * Copyright (C) 2012 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * Based on code from + * Allwinner Technology Co., Ltd. <www.allwinnertech.com> + * Benn Huang <benn@allwinnertech.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2.  This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/sched_clock.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define TIMER_IRQ_EN_REG	0x00 +#define TIMER_IRQ_EN(val)		BIT(val) +#define TIMER_IRQ_ST_REG	0x04 +#define TIMER_CTL_REG(val)	(0x10 * val + 0x10) +#define TIMER_CTL_ENABLE		BIT(0) +#define TIMER_CTL_RELOAD		BIT(1) +#define TIMER_CTL_CLK_SRC(val)		(((val) & 0x3) << 2) +#define TIMER_CTL_CLK_SRC_OSC24M		(1) +#define TIMER_CTL_CLK_PRES(val)		(((val) & 0x7) << 4) +#define TIMER_CTL_ONESHOT		BIT(7) +#define TIMER_INTVAL_REG(val)	(0x10 * (val) + 0x14) +#define TIMER_CNTVAL_REG(val)	(0x10 * (val) + 0x18) + +#define TIMER_SYNC_TICKS	3 + +static void __iomem *timer_base; +static u32 ticks_per_jiffy; + +/* + * When we disable a timer, we need to wait at least 2 cycles of the + * timer source clock. For that we use the clocksource timer, which is + * already set up, runs at the same frequency as the other timers, and + * is never disabled. + */ +static void sun4i_clkevt_sync(void) +{ +	u32 old = readl(timer_base + TIMER_CNTVAL_REG(1)); + +	while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < TIMER_SYNC_TICKS) +		cpu_relax(); +} + +static void sun4i_clkevt_time_stop(u8 timer) +{ +	u32 val = readl(timer_base + TIMER_CTL_REG(timer)); +	writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer)); +	sun4i_clkevt_sync(); +} + +static void sun4i_clkevt_time_setup(u8 timer, unsigned long delay) +{ +	writel(delay, timer_base + TIMER_INTVAL_REG(timer)); +} + +static void sun4i_clkevt_time_start(u8 timer, bool periodic) +{ +	u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + +	if (periodic) +		val &= ~TIMER_CTL_ONESHOT; +	else +		val |= TIMER_CTL_ONESHOT; + +	writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, +	       timer_base + TIMER_CTL_REG(timer)); +} + +static void sun4i_clkevt_mode(enum clock_event_mode mode, +			      struct clock_event_device *clk) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		sun4i_clkevt_time_stop(0); +		sun4i_clkevt_time_setup(0, ticks_per_jiffy); +		sun4i_clkevt_time_start(0, true); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +		sun4i_clkevt_time_stop(0); +		sun4i_clkevt_time_start(0, false); +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	default: +		sun4i_clkevt_time_stop(0); +		break; +	} +} + +static int sun4i_clkevt_next_event(unsigned long evt, +				   struct clock_event_device *unused) +{ +	sun4i_clkevt_time_stop(0); +	sun4i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS); +	sun4i_clkevt_time_start(0, false); + +	return 0; +} + +static struct clock_event_device sun4i_clockevent = { +	.name = "sun4i_tick", +	.rating = 350, +	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +	.set_mode = sun4i_clkevt_mode, +	.set_next_event = sun4i_clkevt_next_event, +}; + + +static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = (struct clock_event_device *)dev_id; + +	writel(0x1, timer_base + TIMER_IRQ_ST_REG); +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static struct irqaction sun4i_timer_irq = { +	.name = "sun4i_timer0", +	.flags = IRQF_TIMER | IRQF_IRQPOLL, +	.handler = sun4i_timer_interrupt, +	.dev_id = &sun4i_clockevent, +}; + +static u64 notrace sun4i_timer_sched_read(void) +{ +	return ~readl(timer_base + TIMER_CNTVAL_REG(1)); +} +
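Two idioms in the sun4i code above lean on unsigned 32-bit arithmetic over a down-counter. In sun4i_clkevt_sync(), the elapsed-tick test (old - now) < TIMER_SYNC_TICKS stays correct even if the counter wraps between the two reads, because the subtraction happens in u32. In sun4i_timer_sched_read(), the bitwise complement turns the decrementing counter into a monotonically increasing value (~v == 0xffffffff - v for a u32), which is also what clocksource_mmio_readl_down does for the clocksource registered below. A minimal standalone sketch of both idioms; the helper names are illustrative, not part of the driver:

	/* Elapsed ticks on a 32-bit down-counter: unsigned subtraction
	 * yields the tick delta even across a counter wraparound. */
	static inline bool down_ticks_elapsed(u32 old, u32 now, u32 ticks)
	{
		return old - now >= ticks;
	}

	/* Complementing a down-counter gives an up-counter at the same
	 * rate, suitable as a sched_clock() source. */
	static inline u64 down_counter_to_up(u32 v)
	{
		return ~v;	/* == 0xffffffff - v */
	}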
+static void __init sun4i_timer_init(struct device_node *node) +{ +	unsigned long rate = 0; +	struct clk *clk; +	int ret, irq; +	u32 val; + +	timer_base = of_iomap(node, 0); +	if (!timer_base) +		panic("Can't map registers"); + +	irq = irq_of_parse_and_map(node, 0); +	if (irq <= 0) +		panic("Can't parse IRQ"); + +	clk = of_clk_get(node, 0); +	if (IS_ERR(clk)) +		panic("Can't get timer clock"); +	clk_prepare_enable(clk); + +	rate = clk_get_rate(clk); + +	writel(~0, timer_base + TIMER_INTVAL_REG(1)); +	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD | +	       TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), +	       timer_base + TIMER_CTL_REG(1)); + +	sched_clock_register(sun4i_timer_sched_read, 32, rate); +	clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, +			      rate, 350, 32, clocksource_mmio_readl_down); + +	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); + +	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), +	       timer_base + TIMER_CTL_REG(0)); + +	/* Make sure timer is stopped before playing with interrupts */ +	sun4i_clkevt_time_stop(0); + +	ret = setup_irq(irq, &sun4i_timer_irq); +	if (ret) +		pr_warn("failed to setup irq %d\n", irq); + +	/* Enable timer0 interrupt */ +	val = readl(timer_base + TIMER_IRQ_EN_REG); +	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); + +	sun4i_clockevent.cpumask = cpu_possible_mask; +	sun4i_clockevent.irq = irq; + +	clockevents_config_and_register(&sun4i_clockevent, rate, +					TIMER_SYNC_TICKS, 0xffffffff); +} +CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", +		       sun4i_timer_init); diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 01b886e6882..a8d7ea14f18 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -19,6 +19,8 @@   *   - Two channels combine to create a free-running 32 bit counter   *     with a base rate of 5+ MHz, packaged as a clocksource (with   *     resolution better than 200 nsec). + *   - Some chips support a 32 bit counter. A single channel is used for + *     this 32 bit free-running counter; the second channel is not used.   *   *   - The third channel may be used to provide a 16-bit clockevent   *     source, used in either periodic or oneshot mode.  This runs 
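The dual-channel arrangement described in the comment above concatenates two 16-bit counters into one 32-bit clocksource, which makes the read racy: a carry can ripple from the low half into the chained high half between the two register reads. tc_get_cycles(), whose tail is visible at the top of the next hunk, therefore has to read the halves consistently; the usual pattern looks like the sketch below, written with placeholder accessors rather than the driver's actual register reads:

	/* Torn-read-safe composition of a 32-bit count from two 16-bit
	 * halves; read_hi/read_lo are illustrative placeholders. */
	static u32 read_split_counter(u32 (*read_hi)(void), u32 (*read_lo)(void))
	{
		u32 hi, lo;

		do {
			hi = read_hi();		/* chained channel: upper 16 bits */
			lo = read_lo();		/* free-running channel: lower 16 bits */
		} while (hi != read_hi());	/* retry if a carry crept in between */

		return (hi << 16) | lo;
	}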
This runs @@ -54,12 +56,16 @@ static cycle_t tc_get_cycles(struct clocksource *cs)  	return (upper << 16) | lower;  } +static cycle_t tc_get_cycles32(struct clocksource *cs) +{ +	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); +} +  static struct clocksource clksrc = {  	.name           = "tcb_clksrc",  	.rating         = 200,  	.read           = tc_get_cycles,  	.mask           = CLOCKSOURCE_MASK(32), -	.shift          = 18,  	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,  }; @@ -151,7 +157,6 @@ static struct tc_clkevt_device clkevt = {  		.name		= "tc_clkevt",  		.features	= CLOCK_EVT_FEAT_PERIODIC  					| CLOCK_EVT_FEAT_ONESHOT, -		.shift		= 32,  		/* Should be lower than at91rm9200's system timer */  		.rating		= 125,  		.set_next_event	= tc_next_event, @@ -175,41 +180,91 @@ static irqreturn_t ch2_irq(int irq, void *handle)  static struct irqaction tc_irqaction = {  	.name		= "tc_clkevt", -	.flags		= IRQF_TIMER | IRQF_DISABLED, +	.flags		= IRQF_TIMER,  	.handler	= ch2_irq,  }; -static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) +static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)  { +	int ret;  	struct clk *t2_clk = tc->clk[2];  	int irq = tc->irq[2]; +	/* try to enable t2 clk to avoid future errors in mode change */ +	ret = clk_prepare_enable(t2_clk); +	if (ret) +		return ret; +	clk_disable(t2_clk); +  	clkevt.regs = tc->regs;  	clkevt.clk = t2_clk;  	tc_irqaction.dev_id = &clkevt;  	timer_clock = clk32k_divisor_idx; -	clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift); -	clkevt.clkevt.max_delta_ns -		= clockevent_delta2ns(0xffff, &clkevt.clkevt); -	clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;  	clkevt.clkevt.cpumask = cpumask_of(0); -	setup_irq(irq, &tc_irqaction); +	ret = setup_irq(irq, &tc_irqaction); +	if (ret) +		return ret; + +	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); -	clockevents_register_device(&clkevt.clkevt); +	return ret;  }  #else /* !CONFIG_GENERIC_CLOCKEVENTS */ -static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) +static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)  {  	/* NOTHING */ +	return 0;  }  #endif +static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx) +{ +	/* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */ +	__raw_writel(mck_divisor_idx			/* likely divide-by-8 */ +			| ATMEL_TC_WAVE +			| ATMEL_TC_WAVESEL_UP		/* free-run */ +			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */ +			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */ +			tcaddr + ATMEL_TC_REG(0, CMR)); +	__raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA)); +	__raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC)); +	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */ +	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR)); + +	/* channel 1:  waveform mode, input TIOA0 */ +	__raw_writel(ATMEL_TC_XC1			/* input: TIOA0 */ +			| ATMEL_TC_WAVE +			| ATMEL_TC_WAVESEL_UP,		/* free-run */ +			tcaddr + ATMEL_TC_REG(1, CMR)); +	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */ +	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR)); + +	/* chain channel 0 to channel 1*/ +	__raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR); +	/* then reset all the timers */ +	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR); +} + +static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx) +{ +	/* channel 0:  waveform mode, input mclk/8 */ +	
+static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx) +{ +	/* channel 0:  waveform mode, input mclk/8 */ +	__raw_writel(mck_divisor_idx			/* likely divide-by-8 */ +			| ATMEL_TC_WAVE +			| ATMEL_TC_WAVESEL_UP,		/* free-run */ +			tcaddr + ATMEL_TC_REG(0, CMR)); +	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */ +	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR)); + +	/* then reset all the timers */ +	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR); +} +  static int __init tcb_clksrc_init(void)  {  	static char bootinfo[] __initdata @@ -222,6 +277,7 @@ static int __init tcb_clksrc_init(void)  	int best_divisor_idx = -1;  	int clk32k_divisor_idx = -1;  	int i; +	int ret;  	tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);  	if (!tc) { @@ -232,7 +288,11 @@ static int __init tcb_clksrc_init(void)  	pdev = tc->pdev;  	t0_clk = tc->clk[0]; -	clk_enable(t0_clk); +	ret = clk_prepare_enable(t0_clk); +	if (ret) { +		pr_debug("can't enable T0 clk\n"); +		goto err_free_tc; +	}  	/* How fast will we be counting?  Pick something over 5 MHz.  */  	rate = (u32) clk_get_rate(t0_clk); @@ -256,47 +316,53 @@ static int __init tcb_clksrc_init(void)  		best_divisor_idx = i;  	} -	clksrc.mult = clocksource_hz2mult(divided_rate, clksrc.shift);  	printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,  			divided_rate / 1000000,  			((divided_rate + 500000) % 1000000) / 1000); -	/* tclib will give us three clocks no matter what the -	 * underlying platform supports. -	 */ -	clk_enable(tc->clk[1]); - -	/* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */ -	__raw_writel(best_divisor_idx			/* likely divide-by-8 */ -			| ATMEL_TC_WAVE -			| ATMEL_TC_WAVESEL_UP		/* free-run */ -			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */ -			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */ -			tcaddr + ATMEL_TC_REG(0, CMR)); -	__raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA)); -	__raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC)); -	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */ -	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR)); - -	/* channel 1:  waveform mode, input TIOA0 */ -	__raw_writel(ATMEL_TC_XC1			/* input: TIOA0 */ -			| ATMEL_TC_WAVE -			| ATMEL_TC_WAVESEL_UP,		/* free-run */ -			tcaddr + ATMEL_TC_REG(1, CMR)); -	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */ -	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR)); - -	/* chain channel 0 to channel 1, then reset all the timers */ -	__raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR); -	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR); +	if (tc->tcb_config && tc->tcb_config->counter_width == 32) { +		/* use appropriate function to read 32 bit counter */ +		clksrc.read = tc_get_cycles32; +		/* setup only channel 0 */ +		tcb_setup_single_chan(tc, best_divisor_idx); +	} else { +		/* tclib will give us three clocks no matter what the +		 * underlying platform supports. +		 */ +		ret = clk_prepare_enable(tc->clk[1]); +		if (ret) { +			pr_debug("can't enable T1 clk\n"); +			goto err_disable_t0; +		} +		/* setup both channel 0 & 1 */ +		tcb_setup_dual_chan(tc, best_divisor_idx); +	}  	/* and away we go! 
*/ -	clocksource_register(&clksrc); +	ret = clocksource_register_hz(&clksrc, divided_rate); +	if (ret) +		goto err_disable_t1;  	/* channel 2:  periodic and oneshot timer support */ -	setup_clkevents(tc, clk32k_divisor_idx); +	ret = setup_clkevents(tc, clk32k_divisor_idx); +	if (ret) +		goto err_unregister_clksrc;  	return 0; + +err_unregister_clksrc: +	clocksource_unregister(&clksrc); + +err_disable_t1: +	if (!tc->tcb_config || tc->tcb_config->counter_width != 32) +		clk_disable_unprepare(tc->clk[1]); + +err_disable_t0: +	clk_disable_unprepare(t0_clk); + +err_free_tc: +	atmel_tc_free(tc); +	return ret;  }  arch_initcall(tcb_clksrc_init); diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c new file mode 100644 index 00000000000..d1869f02051 --- /dev/null +++ b/drivers/clocksource/tegra20_timer.c @@ -0,0 +1,258 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * Author: + *	Colin Cross <ccross@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/init.h> +#include <linux/err.h> +#include <linux/time.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> + +#include <asm/mach/time.h> +#include <asm/smp_twd.h> + +#define RTC_SECONDS            0x08 +#define RTC_SHADOW_SECONDS     0x0c +#define RTC_MILLISECONDS       0x10 + +#define TIMERUS_CNTR_1US 0x10 +#define TIMERUS_USEC_CFG 0x14 +#define TIMERUS_CNTR_FREEZE 0x4c + +#define TIMER1_BASE 0x0 +#define TIMER2_BASE 0x8 +#define TIMER3_BASE 0x50 +#define TIMER4_BASE 0x58 + +#define TIMER_PTV 0x0 +#define TIMER_PCR 0x4 + +static void __iomem *timer_reg_base; +static void __iomem *rtc_base; + +static struct timespec persistent_ts; +static u64 persistent_ms, last_persistent_ms; + +#define timer_writel(value, reg) \ +	__raw_writel(value, timer_reg_base + (reg)) +#define timer_readl(reg) \ +	__raw_readl(timer_reg_base + (reg)) + +static int tegra_timer_set_next_event(unsigned long cycles, +					 struct clock_event_device *evt) +{ +	u32 reg; + +	reg = 0x80000000 | ((cycles > 1) ? 
(cycles-1) : 0); +	timer_writel(reg, TIMER3_BASE + TIMER_PTV); + +	return 0; +} + +static void tegra_timer_set_mode(enum clock_event_mode mode, +				    struct clock_event_device *evt) +{ +	u32 reg; + +	timer_writel(0, TIMER3_BASE + TIMER_PTV); + +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		reg = 0xC0000000 | ((1000000/HZ)-1); +		timer_writel(reg, TIMER3_BASE + TIMER_PTV); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_RESUME: +		break; +	} +} + +static struct clock_event_device tegra_clockevent = { +	.name		= "timer0", +	.rating		= 300, +	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, +	.set_next_event	= tegra_timer_set_next_event, +	.set_mode	= tegra_timer_set_mode, +}; + +static u64 notrace tegra_read_sched_clock(void) +{ +	return timer_readl(TIMERUS_CNTR_1US); +} + +/* + * tegra_rtc_read_ms - Reads the Tegra RTC registers + * Care must be taken that this function is not called while the + * tegra_rtc driver could be executing to avoid race conditions + * on the RTC shadow register + */ +static u64 tegra_rtc_read_ms(void) +{ +	u32 ms = readl(rtc_base + RTC_MILLISECONDS); +	u32 s = readl(rtc_base + RTC_SHADOW_SECONDS); +	return (u64)s * MSEC_PER_SEC + ms; +} + +/* + * tegra_read_persistent_clock -  Return time from a persistent clock. + * + * Reads the time from a source which isn't disabled during PM, the + * 32k sync timer.  Converts the cycles elapsed since the last read + * into nsecs and adds them to a monotonically increasing timespec. + * Care must be taken that this function is not called while the + * tegra_rtc driver could be executing to avoid race conditions + * on the RTC shadow register + */ +static void tegra_read_persistent_clock(struct timespec *ts) +{ +	u64 delta; +	struct timespec *tsp = &persistent_ts; + +	last_persistent_ms = persistent_ms; +	persistent_ms = tegra_rtc_read_ms(); +	delta = persistent_ms - last_persistent_ms; + +	timespec_add_ns(tsp, delta * NSEC_PER_MSEC); +	*ts = *tsp; +} + +static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = (struct clock_event_device *)dev_id; +	timer_writel(1<<30, TIMER3_BASE + TIMER_PCR); +	evt->event_handler(evt); +	return IRQ_HANDLED; +} + +static struct irqaction tegra_timer_irq = { +	.name		= "timer0", +	.flags		= IRQF_TIMER | IRQF_TRIGGER_HIGH, +	.handler	= tegra_timer_interrupt, +	.dev_id		= &tegra_clockevent, +}; +
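tegra20_init_timer() below programs TIMERUS_USEC_CFG so that the free-running microsecond counter ticks at exactly 1 MHz for each supported input rate. The magic numbers in its switch statement are consistent with an encoding of ((dividend - 1) << 8) | (divisor - 1), where 1 MHz = rate * dividend / divisor; that layout is an inference from the values rather than something stated in the patch, and the helper below is purely illustrative:

	/* Reproduce the USEC_CFG values used below, assuming the layout
	 * ((dividend - 1) << 8) | (divisor - 1) with
	 * 1 MHz = rate * dividend / divisor. */
	static u32 tegra_usec_cfg_example(unsigned long rate)
	{
		switch (rate) {
		case 12000000:				/* 12 MHz * 1 / 12   */
			return ((1 - 1) << 8) | (12 - 1);	/* 0x000b */
		case 13000000:				/* 13 MHz * 1 / 13   */
			return ((1 - 1) << 8) | (13 - 1);	/* 0x000c */
		case 19200000:				/* 19.2 MHz * 5 / 96 */
			return ((5 - 1) << 8) | (96 - 1);	/* 0x045f */
		case 26000000:				/* 26 MHz * 1 / 26   */
			return ((1 - 1) << 8) | (26 - 1);	/* 0x0019 */
		default:
			return 0;			/* unsupported rate */
		}
	}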
+
+static void __init tegra20_init_timer(struct device_node *np)
+{
+	struct clk *clk;
+	unsigned long rate;
+	int ret;
+
+	timer_reg_base = of_iomap(np, 0);
+	if (!timer_reg_base) {
+		pr_err("Can't map timer registers\n");
+		BUG();
+	}
+
+	tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
+	if (tegra_timer_irq.irq <= 0) {
+		pr_err("Failed to map timer IRQ\n");
+		BUG();
+	}
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		pr_warn("Unable to get timer clock. Assuming 12 MHz input clock.\n");
+		rate = 12000000;
+	} else {
+		clk_prepare_enable(clk);
+		rate = clk_get_rate(clk);
+	}
+
+	switch (rate) {
+	case 12000000:
+		timer_writel(0x000b, TIMERUS_USEC_CFG);
+		break;
+	case 13000000:
+		timer_writel(0x000c, TIMERUS_USEC_CFG);
+		break;
+	case 19200000:
+		timer_writel(0x045f, TIMERUS_USEC_CFG);
+		break;
+	case 26000000:
+		timer_writel(0x0019, TIMERUS_USEC_CFG);
+		break;
+	default:
+		WARN(1, "Unknown clock rate");
+	}
+
+	sched_clock_register(tegra_read_sched_clock, 32, 1000000);
+
+	if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+		"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+		pr_err("Failed to register clocksource\n");
+		BUG();
+	}
+
+	ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
+	if (ret) {
+		pr_err("Failed to register timer IRQ: %d\n", ret);
+		BUG();
+	}
+
+	tegra_clockevent.cpumask = cpu_all_mask;
+	tegra_clockevent.irq = tegra_timer_irq.irq;
+	clockevents_config_and_register(&tegra_clockevent, 1000000,
+					0x1, 0x1fffffff);
+}
+CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
+
+static void __init tegra20_init_rtc(struct device_node *np)
+{
+	struct clk *clk;
+
+	rtc_base = of_iomap(np, 0);
+	if (!rtc_base) {
+		pr_err("Can't map RTC registers\n");
+		BUG();
+	}
+
+	/*
+	 * rtc registers are used by read_persistent_clock, keep the rtc clock
+	 * enabled
+	 */
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk))
+		pr_warn("Unable to get rtc-tegra clock\n");
+	else
+		clk_prepare_enable(clk);
+
+	register_persistent_clock(NULL, tegra_read_persistent_clock);
+}
+CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
+
+#ifdef CONFIG_PM
+static u32 usec_config;
+
+void tegra_timer_suspend(void)
+{
+	usec_config = timer_readl(TIMERUS_USEC_CFG);
+}
+
+void tegra_timer_resume(void)
+{
+	timer_writel(usec_config, TIMERUS_USEC_CFG);
+}
+#endif
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
new file mode 100644
index 00000000000..0451e62fac7
--- /dev/null
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -0,0 +1,314 @@
+/*
+ * Marvell Armada 370/XP SoC timer handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Timer 0 is used as free-running clocksource, while timer 1 is
+ * used as clock_event_device.
+ *
+ * ---
+ * Clocksource driver for Armada 370 and Armada XP SoC.
+ * This driver implements one compatible string for each SoC, given
+ * each has its own characteristics:
+ *
+ *   * Armada 370 has no 25 MHz fixed timer.
+ *
+ *   * Armada XP cannot work properly without such a 25 MHz fixed
+ *     timer, as using anything else leads to a clocksource whose
+ *     frequency varies with cpufreq frequency changes.
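+ *     (This is why armada_xp_timer_init() below refuses to boot
+ *     without the "fixed" clock input.)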
+ * + * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt + */ + +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/cpu.h> +#include <linux/timer.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/sched_clock.h> +#include <linux/percpu.h> + +/* + * Timer block registers. + */ +#define TIMER_CTRL_OFF		0x0000 +#define  TIMER0_EN		 BIT(0) +#define  TIMER0_RELOAD_EN	 BIT(1) +#define  TIMER0_25MHZ            BIT(11) +#define  TIMER0_DIV(div)         ((div) << 19) +#define  TIMER1_EN		 BIT(2) +#define  TIMER1_RELOAD_EN	 BIT(3) +#define  TIMER1_25MHZ            BIT(12) +#define  TIMER1_DIV(div)         ((div) << 22) +#define TIMER_EVENTS_STATUS	0x0004 +#define  TIMER0_CLR_MASK         (~0x1) +#define  TIMER1_CLR_MASK         (~0x100) +#define TIMER0_RELOAD_OFF	0x0010 +#define TIMER0_VAL_OFF		0x0014 +#define TIMER1_RELOAD_OFF	0x0018 +#define TIMER1_VAL_OFF		0x001c + +#define LCL_TIMER_EVENTS_STATUS	0x0028 +/* Global timers are connected to the coherency fabric clock, and the +   below divider reduces their incrementing frequency. */ +#define TIMER_DIVIDER_SHIFT     5 +#define TIMER_DIVIDER           (1 << TIMER_DIVIDER_SHIFT) + +/* + * SoC-specific data. + */ +static void __iomem *timer_base, *local_base; +static unsigned int timer_clk; +static bool timer25Mhz = true; +static u32 enable_mask; + +/* + * Number of timer ticks per jiffy. + */ +static u32 ticks_per_jiffy; + +static struct clock_event_device __percpu *armada_370_xp_evt; + +static void local_timer_ctrl_clrset(u32 clr, u32 set) +{ +	writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set, +		local_base + TIMER_CTRL_OFF); +} + +static u64 notrace armada_370_xp_read_sched_clock(void) +{ +	return ~readl(timer_base + TIMER0_VAL_OFF); +} + +/* + * Clockevent handling. + */ +static int +armada_370_xp_clkevt_next_event(unsigned long delta, +				struct clock_event_device *dev) +{ +	/* +	 * Clear clockevent timer interrupt. +	 */ +	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); + +	/* +	 * Setup new clockevent timer value. +	 */ +	writel(delta, local_base + TIMER0_VAL_OFF); + +	/* +	 * Enable the timer. +	 */ +	local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask); +	return 0; +} + +static void +armada_370_xp_clkevt_mode(enum clock_event_mode mode, +			  struct clock_event_device *dev) +{ +	if (mode == CLOCK_EVT_MODE_PERIODIC) { + +		/* +		 * Setup timer to fire at 1/HZ intervals. +		 */ +		writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF); +		writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF); + +		/* +		 * Enable timer. +		 */ +		local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask); +	} else { +		/* +		 * Disable timer. +		 */ +		local_timer_ctrl_clrset(TIMER0_EN, 0); + +		/* +		 * ACK pending timer interrupt. +		 */ +		writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); +	} +} + +static int armada_370_xp_clkevt_irq; + +static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) +{ +	/* +	 * ACK timer interrupt and call event handler. +	 */ +	struct clock_event_device *evt = dev_id; + +	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS); +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +/* + * Setup the local clock events for a CPU. 
+ */
+static int armada_370_xp_timer_setup(struct clock_event_device *evt)
+{
+	u32 clr = 0, set = 0;
+	int cpu = smp_processor_id();
+
+	if (timer25Mhz)
+		set = TIMER0_25MHZ;
+	else
+		clr = TIMER0_25MHZ;
+	local_timer_ctrl_clrset(clr, set);
+
+	evt->name		= "armada_370_xp_per_cpu_tick";
+	evt->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC;
+	evt->shift		= 32;
+	evt->rating		= 300;
+	evt->set_next_event	= armada_370_xp_clkevt_next_event;
+	evt->set_mode		= armada_370_xp_clkevt_mode;
+	evt->irq		= armada_370_xp_clkevt_irq;
+	evt->cpumask		= cpumask_of(cpu);
+
+	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
+	enable_percpu_irq(evt->irq, 0);
+
+	return 0;
+}
+
+static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+{
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	disable_percpu_irq(evt->irq);
+}
+
+static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
+					   unsigned long action, void *hcpu)
+{
+	/*
+	 * Grab the cpu pointer in each case to avoid spurious
+	 * preemptible warnings.
+	 */
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+		break;
+	case CPU_DYING:
+		armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block armada_370_xp_timer_cpu_nb = {
+	.notifier_call = armada_370_xp_timer_cpu_notify,
+};
+
+static void __init armada_370_xp_timer_common_init(struct device_node *np)
+{
+	u32 clr = 0, set = 0;
+	int res;
+
+	timer_base = of_iomap(np, 0);
+	WARN_ON(!timer_base);
+	local_base = of_iomap(np, 1);
+
+	if (timer25Mhz) {
+		set = TIMER0_25MHZ;
+		enable_mask = TIMER0_EN;
+	} else {
+		clr = TIMER0_25MHZ;
+		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
+	}
+	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
+	local_timer_ctrl_clrset(clr, set);
+
+	/*
+	 * We use timer 0 as the clocksource, and the private (local)
+	 * timer 0 of each CPU for clockevents.
+	 */
+	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);
+
+	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
+
+	/*
+	 * Setup free-running clocksource timer (interrupts
+	 * disabled).
+	 */
+	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
+	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
+
+	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
+		TIMER0_RELOAD_EN | enable_mask,
+		TIMER0_RELOAD_EN | enable_mask);
+
+	/*
+	 * Set scale and timer for sched_clock.
+	 */
+	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
+
+	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
+			      "armada_370_xp_clocksource",
+			      timer_clk, 300, 32, clocksource_mmio_readl_down);
+
+	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
+
+	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
+
+	/*
+	 * Setup clockevent timer (interrupt-driven).
+	 */
+	res = request_percpu_irq(armada_370_xp_clkevt_irq,
+				armada_370_xp_timer_interrupt,
+				"armada_370_xp_per_cpu_tick",
+				armada_370_xp_evt);
+	/* Immediately configure the timer on the boot CPU */
+	if (!res)
+		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+}
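
armada_370_xp_timer_common_init() above expects timer_clk to be set by the per-SoC entry points that follow: the Armada XP path uses the 25 MHz fixed clock as-is, while the Armada 370 path divides its input clock by TIMER_DIVIDER (1 << TIMER_DIVIDER_SHIFT). A minimal sketch of that derivation, with a hypothetical helper name, mirroring the two init functions below:

static unsigned long armada_timer_clk(unsigned long parent_rate,
				      bool has_fixed_25mhz)
{
	/* XP: fixed 25 MHz clock used directly; 370: parent rate / 32 */
	return has_fixed_25mhz ? parent_rate : parent_rate / TIMER_DIVIDER;
}
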
+
+static void __init armada_xp_timer_init(struct device_node *np)
+{
+	struct clk *clk = of_clk_get_by_name(np, "fixed");
+
+	/* The 25 MHz fixed clock is mandatory and must always be available */
+	BUG_ON(IS_ERR(clk));
+	timer_clk = clk_get_rate(clk);
+
+	armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
+		       armada_xp_timer_init);
+
+static void __init armada_370_timer_init(struct device_node *np)
+{
+	struct clk *clk = of_clk_get(np, 0);
+
+	BUG_ON(IS_ERR(clk));
+	timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
+	timer25Mhz = false;
+
+	armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
+		       armada_370_timer_init);
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
new file mode 100644
index 00000000000..bba62f9deef
--- /dev/null
+++ b/drivers/clocksource/time-efm32.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+
+#define TIMERn_CTRL			0x00
+#define TIMERn_CTRL_PRESC(val)			(((val) & 0xf) << 24)
+#define TIMERn_CTRL_PRESC_1024			TIMERn_CTRL_PRESC(10)
+#define TIMERn_CTRL_CLKSEL(val)			(((val) & 0x3) << 16)
+#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK	TIMERn_CTRL_CLKSEL(0)
+#define TIMERn_CTRL_OSMEN			0x00000010
+#define TIMERn_CTRL_MODE(val)			(((val) & 0x3) <<  0)
+#define TIMERn_CTRL_MODE_UP			TIMERn_CTRL_MODE(0)
+#define TIMERn_CTRL_MODE_DOWN			TIMERn_CTRL_MODE(1)
+
+#define TIMERn_CMD			0x04
+#define TIMERn_CMD_START			0x00000001
+#define TIMERn_CMD_STOP				0x00000002
+
+#define TIMERn_IEN			0x0c
+#define TIMERn_IF			0x10
+#define TIMERn_IFS			0x14
+#define TIMERn_IFC			0x18
+#define TIMERn_IRQ_UF				0x00000002
+
+#define TIMERn_TOP			0x1c
+#define TIMERn_CNT			0x24
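
Everything in this driver runs the timer from HFPERCLK through the fixed 1024 prescaler selected via TIMERn_CTRL_PRESC_1024, so both the clocksource frequency and the periodic reload value are derived from rate / 1024. A sketch of the arithmetic used further down (the 48 MHz rate is an assumed example, not a value from the driver, and the helper name is hypothetical):

static unsigned int efm32_periodic_top(unsigned long rate, unsigned int hz)
{
	/* e.g. rate = 48 MHz: tick rate = 46875 Hz; with HZ = 100, top = 469 */
	return DIV_ROUND_CLOSEST(rate, 1024 * hz);
}
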
+
+struct efm32_clock_event_ddata {
+	struct clock_event_device evtdev;
+	void __iomem *base;
+	unsigned periodic_top;
+};
+
+static void efm32_clock_event_set_mode(enum clock_event_mode mode,
+				       struct clock_event_device *evtdev)
+{
+	struct efm32_clock_event_ddata *ddata =
+		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+		writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
+		writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+			       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+			       TIMERn_CTRL_MODE_DOWN,
+			       ddata->base + TIMERn_CTRL);
+		writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+		writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+			       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+			       TIMERn_CTRL_OSMEN |
+			       TIMERn_CTRL_MODE_DOWN,
+			       ddata->base + TIMERn_CTRL);
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+		break;
+
+	case CLOCK_EVT_MODE_RESUME:
+		break;
+	}
+}
+
+static int efm32_clock_event_set_next_event(unsigned long evt,
+					    struct clock_event_device *evtdev)
+{
+	struct efm32_clock_event_ddata *ddata =
+		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+	writel_relaxed(evt, ddata->base + TIMERn_CNT);
+	writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+
+	return 0;
+}
+
+static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id)
+{
+	struct efm32_clock_event_ddata *ddata = dev_id;
+
+	writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC);
+
+	ddata->evtdev.event_handler(&ddata->evtdev);
+
+	return IRQ_HANDLED;
+}
+
+static struct efm32_clock_event_ddata clock_event_ddata = {
+	.evtdev = {
+		.name = "efm32 clockevent",
+		.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+		.set_mode = efm32_clock_event_set_mode,
+		.set_next_event = efm32_clock_event_set_next_event,
+		.rating = 200,
+	},
+};
+
+static struct irqaction efm32_clock_event_irq = {
+	.name = "efm32 clockevent",
+	.flags = IRQF_TIMER,
+	.handler = efm32_clock_event_handler,
+	.dev_id = &clock_event_ddata,
+};
+
+static int __init efm32_clocksource_init(struct device_node *np)
+{
+	struct clk *clk;
+	void __iomem *base;
+	unsigned long rate;
+	int ret;
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		pr_err("failed to get clock for clocksource (%d)\n", ret);
+		goto err_clk_get;
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		pr_err("failed to enable timer clock for clocksource (%d)\n",
+		       ret);
+		goto err_clk_enable;
+	}
+	rate = clk_get_rate(clk);
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		ret = -EADDRNOTAVAIL;
+		pr_err("failed to map registers for clocksource\n");
+		goto err_iomap;
+	}
+
+	writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+		       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+		       TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL);
+	writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD);
+
+	ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer",
+				    DIV_ROUND_CLOSEST(rate, 1024), 200, 16,
+				    clocksource_mmio_readl_up);
+	if (ret) {
+		pr_err("failed to init clocksource (%d)\n", ret);
+		goto err_clocksource_init;
+	}
+
+	return 0;
+
+err_clocksource_init:
+
+	iounmap(base);
+err_iomap:
+
+	clk_disable_unprepare(clk);
+err_clk_enable:
+
+	clk_put(clk);
+err_clk_get:
+
+	return ret;
+}
+
+static int __init efm32_clockevent_init(struct device_node *np)
+{
+	struct clk *clk;
+	void __iomem *base;
+	unsigned long rate;
+	int irq;
+	int ret;
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		pr_err("failed to get clock for clockevent (%d)\n", ret);
+		goto err_clk_get;
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		pr_err("failed to enable timer clock for clockevent (%d)\n",
+		       ret);
+		goto err_clk_enable;
+	}
+	rate = clk_get_rate(clk);
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		ret = -EADDRNOTAVAIL;
+		pr_err("failed to map registers for clockevent\n");
+		goto err_iomap;
+	}
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		ret = -ENOENT;
+		pr_err("failed to get irq for clockevent\n");
+		goto err_get_irq;
+	}
+
+	writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN);
+
+	
clock_event_ddata.base = base; +	clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); + +	setup_irq(irq, &efm32_clock_event_irq); + +	clockevents_config_and_register(&clock_event_ddata.evtdev, +					DIV_ROUND_CLOSEST(rate, 1024), +					0xf, 0xffff); + +	return 0; + +err_get_irq: + +	iounmap(base); +err_iomap: + +	clk_disable_unprepare(clk); +err_clk_enable: + +	clk_put(clk); +err_clk_get: + +	return ret; +} + +/* + * This function asserts that we have exactly one clocksource and one + * clock_event_device in the end. + */ +static void __init efm32_timer_init(struct device_node *np) +{ +	static int has_clocksource, has_clockevent; +	int ret; + +	if (!has_clocksource) { +		ret = efm32_clocksource_init(np); +		if (!ret) { +			has_clocksource = 1; +			return; +		} +	} + +	if (!has_clockevent) { +		ret = efm32_clockevent_init(np); +		if (!ret) { +			has_clockevent = 1; +			return; +		} +	} +} +CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init); +CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init); diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c new file mode 100644 index 00000000000..0b3ce0399c5 --- /dev/null +++ b/drivers/clocksource/time-orion.c @@ -0,0 +1,142 @@ +/* + * Marvell Orion SoC timer handling. + * + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2.  This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * Timer 0 is used as free-running clocksource, while timer 1 is + * used as clock_event_device. + */ + +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/spinlock.h> +#include <linux/sched_clock.h> + +#define TIMER_CTRL		0x00 +#define  TIMER0_EN		BIT(0) +#define  TIMER0_RELOAD_EN	BIT(1) +#define  TIMER1_EN		BIT(2) +#define  TIMER1_RELOAD_EN	BIT(3) +#define TIMER0_RELOAD		0x10 +#define TIMER0_VAL		0x14 +#define TIMER1_RELOAD		0x18 +#define TIMER1_VAL		0x1c + +#define ORION_ONESHOT_MIN	1 +#define ORION_ONESHOT_MAX	0xfffffffe + +static void __iomem *timer_base; + +/* + * Free-running clocksource handling. + */ +static u64 notrace orion_read_sched_clock(void) +{ +	return ~readl(timer_base + TIMER0_VAL); +} + +/* + * Clockevent handling. 
+ */ +static u32 ticks_per_jiffy; + +static int orion_clkevt_next_event(unsigned long delta, +				   struct clock_event_device *dev) +{ +	/* setup and enable one-shot timer */ +	writel(delta, timer_base + TIMER1_VAL); +	atomic_io_modify(timer_base + TIMER_CTRL, +		TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN); + +	return 0; +} + +static void orion_clkevt_mode(enum clock_event_mode mode, +			      struct clock_event_device *dev) +{ +	if (mode == CLOCK_EVT_MODE_PERIODIC) { +		/* setup and enable periodic timer at 1/HZ intervals */ +		writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD); +		writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL); +		atomic_io_modify(timer_base + TIMER_CTRL, +			TIMER1_RELOAD_EN | TIMER1_EN, +			TIMER1_RELOAD_EN | TIMER1_EN); +	} else { +		/* disable timer */ +		atomic_io_modify(timer_base + TIMER_CTRL, +			TIMER1_RELOAD_EN | TIMER1_EN, 0); +	} +} + +static struct clock_event_device orion_clkevt = { +	.name		= "orion_event", +	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, +	.shift		= 32, +	.rating		= 300, +	.set_next_event	= orion_clkevt_next_event, +	.set_mode	= orion_clkevt_mode, +}; + +static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id) +{ +	orion_clkevt.event_handler(&orion_clkevt); +	return IRQ_HANDLED; +} + +static struct irqaction orion_clkevt_irq = { +	.name		= "orion_event", +	.flags		= IRQF_TIMER, +	.handler	= orion_clkevt_irq_handler, +}; + +static void __init orion_timer_init(struct device_node *np) +{ +	struct clk *clk; +	int irq; + +	/* timer registers are shared with watchdog timer */ +	timer_base = of_iomap(np, 0); +	if (!timer_base) +		panic("%s: unable to map resource\n", np->name); + +	clk = of_clk_get(np, 0); +	if (IS_ERR(clk)) +		panic("%s: unable to get clk\n", np->name); +	clk_prepare_enable(clk); + +	/* we are only interested in timer1 irq */ +	irq = irq_of_parse_and_map(np, 1); +	if (irq <= 0) +		panic("%s: unable to parse timer1 irq\n", np->name); + +	/* setup timer0 as free-running clocksource */ +	writel(~0, timer_base + TIMER0_VAL); +	writel(~0, timer_base + TIMER0_RELOAD); +	atomic_io_modify(timer_base + TIMER_CTRL, +		TIMER0_RELOAD_EN | TIMER0_EN, +		TIMER0_RELOAD_EN | TIMER0_EN); +	clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", +			      clk_get_rate(clk), 300, 32, +			      clocksource_mmio_readl_down); +	sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk)); + +	/* setup timer1 as clockevent timer */ +	if (setup_irq(irq, &orion_clkevt_irq)) +		panic("%s: unable to setup irq\n", np->name); + +	ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ; +	orion_clkevt.cpumask = cpumask_of(0); +	orion_clkevt.irq = irq; +	clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk), +					ORION_ONESHOT_MIN, ORION_ONESHOT_MAX); +} +CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init); diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c new file mode 100644 index 00000000000..0250354f7e5 --- /dev/null +++ b/drivers/clocksource/timer-keystone.c @@ -0,0 +1,241 @@ +/* + * Keystone broadcast clock-event + * + * Copyright 2013 Texas Instruments, Inc. + * + * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define TIMER_NAME			"timer-keystone"
+
+/* Timer register offsets */
+#define TIM12				0x10
+#define TIM34				0x14
+#define PRD12				0x18
+#define PRD34				0x1c
+#define TCR				0x20
+#define TGCR				0x24
+#define INTCTLSTAT			0x44
+
+/* Timer register bitfields */
+#define TCR_ENAMODE_MASK		0xC0
+#define TCR_ENAMODE_ONESHOT_MASK	0x40
+#define TCR_ENAMODE_PERIODIC_MASK	0x80
+
+#define TGCR_TIM_UNRESET_MASK		0x03
+#define INTCTLSTAT_ENINT_MASK		0x01
+
+/**
+ * struct keystone_timer: holds timer's data
+ * @base: timer memory base address
+ * @hz_period: cycles per HZ period
+ * @event_dev: event device based on timer
+ */
+static struct keystone_timer {
+	void __iomem *base;
+	unsigned long hz_period;
+	struct clock_event_device event_dev;
+} timer;
+
+static inline u32 keystone_timer_readl(unsigned long rg)
+{
+	return readl_relaxed(timer.base + rg);
+}
+
+static inline void keystone_timer_writel(u32 val, unsigned long rg)
+{
+	writel_relaxed(val, timer.base + rg);
+}
+
+/**
+ * keystone_timer_barrier: write memory barrier
+ * Use an explicit barrier instead of the non-relaxed readl/writel
+ * variants, because in our case the non-relaxed variants would hide
+ * the places where a barrier is actually needed.
+ */
+static inline void keystone_timer_barrier(void)
+{
+	__iowmb();
+}
+
+/**
+ * keystone_timer_config: configures timer to work in oneshot/periodic modes.
+ * @period: number of cycles to configure for
+ * @mode: mode to configure
+ */
+static int keystone_timer_config(u64 period, enum clock_event_mode mode)
+{
+	u32 tcr;
+	u32 off;
+
+	tcr = keystone_timer_readl(TCR);
+	off = tcr & ~(TCR_ENAMODE_MASK);
+
+	/* set enable mode */
+	switch (mode) {
+	case CLOCK_EVT_MODE_ONESHOT:
+		tcr |= TCR_ENAMODE_ONESHOT_MASK;
+		break;
+	case CLOCK_EVT_MODE_PERIODIC:
+		tcr |= TCR_ENAMODE_PERIODIC_MASK;
+		break;
+	default:
+		return -1;
+	}
+
+	/* disable timer */
+	keystone_timer_writel(off, TCR);
+	/* here we have to be sure the timer has been disabled */
+	keystone_timer_barrier();
+
+	/* reset counter to zero, set new period */
+	keystone_timer_writel(0, TIM12);
+	keystone_timer_writel(0, TIM34);
+	keystone_timer_writel(period & 0xffffffff, PRD12);
+	keystone_timer_writel(period >> 32, PRD34);
+
+	/*
+	 * enable timer
+	 * here we have to be sure that CNTLO, CNTHI, PRDLO, PRDHI registers
+	 * have been written.
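+	 * (Without the barrier the enable below could race ahead of the
+	 * period update above.)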
+	 */ +	keystone_timer_barrier(); +	keystone_timer_writel(tcr, TCR); +	return 0; +} + +static void keystone_timer_disable(void) +{ +	u32 tcr; + +	tcr = keystone_timer_readl(TCR); + +	/* disable timer */ +	tcr &= ~(TCR_ENAMODE_MASK); +	keystone_timer_writel(tcr, TCR); +} + +static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = dev_id; + +	evt->event_handler(evt); +	return IRQ_HANDLED; +} + +static int keystone_set_next_event(unsigned long cycles, +				  struct clock_event_device *evt) +{ +	return keystone_timer_config(cycles, evt->mode); +} + +static void keystone_set_mode(enum clock_event_mode mode, +			     struct clock_event_device *evt) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC); +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	case CLOCK_EVT_MODE_ONESHOT: +		keystone_timer_disable(); +		break; +	default: +		break; +	} +} + +static void __init keystone_timer_init(struct device_node *np) +{ +	struct clock_event_device *event_dev = &timer.event_dev; +	unsigned long rate; +	struct clk *clk; +	int irq, error; + +	irq  = irq_of_parse_and_map(np, 0); +	if (irq == NO_IRQ) { +		pr_err("%s: failed to map interrupts\n", __func__); +		return; +	} + +	timer.base = of_iomap(np, 0); +	if (!timer.base) { +		pr_err("%s: failed to map registers\n", __func__); +		return; +	} + +	clk = of_clk_get(np, 0); +	if (IS_ERR(clk)) { +		pr_err("%s: failed to get clock\n", __func__); +		iounmap(timer.base); +		return; +	} + +	error = clk_prepare_enable(clk); +	if (error) { +		pr_err("%s: failed to enable clock\n", __func__); +		goto err; +	} + +	rate = clk_get_rate(clk); + +	/* disable, use internal clock source */ +	keystone_timer_writel(0, TCR); +	/* here we have to be sure the timer has been disabled */ +	keystone_timer_barrier(); + +	/* reset timer as 64-bit, no pre-scaler, plus features are disabled */ +	keystone_timer_writel(0, TGCR); + +	/* unreset timer */ +	keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR); + +	/* init counter to zero */ +	keystone_timer_writel(0, TIM12); +	keystone_timer_writel(0, TIM34); + +	timer.hz_period = DIV_ROUND_UP(rate, HZ); + +	/* enable timer interrupts */ +	keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT); + +	error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER, +			    TIMER_NAME, event_dev); +	if (error) { +		pr_err("%s: failed to setup irq\n", __func__); +		goto err; +	} + +	/* setup clockevent */ +	event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; +	event_dev->set_next_event = keystone_set_next_event; +	event_dev->set_mode = keystone_set_mode; +	event_dev->cpumask = cpu_all_mask; +	event_dev->owner = THIS_MODULE; +	event_dev->name = TIMER_NAME; +	event_dev->irq = irq; + +	clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX); + +	pr_info("keystone timer clock @%lu Hz\n", rate); +	return; +err: +	clk_put(clk); +	iounmap(timer.base); +} + +CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer", +					keystone_timer_init); diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c new file mode 100644 index 00000000000..dbd30398222 --- /dev/null +++ b/drivers/clocksource/timer-marco.c @@ -0,0 +1,306 @@ +/* + * System timer for CSR SiRFprimaII + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. 
+ */ + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/cpu.h> +#include <linux/bitops.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/sched_clock.h> + +#define MARCO_CLOCK_FREQ 1000000 + +#define SIRFSOC_TIMER_32COUNTER_0_CTRL			0x0000 +#define SIRFSOC_TIMER_32COUNTER_1_CTRL			0x0004 +#define SIRFSOC_TIMER_MATCH_0				0x0018 +#define SIRFSOC_TIMER_MATCH_1				0x001c +#define SIRFSOC_TIMER_COUNTER_0				0x0048 +#define SIRFSOC_TIMER_COUNTER_1				0x004c +#define SIRFSOC_TIMER_INTR_STATUS			0x0060 +#define SIRFSOC_TIMER_WATCHDOG_EN			0x0064 +#define SIRFSOC_TIMER_64COUNTER_CTRL			0x0068 +#define SIRFSOC_TIMER_64COUNTER_LO			0x006c +#define SIRFSOC_TIMER_64COUNTER_HI			0x0070 +#define SIRFSOC_TIMER_64COUNTER_LOAD_LO			0x0074 +#define SIRFSOC_TIMER_64COUNTER_LOAD_HI			0x0078 +#define SIRFSOC_TIMER_64COUNTER_RLATCHED_LO		0x007c +#define SIRFSOC_TIMER_64COUNTER_RLATCHED_HI		0x0080 + +#define SIRFSOC_TIMER_REG_CNT 6 + +static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = { +	SIRFSOC_TIMER_WATCHDOG_EN, +	SIRFSOC_TIMER_32COUNTER_0_CTRL, +	SIRFSOC_TIMER_32COUNTER_1_CTRL, +	SIRFSOC_TIMER_64COUNTER_CTRL, +	SIRFSOC_TIMER_64COUNTER_RLATCHED_LO, +	SIRFSOC_TIMER_64COUNTER_RLATCHED_HI, +}; + +static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT]; + +static void __iomem *sirfsoc_timer_base; + +/* disable count and interrupt */ +static inline void sirfsoc_timer_count_disable(int idx) +{ +	writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) & ~0x7, +		sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx); +} + +/* enable count and interrupt */ +static inline void sirfsoc_timer_count_enable(int idx) +{ +	writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) | 0x7, +		sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx); +} + +/* timer interrupt handler */ +static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *ce = dev_id; +	int cpu = smp_processor_id(); + +	/* clear timer interrupt */ +	writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS); + +	if (ce->mode == CLOCK_EVT_MODE_ONESHOT) +		sirfsoc_timer_count_disable(cpu); + +	ce->event_handler(ce); + +	return IRQ_HANDLED; +} + +/* read 64-bit timer counter */ +static cycle_t sirfsoc_timer_read(struct clocksource *cs) +{ +	u64 cycles; + +	writel_relaxed((readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) | +			BIT(0)) & ~BIT(1), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL); + +	cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_HI); +	cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_LO); + +	return cycles; +} + +static int sirfsoc_timer_set_next_event(unsigned long delta, +	struct clock_event_device *ce) +{ +	int cpu = smp_processor_id(); + +	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0 + +		4 * cpu); +	writel_relaxed(delta, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0 + +		4 * cpu); + +	/* enable the tick */ +	sirfsoc_timer_count_enable(cpu); + +	return 0; +} + +static void sirfsoc_timer_set_mode(enum clock_event_mode mode, +	struct clock_event_device *ce) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_ONESHOT: +		/* enable in set_next_event */ +		break; +	default: +		break; +	} + +	
sirfsoc_timer_count_disable(smp_processor_id()); +} + +static void sirfsoc_clocksource_suspend(struct clocksource *cs) +{ +	int i; + +	for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++) +		sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); +} + +static void sirfsoc_clocksource_resume(struct clocksource *cs) +{ +	int i; + +	for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++) +		writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); + +	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], +		sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO); +	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], +		sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI); + +	writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) | +		BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL); +} + +static struct clock_event_device __percpu *sirfsoc_clockevent; + +static struct clocksource sirfsoc_clocksource = { +	.name = "sirfsoc_clocksource", +	.rating = 200, +	.mask = CLOCKSOURCE_MASK(64), +	.flags = CLOCK_SOURCE_IS_CONTINUOUS, +	.read = sirfsoc_timer_read, +	.suspend = sirfsoc_clocksource_suspend, +	.resume = sirfsoc_clocksource_resume, +}; + +static struct irqaction sirfsoc_timer_irq = { +	.name = "sirfsoc_timer0", +	.flags = IRQF_TIMER | IRQF_NOBALANCING, +	.handler = sirfsoc_timer_interrupt, +}; + +static struct irqaction sirfsoc_timer1_irq = { +	.name = "sirfsoc_timer1", +	.flags = IRQF_TIMER | IRQF_NOBALANCING, +	.handler = sirfsoc_timer_interrupt, +}; + +static int sirfsoc_local_timer_setup(struct clock_event_device *ce) +{ +	int cpu = smp_processor_id(); +	struct irqaction *action; + +	if (cpu == 0) +		action = &sirfsoc_timer_irq; +	else +		action = &sirfsoc_timer1_irq; + +	ce->irq = action->irq; +	ce->name = "local_timer"; +	ce->features = CLOCK_EVT_FEAT_ONESHOT; +	ce->rating = 200; +	ce->set_mode = sirfsoc_timer_set_mode; +	ce->set_next_event = sirfsoc_timer_set_next_event; +	clockevents_calc_mult_shift(ce, MARCO_CLOCK_FREQ, 60); +	ce->max_delta_ns = clockevent_delta2ns(-2, ce); +	ce->min_delta_ns = clockevent_delta2ns(2, ce); +	ce->cpumask = cpumask_of(cpu); + +	action->dev_id = ce; +	BUG_ON(setup_irq(ce->irq, action)); +	irq_force_affinity(action->irq, cpumask_of(cpu)); + +	clockevents_register_device(ce); +	return 0; +} + +static void sirfsoc_local_timer_stop(struct clock_event_device *ce) +{ +	int cpu = smp_processor_id(); + +	sirfsoc_timer_count_disable(1); + +	if (cpu == 0) +		remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); +	else +		remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); +} + +static int sirfsoc_cpu_notify(struct notifier_block *self, +			      unsigned long action, void *hcpu) +{ +	/* +	 * Grab cpu pointer in each case to avoid spurious +	 * preemptible warnings +	 */ +	switch (action & ~CPU_TASKS_FROZEN) { +	case CPU_STARTING: +		sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); +		break; +	case CPU_DYING: +		sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent)); +		break; +	} + +	return NOTIFY_OK; +} + +static struct notifier_block sirfsoc_cpu_nb = { +	.notifier_call = sirfsoc_cpu_notify, +}; + +static void __init sirfsoc_clockevent_init(void) +{ +	sirfsoc_clockevent = alloc_percpu(struct clock_event_device); +	BUG_ON(!sirfsoc_clockevent); + +	BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb)); + +	/* Immediately configure the timer on the boot CPU */ +	sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); +} + +/* 
initialize the kernel jiffy timer source */ +static void __init sirfsoc_marco_timer_init(struct device_node *np) +{ +	unsigned long rate; +	u32 timer_div; +	struct clk *clk; + +	clk = of_clk_get(np, 0); +	BUG_ON(IS_ERR(clk)); +	rate = clk_get_rate(clk); + +	BUG_ON(rate < MARCO_CLOCK_FREQ); +	BUG_ON(rate % MARCO_CLOCK_FREQ); + +	/* Initialize the timer dividers */ +	timer_div = rate / MARCO_CLOCK_FREQ - 1; +	writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL); +	writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL); +	writel_relaxed(timer_div << 16, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL); + +	/* Initialize timer counters to 0 */ +	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO); +	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI); +	writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) | +		BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL); +	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0); +	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_1); + +	/* Clear all interrupts */ +	writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS); + +	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, MARCO_CLOCK_FREQ)); + +	sirfsoc_clockevent_init(); +} + +static void __init sirfsoc_of_timer_init(struct device_node *np) +{ +	sirfsoc_timer_base = of_iomap(np, 0); +	if (!sirfsoc_timer_base) +		panic("unable to map timer cpu registers\n"); + +	sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); +	if (!sirfsoc_timer_irq.irq) +		panic("No irq passed for timer0 via DT\n"); + +	sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1); +	if (!sirfsoc_timer1_irq.irq) +		panic("No irq passed for timer1 via DT\n"); + +	sirfsoc_marco_timer_init(np); +} +CLOCKSOURCE_OF_DECLARE(sirfsoc_marco_timer, "sirf,marco-tick", sirfsoc_of_timer_init ); diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c new file mode 100644 index 00000000000..a722aac7ac0 --- /dev/null +++ b/drivers/clocksource/timer-prima2.c @@ -0,0 +1,230 @@ +/* + * System timer for CSR SiRFprimaII + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. 
+ */ + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/bitops.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/sched_clock.h> +#include <asm/mach/time.h> + +#define PRIMA2_CLOCK_FREQ 1000000 + +#define SIRFSOC_TIMER_COUNTER_LO	0x0000 +#define SIRFSOC_TIMER_COUNTER_HI	0x0004 +#define SIRFSOC_TIMER_MATCH_0		0x0008 +#define SIRFSOC_TIMER_MATCH_1		0x000C +#define SIRFSOC_TIMER_MATCH_2		0x0010 +#define SIRFSOC_TIMER_MATCH_3		0x0014 +#define SIRFSOC_TIMER_MATCH_4		0x0018 +#define SIRFSOC_TIMER_MATCH_5		0x001C +#define SIRFSOC_TIMER_STATUS		0x0020 +#define SIRFSOC_TIMER_INT_EN		0x0024 +#define SIRFSOC_TIMER_WATCHDOG_EN	0x0028 +#define SIRFSOC_TIMER_DIV		0x002C +#define SIRFSOC_TIMER_LATCH		0x0030 +#define SIRFSOC_TIMER_LATCHED_LO	0x0034 +#define SIRFSOC_TIMER_LATCHED_HI	0x0038 + +#define SIRFSOC_TIMER_WDT_INDEX		5 + +#define SIRFSOC_TIMER_LATCH_BIT	 BIT(0) + +#define SIRFSOC_TIMER_REG_CNT 11 + +static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = { +	SIRFSOC_TIMER_MATCH_0, SIRFSOC_TIMER_MATCH_1, SIRFSOC_TIMER_MATCH_2, +	SIRFSOC_TIMER_MATCH_3, SIRFSOC_TIMER_MATCH_4, SIRFSOC_TIMER_MATCH_5, +	SIRFSOC_TIMER_INT_EN, SIRFSOC_TIMER_WATCHDOG_EN, SIRFSOC_TIMER_DIV, +	SIRFSOC_TIMER_LATCHED_LO, SIRFSOC_TIMER_LATCHED_HI, +}; + +static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT]; + +static void __iomem *sirfsoc_timer_base; + +/* timer0 interrupt handler */ +static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *ce = dev_id; + +	WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) & +		BIT(0))); + +	/* clear timer0 interrupt */ +	writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); + +	ce->event_handler(ce); + +	return IRQ_HANDLED; +} + +/* read 64-bit timer counter */ +static cycle_t sirfsoc_timer_read(struct clocksource *cs) +{ +	u64 cycles; + +	/* latch the 64-bit timer counter */ +	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, +		sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); +	cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_HI); +	cycles = (cycles << 32) | +		readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO); + +	return cycles; +} + +static int sirfsoc_timer_set_next_event(unsigned long delta, +	struct clock_event_device *ce) +{ +	unsigned long now, next; + +	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, +		sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); +	now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO); +	next = now + delta; +	writel_relaxed(next, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0); +	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, +		sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); +	now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO); + +	return next - now > delta ? 
-ETIME : 0; +} + +static void sirfsoc_timer_set_mode(enum clock_event_mode mode, +	struct clock_event_device *ce) +{ +	u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		WARN_ON(1); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +		writel_relaxed(val | BIT(0), +			sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); +		break; +	case CLOCK_EVT_MODE_SHUTDOWN: +		writel_relaxed(val & ~BIT(0), +			sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_RESUME: +		break; +	} +} + +static void sirfsoc_clocksource_suspend(struct clocksource *cs) +{ +	int i; + +	writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, +		sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); + +	for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++) +		sirfsoc_timer_reg_val[i] = +			readl_relaxed(sirfsoc_timer_base + +				sirfsoc_timer_reg_list[i]); +} + +static void sirfsoc_clocksource_resume(struct clocksource *cs) +{ +	int i; + +	for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++) +		writel_relaxed(sirfsoc_timer_reg_val[i], +			sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); + +	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], +		sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); +	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], +		sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); +} + +static struct clock_event_device sirfsoc_clockevent = { +	.name = "sirfsoc_clockevent", +	.rating = 200, +	.features = CLOCK_EVT_FEAT_ONESHOT, +	.set_mode = sirfsoc_timer_set_mode, +	.set_next_event = sirfsoc_timer_set_next_event, +}; + +static struct clocksource sirfsoc_clocksource = { +	.name = "sirfsoc_clocksource", +	.rating = 200, +	.mask = CLOCKSOURCE_MASK(64), +	.flags = CLOCK_SOURCE_IS_CONTINUOUS, +	.read = sirfsoc_timer_read, +	.suspend = sirfsoc_clocksource_suspend, +	.resume = sirfsoc_clocksource_resume, +}; + +static struct irqaction sirfsoc_timer_irq = { +	.name = "sirfsoc_timer0", +	.flags = IRQF_TIMER, +	.irq = 0, +	.handler = sirfsoc_timer_interrupt, +	.dev_id = &sirfsoc_clockevent, +}; + +/* Overwrite weak default sched_clock with more precise one */ +static u64 notrace sirfsoc_read_sched_clock(void) +{ +	return sirfsoc_timer_read(NULL); +} + +static void __init sirfsoc_clockevent_init(void) +{ +	sirfsoc_clockevent.cpumask = cpumask_of(0); +	clockevents_config_and_register(&sirfsoc_clockevent, PRIMA2_CLOCK_FREQ, +					2, -2); +} + +/* initialize the kernel jiffy timer source */ +static void __init sirfsoc_prima2_timer_init(struct device_node *np) +{ +	unsigned long rate; +	struct clk *clk; + +	clk = of_clk_get(np, 0); +	BUG_ON(IS_ERR(clk)); +	rate = clk_get_rate(clk); + +	BUG_ON(rate < PRIMA2_CLOCK_FREQ); +	BUG_ON(rate % PRIMA2_CLOCK_FREQ); + +	sirfsoc_timer_base = of_iomap(np, 0); +	if (!sirfsoc_timer_base) +		panic("unable to map timer cpu registers\n"); + +	sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); + +	writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1, +		sirfsoc_timer_base + SIRFSOC_TIMER_DIV); +	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); +	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); +	writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); + +	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, +				       PRIMA2_CLOCK_FREQ)); + +	sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ); + +	BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); + +	sirfsoc_clockevent_init(); +} +CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, +	
"sirf,prima2-tick", sirfsoc_prima2_timer_init); diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c new file mode 100644 index 00000000000..02268448dc8 --- /dev/null +++ b/drivers/clocksource/timer-sun5i.c @@ -0,0 +1,198 @@ +/* + * Allwinner SoCs hstimer driver. + * + * Copyright (C) 2013 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2.  This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/reset.h> +#include <linux/sched_clock.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#define TIMER_IRQ_EN_REG		0x00 +#define TIMER_IRQ_EN(val)			BIT(val) +#define TIMER_IRQ_ST_REG		0x04 +#define TIMER_CTL_REG(val)		(0x20 * (val) + 0x10) +#define TIMER_CTL_ENABLE			BIT(0) +#define TIMER_CTL_RELOAD			BIT(1) +#define TIMER_CTL_CLK_PRES(val)			(((val) & 0x7) << 4) +#define TIMER_CTL_ONESHOT			BIT(7) +#define TIMER_INTVAL_LO_REG(val)	(0x20 * (val) + 0x14) +#define TIMER_INTVAL_HI_REG(val)	(0x20 * (val) + 0x18) +#define TIMER_CNTVAL_LO_REG(val)	(0x20 * (val) + 0x1c) +#define TIMER_CNTVAL_HI_REG(val)	(0x20 * (val) + 0x20) + +#define TIMER_SYNC_TICKS	3 + +static void __iomem *timer_base; +static u32 ticks_per_jiffy; + +/* + * When we disable a timer, we need to wait at least for 2 cycles of + * the timer source clock. We will use for that the clocksource timer + * that is already setup and runs at the same frequency than the other + * timers, and we never will be disabled. 
+ */ +static void sun5i_clkevt_sync(void) +{ +	u32 old = readl(timer_base + TIMER_CNTVAL_LO_REG(1)); + +	while ((old - readl(timer_base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS) +		cpu_relax(); +} + +static void sun5i_clkevt_time_stop(u8 timer) +{ +	u32 val = readl(timer_base + TIMER_CTL_REG(timer)); +	writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer)); + +	sun5i_clkevt_sync(); +} + +static void sun5i_clkevt_time_setup(u8 timer, u32 delay) +{ +	writel(delay, timer_base + TIMER_INTVAL_LO_REG(timer)); +} + +static void sun5i_clkevt_time_start(u8 timer, bool periodic) +{ +	u32 val = readl(timer_base + TIMER_CTL_REG(timer)); + +	if (periodic) +		val &= ~TIMER_CTL_ONESHOT; +	else +		val |= TIMER_CTL_ONESHOT; + +	writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, +	       timer_base + TIMER_CTL_REG(timer)); +} + +static void sun5i_clkevt_mode(enum clock_event_mode mode, +			      struct clock_event_device *clk) +{ +	switch (mode) { +	case CLOCK_EVT_MODE_PERIODIC: +		sun5i_clkevt_time_stop(0); +		sun5i_clkevt_time_setup(0, ticks_per_jiffy); +		sun5i_clkevt_time_start(0, true); +		break; +	case CLOCK_EVT_MODE_ONESHOT: +		sun5i_clkevt_time_stop(0); +		sun5i_clkevt_time_start(0, false); +		break; +	case CLOCK_EVT_MODE_UNUSED: +	case CLOCK_EVT_MODE_SHUTDOWN: +	default: +		sun5i_clkevt_time_stop(0); +		break; +	} +} + +static int sun5i_clkevt_next_event(unsigned long evt, +				   struct clock_event_device *unused) +{ +	sun5i_clkevt_time_stop(0); +	sun5i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS); +	sun5i_clkevt_time_start(0, false); + +	return 0; +} + +static struct clock_event_device sun5i_clockevent = { +	.name = "sun5i_tick", +	.rating = 340, +	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, +	.set_mode = sun5i_clkevt_mode, +	.set_next_event = sun5i_clkevt_next_event, +}; + + +static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) +{ +	struct clock_event_device *evt = (struct clock_event_device *)dev_id; + +	writel(0x1, timer_base + TIMER_IRQ_ST_REG); +	evt->event_handler(evt); + +	return IRQ_HANDLED; +} + +static struct irqaction sun5i_timer_irq = { +	.name = "sun5i_timer0", +	.flags = IRQF_TIMER | IRQF_IRQPOLL, +	.handler = sun5i_timer_interrupt, +	.dev_id = &sun5i_clockevent, +}; + +static u64 sun5i_timer_sched_read(void) +{ +	return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1)); +} + +static void __init sun5i_timer_init(struct device_node *node) +{ +	struct reset_control *rstc; +	unsigned long rate; +	struct clk *clk; +	int ret, irq; +	u32 val; + +	timer_base = of_iomap(node, 0); +	if (!timer_base) +		panic("Can't map registers"); + +	irq = irq_of_parse_and_map(node, 0); +	if (irq <= 0) +		panic("Can't parse IRQ"); + +	clk = of_clk_get(node, 0); +	if (IS_ERR(clk)) +		panic("Can't get timer clock"); +	clk_prepare_enable(clk); +	rate = clk_get_rate(clk); + +	rstc = of_reset_control_get(node, NULL); +	if (!IS_ERR(rstc)) +		reset_control_deassert(rstc); + +	writel(~0, timer_base + TIMER_INTVAL_LO_REG(1)); +	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, +	       timer_base + TIMER_CTL_REG(1)); + +	sched_clock_register(sun5i_timer_sched_read, 32, rate); +	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, +			      rate, 340, 32, clocksource_mmio_readl_down); + +	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); + +	ret = setup_irq(irq, &sun5i_timer_irq); +	if (ret) +		pr_warn("failed to setup irq %d\n", irq); + +	/* Enable timer0 interrupt */ +	val = readl(timer_base + TIMER_IRQ_EN_REG); +	writel(val | TIMER_IRQ_EN(0), timer_base + 
TIMER_IRQ_EN_REG); + +	sun5i_clockevent.cpumask = cpu_possible_mask; +	sun5i_clockevent.irq = irq; + +	clockevents_config_and_register(&sun5i_clockevent, rate, +					TIMER_SYNC_TICKS, 0xffffffff); +} +CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", +		       sun5i_timer_init); +CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer", +		       sun5i_timer_init); diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c new file mode 100644 index 00000000000..5dcf756970e --- /dev/null +++ b/drivers/clocksource/timer-u300.c @@ -0,0 +1,447 @@ +/* + * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * Timer COH 901 328, runs the OS timer interrupt. + * Author: Linus Walleij <linus.walleij@stericsson.com> + */ +#include <linux/interrupt.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/types.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> + +/* Generic stuff */ +#include <asm/mach/map.h> +#include <asm/mach/time.h> + +/* + * APP side special timer registers + * This timer contains four timers which can fire an interrupt each. + * OS (operating system) timer @ 32768 Hz + * DD (device driver) timer @ 1 kHz + * GP1 (general purpose 1) timer @ 1MHz + * GP2 (general purpose 2) timer @ 1MHz + */ + +/* Reset OS Timer 32bit (-/W) */ +#define U300_TIMER_APP_ROST					(0x0000) +#define U300_TIMER_APP_ROST_TIMER_RESET				(0x00000000) +/* Enable OS Timer 32bit (-/W) */ +#define U300_TIMER_APP_EOST					(0x0004) +#define U300_TIMER_APP_EOST_TIMER_ENABLE			(0x00000000) +/* Disable OS Timer 32bit (-/W) */ +#define U300_TIMER_APP_DOST					(0x0008) +#define U300_TIMER_APP_DOST_TIMER_DISABLE			(0x00000000) +/* OS Timer Mode Register 32bit (-/W) */ +#define U300_TIMER_APP_SOSTM					(0x000c) +#define U300_TIMER_APP_SOSTM_MODE_CONTINUOUS			(0x00000000) +#define U300_TIMER_APP_SOSTM_MODE_ONE_SHOT			(0x00000001) +/* OS Timer Status Register 32bit (R/-) */ +#define U300_TIMER_APP_OSTS					(0x0010) +#define U300_TIMER_APP_OSTS_TIMER_STATE_MASK			(0x0000000F) +#define U300_TIMER_APP_OSTS_TIMER_STATE_IDLE			(0x00000001) +#define U300_TIMER_APP_OSTS_TIMER_STATE_ACTIVE			(0x00000002) +#define U300_TIMER_APP_OSTS_ENABLE_IND				(0x00000010) +#define U300_TIMER_APP_OSTS_MODE_MASK				(0x00000020) +#define U300_TIMER_APP_OSTS_MODE_CONTINUOUS			(0x00000000) +#define U300_TIMER_APP_OSTS_MODE_ONE_SHOT			(0x00000020) +#define U300_TIMER_APP_OSTS_IRQ_ENABLED_IND			(0x00000040) +#define U300_TIMER_APP_OSTS_IRQ_PENDING_IND			(0x00000080) +/* OS Timer Current Count Register 32bit (R/-) */ +#define U300_TIMER_APP_OSTCC					(0x0014) +/* OS Timer Terminal Count Register 32bit (R/W) */ +#define U300_TIMER_APP_OSTTC					(0x0018) +/* OS Timer Interrupt Enable Register 32bit (-/W) */ +#define U300_TIMER_APP_OSTIE					(0x001c) +#define U300_TIMER_APP_OSTIE_IRQ_DISABLE			(0x00000000) +#define U300_TIMER_APP_OSTIE_IRQ_ENABLE				(0x00000001) +/* OS Timer Interrupt Acknowledge Register 32bit (-/W) */ +#define U300_TIMER_APP_OSTIA					(0x0020) +#define U300_TIMER_APP_OSTIA_IRQ_ACK				(0x00000080) + +/* Reset DD Timer 32bit (-/W) */ +#define U300_TIMER_APP_RDDT					(0x0040) +#define U300_TIMER_APP_RDDT_TIMER_RESET				(0x00000000) +/* Enable DD Timer 32bit (-/W) */ +#define U300_TIMER_APP_EDDT					(0x0044) 
+#define U300_TIMER_APP_EDDT_TIMER_ENABLE			(0x00000000) +/* Disable DD Timer 32bit (-/W) */ +#define U300_TIMER_APP_DDDT					(0x0048) +#define U300_TIMER_APP_DDDT_TIMER_DISABLE			(0x00000000) +/* DD Timer Mode Register 32bit (-/W) */ +#define U300_TIMER_APP_SDDTM					(0x004c) +#define U300_TIMER_APP_SDDTM_MODE_CONTINUOUS			(0x00000000) +#define U300_TIMER_APP_SDDTM_MODE_ONE_SHOT			(0x00000001) +/* DD Timer Status Register 32bit (R/-) */ +#define U300_TIMER_APP_DDTS					(0x0050) +#define U300_TIMER_APP_DDTS_TIMER_STATE_MASK			(0x0000000F) +#define U300_TIMER_APP_DDTS_TIMER_STATE_IDLE			(0x00000001) +#define U300_TIMER_APP_DDTS_TIMER_STATE_ACTIVE			(0x00000002) +#define U300_TIMER_APP_DDTS_ENABLE_IND				(0x00000010) +#define U300_TIMER_APP_DDTS_MODE_MASK				(0x00000020) +#define U300_TIMER_APP_DDTS_MODE_CONTINUOUS			(0x00000000) +#define U300_TIMER_APP_DDTS_MODE_ONE_SHOT			(0x00000020) +#define U300_TIMER_APP_DDTS_IRQ_ENABLED_IND			(0x00000040) +#define U300_TIMER_APP_DDTS_IRQ_PENDING_IND			(0x00000080) +/* DD Timer Current Count Register 32bit (R/-) */ +#define U300_TIMER_APP_DDTCC					(0x0054) +/* DD Timer Terminal Count Register 32bit (R/W) */ +#define U300_TIMER_APP_DDTTC					(0x0058) +/* DD Timer Interrupt Enable Register 32bit (-/W) */ +#define U300_TIMER_APP_DDTIE					(0x005c) +#define U300_TIMER_APP_DDTIE_IRQ_DISABLE			(0x00000000) +#define U300_TIMER_APP_DDTIE_IRQ_ENABLE				(0x00000001) +/* DD Timer Interrupt Acknowledge Register 32bit (-/W) */ +#define U300_TIMER_APP_DDTIA					(0x0060) +#define U300_TIMER_APP_DDTIA_IRQ_ACK				(0x00000080) + +/* Reset GP1 Timer 32bit (-/W) */ +#define U300_TIMER_APP_RGPT1					(0x0080) +#define U300_TIMER_APP_RGPT1_TIMER_RESET			(0x00000000) +/* Enable GP1 Timer 32bit (-/W) */ +#define U300_TIMER_APP_EGPT1					(0x0084) +#define U300_TIMER_APP_EGPT1_TIMER_ENABLE			(0x00000000) +/* Disable GP1 Timer 32bit (-/W) */ +#define U300_TIMER_APP_DGPT1					(0x0088) +#define U300_TIMER_APP_DGPT1_TIMER_DISABLE			(0x00000000) +/* GP1 Timer Mode Register 32bit (-/W) */ +#define U300_TIMER_APP_SGPT1M					(0x008c) +#define U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS			(0x00000000) +#define U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT			(0x00000001) +/* GP1 Timer Status Register 32bit (R/-) */ +#define U300_TIMER_APP_GPT1S					(0x0090) +#define U300_TIMER_APP_GPT1S_TIMER_STATE_MASK			(0x0000000F) +#define U300_TIMER_APP_GPT1S_TIMER_STATE_IDLE			(0x00000001) +#define U300_TIMER_APP_GPT1S_TIMER_STATE_ACTIVE			(0x00000002) +#define U300_TIMER_APP_GPT1S_ENABLE_IND				(0x00000010) +#define U300_TIMER_APP_GPT1S_MODE_MASK				(0x00000020) +#define U300_TIMER_APP_GPT1S_MODE_CONTINUOUS			(0x00000000) +#define U300_TIMER_APP_GPT1S_MODE_ONE_SHOT			(0x00000020) +#define U300_TIMER_APP_GPT1S_IRQ_ENABLED_IND			(0x00000040) +#define U300_TIMER_APP_GPT1S_IRQ_PENDING_IND			(0x00000080) +/* GP1 Timer Current Count Register 32bit (R/-) */ +#define U300_TIMER_APP_GPT1CC					(0x0094) +/* GP1 Timer Terminal Count Register 32bit (R/W) */ +#define U300_TIMER_APP_GPT1TC					(0x0098) +/* GP1 Timer Interrupt Enable Register 32bit (-/W) */ +#define U300_TIMER_APP_GPT1IE					(0x009c) +#define U300_TIMER_APP_GPT1IE_IRQ_DISABLE			(0x00000000) +#define U300_TIMER_APP_GPT1IE_IRQ_ENABLE			(0x00000001) +/* GP1 Timer Interrupt Acknowledge Register 32bit (-/W) */ +#define U300_TIMER_APP_GPT1IA					(0x00a0) +#define U300_TIMER_APP_GPT1IA_IRQ_ACK				(0x00000080) + +/* Reset GP2 Timer 32bit (-/W) */ +#define U300_TIMER_APP_RGPT2					(0x00c0) +#define U300_TIMER_APP_RGPT2_TIMER_RESET			(0x00000000) +/* Enable GP2 Timer 
+#define U300_TIMER_APP_EGPT2					(0x00c4)
+#define U300_TIMER_APP_EGPT2_TIMER_ENABLE			(0x00000000)
+/* Disable GP2 Timer 32bit (-/W) */
+#define U300_TIMER_APP_DGPT2					(0x00c8)
+#define U300_TIMER_APP_DGPT2_TIMER_DISABLE			(0x00000000)
+/* GP2 Timer Mode Register 32bit (-/W) */
+#define U300_TIMER_APP_SGPT2M					(0x00cc)
+#define U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS			(0x00000000)
+#define U300_TIMER_APP_SGPT2M_MODE_ONE_SHOT			(0x00000001)
+/* GP2 Timer Status Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT2S					(0x00d0)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_MASK			(0x0000000F)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_IDLE			(0x00000001)
+#define U300_TIMER_APP_GPT2S_TIMER_STATE_ACTIVE			(0x00000002)
+#define U300_TIMER_APP_GPT2S_ENABLE_IND				(0x00000010)
+#define U300_TIMER_APP_GPT2S_MODE_MASK				(0x00000020)
+#define U300_TIMER_APP_GPT2S_MODE_CONTINUOUS			(0x00000000)
+#define U300_TIMER_APP_GPT2S_MODE_ONE_SHOT			(0x00000020)
+#define U300_TIMER_APP_GPT2S_IRQ_ENABLED_IND			(0x00000040)
+#define U300_TIMER_APP_GPT2S_IRQ_PENDING_IND			(0x00000080)
+/* GP2 Timer Current Count Register 32bit (R/-) */
+#define U300_TIMER_APP_GPT2CC					(0x00d4)
+/* GP2 Timer Terminal Count Register 32bit (R/W) */
+#define U300_TIMER_APP_GPT2TC					(0x00d8)
+/* GP2 Timer Interrupt Enable Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT2IE					(0x00dc)
+#define U300_TIMER_APP_GPT2IE_IRQ_DISABLE			(0x00000000)
+#define U300_TIMER_APP_GPT2IE_IRQ_ENABLE			(0x00000001)
+/* GP2 Timer Interrupt Acknowledge Register 32bit (-/W) */
+#define U300_TIMER_APP_GPT2IA					(0x00e0)
+#define U300_TIMER_APP_GPT2IA_IRQ_ACK				(0x00000080)
+
+/* Clock request control register - all four timers */
+#define U300_TIMER_APP_CRC					(0x100)
+#define U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE			(0x00000001)
+
+static void __iomem *u300_timer_base;
+
+struct u300_clockevent_data {
+	struct clock_event_device cevd;
+	unsigned ticks_per_jiffy;
+};
+
+/*
+ * The u300_set_mode() function is always called first; if the
+ * oneshot timer is active, the oneshot scheduling function
+ * u300_set_next_event() is called immediately after.
+ */
+static void u300_set_mode(enum clock_event_mode mode,
+			  struct clock_event_device *evt)
+{
+	struct u300_clockevent_data *cevdata =
+		container_of(evt, struct u300_clockevent_data, cevd);
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/* Disable interrupts on GPT1 */
+		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+		       u300_timer_base + U300_TIMER_APP_GPT1IE);
+		/* Disable GP1 while we're reprogramming it. */
+		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+		       u300_timer_base + U300_TIMER_APP_DGPT1);
+		/*
+		 * Set the periodic mode to a certain number of ticks per
+		 * jiffy.
+		 */
+		writel(cevdata->ticks_per_jiffy,
+		       u300_timer_base + U300_TIMER_APP_GPT1TC);
+		/*
+		 * Set continuous mode, so the timer keeps triggering
+		 * interrupts.
+		 */
+		writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
+		       u300_timer_base + U300_TIMER_APP_SGPT1M);
+		/* Enable timer interrupts */
+		writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+		       u300_timer_base + U300_TIMER_APP_GPT1IE);
+		/* Then enable the GP1 timer again */
+		writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+		       u300_timer_base + U300_TIMER_APP_EGPT1);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/*
+		 * The actual event will be programmed by the next event hook,
+		 * so we just set a dummy value somewhere at the end of the
+		 * universe here.
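+		 * (Editorial note: one-shot mode still needs some terminal
+		 * count programmed, so GPT1 is parked at the maximum 32-bit
+		 * value; u300_set_next_event() overwrites GPT1TC with the
+		 * real delta before this dummy value can expire.)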
+		 */
+		/* Disable interrupts on GPT1 */
+		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+		       u300_timer_base + U300_TIMER_APP_GPT1IE);
+		/* Disable GP1 while we're reprogramming it. */
+		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+		       u300_timer_base + U300_TIMER_APP_DGPT1);
+		/*
+		 * Expire far in the future, u300_set_next_event() will be
+		 * called soon...
+		 */
+		writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
+		/* We run one shot per tick here! */
+		writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
+		       u300_timer_base + U300_TIMER_APP_SGPT1M);
+		/* Enable interrupts for this timer */
+		writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+		       u300_timer_base + U300_TIMER_APP_GPT1IE);
+		/* Enable timer */
+		writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+		       u300_timer_base + U300_TIMER_APP_EGPT1);
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		/* Disable interrupts on GP1 */
+		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+		       u300_timer_base + U300_TIMER_APP_GPT1IE);
+		/* Disable GP1 */
+		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+		       u300_timer_base + U300_TIMER_APP_DGPT1);
+		break;
+	case CLOCK_EVT_MODE_RESUME:
+		/* Ignore this call */
+		break;
+	}
+}
+
+/*
+ * The app timer in one shot mode obviously has to be reprogrammed
+ * in EXACTLY this sequence to work properly. Do NOT try to e.g. replace
+ * the interrupt disable + timer disable commands with a reset command;
+ * it will fail miserably. Apparently (and I found this the hard way)
+ * the timer is very sensitive to the instruction order, though you don't
+ * get that impression from the data sheet.
+ */
+static int u300_set_next_event(unsigned long cycles,
+			       struct clock_event_device *evt)
+{
+	/* Disable interrupts on GPT1 */
+	writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+	       u300_timer_base + U300_TIMER_APP_GPT1IE);
+	/* Disable GP1 while we're reprogramming it. */
+	writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+	       u300_timer_base + U300_TIMER_APP_DGPT1);
+	/* Reset the General Purpose timer 1. */
+	writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
+	       u300_timer_base + U300_TIMER_APP_RGPT1);
+	/* IRQ in n * cycles */
+	writel(cycles, u300_timer_base + U300_TIMER_APP_GPT1TC);
+	/*
+	 * We run one shot per tick here! (This is necessary to reconfigure;
+	 * the timer will tilt if you don't!)
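+	 * (Editorial note: the complete sequence used below is IRQ
+	 * disable, timer disable, timer reset, load terminal count,
+	 * set one-shot mode, IRQ enable, timer enable - in that order.)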
+	 */
+	writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
+	       u300_timer_base + U300_TIMER_APP_SGPT1M);
+	/* Enable timer interrupts */
+	writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+	       u300_timer_base + U300_TIMER_APP_GPT1IE);
+	/* Then enable the GP1 timer again */
+	writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+	       u300_timer_base + U300_TIMER_APP_EGPT1);
+	return 0;
+}
+
+static struct u300_clockevent_data u300_clockevent_data = {
+	/* Use general purpose timer 1 as clock event */
+	.cevd = {
+		.name		= "GPT1",
+		/* Reasonably fast and accurate clock event */
+		.rating		= 300,
+		.features	= CLOCK_EVT_FEAT_PERIODIC |
+			CLOCK_EVT_FEAT_ONESHOT,
+		.set_next_event	= u300_set_next_event,
+		.set_mode	= u300_set_mode,
+	},
+};
+
+/* Clock event timer interrupt handler */
+static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = &u300_clockevent_data.cevd;
+
+	/* ACK/Clear timer IRQ for the APP GPT1 Timer */
+	writel(U300_TIMER_APP_GPT1IA_IRQ_ACK,
+		u300_timer_base + U300_TIMER_APP_GPT1IA);
+	evt->event_handler(evt);
+	return IRQ_HANDLED;
+}
+
+static struct irqaction u300_timer_irq = {
+	.name		= "U300 Timer Tick",
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
+	.handler	= u300_timer_interrupt,
+};
+
+/*
+ * Register this local implementation as the sched_clock, using
+ * the clocksource to get some better resolution when scheduling
+ * the kernel. We accept that this wraps around for now, since it
+ * is just a relative time stamp. (Inspired by the OMAP
+ * implementation.)
+ */
+
+static u64 notrace u300_read_sched_clock(void)
+{
+	return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
+}
+
+static unsigned long u300_read_current_timer(void)
+{
+	return readl(u300_timer_base + U300_TIMER_APP_GPT2CC);
+}
+
+static struct delay_timer u300_delay_timer;
+
+/*
+ * This sets up the system timers, clock source and clock event.
+ */
+static void __init u300_timer_init_of(struct device_node *np)
+{
+	unsigned int irq;
+	struct clk *clk;
+	unsigned long rate;
+
+	u300_timer_base = of_iomap(np, 0);
+	if (!u300_timer_base)
+		panic("could not ioremap system timer\n");
+
+	/* Get the IRQ for the GP1 timer */
+	irq = irq_of_parse_and_map(np, 2);
+	if (!irq)
+		panic("no IRQ for system timer\n");
+
+	pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
+
+	/* Enable the clock to the timer block */
+	clk = of_clk_get(np, 0);
+	BUG_ON(IS_ERR(clk));
+	clk_prepare_enable(clk);
+	rate = clk_get_rate(clk);
+
+	u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+
+	sched_clock_register(u300_read_sched_clock, 32, rate);
+
+	u300_delay_timer.read_current_timer = &u300_read_current_timer;
+	u300_delay_timer.freq = rate;
+	register_current_timer_delay(&u300_delay_timer);
+
+	/*
+	 * Disable the "OS" and "DD" timers - these are designed for Symbian!
+	 * Example usage in cnh1601578 cpu subsystem pd_timer_app.c
+	 */
+	writel(U300_TIMER_APP_CRC_CLOCK_REQUEST_ENABLE,
+		u300_timer_base + U300_TIMER_APP_CRC);
+	writel(U300_TIMER_APP_ROST_TIMER_RESET,
+		u300_timer_base + U300_TIMER_APP_ROST);
+	writel(U300_TIMER_APP_DOST_TIMER_DISABLE,
+		u300_timer_base + U300_TIMER_APP_DOST);
+	writel(U300_TIMER_APP_RDDT_TIMER_RESET,
+		u300_timer_base + U300_TIMER_APP_RDDT);
+	writel(U300_TIMER_APP_DDDT_TIMER_DISABLE,
+		u300_timer_base + U300_TIMER_APP_DDDT);
+
+	/*
+	 * Reset the General Purpose timer 1.
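+	 * (Editorial note: GPT1 is the clock event timer; it is left
+	 * disabled here and only started once the clockevents core
+	 * programs it through u300_set_mode() and u300_set_next_event().)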
+	 */
+	writel(U300_TIMER_APP_RGPT1_TIMER_RESET,
+		u300_timer_base + U300_TIMER_APP_RGPT1);
+
+	/* Set up the IRQ handler */
+	setup_irq(irq, &u300_timer_irq);
+
+	/* Reset the General Purpose timer 2 */
+	writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
+		u300_timer_base + U300_TIMER_APP_RGPT2);
+	/* Set this timer to run around forever */
+	writel(0xFFFFFFFFU, u300_timer_base + U300_TIMER_APP_GPT2TC);
+	/* Set continuous mode so it wraps around */
+	writel(U300_TIMER_APP_SGPT2M_MODE_CONTINUOUS,
+	       u300_timer_base + U300_TIMER_APP_SGPT2M);
+	/* Disable timer interrupts */
+	writel(U300_TIMER_APP_GPT2IE_IRQ_DISABLE,
+		u300_timer_base + U300_TIMER_APP_GPT2IE);
+	/* Then enable the GP2 timer to use as a free running us counter */
+	writel(U300_TIMER_APP_EGPT2_TIMER_ENABLE,
+		u300_timer_base + U300_TIMER_APP_EGPT2);
+
+	/* Use general purpose timer 2 as clock source */
+	if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
+			"GPT2", rate, 300, 32, clocksource_mmio_readl_up))
+		pr_err("timer: failed to initialize U300 clock source\n");
+
+	/* Configure and register the clockevent */
+	clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
+					1, 0xffffffff);
+
+	/*
+	 * TODO: init and register the rest of the timers too; they can
+	 * be used by hrtimers!
+	 */
+}
+
+CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
+		       u300_timer_init_of);
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c
new file mode 100644
index 00000000000..2798e749223
--- /dev/null
+++ b/drivers/clocksource/versatile.c
@@ -0,0 +1,40 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/clocksource.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+
+#define SYS_24MHZ 0x05c
+
+static void __iomem *versatile_sys_24mhz;
+
+static u64 notrace versatile_sys_24mhz_read(void)
+{
+	return readl(versatile_sys_24mhz);
+}
+
+static void __init versatile_sched_clock_init(struct device_node *node)
+{
+	void __iomem *base = of_iomap(node, 0);
+
+	if (!base)
+		return;
+
+	versatile_sys_24mhz = base + SYS_24MHZ;
+
+	sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
+}
+CLOCKSOURCE_OF_DECLARE(versatile, "arm,vexpress-sysreg",
+		       versatile_sched_clock_init);
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
new file mode 100644
index 00000000000..a918bc481c5
--- /dev/null
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+/*
+ * Each PIT takes 0x10 bytes of register space.
+ */
+#define PITMCR		0x00
+#define PIT0_OFFSET	0x100
+#define PITn_OFFSET(n)	(PIT0_OFFSET + 0x10 * (n))
+#define PITLDVAL	0x00
+#define PITCVAL		0x04
+#define PITTCTRL	0x08
+#define PITTFLG		0x0c
+
+#define PITMCR_MDIS	(0x1 << 1)
+
+#define PITTCTRL_TEN	(0x1 << 0)
+#define PITTCTRL_TIE	(0x1 << 1)
+#define PITCTRL_CHN	(0x1 << 2)
+
+#define PITTFLG_TIF	0x1
+
+static void __iomem *clksrc_base;
+static void __iomem *clkevt_base;
+static unsigned long cycle_per_jiffy;
+
+static inline void pit_timer_enable(void)
+{
+	__raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
+}
+
+static inline void pit_timer_disable(void)
+{
+	__raw_writel(0, clkevt_base + PITTCTRL);
+}
+
+static inline void pit_irq_acknowledge(void)
+{
+	__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
+}
+
+static u64 pit_read_sched_clock(void)
+{
+	return ~__raw_readl(clksrc_base + PITCVAL);
+}
+
+static int __init pit_clocksource_init(unsigned long rate)
+{
+	/* set the max load value and start the clock source counter */
+	__raw_writel(0, clksrc_base + PITTCTRL);
+	__raw_writel(~0UL, clksrc_base + PITLDVAL);
+	__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
+
+	sched_clock_register(pit_read_sched_clock, 32, rate);
+	return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
+			300, 32, clocksource_mmio_readl_down);
+}
+
+static int pit_set_next_event(unsigned long delta,
+				struct clock_event_device *unused)
+{
+	/*
+	 * Writing a new value to the PITLDVAL register will not restart
+	 * the timer; to abort the current cycle and start a timer period
+	 * with the new value, the timer must be disabled and enabled
+	 * again. PITLDVAL must be set to delta minus one, per the PIT
+	 * hardware requirement.
+	 */
+	pit_timer_disable();
+	__raw_writel(delta - 1, clkevt_base + PITLDVAL);
+	pit_timer_enable();
+
+	return 0;
+}
+
+static void pit_set_mode(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		pit_set_next_event(cycle_per_jiffy, evt);
+		break;
+	default:
+		break;
+	}
+}
+
+static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	pit_irq_acknowledge();
+
+	/*
+	 * The PIT hardware doesn't support oneshot: it generates an
+	 * interrupt, reloads the counter value from PITLDVAL when PITCVAL
+	 * reaches zero, and starts counting again. So software needs to
+	 * disable the timer to stop the counter loop in ONESHOT mode.
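+	 * (Editorial note: in PERIODIC mode that auto-reload is exactly
+	 * the behaviour we want, so pit_set_mode() just programs the
+	 * period and leaves the timer running.)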
+	 */
+	if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT))
+		pit_timer_disable();
+
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static struct clock_event_device clockevent_pit = {
+	.name		= "VF pit timer",
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= pit_set_mode,
+	.set_next_event	= pit_set_next_event,
+	.rating		= 300,
+};
+
+static struct irqaction pit_timer_irq = {
+	.name		= "VF pit timer",
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
+	.handler	= pit_timer_interrupt,
+	.dev_id		= &clockevent_pit,
+};
+
+static int __init pit_clockevent_init(unsigned long rate, int irq)
+{
+	__raw_writel(0, clkevt_base + PITTCTRL);
+	__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
+
+	BUG_ON(setup_irq(irq, &pit_timer_irq));
+
+	clockevent_pit.cpumask = cpumask_of(0);
+	clockevent_pit.irq = irq;
+	/*
+	 * The value for the LDVAL register trigger is calculated as:
+	 *     LDVAL trigger = (period / clock period) - 1
+	 * The PIT is a 32-bit down-counting timer that generates an
+	 * interrupt when the counter value reaches 0, so the minimal
+	 * LDVAL trigger value is 1. The min_delta is then the minimal
+	 * LDVAL trigger value + 1, and the max_delta is the full 32 bits.
+	 */
+	clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);
+
+	return 0;
+}
+
+static void __init pit_timer_init(struct device_node *np)
+{
+	struct clk *pit_clk;
+	void __iomem *timer_base;
+	unsigned long clk_rate;
+	int irq;
+
+	timer_base = of_iomap(np, 0);
+	BUG_ON(!timer_base);
+
+	/*
+	 * PIT0 and PIT1 can be chained to build a 64-bit timer,
+	 * so choose PIT2 as clocksource, PIT3 as clockevent device,
+	 * and leave PIT0 and PIT1 unused for anyone else who needs them.
+	 */
+	clksrc_base = timer_base + PITn_OFFSET(2);
+	clkevt_base = timer_base + PITn_OFFSET(3);
+
+	irq = irq_of_parse_and_map(np, 0);
+	BUG_ON(irq <= 0);
+
+	pit_clk = of_clk_get(np, 0);
+	BUG_ON(IS_ERR(pit_clk));
+
+	BUG_ON(clk_prepare_enable(pit_clk));
+
+	clk_rate = clk_get_rate(pit_clk);
+	cycle_per_jiffy = clk_rate / (HZ);
+
+	/* enable the pit module */
+	__raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
+
+	BUG_ON(pit_clocksource_init(clk_rate));
+
+	pit_clockevent_init(clk_rate, irq);
+}
+CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
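Editorial note on the LDVAL arithmetic above, with hypothetical numbers for concreteness: at an assumed PIT input clock of 66 MHz and HZ=100, one period is 10 ms, so LDVAL trigger = (10 ms / (1 / 66 MHz)) - 1 = 660,000 - 1 = 659,999; the down-counter then runs for LDVAL + 1 = 660,000 input clocks before firing, which is exactly one period. The 66 MHz rate is illustrative only, not a claim about any particular Vybrid clock configuration.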
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
new file mode 100644
index 00000000000..1098ed3b9b8
--- /dev/null
+++ b/drivers/clocksource/vt8500_timer.c
@@ -0,0 +1,166 @@
+/*
+ *  drivers/clocksource/vt8500_timer.c
+ *
+ *  Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ *  Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * This file is copied and modified from the original timer.c provided by
+ * Alexey Charkov. Minor changes have been made for Device Tree Support.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <asm/mach/time.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define VT8500_TIMER_OFFSET	0x0100
+#define VT8500_TIMER_HZ		3000000
+#define TIMER_MATCH_VAL		0x0000
+#define TIMER_COUNT_VAL		0x0010
+#define TIMER_STATUS_VAL	0x0014
+#define TIMER_IER_VAL		0x001c		/* interrupt enable */
+#define TIMER_CTRL_VAL		0x0020
+#define TIMER_AS_VAL		0x0024		/* access status */
+#define TIMER_COUNT_R_ACTIVE	(1 << 5)	/* not ready for read */
+#define TIMER_COUNT_W_ACTIVE	(1 << 4)	/* not ready for write */
+#define TIMER_MATCH_W_ACTIVE	(1 << 0)	/* not ready for write */
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * (t))
+
+static void __iomem *regbase;
+
+static cycle_t vt8500_timer_read(struct clocksource *cs)
+{
+	int loops = msecs_to_loops(10);
+	writel(3, regbase + TIMER_CTRL_VAL);
+	while ((readl(regbase + TIMER_AS_VAL) & TIMER_COUNT_R_ACTIVE)
+						&& --loops)
+		cpu_relax();
+	return readl(regbase + TIMER_COUNT_VAL);
+}
+
+static struct clocksource clocksource = {
+	.name           = "vt8500_timer",
+	.rating         = 200,
+	.read           = vt8500_timer_read,
+	.mask           = CLOCKSOURCE_MASK(32),
+	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int vt8500_timer_set_next_event(unsigned long cycles,
+				    struct clock_event_device *evt)
+{
+	int loops = msecs_to_loops(10);
+	cycle_t alarm = clocksource.read(&clocksource) + cycles;
+	while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
+						&& --loops)
+		cpu_relax();
+	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+
+	if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+		return -ETIME;
+
+	writel(1, regbase + TIMER_IER_VAL);
+
+	return 0;
+}
+
+static void vt8500_timer_set_mode(enum clock_event_mode mode,
+			      struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_RESUME:
+	case CLOCK_EVT_MODE_PERIODIC:
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		writel(readl(regbase + TIMER_CTRL_VAL) | 1,
+			regbase + TIMER_CTRL_VAL);
+		writel(0, regbase + TIMER_IER_VAL);
+		break;
+	}
+}
+
+static struct clock_event_device clockevent = {
+	.name           = "vt8500_timer",
+	.features       = CLOCK_EVT_FEAT_ONESHOT,
+	.rating         = 200,
+	.set_next_event = vt8500_timer_set_next_event,
+	.set_mode       = vt8500_timer_set_mode,
+};
+
+static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+	writel(0xf, regbase + TIMER_STATUS_VAL);
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq = {
+	.name    = "vt8500_timer",
+	.flags   = IRQF_TIMER | IRQF_IRQPOLL,
+	.handler = vt8500_timer_interrupt,
+	.dev_id  = &clockevent,
+};
+
+static void __init vt8500_timer_init(struct device_node *np)
+{
+	int timer_irq;
+
+	regbase = of_iomap(np, 0);
+	if (!regbase) {
+		pr_err("%s: Missing iobase description in Device Tree\n",
+								__func__);
+		return;
+	}
+	timer_irq = irq_of_parse_and_map(np, 0);
+	if (!timer_irq) {
+		pr_err("%s: Missing irq description in Device Tree\n",
+								__func__);
+		return;
+	}
+
+	writel(1, regbase + TIMER_CTRL_VAL);
+	writel(0xf, regbase + TIMER_STATUS_VAL);
+	writel(~0, regbase + TIMER_MATCH_VAL);
+
+	if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+		pr_err("%s: clocksource_register failed for %s\n",
+					__func__, clocksource.name);
+
+	clockevent.cpumask = cpumask_of(0);
+
+	if (setup_irq(timer_irq, &irq))
+		pr_err("%s: setup_irq failed for %s\n", __func__,
+							clockevent.name);
+	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+					4, 0xf0000000);
+}
+
+CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
new file mode 100644
index 00000000000..7ce442148c3
--- /dev/null
+++ b/drivers/clocksource/zevio-timer.c
@@ -0,0 +1,220 @@
+/*
+ *  linux/drivers/clocksource/zevio-timer.c
+ *
+ *  Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#define IO_CURRENT_VAL	0x00
+#define IO_DIVIDER	0x04
+#define IO_CONTROL	0x08
+
+#define IO_TIMER1	0x00
+#define IO_TIMER2	0x0C
+
+#define IO_MATCH_BEGIN	0x18
+#define IO_MATCH(x)	(IO_MATCH_BEGIN + ((x) << 2))
+
+#define IO_INTR_STS	0x00
+#define IO_INTR_ACK	0x00
+#define IO_INTR_MSK	0x04
+
+#define CNTL_STOP_TIMER	(1 << 4)
+#define CNTL_RUN_TIMER	(0 << 4)
+
+#define CNTL_INC	(1 << 3)
+#define CNTL_DEC	(0 << 3)
+
+#define CNTL_TOZERO	0
+#define CNTL_MATCH(x)	((x) + 1)
+#define CNTL_FOREVER	7
+
+/* There are 6 match registers but we only use one. */
+#define TIMER_MATCH	0
+
+#define TIMER_INTR_MSK	(1 << (TIMER_MATCH))
+#define TIMER_INTR_ALL	0x3F
+
+struct zevio_timer {
+	void __iomem *base;
+	void __iomem *timer1, *timer2;
+	void __iomem *interrupt_regs;
+
+	struct clk *clk;
+	struct clock_event_device clkevt;
+	struct irqaction clkevt_irq;
+
+	char clocksource_name[64];
+	char clockevent_name[64];
+};
+
+static int zevio_timer_set_event(unsigned long delta,
+				 struct clock_event_device *dev)
+{
+	struct zevio_timer *timer = container_of(dev, struct zevio_timer,
+						 clkevt);
+
+	writel(delta, timer->timer1 + IO_CURRENT_VAL);
+	writel(CNTL_RUN_TIMER | CNTL_DEC | CNTL_MATCH(TIMER_MATCH),
+			timer->timer1 + IO_CONTROL);
+
+	return 0;
+}
+
+static void zevio_timer_set_mode(enum clock_event_mode mode,
+				 struct clock_event_device *dev)
+{
+	struct zevio_timer *timer = container_of(dev, struct zevio_timer,
+						 clkevt);
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_RESUME:
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* Enable timer interrupts */
+		writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK);
+		writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
+		break;
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_UNUSED:
+		/* Disable timer interrupts */
+		writel(0, timer->interrupt_regs + IO_INTR_MSK);
+		writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
+		/* Stop timer */
+		writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
+		break;
+	case CLOCK_EVT_MODE_PERIODIC:
+	default:
+		/* Unsupported */
+		break;
+	}
+}
+
+static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id)
+{
+	struct zevio_timer *timer = dev_id;
+	u32 intr;
+
+	intr = readl(timer->interrupt_regs + IO_INTR_ACK);
+	if (!(intr & TIMER_INTR_MSK))
+		return IRQ_NONE;
+
+	writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_ACK);
+	writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
+
+	if (timer->clkevt.event_handler)
+		timer->clkevt.event_handler(&timer->clkevt);
+
+	return IRQ_HANDLED;
+}
+
+static int __init zevio_timer_add(struct device_node *node)
+{
+	struct zevio_timer *timer;
+	struct resource res;
+	int irqnr, ret;
+
+	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
+	if (!timer)
+		return -ENOMEM;
+
+	timer->base = of_iomap(node, 0);
+	if (!timer->base) {
+		ret = -EINVAL;
+		goto error_free;
+	}
+	timer->timer1 = timer->base + IO_TIMER1;
+	timer->timer2 = timer->base + IO_TIMER2;
+
+	timer->clk = of_clk_get(node, 0);
+	if (IS_ERR(timer->clk)) {
+		ret = PTR_ERR(timer->clk);
+		pr_err("Timer clock not found! (error %d)\n", ret);
+		goto error_unmap;
+	}
+
+	timer->interrupt_regs = of_iomap(node, 1);
+	irqnr = irq_of_parse_and_map(node, 0);
+
+	of_address_to_resource(node, 0, &res);
+	scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name),
+			"%llx.%s_clocksource",
+			(unsigned long long)res.start, node->name);
+
+	scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name),
+			"%llx.%s_clockevent",
+			(unsigned long long)res.start, node->name);
+
+	if (timer->interrupt_regs && irqnr) {
+		timer->clkevt.name		= timer->clockevent_name;
+		timer->clkevt.set_next_event	= zevio_timer_set_event;
+		timer->clkevt.set_mode		= zevio_timer_set_mode;
+		timer->clkevt.rating		= 200;
+		timer->clkevt.cpumask		= cpu_all_mask;
+		timer->clkevt.features		= CLOCK_EVT_FEAT_ONESHOT;
+		timer->clkevt.irq		= irqnr;
+
+		writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
+		writel(0, timer->timer1 + IO_DIVIDER);
+
+		/* Start with timer interrupts disabled */
+		writel(0, timer->interrupt_regs + IO_INTR_MSK);
+		writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
+
+		/* Interrupt to occur when timer value matches 0 */
+		writel(0, timer->base + IO_MATCH(TIMER_MATCH));
+
+		timer->clkevt_irq.name		= timer->clockevent_name;
+		timer->clkevt_irq.handler	= zevio_timer_interrupt;
+		timer->clkevt_irq.dev_id	= timer;
+		timer->clkevt_irq.flags		= IRQF_TIMER | IRQF_IRQPOLL;
+
+		setup_irq(irqnr, &timer->clkevt_irq);
+
+		clockevents_config_and_register(&timer->clkevt,
+				clk_get_rate(timer->clk), 0x0001, 0xffff);
+		pr_info("Added %s as clockevent\n", timer->clockevent_name);
+	}
+
+	writel(CNTL_STOP_TIMER, timer->timer2 + IO_CONTROL);
+	writel(0, timer->timer2 + IO_CURRENT_VAL);
+	writel(0, timer->timer2 + IO_DIVIDER);
+	writel(CNTL_RUN_TIMER | CNTL_FOREVER | CNTL_INC,
+			timer->timer2 + IO_CONTROL);
+
+	clocksource_mmio_init(timer->timer2 + IO_CURRENT_VAL,
+			timer->clocksource_name,
+			clk_get_rate(timer->clk),
+			200, 16,
+			clocksource_mmio_readw_up);
+
+	pr_info("Added %s as clocksource\n", timer->clocksource_name);
+
+	return 0;
+error_unmap:
+	iounmap(timer->base);
+error_free:
+	kfree(timer);
+	return ret;
+}
+
+static void __init zevio_timer_init(struct device_node *node)
+{
+	BUG_ON(zevio_timer_add(node));
+}
+
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
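Editorial note: every driver added in this series follows the same skeleton: map the timer registers with of_iomap(), enable the input clock, register a free-running counter as a clocksource (and usually as sched_clock), then wire a match or terminal-count interrupt into a clock_event_device and hand it to clockevents_config_and_register(). The sketch below distills that shared pattern into one minimal driver. It is not any of the drivers above: the "acme,example-timer" compatible string, the register offsets and the min_delta of 16 cycles are invented for illustration, and error handling uses the same panic/BUG_ON style as the drivers in this series.

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

/* Hypothetical layout: a free-running up-counter with one match register */
#define EXAMPLE_COUNT	0x00	/* current count (RO) */
#define EXAMPLE_MATCH	0x04	/* match value for the next event (RW) */
#define EXAMPLE_IRQ_ACK	0x08	/* write 1 to acknowledge the match IRQ */

static void __iomem *example_base;

static u64 notrace example_read_sched_clock(void)
{
	return readl(example_base + EXAMPLE_COUNT);
}

static int example_set_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	/*
	 * A real driver must handle the race where the counter passes
	 * the match value before the write lands; see the -ETIME check
	 * in vt8500_timer_set_next_event() above.
	 */
	writel(readl(example_base + EXAMPLE_COUNT) + cycles,
	       example_base + EXAMPLE_MATCH);
	return 0;
}

static void example_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *evt)
{
	/* One-shot only; a real driver would mask the IRQ on SHUTDOWN */
}

static struct clock_event_device example_clockevent = {
	.name		= "example-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_next_event	= example_set_next_event,
	.set_mode	= example_set_mode,
};

static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	writel(1, example_base + EXAMPLE_IRQ_ACK);
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction example_timer_irq = {
	.name		= "example-timer",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= example_timer_interrupt,
	.dev_id		= &example_clockevent,
};

static void __init example_timer_init(struct device_node *np)
{
	struct clk *clk;
	unsigned long rate;
	int irq;

	example_base = of_iomap(np, 0);
	if (!example_base)
		panic("example-timer: could not map registers\n");

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0)
		panic("example-timer: no IRQ\n");

	clk = of_clk_get(np, 0);
	BUG_ON(IS_ERR(clk));
	clk_prepare_enable(clk);
	rate = clk_get_rate(clk);

	/* The same free-running counter serves clocksource and sched_clock */
	clocksource_mmio_init(example_base + EXAMPLE_COUNT, "example-timer",
			      rate, 200, 32, clocksource_mmio_readl_up);
	sched_clock_register(example_read_sched_clock, 32, rate);

	example_clockevent.cpumask = cpumask_of(0);
	example_clockevent.irq = irq;
	setup_irq(irq, &example_timer_irq);
	clockevents_config_and_register(&example_clockevent, rate,
					16, 0xffffffff);
}
CLOCKSOURCE_OF_DECLARE(example, "acme,example-timer", example_timer_init);

With CLKSRC_OF enabled, clocksource_of_init() scans the device tree at boot and invokes example_timer_init() for each node whose compatible string matches the CLOCKSOURCE_OF_DECLARE() entry - which is exactly how the sun5i, u300, versatile, vf610, vt8500 and zevio initializers above are reached.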
