Diffstat (limited to 'arch/tile')
64 files changed, 1631 insertions, 581 deletions
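Much of the churn below is mechanical (pointer-style cleanup in the machine-generated gxio IORPC stubs); among the functional additions is perf-event/PMC support, introduced via the new asm/perf_event.h and asm/pmc.h headers further down. As a rough orientation, a counter client would claim the PMC hardware through that interface roughly as sketched here. This is an illustrative sketch only: the example_* names and the handler body are hypothetical, and it assumes reserve_pmc_hardware() hands back any previously registered handler (non-NULL meaning the counters are already owned, e.g. by oprofile); only perf_irq_t and the function names come from the header as added in this series.

#include <linux/errno.h>
#include <asm/pmc.h>

/* Hypothetical overflow handler matching the perf_irq_t signature. */
static int example_pmc_overflow(struct pt_regs *regs, int fault)
{
	unsigned long status = pmc_get_overflow();

	/* ... fold the overflowed counter(s) into the perf machinery ... */

	pmc_ack_overflow(status);
	return 0;	/* per the header: return an error to disable the interrupt */
}

static int example_pmc_claim(void)
{
	/* Assumption: a non-NULL return means another client already owns the PMC. */
	if (reserve_pmc_hardware(example_pmc_overflow) != NULL)
		return -EBUSY;

	unmask_pmc_interrupts();
	return 0;
}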
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 8a7cc663b3f..4f3006b600e 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -3,12 +3,13 @@  config TILE  	def_bool y +	select HAVE_PERF_EVENTS +	select USE_PMC if PERF_EVENTS  	select HAVE_DMA_ATTRS  	select HAVE_DMA_API_DEBUG  	select HAVE_KVM if !TILEGX  	select GENERIC_FIND_FIRST_BIT  	select SYSCTL_EXCEPTION_TRACE -	select USE_GENERIC_SMP_HELPERS  	select CC_OPTIMIZE_FOR_SIZE  	select HAVE_DEBUG_KMEMLEAK  	select GENERIC_IRQ_PROBE @@ -67,6 +68,10 @@ config HUGETLB_SUPER_PAGES  config GENERIC_TIME_VSYSCALL  	def_bool y +# Enable PMC if PERF_EVENTS, OPROFILE, or WATCHPOINTS are enabled. +config USE_PMC +	bool +  # FIXME: tilegx can implement a more efficient rwsem.  config RWSEM_GENERIC_SPINLOCK  	def_bool y @@ -120,6 +125,8 @@ config HVC_TILE  config TILEGX  	bool "Building for TILE-Gx (64-bit) processor" +	select SPARSE_IRQ +	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ  	select HAVE_FUNCTION_TRACER  	select HAVE_FUNCTION_TRACE_MCOUNT_TEST  	select HAVE_FUNCTION_GRAPH_TRACER @@ -361,7 +368,7 @@ config CMDLINE_OVERRIDE  config VMALLOC_RESERVE  	hex -	default 0x1000000 +	default 0x2000000  config HARDWALL  	bool "Hardwall support to allow access to user dynamic network" @@ -406,7 +413,7 @@ config PCI_DOMAINS  config NO_IOMEM  	def_bool !PCI -config NO_IOPORT +config NO_IOPORT_MAP  	def_bool !PCI  config TILE_PCI_IO diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c index 4f8f3d619c4..e19325c4c43 100644 --- a/arch/tile/gxio/iorpc_mpipe.c +++ b/arch/tile/gxio/iorpc_mpipe.c @@ -21,7 +21,7 @@ struct alloc_buffer_stacks_param {  	unsigned int flags;  }; -int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, +int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,  				   unsigned int count, unsigned int first,  				   unsigned int flags)  { @@ -45,7 +45,7 @@ struct init_buffer_stack_aux_param {  	unsigned int buffer_size_enum;  }; -int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, +int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,  				     void *mem_va, size_t mem_size,  				     unsigned int mem_flags, unsigned int stack,  				     unsigned int buffer_size_enum) @@ -80,7 +80,7 @@ struct alloc_notif_rings_param {  	unsigned int flags;  }; -int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, +int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,  				 unsigned int count, unsigned int first,  				 unsigned int flags)  { @@ -102,7 +102,7 @@ struct init_notif_ring_aux_param {  	unsigned int ring;  }; -int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, +int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,  				   size_t mem_size, unsigned int mem_flags,  				   unsigned int ring)  { @@ -133,7 +133,7 @@ struct request_notif_ring_interrupt_param {  	unsigned int ring;  }; -int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, +int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,  					    int inter_x, int inter_y,  					    int inter_ipi, int inter_event,  					    unsigned int ring) @@ -158,7 +158,7 @@ struct enable_notif_ring_interrupt_param {  	unsigned int ring;  }; -int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, +int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,  					   unsigned int ring)  {  	struct enable_notif_ring_interrupt_param temp; @@ -179,7 +179,7 @@ struct 
alloc_notif_groups_param {  	unsigned int flags;  }; -int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, +int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,  				  unsigned int count, unsigned int first,  				  unsigned int flags)  { @@ -201,7 +201,7 @@ struct init_notif_group_param {  	gxio_mpipe_notif_group_bits_t bits;  }; -int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, +int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,  				unsigned int group,  				gxio_mpipe_notif_group_bits_t bits)  { @@ -223,7 +223,7 @@ struct alloc_buckets_param {  	unsigned int flags;  }; -int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, +int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,  			     unsigned int first, unsigned int flags)  {  	struct alloc_buckets_param temp; @@ -244,7 +244,7 @@ struct init_bucket_param {  	MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info;  }; -int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, +int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,  			   MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info)  {  	struct init_bucket_param temp; @@ -265,7 +265,7 @@ struct alloc_edma_rings_param {  	unsigned int flags;  }; -int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, +int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,  				unsigned int count, unsigned int first,  				unsigned int flags)  { @@ -288,7 +288,7 @@ struct init_edma_ring_aux_param {  	unsigned int channel;  }; -int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, +int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,  				  size_t mem_size, unsigned int mem_flags,  				  unsigned int ring, unsigned int channel)  { @@ -315,7 +315,7 @@ int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,  EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux); -int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, +int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,  			    size_t blob_size)  {  	const void *params = blob; @@ -332,7 +332,7 @@ struct register_client_memory_param {  	unsigned int flags;  }; -int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, +int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,  				      unsigned int iotlb, HV_PTE pte,  				      unsigned int flags)  { @@ -355,7 +355,7 @@ struct link_open_aux_param {  	unsigned int flags;  }; -int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, +int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,  			     _gxio_mpipe_link_name_t name, unsigned int flags)  {  	struct link_open_aux_param temp; @@ -374,7 +374,7 @@ struct link_close_aux_param {  	int mac;  }; -int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac) +int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac)  {  	struct link_close_aux_param temp;  	struct link_close_aux_param *params = &temp; @@ -393,7 +393,7 @@ struct link_set_attr_aux_param {  	int64_t val;  }; -int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac, +int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,  				 uint32_t attr, int64_t val)  {  	struct link_set_attr_aux_param temp; @@ -415,8 +415,8 @@ struct get_timestamp_aux_param {  	uint64_t cycles;  }; -int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, -	
			 uint64_t * nsec, uint64_t * cycles) +int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec, +				 uint64_t *nsec, uint64_t *cycles)  {  	int __result;  	struct get_timestamp_aux_param temp; @@ -440,7 +440,7 @@ struct set_timestamp_aux_param {  	uint64_t cycles;  }; -int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, +int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,  				 uint64_t nsec, uint64_t cycles)  {  	struct set_timestamp_aux_param temp; @@ -460,8 +460,7 @@ struct adjust_timestamp_aux_param {  	int64_t nsec;  }; -int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, -				    int64_t nsec) +int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, int64_t nsec)  {  	struct adjust_timestamp_aux_param temp;  	struct adjust_timestamp_aux_param *params = &temp; @@ -475,25 +474,6 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,  EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux); -struct adjust_timestamp_freq_param { -	int32_t ppb; -}; - -int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context, -				     int32_t ppb) -{ -	struct adjust_timestamp_freq_param temp; -	struct adjust_timestamp_freq_param *params = &temp; - -	params->ppb = ppb; - -	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, -			     sizeof(*params), -			     GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ); -} - -EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq); -  struct config_edma_ring_blks_param {  	unsigned int ering;  	unsigned int max_blks; @@ -501,7 +481,7 @@ struct config_edma_ring_blks_param {  	unsigned int db;  }; -int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context, +int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,  				     unsigned int ering, unsigned int max_blks,  				     unsigned int min_snf_blks, unsigned int db)  { @@ -520,11 +500,29 @@ int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,  EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks); +struct adjust_timestamp_freq_param { +	int32_t ppb; +}; + +int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, int32_t ppb) +{ +	struct adjust_timestamp_freq_param temp; +	struct adjust_timestamp_freq_param *params = &temp; + +	params->ppb = ppb; + +	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, +			     sizeof(*params), +			     GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ); +} + +EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq); +  struct arm_pollfd_param {  	union iorpc_pollfd pollfd;  }; -int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) +int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)  {  	struct arm_pollfd_param temp;  	struct arm_pollfd_param *params = &temp; @@ -541,7 +539,7 @@ struct close_pollfd_param {  	union iorpc_pollfd pollfd;  }; -int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) +int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie)  {  	struct close_pollfd_param temp;  	struct close_pollfd_param *params = &temp; @@ -558,7 +556,7 @@ struct get_mmio_base_param {  	HV_PTE base;  }; -int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base) +int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base)  {  	int __result;  	struct get_mmio_base_param temp; @@ -579,7 +577,7 @@ struct check_mmio_offset_param {  	unsigned long size;  }; -int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, +int 
gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,  				 unsigned long offset, unsigned long size)  {  	struct check_mmio_offset_param temp; diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c index 64883aabeb9..77019c6e9b4 100644 --- a/arch/tile/gxio/iorpc_mpipe_info.c +++ b/arch/tile/gxio/iorpc_mpipe_info.c @@ -15,12 +15,11 @@  /* This file is machine-generated; DO NOT EDIT! */  #include "gxio/iorpc_mpipe_info.h" -  struct instance_aux_param {  	_gxio_mpipe_link_name_t name;  }; -int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context, +int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,  				 _gxio_mpipe_link_name_t name)  {  	struct instance_aux_param temp; @@ -39,10 +38,10 @@ struct enumerate_aux_param {  	_gxio_mpipe_link_mac_t mac;  }; -int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, +int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,  				  unsigned int idx, -				  _gxio_mpipe_link_name_t * name, -				  _gxio_mpipe_link_mac_t * mac) +				  _gxio_mpipe_link_name_t *name, +				  _gxio_mpipe_link_mac_t *mac)  {  	int __result;  	struct enumerate_aux_param temp; @@ -50,7 +49,7 @@ int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,  	__result =  	    hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), -			 (((uint64_t) idx << 32) | +			 (((uint64_t)idx << 32) |  			  GXIO_MPIPE_INFO_OP_ENUMERATE_AUX));  	*name = params->name;  	*mac = params->mac; @@ -64,7 +63,7 @@ struct get_mmio_base_param {  	HV_PTE base;  }; -int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, +int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,  				  HV_PTE *base)  {  	int __result; @@ -86,7 +85,7 @@ struct check_mmio_offset_param {  	unsigned long size;  }; -int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, +int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,  				      unsigned long offset, unsigned long size)  {  	struct check_mmio_offset_param temp; diff --git a/arch/tile/gxio/iorpc_trio.c b/arch/tile/gxio/iorpc_trio.c index da6e18e049c..1d3cedb9aeb 100644 --- a/arch/tile/gxio/iorpc_trio.c +++ b/arch/tile/gxio/iorpc_trio.c @@ -21,7 +21,7 @@ struct alloc_asids_param {  	unsigned int flags;  }; -int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, +int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,  			  unsigned int first, unsigned int flags)  {  	struct alloc_asids_param temp; @@ -44,7 +44,7 @@ struct alloc_memory_maps_param {  	unsigned int flags;  }; -int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, +int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,  				unsigned int count, unsigned int first,  				unsigned int flags)  { @@ -67,7 +67,7 @@ struct alloc_scatter_queues_param {  	unsigned int flags;  }; -int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context, +int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,  				   unsigned int count, unsigned int first,  				   unsigned int flags)  { @@ -91,7 +91,7 @@ struct alloc_pio_regions_param {  	unsigned int flags;  }; -int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, +int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,  				unsigned int count, unsigned int first,  				unsigned int flags)  { @@ -115,7 +115,7 @@ struct init_pio_region_aux_param {  	unsigned int flags;  }; -int 
gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, +int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,  				  unsigned int pio_region, unsigned int mac,  				  uint32_t bus_address_hi, unsigned int flags)  { @@ -145,7 +145,7 @@ struct init_memory_map_mmu_aux_param {  	unsigned int order_mode;  }; -int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, +int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,  				      unsigned int map, unsigned long va,  				      uint64_t size, unsigned int asid,  				      unsigned int mac, uint64_t bus_address, @@ -175,7 +175,7 @@ struct get_port_property_param {  	struct pcie_trio_ports_property trio_ports;  }; -int gxio_trio_get_port_property(gxio_trio_context_t * context, +int gxio_trio_get_port_property(gxio_trio_context_t *context,  				struct pcie_trio_ports_property *trio_ports)  {  	int __result; @@ -198,7 +198,7 @@ struct config_legacy_intr_param {  	unsigned int intx;  }; -int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, +int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,  				 int inter_y, int inter_ipi, int inter_event,  				 unsigned int mac, unsigned int intx)  { @@ -227,7 +227,7 @@ struct config_msi_intr_param {  	unsigned int asid;  }; -int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, +int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,  			      int inter_y, int inter_ipi, int inter_event,  			      unsigned int mac, unsigned int mem_map,  			      uint64_t mem_map_base, uint64_t mem_map_limit, @@ -259,7 +259,7 @@ struct set_mps_mrs_param {  	unsigned int mac;  }; -int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, +int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,  			  uint16_t mrs, unsigned int mac)  {  	struct set_mps_mrs_param temp; @@ -279,7 +279,7 @@ struct force_rc_link_up_param {  	unsigned int mac;  }; -int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac) +int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac)  {  	struct force_rc_link_up_param temp;  	struct force_rc_link_up_param *params = &temp; @@ -296,7 +296,7 @@ struct force_ep_link_up_param {  	unsigned int mac;  }; -int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac) +int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac)  {  	struct force_ep_link_up_param temp;  	struct force_ep_link_up_param *params = &temp; @@ -313,7 +313,7 @@ struct get_mmio_base_param {  	HV_PTE base;  }; -int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base) +int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base)  {  	int __result;  	struct get_mmio_base_param temp; @@ -334,7 +334,7 @@ struct check_mmio_offset_param {  	unsigned long size;  }; -int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, +int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,  				unsigned long offset, unsigned long size)  {  	struct check_mmio_offset_param temp; diff --git a/arch/tile/gxio/iorpc_usb_host.c b/arch/tile/gxio/iorpc_usb_host.c index cf3c3cc1220..9c820073bfc 100644 --- a/arch/tile/gxio/iorpc_usb_host.c +++ b/arch/tile/gxio/iorpc_usb_host.c @@ -19,7 +19,7 @@ struct cfg_interrupt_param {  	union iorpc_interrupt interrupt;  }; -int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, +int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, 
int inter_x,  				int inter_y, int inter_ipi, int inter_event)  {  	struct cfg_interrupt_param temp; @@ -41,7 +41,7 @@ struct register_client_memory_param {  	unsigned int flags;  }; -int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, +int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,  					 HV_PTE pte, unsigned int flags)  {  	struct register_client_memory_param temp; @@ -61,7 +61,7 @@ struct get_mmio_base_param {  	HV_PTE base;  }; -int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, HV_PTE *base) +int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base)  {  	int __result;  	struct get_mmio_base_param temp; @@ -82,7 +82,7 @@ struct check_mmio_offset_param {  	unsigned long size;  }; -int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, +int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,  				    unsigned long offset, unsigned long size)  {  	struct check_mmio_offset_param temp; diff --git a/arch/tile/gxio/usb_host.c b/arch/tile/gxio/usb_host.c index 66b002f54ec..785afad7922 100644 --- a/arch/tile/gxio/usb_host.c +++ b/arch/tile/gxio/usb_host.c @@ -26,7 +26,7 @@  #include <gxio/kiorpc.h>  #include <gxio/usb_host.h> -int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, +int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,  		       int is_ehci)  {  	char file[32]; @@ -63,7 +63,7 @@ int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,  EXPORT_SYMBOL_GPL(gxio_usb_host_init); -int gxio_usb_host_destroy(gxio_usb_host_context_t * context) +int gxio_usb_host_destroy(gxio_usb_host_context_t *context)  {  	iounmap((void __force __iomem *)(context->mmio_base));  	hv_dev_close(context->fd); @@ -76,14 +76,14 @@ int gxio_usb_host_destroy(gxio_usb_host_context_t * context)  EXPORT_SYMBOL_GPL(gxio_usb_host_destroy); -void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context) +void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context)  {  	return context->mmio_base;  }  EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start); -size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context) +size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context)  {  	return HV_USB_HOST_MMIO_SIZE;  } diff --git a/arch/tile/include/arch/mpipe.h b/arch/tile/include/arch/mpipe.h index 8a33912fd6c..904538e754d 100644 --- a/arch/tile/include/arch/mpipe.h +++ b/arch/tile/include/arch/mpipe.h @@ -176,7 +176,18 @@ typedef union       */      uint_reg_t stack_idx    : 5;      /* Reserved. */ -    uint_reg_t __reserved_2 : 5; +    uint_reg_t __reserved_2 : 3; +    /* +     * Instance ID.  For devices that support automatic buffer return between +     * mPIPE instances, this field indicates the buffer owner.  If the INST +     * field does not match the mPIPE's instance number when a packet is +     * egressed, buffers with HWB set will be returned to the other mPIPE +     * instance.  Note that not all devices support multi-mPIPE buffer +     * return.  The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates +     * whether the INST field in the buffer descriptor is populated by iDMA +     * hardware. This field is ignored on writes. +     */ +    uint_reg_t inst         : 2;      /*       * Reads as one to indicate that this is a hardware managed buffer.       * Ignored on writes since all buffers on a given stack are the same size. 
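/*
 * Illustrative sketch, not part of the diff: how the gxio USB-host
 * wrappers whose signatures are cleaned up above fit together.  A
 * bring-up path opens the hypervisor device, then hands the MMIO window
 * to the EHCI/OHCI core.  The example_* name is hypothetical; the gxio
 * calls and their signatures are the ones shown above.
 */
#include <linux/printk.h>
#include <gxio/usb_host.h>

static int example_usb_host_bringup(gxio_usb_host_context_t *context,
				    int usb_index)
{
	void *regs;
	size_t len;
	int ret;

	ret = gxio_usb_host_init(context, usb_index, 1 /* is_ehci */);
	if (ret < 0)
		return ret;

	regs = gxio_usb_host_get_reg_start(context);
	len = gxio_usb_host_get_reg_len(context);

	/* ... a real driver would register an EHCI device over this window ... */
	pr_info("usb%d: %zu bytes of MMIO at %p\n", usb_index, len, regs);

	return 0;
}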
@@ -205,7 +216,8 @@ typedef union      uint_reg_t c            : 2;      uint_reg_t size         : 3;      uint_reg_t hwb          : 1; -    uint_reg_t __reserved_2 : 5; +    uint_reg_t inst         : 2; +    uint_reg_t __reserved_2 : 3;      uint_reg_t stack_idx    : 5;      uint_reg_t __reserved_1 : 6;      int_reg_t va           : 35; @@ -231,9 +243,9 @@ typedef union      /* Reserved. */      uint_reg_t __reserved_0 : 3;      /* eDMA ring being accessed */ -    uint_reg_t ring         : 5; +    uint_reg_t ring         : 6;      /* Reserved. */ -    uint_reg_t __reserved_1 : 18; +    uint_reg_t __reserved_1 : 17;      /*       * This field of the address selects the region (address space) to be       * accessed.  For the egress DMA post region, this field must be 5. @@ -250,8 +262,8 @@ typedef union      uint_reg_t svc_dom      : 5;      uint_reg_t __reserved_2 : 6;      uint_reg_t region       : 3; -    uint_reg_t __reserved_1 : 18; -    uint_reg_t ring         : 5; +    uint_reg_t __reserved_1 : 17; +    uint_reg_t ring         : 6;      uint_reg_t __reserved_0 : 3;  #endif    }; diff --git a/arch/tile/include/arch/mpipe_constants.h b/arch/tile/include/arch/mpipe_constants.h index 410a0400e05..84022ac5fe8 100644 --- a/arch/tile/include/arch/mpipe_constants.h +++ b/arch/tile/include/arch/mpipe_constants.h @@ -16,13 +16,13 @@  #ifndef __ARCH_MPIPE_CONSTANTS_H__  #define __ARCH_MPIPE_CONSTANTS_H__ -#define MPIPE_NUM_CLASSIFIERS 10 +#define MPIPE_NUM_CLASSIFIERS 16  #define MPIPE_CLS_MHZ 1200 -#define MPIPE_NUM_EDMA_RINGS 32 +#define MPIPE_NUM_EDMA_RINGS 64  #define MPIPE_NUM_SGMII_MACS 16 -#define MPIPE_NUM_XAUI_MACS 4 +#define MPIPE_NUM_XAUI_MACS 16  #define MPIPE_NUM_LOOPBACK_CHANNELS 4  #define MPIPE_NUM_NON_LB_CHANNELS 28 diff --git a/arch/tile/include/arch/mpipe_shm.h b/arch/tile/include/arch/mpipe_shm.h index f2e9e122818..13b3c4300e5 100644 --- a/arch/tile/include/arch/mpipe_shm.h +++ b/arch/tile/include/arch/mpipe_shm.h @@ -44,8 +44,14 @@ typedef union       * descriptors toggles each time the ring tail pointer wraps.       */      uint_reg_t gen        : 1; +    /** +     * For devices with EDMA reorder support, this field allows the +     * descriptor to select the egress FIFO.  The associated DMA ring must +     * have ALLOW_EFIFO_SEL enabled. +     */ +    uint_reg_t efifo_sel  : 6;      /** Reserved.  Must be zero. */ -    uint_reg_t r0         : 7; +    uint_reg_t r0         : 1;      /** Checksum generation enabled for this transfer. */      uint_reg_t csum       : 1;      /** @@ -110,7 +116,8 @@ typedef union      uint_reg_t notif      : 1;      uint_reg_t ns         : 1;      uint_reg_t csum       : 1; -    uint_reg_t r0         : 7; +    uint_reg_t r0         : 1; +    uint_reg_t efifo_sel  : 6;      uint_reg_t gen        : 1;  #endif @@ -126,14 +133,16 @@ typedef union      /** Reserved. */      uint_reg_t __reserved_1 : 3;      /** -     * Instance ID.  For devices that support more than one mPIPE instance, -     * this field indicates the buffer owner.  If the INST field does not -     * match the mPIPE's instance number when a packet is egressed, buffers -     * with HWB set will be returned to the other mPIPE instance. +     * Instance ID.  For devices that support automatic buffer return between +     * mPIPE instances, this field indicates the buffer owner.  If the INST +     * field does not match the mPIPE's instance number when a packet is +     * egressed, buffers with HWB set will be returned to the other mPIPE +     * instance.  
Note that not all devices support multi-mPIPE buffer +     * return.  The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates +     * whether the INST field in the buffer descriptor is populated by iDMA +     * hardware.       */ -    uint_reg_t inst         : 1; -    /** Reserved. */ -    uint_reg_t __reserved_2 : 1; +    uint_reg_t inst         : 2;      /**       * Always set to one by hardware in iDMA packet descriptors.  For eDMA,       * indicates whether the buffer will be released to the buffer stack @@ -166,8 +175,7 @@ typedef union      uint_reg_t c            : 2;      uint_reg_t size         : 3;      uint_reg_t hwb          : 1; -    uint_reg_t __reserved_2 : 1; -    uint_reg_t inst         : 1; +    uint_reg_t inst         : 2;      uint_reg_t __reserved_1 : 3;      uint_reg_t stack_idx    : 5;      uint_reg_t __reserved_0 : 6; @@ -408,7 +416,10 @@ typedef union      /**       * Sequence number applied when packet is distributed.   Classifier       * selects which sequence number is to be applied by writing the 13-bit -     * SQN-selector into this field. +     * SQN-selector into this field.  For devices that support EXT_SQN (as +     * indicated in IDMA_INFO.EXT_SQN_SUPPORT), the GP_SQN can be extended to +     * 32-bits via the IDMA_CTL.EXT_SQN register.  In this case the +     * PACKET_SQN will be reduced to 32 bits.       */      uint_reg_t gp_sqn     : 16;      /** @@ -451,14 +462,16 @@ typedef union      /** Reserved. */      uint_reg_t __reserved_5 : 3;      /** -     * Instance ID.  For devices that support more than one mPIPE instance, -     * this field indicates the buffer owner.  If the INST field does not -     * match the mPIPE's instance number when a packet is egressed, buffers -     * with HWB set will be returned to the other mPIPE instance. +     * Instance ID.  For devices that support automatic buffer return between +     * mPIPE instances, this field indicates the buffer owner.  If the INST +     * field does not match the mPIPE's instance number when a packet is +     * egressed, buffers with HWB set will be returned to the other mPIPE +     * instance.  Note that not all devices support multi-mPIPE buffer +     * return.  The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates +     * whether the INST field in the buffer descriptor is populated by iDMA +     * hardware.       */ -    uint_reg_t inst         : 1; -    /** Reserved. */ -    uint_reg_t __reserved_6 : 1; +    uint_reg_t inst         : 2;      /**       * Always set to one by hardware in iDMA packet descriptors.  
For eDMA,       * indicates whether the buffer will be released to the buffer stack @@ -491,8 +504,7 @@ typedef union      uint_reg_t c            : 2;      uint_reg_t size         : 3;      uint_reg_t hwb          : 1; -    uint_reg_t __reserved_6 : 1; -    uint_reg_t inst         : 1; +    uint_reg_t inst         : 2;      uint_reg_t __reserved_5 : 3;      uint_reg_t stack_idx    : 5;      uint_reg_t __reserved_4 : 6; diff --git a/arch/tile/include/arch/trio_constants.h b/arch/tile/include/arch/trio_constants.h index 628b045436b..85647e91a45 100644 --- a/arch/tile/include/arch/trio_constants.h +++ b/arch/tile/include/arch/trio_constants.h @@ -16,21 +16,21 @@  #ifndef __ARCH_TRIO_CONSTANTS_H__  #define __ARCH_TRIO_CONSTANTS_H__ -#define TRIO_NUM_ASIDS 16 +#define TRIO_NUM_ASIDS 32  #define TRIO_NUM_TLBS_PER_ASID 16  #define TRIO_NUM_TPIO_REGIONS 8  #define TRIO_LOG2_NUM_TPIO_REGIONS 3 -#define TRIO_NUM_MAP_MEM_REGIONS 16 -#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 4 +#define TRIO_NUM_MAP_MEM_REGIONS 32 +#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 5  #define TRIO_NUM_MAP_SQ_REGIONS 8  #define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3  #define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6 -#define TRIO_NUM_PUSH_DMA_RINGS 32 +#define TRIO_NUM_PUSH_DMA_RINGS 64 -#define TRIO_NUM_PULL_DMA_RINGS 32 +#define TRIO_NUM_PULL_DMA_RINGS 64  #endif /* __ARCH_TRIO_CONSTANTS_H__ */ diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index 664d6ad23f8..0aa5675e702 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -11,6 +11,7 @@ generic-y += errno.h  generic-y += exec.h  generic-y += fb.h  generic-y += fcntl.h +generic-y += hash.h  generic-y += hw_irq.h  generic-y += ioctl.h  generic-y += ioctls.h @@ -18,12 +19,14 @@ generic-y += ipcbuf.h  generic-y += irq_regs.h  generic-y += local.h  generic-y += local64.h +generic-y += mcs_spinlock.h  generic-y += msgbuf.h  generic-y += mutex.h  generic-y += param.h  generic-y += parport.h  generic-y += poll.h  generic-y += posix_types.h +generic-y += preempt.h  generic-y += resource.h  generic-y += scatterlist.h  generic-y += sembuf.h diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h index d385eaadece..70979846076 100644 --- a/arch/tile/include/asm/atomic.h +++ b/arch/tile/include/asm/atomic.h @@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)   *   * Atomically sets @v to @i and returns old @v   */ -static inline u64 atomic64_xchg(atomic64_t *v, u64 n) +static inline long long atomic64_xchg(atomic64_t *v, long long n)  {  	return xchg64(&v->counter, n);  } @@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)   * Atomically checks if @v holds @o and replaces it with @n if so.   * Returns the old value at @v.   */ -static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) +static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, +					long long n)  {  	return cmpxchg64(&v->counter, o, n);  } diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index 0d0395b1b15..1b109fad9ff 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h @@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)  /* A 64bit atomic type */  typedef struct { -	u64 __aligned(8) counter; +	long long counter;  } atomic64_t;  #define ATOMIC64_INIT(val) { (val) } @@ -91,14 +91,14 @@ typedef struct {   *   * Atomically reads the value of @v.   
*/ -static inline u64 atomic64_read(const atomic64_t *v) +static inline long long atomic64_read(const atomic64_t *v)  {  	/*  	 * Requires an atomic op to read both 32-bit parts consistently.  	 * Casting away const is safe since the atomic support routines  	 * do not write to memory if the value has not been modified.  	 */ -	return _atomic64_xchg_add((u64 *)&v->counter, 0); +	return _atomic64_xchg_add((long long *)&v->counter, 0);  }  /** @@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)   *   * Atomically adds @i to @v.   */ -static inline void atomic64_add(u64 i, atomic64_t *v) +static inline void atomic64_add(long long i, atomic64_t *v)  {  	_atomic64_xchg_add(&v->counter, i);  } @@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)   *   * Atomically adds @i to @v and returns @i + @v   */ -static inline u64 atomic64_add_return(u64 i, atomic64_t *v) +static inline long long atomic64_add_return(long long i, atomic64_t *v)  {  	smp_mb();  /* barrier for proper semantics */  	return _atomic64_xchg_add(&v->counter, i) + i; @@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)   * Atomically adds @a to @v, so long as @v was not already @u.   * Returns non-zero if @v was not @u, and zero otherwise.   */ -static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) +static inline long long atomic64_add_unless(atomic64_t *v, long long a, +					long long u)  {  	smp_mb();  /* barrier for proper semantics */  	return _atomic64_xchg_add_unless(&v->counter, a, u) != u; @@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)   * atomic64_set() can't be just a raw store, since it would be lost if it   * fell between the load and store of one of the other atomic ops.   */ -static inline void atomic64_set(atomic64_t *v, u64 n) +static inline void atomic64_set(atomic64_t *v, long long n)  {  	_atomic64_xchg(&v->counter, n);  } @@ -168,16 +169,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)  #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)  #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL) -/* - * We need to barrier before modifying the word, since the _atomic_xxx() - * routines just tns the lock and then read/modify/write of the word. - * But after the word is updated, the routine issues an "mf" before returning, - * and since it's a function call, we don't even need a compiler barrier. 
- */ -#define smp_mb__before_atomic_dec()	smp_mb() -#define smp_mb__before_atomic_inc()	smp_mb() -#define smp_mb__after_atomic_dec()	do { } while (0) -#define smp_mb__after_atomic_inc()	do { } while (0)  #endif /* !__ASSEMBLY__ */ @@ -236,11 +227,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,  extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);  extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);  extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); -extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n); -extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n); -extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n); -extern u64 __atomic64_xchg_add_unless(volatile u64 *p, -				      int *lock, u64 o, u64 n); +extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, +					long long o, long long n); +extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n); +extern long long __atomic64_xchg_add(volatile long long *p, int *lock, +					long long n); +extern long long __atomic64_xchg_add_unless(volatile long long *p, +					int *lock, long long o, long long n);  /* Return failure from the atomic wrappers. */  struct __get_user __atomic_bad_address(int __user *addr); diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index ad220eed05f..7b11c5fadd4 100644 --- a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h @@ -105,12 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)  #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0) -/* Atomic dec and inc don't implement barrier, so provide them if needed. */ -#define smp_mb__before_atomic_dec()	smp_mb() -#define smp_mb__after_atomic_dec()	smp_mb() -#define smp_mb__before_atomic_inc()	smp_mb() -#define smp_mb__after_atomic_inc()	smp_mb() -  /* Define this to indicate that cmpxchg is an efficient operation. */  #define __HAVE_ARCH_CMPXCHG diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h index a9a73da5865..96a42ae79f4 100644 --- a/arch/tile/include/asm/barrier.h +++ b/arch/tile/include/asm/barrier.h @@ -22,59 +22,6 @@  #include <arch/spr_def.h>  #include <asm/timex.h> -/* - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier.  All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads.  This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies.  See the documentation for - * memory_barrier() for examples and URLs to more information. 
- * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - *	CPU 0				CPU 1 - * - *	b = 2; - *	memory_barrier(); - *	p = &b;				q = p; - *					read_barrier_depends(); - *					d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends().  However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - *	CPU 0				CPU 1 - * - *	a = 2; - *	memory_barrier(); - *	b = 3;				y = b; - *					read_barrier_depends(); - *					x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b".  Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb() - * in cases like this where there are no data dependencies. - */ -#define read_barrier_depends()	do { } while (0) -  #define __sync()	__insn_mf()  #include <hv/syscall_public.h> @@ -125,20 +72,21 @@ mb_incoherent(void)  #define mb()		fast_mb()  #define iob()		fast_iob() -#ifdef CONFIG_SMP -#define smp_mb()	mb() -#define smp_rmb()	rmb() -#define smp_wmb()	wmb() -#define smp_read_barrier_depends()	read_barrier_depends() -#else -#define smp_mb()	barrier() -#define smp_rmb()	barrier() -#define smp_wmb()	barrier() -#define smp_read_barrier_depends()	do { } while (0) +#ifndef __tilegx__ /* 32 bit */ +/* + * We need to barrier before modifying the word, since the _atomic_xxx() + * routines just tns the lock and then read/modify/write of the word. + * But after the word is updated, the routine issues an "mf" before returning, + * and since it's a function call, we don't even need a compiler barrier. + */ +#define smp_mb__before_atomic()	smp_mb() +#define smp_mb__after_atomic()	do { } while (0) +#else /* 64 bit */ +#define smp_mb__before_atomic()	smp_mb() +#define smp_mb__after_atomic()	smp_mb()  #endif -#define set_mb(var, value) \ -	do { var = value; mb(); } while (0) +#include <asm-generic/barrier.h>  #endif /* !__ASSEMBLY__ */  #endif /* _ASM_TILE_BARRIER_H */ diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h index d5a20686503..20caa346ac0 100644 --- a/arch/tile/include/asm/bitops.h +++ b/arch/tile/include/asm/bitops.h @@ -17,6 +17,7 @@  #define _ASM_TILE_BITOPS_H  #include <linux/types.h> +#include <asm/barrier.h>  #ifndef _LINUX_BITOPS_H  #error only <linux/bitops.h> can be included directly diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h index 386865ad2f5..bbf7b666f21 100644 --- a/arch/tile/include/asm/bitops_32.h +++ b/arch/tile/include/asm/bitops_32.h @@ -49,8 +49,8 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)   * restricted to acting on a single-word quantity.   *   * clear_bit() may not contain a memory barrier, so if it is used for - * locking purposes, you should call smp_mb__before_clear_bit() and/or - * smp_mb__after_clear_bit() to ensure changes are visible on other cpus. + * locking purposes, you should call smp_mb__before_atomic() and/or + * smp_mb__after_atomic() to ensure changes are visible on other cpus.   */  static inline void clear_bit(unsigned nr, volatile unsigned long *addr)  { @@ -121,10 +121,6 @@ static inline int test_and_change_bit(unsigned nr,  	return (_atomic_xor(addr, mask) & mask) != 0;  } -/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. 
*/ -#define smp_mb__before_clear_bit()	smp_mb() -#define smp_mb__after_clear_bit()	do {} while (0) -  #include <asm-generic/bitops/ext2-atomic.h>  #endif /* _ASM_TILE_BITOPS_32_H */ diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h index ad34cd05608..bb1a29221fc 100644 --- a/arch/tile/include/asm/bitops_64.h +++ b/arch/tile/include/asm/bitops_64.h @@ -32,10 +32,6 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)  	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);  } -#define smp_mb__before_clear_bit()	smp_mb() -#define smp_mb__after_clear_bit()	smp_mb() - -  static inline void change_bit(unsigned nr, volatile unsigned long *addr)  {  	unsigned long mask = (1UL << (nr % BITS_PER_LONG)); diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h index 4001d5eab4b..0ccda3c425b 100644 --- a/arch/tile/include/asm/cmpxchg.h +++ b/arch/tile/include/asm/cmpxchg.h @@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);  int _atomic_xchg_add(int *v, int i);  int _atomic_xchg_add_unless(int *v, int a, int u);  int _atomic_cmpxchg(int *ptr, int o, int n); -u64 _atomic64_xchg(u64 *v, u64 n); -u64 _atomic64_xchg_add(u64 *v, u64 i); -u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u); -u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); +long long _atomic64_xchg(long long *v, long long n); +long long _atomic64_xchg_add(long long *v, long long i); +long long _atomic64_xchg_add_unless(long long *v, long long a, long long u); +long long _atomic64_cmpxchg(long long *v, long long o, long long n);  #define xchg(ptr, n)							\  	({								\ @@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);  		if (sizeof(*(ptr)) != 4)				\  			__cmpxchg_called_with_bad_pointer();		\  		smp_mb();						\ -		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \ +		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o,	\ +						(int)n);		\  	})  #define xchg64(ptr, n)							\ @@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);  		if (sizeof(*(ptr)) != 8)				\  			__xchg_called_with_bad_pointer();		\  		smp_mb();						\ -		(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n));	\ +		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr),	\ +						(long long)(n));	\  	})  #define cmpxchg64(ptr, o, n)						\ @@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);  		if (sizeof(*(ptr)) != 8)				\  			__cmpxchg_called_with_bad_pointer();		\  		smp_mb();						\ -		(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \ +		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr,	\ +					(long long)o, (long long)n);	\  	})  #else @@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);  		switch (sizeof(*(ptr))) {				\  		case 4:							\  			__x = (typeof(__x))(unsigned long)		\ -				__insn_exch4((ptr), (u32)(unsigned long)(n)); \ +				__insn_exch4((ptr),			\ +					(u32)(unsigned long)(n));	\  			break;						\  		case 8:							\ -			__x = (typeof(__x))			\ +			__x = (typeof(__x))				\  				__insn_exch((ptr), (unsigned long)(n));	\  			break;						\  		default:						\ @@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);  		switch (sizeof(*(ptr))) {				\  		case 4:							\  			__x = (typeof(__x))(unsigned long)		\ -				__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \ +				__insn_cmpexch4((ptr),			\ +					(u32)(unsigned long)(n));	\  			break;						\  		case 8:							\ -			__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \ +			__x = (typeof(__x))__insn_cmpexch((ptr),	\ +						(long long)(n));	\  			
break;						\  		default:						\  			__cmpxchg_called_with_bad_pointer();		\ diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index 78f1f2ded86..ffd4493efc7 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h @@ -281,7 +281,6 @@ long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,  			u32 dummy, u32 low, u32 high);  long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,  			 u32 dummy, u32 low, u32 high); -long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len);  long compat_sys_sync_file_range2(int fd, unsigned int flags,  				 u32 offset_lo, u32 offset_hi,  				 u32 nbytes_lo, u32 nbytes_hi); diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h index c6b9c1b38fd..ffe2637aeb3 100644 --- a/arch/tile/include/asm/fixmap.h +++ b/arch/tile/include/asm/fixmap.h @@ -25,9 +25,6 @@  #include <asm/kmap_types.h>  #endif -#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) -  /*   * Here we define all the compile-time 'special' virtual   * addresses. The point is to have a constant address at @@ -83,35 +80,7 @@ enum fixed_addresses {  #define FIXADDR_START		(FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)  #define FIXADDR_BOOT_START	(FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE) -extern void __this_fixmap_does_not_exist(void); - -/* - * 'index to address' translation. If anyone tries to use the idx - * directly without tranlation, we catch the bug with a NULL-deference - * kernel oops. Illegal ranges of incoming indices are caught too. - */ -static __always_inline unsigned long fix_to_virt(const unsigned int idx) -{ -	/* -	 * this branch gets completely eliminated after inlining, -	 * except when someone tries to use fixaddr indices in an -	 * illegal way. (such as mixing up address types or using -	 * out-of-range indices). -	 * -	 * If it doesn't get removed, the linker will complain -	 * loudly with a reasonably clear error message.. -	 */ -	if (idx >= __end_of_fixed_addresses) -		__this_fixmap_does_not_exist(); - -	return __fix_to_virt(idx); -} - -static inline unsigned long virt_to_fix(const unsigned long vaddr) -{ -	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); -	return __virt_to_fix(vaddr); -} +#include <asm-generic/fixmap.h>  #endif /* !__ASSEMBLY__ */ diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h index 822390f9a15..54110af2398 100644 --- a/arch/tile/include/asm/hardirq.h +++ b/arch/tile/include/asm/hardirq.h @@ -42,6 +42,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);  #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */ -#define HARDIRQ_BITS	8 -  #endif /* _ASM_TILE_HARDIRQ_H */ diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h index 33cff9a3058..1fe86911838 100644 --- a/arch/tile/include/asm/irq.h +++ b/arch/tile/include/asm/irq.h @@ -18,10 +18,12 @@  #include <linux/hardirq.h>  /* The hypervisor interface provides 32 IRQs. */ -#define NR_IRQS 32 +#define NR_IRQS			32  /* IRQ numbers used for linux IPIs. */ -#define IRQ_RESCHEDULE 0 +#define IRQ_RESCHEDULE	0 +/* Interrupts for dynamic allocation start at 1. 
Let the core allocate irq0 */ +#define NR_IRQS_LEGACY	1  #define irq_canonicalize(irq)   (irq) diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h index 6346888f7bd..67276800861 100644 --- a/arch/tile/include/asm/page.h +++ b/arch/tile/include/asm/page.h @@ -182,10 +182,9 @@ static inline __attribute_const__ int get_order(unsigned long size)  #define PAGE_OFFSET		(-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))  #define KERNEL_HIGH_VADDR	_AC(0xfffffff800000000, UL)  /* high 32GB */ -#define FIXADDR_BASE		(KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */ -#define FIXADDR_TOP		(KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */ +#define FIXADDR_BASE		(KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */ +#define FIXADDR_TOP		(KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */  #define _VMALLOC_START		FIXADDR_TOP -#define HUGE_VMAP_BASE		(KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */  #define MEM_SV_START		(KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */  #define MEM_MODULE_START	(MEM_SV_START + (256*1024*1024)) /* 256 MB */  #define MEM_MODULE_END		(MEM_MODULE_START + (256*1024*1024)) diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h index 63294f5a8ef..4f7ae39fa20 100644 --- a/arch/tile/include/asm/percpu.h +++ b/arch/tile/include/asm/percpu.h @@ -15,9 +15,37 @@  #ifndef _ASM_TILE_PERCPU_H  #define _ASM_TILE_PERCPU_H -register unsigned long __my_cpu_offset __asm__("tp"); -#define __my_cpu_offset __my_cpu_offset -#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) +register unsigned long my_cpu_offset_reg asm("tp"); + +#ifdef CONFIG_PREEMPT +/* + * For full preemption, we can't just use the register variable + * directly, since we need barrier() to hazard against it, causing the + * compiler to reload anything computed from a previous "tp" value. + * But we also don't want to use volatile asm, since we'd like the + * compiler to be able to cache the value across multiple percpu reads. + * So we use a fake stack read as a hazard against barrier(). + * The 'U' constraint is like 'm' but disallows postincrement. + */ +static inline unsigned long __my_cpu_offset(void) +{ +	unsigned long tp; +	register unsigned long *sp asm("sp"); +	asm("move %0, tp" : "=r" (tp) : "U" (*sp)); +	return tp; +} +#define __my_cpu_offset __my_cpu_offset() +#else +/* + * We don't need to hazard against barrier() since "tp" doesn't ever + * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only + * changes at function call points, at which we are already re-reading + * the value of "tp" due to "my_cpu_offset_reg" being a global variable. + */ +#define __my_cpu_offset my_cpu_offset_reg +#endif + +#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))  #include <asm-generic/percpu.h> diff --git a/arch/tile/include/asm/perf_event.h b/arch/tile/include/asm/perf_event.h new file mode 100644 index 00000000000..59c5b164e5b --- /dev/null +++ b/arch/tile/include/asm/perf_event.h @@ -0,0 +1,22 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. + * + *   This program is free software; you can redistribute it and/or + *   modify it under the terms of the GNU General Public License + *   as published by the Free Software Foundation, version 2. + * + *   This program is distributed in the hope that it will be useful, but + *   WITHOUT ANY WARRANTY; without even the implied warranty of + *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + *   NON INFRINGEMENT.  See the GNU General Public License for + *   more details. 
+ */ + +#ifndef _ASM_TILE_PERF_EVENT_H +#define _ASM_TILE_PERF_EVENT_H + +#include <linux/percpu.h> +DECLARE_PER_CPU(u64, perf_irqs); + +unsigned long handle_syscall_link_address(void); +#endif /* _ASM_TILE_PERF_EVENT_H */ diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h index 63142ab3b3d..d26a4227903 100644 --- a/arch/tile/include/asm/pgtable_32.h +++ b/arch/tile/include/asm/pgtable_32.h @@ -55,17 +55,9 @@  #define PKMAP_BASE   ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)  #ifdef CONFIG_HIGHMEM -# define __VMAPPING_END	(PKMAP_BASE & ~(HPAGE_SIZE-1)) +# define _VMALLOC_END	(PKMAP_BASE & ~(HPAGE_SIZE-1))  #else -# define __VMAPPING_END	(FIXADDR_START & ~(HPAGE_SIZE-1)) -#endif - -#ifdef CONFIG_HUGEVMAP -#define HUGE_VMAP_END	__VMAPPING_END -#define HUGE_VMAP_BASE	(HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE) -#define _VMALLOC_END	HUGE_VMAP_BASE -#else -#define _VMALLOC_END	__VMAPPING_END +# define _VMALLOC_END	(FIXADDR_START & ~(HPAGE_SIZE-1))  #endif  /* diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index 3421177f737..2c8a9cd102d 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h @@ -52,12 +52,10 @@   * memory allocation code).  The vmalloc code puts in an internal   * guard page between each allocation.   */ -#define _VMALLOC_END	HUGE_VMAP_BASE +#define _VMALLOC_END	MEM_SV_START  #define VMALLOC_END	_VMALLOC_END  #define VMALLOC_START	_VMALLOC_START -#define HUGE_VMAP_END	(HUGE_VMAP_BASE + PGDIR_SIZE) -  #ifndef __ASSEMBLY__  /* We have no pud since we are a three-level page table. */ diff --git a/arch/tile/include/asm/pmc.h b/arch/tile/include/asm/pmc.h new file mode 100644 index 00000000000..7ae3956d900 --- /dev/null +++ b/arch/tile/include/asm/pmc.h @@ -0,0 +1,64 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. + * + *   This program is free software; you can redistribute it and/or + *   modify it under the terms of the GNU General Public License + *   as published by the Free Software Foundation, version 2. + * + *   This program is distributed in the hope that it will be useful, but + *   WITHOUT ANY WARRANTY; without even the implied warranty of + *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + *   NON INFRINGEMENT.  See the GNU General Public License for + *   more details. 
+ */ + +#ifndef _ASM_TILE_PMC_H +#define _ASM_TILE_PMC_H + +#include <linux/ptrace.h> + +#define TILE_BASE_COUNTERS	2 + +/* Bitfields below are derived from SPR PERF_COUNT_CTL*/ +#ifndef __tilegx__ +/* PERF_COUNT_CTL on TILEPro */ +#define TILE_CTL_EXCL_USER	(1 << 7) /* exclude user level */ +#define TILE_CTL_EXCL_KERNEL	(1 << 8) /* exclude kernel level */ +#define TILE_CTL_EXCL_HV	(1 << 9) /* exclude hypervisor level */ + +#define TILE_SEL_MASK		0x7f	/* 7 bits for event SEL, +					COUNT_0_SEL */ +#define TILE_PLM_MASK		0x780	/* 4 bits priv level msks, +					COUNT_0_MASK*/ +#define TILE_EVENT_MASK	(TILE_SEL_MASK | TILE_PLM_MASK) + +#else /* __tilegx__*/ +/* PERF_COUNT_CTL on TILEGx*/ +#define TILE_CTL_EXCL_USER	(1 << 10) /* exclude user level */ +#define TILE_CTL_EXCL_KERNEL	(1 << 11) /* exclude kernel level */ +#define TILE_CTL_EXCL_HV	(1 << 12) /* exclude hypervisor level */ + +#define TILE_SEL_MASK		0x3f	/* 6 bits for event SEL, +					COUNT_0_SEL*/ +#define TILE_BOX_MASK		0x1c0	/* 3 bits box msks, +					COUNT_0_BOX */ +#define TILE_PLM_MASK		0x3c00	/* 4 bits priv level msks, +					COUNT_0_MASK */ +#define TILE_EVENT_MASK	(TILE_SEL_MASK | TILE_BOX_MASK | TILE_PLM_MASK) +#endif /* __tilegx__*/ + +/* Takes register and fault number.  Returns error to disable the interrupt. */ +typedef int (*perf_irq_t)(struct pt_regs *, int); + +int userspace_perf_handler(struct pt_regs *regs, int fault); + +perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq); +void release_pmc_hardware(void); + +unsigned long pmc_get_overflow(void); +void pmc_ack_overflow(unsigned long status); + +void unmask_pmc_interrupts(void); +void mask_pmc_interrupts(void); + +#endif /* _ASM_TILE_PMC_H */ diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index b8aa6df3e10..48e4fd0f38e 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -94,7 +94,7 @@ register unsigned long stack_pointer __asm__("sp");  /* Sit on a nap instruction until interrupted. */  extern void smp_nap(void); -/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ +/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */  extern void _cpu_idle(void);  #else /* __ASSEMBLY__ */ @@ -113,8 +113,6 @@ extern void _cpu_idle(void);  #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE		0x10000000 -  /*   * Thread information flags that various assembly files may need to access.   * Keep flags accessed frequently in low bits, particular since it makes @@ -131,6 +129,7 @@ extern void _cpu_idle(void);  #define TIF_MEMDIE		7	/* OOM killer at work */  #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */  #define TIF_SYSCALL_TRACEPOINT	9	/* syscall tracepoint instrumentation */ +#define TIF_POLLING_NRFLAG	10	/* idle is polling for TIF_NEED_RESCHED */  #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)  #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED) @@ -142,6 +141,7 @@ extern void _cpu_idle(void);  #define _TIF_MEMDIE		(1<<TIF_MEMDIE)  #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)  #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT) +#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)  /* Work to do on any return to user space. 
*/  #define _TIF_ALLWORK_MASK \ @@ -164,7 +164,6 @@ extern void _cpu_idle(void);  #ifdef __tilegx__  #define TS_COMPAT		0x0001	/* 32-bit compatibility mode */  #endif -#define TS_POLLING		0x0004	/* in idle loop but not sleeping */  #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal */  #ifndef __ASSEMBLY__ diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h index d15c0d8d550..93831184423 100644 --- a/arch/tile/include/asm/topology.h +++ b/arch/tile/include/asm/topology.h @@ -44,39 +44,6 @@ static inline const struct cpumask *cpumask_of_node(int node)  /* For now, use numa node -1 for global allocation. */  #define pcibus_to_node(bus)		((void)(bus), -1) -/* - * TILE architecture has many cores integrated in one processor, so we need - * setup bigger balance_interval for both CPU/NODE scheduling domains to - * reduce process scheduling costs. - */ - -/* sched_domains SD_CPU_INIT for TILE architecture */ -#define SD_CPU_INIT (struct sched_domain) {				\ -	.min_interval		= 4,					\ -	.max_interval		= 128,					\ -	.busy_factor		= 64,					\ -	.imbalance_pct		= 125,					\ -	.cache_nice_tries	= 1,					\ -	.busy_idx		= 2,					\ -	.idle_idx		= 1,					\ -	.newidle_idx		= 0,					\ -	.wake_idx		= 0,					\ -	.forkexec_idx		= 0,					\ -									\ -	.flags			= 1*SD_LOAD_BALANCE			\ -				| 1*SD_BALANCE_NEWIDLE			\ -				| 1*SD_BALANCE_EXEC			\ -				| 1*SD_BALANCE_FORK			\ -				| 0*SD_BALANCE_WAKE			\ -				| 0*SD_WAKE_AFFINE			\ -				| 0*SD_SHARE_CPUPOWER			\ -				| 0*SD_SHARE_PKG_RESOURCES		\ -				| 0*SD_SERIALIZE			\ -				,					\ -	.last_balance		= jiffies,				\ -	.balance_interval	= 32,					\ -} -  /* By definition, we create nodes based on online memory. */  #define node_has_online_mem(nid) 1 diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h index fdd07f88cfd..4cda03de734 100644 --- a/arch/tile/include/gxio/iorpc_mpipe.h +++ b/arch/tile/include/gxio/iorpc_mpipe.h @@ -56,89 +56,89 @@  #define GXIO_MPIPE_OP_GET_MMIO_BASE    IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)  #define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) -int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, +int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,  				   unsigned int count, unsigned int first,  				   unsigned int flags); -int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, +int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,  				     void *mem_va, size_t mem_size,  				     unsigned int mem_flags, unsigned int stack,  				     unsigned int buffer_size_enum); -int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, +int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,  				 unsigned int count, unsigned int first,  				 unsigned int flags); -int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, +int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,  				   size_t mem_size, unsigned int mem_flags,  				   unsigned int ring); -int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, +int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,  					    int inter_x, int inter_y,  					    int inter_ipi, int inter_event,  					    unsigned int ring); -int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, +int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,  					   unsigned int ring); -int 
gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, +int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,  				  unsigned int count, unsigned int first,  				  unsigned int flags); -int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, +int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,  				unsigned int group,  				gxio_mpipe_notif_group_bits_t bits); -int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, +int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,  			     unsigned int first, unsigned int flags); -int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, +int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,  			   MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info); -int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, +int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,  				unsigned int count, unsigned int first,  				unsigned int flags); -int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, +int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,  				  size_t mem_size, unsigned int mem_flags,  				  unsigned int ring, unsigned int channel); -int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, +int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,  			    size_t blob_size); -int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, +int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,  				      unsigned int iotlb, HV_PTE pte,  				      unsigned int flags); -int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, +int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,  			     _gxio_mpipe_link_name_t name, unsigned int flags); -int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac); +int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac); -int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac, +int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,  				 uint32_t attr, int64_t val); -int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, -				 uint64_t * nsec, uint64_t * cycles); +int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec, +				 uint64_t *nsec, uint64_t *cycles); -int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, +int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,  				 uint64_t nsec, uint64_t cycles); -int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, +int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context,  				    int64_t nsec); -int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context, +int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context,  				     int32_t ppb); -int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); +int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie); -int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); +int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie); -int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base); +int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base); -int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, +int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t 
*context,  				 unsigned long offset, unsigned long size);  #endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */ diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h index 476c5e5ca22..f0b04284468 100644 --- a/arch/tile/include/gxio/iorpc_mpipe_info.h +++ b/arch/tile/include/gxio/iorpc_mpipe_info.h @@ -33,18 +33,18 @@  #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) -int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context, +int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,  				 _gxio_mpipe_link_name_t name); -int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, +int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,  				  unsigned int idx, -				  _gxio_mpipe_link_name_t * name, -				  _gxio_mpipe_link_mac_t * mac); +				  _gxio_mpipe_link_name_t *name, +				  _gxio_mpipe_link_mac_t *mac); -int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, +int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,  				  HV_PTE *base); -int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, +int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,  				      unsigned long offset, unsigned long size);  #endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */ diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h index d95b96fd6c9..376a4f77116 100644 --- a/arch/tile/include/gxio/iorpc_trio.h +++ b/arch/tile/include/gxio/iorpc_trio.h @@ -46,59 +46,59 @@  #define GXIO_TRIO_OP_GET_MMIO_BASE     IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)  #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) -int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, +int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,  			  unsigned int first, unsigned int flags); -int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, +int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,  				unsigned int count, unsigned int first,  				unsigned int flags); -int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context, +int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,  				   unsigned int count, unsigned int first,  				   unsigned int flags); -int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, +int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,  				unsigned int count, unsigned int first,  				unsigned int flags); -int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, +int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,  				  unsigned int pio_region, unsigned int mac,  				  uint32_t bus_address_hi, unsigned int flags); -int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, +int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,  				      unsigned int map, unsigned long va,  				      uint64_t size, unsigned int asid,  				      unsigned int mac, uint64_t bus_address,  				      unsigned int node,  				      unsigned int order_mode); -int gxio_trio_get_port_property(gxio_trio_context_t * context, +int gxio_trio_get_port_property(gxio_trio_context_t *context,  				struct pcie_trio_ports_property *trio_ports); -int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, +int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,  				 int inter_y, int inter_ipi, int inter_event,  				 unsigned 
int mac, unsigned int intx); -int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, +int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,  			      int inter_y, int inter_ipi, int inter_event,  			      unsigned int mac, unsigned int mem_map,  			      uint64_t mem_map_base, uint64_t mem_map_limit,  			      unsigned int asid); -int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, +int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,  			  uint16_t mrs, unsigned int mac); -int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac); +int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac); -int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac); +int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac); -int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base); +int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base); -int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, +int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,  				unsigned long offset, unsigned long size);  #endif /* !__GXIO_TRIO_LINUX_RPC_H__ */ diff --git a/arch/tile/include/gxio/iorpc_usb_host.h b/arch/tile/include/gxio/iorpc_usb_host.h index 8622e7d126a..79962a97de8 100644 --- a/arch/tile/include/gxio/iorpc_usb_host.h +++ b/arch/tile/include/gxio/iorpc_usb_host.h @@ -31,16 +31,16 @@  #define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)  #define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) -int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, +int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,  				int inter_y, int inter_ipi, int inter_event); -int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, +int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,  					 HV_PTE pte, unsigned int flags); -int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, +int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context,  				HV_PTE *base); -int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, +int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,  				    unsigned long offset, unsigned long size);  #endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */ diff --git a/arch/tile/include/gxio/usb_host.h b/arch/tile/include/gxio/usb_host.h index 5eedec0e988..93c9636d2dd 100644 --- a/arch/tile/include/gxio/usb_host.h +++ b/arch/tile/include/gxio/usb_host.h @@ -53,7 +53,7 @@ typedef struct {   * @return Zero if the context was successfully initialized, else a   *  GXIO_ERR_xxx error code.   */ -extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, +extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,  			      int is_ehci);  /* Destroy a USB context. @@ -68,20 +68,20 @@ extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,   * @return Zero if the context was successfully destroyed, else a   *  GXIO_ERR_xxx error code.   */ -extern int gxio_usb_host_destroy(gxio_usb_host_context_t * context); +extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context);  /* Retrieve the address of the shim's MMIO registers.   *   * @param context Pointer to a properly initialized gxio_usb_host_context_t.   * @return The address of the shim's MMIO registers.   
*/ -extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context); +extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context);  /* Retrieve the length of the shim's MMIO registers.   *   * @param context Pointer to a properly initialized gxio_usb_host_context_t.   * @return The length of the shim's MMIO registers.   */ -extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context); +extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context);  #endif /* _GXIO_USB_H_ */ diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile index 27a2bf39dae..21f77bf68c6 100644 --- a/arch/tile/kernel/Makefile +++ b/arch/tile/kernel/Makefile @@ -25,6 +25,8 @@ obj-$(CONFIG_PCI)		+= pci_gx.o  else  obj-$(CONFIG_PCI)		+= pci.o  endif +obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o +obj-$(CONFIG_USE_PMC)		+= pmc.o  obj-$(CONFIG_TILE_USB)		+= usb.o  obj-$(CONFIG_TILE_HVGLUE_TRACE)	+= hvglue_trace.o  obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o mcount_64.o diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c index ed378416b86..49120843ff9 100644 --- a/arch/tile/kernel/compat.c +++ b/arch/tile/kernel/compat.c @@ -84,7 +84,7 @@ COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,  {  	return sys_llseek(fd, offset_high, offset_low, result, origin);  } -  +  /* Provide the compat syscall number to call mapping. */  #undef __SYSCALL  #define __SYSCALL(nr, call) [nr] = (call), diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index 85e00b2f39b..19c04b5ce40 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -49,7 +49,7 @@ struct compat_rt_sigframe {  	struct compat_ucontext uc;  }; -int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from) +int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from)  {  	int err; diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c index f1c452092ee..8d52d83cc51 100644 --- a/arch/tile/kernel/ftrace.c +++ b/arch/tile/kernel/ftrace.c @@ -167,10 +167,8 @@ int ftrace_make_nop(struct module *mod,  	return ret;  } -int __init ftrace_dyn_arch_init(void *data) +int __init ftrace_dyn_arch_init(void)  { -	*(unsigned long *)data = 0; -  	return 0;  }  #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/tile/kernel/futex_64.S b/arch/tile/kernel/futex_64.S deleted file mode 100644 index f465d1eda20..00000000000 --- a/arch/tile/kernel/futex_64.S +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2011 Tilera Corporation. All Rights Reserved. - * - *   This program is free software; you can redistribute it and/or - *   modify it under the terms of the GNU General Public License - *   as published by the Free Software Foundation, version 2. - * - *   This program is distributed in the hope that it will be useful, but - *   WITHOUT ANY WARRANTY; without even the implied warranty of - *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - *   NON INFRINGEMENT.  See the GNU General Public License for - *   more details. - * - * Atomically access user memory, but use MMU to avoid propagating - * kernel exceptions. - */ - -#include <linux/linkage.h> -#include <asm/errno.h> -#include <asm/futex.h> -#include <asm/page.h> -#include <asm/processor.h> - -/* - * Provide a set of atomic memory operations supporting <asm/futex.h>. 
- * - * r0: user address to manipulate - * r1: new value to write, or for cmpxchg, old value to compare against - * r2: (cmpxchg only) new value to write - * - * Return __get_user struct, r0 with value, r1 with error. - */ -#define FUTEX_OP(name, ...) \ -STD_ENTRY(futex_##name)			\ -	__VA_ARGS__;			\ -	{				\ -	 move   r1, zero;		\ -	 jrp    lr			\ -	};				\ -	STD_ENDPROC(futex_##name);	\ -	.pushsection __ex_table,"a";	\ -	.quad 1b, get_user_fault;	\ -	.popsection - -	.pushsection .fixup,"ax" -get_user_fault: -	{ movei r1, -EFAULT; jrp lr } -	ENDPROC(get_user_fault) -	.popsection - -FUTEX_OP(cmpxchg, mtspr CMPEXCH_VALUE, r1; 1: cmpexch4 r0, r0, r2) -FUTEX_OP(set, 1: exch4 r0, r0, r1) -FUTEX_OP(add, 1: fetchadd4 r0, r0, r1) -FUTEX_OP(or, 1: fetchor4 r0, r0, r1) -FUTEX_OP(andn, nor r1, r1, zero; 1: fetchand4 r0, r0, r1) diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c index df27a1fd94a..531f4c36535 100644 --- a/arch/tile/kernel/hardwall.c +++ b/arch/tile/kernel/hardwall.c @@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {  		0,  		"udn",  		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), -		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock), +		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),  		NULL  	},  #ifndef __tilepro__ @@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {  		1,  /* disabled pending hypervisor support */  		"idn",  		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), -		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock), +		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),  		NULL  	},  	{  /* access to user-space IPI */ @@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {  		0,  		"ipi",  		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), -		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock), +		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),  		NULL  	},  #endif diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 088d5c141e6..cdbda45a4e4 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -313,13 +313,13 @@ intvec_\vecname:  	 movei  r3, 0  	}  	.else -	.ifc \c_routine, op_handle_perf_interrupt +	.ifc \c_routine, handle_perf_interrupt  	{  	 mfspr  r2, PERF_COUNT_STS  	 movei  r3, -1   /* not used, but set for consistency */  	}  	.else -	.ifc \c_routine, op_handle_aux_perf_interrupt +	.ifc \c_routine, handle_perf_interrupt  	{  	 mfspr  r2, AUX_PERF_COUNT_STS  	 movei  r3, -1   /* not used, but set for consistency */ @@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)  	}  	bzt     r28, 1f  	bnz     r29, 1f +	/* Disable interrupts explicitly for preemption. 
*/ +	IRQ_DISABLE(r20,r21) +	TRACE_IRQS_OFF  	jal     preempt_schedule_irq  	FEEDBACK_REENTER(interrupt_return)  1: @@ -943,6 +946,13 @@ STD_ENTRY(interrupt_return)  	bzt     r30, .Lrestore_regs  3: +	/* We are relying on INT_PERF_COUNT at 33, and AUX_PERF_COUNT at 48 */ +	{ +	 moveli r0, lo16(1 << (INT_PERF_COUNT - 32)) +	 bz     r31, .Lrestore_regs +	} +	auli    r0, r0, ha16(1 << (INT_AUX_PERF_COUNT - 32)) +	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, r0  	/*  	 * We now commit to returning from this interrupt, since we will be @@ -1168,6 +1178,10 @@ handle_nmi:  	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)  	}  	FEEDBACK_REENTER(handle_nmi) +	{ +	 movei  r30, 1 +	 seq    r31, r0, zero +	}  	j       interrupt_return  	STD_ENDPROC(handle_nmi) @@ -1832,8 +1846,9 @@ int_unalign:  /* Include .intrpt array of interrupt vectors */  	.section ".intrpt", "ax" -#define op_handle_perf_interrupt bad_intr -#define op_handle_aux_perf_interrupt bad_intr +#ifndef CONFIG_USE_PMC +#define handle_perf_interrupt bad_intr +#endif  #ifndef CONFIG_HARDWALL  #define do_hardwall_trap bad_intr @@ -1874,7 +1889,7 @@ int_unalign:  	int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr  	int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr  	int_hand     INT_PERF_COUNT, PERF_COUNT, \ -		     op_handle_perf_interrupt, handle_nmi +		     handle_perf_interrupt, handle_nmi  	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr  #if CONFIG_KERNEL_PL == 2  	dc_dispatch  INT_INTCTRL_2, INTCTRL_2 @@ -1899,7 +1914,7 @@ int_unalign:  	int_hand     INT_SN_CPL, SN_CPL, bad_intr  	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap  	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ -		     op_handle_aux_perf_interrupt, handle_nmi +		     handle_perf_interrupt, handle_nmi  	/* Synthetic interrupt delivered only by the simulator */  	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index ec755d3f373..5b67efcecab 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S @@ -509,10 +509,10 @@ intvec_\vecname:  	.ifc \c_routine, do_trap  	mfspr   r2, GPV_REASON  	.else -	.ifc \c_routine, op_handle_perf_interrupt +	.ifc \c_routine, handle_perf_interrupt  	mfspr   r2, PERF_COUNT_STS  	.else -	.ifc \c_routine, op_handle_aux_perf_interrupt +	.ifc \c_routine, handle_perf_interrupt  	mfspr   r2, AUX_PERF_COUNT_STS  	.endif  	.endif @@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)  	}  	beqzt   r28, 1f  	bnez    r29, 1f +	/* Disable interrupts explicitly for preemption. 
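
The hunks above add an explicit IRQ_DISABLE/TRACE_IRQS_OFF before the call in both the 32-bit and 64-bit interrupt-return paths because preempt_schedule_irq() must be entered with hard interrupts disabled; it re-enables them itself around __schedule(). A rough C equivalent of the assembly pattern, for illustration only:

	if (!preempt_count() && need_resched()) {
		local_irq_disable();		/* IRQ_DISABLE(r20,r21) */
		trace_hardirqs_off();		/* TRACE_IRQS_OFF */
		preempt_schedule_irq();		/* returns with IRQs still off */
	}
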
*/ +	IRQ_DISABLE(r20,r21) +	TRACE_IRQS_OFF  	jal     preempt_schedule_irq  	FEEDBACK_REENTER(interrupt_return)  1: @@ -968,6 +971,15 @@ STD_ENTRY(interrupt_return)  	beqzt   r30, .Lrestore_regs  3: +#if INT_PERF_COUNT + 1 != INT_AUX_PERF_COUNT +# error Bad interrupt assumption +#endif +	{ +	 movei  r0, 3   /* two adjacent bits for the PERF_COUNT mask */ +	 beqz   r31, .Lrestore_regs +	} +	shli    r0, r0, INT_PERF_COUNT +	mtspr   SPR_INTERRUPT_MASK_RESET_K, r0  	/*  	 * We now commit to returning from this interrupt, since we will be @@ -1184,7 +1196,7 @@ handle_nmi:  	FEEDBACK_REENTER(handle_nmi)  	{  	 movei  r30, 1 -	 move   r31, r0 +	 cmpeq  r31, r0, zero  	}  	j       interrupt_return  	STD_ENDPROC(handle_nmi) @@ -1488,8 +1500,9 @@ STD_ENTRY(fill_ra_stack)  	.global intrpt_start  intrpt_start: -#define op_handle_perf_interrupt bad_intr -#define op_handle_aux_perf_interrupt bad_intr +#ifndef CONFIG_USE_PMC +#define handle_perf_interrupt bad_intr +#endif  #ifndef CONFIG_HARDWALL  #define do_hardwall_trap bad_intr @@ -1537,9 +1550,9 @@ intrpt_start:  #endif  	int_hand     INT_IPI_0, IPI_0, bad_intr  	int_hand     INT_PERF_COUNT, PERF_COUNT, \ -		     op_handle_perf_interrupt, handle_nmi +		     handle_perf_interrupt, handle_nmi  	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ -		     op_handle_perf_interrupt, handle_nmi +		     handle_perf_interrupt, handle_nmi  	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr  #if CONFIG_KERNEL_PL == 2  	dc_dispatch  INT_INTCTRL_2, INTCTRL_2 diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index 0586fdb9352..637f2ffaa5f 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c @@ -21,6 +21,7 @@  #include <hv/drv_pcie_rc_intf.h>  #include <arch/spr_def.h>  #include <asm/traps.h> +#include <linux/perf_event.h>  /* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */  #define IS_HW_CLEARED 1 @@ -53,13 +54,6 @@ static DEFINE_PER_CPU(unsigned long, irq_disable_mask)   */  static DEFINE_PER_CPU(int, irq_depth); -/* State for allocating IRQs on Gx. */ -#if CHIP_HAS_IPI() -static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) & -				      (~(1UL << IRQ_RESCHEDULE)); -static DEFINE_SPINLOCK(available_irqs_lock); -#endif -  #if CHIP_HAS_IPI()  /* Use SPRs to manipulate device interrupts. */  #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask) @@ -261,37 +255,27 @@ void ack_bad_irq(unsigned int irq)  }  /* - * Generic, controller-independent functions: + * /proc/interrupts printing:   */ - -#if CHIP_HAS_IPI() -int create_irq(void) +int arch_show_interrupts(struct seq_file *p, int prec)  { -	unsigned long flags; -	int result; - -	spin_lock_irqsave(&available_irqs_lock, flags); -	if (available_irqs == 0) -		result = -ENOMEM; -	else { -		result = __ffs(available_irqs); -		available_irqs &= ~(1UL << result); -		dynamic_irq_init(result); -	} -	spin_unlock_irqrestore(&available_irqs_lock, flags); +#ifdef CONFIG_PERF_EVENTS +	int i; -	return result; +	seq_printf(p, "%*s: ", prec, "PMI"); + +	for_each_online_cpu(i) +		seq_printf(p, "%10llu ", per_cpu(perf_irqs, i)); +	seq_puts(p, "  perf_events\n"); +#endif +	return 0;  } -EXPORT_SYMBOL(create_irq); -void destroy_irq(unsigned int irq) +#if CHIP_HAS_IPI() +int arch_setup_hwirq(unsigned int irq, int node)  { -	unsigned long flags; - -	spin_lock_irqsave(&available_irqs_lock, flags); -	available_irqs |= (1UL << irq); -	dynamic_irq_cleanup(irq); -	spin_unlock_irqrestore(&available_irqs_lock, flags); +	return irq >= NR_IRQS ? 
-EINVAL : 0;  } -EXPORT_SYMBOL(destroy_irq); + +void arch_teardown_hwirq(unsigned int irq) { }  #endif diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c index 00331af9525..7867266f971 100644 --- a/arch/tile/kernel/messaging.c +++ b/arch/tile/kernel/messaging.c @@ -68,8 +68,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum)  #endif  	while (1) { -		rmi = hv_receive_message(__get_cpu_var(msg_state), -					 (HV_VirtAddr) message, +		HV_MsgState *state = this_cpu_ptr(&msg_state); +		rmi = hv_receive_message(*state, (HV_VirtAddr) message,  					 sizeof(message));  		if (rmi.msglen == 0)  			break; diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index b7180e6e900..1f80a88c75a 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -250,16 +250,11 @@ static void fixup_read_and_payload_sizes(void)  	/* Scan for the smallest maximum payload size. */  	for_each_pci_dev(dev) { -		u32 devcap; -		int max_payload; -  		if (!pci_is_pcie(dev))  			continue; -		pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap); -		max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; -		if (max_payload < smallest_max_payload) -			smallest_max_payload = max_payload; +		if (dev->pcie_mpss < smallest_max_payload) +			smallest_max_payload = dev->pcie_mpss;  	}  	/* Now, set the max_payload_size for all devices to that value. */ diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index a97a6452b81..e39f9c54280 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c @@ -350,10 +350,9 @@ static int tile_init_irqs(struct pci_controller *controller)  		int cpu;  		/* Ask the kernel to allocate an IRQ. */ -		irq = create_irq(); -		if (irq < 0) { +		irq = irq_alloc_hwirq(-1); +		if (!irq) {  			pr_err("PCI: no free irq vectors, failed for %d\n", i); -  			goto free_irqs;  		}  		controller->irq_intx_table[i] = irq; @@ -382,7 +381,7 @@ static int tile_init_irqs(struct pci_controller *controller)  free_irqs:  	for (j = 0; j < i; j++) -		destroy_irq(controller->irq_intx_table[j]); +		irq_free_hwirq(controller->irq_intx_table[j]);  	return -1;  } @@ -1065,18 +1064,6 @@ char *__init pcibios_setup(char *str)  }  /* - * Enable memory address decoding, as appropriate, for the - * device described by the 'dev' struct. - * - * This is called from the generic PCI layer, and can be called - * for bridges or endpoints. - */ -int pcibios_enable_device(struct pci_dev *dev, int mask) -{ -	return pci_enable_resources(dev, mask); -} - -/*   * Called for each device after PCI setup is done.   * We initialize the PCI device capabilities conservatively, assuming that   * all devices can only address the 32-bit DMA space. The exception here is @@ -1512,9 +1499,9 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)  	int irq;  	int ret; -	irq = create_irq(); -	if (irq < 0) -		return irq; +	irq = irq_alloc_hwirq(-1); +	if (!irq) +		return -ENOSPC;  	/*  	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail @@ -1613,11 +1600,11 @@ hv_msi_config_failure:  	/* Free mem-map */  msi_mem_map_alloc_failure:  is_64_failure: -	destroy_irq(irq); +	irq_free_hwirq(irq);  	return ret;  }  void arch_teardown_msi_irq(unsigned int irq)  { -	destroy_irq(irq); +	irq_free_hwirq(irq);  } diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c new file mode 100644 index 00000000000..2bf6c9c135c --- /dev/null +++ b/arch/tile/kernel/perf_event.c @@ -0,0 +1,1005 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. 
+ * + *   This program is free software; you can redistribute it and/or + *   modify it under the terms of the GNU General Public License + *   as published by the Free Software Foundation, version 2. + * + *   This program is distributed in the hope that it will be useful, but + *   WITHOUT ANY WARRANTY; without even the implied warranty of + *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + *   NON INFRINGEMENT.  See the GNU General Public License for + *   more details. + * + * + * Perf_events support for Tile processor. + * + * This code is based upon the x86 perf event + * code, which is: + * + *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> + *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + *  Copyright (C) 2009 Jaswinder Singh Rajput + *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> + *  Copyright (C) 2009 Google, Inc., Stephane Eranian + */ + +#include <linux/kprobes.h> +#include <linux/kernel.h> +#include <linux/kdebug.h> +#include <linux/mutex.h> +#include <linux/bitmap.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/perf_event.h> +#include <linux/atomic.h> +#include <asm/traps.h> +#include <asm/stack.h> +#include <asm/pmc.h> +#include <hv/hypervisor.h> + +#define TILE_MAX_COUNTERS	4 + +#define PERF_COUNT_0_IDX	0 +#define PERF_COUNT_1_IDX	1 +#define AUX_PERF_COUNT_0_IDX	2 +#define AUX_PERF_COUNT_1_IDX	3 + +struct cpu_hw_events { +	int			n_events; +	struct perf_event	*events[TILE_MAX_COUNTERS]; /* counter order */ +	struct perf_event	*event_list[TILE_MAX_COUNTERS]; /* enabled +								order */ +	int			assign[TILE_MAX_COUNTERS]; +	unsigned long		active_mask[BITS_TO_LONGS(TILE_MAX_COUNTERS)]; +	unsigned long		used_mask; +}; + +/* TILE arch specific performance monitor unit */ +struct tile_pmu { +	const char	*name; +	int		version; +	const int	*hw_events;	/* generic hw events table */ +	/* generic hw cache events table */ +	const int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX] +				       [PERF_COUNT_HW_CACHE_OP_MAX] +				       [PERF_COUNT_HW_CACHE_RESULT_MAX]; +	int		(*map_hw_event)(u64);	 /*method used to map +						  hw events */ +	int		(*map_cache_event)(u64); /*method used to map +						  cache events */ + +	u64		max_period;		/* max sampling period */ +	u64		cntval_mask;		/* counter width mask */ +	int		cntval_bits;		/* counter width */ +	int		max_events;		/* max generic hw events +						in map */ +	int		num_counters;		/* number base + aux counters */ +	int		num_base_counters;	/* number base counters */ +}; + +DEFINE_PER_CPU(u64, perf_irqs); +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +#define TILE_OP_UNSUPP		(-1) + +#ifndef __tilegx__ +/* TILEPro hardware events map */ +static const int tile_hw_event_map[] = { +	[PERF_COUNT_HW_CPU_CYCLES]		= 0x01, /* ONE */ +	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x06, /* MP_BUNDLE_RETIRED */ +	[PERF_COUNT_HW_CACHE_REFERENCES]	= TILE_OP_UNSUPP, +	[PERF_COUNT_HW_CACHE_MISSES]		= TILE_OP_UNSUPP, +	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x16, /* +					  MP_CONDITIONAL_BRANCH_ISSUED */ +	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x14, /* +					  MP_CONDITIONAL_BRANCH_MISSPREDICT */ +	[PERF_COUNT_HW_BUS_CYCLES]		= TILE_OP_UNSUPP, +}; +#else +/* TILEGx hardware events map */ +static const int tile_hw_event_map[] = { +	[PERF_COUNT_HW_CPU_CYCLES]		= 0x181, /* ONE */ +	[PERF_COUNT_HW_INSTRUCTIONS]		= 0xdb, /* 
INSTRUCTION_BUNDLE */ +	[PERF_COUNT_HW_CACHE_REFERENCES]	= TILE_OP_UNSUPP, +	[PERF_COUNT_HW_CACHE_MISSES]		= TILE_OP_UNSUPP, +	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0xd9, /* +						COND_BRANCH_PRED_CORRECT */ +	[PERF_COUNT_HW_BRANCH_MISSES]		= 0xda, /* +						COND_BRANCH_PRED_INCORRECT */ +	[PERF_COUNT_HW_BUS_CYCLES]		= TILE_OP_UNSUPP, +}; +#endif + +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Generalized hw caching related hw_event table, filled + * in on a per model basis. A value of -1 means + * 'not supported', any other value means the + * raw hw_event ID. + */ +#ifndef __tilegx__ +/* TILEPro hardware cache event map */ +static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX] +				     [PERF_COUNT_HW_CACHE_OP_MAX] +				     [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0x21, /* RD_MISS */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0x22, /* WR_MISS */ +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(L1I)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = 0x12, /* MP_ICACHE_HIT_ISSUED */ +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(LL)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(DTLB)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = 0x1d, /* TLB_CNT */ +		[C(RESULT_MISS)] = 0x20, /* TLB_EXCEPTION */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(ITLB)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = 0x13, /* MP_ITLB_HIT_ISSUED */ +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(BPU)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +}; +#else +/* TILEGx hardware events map */ +static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX] +				     [PERF_COUNT_HW_CACHE_OP_MAX] +				     [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { +	/* +	 * Like some other architectures (e.g. ARM), the performance +	 * counters don't differentiate between read and write +	 * accesses/misses, so this isn't strictly correct, but it's the +	 * best we can do. Writes and reads get combined. 
+	 */ +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0x44, /* RD_MISS */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0x45, /* WR_MISS */ +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(L1I)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(LL)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(DTLB)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */ +		[C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */ +		[C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */ +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(ITLB)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */ +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */ +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +[C(BPU)] = { +	[C(OP_READ)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_WRITE)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +	[C(OP_PREFETCH)] = { +		[C(RESULT_ACCESS)] = TILE_OP_UNSUPP, +		[C(RESULT_MISS)] = TILE_OP_UNSUPP, +	}, +}, +}; +#endif + +static atomic_t tile_active_events; +static DEFINE_MUTEX(perf_intr_reserve_mutex); + +static int tile_map_hw_event(u64 config); +static int tile_map_cache_event(u64 config); + +static int tile_pmu_handle_irq(struct pt_regs *regs, int fault); + +/* + * To avoid new_raw_count getting larger then pre_raw_count + * in tile_perf_event_update(), we limit the value of max_period to 2^31 - 1. + */ +static const struct tile_pmu tilepmu = { +#ifndef __tilegx__ +	.name = "tilepro", +#else +	.name = "tilegx", +#endif +	.max_events = ARRAY_SIZE(tile_hw_event_map), +	.map_hw_event = tile_map_hw_event, +	.hw_events = tile_hw_event_map, +	.map_cache_event = tile_map_cache_event, +	.cache_events = &tile_cache_event_map, +	.cntval_bits = 32, +	.cntval_mask = (1ULL << 32) - 1, +	.max_period = (1ULL << 31) - 1, +	.num_counters = TILE_MAX_COUNTERS, +	.num_base_counters = TILE_BASE_COUNTERS, +}; + +static const struct tile_pmu *tile_pmu __read_mostly; + +/* + * Check whether perf event is enabled. + */ +int tile_perf_enabled(void) +{ +	return atomic_read(&tile_active_events) != 0; +} + +/* + * Read Performance Counters. 
+ */ +static inline u64 read_counter(int idx) +{ +	u64 val = 0; + +	/* __insn_mfspr() only takes an immediate argument */ +	switch (idx) { +	case PERF_COUNT_0_IDX: +		val = __insn_mfspr(SPR_PERF_COUNT_0); +		break; +	case PERF_COUNT_1_IDX: +		val = __insn_mfspr(SPR_PERF_COUNT_1); +		break; +	case AUX_PERF_COUNT_0_IDX: +		val = __insn_mfspr(SPR_AUX_PERF_COUNT_0); +		break; +	case AUX_PERF_COUNT_1_IDX: +		val = __insn_mfspr(SPR_AUX_PERF_COUNT_1); +		break; +	default: +		WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX || +				idx < PERF_COUNT_0_IDX); +	} + +	return val; +} + +/* + * Write Performance Counters. + */ +static inline void write_counter(int idx, u64 value) +{ +	/* __insn_mtspr() only takes an immediate argument */ +	switch (idx) { +	case PERF_COUNT_0_IDX: +		__insn_mtspr(SPR_PERF_COUNT_0, value); +		break; +	case PERF_COUNT_1_IDX: +		__insn_mtspr(SPR_PERF_COUNT_1, value); +		break; +	case AUX_PERF_COUNT_0_IDX: +		__insn_mtspr(SPR_AUX_PERF_COUNT_0, value); +		break; +	case AUX_PERF_COUNT_1_IDX: +		__insn_mtspr(SPR_AUX_PERF_COUNT_1, value); +		break; +	default: +		WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX || +				idx < PERF_COUNT_0_IDX); +	} +} + +/* + * Enable performance event by setting + * Performance Counter Control registers. + */ +static inline void tile_pmu_enable_event(struct perf_event *event) +{ +	struct hw_perf_event *hwc = &event->hw; +	unsigned long cfg, mask; +	int shift, idx = hwc->idx; + +	/* +	 * prevent early activation from tile_pmu_start() in hw_perf_enable +	 */ + +	if (WARN_ON_ONCE(idx == -1)) +		return; + +	if (idx < tile_pmu->num_base_counters) +		cfg = __insn_mfspr(SPR_PERF_COUNT_CTL); +	else +		cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL); + +	switch (idx) { +	case PERF_COUNT_0_IDX: +	case AUX_PERF_COUNT_0_IDX: +		mask = TILE_EVENT_MASK; +		shift = 0; +		break; +	case PERF_COUNT_1_IDX: +	case AUX_PERF_COUNT_1_IDX: +		mask = TILE_EVENT_MASK << 16; +		shift = 16; +		break; +	default: +		WARN_ON_ONCE(idx < PERF_COUNT_0_IDX || +			idx > AUX_PERF_COUNT_1_IDX); +		return; +	} + +	/* Clear mask bits to enable the event. */ +	cfg &= ~mask; +	cfg |= hwc->config << shift; + +	if (idx < tile_pmu->num_base_counters) +		__insn_mtspr(SPR_PERF_COUNT_CTL, cfg); +	else +		__insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg); +} + +/* + * Disable performance event by clearing + * Performance Counter Control registers. + */ +static inline void tile_pmu_disable_event(struct perf_event *event) +{ +	struct hw_perf_event *hwc = &event->hw; +	unsigned long cfg, mask; +	int idx = hwc->idx; + +	if (idx == -1) +		return; + +	if (idx < tile_pmu->num_base_counters) +		cfg = __insn_mfspr(SPR_PERF_COUNT_CTL); +	else +		cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL); + +	switch (idx) { +	case PERF_COUNT_0_IDX: +	case AUX_PERF_COUNT_0_IDX: +		mask = TILE_PLM_MASK; +		break; +	case PERF_COUNT_1_IDX: +	case AUX_PERF_COUNT_1_IDX: +		mask = TILE_PLM_MASK << 16; +		break; +	default: +		WARN_ON_ONCE(idx < PERF_COUNT_0_IDX || +			idx > AUX_PERF_COUNT_1_IDX); +		return; +	} + +	/* Set mask bits to disable the event. */ +	cfg |= mask; + +	if (idx < tile_pmu->num_base_counters) +		__insn_mtspr(SPR_PERF_COUNT_CTL, cfg); +	else +		__insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg); +} + +/* + * Propagate event elapsed time into the generic event. + * Can only be executed on the CPU where the event is active. + * Returns the delta events processed. 
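
With cntval_bits = 32 the update routine that follows uses shift = 32, and the shift-up/shift-down pair reduces the difference modulo 2^32, so the delta stays correct even if the hardware counter wrapped between two reads. A quick worked example (illustrative only):

	/* Counter wrapped between reads: 32 events really occurred. */
	u64 prev_raw_count = 0xfffffff0ULL;
	u64 new_raw_count  = 0x00000010ULL;
	u64 delta = ((new_raw_count << 32) - (prev_raw_count << 32)) >> 32;
	/* delta == 0x20 */
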
+ */ +static u64 tile_perf_event_update(struct perf_event *event) +{ +	struct hw_perf_event *hwc = &event->hw; +	int shift = 64 - tile_pmu->cntval_bits; +	u64 prev_raw_count, new_raw_count; +	u64 oldval; +	int idx = hwc->idx; +	u64 delta; + +	/* +	 * Careful: an NMI might modify the previous event value. +	 * +	 * Our tactic to handle this is to first atomically read and +	 * exchange a new raw count - then add that new-prev delta +	 * count to the generic event atomically: +	 */ +again: +	prev_raw_count = local64_read(&hwc->prev_count); +	new_raw_count = read_counter(idx); + +	oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count, +				 new_raw_count); +	if (oldval != prev_raw_count) +		goto again; + +	/* +	 * Now we have the new raw value and have updated the prev +	 * timestamp already. We can now calculate the elapsed delta +	 * (event-)time and add that to the generic event. +	 * +	 * Careful, not all hw sign-extends above the physical width +	 * of the count. +	 */ +	delta = (new_raw_count << shift) - (prev_raw_count << shift); +	delta >>= shift; + +	local64_add(delta, &event->count); +	local64_sub(delta, &hwc->period_left); + +	return new_raw_count; +} + +/* + * Set the next IRQ period, based on the hwc->period_left value. + * To be called with the event disabled in hw: + */ +static int tile_event_set_period(struct perf_event *event) +{ +	struct hw_perf_event *hwc = &event->hw; +	int idx = hwc->idx; +	s64 left = local64_read(&hwc->period_left); +	s64 period = hwc->sample_period; +	int ret = 0; + +	/* +	 * If we are way outside a reasonable range then just skip forward: +	 */ +	if (unlikely(left <= -period)) { +		left = period; +		local64_set(&hwc->period_left, left); +		hwc->last_period = period; +		ret = 1; +	} + +	if (unlikely(left <= 0)) { +		left += period; +		local64_set(&hwc->period_left, left); +		hwc->last_period = period; +		ret = 1; +	} +	if (left > tile_pmu->max_period) +		left = tile_pmu->max_period; + +	/* +	 * The hw event starts counting from this event offset, +	 * mark it to be able to extra future deltas: +	 */ +	local64_set(&hwc->prev_count, (u64)-left); + +	write_counter(idx, (u64)(-left) & tile_pmu->cntval_mask); + +	perf_event_update_userpage(event); + +	return ret; +} + +/* + * Stop the event but do not release the PMU counter + */ +static void tile_pmu_stop(struct perf_event *event, int flags) +{ +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	struct hw_perf_event *hwc = &event->hw; +	int idx = hwc->idx; + +	if (__test_and_clear_bit(idx, cpuc->active_mask)) { +		tile_pmu_disable_event(event); +		cpuc->events[hwc->idx] = NULL; +		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); +		hwc->state |= PERF_HES_STOPPED; +	} + +	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { +		/* +		 * Drain the remaining delta count out of a event +		 * that we are disabling: +		 */ +		tile_perf_event_update(event); +		hwc->state |= PERF_HES_UPTODATE; +	} +} + +/* + * Start an event (without re-assigning counter) + */ +static void tile_pmu_start(struct perf_event *event, int flags) +{ +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	int idx = event->hw.idx; + +	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) +		return; + +	if (WARN_ON_ONCE(idx == -1)) +		return; + +	if (flags & PERF_EF_RELOAD) { +		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); +		tile_event_set_period(event); +	} + +	event->hw.state = 0; + +	cpuc->events[idx] = event; +	__set_bit(idx, cpuc->active_mask); + +	unmask_pmc_interrupts(); + +	
tile_pmu_enable_event(event); + +	perf_event_update_userpage(event); +} + +/* + * Add a single event to the PMU. + * + * The event is added to the group of enabled events + * but only if it can be scehduled with existing events. + */ +static int tile_pmu_add(struct perf_event *event, int flags) +{ +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	struct hw_perf_event *hwc; +	unsigned long mask; +	int b, max_cnt; + +	hwc = &event->hw; + +	/* +	 * We are full. +	 */ +	if (cpuc->n_events == tile_pmu->num_counters) +		return -ENOSPC; + +	cpuc->event_list[cpuc->n_events] = event; +	cpuc->n_events++; + +	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; +	if (!(flags & PERF_EF_START)) +		hwc->state |= PERF_HES_ARCH; + +	/* +	 * Find first empty counter. +	 */ +	max_cnt = tile_pmu->num_counters; +	mask = ~cpuc->used_mask; + +	/* Find next free counter. */ +	b = find_next_bit(&mask, max_cnt, 0); + +	/* Should not happen. */ +	if (WARN_ON_ONCE(b == max_cnt)) +		return -ENOSPC; + +	/* +	 * Assign counter to event. +	 */ +	event->hw.idx = b; +	__set_bit(b, &cpuc->used_mask); + +	/* +	 * Start if requested. +	 */ +	if (flags & PERF_EF_START) +		tile_pmu_start(event, PERF_EF_RELOAD); + +	return 0; +} + +/* + * Delete a single event from the PMU. + * + * The event is deleted from the group of enabled events. + * If it is the last event, disable PMU interrupt. + */ +static void tile_pmu_del(struct perf_event *event, int flags) +{ +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	int i; + +	/* +	 * Remove event from list, compact list if necessary. +	 */ +	for (i = 0; i < cpuc->n_events; i++) { +		if (cpuc->event_list[i] == event) { +			while (++i < cpuc->n_events) +				cpuc->event_list[i-1] = cpuc->event_list[i]; +			--cpuc->n_events; +			cpuc->events[event->hw.idx] = NULL; +			__clear_bit(event->hw.idx, &cpuc->used_mask); +			tile_pmu_stop(event, PERF_EF_UPDATE); +			break; +		} +	} +	/* +	 * If there are no events left, then mask PMU interrupt. +	 */ +	if (cpuc->n_events == 0) +		mask_pmc_interrupts(); +	perf_event_update_userpage(event); +} + +/* + * Propagate event elapsed time into the event. + */ +static inline void tile_pmu_read(struct perf_event *event) +{ +	tile_perf_event_update(event); +} + +/* + * Map generic events to Tile PMU. + */ +static int tile_map_hw_event(u64 config) +{ +	if (config >= tile_pmu->max_events) +		return -EINVAL; +	return tile_pmu->hw_events[config]; +} + +/* + * Map generic hardware cache events to Tile PMU. 
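
tile_map_cache_event() below unpacks the generic perf ABI encoding, in which a cache event is described as type | (op << 8) | (result << 16). As an illustration (not part of this commit), a request to count L1D write misses would look like this from the perf core's point of view, and on TILE-Gx it resolves through tile_cache_event_map[] to raw event 0x45 (WR_MISS):

	struct perf_event_attr attr = {
		.type   = PERF_TYPE_HW_CACHE,
		.config = PERF_COUNT_HW_CACHE_L1D |
			  (PERF_COUNT_HW_CACHE_OP_WRITE << 8) |
			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
	};
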
+ */ +static int tile_map_cache_event(u64 config) +{ +	unsigned int cache_type, cache_op, cache_result; +	int code; + +	if (!tile_pmu->cache_events) +		return -ENOENT; + +	cache_type = (config >>  0) & 0xff; +	if (cache_type >= PERF_COUNT_HW_CACHE_MAX) +		return -EINVAL; + +	cache_op = (config >>  8) & 0xff; +	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) +		return -EINVAL; + +	cache_result = (config >> 16) & 0xff; +	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) +		return -EINVAL; + +	code = (*tile_pmu->cache_events)[cache_type][cache_op][cache_result]; +	if (code == TILE_OP_UNSUPP) +		return -EINVAL; + +	return code; +} + +static void tile_event_destroy(struct perf_event *event) +{ +	if (atomic_dec_return(&tile_active_events) == 0) +		release_pmc_hardware(); +} + +static int __tile_event_init(struct perf_event *event) +{ +	struct perf_event_attr *attr = &event->attr; +	struct hw_perf_event *hwc = &event->hw; +	int code; + +	switch (attr->type) { +	case PERF_TYPE_HARDWARE: +		code = tile_pmu->map_hw_event(attr->config); +		break; +	case PERF_TYPE_HW_CACHE: +		code = tile_pmu->map_cache_event(attr->config); +		break; +	case PERF_TYPE_RAW: +		code = attr->config & TILE_EVENT_MASK; +		break; +	default: +		/* Should not happen. */ +		return -EOPNOTSUPP; +	} + +	if (code < 0) +		return code; + +	hwc->config = code; +	hwc->idx = -1; + +	if (attr->exclude_user) +		hwc->config |= TILE_CTL_EXCL_USER; + +	if (attr->exclude_kernel) +		hwc->config |= TILE_CTL_EXCL_KERNEL; + +	if (attr->exclude_hv) +		hwc->config |= TILE_CTL_EXCL_HV; + +	if (!hwc->sample_period) { +		hwc->sample_period = tile_pmu->max_period; +		hwc->last_period = hwc->sample_period; +		local64_set(&hwc->period_left, hwc->sample_period); +	} +	event->destroy = tile_event_destroy; +	return 0; +} + +static int tile_event_init(struct perf_event *event) +{ +	int err = 0; +	perf_irq_t old_irq_handler = NULL; + +	if (atomic_inc_return(&tile_active_events) == 1) +		old_irq_handler = reserve_pmc_hardware(tile_pmu_handle_irq); + +	if (old_irq_handler) { +		pr_warn("PMC hardware busy (reserved by oprofile)\n"); + +		atomic_dec(&tile_active_events); +		return -EBUSY; +	} + +	switch (event->attr.type) { +	case PERF_TYPE_RAW: +	case PERF_TYPE_HARDWARE: +	case PERF_TYPE_HW_CACHE: +		break; + +	default: +		return -ENOENT; +	} + +	err = __tile_event_init(event); +	if (err) { +		if (event->destroy) +			event->destroy(event); +	} +	return err; +} + +static struct pmu tilera_pmu = { +	.event_init	= tile_event_init, +	.add		= tile_pmu_add, +	.del		= tile_pmu_del, + +	.start		= tile_pmu_start, +	.stop		= tile_pmu_stop, + +	.read		= tile_pmu_read, +}; + +/* + * PMU's IRQ handler, PMU has 2 interrupts, they share the same handler. 
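
The handler that follows walks a single overflow bit-vector built by pmc_get_overflow(): bits 0-1 come from SPR_PERF_COUNT_STS and bits 2-3 from SPR_AUX_PERF_COUNT_STS shifted up by TILE_BASE_COUNTERS, so bit positions line up with PERF_COUNT_0_IDX through AUX_PERF_COUNT_1_IDX. For example (illustrative only), status == 0x5 means counter 0 and aux counter 0 both overflowed, and the for_each_set_bit() loop services exactly those two events.
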
+ */ +int tile_pmu_handle_irq(struct pt_regs *regs, int fault) +{ +	struct perf_sample_data data; +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); +	struct perf_event *event; +	struct hw_perf_event *hwc; +	u64 val; +	unsigned long status; +	int bit; + +	__get_cpu_var(perf_irqs)++; + +	if (!atomic_read(&tile_active_events)) +		return 0; + +	status = pmc_get_overflow(); +	pmc_ack_overflow(status); + +	for_each_set_bit(bit, &status, tile_pmu->num_counters) { + +		event = cpuc->events[bit]; + +		if (!event) +			continue; + +		if (!test_bit(bit, cpuc->active_mask)) +			continue; + +		hwc = &event->hw; + +		val = tile_perf_event_update(event); +		if (val & (1ULL << (tile_pmu->cntval_bits - 1))) +			continue; + +		perf_sample_data_init(&data, 0, event->hw.last_period); +		if (!tile_event_set_period(event)) +			continue; + +		if (perf_event_overflow(event, &data, regs)) +			tile_pmu_stop(event, 0); +	} + +	return 0; +} + +static bool __init supported_pmu(void) +{ +	tile_pmu = &tilepmu; +	return true; +} + +int __init init_hw_perf_events(void) +{ +	supported_pmu(); +	perf_pmu_register(&tilera_pmu, "cpu", PERF_TYPE_RAW); +	return 0; +} +arch_initcall(init_hw_perf_events); + +/* Callchain handling code. */ + +/* + * Tile specific backtracing code for perf_events. + */ +static inline void perf_callchain(struct perf_callchain_entry *entry, +		    struct pt_regs *regs) +{ +	struct KBacktraceIterator kbt; +	unsigned int i; + +	/* +	 * Get the address just after the "jalr" instruction that +	 * jumps to the handler for a syscall.  When we find this +	 * address in a backtrace, we silently ignore it, which gives +	 * us a one-step backtrace connection from the sys_xxx() +	 * function in the kernel to the xxx() function in libc. +	 * Otherwise, we lose the ability to properly attribute time +	 * from the libc calls to the kernel implementations, since +	 * oprofile only considers PCs from backtraces a pair at a time. +	 */ +	unsigned long handle_syscall_pc = handle_syscall_link_address(); + +	KBacktraceIterator_init(&kbt, NULL, regs); +	kbt.profile = 1; + +	/* +	 * The sample for the pc is already recorded.  Now we are adding the +	 * address of the callsites on the stack.  Our iterator starts +	 * with the frame of the (already sampled) call site.  If our +	 * iterator contained a "return address" field, we could have just +	 * used it and wouldn't have needed to skip the first +	 * frame.  That's in effect what the arm and x86 versions do. +	 * Instead we peel off the first iteration to get the equivalent +	 * behavior. +	 */ + +	if (KBacktraceIterator_end(&kbt)) +		return; +	KBacktraceIterator_next(&kbt); + +	/* +	 * Set stack depth to 16 for user and kernel space respectively, that +	 * is, total 32 stack frames. +	 */ +	for (i = 0; i < 16; ++i) { +		unsigned long pc; +		if (KBacktraceIterator_end(&kbt)) +			break; +		pc = kbt.it.pc; +		if (pc != handle_syscall_pc) +			perf_callchain_store(entry, pc); +		KBacktraceIterator_next(&kbt); +	} +} + +void perf_callchain_user(struct perf_callchain_entry *entry, +		    struct pt_regs *regs) +{ +	perf_callchain(entry, regs); +} + +void perf_callchain_kernel(struct perf_callchain_entry *entry, +		      struct pt_regs *regs) +{ +	perf_callchain(entry, regs); +} diff --git a/arch/tile/kernel/pmc.c b/arch/tile/kernel/pmc.c new file mode 100644 index 00000000000..db62cc34b95 --- /dev/null +++ b/arch/tile/kernel/pmc.c @@ -0,0 +1,121 @@ +/* + * Copyright 2014 Tilera Corporation. All Rights Reserved. 
+ * + *   This program is free software; you can redistribute it and/or + *   modify it under the terms of the GNU General Public License + *   as published by the Free Software Foundation, version 2. + * + *   This program is distributed in the hope that it will be useful, but + *   WITHOUT ANY WARRANTY; without even the implied warranty of + *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + *   NON INFRINGEMENT.  See the GNU General Public License for + *   more details. + */ + +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/atomic.h> +#include <linux/interrupt.h> + +#include <asm/processor.h> +#include <asm/pmc.h> + +perf_irq_t perf_irq = NULL; +int handle_perf_interrupt(struct pt_regs *regs, int fault) +{ +	int retval; + +	if (!perf_irq) +		panic("Unexpected PERF_COUNT interrupt %d\n", fault); + +	nmi_enter(); +	retval = perf_irq(regs, fault); +	nmi_exit(); +	return retval; +} + +/* Reserve PMC hardware if it is available. */ +perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq) +{ +	return cmpxchg(&perf_irq, NULL, new_perf_irq); +} +EXPORT_SYMBOL(reserve_pmc_hardware); + +/* Release PMC hardware. */ +void release_pmc_hardware(void) +{ +	perf_irq = NULL; +} +EXPORT_SYMBOL(release_pmc_hardware); + + +/* + * Get current overflow status of each performance counter, + * and auxiliary performance counter. + */ +unsigned long +pmc_get_overflow(void) +{ +	unsigned long status; + +	/* +	 * merge base+aux into a single vector +	 */ +	status = __insn_mfspr(SPR_PERF_COUNT_STS); +	status |= __insn_mfspr(SPR_AUX_PERF_COUNT_STS) << TILE_BASE_COUNTERS; +	return status; +} + +/* + * Clear the status bit for the corresponding counter, if written + * with a one. + */ +void +pmc_ack_overflow(unsigned long status) +{ +	/* +	 * clear overflow status by writing ones +	 */ +	__insn_mtspr(SPR_PERF_COUNT_STS, status); +	__insn_mtspr(SPR_AUX_PERF_COUNT_STS, status >> TILE_BASE_COUNTERS); +} + +/* + * The perf count interrupts are masked and unmasked explicitly, + * and only here.  The normal irq_enable() does not enable them, + * and irq_disable() does not disable them.  That lets these + * routines drive the perf count interrupts orthogonally. + * + * We also mask the perf count interrupts on entry to the perf count + * interrupt handler in assembly code, and by default unmask them + * again (with interrupt critical section protection) just before + * returning from the interrupt.  If the perf count handler returns + * a non-zero error code, then we don't re-enable them before returning. + * + * For Pro, we rely on both interrupts being in the same word to update + * them atomically so we never have one enabled and one disabled. 
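
As a usage sketch (not part of this commit), a hypothetical second client of <asm/pmc.h>, such as an oprofile-style driver, would claim and release the counters through the same protocol; reserve_pmc_hardware() returns the handler that was already installed, i.e. NULL on success. The my_profiler_* names below are invented for illustration:

	static int my_profiler_irq(struct pt_regs *regs, int fault);

	static int my_profiler_start(void)
	{
		if (reserve_pmc_hardware(my_profiler_irq) != NULL)
			return -EBUSY;	/* perf_events already owns the PMC */
		unmask_pmc_interrupts();
		return 0;
	}

	static void my_profiler_stop(void)
	{
		mask_pmc_interrupts();
		release_pmc_hardware();
	}
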
+ */ + +#if CHIP_HAS_SPLIT_INTR_MASK() +# if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 +#  error Fix assumptions about which word PERF_COUNT interrupts are in +# endif +#endif + +static inline unsigned long long pmc_mask(void) +{ +	unsigned long long mask = 1ULL << INT_PERF_COUNT; +	mask |= 1ULL << INT_AUX_PERF_COUNT; +	return mask; +} + +void unmask_pmc_interrupts(void) +{ +	interrupt_mask_reset_mask(pmc_mask()); +} + +void mask_pmc_interrupts(void) +{ +	interrupt_mask_set_mask(pmc_mask()); +} diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c index 681100c59fd..6829a950864 100644 --- a/arch/tile/kernel/proc.c +++ b/arch/tile/kernel/proc.c @@ -113,7 +113,7 @@ arch_initcall(proc_tile_init);   * Support /proc/sys/tile directory   */ -static ctl_table unaligned_subtable[] = { +static struct ctl_table unaligned_subtable[] = {  	{  		.procname	= "enabled",  		.data		= &unaligned_fixup, @@ -138,7 +138,7 @@ static ctl_table unaligned_subtable[] = {  	{}  }; -static ctl_table unaligned_table[] = { +static struct ctl_table unaligned_table[] = {  	{  		.procname	= "unaligned_fixup",  		.mode		= 0555, diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 4c34caea9dd..112ababa9e5 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -228,13 +228,10 @@ early_param("isolnodes", setup_isolnodes);  #if defined(CONFIG_PCI) && !defined(__tilegx__)  static int __init setup_pci_reserve(char* str)  { -	unsigned long mb; - -	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 || -	    mb > 3 * 1024) +	if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 || +	    pci_reserve_mb > 3 * 1024)  		return -EINVAL; -	pci_reserve_mb = mb;  	pr_info("Reserving %dMB for PCIE root complex mappings\n",  		pci_reserve_mb);  	return 0; @@ -691,7 +688,7 @@ static void __init setup_bootmem_allocator(void)  	/* Reserve any memory excluded by "memmap" arguments. 
*/  	for (i = 0; i < memmap_nr; ++i) {  		struct memmap_entry *m = &memmap_map[i]; -		reserve_bootmem(m->addr, m->size, 0); +		reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT);  	}  #ifdef CONFIG_BLK_DEV_INITRD @@ -715,7 +712,8 @@ static void __init setup_bootmem_allocator(void)  #ifdef CONFIG_KEXEC  	if (crashk_res.start != crashk_res.end) -		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0); +		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), +				BOOTMEM_DEFAULT);  #endif  } @@ -1268,8 +1266,7 @@ static void __init validate_va(void)  	if ((long)VMALLOC_START >= 0)  		early_panic(  			"Linux VMALLOC region below the 2GB line (%#lx)!\n" -			"Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" -			"or smaller VMALLOC_RESERVE.\n", +			"Reconfigure the kernel with smaller VMALLOC_RESERVE.\n",  			VMALLOC_START);  #endif  } diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 2d1dbf38a9a..d1d026f0126 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -321,14 +321,13 @@ int show_unhandled_signals = 1;  static int __init crashinfo(char *str)  { -	unsigned long val;  	const char *word;  	if (*str == '\0') -		val = 2; -	else if (*str != '=' || strict_strtoul(++str, 0, &val) != 0) +		show_unhandled_signals = 2; +	else if (*str != '=' || kstrtoint(++str, 0, &show_unhandled_signals) != 0)  		return 0; -	show_unhandled_signals = val; +  	switch (show_unhandled_signals) {  	case 0:  		word = "No"; diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 362284af3af..c93977a6211 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -23,6 +23,7 @@  #include <linux/mmzone.h>  #include <linux/dcache.h>  #include <linux/fs.h> +#include <linux/string.h>  #include <asm/backtrace.h>  #include <asm/page.h>  #include <asm/ucontext.h> @@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,  	}  	if (vma->vm_file) { -		char *s;  		p = d_path(&vma->vm_file->f_path, buf, bufsize);  		if (IS_ERR(p))  			p = "?"; -		s = strrchr(p, '/'); -		if (s) -			p = s+1; +		name = kbasename(p);  	} else { -		p = "anon"; +		name = "anon";  	}  	/* Generate a string description of the vma info. */ -	namelen = strlen(p); +	namelen = strlen(name);  	remaining = (bufsize - 1) - namelen; -	memmove(buf, p, namelen); +	memmove(buf, name, namelen);  	snprintf(buf + namelen, remaining, "[%lx+%lx] ",  		 vma->vm_start, vma->vm_end - vma->vm_start);  } diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index 5d10642db63..462dcd0c170 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -236,7 +236,15 @@ cycles_t ns2cycles(unsigned long nsecs)  	 * clock frequency.  	 */  	struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer); -	return ((u64)nsecs * dev->mult) >> dev->shift; + +	/* +	 * as in clocksource.h and x86's timer.h, we split the calculation +	 * into 2 parts to avoid unecessary overflow of the intermediate +	 * value. This will not lead to any loss of precision. 
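
The split works because nsecs = quot * 2^shift + rem with rem < 2^shift, so (nsecs * mult) >> shift equals quot * mult + ((rem * mult) >> shift) exactly; and since rem < 2^shift and dev->mult is a 32-bit value, the partial product rem * mult always fits in 64 bits, whereas nsecs * dev->mult could overflow for large nsecs.
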
+	 */ +	u64 quot = (u64)nsecs >> dev->shift; +	u64 rem  = (u64)nsecs & ((1ULL << dev->shift) - 1); +	return quot * dev->mult + ((rem * dev->mult) >> dev->shift);  }  void update_vsyscall_tz(void) diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 6b603d556ca..f3ceb6308e4 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -42,10 +42,9 @@ static int __init setup_unaligned_fixup(char *str)  	 * will still parse the instruction, then fire a SIGBUS with  	 * the correct address from inside the single_step code.  	 */ -	long val; -	if (strict_strtol(str, 0, &val) != 0) +	if (kstrtoint(str, 0, &unaligned_fixup) != 0)  		return 0; -	unaligned_fixup = val; +  	pr_info("Fixups for unaligned data accesses are %s\n",  	       unaligned_fixup >= 0 ?  	       (unaligned_fixup ? "enabled" : "disabled") : diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c index b425fb6a480..c02ea2a45f6 100644 --- a/arch/tile/kernel/unaligned.c +++ b/arch/tile/kernel/unaligned.c @@ -182,18 +182,7 @@ static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,  	int i;  	uint64_t reg;  	uint64_t reg_map = 0, alias_reg_map = 0, map; -	bool alias; - -	*ra = -1; -	*rb = -1; - -	if (rd) -		*rd = -1; - -	*clob1 = -1; -	*clob2 = -1; -	*clob3 = -1; -	alias = false; +	bool alias = false;  	/*  	 * Parse fault bundle, find potential used registers and mark @@ -551,8 +540,8 @@ static tilegx_bundle_bits  jit_x1_bnezt(int ra, int broff)  /*   * This function generates unalign fixup JIT.   * - * We fist find unalign load/store instruction's destination, source - * reguisters: ra, rb and rd. and 3 scratch registers by calling + * We first find unalign load/store instruction's destination, source + * registers: ra, rb and rd. and 3 scratch registers by calling   * find_regs(...). 3 scratch clobbers should not alias with any register   * used in the fault bundle. Then analyze the fault bundle to determine   * if it's a load or store, operand width, branch or address increment etc. @@ -569,7 +558,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,  	tilegx_bundle_bits bundle_2 = 0;  	/* If bundle_2_enable = false, bundle_2 is fnop/nop operation. 
*/  	bool     bundle_2_enable = true; -	uint64_t ra, rb, rd = -1, clob1, clob2, clob3; +	uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;  	/*  	 * Indicate if the unalign access  	 * instruction's registers hit with diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile index e2b7a2f4ee4..a025f63d54c 100644 --- a/arch/tile/kernel/vdso/Makefile +++ b/arch/tile/kernel/vdso/Makefile @@ -104,7 +104,7 @@ $(obj-vdso32:%=%): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)  $(obj-vdso32:%=%): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)  $(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c -	$(call if_changed,cc_o_c) +	$(call if_changed_rule,cc_o_c)  $(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S  	$(call if_changed,as_o_S) diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c index 759efa337be..c89b211fd9e 100644 --- a/arch/tile/lib/atomic_32.c +++ b/arch/tile/lib/atomic_32.c @@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)  EXPORT_SYMBOL(_atomic_xor); -u64 _atomic64_xchg(u64 *v, u64 n) +long long _atomic64_xchg(long long *v, long long n)  {  	return __atomic64_xchg(v, __atomic_setup(v), n);  }  EXPORT_SYMBOL(_atomic64_xchg); -u64 _atomic64_xchg_add(u64 *v, u64 i) +long long _atomic64_xchg_add(long long *v, long long i)  {  	return __atomic64_xchg_add(v, __atomic_setup(v), i);  }  EXPORT_SYMBOL(_atomic64_xchg_add); -u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u) +long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)  {  	/*  	 * Note: argument order is switched here since it is easier @@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)  }  EXPORT_SYMBOL(_atomic64_xchg_add_unless); -u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n) +long long _atomic64_cmpxchg(long long *v, long long o, long long n)  {  	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);  } diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 4c288f19945..6c0571216a9 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -149,8 +149,6 @@ static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)  	pmd_k = vmalloc_sync_one(pgd, address);  	if (!pmd_k)  		return -1; -	if (pmd_huge(*pmd_k)) -		return 0;   /* support TILE huge_vmap() API */  	pte_k = pte_offset_kernel(pmd_k, address);  	if (!pte_present(*pte_k))  		return -1; diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 004ba568d93..33294fdc402 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)  	if (put_page_testzero(page)) {  		homecache_change_page_home(page, order, PAGE_HOME_HASH);  		if (order == 0) { -			free_hot_cold_page(page, 0); +			free_hot_cold_page(page, false);  		} else {  			init_page_count(page);  			__free_pages(page, order); diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 0cb3bbaa580..e514899e110 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -166,11 +166,6 @@ int pud_huge(pud_t pud)  	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);  } -int pmd_huge_support(void) -{ -	return 1; -} -  struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,  			     pmd_t *pmd, int write)  { diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index 4e316deb92f..bfb3127b4df 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c @@ -273,9 +273,9 @@ static pgprot_t __init init_pgprot(ulong address)  	/*  	 * Otherwise we just hand 
out consecutive cpus.  To avoid  	 * requiring this function to hold state, we just walk forward from -	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach -	 * the requested address, while walking cpu home around kdata_mask. -	 * This is typically no more than a dozen or so iterations. +	 * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to +	 * reach the requested address, while walking cpu home around +	 * kdata_mask. This is typically no more than a dozen or so iterations.  	 */  	page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;  	BUG_ON(address < page || address >= (ulong)_end); @@ -828,10 +828,6 @@ void __init mem_init(void)  	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",  	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);  #endif -#ifdef CONFIG_HUGEVMAP -	printk(KERN_DEBUG "  HUGEMAP %#lx - %#lx\n", -	       HUGE_VMAP_BASE, HUGE_VMAP_END - 1); -#endif  	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",  	       _VMALLOC_START, _VMALLOC_END - 1);  #ifdef __tilegx__ @@ -916,7 +912,7 @@ static long __write_once initfree = 1;  static int __init set_initfree(char *str)  {  	long val; -	if (strict_strtol(str, 0, &val) == 0) { +	if (kstrtol(str, 0, &val) == 0) {  		initfree = val;  		pr_info("initfree: %s free init pages\n",  			initfree ? "will" : "won't"); diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index 2deaddf3e01..5e86eac4bfa 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c @@ -127,8 +127,7 @@ void shatter_huge_page(unsigned long addr)  	}  	/* Shatter the huge page into the preallocated L2 page table. */ -	pmd_populate_kernel(&init_mm, pmd, -			    get_prealloc_pte(pte_pfn(*(pte_t *)pmd))); +	pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));  #ifdef __PAGETABLE_PMD_FOLDED  	/* Walk every pgd on the system and update the pmd there. */ @@ -242,6 +241,11 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,  	if (p == NULL)  		return NULL; +	if (!pgtable_page_ctor(p)) { +		__free_pages(p, L2_USER_PGTABLE_ORDER); +		return NULL; +	} +  	/*  	 * Make every page have a page_count() of one, not just the first.  	 * We don't use __GFP_COMP since it doesn't look like it works @@ -252,7 +256,6 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,  		inc_zone_page_state(p+i, NR_PAGETABLE);  	} -	pgtable_page_ctor(p);  	return p;  }  | 
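
A note on the ns2cycles() rework in arch/tile/kernel/time.c above: splitting nsecs into a quotient and remainder by dev->shift keeps every intermediate product inside 64 bits while still computing exactly (nsecs * mult) >> shift. The stand-alone user-space sketch below illustrates the same quotient/remainder trick; the mult/shift pair is invented for the example (roughly a 1.2 GHz tick with shift 18) and is not taken from the tile timer code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented mult/shift pair: about 1.2 cycles per ns with shift = 18. */
#define DEMO_MULT  314573u
#define DEMO_SHIFT 18u

/* Naive form: the 64-bit product nsecs * mult can wrap for large nsecs. */
static uint64_t ns2cycles_naive(uint64_t nsecs)
{
	return (nsecs * DEMO_MULT) >> DEMO_SHIFT;
}

/*
 * Split form, as in the patch: nsecs = quot * 2^shift + rem, so
 * (nsecs * mult) >> shift == quot * mult + ((rem * mult) >> shift)
 * exactly, and both partial products stay far below 2^64.
 */
static uint64_t ns2cycles_split(uint64_t nsecs)
{
	uint64_t quot = nsecs >> DEMO_SHIFT;
	uint64_t rem  = nsecs & ((1ULL << DEMO_SHIFT) - 1);

	return quot * DEMO_MULT + ((rem * DEMO_MULT) >> DEMO_SHIFT);
}

int main(void)
{
	uint64_t small = 1000000;     /* 1 ms: both forms agree exactly      */
	uint64_t big = 1ULL << 46;    /* ~19 hours in ns: naive product wraps */

	printf("small: naive=%" PRIu64 " split=%" PRIu64 "\n",
	       ns2cycles_naive(small), ns2cycles_split(small));
	printf("big:   naive=%" PRIu64 " split=%" PRIu64 "\n",
	       ns2cycles_naive(big), ns2cycles_split(big));
	return 0;
}

Running it shows the two forms agreeing for small inputs and diverging once the naive 64-bit product wraps, which is exactly the overflow the new comment in the patch refers to, and the rewritten comment's claim of "no loss of precision" follows from the identity in the split-form comment.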

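A second note, on the new pmc helpers (pmc_mask(), mask_pmc_interrupts(), unmask_pmc_interrupts()) earlier in the patch: pmc_mask() simply ORs the two PERF_COUNT interrupt bits into one 64-bit mask, and the CHIP_HAS_SPLIT_INTR_MASK() #error guards the assumption, presumably relied on by the interrupt-mask handling, that both bits sit in the upper 32-bit word when the mask is split across two registers. The sketch below uses made-up interrupt numbers purely to show where the bits land; the real INT_PERF_COUNT and INT_AUX_PERF_COUNT values come from the TILE interrupt headers, not from this example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up interrupt numbers for illustration; both are >= 32 here, */
/* which is exactly what the #error in the patch insists on.        */
#define DEMO_INT_PERF_COUNT      33
#define DEMO_INT_AUX_PERF_COUNT  49

/* Same shape as pmc_mask() in the patch, with the demo numbers. */
static uint64_t demo_pmc_mask(void)
{
	uint64_t mask = 1ULL << DEMO_INT_PERF_COUNT;

	mask |= 1ULL << DEMO_INT_AUX_PERF_COUNT;
	return mask;
}

int main(void)
{
	uint64_t mask = demo_pmc_mask();

	/* With a split interrupt mask the low and high 32-bit halves go to
	 * separate registers; here both PMC bits fall in the high half. */
	printf("mask      = 0x%016" PRIx64 "\n", mask);
	printf("low word  = 0x%08" PRIx32 "\n", (uint32_t)mask);
	printf("high word = 0x%08" PRIx32 "\n", (uint32_t)(mask >> 32));
	return 0;
}

If either demo number were below 32, its bit would land in the low word and the single-word assumption the #error protects would no longer hold.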