Diffstat (limited to 'lib/swiotlb.c')
-rw-r--r--  lib/swiotlb.c  70
1 file changed, 45 insertions, 25 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4e8686c7e5a..4abda074ea4 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -38,6 +38,9 @@
 #include <linux/bootmem.h>
 #include <linux/iommu-helper.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/swiotlb.h>
+
 #define OFFSET(val,align) ((unsigned long)	\
 	                   ( (val) & ( (align) - 1)))
 
@@ -83,6 +86,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 static phys_addr_t *io_tlb_orig_addr;
 
 /*
@@ -169,8 +173,9 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
-						PAGE_ALIGN(io_tlb_overflow));
+	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
+						PAGE_ALIGN(io_tlb_overflow),
+						PAGE_SIZE);
 	if (!v_overflow_buffer)
 		return -ENOMEM;
 
@@ -181,11 +186,17 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-	for (i = 0; i < io_tlb_nslabs; i++)
- 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+	io_tlb_list = memblock_virt_alloc(
+				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
+				PAGE_SIZE);
+	io_tlb_orig_addr = memblock_virt_alloc(
+				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
+				PAGE_SIZE);
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+	}
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
 	if (verbose)
 		swiotlb_print_info();
@@ -212,13 +223,13 @@ swiotlb_init(int verbose)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	/* Get IO TLB memory from the low pages */
-	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
+	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
 
 	if (io_tlb_start)
-		free_bootmem(io_tlb_start,
-				 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		memblock_free_early(io_tlb_start,
+				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	pr_warn("Cannot allocate SWIOTLB buffer");
 	no_iotlb_memory = true;
 }
@@ -305,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_list)
 		goto cleanup3;
 
-	for (i = 0; i < io_tlb_nslabs; i++)
- 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-	io_tlb_index = 0;
-
 	io_tlb_orig_addr = (phys_addr_t *)
 		__get_free_pages(GFP_KERNEL,
 				 get_order(io_tlb_nslabs *
@@ -316,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_orig_addr)
 		goto cleanup4;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+	}
+	io_tlb_index = 0;
 
 	swiotlb_print_info();
 
@@ -354,19 +365,19 @@ void __init swiotlb_free(void)
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		free_bootmem_late(io_tlb_overflow_buffer,
-				  PAGE_ALIGN(io_tlb_overflow));
-		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
-		free_bootmem_late(__pa(io_tlb_list),
-				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-		free_bootmem_late(io_tlb_start,
-				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		memblock_free_late(io_tlb_overflow_buffer,
+				   PAGE_ALIGN(io_tlb_overflow));
+		memblock_free_late(__pa(io_tlb_orig_addr),
+				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+		memblock_free_late(__pa(io_tlb_list),
+				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+		memblock_free_late(io_tlb_start,
+				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 	io_tlb_nslabs = 0;
 }
 
-static int is_swiotlb_buffer(phys_addr_t paddr)
+int is_swiotlb_buffer(phys_addr_t paddr)
 {
 	return paddr >= io_tlb_start && paddr < io_tlb_end;
 }
@@ -502,6 +513,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 
 not_found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
+	if (printk_ratelimit())
+		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
 	return SWIOTLB_MAP_ERROR;
 found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -546,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	/*
	 * First, sync the memory before unmapping the entry
 	 */
-	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+	if (orig_addr != INVALID_PHYS_ADDR &&
+	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
 	/*
@@ -563,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 		 * Step 1: return the slots to the free list, merging the
 		 * slots with superceeding slots
 		 */
-		for (i = index + nslots - 1; i >= index; i--)
+		for (i = index + nslots - 1; i >= index; i--) {
 			io_tlb_list[i] = ++count;
+			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+		}
 		/*
 		 * Step 2: merge the returned slots with the preceding slots,
 		 * if available (non zero)
@@ -583,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
+	if (orig_addr == INVALID_PHYS_ADDR)
+		return;
 	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
 	switch (target) {
@@ -726,6 +744,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
+	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
 	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir);
 	if (map == SWIOTLB_MAP_ERROR) {
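
Note on the INVALID_PHYS_ADDR change above: physical address 0 can be a legitimate DMA origin, so a zero-filled io_tlb_orig_addr array cannot distinguish "slot never mapped" from "slot mapped at PA 0", and the old test `if (orig_addr)` got both cases wrong. The all-ones sentinel makes the unused state unambiguous. A minimal userspace sketch of the pattern follows; it mirrors the kernel names for clarity but is an illustration, not the kernel code:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;
/* All-ones is never a valid slot origin, unlike 0. */
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

#define NSLABS 4
static phys_addr_t orig_addr[NSLABS];

int main(void)
{
	int i;

	/* Mark every slot unused up front, as the init paths now do. */
	for (i = 0; i < NSLABS; i++)
		orig_addr[i] = INVALID_PHYS_ADDR;

	orig_addr[1] = 0;	/* a real mapping whose origin is PA 0 */

	for (i = 0; i < NSLABS; i++) {
		/* The sync path can skip unused slots without mistaking
		 * the legitimate PA-0 mapping in slot 1 for one. */
		if (orig_addr[i] == INVALID_PHYS_ADDR)
			continue;
		printf("slot %d: bounce from %#llx\n", i,
		       (unsigned long long)orig_addr[i]);
	}
	return 0;
}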

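The unmap hunk also touches the free-slot accounting: io_tlb_list[i] holds the number of contiguous free slots starting at index i, and free runs never cross an IO_TLB_SEGSIZE-aligned boundary, which is what the `IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE)` initializer encodes. Below is a small userspace model of the two merge steps in swiotlb_tbl_unmap_single(); the segment-boundary guards follow the surrounding kernel logic, and the toy IO_TLB_SEGSIZE of 8 (the kernel uses 128) is an assumption just to keep the demo short:

#include <stdio.h>

#define IO_TLB_SEGSIZE	8	/* kernel value is 128; small for the demo */
#define NSLABS		16
#define OFFSET(val, align)	((val) & ((align) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static int io_tlb_list[NSLABS];

static void free_slots(int index, int nslots)
{
	/* Merge with the following free run only inside this segment. */
	int count = (index + nslots < ALIGN_UP(index + 1, IO_TLB_SEGSIZE))
			? io_tlb_list[index + nslots] : 0;
	int i;

	/* Step 1: return the slots, counting up through the freed range. */
	for (i = index + nslots - 1; i >= index; i--)
		io_tlb_list[i] = ++count;

	/* Step 2: extend preceding free slots in the same segment. */
	for (i = index - 1;
	     i >= 0 && OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1 &&
	     io_tlb_list[i];
	     i--)
		io_tlb_list[i] = ++count;
}

int main(void)
{
	int i;

	for (i = 0; i < NSLABS; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);

	/* Simulate allocating slots 2..4, then freeing them again. */
	for (i = 2; i <= 4; i++)
		io_tlb_list[i] = 0;
	free_slots(2, 3);

	for (i = 0; i < NSLABS; i++)
		printf("%d ", io_tlb_list[i]);
	printf("\n");	/* back to: 8 7 6 5 4 3 2 1 8 7 6 5 4 3 2 1 */
	return 0;
}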