Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/include/asm/tlb.h | 10
-rw-r--r-- arch/arm/kernel/entry-armv.S | 2
-rw-r--r-- arch/arm/kernel/perf_event_v7.c | 28
-rw-r--r-- arch/arm/kernel/ptrace.c | 8
-rw-r--r-- arch/arm/kernel/signal.c | 5
-rw-r--r-- arch/arm/mach-bcmring/arch.c | 2
-rw-r--r-- arch/arm/mach-bcmring/dma.c | 812
-rw-r--r-- arch/arm/mach-bcmring/include/mach/dma.h | 196
-rw-r--r-- arch/arm/mach-davinci/board-da850-evm.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-dm365-evm.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-dm644x-evm.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-dm646x-evm.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-neuros-osd2.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-omapl138-hawk.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-sffsdr.c | 2
-rw-r--r-- arch/arm/mach-davinci/da850.c | 32
-rw-r--r-- arch/arm/mach-omap2/Kconfig | 11
-rw-r--r-- arch/arm/mach-omap2/board-4430sdp.c | 18
-rw-r--r-- arch/arm/mach-omap2/board-omap4panda.c | 18
-rw-r--r-- arch/arm/mach-omap2/cpuidle44xx.c | 5
-rw-r--r-- arch/arm/mach-omap2/devices.c | 1
-rw-r--r-- arch/arm/mach-omap2/display.c | 4
-rw-r--r-- arch/arm/mach-omap2/gpmc.c | 6
-rw-r--r-- arch/arm/mach-omap2/hsmmc.c | 16
-rw-r--r-- arch/arm/mach-omap2/io.c | 4
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c | 21
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 22
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 54
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 2
-rw-r--r-- arch/arm/mach-omap2/pm.c | 3
-rw-r--r-- arch/arm/mach-omap2/prm2xxx_3xxx.c | 1
-rw-r--r-- arch/arm/mach-omap2/smartreflex.c | 2
-rw-r--r-- arch/arm/mach-omap2/timer.c | 2
-rw-r--r-- arch/arm/mach-shmobile/setup-sh7372.c | 2
-rw-r--r-- arch/arm/mm/ioremap.c | 3
-rw-r--r-- arch/avr32/Kconfig | 1
-rw-r--r-- arch/microblaze/kernel/setup.c | 21
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/lib/iomap-pci.c | 4
-rw-r--r-- arch/sh/Kconfig | 1
-rw-r--r-- arch/sh/drivers/pci/pci.c | 4
-rw-r--r-- arch/sparc/Kconfig | 1
-rw-r--r-- arch/sparc/lib/divdi3.S | 16
-rw-r--r-- arch/x86/include/asm/cmpxchg.h | 6
-rw-r--r-- arch/x86/include/asm/kvm_emulate.h | 16
-rw-r--r-- arch/x86/kernel/dumpstack.c | 3
-rw-r--r-- arch/x86/kernel/dumpstack_64.c | 8
-rw-r--r-- arch/x86/kernel/reboot.c | 36
-rw-r--r-- arch/x86/kvm/emulate.c | 51
-rw-r--r-- arch/x86/kvm/x86.c | 45
-rw-r--r-- arch/x86/mm/fault.c | 4
-rw-r--r-- arch/xtensa/include/asm/string.h | 3
52 files changed, 345 insertions, 1180 deletions
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 5d3ed7e3856..314d4664eae 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -198,7 +198,15 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
pgtable_page_dtor(pte);
- tlb_add_flush(tlb, addr);
+
+ /*
+ * With the classic ARM MMU, a pte page has two corresponding pmd
+ * entries, each covering 1MB.
+ */
+ addr &= PMD_MASK;
+ tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
+ tlb_add_flush(tlb, addr + SZ_1M);
+
tlb_remove_page(tlb, pte);
}
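
For reference, the two tlb_add_flush() calls added above pick one address in each of the two 1MB halves of the span covered by the freed pte page, so the gathered flush range ends up spanning both pmd entries. A minimal standalone sketch of that arithmetic (not part of the patch), assuming 4K pages and the classic 2MB pmd span; the EX_* constants are illustrative stand-ins for the kernel's PMD_MASK, SZ_1M and PAGE_SIZE:

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel constants (classic ARM, 4K pages). */
    #define EX_PAGE_SIZE  0x1000UL                 /* 4 KB page */
    #define EX_SZ_1M      0x100000UL               /* 1 MB section per pmd entry */
    #define EX_PMD_MASK   (~(2 * EX_SZ_1M - 1))    /* pte page covers a 2 MB span */

    int main(void)
    {
            unsigned long addr = 0x7f312000UL;     /* example address in the freed range */

            addr &= EX_PMD_MASK;                   /* 0x7f200000: start of the 2 MB span */

            /* One address inside each 1 MB half, as in __pte_free_tlb() above. */
            printf("flush %#lx (last page of first pmd entry)\n",
                   addr + EX_SZ_1M - EX_PAGE_SIZE); /* 0x7f2ff000 */
            printf("flush %#lx (first page of second pmd entry)\n",
                   addr + EX_SZ_1M);                /* 0x7f300000 */
            return 0;
    }
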
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3a456c6c700..be16a48007b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -790,7 +790,7 @@ __kuser_cmpxchg64: @ 0xffff0f60
smp_dmb arm
rsbs r0, r3, #0 @ set returned val and C flag
ldmfd sp!, {r4, r5, r6, r7}
- bx lr
+ usr_ret lr
#elif !defined(CONFIG_SMP)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 460bbbb6b88..6933244c68f 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -469,6 +469,20 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
/*
@@ -579,6 +593,20 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
/*
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e1d5e1929fb..e33870ff0ac 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -699,10 +699,13 @@ static int vfp_set(struct task_struct *target,
{
int ret;
struct thread_info *thread = task_thread_info(target);
- struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
+ struct vfp_hard_struct new_vfp;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
+ vfp_sync_hwstate(thread);
+ new_vfp = thread->vfpstate.hard;
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpregs,
user_fpregs_offset,
@@ -723,9 +726,8 @@ static int vfp_set(struct task_struct *target,
if (ret)
return ret;
- vfp_sync_hwstate(thread);
- thread->vfpstate.hard = new_vfp;
vfp_flush_hwstate(thread);
+ thread->vfpstate.hard = new_vfp;
return 0;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 0340224cf73..9e617bd4a14 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -227,6 +227,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
+ vfp_flush_hwstate(thread);
+
/*
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
@@ -251,9 +253,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
- if (!err)
- vfp_flush_hwstate(thread);
-
return err ? -EFAULT : 0;
}
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index 9e5e7552498..45c97b1ee9b 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -194,6 +194,6 @@ MACHINE_START(BCMRING, "BCMRING")
.init_early = bcmring_init_early,
.init_irq = bcmring_init_irq,
.timer = &bcmring_timer,
- .init_machine = bcmring_init_machine
+ .init_machine = bcmring_init_machine,
.restart = bcmring_restart,
MACHINE_END
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 1a1a27dd565..1024396797e 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -33,17 +33,11 @@
#include <mach/timer.h>
-#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <mach/dma.h>
-/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
-/* especially since dc4 doesn't use kmalloc'd memory. */
-
-#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
-
/* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */
@@ -53,24 +47,12 @@
#define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f)
#define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f)
-#define DMA_MAP_DEBUG 0
-
-#if DMA_MAP_DEBUG
-# define DMA_MAP_PRINT(fmt, args...) printk("%s: " fmt, __func__, ## args)
-#else
-# define DMA_MAP_PRINT(fmt, args...)
-#endif
/* ---- Private Variables ------------------------------------------------ */
static DMA_Global_t gDMA;
static struct proc_dir_entry *gDmaDir;
-static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
-
#include "dma_device.c"
/* ---- Private Function Prototypes -------------------------------------- */
@@ -79,34 +61,6 @@ static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
/****************************************************************************/
/**
-* Displays information for /proc/dma/mem-type
-*/
-/****************************************************************************/
-
-static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- int len = 0;
-
- len += sprintf(buf + len, "dma_map_mem statistics\n");
- len +=
- sprintf(buf + len, "coherent: %d\n",
- atomic_read(&gDmaStatMemTypeCoherent));
- len +=
- sprintf(buf + len, "kmalloc: %d\n",
- atomic_read(&gDmaStatMemTypeKmalloc));
- len +=
- sprintf(buf + len, "vmalloc: %d\n",
- atomic_read(&gDmaStatMemTypeVmalloc));
- len +=
- sprintf(buf + len, "user: %d\n",
- atomic_read(&gDmaStatMemTypeUser));
-
- return len;
-}
-
-/****************************************************************************/
-/**
* Displays information for /proc/dma/channels
*/
/****************************************************************************/
@@ -846,8 +800,6 @@ int dma_init(void)
dma_proc_read_channels, NULL);
create_proc_read_entry("devices", 0, gDmaDir,
dma_proc_read_devices, NULL);
- create_proc_read_entry("mem-type", 0, gDmaDir,
- dma_proc_read_mem_type, NULL);
}
out:
@@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for.
}
EXPORT_SYMBOL(dma_set_device_handler);
-
-/****************************************************************************/
-/**
-* Initializes a memory mapping structure
-*/
-/****************************************************************************/
-
-int dma_init_mem_map(DMA_MemMap_t *memMap)
-{
- memset(memMap, 0, sizeof(*memMap));
-
- sema_init(&memMap->lock, 1);
-
- return 0;
-}
-
-EXPORT_SYMBOL(dma_init_mem_map);
-
-/****************************************************************************/
-/**
-* Releases any memory currently being held by a memory mapping structure.
-*/
-/****************************************************************************/
-
-int dma_term_mem_map(DMA_MemMap_t *memMap)
-{
- down(&memMap->lock); /* Just being paranoid */
-
- /* Free up any allocated memory */
-
- up(&memMap->lock);
- memset(memMap, 0, sizeof(*memMap));
-
- return 0;
-}
-
-EXPORT_SYMBOL(dma_term_mem_map);
-
-/****************************************************************************/
-/**
-* Looks at a memory address and categorizes it.
-*
-* @return One of the values from the DMA_MemType_t enumeration.
-*/
-/****************************************************************************/
-
-DMA_MemType_t dma_mem_type(void *addr)
-{
- unsigned long addrVal = (unsigned long)addr;
-
- if (addrVal >= CONSISTENT_BASE) {
- /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
-
- /* dma_alloc_xxx pages are physically and virtually contiguous */
-
- return DMA_MEM_TYPE_DMA;
- }
-
- /* Technically, we could add one more classification. Addresses between VMALLOC_END */
- /* and the beginning of the DMA virtual address could be considered to be I/O space. */
- /* Right now, nobody cares about this particular classification, so we ignore it. */
-
- if (is_vmalloc_addr(addr)) {
- /* Address comes from the vmalloc'd region. Pages are virtually */
- /* contiguous but NOT physically contiguous */
-
- return DMA_MEM_TYPE_VMALLOC;
- }
-
- if (addrVal >= PAGE_OFFSET) {
- /* PAGE_OFFSET is typically 0xC0000000 */
-
- /* kmalloc'd pages are physically contiguous */
-
- return DMA_MEM_TYPE_KMALLOC;
- }
-
- return DMA_MEM_TYPE_USER;
-}
-
-EXPORT_SYMBOL(dma_mem_type);
-
-/****************************************************************************/
-/**
-* Looks at a memory address and determines if we support DMA'ing to/from
-* that type of memory.
-*
-* @return boolean -
-* return value != 0 means dma supported
-* return value == 0 means dma not supported
-*/
-/****************************************************************************/
-
-int dma_mem_supports_dma(void *addr)
-{
- DMA_MemType_t memType = dma_mem_type(addr);
-
- return (memType == DMA_MEM_TYPE_DMA)
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
- || (memType == DMA_MEM_TYPE_KMALLOC)
-#endif
- || (memType == DMA_MEM_TYPE_USER);
-}
-
-EXPORT_SYMBOL(dma_mem_supports_dma);
-
-/****************************************************************************/
-/**
-* Maps in a memory region such that it can be used for performing a DMA.
-*
-* @return
-*/
-/****************************************************************************/
-
-int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
- enum dma_data_direction dir /* Direction that the mapping will be going */
- ) {
- int rc;
-
- down(&memMap->lock);
-
- DMA_MAP_PRINT("memMap: %p\n", memMap);
-
- if (memMap->inUse) {
- printk(KERN_ERR "%s: memory map %p is already being used\n",
- __func__, memMap);
- rc = -EBUSY;
- goto out;
- }
-
- memMap->inUse = 1;
- memMap->dir = dir;
- memMap->numRegionsUsed = 0;
-
- rc = 0;
-
-out:
-
- DMA_MAP_PRINT("returning %d", rc);
-
- up(&memMap->lock);
-
- return rc;
-}
-
-EXPORT_SYMBOL(dma_map_start);
-
-/****************************************************************************/
-/**
-* Adds a segment of memory to a memory map. Each segment is both
-* physically and virtually contiguous.
-*
-* @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */
- DMA_Region_t *region, /* Region that the segment belongs to */
- void *virtAddr, /* Virtual address of the segment being added */
- dma_addr_t physAddr, /* Physical address of the segment being added */
- size_t numBytes /* Number of bytes of the segment being added */
- ) {
- DMA_Segment_t *segment;
-
- DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
- physAddr, numBytes);
-
- /* Sanity check */
-
- if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
- || (((unsigned long)virtAddr + numBytes)) >
- ((unsigned long)region->virtAddr + region->numBytes)) {
- printk(KERN_ERR
- "%s: virtAddr %p is outside region @ %p len: %d\n",
- __func__, virtAddr, region->virtAddr, region->numBytes);
- return -EINVAL;
- }
-
- if (region->numSegmentsUsed > 0) {
- /* Check to see if this segment is physically contiguous with the previous one */
-
- segment = &region->segment[region->numSegmentsUsed - 1];
-
- if ((segment->physAddr + segment->numBytes) == physAddr) {
- /* It is - just add on to the end */
-
- DMA_MAP_PRINT("appending %d bytes to last segment\n",
- numBytes);
-
- segment->numBytes += numBytes;
-
- return 0;
- }
- }
-
- /* Reallocate to hold more segments, if required. */
-
- if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
- DMA_Segment_t *newSegment;
- size_t oldSize =
- region->numSegmentsAllocated * sizeof(*newSegment);
- int newAlloc = region->numSegmentsAllocated + 4;
- size_t newSize = newAlloc * sizeof(*newSegment);
-
- newSegment = kmalloc(newSize, GFP_KERNEL);
- if (newSegment == NULL) {
- return -ENOMEM;
- }
- memcpy(newSegment, region->segment, oldSize);
- memset(&((uint8_t *) newSegment)[oldSize], 0,
- newSize - oldSize);
- kfree(region->segment);
-
- region->numSegmentsAllocated = newAlloc;
- region->segment = newSegment;
- }
-
- segment = &region->segment[region->numSegmentsUsed];
- region->numSegmentsUsed++;
-
- segment->virtAddr = virtAddr;
- segment->physAddr = physAddr;
- segment->numBytes = numBytes;
-
- DMA_MAP_PRINT("returning success\n");
-
- return 0;
-}
-
-/****************************************************************************/
-/**
-* Adds a region of memory to a memory map. Each region is virtually
-* contiguous, but not necessarily physically contiguous.
-*
-* @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
- void *mem, /* Virtual address that we want to get a map of */
- size_t numBytes /* Number of bytes being mapped */
- ) {
- unsigned long addr = (unsigned long)mem;
- unsigned int offset;
- int rc = 0;
- DMA_Region_t *region;
- dma_addr_t physAddr;
-
- down(&memMap->lock);
-
- DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
-
- if (!memMap->inUse) {
- printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
- __func__);
- rc = -EINVAL;
- goto out;
- }
-
- /* Reallocate to hold more regions. */
-
- if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
- DMA_Region_t *newRegion;
- size_t oldSize =
- memMap->numRegionsAllocated * sizeof(*newRegion);
- int newAlloc = memMap->numRegionsAllocated + 4;
- size_t newSize = newAlloc * sizeof(*newRegion);
-
- newRegion = kmalloc(newSize, GFP_KERNEL);
- if (newRegion == NULL) {
- rc = -ENOMEM;
- goto out;
- }
- memcpy(newRegion, memMap->region, oldSize);
- memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
-
- kfree(memMap->region);
-
- memMap->numRegionsAllocated = newAlloc;
- memMap->region = newRegion;
- }
-
- region = &memMap->region[memMap->numRegionsUsed];
- memMap->numRegionsUsed++;
-
- offset = addr & ~PAGE_MASK;
-
- region->memType = dma_mem_type(mem);
- region->virtAddr = mem;
- region->numBytes = numBytes;
- region->numSegmentsUsed = 0;
- region->numLockedPages = 0;
- region->lockedPages = NULL;
-
- switch (region->memType) {
- case DMA_MEM_TYPE_VMALLOC:
- {
- atomic_inc(&gDmaStatMemTypeVmalloc);
-
- /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
-
- /* vmalloc'd pages are not physically contiguous */
-
- rc = -EINVAL;
- break;
- }
-
- case DMA_MEM_TYPE_KMALLOC:
- {
- atomic_inc(&gDmaStatMemTypeKmalloc);
-
- /* kmalloc'd pages are physically contiguous, so they'll have exactly */
- /* one segment */
-
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
- physAddr =
- dma_map_single(NULL, mem, numBytes, memMap->dir);
- rc = dma_map_add_segment(memMap, region, mem, physAddr,
- numBytes);
-#else
- rc = -EINVAL;
-#endif
- break;
- }
-
- case DMA_MEM_TYPE_DMA:
- {
- /* dma_alloc_xxx pages are physically contiguous */
-
- atomic_inc(&gDmaStatMemTypeCoherent);
-
- physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
-
- dma_sync_single_for_cpu(NULL, physAddr, numBytes,
- memMap->dir);
- rc = dma_map_add_segment(memMap, region, mem, physAddr,
- numBytes);
- break;
- }
-
- case DMA_MEM_TYPE_USER:
- {
- size_t firstPageOffset;
- size_t firstPageSize;
- struct page **pages;
- struct task_struct *userTask;
-
- atomic_inc(&gDmaStatMemTypeUser);
-
-#if 1
- /* If the pages are user pages, then the dma_mem_map_set_user_task function */
- /* must have been previously called. */
-
- if (memMap->userTask == NULL) {
- printk(KERN_ERR
- "%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
- __func__);
- return -EINVAL;
- }
-
- /* User pages need to be locked. */
-
- firstPageOffset =
- (unsigned long)region->virtAddr & (PAGE_SIZE - 1);
- firstPageSize = PAGE_SIZE - firstPageOffset;
-
- region->numLockedPages = (firstPageOffset
- + region->numBytes +
- PAGE_SIZE - 1) / PAGE_SIZE;
- pages =
- kmalloc(region->numLockedPages *
- sizeof(struct page *), GFP_KERNEL);
-
- if (pages == NULL) {
- region->numLockedPages = 0;
- return -ENOMEM;
- }
-
- userTask = memMap->userTask;
-
- down_read(&userTask->mm->mmap_sem);
- rc = get_user_pages(userTask, /* task */
- userTask->mm, /* mm */
- (unsigned long)region->virtAddr, /* start */
- region->numLockedPages, /* len */
- memMap->dir == DMA_FROM_DEVICE, /* write */
- 0, /* force */
- pages, /* pages (array of pointers to page) */
- NULL); /* vmas */
- up_read(&userTask->mm->mmap_sem);
-
- if (rc != region->numLockedPages) {
- kfree(pages);
- region->numLockedPages = 0;
-
- if (rc >= 0) {
- rc = -EINVAL;
- }
- } else {
- uint8_t *virtAddr = region->virtAddr;
- size_t bytesRemaining;
- int pageIdx;
-
- rc = 0; /* Since get_user_pages returns +ve number */
-
- region->lockedPages = pages;
-
- /* We've locked the user pages. Now we need to walk them and figure */
- /* out the physical addresses. */
-
- /* The first page may be partial */
-
- dma_map_add_segment(memMap,
- region,
- virtAddr,
- PFN_PHYS(page_to_pfn
- (pages[0])) +
- firstPageOffset,
- firstPageSize);
-
- virtAddr += firstPageSize;
- bytesRemaining =
- region->numBytes - firstPageSize;
-
- for (pageIdx = 1;
- pageIdx < region->numLockedPages;
- pageIdx++) {
- size_t bytesThisPage =
- (bytesRemaining >
- PAGE_SIZE ? PAGE_SIZE :
- bytesRemaining);
-
- DMA_MAP_PRINT
- ("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
- pageIdx, pages[pageIdx],
- page_to_pfn(pages[pageIdx]),
- PFN_PHYS(page_to_pfn
- (pages[pageIdx])));
-
- dma_map_add_segment(memMap,
- region,
- virtAddr,
- PFN_PHYS(page_to_pfn
- (pages
- [pageIdx])),
- bytesThisPage);
-
- virtAddr += bytesThisPage;
- bytesRemaining -= bytesThisPage;
- }
- }
-#else
- printk(KERN_ERR
- "%s: User mode pages are not yet supported\n",
- __func__);
-
- /* user pages are not physically contiguous */
-
- rc = -EINVAL;
-#endif
- break;
- }
-
- default:
- {
- printk(KERN_ERR "%s: Unsupported memory type: %d\n",
- __func__, region->memType);
-
- rc = -EINVAL;
- break;
- }
- }
-
- if (rc != 0) {
- memMap->numRegionsUsed--;
- }
-
-out:
-
- DMA_MAP_PRINT("returning %d\n", rc);
-
- up(&memMap->lock);
-
- return rc;
-}
-
-EXPORT_SYMBOL(dma_map_add_segment);
-
-/****************************************************************************/
-/**
-* Maps in a memory region such that it can be used for performing a DMA.
-*
-* @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
- void *mem, /* Virtual address that we want to get a map of */
- size_t