author | Jens Axboe <jens.axboe@oracle.com> | 2007-10-22 20:02:46 +0200 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2007-10-22 21:19:59 +0200 |
commit | 58b053e4ce9d2fc3023645c1b96e537c72aa8d9a (patch) | |
tree | 35fbd72eb62a37375bc06c01e356afd7da0c9693 /arch/sparc/kernel | |
parent | f9527f121bd42c5d300815fbf12216bc1a63f60f (diff) | |
Update arch/ to use sg helpers
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r-- | arch/sparc/kernel/ioport.c | 17 |
1 file changed, 8 insertions, 9 deletions
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 9c3ed88853f..97aa50d1e4a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -727,9 +727,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
-		BUG_ON(page_address(sg->page) == NULL);
-		sg->dvma_address =
-			virt_to_phys(page_address(sg->page)) + sg->offset;
+		BUG_ON(page_address(sg_page(sg)) == NULL);
+		sg->dvma_address = virt_to_phys(sg_virt(sg));
 		sg->dvma_length = sg->length;
 	}
 	return nents;
@@ -748,9 +747,9 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
@@ -798,9 +797,9 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
@@ -814,9 +813,9 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
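Context for the change pattern above: in the kernel's scatterlist API, sg_virt(sg) evaluates to page_address(sg_page(sg)) + sg->offset, which is why the two-line dvma_address computation in pci_map_sg() collapses into a single virt_to_phys(sg_virt(sg)) call, while the other three hunks only swap direct sg->page dereferences for sg_page(sg). The following is a minimal, freestanding C sketch of that relationship, not kernel code: struct page, page_address(), and struct scatterlist below are simplified stand-ins for illustration (the real sg_page() also masks scatterlist chaining bits out of page_link, and real pages are not plain byte arrays).

```c
/*
 * Freestanding model (NOT kernel code) of the identity this patch relies on:
 *     sg_virt(sg) == page_address(sg_page(sg)) + sg->offset
 * All types and helpers below are simplified stand-ins.
 */
#include <assert.h>
#include <stdio.h>

struct page { unsigned char mem[4096]; };      /* toy backing store for one page */

struct scatterlist {
	struct page *page;                     /* pre-2.6.24 style direct page field */
	unsigned int offset;
	unsigned int length;
};

/* Stand-in for lowmem page_address(): the page's kernel virtual address. */
static void *page_address(struct page *pg)
{
	return pg->mem;
}

/* Stand-in for sg_page(); the real helper also masks chain/end bits. */
static struct page *sg_page(struct scatterlist *sg)
{
	return sg->page;
}

/* Stand-in for sg_virt(): page virtual address plus the in-page offset. */
static void *sg_virt(struct scatterlist *sg)
{
	return (unsigned char *)page_address(sg_page(sg)) + sg->offset;
}

int main(void)
{
	struct page pg;
	struct scatterlist sg = { .page = &pg, .offset = 128, .length = 512 };

	/* Old code: page_address(sg->page) + sg->offset; new code: sg_virt(sg). */
	assert((unsigned char *)page_address(sg.page) + sg.offset ==
	       (unsigned char *)sg_virt(&sg));
	puts("sg_virt() matches page_address(sg_page()) + offset");
	return 0;
}
```

The sketch only demonstrates the expression equivalence behind the conversion; the sparc32-specific DMA details (dvma_address, mmu_inval_dma_area()) are untouched by it.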