Diffstat (limited to 'lib/scatterlist.c')
| Mode | File | Lines changed |
|------|------|---------------|
| -rw-r--r-- | lib/scatterlist.c | 309 |
1 files changed, 255 insertions, 54 deletions
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 4ceb05d772a..3a8e8e8fb2a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -6,7 +6,7 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2. See the file COPYING for more details.
  */
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
@@ -39,6 +39,25 @@ struct scatterlist *sg_next(struct scatterlist *sg)
 EXPORT_SYMBOL(sg_next);
 
 /**
+ * sg_nents - return total count of entries in scatterlist
+ * @sg: The scatterlist
+ *
+ * Description:
+ * Allows to know how many entries are in sg, taking into acount
+ * chaining as well
+ *
+ **/
+int sg_nents(struct scatterlist *sg)
+{
+        int nents;
+        for (nents = 0; sg; sg = sg_next(sg))
+                nents++;
+        return nents;
+}
+EXPORT_SYMBOL(sg_nents);
+
+
+/**
  * sg_last - return the last scatterlist entry in a list
  * @sgl: First entry in the scatterlist
  * @nents: Number of entries in the scatterlist
@@ -228,12 +247,15 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
         struct scatterlist *sg, *prv;
         unsigned int left;
 
+        memset(table, 0, sizeof(*table));
+
+        if (nents == 0)
+                return -EINVAL;
 #ifndef ARCH_HAS_SG_CHAIN
-        BUG_ON(nents > max_ents);
+        if (WARN_ON_ONCE(nents > max_ents))
+                return -EINVAL;
 #endif
 
-        memset(table, 0, sizeof(*table));
-
         left = nents;
         prv = NULL;
         do {
@@ -279,14 +301,6 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                 if (!left)
                         sg_mark_end(&sg[sg_size - 1]);
 
-                /*
-                 * only really needed for mempool backed sg allocations (like
-                 * SCSI), a possible improvement here would be to pass the
-                 * table pointer into the allocator and let that clear these
-                 * flags
-                 */
-                gfp_mask &= ~__GFP_WAIT;
-                gfp_mask |= __GFP_HIGH;
                 prv = sg;
         } while (left);
 
@@ -319,6 +333,106 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 EXPORT_SYMBOL(sg_alloc_table);
 
 /**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ *    an array of pages
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table from a list of pages. Contiguous
+ * ranges of the pages are squashed into a single scatterlist node. A user
+ * may provide an offset at a start and a size of valid data in a buffer
+ * specified by the page array. The returned sg table is released by
+ * sg_free_table.
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ */
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+        struct page **pages, unsigned int n_pages,
+        unsigned long offset, unsigned long size,
+        gfp_t gfp_mask)
+{
+        unsigned int chunks;
+        unsigned int i;
+        unsigned int cur_page;
+        int ret;
+        struct scatterlist *s;
+
+        /* compute number of contiguous chunks */
+        chunks = 1;
+        for (i = 1; i < n_pages; ++i)
+                if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
+                        ++chunks;
+
+        ret = sg_alloc_table(sgt, chunks, gfp_mask);
+        if (unlikely(ret))
+                return ret;
+
+        /* merging chunks and putting them into the scatterlist */
+        cur_page = 0;
+        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+                unsigned long chunk_size;
+                unsigned int j;
+
+                /* look for the end of the current chunk */
+                for (j = cur_page + 1; j < n_pages; ++j)
+                        if (page_to_pfn(pages[j]) !=
+                            page_to_pfn(pages[j - 1]) + 1)
+                                break;
+
+                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
+                sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+                size -= chunk_size;
+                offset = 0;
+                cur_page = j;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL(sg_alloc_table_from_pages);
+
+void __sg_page_iter_start(struct sg_page_iter *piter,
+                          struct scatterlist *sglist, unsigned int nents,
+                          unsigned long pgoffset)
+{
+        piter->__pg_advance = 0;
+        piter->__nents = nents;
+
+        piter->sg = sglist;
+        piter->sg_pgoffset = pgoffset;
+}
+EXPORT_SYMBOL(__sg_page_iter_start);
+
+static int sg_page_count(struct scatterlist *sg)
+{
+        return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_next(struct sg_page_iter *piter)
+{
+        if (!piter->__nents || !piter->sg)
+                return false;
+
+        piter->sg_pgoffset += piter->__pg_advance;
+        piter->__pg_advance = 1;
+
+        while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
+                piter->sg_pgoffset -= sg_page_count(piter->sg);
+                piter->sg = sg_next(piter->sg);
+                if (!--piter->__nents || !piter->sg)
+                        return false;
+        }
+
+        return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_next);
+
+/**
  * sg_miter_start - start mapping iteration over a sg list
  * @miter: sg mapping iter to be started
  * @sgl: sg list to iterate over
@@ -335,27 +449,84 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
 {
         memset(miter, 0, sizeof(struct sg_mapping_iter));
 
-        miter->__sg = sgl;
-        miter->__nents = nents;
-        miter->__offset = 0;
+        __sg_page_iter_start(&miter->piter, sgl, nents, 0);
         WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
         miter->__flags = flags;
 }
 EXPORT_SYMBOL(sg_miter_start);
 
+static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
+{
+        if (!miter->__remaining) {
+                struct scatterlist *sg;
+                unsigned long pgoffset;
+
+                if (!__sg_page_iter_next(&miter->piter))
+                        return false;
+
+                sg = miter->piter.sg;
+                pgoffset = miter->piter.sg_pgoffset;
+
+                miter->__offset = pgoffset ? 0 : sg->offset;
+                miter->__remaining = sg->offset + sg->length -
+                                (pgoffset << PAGE_SHIFT) - miter->__offset;
+                miter->__remaining = min_t(unsigned long, miter->__remaining,
+                                           PAGE_SIZE - miter->__offset);
+        }
+
+        return true;
+}
+
+/**
+ * sg_miter_skip - reposition mapping iterator
+ * @miter: sg mapping iter to be skipped
+ * @offset: number of bytes to plus the current location
+ *
+ * Description:
+ *   Sets the offset of @miter to its current location plus @offset bytes.
+ *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
+ *   stops @miter.
+ *
+ * Context:
+ *   Don't care if @miter is stopped, or not proceeded yet.
+ *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
+ *
+ * Returns:
+ *   true if @miter contains the valid mapping. false if end of sg
+ *   list is reached.
+ */
+bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
+{
+        sg_miter_stop(miter);
+
+        while (offset) {
+                off_t consumed;
+
+                if (!sg_miter_get_next_page(miter))
+                        return false;
+
+                consumed = min_t(off_t, offset, miter->__remaining);
+                miter->__offset += consumed;
+                miter->__remaining -= consumed;
+                offset -= consumed;
+        }
+
+        return true;
+}
+EXPORT_SYMBOL(sg_miter_skip);
+
 /**
  * sg_miter_next - proceed mapping iterator to the next mapping
  * @miter: sg mapping iter to proceed
  *
  * Description:
- *   Proceeds @miter@ to the next mapping.  @miter@ should have been
- *   started using sg_miter_start().  On successful return,
- *   @miter@->page, @miter@->addr and @miter@->length point to the
- *   current mapping.
+ *   Proceeds @miter to the next mapping. @miter should have been started
+ *   using sg_miter_start(). On successful return, @miter->page,
+ *   @miter->addr and @miter->length point to the current mapping.
  *
  * Context:
- *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
- *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
+ *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
+ *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
@@ -363,36 +534,22 @@ EXPORT_SYMBOL(sg_miter_start);
 */
 bool sg_miter_next(struct sg_mapping_iter *miter)
 {
-        unsigned int off, len;
-
-        /* check for end and drop resources from the last iteration */
-        if (!miter->__nents)
-                return false;
-
         sg_miter_stop(miter);
 
-        /* get to the next sg if necessary.  __offset is adjusted by stop */
-        while (miter->__offset == miter->__sg->length) {
-                if (--miter->__nents) {
-                        miter->__sg = sg_next(miter->__sg);
-                        miter->__offset = 0;
-                } else
-                        return false;
-        }
-
-        /* map the next page */
-        off = miter->__sg->offset + miter->__offset;
-        len = miter->__sg->length - miter->__offset;
+        /*
+         * Get to the next page if necessary.
+         * __remaining, __offset is adjusted by sg_miter_stop
+         */
+        if (!sg_miter_get_next_page(miter))
+                return false;
 
-        miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
-        off &= ~PAGE_MASK;
-        miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
-        miter->consumed = miter->length;
+        miter->page = sg_page_iter_page(&miter->piter);
+        miter->consumed = miter->length = miter->__remaining;
 
         if (miter->__flags & SG_MITER_ATOMIC)
-                miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+                miter->addr = kmap_atomic(miter->page) + miter->__offset;
         else
-                miter->addr = kmap(miter->page) + off;
+                miter->addr = kmap(miter->page) + miter->__offset;
 
         return true;
 }
@@ -409,7 +566,8 @@ EXPORT_SYMBOL(sg_miter_next);
 *   resources (kmap) need to be released during iteration.
 *
 * Context:
-*   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
+*   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
+*   otherwise.
 */
 void sg_miter_stop(struct sg_mapping_iter *miter)
 {
@@ -418,13 +576,15 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
         /* drop resources from the last iteration */
         if (miter->addr) {
                 miter->__offset += miter->consumed;
+                miter->__remaining -= miter->consumed;
 
-                if (miter->__flags & SG_MITER_TO_SG)
+                if ((miter->__flags & SG_MITER_TO_SG) &&
+                    !PageSlab(miter->page))
                         flush_kernel_dcache_page(miter->page);
 
                 if (miter->__flags & SG_MITER_ATOMIC) {
-                        WARN_ON(!irqs_disabled());
-                        kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+                        WARN_ON_ONCE(preemptible());
+                        kunmap_atomic(miter->addr);
                 } else
                         kunmap(miter->page);
 
@@ -442,14 +602,16 @@ EXPORT_SYMBOL(sg_miter_stop);
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
-* @to_buffer: transfer direction (non zero == from an sg list to a
-*             buffer, 0 == from a buffer to an sg list
+* @skip: Number of bytes to skip before copying
+* @to_buffer: transfer direction (true == from an sg list to a
+*             buffer, false == from a buffer to an sg list
 *
 * Returns the number of copied bytes.
 *
 **/
 static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
-                             void *buf, size_t buflen, int to_buffer)
+                             void *buf, size_t buflen, off_t skip,
+                             bool to_buffer)
 {
         unsigned int offset = 0;
         struct sg_mapping_iter miter;
@@ -463,6 +625,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
         sg_miter_start(&miter, sgl, nents, sg_flags);
 
+        if (!sg_miter_skip(&miter, skip))
+                return false;
+
         local_irq_save(flags);
 
         while (sg_miter_next(&miter) && offset < buflen) {
@@ -497,7 +662,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            void *buf, size_t buflen)
 {
-        return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+        return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
 }
 EXPORT_SYMBOL(sg_copy_from_buffer);
 
@@ -514,6 +679,42 @@ EXPORT_SYMBOL(sg_copy_from_buffer);
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen)
 {
-        return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+        return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
 }
 EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/**
+ * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @skip: Number of bytes to skip before copying
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+                            void *buf, size_t buflen, off_t skip)
+{
+        return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
+}
+EXPORT_SYMBOL(sg_pcopy_from_buffer);
+
+/**
+ * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @skip: Number of bytes to skip before copying
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+                          void *buf, size_t buflen, off_t skip)
+{
+        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
+}
+EXPORT_SYMBOL(sg_pcopy_to_buffer);
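
Usage note (not part of the patch): the sketch below is a minimal illustration of how a caller might combine the new sg_alloc_table_from_pages() and sg_pcopy_to_buffer() helpers, wrapping a pinned page array in an sg_table and then copying a byte range out of it while skipping a leading header. The function name collect_payload() and its error policy are hypothetical; only the scatterlist calls come from this diff.

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Hypothetical caller, for illustration only: build an sg_table over a
 * pinned page array and copy @buflen bytes of its payload into @out,
 * skipping the first @skip bytes.
 */
static int collect_payload(struct page **pages, unsigned int n_pages,
                           unsigned long offset, unsigned long size,
                           void *out, size_t buflen, off_t skip)
{
        struct sg_table sgt;
        size_t copied;
        int ret;

        /* Contiguous runs of pages collapse into single scatterlist nodes. */
        ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, offset, size,
                                        GFP_KERNEL);
        if (ret)
                return ret;

        /* Copy from the sg list into @out, starting @skip bytes into the data. */
        copied = sg_pcopy_to_buffer(sgt.sgl, sgt.orig_nents, out, buflen, skip);

        sg_free_table(&sgt);

        return copied == buflen ? 0 : -EIO;
}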

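A second hedged sketch, this time for the new page iterator: it visits every page backing a scatterlist via __sg_page_iter_start()/__sg_page_iter_next(), the same primitives sg_miter_next() now uses internally. count_sg_pages() is a hypothetical helper; sg_page_iter_page() is assumed to be the header accessor that the patch itself calls to fetch the current page.

#include <linux/scatterlist.h>

/*
 * Hypothetical helper, for illustration only: count every page that a
 * scatterlist references, chained entries included.
 */
static unsigned int count_sg_pages(struct scatterlist *sgl)
{
        struct sg_page_iter piter;
        unsigned int n = 0;

        /* sg_nents() (added above) walks chained lists to find the length. */
        __sg_page_iter_start(&piter, sgl, sg_nents(sgl), 0);

        /* Each successful step leaves the iterator on exactly one page. */
        while (__sg_page_iter_next(&piter)) {
                struct page *page = sg_page_iter_page(&piter);

                (void)page;     /* a real caller would map or sync it here */
                n++;
        }

        return n;
}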