Diffstat (limited to 'lib/dma-debug.c')
| -rw-r--r-- | lib/dma-debug.c | 442 |
1 file changed, 387 insertions, 55 deletions
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 4bfb0471f10..98f2d7e91a9 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/sched.h>
@@ -44,24 +45,48 @@ enum {
 	dma_debug_coherent,
 };
 
+enum map_err_types {
+	MAP_ERR_CHECK_NOT_APPLICABLE,
+	MAP_ERR_NOT_CHECKED,
+	MAP_ERR_CHECKED,
+};
+
 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
 
+/**
+ * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
+ * @list: node on pre-allocated free_entries list
+ * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
+ * @type: single, page, sg, coherent
+ * @pfn: page frame of the start address
+ * @offset: offset of mapping relative to pfn
+ * @size: length of the mapping
+ * @direction: enum dma_data_direction
+ * @sg_call_ents: 'nents' from dma_map_sg
+ * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+ * @map_err_type: track whether dma_mapping_error() was checked
+ * @stacktrace: support backtraces when a violation is detected
+ */
 struct dma_debug_entry {
 	struct list_head list;
 	struct device    *dev;
 	int              type;
-	phys_addr_t      paddr;
+	unsigned long	 pfn;
+	size_t		 offset;
 	u64              dev_addr;
 	u64              size;
 	int              direction;
 	int		 sg_call_ents;
 	int		 sg_mapped_ents;
+	enum map_err_types  map_err_type;
 #ifdef CONFIG_STACKTRACE
 	struct		 stack_trace stacktrace;
 	unsigned long	 st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
 #endif
 };
 
+typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
+
 struct hash_bucket {
 	struct list_head list;
 	spinlock_t lock;
@@ -75,7 +100,7 @@ static LIST_HEAD(free_entries);
 static DEFINE_SPINLOCK(free_entries_lock);
 
 /* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
+static u32 global_disable __read_mostly;
 
 /* Global error count */
 static u32 error_count;
@@ -111,17 +136,18 @@ static struct device_driver *current_driver    __read_mostly;
 
 static DEFINE_RWLOCK(driver_name_lock);
 
+static const char *const maperr2str[] = {
+	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
+	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
+	[MAP_ERR_CHECKED] = "dma map error checked",
+};
+
 static const char *type2name[4] = { "single", "page",
 				    "scather-gather", "coherent" };
 
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 				   "DMA_FROM_DEVICE", "DMA_NONE" };
 
-/* little merge helper - remove it after the merge window */
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
-
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -167,7 +193,7 @@ static bool driver_filter(struct device *dev)
 		return false;
 
 	/* driver filter on but not yet initialized */
-	drv = get_driver(dev->driver);
+	drv = dev->driver;
 	if (!drv)
 		return false;
 
@@ -182,7 +208,6 @@ static bool driver_filter(struct device *dev)
 	}
 
 	read_unlock_irqrestore(&driver_name_lock, flags);
-	put_driver(drv);
 
 	return ret;
 }
@@ -240,18 +265,37 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 	spin_unlock_irqrestore(&bucket->lock, __flags);
 }
 
+static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
+{
+	return ((a->dev_addr == b->dev_addr) &&
+		(a->dev == b->dev)) ? true : false;
+}
+
+static bool containing_match(struct dma_debug_entry *a,
+			     struct dma_debug_entry *b)
+{
+	if (a->dev != b->dev)
+		return false;
+
+	if ((b->dev_addr <= a->dev_addr) &&
+	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
+		return true;
+
+	return false;
+}
+
 /*
  * Search a given entry in the hash bucket list
  */
-static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
-						struct dma_debug_entry *ref)
+static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
+						  struct dma_debug_entry *ref,
+						  match_fn match)
 {
 	struct dma_debug_entry *entry, *ret = NULL;
-	int matches = 0, match_lvl, last_lvl = 0;
+	int matches = 0, match_lvl, last_lvl = -1;
 
 	list_for_each_entry(entry, &bucket->list, list) {
-		if ((entry->dev_addr != ref->dev_addr) ||
-		    (entry->dev != ref->dev))
+		if (!match(ref, entry))
 			continue;
 
 		/*
@@ -277,7 +321,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 		} else if (match_lvl > last_lvl) {
 			/*
 			 * We found an entry that fits better then the
-			 * previous one
+			 * previous one or it is the 1st match.
 			 */
 			last_lvl = match_lvl;
 			ret      = entry;
@@ -293,6 +337,39 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
 	return ret;
 }
 
+static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
+						 struct dma_debug_entry *ref)
+{
+	return __hash_bucket_find(bucket, ref, exact_match);
+}
+
+static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
+						   struct dma_debug_entry *ref,
+						   unsigned long *flags)
+{
+
+	unsigned int max_range = dma_get_max_seg_size(ref->dev);
+	struct dma_debug_entry *entry, index = *ref;
+	unsigned int range = 0;
+
+	while (range <= max_range) {
+		entry = __hash_bucket_find(*bucket, &index, containing_match);
+
+		if (entry)
+			return entry;
+
+		/*
+		 * Nothing found, go back a hash bucket
+		 */
+		put_hash_bucket(*bucket, flags);
+		range          += (1 << HASH_FN_SHIFT);
+		index.dev_addr -= (1 << HASH_FN_SHIFT);
+		*bucket = get_hash_bucket(&index, flags);
+	}
+
+	return NULL;
+}
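The backward walk above exists because a dma_sync_* call may reference an
address in the middle of a mapping, which can hash into a later bucket than
the mapping's start address that keyed the entry. Below is a minimal
userspace model of just the bucket arithmetic; the HASH_SIZE/HASH_FN_SHIFT
values mirror the definitions earlier in this file (not visible in this
diff), while the mapping and the segment-size limit are made up.

#include <stdio.h>

#define HASH_SIZE     1024ULL
#define HASH_FN_SHIFT 13			/* each bucket spans 8 KiB */
#define HASH_FN_MASK  (HASH_SIZE - 1)

static unsigned long long hash_fn(unsigned long long dev_addr)
{
	return (dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

int main(void)
{
	unsigned long long map_start = 0x10000;	/* entry lives in bucket 8 */
	unsigned long long sync_addr = 0x17000;	/* sync 28 KiB in: bucket 11 */
	unsigned long long addr = sync_addr;
	unsigned int max_range = 0x10000;	/* stand-in for max seg size */
	unsigned int range = 0;

	while (range <= max_range) {
		printf("probing bucket %llu\n", hash_fn(addr));
		if (hash_fn(addr) == hash_fn(map_start)) {
			printf("found the mapping's home bucket\n");
			return 0;
		}
		range += 1 << HASH_FN_SHIFT;	/* walked back one bucket... */
		addr  -= 1 << HASH_FN_SHIFT;	/* ...i.e. 8 KiB of addresses */
	}
	return 1;
}

The model probes buckets 11, 10 and 9, then stops at bucket 8, where the
real code would run containing_match() against that bucket's entries.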
+
 /*
  * Add an entry to a hash bucket
  */
@@ -310,6 +387,11 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
 	list_del(&entry->list);
 }
 
+static unsigned long long phys_addr(struct dma_debug_entry *entry)
+{
+	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
+}
+
 /*
  * Dump mapping entries for debugging purposes
  */
@@ -327,11 +409,12 @@ void debug_dma_dump_mappings(struct device *dev)
 		list_for_each_entry(entry, &bucket->list, list) {
 			if (!dev || dev == entry->dev) {
 				dev_info(entry->dev,
-					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
+					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
 					 type2name[entry->type], idx,
-					 (unsigned long long)entry->paddr,
+					 phys_addr(entry), entry->pfn,
 					 entry->dev_addr, entry->size,
-					 dir2name[entry->direction]);
+					 dir2name[entry->direction],
+					 maperr2str[entry->map_err_type]);
 			}
 		}
 
@@ -341,6 +424,176 @@ void debug_dma_dump_mappings(struct device *dev)
 EXPORT_SYMBOL(debug_dma_dump_mappings);
 
 /*
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
+ * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
+ * the entry already exists at insertion time add a tag as a reference
+ * count for the overlapping mappings. For now, the overlap tracking
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
+ *
+ * Memory usage is mostly constrained by the maximum number of available
+ * dma-debug entries in that we need a free dma_debug_entry before
+ * inserting into the tree. In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
+ *
+ * At any time debug_dma_assert_idle() can be called to trigger a
+ * warning if any cachelines in the given page are in the active set.
+ */
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
+static DEFINE_SPINLOCK(radix_lock);
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
+
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+		(entry->offset >> L1_CACHE_SHIFT);
+}
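For reference, to_cacheline_number() simply forms a global cacheline index:
pages are treated as runs of L1-sized lines laid end to end. A worked
example, assuming the common PAGE_SHIFT = 12 and L1_CACHE_SHIFT = 6 (both
values are architecture-dependent):

#include <stdio.h>

#define PAGE_SHIFT		 12
#define L1_CACHE_SHIFT		 6
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)

int main(void)
{
	unsigned long long pfn = 0x1234;	/* page frame of the mapping */
	unsigned long long offset = 0x80;	/* byte offset into the page */

	/* 64 cachelines per 4 KiB page, plus the line within the page */
	unsigned long long cln = (pfn << CACHELINE_PER_PAGE_SHIFT) +
				 (offset >> L1_CACHE_SHIFT);

	printf("cln = 0x%llx\n", cln);		/* 0x1234 * 64 + 2 = 0x48d02 */
	return 0;
}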
+
+static int active_cacheline_read_overlap(phys_addr_t cln)
+{
+	int overlap = 0, i;
+
+	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
+			overlap |= 1 << i;
+	return overlap;
+}
+
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
+{
+	int i;
+
+	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
+		return overlap;
+
+	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+		if (overlap & 1 << i)
+			radix_tree_tag_set(&dma_active_cacheline, cln, i);
+		else
+			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
+
+	return overlap;
+}
+
+static void active_cacheline_inc_overlap(phys_addr_t cln)
+{
+	int overlap = active_cacheline_read_overlap(cln);
+
+	overlap = active_cacheline_set_overlap(cln, ++overlap);
+
+	/* If we overflowed the overlap counter then we're potentially
+	 * leaking dma-mappings. Otherwise, if maps and unmaps are
+	 * balanced then this overflow may cause false negatives in
+	 * debug_dma_assert_idle() as the cacheline may be marked idle
+	 * prematurely.
+	 */
+	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
+}
+
+static int active_cacheline_dec_overlap(phys_addr_t cln)
+{
+	int overlap = active_cacheline_read_overlap(cln);
+
+	return active_cacheline_set_overlap(cln, --overlap);
+}
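The helpers above pack a small reference count into the radix tree's
per-slot tag bits instead of allocating a separate counter; the slot itself
already holds the dma_debug_entry pointer. A standalone model of that
encoding, assuming RADIX_TREE_MAX_TAGS is 3 (its value in kernels of this
vintage), which makes the count saturate at ACTIVE_CACHELINE_MAX_OVERLAP = 7:

#include <stdio.h>
#include <stdbool.h>

#define MAX_TAGS    3
#define MAX_OVERLAP ((1 << MAX_TAGS) - 1)

static bool tags[MAX_TAGS];		/* the tag bits of one tree slot */

static int read_overlap(void)
{
	int overlap = 0, i;

	for (i = MAX_TAGS - 1; i >= 0; i--)
		if (tags[i])
			overlap |= 1 << i;
	return overlap;
}

static int set_overlap(int overlap)
{
	int i;

	if (overlap > MAX_OVERLAP || overlap < 0)
		return overlap;		/* out of range: leave the bits alone */

	for (i = MAX_TAGS - 1; i >= 0; i--)
		tags[i] = overlap & (1 << i);
	return overlap;
}

int main(void)
{
	set_overlap(read_overlap() + 1);	/* 2nd mapping of the line */
	set_overlap(read_overlap() + 1);	/* 3rd mapping */
	printf("overlap = %d\n", read_overlap());	/* prints 2 */
	printf("after dec = %d\n", set_overlap(read_overlap() - 1));	/* 1 */
	return 0;
}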
+
+static int active_cacheline_insert(struct dma_debug_entry *entry)
+{
+	phys_addr_t cln = to_cacheline_number(entry);
+	unsigned long flags;
+	int rc;
+
+	/* If the device is not writing memory then we don't have any
+	 * concerns about the cpu consuming stale data. This mitigates
+	 * legitimate usages of overlapping mappings.
+	 */
+	if (entry->direction == DMA_TO_DEVICE)
+		return 0;
+
+	spin_lock_irqsave(&radix_lock, flags);
+	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
+	if (rc == -EEXIST)
+		active_cacheline_inc_overlap(cln);
+	spin_unlock_irqrestore(&radix_lock, flags);
+
+	return rc;
+}
+
+static void active_cacheline_remove(struct dma_debug_entry *entry)
+{
+	phys_addr_t cln = to_cacheline_number(entry);
+	unsigned long flags;
+
+	/* ...mirror the insert case */
+	if (entry->direction == DMA_TO_DEVICE)
+		return;
+
+	spin_lock_irqsave(&radix_lock, flags);
+	/* since we are counting overlaps the final put of the
+	 * cacheline will occur when the overlap count is 0.
+	 * active_cacheline_dec_overlap() returns -1 in that case
+	 */
+	if (active_cacheline_dec_overlap(cln) < 0)
+		radix_tree_delete(&dma_active_cacheline, cln);
+	spin_unlock_irqrestore(&radix_lock, flags);
+}
+
+/**
+ * debug_dma_assert_idle() - assert that a page is not undergoing dma
+ * @page: page to lookup in the dma_active_cacheline tree
+ *
+ * Place a call to this routine in cases where the cpu touching the page
+ * before the dma completes (page is dma_unmapped) will lead to data
+ * corruption.
+ */
+void debug_dma_assert_idle(struct page *page)
+{
+	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+	struct dma_debug_entry *entry = NULL;
+	void **results = (void **) &ents;
+	unsigned int nents, i;
+	unsigned long flags;
+	phys_addr_t cln;
+
+	if (!page)
+		return;
+
+	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
+	spin_lock_irqsave(&radix_lock, flags);
+	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+				       CACHELINES_PER_PAGE);
+	for (i = 0; i < nents; i++) {
+		phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+		if (ent_cln == cln) {
+			entry = ents[i];
+			break;
+		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+			break;
+	}
+	spin_unlock_irqrestore(&radix_lock, flags);
+
+	if (!entry)
+		return;
+
+	cln = to_cacheline_number(entry);
+	err_printk(entry->dev, entry,
+		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+		   &cln);
+}
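The intended caller of debug_dma_assert_idle() is code that is about to let
the CPU write to a page a device may still own; the companion change to
this patch (in mm/memory.c, not part of this diff) adds such a call to the
copy-on-write path. An illustrative kernel-context sketch (the wrapper
function is hypothetical; copy_user_highpage() is the stock page-copy
helper):

static void copy_page_checked(struct page *dst, struct page *src,
			      unsigned long vaddr, struct vm_area_struct *vma)
{
	/* a device write racing with this CPU copy would corrupt data */
	debug_dma_assert_idle(src);
	copy_user_highpage(dst, src, vaddr, vma);
}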
+
+/*
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
  */
@@ -348,10 +601,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 {
 	struct hash_bucket *bucket;
 	unsigned long flags;
+	int rc;
 
 	bucket = get_hash_bucket(entry, &flags);
 	hash_bucket_add(bucket, entry);
 	put_hash_bucket(bucket, &flags);
+
+	rc = active_cacheline_insert(entry);
+	if (rc == -ENOMEM) {
+		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
+		global_disable = true;
+	}
+
+	/* TODO: report -EEXIST errors here as overlapping mappings are
+	 * not supported by the DMA API
+	 */
 }
 
 static struct dma_debug_entry *__dma_entry_alloc(void)
@@ -376,7 +640,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
  */
 static struct dma_debug_entry *dma_entry_alloc(void)
 {
-	struct dma_debug_entry *entry = NULL;
+	struct dma_debug_entry *entry;
 	unsigned long flags;
 
 	spin_lock_irqsave(&free_entries_lock, flags);
@@ -384,11 +648,14 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	if (list_empty(&free_entries)) {
 		pr_err("DMA-API: debugging out of memory - disabling\n");
 		global_disable = true;
-		goto out;
+		spin_unlock_irqrestore(&free_entries_lock, flags);
+		return NULL;
 	}
 
 	entry = __dma_entry_alloc();
 
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+
 #ifdef CONFIG_STACKTRACE
 	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
 	entry->stacktrace.entries = entry->st_entries;
@@ -396,9 +663,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 	save_stack_trace(&entry->stacktrace);
 #endif
 
-out:
-	spin_unlock_irqrestore(&free_entries_lock, flags);
-
 	return entry;
 }
 
@@ -406,6 +670,8 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 {
 	unsigned long flags;
 
+	active_cacheline_remove(entry);
+
 	/*
 	 * add to beginning of the list - this way the entries are
 	 * more likely cache hot when they are reallocated.
@@ -603,7 +869,7 @@ static int dma_debug_fs_init(void)
 
 	global_disable_dent = debugfs_create_bool("disabled", 0444,
 			dma_debug_dent,
-			(u32 *)&global_disable);
+			&global_disable);
 	if (!global_disable_dent)
 		goto out_err;
 
@@ -649,7 +915,7 @@ out_err:
 	return -ENOMEM;
 }
 
-static int device_dma_allocations(struct device *dev)
+static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
 {
 	struct dma_debug_entry *entry;
 	unsigned long flags;
@@ -660,8 +926,10 @@ static int device_dma_allocations(struct device *dev)
 	for (i = 0; i < HASH_SIZE; ++i) {
 		spin_lock(&dma_entry_hash[i].lock);
 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
-			if (entry->dev == dev)
+			if (entry->dev == dev) {
 				count += 1;
+				*out_entry = entry;
+			}
 		}
 		spin_unlock(&dma_entry_hash[i].lock);
 	}
@@ -674,6 +942,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 {
 	struct device *dev = data;
+	struct dma_debug_entry *uninitialized_var(entry);
 	int count;
 
 	if (global_disable)
 		return 0;
@@ -681,12 +950,17 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 
 	switch (action) {
 	case BUS_NOTIFY_UNBOUND_DRIVER:
-		count = device_dma_allocations(dev);
+		count = device_dma_allocations(dev, &entry);
 		if (count == 0)
 			break;
-		err_printk(dev, NULL, "DMA-API: device driver has pending "
+		err_printk(dev, entry, "DMA-API: device driver has pending "
				"DMA allocations while released from device "
-				"[count=%d]\n", count);
+				"[count=%d]\n"
+				"One of leaked entries details: "
+				"[device address=0x%016llx] [size=%llu bytes] "
+				"[mapped with %s] [mapped as %s]\n",
+			count, entry->dev_addr, entry->size,
+			dir2name[entry->direction], type2name[entry->type]);
 		break;
 	default:
 		break;
@@ -787,21 +1061,25 @@ static void check_unmap(struct dma_debug_entry *ref)
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	if (dma_mapping_error(ref->dev, ref->dev_addr)) {
-		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-			   "to free an invalid DMA memory address\n");
-		return;
-	}
-
 	bucket = get_hash_bucket(ref, &flags);
-	entry = hash_bucket_find(bucket, ref);
+	entry = bucket_find_exact(bucket, ref);
 
 	if (!entry) {
-		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-			   "to free DMA memory it has not allocated "
-			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   ref->dev_addr, ref->size);
-		goto out;
+		/* must drop lock before calling dma_mapping_error */
+		put_hash_bucket(bucket, &flags);
+
+		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
+			err_printk(ref->dev, NULL,
+				   "DMA-API: device driver tries to free an "
+				   "invalid DMA memory address\n");
+		} else {
+			err_printk(ref->dev, NULL,
+				   "DMA-API: device driver tries to free DMA "
+				   "memory it has not allocated [device "
+				   "address=0x%016llx] [size=%llu bytes]\n",
+				   ref->dev_addr, ref->size);
+		}
+		return;
 	}
 
 	if (ref->size != entry->size) {
@@ -820,15 +1098,15 @@ static void check_unmap(struct dma_debug_entry *ref)
 			   ref->dev_addr, ref->size,
 			   type2name[entry->type], type2name[ref->type]);
 	} else if ((entry->type == dma_debug_coherent) &&
-		   (ref->paddr != entry->paddr)) {
+		   (phys_addr(ref) != phys_addr(entry))) {
 		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
-			   (unsigned long long)entry->paddr,
-			   (unsigned long long)ref->paddr);
+			   phys_addr(entry),
+			   phys_addr(ref));
 	}
 
 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
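Note the ordering in the reworked check_unmap() above: the bucket lookup
now happens first, and the bucket lock is dropped before dma_mapping_error()
runs in the failure path. dma_mapping_error() ends up in
debug_dma_mapping_error() below, which takes the very same hash-bucket
lock, so calling it with the lock held would self-deadlock.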
@@ -853,10 +1131,18 @@ static void check_unmap(struct dma_debug_entry *ref)
 			   dir2name[ref->direction]);
 	}
 
+	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+		err_printk(ref->dev, entry,
+			   "DMA-API: device driver failed to check map error"
+			   "[device address=0x%016llx] [size=%llu bytes] "
+			   "[mapped as %s]",
+			   ref->dev_addr, ref->size,
+			   type2name[entry->type]);
+	}
+
 	hash_bucket_del(entry);
 	dma_entry_free(entry);
 
-out:
 	put_hash_bucket(bucket, &flags);
 }
 
@@ -894,7 +1180,7 @@ static void check_sync(struct device *dev,
 
 	bucket = get_hash_bucket(ref, &flags);
 
-	entry = hash_bucket_find(bucket, ref);
+	entry = bucket_find_contain(&bucket, ref, &flags);
 
 	if (!entry) {
 		err_printk(dev, NULL, "DMA-API: device driver tries "
@@ -960,7 +1246,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	if (unlikely(global_disable))
 		return;
 
-	if (unlikely(dma_mapping_error(dev, dma_addr)))
+	if (dma_mapping_error(dev, dma_addr))
 		return;
 
 	entry = dma_entry_alloc();
@@ -969,10 +1255,12 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 
 	entry->dev       = dev;
 	entry->type      = dma_debug_page;
-	entry->paddr     = page_to_phys(page) + offset;
+	entry->pfn	 = page_to_pfn(page);
+	entry->offset	 = offset,
 	entry->dev_addr  = dma_addr;
 	entry->size      = size;
 	entry->direction = direction;
+	entry->map_err_type = MAP_ERR_NOT_CHECKED;
 
 	if (map_single)
 		entry->type = dma_debug_single;
@@ -988,6 +1276,44 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 }
 EXPORT_SYMBOL(debug_dma_map_page);
 
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	struct dma_debug_entry ref;
+	struct dma_debug_entry *entry;
+	struct hash_bucket *bucket;
+	unsigned long flags;
+
+	if (unlikely(global_disable))
+		return;
+
+	ref.dev = dev;
+	ref.dev_addr = dma_addr;
+	bucket = get_hash_bucket(&ref, &flags);
+
+	list_for_each_entry(entry, &bucket->list, list) {
+		if (!exact_match(&ref, entry))
+			continue;
+
+		/*
+		 * The same physical address can be mapped multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which updates the first entry
+		 * from the hash which fits the reference value and is
+		 * not currently listed as being checked.
+		 */
+		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+			entry->map_err_type = MAP_ERR_CHECKED;
+			break;
+		}
+	}
+
+	put_hash_bucket(bucket, &flags);
+}
+EXPORT_SYMBOL(debug_dma_mapping_error);
+
 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
 {
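Driver-side, the contract that map_err_type tracks looks as follows; a
minimal sketch in which example_map_buffer() and its parameters are
illustrative, while the DMA API calls are the real ones:

static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* this check flips the dma-debug entry to MAP_ERR_CHECKED */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;	/* only now is 'addr' safe to hand to the device */
	return 0;
}

Skipping the dma_mapping_error() check leaves the entry in the
MAP_ERR_NOT_CHECKED state, and the corresponding unmap then triggers the
"failed to check map error" report added to check_unmap() above.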
@@ -1026,7 +1352,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 
 		entry->type           = dma_debug_sg;
 		entry->dev            = dev;
-		entry->paddr          = sg_phys(s);
+		entry->pfn	      = page_to_pfn(sg_page(s));
+		entry->offset	      = s->offset,
 		entry->size           = sg_dma_len(s);
 		entry->dev_addr       = sg_dma_address(s);
 		entry->direction      = direction;
@@ -1052,7 +1379,7 @@ static int get_nr_mapped_entries(struct device *dev,
 	int mapped_ents;
 
 	bucket       = get_hash_bucket(ref, &flags);
-	entry        = hash_bucket_find(bucket, ref);
+	entry        = bucket_find_exact(bucket, ref);
 	mapped_ents  = 0;
 
 	if (entry)
@@ -1076,7 +1403,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn		= page_to_pfn(sg_page(s)),
+			.offset		= s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = dir,
@@ -1111,7 +1439,8 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 
 	entry->type      = dma_debug_coherent;
 	entry->dev       = dev;
-	entry->paddr     = virt_to_phys(virt);
+	entry->pfn	 = page_to_pfn(virt_to_page(virt));
+	entry->offset	 = (size_t) virt & PAGE_MASK;
 	entry->size      = size;
 	entry->dev_addr  = dma_addr;
 	entry->direction = DMA_BIDIRECTIONAL;
@@ -1126,7 +1455,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 	struct dma_debug_entry ref = {
 		.type           = dma_debug_coherent,
 		.dev            = dev,
-		.paddr          = virt_to_phys(virt),
+		.pfn		= page_to_pfn(virt_to_page(virt)),
+		.offset		= (size_t) virt & PAGE_MASK,
 		.dev_addr       = addr,
 		.size           = size,
 		.direction      = DMA_BIDIRECTIONAL,
@@ -1234,7 +1564,8 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn		= page_to_pfn(sg_page(s)),
+			.offset		= s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = direction,
@@ -1266,7 +1597,8 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		struct dma_debug_entry ref = {
 			.type           = dma_debug_sg,
 			.dev            = dev,
-			.paddr          = sg_phys(s),
+			.pfn		= page_to_pfn(sg_page(s)),
+			.offset		= s->offset,
 			.dev_addr       = sg_dma_address(s),
 			.size           = sg_dma_len(s),
 			.direction      = direction,
