--- debug.c (082f20b21de20285da2cbfc1be29656f0714c1b8)
+++ debug.c (c2bbf9d1e9ac7d4fdd503b190bc1ba8a6302bc49)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2008 Advanced Micro Devices, Inc.
  *
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  */
 
 #define pr_fmt(fmt) "DMA-API: " fmt

--- 538 unchanged lines hidden ---

 	radix_tree_delete(&dma_active_cacheline, cln);
 	spin_unlock_irqrestore(&radix_lock, flags);
 }
 
 /*
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
  */
-static void add_dma_entry(struct dma_debug_entry *entry)
+static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 {
 	struct hash_bucket *bucket;
 	unsigned long flags;
 	int rc;
 
 	bucket = get_hash_bucket(entry, &flags);
 	hash_bucket_add(bucket, entry);
 	put_hash_bucket(bucket, flags);
 
 	rc = active_cacheline_insert(entry);
 	if (rc == -ENOMEM) {
 		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
-	} else if (rc == -EEXIST) {
+	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		err_printk(entry->dev, entry,
 			   "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
 	}
 }
 
 static int dma_debug_create_entries(gfp_t gfp)
 {
 	struct dma_debug_entry *entry;

--- 608 unchanged lines hidden ---
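Note: active_cacheline_insert() returns -EEXIST when two live mappings cover the same cacheline. A caller that passes DMA_ATTR_SKIP_CPU_SYNC has taken ownership of CPU/device coherency, so an intentional overlapping mapping no longer triggers the report. A minimal sketch of such a caller (hypothetical driver code, not part of this diff; dev and page assumed in scope):

    /* Map the same page twice on purpose; both mappings skip the
     * automatic CPU sync, so the driver syncs explicitly. With this
     * change, dma-debug no longer flags the overlap as an error. */
    dma_addr_t a = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
    dma_addr_t b = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);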

 
 	if (is_vmalloc_addr(addr))
 		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
 			   addr, len);
 }
 EXPORT_SYMBOL(debug_dma_map_single);
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-			size_t size, int direction, dma_addr_t dma_addr)
+			size_t size, int direction, dma_addr_t dma_addr,
+			unsigned long attrs)
 {
 	struct dma_debug_entry *entry;
 
 	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (dma_mapping_error(dev, dma_addr))
 		return;

--- 14 unchanged lines hidden ---

 	check_for_stack(dev, page, offset);
 
 	if (!PageHighMem(page)) {
 		void *addr = page_address(page) + offset;
 
 		check_for_illegal_area(dev, addr, size);
 	}
 
-	add_dma_entry(entry);
+	add_dma_entry(entry, attrs);
 }
 
 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	struct dma_debug_entry ref;
 	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;
 	unsigned long flags;

--- 41 unchanged lines hidden ---
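For context, a sketch of how the new argument is expected to reach debug_dma_map_page() from the mapping API (assuming the matching change to dma_map_page_attrs() in kernel/dma/mapping.c, which is outside this diff):

    dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                                  size_t offset, size_t size,
                                  enum dma_data_direction dir,
                                  unsigned long attrs)
    {
        dma_addr_t addr;

        /* ... perform the actual mapping through the dma_map_ops ... */
        debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
        return addr;
    }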

 	};
 
 	if (unlikely(dma_debug_disabled()))
 		return;
 	check_unmap(&ref);
 }
 
 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		      int nents, int mapped_ents, int direction)
+		      int nents, int mapped_ents, int direction,
+		      unsigned long attrs)
 {
 	struct dma_debug_entry *entry;
 	struct scatterlist *s;
 	int i;
 
 	if (unlikely(dma_debug_disabled()))
 		return;
 
+	for_each_sg(sg, s, nents, i) {
+		check_for_stack(dev, sg_page(s), s->offset);
+		if (!PageHighMem(sg_page(s)))
+			check_for_illegal_area(dev, sg_virt(s), s->length);
+	}
+
 	for_each_sg(sg, s, mapped_ents, i) {
 		entry = dma_entry_alloc();
 		if (!entry)
 			return;
 
 		entry->type = dma_debug_sg;
 		entry->dev = dev;
 		entry->pfn = page_to_pfn(sg_page(s));
 		entry->offset = s->offset;
 		entry->size = sg_dma_len(s);
 		entry->dev_addr = sg_dma_address(s);
 		entry->direction = direction;
 		entry->sg_call_ents = nents;
 		entry->sg_mapped_ents = mapped_ents;
 
-		check_for_stack(dev, sg_page(s), s->offset);
-
-		if (!PageHighMem(sg_page(s))) {
-			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
-		}
-
 		check_sg_segment(dev, s);
 
-		add_dma_entry(entry);
+		add_dma_entry(entry, attrs);
 	}
 }
 
 static int get_nr_mapped_entries(struct device *dev,
 				 struct dma_debug_entry *ref)
 {
 	struct dma_debug_entry *entry;
 	struct hash_bucket *bucket;

--- 39 unchanged lines hidden ---
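Note on the restructured checks in debug_dma_map_sg() above: check_for_stack() and check_for_illegal_area() validate CPU addresses, so the new loop walks all nents entries the driver submitted and uses s->length. The mapped_ents loop describes the DMA side, where an IOMMU may have coalesced entries, so sg_dma_len() and sg_dma_address() are only meaningful there. A small sketch of the distinction (hypothetical values; dev and sgl assumed in scope):

    /* The driver submits 4 entries (nents == 4); an IOMMU may merge
     * them into fewer DMA segments, e.g. dma_map_sg() returns 2. The
     * CPU-side checks must still cover all 4 original entries. */
    int mapped_ents = dma_map_sg(dev, sgl, 4, DMA_TO_DEVICE);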

 		if (!i)
 			mapped_ents = get_nr_mapped_entries(dev, &ref);
 
 		check_unmap(&ref);
 	}
 }
 
 void debug_dma_alloc_coherent(struct device *dev, size_t size,
-			      dma_addr_t dma_addr, void *virt)
+			      dma_addr_t dma_addr, void *virt,
+			      unsigned long attrs)
 {
 	struct dma_debug_entry *entry;
 
 	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (unlikely(virt == NULL))
 		return;

--- 13 unchanged lines hidden ---

 	entry->dev_addr = dma_addr;
 	entry->direction = DMA_BIDIRECTIONAL;
 
 	if (is_vmalloc_addr(virt))
 		entry->pfn = vmalloc_to_pfn(virt);
 	else
 		entry->pfn = page_to_pfn(virt_to_page(virt));
 
-	add_dma_entry(entry);
+	add_dma_entry(entry, attrs);
 }
 
 void debug_dma_free_coherent(struct device *dev, size_t size,
 			     void *virt, dma_addr_t addr)
 {
 	struct dma_debug_entry ref = {
 		.type = dma_debug_coherent,
 		.dev = dev,

--- 14 unchanged lines hidden ---
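Similarly, a sketch of the expected call-site update for the coherent path (assuming dma_alloc_attrs() in kernel/dma/mapping.c, not shown in this diff):

    void *dma_alloc_attrs(struct device *dev, size_t size,
                          dma_addr_t *dma_handle, gfp_t flag,
                          unsigned long attrs)
    {
        void *cpu_addr;

        /* ... allocate through the dma_map_ops or direct mapping ... */
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
        return cpu_addr;
    }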

 
 	if (unlikely(dma_debug_disabled()))
 		return;
 
 	check_unmap(&ref);
 }
 
 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
-			    int direction, dma_addr_t dma_addr)
+			    int direction, dma_addr_t dma_addr,
+			    unsigned long attrs)
 {
 	struct dma_debug_entry *entry;
 
 	if (unlikely(dma_debug_disabled()))
 		return;
 
 	entry = dma_entry_alloc();
 	if (!entry)
 		return;
 
 	entry->type = dma_debug_resource;
 	entry->dev = dev;
 	entry->pfn = PHYS_PFN(addr);
 	entry->offset = offset_in_page(addr);
 	entry->size = size;
 	entry->dev_addr = dma_addr;
 	entry->direction = direction;
 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
 
-	add_dma_entry(entry);
+	add_dma_entry(entry, attrs);
 }
 
 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
 			      size_t size, int direction)
 {
 	struct dma_debug_entry ref = {
 		.type = dma_debug_resource,
 		.dev = dev,

--- 129 unchanged lines hidden ---
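Finally, note that debug_dma_unmap_resource() keeps its old signature: only the insert path consults attrs, so the unmap side needs no change. On the map side, dma_map_resource() already takes an attrs parameter, so a caller-side sketch needs no new plumbing (assuming the call site in kernel/dma/mapping.c):

    dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
                                size_t size, enum dma_data_direction dir,
                                unsigned long attrs)
    {
        dma_addr_t addr;

        /* ... map the MMIO range through the dma_map_ops ... */
        debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
        return addr;
    }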