// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Is the device behaving sanely? */
	bool broken;
};

static DEFINE_XARRAY(xen_grant_dma_devices);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}

static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;

	xa_lock(&xen_grant_dma_devices);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock(&xen_grant_dma_devices);

	return data;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit is for the backend
 * to be able to distinguish it from e.g. an MMIO address).
 *
 * Note that for now we hard wire dom0 to be the backend domain. In order
 * to support any domain as backend we'd need to add a way to communicate
 * the domid of this backend, e.g. via Xenstore, via the PCI-device's
 * config space or DT/ACPI.
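 *
 * Worked example of the encoding (illustrative, assuming PAGE_SHIFT == 12):
 * grant reference 0x1234 is encoded by grant_to_dma() as
 * (1ULL << 63) | (0x1234ULL << 12) == 0x8000000001234000, and
 * dma_to_grant() recovers 0x1234 by clearing bit 63 and shifting right
 * by PAGE_SHIFT.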
 */
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
						pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
}

static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

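	/*
	 * Delegate to the grant-based coherent allocator above; the returned
	 * vaddr comes from alloc_pages_exact(), so virt_to_page() below is
	 * valid for it.
	 */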
	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
						xen_page_to_gfn(page) + i,
						dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + offset;

	return dma_handle;
}

static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
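			/*
			 * The backend still holds this grant, so the
			 * reference must not be freed or reused; mark the
			 * device broken and leak the remaining references
			 * instead.
			 */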
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
					 attrs);
}

static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
							s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

void xen_grant_setup_dma_ops(struct device *dev)
{
	struct xen_grant_dma_data *data;
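	/*
	 * Per-device data lives in the xen_grant_dma_devices xarray, keyed
	 * by the device pointer; a device must only be set up here once.
	 */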

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	/* XXX The dom0 is hardcoded as the backend domain for now */
	data->backend_domid = 0;

	if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			    GFP_KERNEL))) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");