// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "../habanalabs.h"
#include "../../include/hw_ip/pci/pci_general.h"

#include <linux/pci.h>

/* Palladium (pre-silicon emulation, hdev->pldm) runs far slower than real
 * hardware, so ELBI transactions there get a 100x longer timeout.
 */
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC	(HL_PCI_ELBI_TIMEOUT_MSEC * 100)

/* Fields of the iATU region control register (offset 0x4 within a region) */
#define IATU_REGION_CTRL_REGION_EN_MASK		BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK	BIT(30)
#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK	BIT(19)
#define IATU_REGION_CTRL_BAR_NUM_MASK		GENMASK(10, 8)

/**
 * hl_pci_bars_map() - Map PCI BARs.
 * @hdev: Pointer to hl_device structure.
 * @name: Array of BAR names.
 * @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
 *
 * Request PCI regions and map them to kernel virtual addresses.
 *
 * Only the three 64-bit BARs (0, 2, 4) are mapped; mappings are stored in
 * hdev->pcie_bar[] indexed by the physical BAR number.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
			bool is_wc[3])
{
	struct pci_dev *pdev = hdev->pdev;
	int rc, i, bar;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	for (i = 0 ; i < 3 ; i++) {
		bar = i * 2; /* 64-bit BARs */
		hdev->pcie_bar[bar] = is_wc[i] ?
				pci_ioremap_wc_bar(pdev, bar) :
				pci_ioremap_bar(pdev, bar);
		if (!hdev->pcie_bar[bar]) {
			dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
					is_wc[i] ? "_wc" : "", name[i]);
			rc = -ENODEV;
			goto err;
		}
	}

	return 0;

err:
	/* Unmap only what was successfully mapped before the failure; the
	 * NULL check skips BARs that were never reached (assumes untouched
	 * pcie_bar[] entries start out NULL — TODO confirm at alloc site).
	 */
	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		if (hdev->pcie_bar[bar])
			iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);

	return rc;
}

/**
 * hl_pci_bars_unmap() - Unmap PCI BARS.
 * @hdev: Pointer to hl_device structure.
 *
 * Release all PCI BARs and unmap their virtual addresses.
74 */ 75 static void hl_pci_bars_unmap(struct hl_device *hdev) 76 { 77 struct pci_dev *pdev = hdev->pdev; 78 int i, bar; 79 80 for (i = 2 ; i >= 0 ; i--) { 81 bar = i * 2; /* 64-bit BARs */ 82 iounmap(hdev->pcie_bar[bar]); 83 } 84 85 pci_release_regions(pdev); 86 } 87 88 int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data) 89 { 90 struct pci_dev *pdev = hdev->pdev; 91 ktime_t timeout; 92 u64 msec; 93 u32 val; 94 95 if (hdev->pldm) 96 msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC; 97 else 98 msec = HL_PCI_ELBI_TIMEOUT_MSEC; 99 100 /* Clear previous status */ 101 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0); 102 103 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr); 104 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, 0); 105 106 timeout = ktime_add_ms(ktime_get(), msec); 107 for (;;) { 108 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val); 109 if (val & PCI_CONFIG_ELBI_STS_MASK) 110 break; 111 if (ktime_compare(ktime_get(), timeout) > 0) { 112 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 113 &val); 114 break; 115 } 116 117 usleep_range(300, 500); 118 } 119 120 if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) { 121 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data); 122 123 return 0; 124 } 125 126 if (val & PCI_CONFIG_ELBI_STS_ERR) { 127 dev_err(hdev->dev, "Error reading from ELBI\n"); 128 return -EIO; 129 } 130 131 if (!(val & PCI_CONFIG_ELBI_STS_MASK)) { 132 dev_err(hdev->dev, "ELBI read didn't finish in time\n"); 133 return -EIO; 134 } 135 136 dev_err(hdev->dev, "ELBI read has undefined bits in status\n"); 137 return -EIO; 138 } 139 140 /** 141 * hl_pci_elbi_write() - Write through the ELBI interface. 142 * @hdev: Pointer to hl_device structure. 143 * @addr: Address to write to 144 * @data: Data to write 145 * 146 * Return: 0 on success, negative value for failure. 
 */
static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u64 msec;
	u32 val;

	/* Pre-silicon platform is much slower - use the extended timeout */
	if (hdev->pldm)
		msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
	else
		msec = HL_PCI_ELBI_TIMEOUT_MSEC;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	/* Program address and data, then trigger the write transaction */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	/* Poll until a status bit is raised or the timeout expires; re-read
	 * once after the timeout in case completion raced with the deadline.
	 */
	timeout = ktime_add_ms(ktime_get(), msec);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}

		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	/* NOTE(review): unlike the read path, the ERR case here is silent
	 * (no dev_err) — presumably because callers such as
	 * hl_pci_iatu_write() deliberately ignore failures when firmware
	 * security is enabled; confirm before adding logging.
	 */
	if (val & PCI_CONFIG_ELBI_STS_ERR)
		return -EIO;

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}

/**
 * hl_pci_iatu_write() - iatu write routine.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to
 * @data: Data to write
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 dbi_offset;
	int rc;

	/* Only the low 12 bits address within the DBI window */
	dbi_offset = addr & 0xFFF;

	/* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);

	rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
				data);

	if (rc)
		return -EIO;

	return 0;
}

/**
 * hl_pci_set_inbound_region() - Configure inbound region
 * @hdev: Pointer to hl_device structure.
 * @region: Inbound region number.
 * @pci_region: Inbound region parameters.
 *
 * Configure the iATU inbound region.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
		struct hl_inbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 bar_phys_base, region_base, region_end_address;
	u32 offset, ctrl_reg_val;
	int rc = 0;

	/* region offset: each region occupies 0x200 bytes of register space,
	 * inbound registers start at 0x100 within a region
	 */
	offset = (0x200 * region) + 0x100;

	if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
		/* Address-match mode: program the PCI-side window
		 * (base lower/upper and limit) from the BAR physical base
		 */
		bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
		region_base = bar_phys_base + pci_region->offset_in_bar;
		region_end_address = region_base + pci_region->size - 1;

		rc |= hl_pci_iatu_write(hdev, offset + 0x8,
				lower_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0xC,
				upper_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0x10,
				lower_32_bits(region_end_address));
	}

	/* Point to the specified address */
	rc |= hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(pci_region->addr));

	/* Set bar type as memory */
	rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);

	/* Enable + bar/address match + match enable + bar number */
	ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, pci_region->mode);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);

	if (pci_region->mode == PCI_BAR_MATCH_MODE)
		ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, pci_region->bar);

	/* Control register is written last so the region is only enabled
	 * after all its address registers are programmed
	 */
	rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);

	/* Return the DBI window to the default location
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	if (rc)
		dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
				pci_region->bar, pci_region->addr);

	return rc;
}

/**
 * hl_pci_set_outbound_region() - Configure outbound region 0
 * @hdev: Pointer to hl_device structure.
 * @pci_region: Outbound region parameters.
 *
 * Configure the iATU outbound region 0.
 *
 * Return: 0 on success, negative value for failure.
299 */ 300 int hl_pci_set_outbound_region(struct hl_device *hdev, 301 struct hl_outbound_pci_region *pci_region) 302 { 303 struct asic_fixed_properties *prop = &hdev->asic_prop; 304 u64 outbound_region_end_address; 305 int rc = 0; 306 307 /* Outbound Region 0 */ 308 outbound_region_end_address = 309 pci_region->addr + pci_region->size - 1; 310 rc |= hl_pci_iatu_write(hdev, 0x008, 311 lower_32_bits(pci_region->addr)); 312 rc |= hl_pci_iatu_write(hdev, 0x00C, 313 upper_32_bits(pci_region->addr)); 314 rc |= hl_pci_iatu_write(hdev, 0x010, 315 lower_32_bits(outbound_region_end_address)); 316 rc |= hl_pci_iatu_write(hdev, 0x014, 0); 317 318 rc |= hl_pci_iatu_write(hdev, 0x018, 0); 319 320 rc |= hl_pci_iatu_write(hdev, 0x020, 321 upper_32_bits(outbound_region_end_address)); 322 /* Increase region size */ 323 rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000); 324 /* Enable */ 325 rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000); 326 327 /* Return the DBI window to the default location 328 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail 329 * in case the firmware security is enabled 330 */ 331 hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0); 332 333 return rc; 334 } 335 336 /** 337 * hl_get_pci_memory_region() - get PCI region for given address 338 * @hdev: Pointer to hl_device structure. 339 * @addr: device address 340 * 341 * @return region index on success, otherwise PCI_REGION_NUMBER (invalid 342 * region index) 343 */ 344 enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr) 345 { 346 int i; 347 348 for (i = 0 ; i < PCI_REGION_NUMBER ; i++) { 349 struct pci_mem_region *region = &hdev->pci_mem_region[i]; 350 351 if (!region->used) 352 continue; 353 354 if ((addr >= region->region_base) && 355 (addr < region->region_base + region->region_size)) 356 return i; 357 } 358 359 return PCI_REGION_NUMBER; 360 } 361 362 /** 363 * hl_pci_init() - PCI initialization code. 364 * @hdev: Pointer to hl_device structure. 
 *
 * Set DMA masks, initialize the PCI controller and map the PCI BARs.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);

	/* BAR mapping and iATU programming are ASIC-specific, so they are
	 * dispatched through the per-ASIC function table
	 */
	rc = hdev->asic_funcs->pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to map PCI BAR addresses\n");
		goto disable_device;
	}

	rc = hdev->asic_funcs->init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "PCI controller was not initialized successfully\n");
		goto unmap_pci_bars;
	}

	/* Driver must sleep in order for FW to finish the iATU configuration */
	if (hdev->asic_prop.iatu_done_by_fw)
		usleep_range(2000, 3000);

	/* One call sets both the streaming and coherent DMA masks */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
	if (rc) {
		dev_err(hdev->dev,
			"Failed to set dma mask to %d bits, error %d\n",
			prop->dma_mask, rc);
		goto unmap_pci_bars;
	}

	dma_set_max_seg_size(&pdev->dev, U32_MAX);

	return 0;

unmap_pci_bars:
	hl_pci_bars_unmap(hdev);
disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}

/**
 * hl_pci_fini() - PCI finalization code.
 * @hdev: Pointer to hl_device structure
 *
 * Unmap PCI bars and disable PCI device.
 */
void hl_pci_fini(struct hl_device *hdev)
{
	/* Reverse order of hl_pci_init(): unmap, drop bus mastering, disable */
	hl_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);
}