--- pio.c (16217dc79dbc599b110dda26d0421df47904bba4)
+++ pio.c (60368186fd853899c1819bcefa47f85fe8d5e5ad)
@@ -1,8 +1,8 @@
 /*
  * Copyright(c) 2015, 2016 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
  *
  * GPL LICENSE SUMMARY
  *
@@ -546,36 +546,36 @@
 
 /* return the size of a group */
 static inline u32 group_size(u32 group)
 {
 	return 1 << group;
 }
 
 /*
- * Obtain the credit return addresses, kernel virtual and physical, for the
+ * Obtain the credit return addresses, kernel virtual and bus, for the
  * given sc.
  *
  * To understand this routine:
- * o va and pa are arrays of struct credit_return. One for each physical
+ * o va and dma are arrays of struct credit_return. One for each physical
  *   send context, per NUMA.
  * o Each send context always looks in its relative location in a struct
  *   credit_return for its credit return.
  * o Each send context in a group must have its return address CSR programmed
  *   with the same value. Use the address of the first send context in the
  *   group.
  */
-static void cr_group_addresses(struct send_context *sc, dma_addr_t *pa)
+static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
 {
 	u32 gc = group_context(sc->hw_context, sc->group);
 	u32 index = sc->hw_context & 0x7;
 
 	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
-	*pa = (unsigned long)
-		&((struct credit_return *)sc->dd->cr_base[sc->node].pa)[gc];
+	*dma = (unsigned long)
+		&((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
 }
 
 /*
  * Work queue function triggered in error interrupt routine for
  * kernel contexts.
  */
 static void sc_halted(struct work_struct *work)
 {
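The comment and cr_group_addresses() hunk above describe how a hardware send context finds its credit-return slot: contexts are grouped in powers of two, every context in a group shares one struct credit_return (addressed via the group's first context), and each context reads its own slot within it. The standalone sketch below walks through that index math. group_context() and the struct layout are not visible in this diff, so the definitions used here are simplifying assumptions for illustration only.

```c
/*
 * Standalone illustration of the credit-return addressing described in
 * the comment above.  The types, the table size, and group_context()
 * are simplified stand-ins (assumptions), not the driver's definitions.
 */
#include <stdint.h>
#include <stdio.h>

struct credit_return {
	uint64_t cr[8];		/* one slot per context in a group of <= 8 */
};

/* Assumed reading of group_context(): first hw context of the group. */
static uint32_t group_context(uint32_t hw_context, uint32_t group)
{
	return (hw_context >> group) << group;
}

int main(void)
{
	struct credit_return table[16];		/* stand-in for cr_base[node].va */
	uint32_t hw_context = 11, group = 2;	/* a group of 1 << 2 = 4 contexts */

	uint32_t gc = group_context(hw_context, group);	/* 8: group's first context */
	uint32_t index = hw_context & 0x7;		/* 3: slot inside the group */

	/* Each context polls its own slot for credit returns... */
	uint64_t *hw_free = &table[gc].cr[index];

	/*
	 * ...but every context in the group programs its credit return
	 * address CSR with the address of the group's first entry, so
	 * hardware writes the whole group's credits into one struct.
	 */
	printf("gc=%u index=%u slot=%p group base=%p\n",
	       (unsigned)gc, (unsigned)index,
	       (void *)hw_free, (void *)&table[gc]);
	return 0;
}
```

With these example numbers, contexts 8 through 11 would all point their CSR at &table[8], while their software-side hw_free pointers land in slots cr[0] through cr[3] of that same entry.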
@@ -705,17 +705,17 @@
  * Allocate a NUMA relative send context structure of the given type along
  * with a HW context.
  */
 struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
 			      uint hdrqentsize, int numa)
 {
 	struct send_context_info *sci;
 	struct send_context *sc = NULL;
-	dma_addr_t pa;
+	dma_addr_t dma;
 	unsigned long flags;
 	u64 reg;
 	u32 thresh;
 	u32 sw_index;
 	u32 hw_context;
 	int ret;
 	u8 opval, opmask;
 
@@ -758,17 +758,17 @@
 	INIT_WORK(&sc->halt_work, sc_halted);
 	init_waitqueue_head(&sc->halt_wait);
 
 	/* grouping is always single context for now */
 	sc->group = 0;
 
 	sc->sw_index = sw_index;
 	sc->hw_context = hw_context;
-	cr_group_addresses(sc, &pa);
+	cr_group_addresses(sc, &dma);
 	sc->credits = sci->credits;
 
 /* PIO Send Memory Address details */
 #define PIO_ADDR_CONTEXT_MASK 0xfful
 #define PIO_ADDR_CONTEXT_SHIFT 16
 	sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
 			<< PIO_ADDR_CONTEXT_SHIFT);
 
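The PIO_ADDR_CONTEXT_MASK/SHIFT lines in the hunk above carve the PIO send memory into per-context windows. A minimal sketch of that address arithmetic, with a made-up piobase value, just to show the stride the shift implies:

```c
#include <stdint.h>
#include <stdio.h>

#define PIO_ADDR_CONTEXT_MASK 0xfful
#define PIO_ADDR_CONTEXT_SHIFT 16

int main(void)
{
	uintptr_t piobase = 0xfd000000ul;	/* hypothetical BAR address */
	uint32_t hw_context = 5;

	uintptr_t base_addr = piobase +
		((hw_context & PIO_ADDR_CONTEXT_MASK) << PIO_ADDR_CONTEXT_SHIFT);

	/* context 5 -> piobase + 5 * 0x10000 (a 64 KiB stride per context) */
	printf("context %u PIO window starts at %#lx\n",
	       (unsigned)hw_context, (unsigned long)base_addr);
	return 0;
}
```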
@@ -800,17 +800,17 @@
 	}
 
 	/* set the send context check opcode mask and value */
 	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
 			((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
 			((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
 
 	/* set up credit return */
-	reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
+	reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
 	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
 
 	/*
 	 * Calculate the initial credit return threshold.
 	 *
 	 * For Ack contexts, set a threshold for half the credits.
 	 * For User contexts use the given percentage. This has been
 	 * sanitized on driver start-up.
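The comment at the end of the hunk above (cut off where the hunk ends) states the threshold policy: Ack contexts return credits at half their credit count, user contexts at a percentage chosen elsewhere and sanitized at driver start-up. A rough sketch of that rule, with a hypothetical helper name and integer rounding assumed:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the policy stated in the comment. */
static uint32_t initial_credit_threshold(uint32_t credits, int is_ack,
					 uint32_t user_percent)
{
	if (is_ack)
		return credits / 2;		/* Ack contexts: half the credits */
	return (credits * user_percent) / 100;	/* user contexts: given percent */
}

int main(void)
{
	printf("ack context, 64 credits -> threshold %u\n",
	       (unsigned)initial_credit_threshold(64, 1, 0));
	printf("user context, 64 credits at 33%% -> threshold %u\n",
	       (unsigned)initial_credit_threshold(64, 0, 33));
	return 0;
}
```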
@@ -2059,17 +2059,17 @@
 	}
 	for (i = 0; i < num_numa; i++) {
 		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
 
 		set_dev_node(&dd->pcidev->dev, i);
 		dd->cr_base[i].va = dma_zalloc_coherent(
 					&dd->pcidev->dev,
 					bytes,
-					&dd->cr_base[i].pa,
+					&dd->cr_base[i].dma,
 					GFP_KERNEL);
 		if (!dd->cr_base[i].va) {
 			set_dev_node(&dd->pcidev->dev, dd->node);
 			dd_dev_err(dd,
 				   "Unable to allocate credit return DMA range for NUMA %d\n",
 				   i);
 			ret = -ENOMEM;
 			goto done;
@@ -2092,14 +2092,14 @@
 
 	num_numa = num_online_nodes();
 	for (i = 0; i < num_numa; i++) {
 		if (dd->cr_base[i].va) {
 			dma_free_coherent(&dd->pcidev->dev,
 					  TXE_NUM_CONTEXTS *
 					  sizeof(struct credit_return),
 					  dd->cr_base[i].va,
-					  dd->cr_base[i].pa);
+					  dd->cr_base[i].dma);
 		}
 	}
 	kfree(dd->cr_base);
 	dd->cr_base = NULL;
 }
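The last two hunks rename the bus-address member of the per-NUMA credit-return buffers from pa to dma at the dma_zalloc_coherent()/dma_free_coherent() call sites. The sketch below shows the same allocation pattern in isolation: bias the coherent allocation toward each online NUMA node with set_dev_node() and keep both the kernel virtual and the bus address. The struct and function names here are hypothetical, and the error handling is simplified relative to the driver.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

/* Hypothetical container mirroring the va/dma pair kept per NUMA node. */
struct cr_buf {
	void *va;	/* kernel virtual address, polled by the CPU */
	dma_addr_t dma;	/* bus address, programmed into the credit return CSR */
};

static int alloc_per_node_bufs(struct device *dev, int home_node,
			       size_t bytes, struct cr_buf **out)
{
	int i, num_numa = num_online_nodes();
	struct cr_buf *bufs = kcalloc(num_numa, sizeof(*bufs), GFP_KERNEL);

	if (!bufs)
		return -ENOMEM;

	for (i = 0; i < num_numa; i++) {
		/* Bias the coherent allocation toward node i, as above. */
		set_dev_node(dev, i);
		bufs[i].va = dma_zalloc_coherent(dev, bytes, &bufs[i].dma,
						 GFP_KERNEL);
		if (!bufs[i].va)
			break;
	}
	set_dev_node(dev, home_node);	/* restore the device's home node */

	if (i < num_numa) {		/* unwind a partial allocation */
		while (--i >= 0)
			dma_free_coherent(dev, bytes, bufs[i].va, bufs[i].dma);
		kfree(bufs);
		return -ENOMEM;
	}

	*out = bufs;
	return 0;
}
```

Pointing the device at node i before each allocation makes dma_zalloc_coherent() prefer memory local to that node, which is the point of keeping one credit-return buffer per NUMA node; teardown must then pass the same size and dma handle back to dma_free_coherent(), exactly as the second hunk's loop does.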