/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

#define DRV_NAME "LiquidIO"

/** This structure is used by the NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8. Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16. Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24. Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-40. Piggybacked soft command, if any. */
	struct octeon_soft_command *sc;
};
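
/* Example (illustrative sketch, not part of the driver API): a transmit
 * path can stash this bookkeeping in driver-private storage such as
 * skb->cb before posting the packet, so the TX completion handler can
 * later unmap and free it. The variables lio, skb and oct are assumed
 * to be in scope.
 *
 *	struct octnet_buf_free_info *finfo;
 *
 *	finfo = (struct octnet_buf_free_info *)skb->cb;
 *	finfo->lio = lio;
 *	finfo->skb = skb;
 *	finfo->g = NULL;
 *	finfo->sc = NULL;
 *	finfo->dptr = dma_map_single(&oct->pci_dev->dev, skb->data,
 *				     skb->len, DMA_TO_DEVICE);
 */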

/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);

/** Byte-swap each of @blocks consecutive 8-byte words in place
 * (CPU to big-endian byte order).
 */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}

/**
 * \brief Unmaps a PCI BAR.
 * @param oct Pointer to Octeon device
 * @param baridx BAR index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief Maps a PCI BAR.
 * @param oct Pointer to Octeon device
 * @param baridx BAR index
 * @param max_map_len maximum length of mapped memory
 *
 * Returns 0 on success, 1 on failure (callers only test for non-zero).
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	/* Map no more than max_map_len bytes, if a limit was given. */
	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}

/* Allocate a physically contiguous, 8-byte-aligned buffer of at least
 * @size bytes, preferring @numa_node. On return, *alloc_size holds the
 * size actually requested and *orig_ptr the unaligned address needed to
 * free the buffer later. Returns NULL on allocation failure.
 */
static inline void *
cnnic_numa_alloc_aligned_dma(u32 size,
			     u32 *alloc_size,
			     size_t *orig_ptr,
			     int numa_node)
{
	int retries = 0;
	void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES 1
	do {
		struct page *page = NULL;

		page = alloc_pages_node(numa_node,
					GFP_KERNEL,
					get_order(size));
		if (!page)
			page = alloc_pages(GFP_KERNEL,
					   get_order(size));
		if (!page)
			break;	/* out of memory; give up */
		ptr = page_address(page);
		if ((unsigned long)ptr & 0x07) {
			__free_pages(page, get_order(size));
			ptr = NULL;
			/* Increase the size required if the first
			 * attempt failed.
			 */
			if (!retries)
				size += 7;
		}
		retries++;
	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

	*alloc_size = size;
	*orig_ptr = (unsigned long)ptr;
	if ((unsigned long)ptr & 0x07)
		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
	return ptr;
}

#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
		free_pages(orig_ptr, get_order(size))
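
/* Example (illustrative sketch): allocating and freeing an 8-byte-aligned
 * buffer with the helpers above. alloc_size and orig_ptr must be kept
 * around for the free, since the returned pointer may have been rounded
 * up. buf_size, numa_node, oct and dma_addr are assumed to be in scope.
 *
 *	u32 alloc_size;
 *	size_t orig_ptr;
 *	void *buf;
 *
 *	buf = cnnic_numa_alloc_aligned_dma(buf_size, &alloc_size,
 *					   &orig_ptr, numa_node);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	cnnic_free_aligned_dma(oct->pci_dev, buf, alloc_size,
 *			       orig_ptr, dma_addr);
 */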

/* Sleep on @wait_queue until *condition becomes true or a signal is
 * pending. Returns 0 when the condition was met, -EINTR on signal.
 */
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	int errno = 0;
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	for (;;) {
		/* Set the task state before testing the condition so a
		 * wakeup that fires between the test and schedule() is
		 * not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(*condition))
			break;
		if (signal_pending(current)) {
			errno = -EINTR;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
	return errno;
}

/* Give up the CPU for at most @timeout jiffies. The task state is set
 * before the condition is checked, so a wakeup that has already fired
 * is not missed.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!READ_ONCE(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}

/* Round @val up to the next multiple of 4, 8, 16 or 128. The masks are
 * written as ~(align - 1) so the macros remain correct for 64-bit values.
 */
#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & ~0x03)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & ~0x07)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & ~0x0f)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & ~0x7f)
#endif

#endif /* _OCTEON_MAIN_H_ */
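
/* Example (illustrative sketch; wq and done are assumed to be a wait
 * queue head and completion flag initialized elsewhere, with done set
 * and wake_up() called from the response handler):
 *
 *	if (sleep_cond(&wq, &done) == -EINTR)
 *		return -EINTR;
 *
 * or, to sleep for at most one second instead of blocking indefinitely:
 *
 *	sleep_timeout_cond(&wq, &done, msecs_to_jiffies(1000));
 */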