/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/

/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

#define DRV_NAME "LiquidIO"

/**
 * \brief Determines whether a given console has debug output enabled.
 * @param console console to check
 * @returns 1 if enabled, 0 otherwise
 */
int octeon_console_debug_enabled(u32 console);

/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);

/** Convert each of the given 8-byte blocks to big-endian, in place. */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}

/**
 * \brief Unmaps a PCI BAR and releases its region.
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief Maps a PCI BAR.
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory; 0 means no limit
 * @returns 0 on success, 1 on failure
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	/* The device exposes 64-bit BARs, each of which occupies two
	 * 32-bit BAR slots; hence the doubled index.
	 */
	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		return 1;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		return 1;
	}
	oct->mmio[baridx].done = 1;

	return 0;
}
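/* Illustrative sketch only, not part of the original driver: a typical
 * pairing of the two BAR helpers above. The function name and the register
 * offset are hypothetical; real callers map BAR0/BAR1 during device setup.
 */
static inline int octeon_example_bar_access(struct octeon_device *oct)
{
	u32 val;

	/* Map all of BAR0 (a max_map_len of 0 places no cap on the length). */
	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	/* Registers are then accessed through the cached mapping. */
	val = readl(oct->mmio[0].hw_addr);	/* hypothetical offset 0 */
	(void)val;

	octeon_unmap_pci_barx(oct, 0);
	return 0;
}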
static inline void *
cnnic_numa_alloc_aligned_dma(u32 size,
			     u32 *alloc_size,
			     size_t *orig_ptr,
			     int numa_node)
{
	int retries = 0;
	void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES     1
	do {
		struct page *page = NULL;

		page = alloc_pages_node(numa_node,
					GFP_KERNEL,
					get_order(size));
		if (!page)
			page = alloc_pages(GFP_KERNEL,
					   get_order(size));
		/* Bail out if both allocation attempts failed; passing a
		 * NULL page to page_address() would crash.
		 */
		if (!page)
			break;
		ptr = (void *)page_address(page);
		if ((unsigned long)ptr & 0x07) {
			__free_pages(page, get_order(size));
			ptr = NULL;
			/* Increment the size required if the first
			 * attempt failed.
			 */
			if (!retries)
				size += 7;
		}
		retries++;
	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

	*alloc_size = size;
	*orig_ptr = (unsigned long)ptr;
	if ((unsigned long)ptr & 0x07)
		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
	return ptr;
}

/* The allocation above is page-based, so freeing needs only the original
 * (pre-alignment) pointer and the size; the remaining arguments are unused.
 */
#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
		free_pages(orig_ptr, get_order(size))

/** Sleep interruptibly until *condition becomes non-zero or a signal
 *  arrives. The waker must set the condition and then wake the queue.
 */
static inline void
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	while (!(READ_ONCE(*condition))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			goto out;
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}

/** Like sleep_cond(), but the condition is an atomic_t. */
static inline void
sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(waitq, &we);
	while (!atomic_read(pcond)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			goto out;
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(waitq, &we);
}

/* Gives up the CPU for at most a timeout period, unless the condition is
 * already true when checked. Returns when the timeout expires or the task
 * is woken, whichever comes first; the condition is not re-checked.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}

/* Round up to the next multiple of N. Note that the masks assume values
 * that fit in 32 bits.
 */
#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif

#endif /* _OCTEON_MAIN_H_ */
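/* Illustrative sketch only, not part of the original driver: the intended
 * pairing for sleep_cond(). All names below are hypothetical. WRITE_ONCE()
 * in the waker pairs with the READ_ONCE() inside sleep_cond().
 *
 *	// waiter context
 *	int done = 0;
 *	wait_queue_head_t wq;
 *
 *	init_waitqueue_head(&wq);
 *	issue_request(&done, &wq);	// hypothetical helper
 *	sleep_cond(&wq, &done);		// blocks until done != 0 or a signal
 *
 *	// completion context (e.g. an IRQ-driven callback)
 *	WRITE_ONCE(*done_ptr, 1);
 *	wake_up(wq_ptr);
 */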