/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#include <linux/sched/signal.h>

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

#define DRV_NAME "LiquidIO"

/** This structure is used by the NIC driver to store information required
 *  to free the sk_buff when the packet has been fetched by Octeon.
 *  The byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8. Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16. Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24. Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-40. Piggybacked soft command, if any. */
	struct octeon_soft_command *sc;
};
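
/* Illustrative sketch of how a transmit path might use the structure
 * above; it is not part of this header's API, and the 'lio', 'skb' and
 * 'oct' variables are assumed to exist in the caller.  The bookkeeping
 * would typically be stashed in the skb's control buffer so that the
 * transmit-completion path can unmap and free the buffer once Octeon
 * has fetched the packet:
 *
 *	struct octnet_buf_free_info *finfo;
 *
 *	finfo = (struct octnet_buf_free_info *)skb->cb;
 *	finfo->lio = lio;
 *	finfo->skb = skb;
 *	finfo->sc = NULL;
 *	finfo->dptr = dma_map_single(&oct->pci_dev->dev, skb->data,
 *				     skb->len, DMA_TO_DEVICE);
 */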
/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);

/** Swap 8B blocks */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}

/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}

/* Sleep until *condition becomes true or a signal is pending.  Returns 0
 * if the condition became true, or -EINTR if interrupted by a signal.
 */
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	int errno = 0;
	wait_queue_entry_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	for (;;) {
		/* Set the task state before checking the condition so a
		 * wakeup between the check and schedule() is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(*condition))
			break;
		if (signal_pending(current)) {
			errno = -EINTR;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
	return errno;
}
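
/* Illustrative sketch of the pairing sleep_cond() expects; the wait-queue
 * and flag names here are hypothetical.  The completion side must publish
 * the condition before issuing the wakeup:
 *
 *	// waiter context
 *	static DECLARE_WAIT_QUEUE_HEAD(resp_wq);
 *	static int resp_done;
 *
 *	if (sleep_cond(&resp_wq, &resp_done) == -EINTR)
 *		return -EINTR;		// interrupted by a signal
 *
 *	// completion context (e.g. response-list processing)
 *	WRITE_ONCE(resp_done, 1);	// pairs with READ_ONCE() above
 *	wake_up(&resp_wq);
 */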
/* Gives up the CPU for a timeout period.  Sleeps for at most 'timeout'
 * jiffies, but only after checking that the condition is not already true.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_entry_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!READ_ONCE(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}

#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif

#endif /* _OCTEON_MAIN_H_ */