/*
 * drivers/net/ethernet/mellanox/mlxsw/item.h
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _MLXSW_ITEM_H
#define _MLXSW_ITEM_H

#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>

struct mlxsw_item {
	unsigned short offset;		/* bytes in container */
	unsigned short step;		/* step in bytes for indexed items */
	unsigned short in_step_offset;	/* offset within one step */
	unsigned char shift;		/* shift in bits */
	unsigned char element_size;	/* size of element in bit array */
	bool no_real_shift;
	union {
		unsigned char bits;
		unsigned short bytes;
	} size;
	const char *name;
};

static inline unsigned int
__mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
		    size_t typesize)
{
	BUG_ON(index && !item->step);
	if (item->offset % typesize != 0 ||
	    item->step % typesize != 0 ||
	    item->in_step_offset % typesize != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
		       item->name, item->offset, item->step,
		       item->in_step_offset, typesize);
		BUG();
	}

	return ((item->offset + item->step * index + item->in_step_offset) /
		typesize);
}

static inline u8 __mlxsw_item_get8(const char *buf,
				   const struct mlxsw_item *item,
				   unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8));
	u8 *b = (u8 *) buf;
	u8 tmp;

	tmp = b[offset];
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item,
				     unsigned short index, u8 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u8));
	u8 *b = (u8 *) buf;
	u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u8 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = b[offset];
	tmp &= ~mask;
	tmp |= val;
	b[offset] = tmp;
}
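/* Example (hypothetical item values, for illustration only): with
 * .shift = 4 and .size.bits = 3, __mlxsw_item_get8() returns
 * (buf_byte >> 4) & GENMASK(2, 0), and __mlxsw_item_set8() clears
 * bits 6:4 of the stored byte before or-ing in the new value.
 * When .no_real_shift is set, the value is not normalized to bit 0:
 * the getter shifts the masked field back into its original bit
 * position, and the setter expects val to already be positioned at
 * that bit offset.  The 16/32/64 bit helpers below work the same way
 * on big-endian words.
 */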
static inline u16 __mlxsw_item_get16(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 tmp;

	tmp = be16_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set16(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u16 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u16));
	__be16 *b = (__be16 *) buf;
	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u16 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be16_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be16(tmp);
}

static inline u32 __mlxsw_item_get32(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 tmp;

	tmp = be32_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set32(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u32 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index,
						  sizeof(u32));
	__be32 *b = (__be32 *) buf;
	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u32 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be32_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be32(tmp);
}

static inline u64 __mlxsw_item_get64(const char *buf,
				     const struct mlxsw_item *item,
				     unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 tmp;

	tmp = be64_to_cpu(b[offset]);
	tmp >>= item->shift;
	tmp &= GENMASK_ULL(item->size.bits - 1, 0);
	if (item->no_real_shift)
		tmp <<= item->shift;
	return tmp;
}

static inline void __mlxsw_item_set64(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u64 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *b = (__be64 *) buf;
	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
	u64 tmp;

	if (!item->no_real_shift)
		val <<= item->shift;
	val &= mask;
	tmp = be64_to_cpu(b[offset]);
	tmp &= ~mask;
	tmp |= val;
	b[offset] = cpu_to_be64(tmp);
}

static inline void __mlxsw_item_memcpy_from(const char *buf, char *dst,
					    const struct mlxsw_item *item,
					    unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	memcpy(dst, &buf[offset], item->size.bytes);
}

static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
					  const struct mlxsw_item *item,
					  unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	memcpy(&buf[offset], src, item->size.bytes);
}

static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item,
				      unsigned short index)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));

	return &buf[offset];
}

static inline u16
__mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
			      u16 index, u8 *shift)
{
	u16 max_index, be_index;
	u16 offset;		/* byte offset inside the array */
	u8 in_byte_index;

	BUG_ON(index && !item->element_size);
	if (item->offset % sizeof(u32) != 0 ||
	    BITS_PER_BYTE % item->element_size != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
		       item->name, item->offset, item->element_size);
		BUG();
	}

	max_index = (item->size.bytes << 3) / item->element_size - 1;
	be_index = max_index - index;
	offset = be_index * item->element_size >> 3;
	in_byte_index = index % (BITS_PER_BYTE / item->element_size);
	*shift = in_byte_index * item->element_size;

	return item->offset + offset;
}

static inline u8 __mlxsw_item_bit_array_get(const char *buf,
					    const struct mlxsw_item *item,
					    u16 index)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);

	tmp = buf[offset];
	tmp >>= shift;
	tmp &= GENMASK(item->element_size - 1, 0);
	return tmp;
}

static inline void __mlxsw_item_bit_array_set(char *buf,
					      const struct mlxsw_item *item,
					      u16 index, u8 val)
{
	u8 shift, tmp;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
	u8 mask = GENMASK(item->element_size - 1, 0) << shift;

	val <<= shift;
	val &= mask;
	tmp = buf[offset];
	tmp &= ~mask;
	tmp |= val;
	buf[offset] = tmp;
}
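/* Layout note (worked example with hypothetical sizes, for illustration
 * only): bit array items pack .element_size-bit elements big-endian, so
 * the element with the highest index lands in the most significant bits
 * of the first byte.  With .size.bytes = 2 and .element_size = 2 there
 * are 8 elements; index 7 occupies bits 7:6 of the byte at .offset,
 * while index 0 occupies bits 1:0 of the byte at .offset + 1.
 */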
#define __ITEM_NAME(_type, _cname, _iname) \
	mlxsw_##_type##_##_cname##_##_iname##_item

/* _type: cmd_mbox, reg, etc.
 * _cname: container name (e.g. command name, register name)
 * _iname: item name within the container
 */
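/* Usage sketch (hypothetical names, for illustration only):
 *
 *	MLXSW_ITEM32(reg, example, local_port, 0x00, 16, 8);
 *
 * defines a static struct mlxsw_item plus a pair of inline accessors,
 * mlxsw_reg_example_local_port_get(buf) and
 * mlxsw_reg_example_local_port_set(buf, val), which read and write an
 * 8-bit field held in bits 23:16 of the big-endian 32-bit word at byte
 * offset 0x00 of the register payload.
 */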
#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
	return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\
{ \
	__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
			    _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u8 \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
	return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \
				 index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u8 val) \
{ \
	__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), \
			  index, val); \
}

#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
{ \
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
			     _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u16 \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u16 val) \
{ \
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}

#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
{ \
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
			     _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u32 \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u32 val) \
{ \
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}

#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.shift = _shift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
{ \
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}

#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, \
			     _sizebits, _step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.shift = _shift, \
	.no_real_shift = _norealshift, \
	.size = {.bits = _sizebits,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u64 \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
				  index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
					  u64 val) \
{ \
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \
			   index, val); \
}
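/* Usage sketch (hypothetical names, for illustration only): indexed items
 * describe one field of a record that repeats every _step bytes, e.g.
 *
 *	MLXSW_ITEM32_INDEXED(reg, example, rec_state, 0x10, 24, 4,
 *			     0x08, 0x00, false);
 *
 * generates mlxsw_reg_example_rec_state_get(buf, index) and
 * mlxsw_reg_example_rec_state_set(buf, index, val), which access bits
 * 27:24 of the 32-bit word at byte offset 0x10 + 0x08 * index.
 */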
#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.size = {.bytes = _sizebytes,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \
{ \
	__mlxsw_item_memcpy_from(buf, dst, \
				 &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
{ \
	__mlxsw_item_memcpy_to(buf, src, \
			       &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
static inline char * \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \
{ \
	return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
}

#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \
			       _step, _instepoffset) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.step = _step, \
	.in_step_offset = _instepoffset, \
	.size = {.bytes = _sizebytes,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
						  unsigned short index, \
						  char *dst) \
{ \
	__mlxsw_item_memcpy_from(buf, dst, \
				 &__ITEM_NAME(_type, _cname, _iname), index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
						unsigned short index, \
						const char *src) \
{ \
	__mlxsw_item_memcpy_to(buf, src, \
			       &__ITEM_NAME(_type, _cname, _iname), index); \
} \
static inline char * \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \
{ \
	return __mlxsw_item_data(buf, \
				 &__ITEM_NAME(_type, _cname, _iname), index); \
}

#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
			     _element_size) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
	.offset = _offset, \
	.element_size = _element_size, \
	.size = {.bytes = _sizebytes,}, \
	.name = #_type "_" #_cname "_" #_iname, \
}; \
static inline u8 \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
{ \
	return __mlxsw_item_bit_array_get(buf, \
					  &__ITEM_NAME(_type, _cname, _iname), \
					  index); \
} \
static inline void \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
{ \
	__mlxsw_item_bit_array_set(buf, \
				   &__ITEM_NAME(_type, _cname, _iname), \
				   index, val); \
}

#endif