// SPDX-License-Identifier: GPL-2.0-only
/*
 * Firmware I/O code for mac80211 Prism54 drivers
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 *
 * Based on:
 * - the islsm (softmac prism54) driver, which is:
 *   Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
 * - stlc45xx driver
 *   Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
 */

#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/export.h>

#include <net/mac80211.h>

#include "p54.h"
#include "eeprom.h"
#include "lmac.h"

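/*
 * p54_parse_firmware - pick the relevant bits out of the firmware image
 *
 * After an initial region of boot code and zero padding (skipped below),
 * the image carries a chain of "bootrec" records, each made up of a code,
 * a length and a payload. A single pass over this chain caches everything
 * the driver needs later on: the LMAC interface variant (priv->fw_var),
 * the rx/tx buffer window and rx MTU, head- and tailroom requirements,
 * the crypto offload capabilities and the size of the rx key cache.
 */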
int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
{
	struct p54_common *priv = dev->priv;
	struct exp_if *exp_if;
	struct bootrec *bootrec;
	u32 *data = (u32 *)fw->data;
	u32 *end_data = (u32 *)fw->data + (fw->size >> 2);
	u8 *fw_version = NULL;
	size_t len;
	int i;
	int maxlen;

	if (priv->rx_start)
		return 0;

	while (data < end_data && *data)
		data++;

	while (data < end_data && !*data)
		data++;

	bootrec = (struct bootrec *) data;

	while (bootrec->data <= end_data && (bootrec->data +
	       (len = le32_to_cpu(bootrec->len))) <= end_data) {
		u32 code = le32_to_cpu(bootrec->code);
		switch (code) {
		case BR_CODE_COMPONENT_ID:
			priv->fw_interface = be32_to_cpup((__be32 *)
					     bootrec->data);
			switch (priv->fw_interface) {
			case FW_LM86:
			case FW_LM20:
			case FW_LM87: {
				char *iftype = (char *)bootrec->data;
				wiphy_info(priv->hw->wiphy,
					   "p54 detected a LM%c%c firmware\n",
					   iftype[2], iftype[3]);
				break;
			}
			case FW_FMAC:
			default:
				wiphy_err(priv->hw->wiphy,
					  "unsupported firmware\n");
				return -ENODEV;
			}
			break;
		case BR_CODE_COMPONENT_VERSION:
			/* 24 bytes should be enough for all firmwares */
			if (strnlen((unsigned char *) bootrec->data, 24) < 24)
				fw_version = (unsigned char *) bootrec->data;
			break;
		case BR_CODE_DESCR: {
			struct bootrec_desc *desc =
				(struct bootrec_desc *)bootrec->data;
			priv->rx_start = le32_to_cpu(desc->rx_start);
			/* FIXME add sanity checking */
			priv->rx_end = le32_to_cpu(desc->rx_end) - 0x3500;
			priv->headroom = desc->headroom;
			priv->tailroom = desc->tailroom;
			priv->privacy_caps = desc->privacy_caps;
			priv->rx_keycache_size = desc->rx_keycache_size;
			if (le32_to_cpu(bootrec->len) == 11)
				priv->rx_mtu = le16_to_cpu(desc->rx_mtu);
			else
				priv->rx_mtu = (size_t)
					0x620 - priv->tx_hdr_len;
			maxlen = priv->tx_hdr_len + /* USB devices */
				 sizeof(struct p54_rx_data) +
				 4 + /* rx alignment */
				 IEEE80211_MAX_FRAG_THRESHOLD;
			if (priv->rx_mtu > maxlen && PAGE_SIZE == 4096) {
				printk(KERN_INFO "p54: rx_mtu reduced from %d "
				       "to %d\n", priv->rx_mtu, maxlen);
				priv->rx_mtu = maxlen;
			}
			break;
		}
		case BR_CODE_EXPOSED_IF:
			exp_if = (struct exp_if *) bootrec->data;
			for (i = 0; i < (len * sizeof(*exp_if) / 4); i++)
				if (exp_if[i].if_id == cpu_to_le16(IF_ID_LMAC))
					priv->fw_var = le16_to_cpu(exp_if[i].variant);
			break;
		case BR_CODE_DEPENDENT_IF:
			break;
		case BR_CODE_END_OF_BRA:
		case LEGACY_BR_CODE_END_OF_BRA:
			end_data = NULL;
			break;
		default:
			break;
		}
		bootrec = (struct bootrec *)&bootrec->data[len];
	}

	if (fw_version) {
		wiphy_info(priv->hw->wiphy,
			   "FW rev %s - Softmac protocol %x.%x\n",
			   fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
		snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
			 "%s - %x.%x", fw_version,
			 priv->fw_var >> 8, priv->fw_var & 0xff);
	}

	if (priv->fw_var < 0x500)
		wiphy_info(priv->hw->wiphy,
			   "you are using an obsolete firmware. "
			   "visit http://wireless.wiki.kernel.org/en/users/Drivers/p54 "
			   "and grab one for \"kernel >= 2.6.28\"!\n");

	if (priv->fw_var >= 0x300) {
		/* Firmware supports QoS, use it! */

		if (priv->fw_var >= 0x500) {
			priv->tx_stats[P54_QUEUE_AC_VO].limit = 16;
			priv->tx_stats[P54_QUEUE_AC_VI].limit = 16;
			priv->tx_stats[P54_QUEUE_AC_BE].limit = 16;
			priv->tx_stats[P54_QUEUE_AC_BK].limit = 16;
		} else {
			priv->tx_stats[P54_QUEUE_AC_VO].limit = 3;
			priv->tx_stats[P54_QUEUE_AC_VI].limit = 4;
			priv->tx_stats[P54_QUEUE_AC_BE].limit = 3;
			priv->tx_stats[P54_QUEUE_AC_BK].limit = 2;
		}
		priv->hw->queues = P54_QUEUE_AC_NUM;
	}

	wiphy_info(priv->hw->wiphy,
		   "cryptographic accelerator WEP:%s, TKIP:%s, CCMP:%s\n",
		   (priv->privacy_caps & BR_DESC_PRIV_CAP_WEP) ? "YES" : "no",
		   (priv->privacy_caps &
		    (BR_DESC_PRIV_CAP_TKIP | BR_DESC_PRIV_CAP_MICHAEL))
		   ? "YES" : "no",
		   (priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)
		   ? "YES" : "no");

	if (priv->rx_keycache_size) {
		/*
		 * NOTE:
		 *
		 * The firmware provides at most 255 (0 - 254) slots
		 * for keys which are then used to offload decryption.
		 * As a result the 255 entry (aka 0xff) can be used
		 * safely by the driver to mark keys that didn't fit
		 * into the full cache. This trick saves us from
		 * keeping an extra list for uploaded keys.
		 */

		priv->used_rxkeys = kcalloc(BITS_TO_LONGS(priv->rx_keycache_size),
					    sizeof(long),
					    GFP_KERNEL);

		if (!priv->used_rxkeys)
			return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(p54_parse_firmware);

static struct sk_buff *p54_alloc_skb(struct p54_common *priv, u16 hdr_flags,
				     u16 payload_len, u16 type, gfp_t memflags)
{
	struct p54_hdr *hdr;
	struct sk_buff *skb;
	size_t frame_len = sizeof(*hdr) + payload_len;

	if (frame_len > P54_MAX_CTRL_FRAME_LEN)
		return NULL;

	if (unlikely(skb_queue_len(&priv->tx_pending) > 64))
		return NULL;

	skb = __dev_alloc_skb(priv->tx_hdr_len + frame_len, memflags);
	if (!skb)
		return NULL;
	skb_reserve(skb, priv->tx_hdr_len);

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->flags = cpu_to_le16(hdr_flags);
	hdr->len = cpu_to_le16(payload_len);
	hdr->type = cpu_to_le16(type);
	hdr->tries = hdr->rts_tries = 0;
	return skb;
}

int p54_download_eeprom(struct p54_common *priv, void *buf,
			u16 offset, u16 len)
{
	struct p54_eeprom_lm86 *eeprom_hdr;
	struct sk_buff *skb;
	size_t eeprom_hdr_size;
	int ret = 0;
	long timeout;

	if (priv->fw_var >= 0x509)
		eeprom_hdr_size = sizeof(*eeprom_hdr);
	else
		eeprom_hdr_size = 0x4;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL, eeprom_hdr_size +
			    len, P54_CONTROL_TYPE_EEPROM_READBACK,
			    GFP_KERNEL);
	if (unlikely(!skb))
		return -ENOMEM;

	mutex_lock(&priv->eeprom_mutex);
	priv->eeprom = buf;
	eeprom_hdr = skb_put(skb, eeprom_hdr_size + len);

	if (priv->fw_var < 0x509) {
		eeprom_hdr->v1.offset = cpu_to_le16(offset);
		eeprom_hdr->v1.len = cpu_to_le16(len);
	} else {
		eeprom_hdr->v2.offset = cpu_to_le32(offset);
		eeprom_hdr->v2.len = cpu_to_le16(len);
		eeprom_hdr->v2.magic2 = 0xf;
		memcpy(eeprom_hdr->v2.magic, (const char *)"LOCK", 4);
	}

	p54_tx(priv, skb);

	timeout = wait_for_completion_interruptible_timeout(
			&priv->eeprom_comp, HZ);
	if (timeout <= 0) {
		wiphy_err(priv->hw->wiphy,
			  "device does not respond or signal received!\n");
		ret = -EBUSY;
	}
	priv->eeprom = NULL;
	mutex_unlock(&priv->eeprom_mutex);
	return ret;
}

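/*
 * Illustrative note (derived from the code below, not from the LMAC docs):
 * each TIM entry is an AID with bit 15 acting as the set/clear flag.
 * Updating AID 5 with set == true therefore puts cpu_to_le16(0x8005) on
 * the wire, while set == false sends plain 0x0005 to clear that station's
 * bit in the beacon's TIM element.
 */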
int p54_update_beacon_tim(struct p54_common *priv, u16 aid, bool set)
{
	struct sk_buff *skb;
	struct p54_tim *tim;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*tim),
			    P54_CONTROL_TYPE_TIM, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	tim = skb_put(skb, sizeof(*tim));
	tim->count = 1;
	tim->entry[0] = cpu_to_le16(set ? (aid | 0x8000) : aid);
	p54_tx(priv, skb);
	return 0;
}

int p54_sta_unlock(struct p54_common *priv, u8 *addr)
{
	struct sk_buff *skb;
	struct p54_sta_unlock *sta;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*sta),
			    P54_CONTROL_TYPE_PSM_STA_UNLOCK, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	sta = skb_put(skb, sizeof(*sta));
	memcpy(sta->addr, addr, ETH_ALEN);
	p54_tx(priv, skb);
	return 0;
}

int p54_tx_cancel(struct p54_common *priv, __le32 req_id)
{
	struct sk_buff *skb;
	struct p54_txcancel *cancel;
	u32 _req_id = le32_to_cpu(req_id);

	if (unlikely(_req_id < priv->rx_start || _req_id > priv->rx_end))
		return -EINVAL;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*cancel),
			    P54_CONTROL_TYPE_TXCANCEL, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	cancel = skb_put(skb, sizeof(*cancel));
	cancel->req_id = req_id;
	p54_tx(priv, skb);
	return 0;
}

int p54_setup_mac(struct p54_common *priv)
{
	struct sk_buff *skb;
	struct p54_setup_mac *setup;
	u16 mode;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*setup),
			    P54_CONTROL_TYPE_SETUP, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	setup = skb_put(skb, sizeof(*setup));
	if (!(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
		switch (priv->mode) {
		case NL80211_IFTYPE_STATION:
			mode = P54_FILTER_TYPE_STATION;
			break;
		case NL80211_IFTYPE_AP:
			mode = P54_FILTER_TYPE_AP;
			break;
		case NL80211_IFTYPE_ADHOC:
		case NL80211_IFTYPE_MESH_POINT:
			mode = P54_FILTER_TYPE_IBSS;
			break;
		case NL80211_IFTYPE_MONITOR:
			mode = P54_FILTER_TYPE_PROMISCUOUS;
			break;
		default:
			mode = P54_FILTER_TYPE_HIBERNATE;
			break;
		}

		/*
		 * "TRANSPARENT and PROMISCUOUS are mutually exclusive"
		 * STSW45X0C LMAC API - page 12
		 */
		if (priv->filter_flags & FIF_OTHER_BSS &&
		    (mode != P54_FILTER_TYPE_PROMISCUOUS))
			mode |= P54_FILTER_TYPE_TRANSPARENT;
	} else {
		mode = P54_FILTER_TYPE_HIBERNATE;
	}

	setup->mac_mode = cpu_to_le16(mode);
	memcpy(setup->mac_addr, priv->mac_addr, ETH_ALEN);
	memcpy(setup->bssid, priv->bssid, ETH_ALEN);
	setup->rx_antenna = 2 & priv->rx_diversity_mask; /* automatic */
	setup->rx_align = 0;
	if (priv->fw_var < 0x500) {
		setup->v1.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
		memset(setup->v1.rts_rates, 0, 8);
		setup->v1.rx_addr = cpu_to_le32(priv->rx_end);
		setup->v1.max_rx = cpu_to_le16(priv->rx_mtu);
		setup->v1.rxhw = cpu_to_le16(priv->rxhw);
		setup->v1.wakeup_timer = cpu_to_le16(priv->wakeup_timer);
		setup->v1.unalloc0 = cpu_to_le16(0);
	} else {
		setup->v2.rx_addr = cpu_to_le32(priv->rx_end);
		setup->v2.max_rx = cpu_to_le16(priv->rx_mtu);
		setup->v2.rxhw = cpu_to_le16(priv->rxhw);
		setup->v2.timer = cpu_to_le16(priv->wakeup_timer);
		setup->v2.truncate = cpu_to_le16(48896);
		setup->v2.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
		setup->v2.sbss_offset = 0;
		setup->v2.mcast_window = 0;
		setup->v2.rx_rssi_threshold = 0;
		setup->v2.rx_ed_threshold = 0;
		setup->v2.ref_clock = cpu_to_le32(644245094);
		setup->v2.lpf_bandwidth = cpu_to_le16(65535);
		setup->v2.osc_start_delay = cpu_to_le16(65535);
	}
	p54_tx(priv, skb);
	priv->phy_idle = mode == P54_FILTER_TYPE_HIBERNATE;
	return 0;
}

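/*
 * p54_scan() below doubles as the channel programming command: besides
 * starting a scan, it is also what tunes the radio to a new frequency.
 * It assembles a frame out of the fixed head (mode, dwell time, frequency),
 * an optional Longbow PA power point, the IQ autocalibration entry, the
 * per-channel output power limits, the PA curve samples and the RSSI
 * calibration values for the requested channel. Note the firmware
 * dependent placement of the rate tail: QoS firmwares older than 0x509
 * expect it before the RSSI entries, 0x509 and later expect it after them.
 */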
int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
{
	struct sk_buff *skb;
	struct p54_hdr *hdr;
	struct p54_scan_head *head;
	struct p54_iq_autocal_entry *iq_autocal;
	union p54_scan_body_union *body;
	struct p54_scan_tail_rate *rate;
	struct pda_rssi_cal_entry *rssi;
	struct p54_rssi_db_entry *rssi_data;
	unsigned int i;
	void *entry;
	__le16 freq = cpu_to_le16(priv->hw->conf.chandef.chan->center_freq);

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
			    2 + sizeof(*iq_autocal) + sizeof(*body) +
			    sizeof(*rate) + 2 * sizeof(*rssi),
			    P54_CONTROL_TYPE_SCAN, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	head = skb_put(skb, sizeof(*head));
	memset(head->scan_params, 0, sizeof(head->scan_params));
	head->mode = cpu_to_le16(mode);
	head->dwell = cpu_to_le16(dwell);
	head->freq = freq;

	if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
		__le16 *pa_power_points = skb_put(skb, 2);
		*pa_power_points = cpu_to_le16(0x0c);
	}

	iq_autocal = skb_put(skb, sizeof(*iq_autocal));
	for (i = 0; i < priv->iq_autocal_len; i++) {
		if (priv->iq_autocal[i].freq != freq)
			continue;

		memcpy(iq_autocal, &priv->iq_autocal[i].params,
		       sizeof(struct p54_iq_autocal_entry));
		break;
	}
	if (i == priv->iq_autocal_len)
		goto err;

	if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW)
		body = skb_put(skb, sizeof(body->longbow));
	else
		body = skb_put(skb, sizeof(body->normal));

	for (i = 0; i < priv->output_limit->entries; i++) {
		__le16 *entry_freq = (void *) (priv->output_limit->data +
				     priv->output_limit->entry_size * i);

		if (*entry_freq != freq)
			continue;

		if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
			memcpy(&body->longbow.power_limits,
			       (void *) entry_freq + sizeof(__le16),
			       priv->output_limit->entry_size);
		} else {
			struct pda_channel_output_limit *limits =
				(void *) entry_freq;

			body->normal.val_barker = 0x38;
			body->normal.val_bpsk = body->normal.dup_bpsk =
				limits->val_bpsk;
			body->normal.val_qpsk = body->normal.dup_qpsk =
				limits->val_qpsk;
			body->normal.val_16qam = body->normal.dup_16qam =
				limits->val_16qam;
			body->normal.val_64qam = body->normal.dup_64qam =
				limits->val_64qam;
		}
		break;
	}
	if (i == priv->output_limit->entries)
		goto err;

	entry = (void *)(priv->curve_data->data + priv->curve_data->offset);
	for (i = 0; i < priv->curve_data->entries; i++) {
		if (*((__le16 *)entry) != freq) {
			entry += priv->curve_data->entry_size;
			continue;
		}

		if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
			memcpy(&body->longbow.curve_data,
			       entry + sizeof(__le16),
			       priv->curve_data->entry_size);
		} else {
			struct p54_scan_body *chan = &body->normal;
			struct pda_pa_curve_data *curve_data =
				(void *) priv->curve_data->data;

			entry += sizeof(__le16);
			chan->pa_points_per_curve = 8;
			memset(chan->curve_data, 0, sizeof(chan->curve_data));
			memcpy(chan->curve_data, entry,
			       sizeof(struct p54_pa_curve_data_sample) *
			       min((u8)8, curve_data->points_per_channel));
		}
		break;
	}
	if (i == priv->curve_data->entries)
		goto err;

	if ((priv->fw_var >= 0x500) && (priv->fw_var < 0x509)) {
		rate = skb_put(skb, sizeof(*rate));
		rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
		for (i = 0; i < sizeof(rate->rts_rates); i++)
			rate->rts_rates[i] = i;
	}

	rssi = skb_put(skb, sizeof(*rssi));
	rssi_data = p54_rssi_find(priv, le16_to_cpu(freq));
	rssi->mul = cpu_to_le16(rssi_data->mul);
	rssi->add = cpu_to_le16(rssi_data->add);
	if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
		/* Longbow frontend needs one more RSSI cal entry */
		rssi = skb_put(skb, sizeof(*rssi));
		rssi->mul = cpu_to_le16(rssi_data->longbow_unkn);
		rssi->add = cpu_to_le16(rssi_data->longbow_unk2);
	}

	if (priv->fw_var >= 0x509) {
		rate = skb_put(skb, sizeof(*rate));
		rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
		for (i = 0; i < sizeof(rate->rts_rates); i++)
			rate->rts_rates[i] = i;
	}

	hdr = (struct p54_hdr *) skb->data;
	hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));

	p54_tx(priv, skb);
	priv->cur_rssi = rssi_data;
	return 0;

err:
	wiphy_err(priv->hw->wiphy, "frequency change to channel %d failed.\n",
		  ieee80211_frequency_to_channel(
			  priv->hw->conf.chandef.chan->center_freq));

	dev_kfree_skb_any(skb);
	return -EINVAL;
}

int p54_set_leds(struct p54_common *priv)
{
	struct sk_buff *skb;
	struct p54_led *led;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*led),
			    P54_CONTROL_TYPE_LED, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	led = skb_put(skb, sizeof(*led));
	led->flags = cpu_to_le16(0x0003);
	led->mask[0] = led->mask[1] = cpu_to_le16(priv->softled_state);
	led->delay[0] = cpu_to_le16(1);
	led->delay[1] = cpu_to_le16(0);
	p54_tx(priv, skb);
	return 0;
}

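/*
 * Worked example for the coverage class correction in p54_set_edcf()
 * (taken from the code below, not from the LMAC docs): the slot time is
 * stretched by 3 us per coverage class step, so with coverage_class == 2
 * the extra round trip delay is rtd = 3 * 2 = 6 us and a short slot grows
 * from 9 us to 15 us (a long slot from 20 us to 26 us).
 */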
int p54_set_edcf(struct p54_common *priv)
{
	struct sk_buff *skb;
	struct p54_edcf *edcf;
	u8 rtd;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
			    P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	edcf = skb_put(skb, sizeof(*edcf));
	if (priv->use_short_slot) {
		edcf->slottime = 9;
		edcf->sifs = 0x10;
		edcf->eofpad = 0x00;
	} else {
		edcf->slottime = 20;
		edcf->sifs = 0x0a;
		edcf->eofpad = 0x06;
	}
	/*
	 * calculate the extra round trip delay according to the
	 * formula from 802.11-2007 17.3.8.6.
	 */
	rtd = 3 * priv->coverage_class;
	edcf->slottime += rtd;
	edcf->round_trip_delay = cpu_to_le16(rtd);
	/* (see prism54/isl_oid.h for further details) */
	edcf->frameburst = cpu_to_le16(0);
	edcf->flags = 0;
	memset(edcf->mapping, 0, sizeof(edcf->mapping));
	memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
	p54_tx(priv, skb);
	return 0;
}

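/*
 * Power save mode selection: when mac80211 has IEEE80211_CONF_PS set and
 * no override is active, the firmware is put into its power save mode
 * with the extra P54_PSM_* features enabled (judging by the flag names:
 * beacon timeout handling, DTIM wakeups, beacon checksumming and
 * multicast/broadcast buffering - the precise firmware semantics are not
 * documented here). Otherwise the device stays in P54_PSM_CAM, i.e.
 * constantly awake.
 */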
582 */ 583 rtd = 3 * priv->coverage_class; 584 edcf->slottime += rtd; 585 edcf->round_trip_delay = cpu_to_le16(rtd); 586 /* (see prism54/isl_oid.h for further details) */ 587 edcf->frameburst = cpu_to_le16(0); 588 edcf->flags = 0; 589 memset(edcf->mapping, 0, sizeof(edcf->mapping)); 590 memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue)); 591 p54_tx(priv, skb); 592 return 0; 593 } 594 595 int p54_set_ps(struct p54_common *priv) 596 { 597 struct sk_buff *skb; 598 struct p54_psm *psm; 599 unsigned int i; 600 u16 mode; 601 602 if (priv->hw->conf.flags & IEEE80211_CONF_PS && 603 !priv->powersave_override) 604 mode = P54_PSM | P54_PSM_BEACON_TIMEOUT | P54_PSM_DTIM | 605 P54_PSM_CHECKSUM | P54_PSM_MCBC; 606 else 607 mode = P54_PSM_CAM; 608 609 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*psm), 610 P54_CONTROL_TYPE_PSM, GFP_ATOMIC); 611 if (!skb) 612 return -ENOMEM; 613 614 psm = skb_put(skb, sizeof(*psm)); 615 psm->mode = cpu_to_le16(mode); 616 psm->aid = cpu_to_le16(priv->aid); 617 for (i = 0; i < ARRAY_SIZE(psm->intervals); i++) { 618 psm->intervals[i].interval = 619 cpu_to_le16(priv->hw->conf.listen_interval); 620 psm->intervals[i].periods = cpu_to_le16(1); 621 } 622 623 psm->beacon_rssi_skip_max = 200; 624 psm->rssi_delta_threshold = 0; 625 psm->nr = 1; 626 psm->exclude[0] = WLAN_EID_TIM; 627 628 p54_tx(priv, skb); 629 priv->phy_ps = mode != P54_PSM_CAM; 630 return 0; 631 } 632 633 int p54_init_xbow_synth(struct p54_common *priv) 634 { 635 struct sk_buff *skb; 636 struct p54_xbow_synth *xbow; 637 638 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*xbow), 639 P54_CONTROL_TYPE_XBOW_SYNTH_CFG, GFP_KERNEL); 640 if (unlikely(!skb)) 641 return -ENOMEM; 642 643 xbow = skb_put(skb, sizeof(*xbow)); 644 xbow->magic1 = cpu_to_le16(0x1); 645 xbow->magic2 = cpu_to_le16(0x2); 646 xbow->freq = cpu_to_le16(5390); 647 memset(xbow->padding, 0, sizeof(xbow->padding)); 648 p54_tx(priv, skb); 649 return 0; 650 } 651 652 int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len, 653 u8 *addr, u8* key) 654 { 655 struct sk_buff *skb; 656 struct p54_keycache *rxkey; 657 658 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey), 659 P54_CONTROL_TYPE_RX_KEYCACHE, GFP_KERNEL); 660 if (unlikely(!skb)) 661 return -ENOMEM; 662 663 rxkey = skb_put(skb, sizeof(*rxkey)); 664 rxkey->entry = slot; 665 rxkey->key_id = idx; 666 rxkey->key_type = algo; 667 if (addr) 668 memcpy(rxkey->mac, addr, ETH_ALEN); 669 else 670 eth_broadcast_addr(rxkey->mac); 671 672 switch (algo) { 673 case P54_CRYPTO_WEP: 674 case P54_CRYPTO_AESCCMP: 675 rxkey->key_len = min_t(u8, 16, len); 676 memcpy(rxkey->key, key, rxkey->key_len); 677 break; 678 679 case P54_CRYPTO_TKIPMICHAEL: 680 rxkey->key_len = 24; 681 memcpy(rxkey->key, key, 16); 682 memcpy(&(rxkey->key[16]), &(key 683 [NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]), 8); 684 break; 685 686 case P54_CRYPTO_NONE: 687 rxkey->key_len = 0; 688 memset(rxkey->key, 0, sizeof(rxkey->key)); 689 break; 690 691 default: 692 wiphy_err(priv->hw->wiphy, 693 "invalid cryptographic algorithm: %d\n", algo); 694 dev_kfree_skb(skb); 695 return -EINVAL; 696 } 697 698 p54_tx(priv, skb); 699 return 0; 700 } 701 702 int p54_fetch_statistics(struct p54_common *priv) 703 { 704 struct ieee80211_tx_info *txinfo; 705 struct p54_tx_info *p54info; 706 struct sk_buff *skb; 707 708 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL, 709 sizeof(struct p54_statistics), 710 P54_CONTROL_TYPE_STAT_READBACK, GFP_KERNEL); 711 if (!skb) 712 return -ENOMEM; 713 714 /* 715 * The 
int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len,
		   u8 *addr, u8 *key)
{
	struct sk_buff *skb;
	struct p54_keycache *rxkey;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey),
			    P54_CONTROL_TYPE_RX_KEYCACHE, GFP_KERNEL);
	if (unlikely(!skb))
		return -ENOMEM;

	rxkey = skb_put(skb, sizeof(*rxkey));
	rxkey->entry = slot;
	rxkey->key_id = idx;
	rxkey->key_type = algo;
	if (addr)
		memcpy(rxkey->mac, addr, ETH_ALEN);
	else
		eth_broadcast_addr(rxkey->mac);

	switch (algo) {
	case P54_CRYPTO_WEP:
	case P54_CRYPTO_AESCCMP:
		rxkey->key_len = min_t(u8, 16, len);
		memcpy(rxkey->key, key, rxkey->key_len);
		break;

	case P54_CRYPTO_TKIPMICHAEL:
		rxkey->key_len = 24;
		memcpy(rxkey->key, key, 16);
		memcpy(&(rxkey->key[16]), &(key
			[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]), 8);
		break;

	case P54_CRYPTO_NONE:
		rxkey->key_len = 0;
		memset(rxkey->key, 0, sizeof(rxkey->key));
		break;

	default:
		wiphy_err(priv->hw->wiphy,
			  "invalid cryptographic algorithm: %d\n", algo);
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	p54_tx(priv, skb);
	return 0;
}

int p54_fetch_statistics(struct p54_common *priv)
{
	struct ieee80211_tx_info *txinfo;
	struct p54_tx_info *p54info;
	struct sk_buff *skb;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL,
			    sizeof(struct p54_statistics),
			    P54_CONTROL_TYPE_STAT_READBACK, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/*
	 * The statistics readback causes some extra headaches here: we
	 * have to be careful not to crash or corrupt the firmware's data
	 * structures.
	 *
	 * Unlike all other Control Get OIDs we cannot use helpers like
	 * skb_put to reserve the space for the data we're requesting.
	 * Instead, the extra frame length - which will hold the results
	 * later - is only passed on to p54_assign_address, so that
	 * following frames won't be placed into the allegedly empty area.
	 */
	txinfo = IEEE80211_SKB_CB(skb);
	p54info = (void *) txinfo->rate_driver_data;
	p54info->extra_len = sizeof(struct p54_statistics);

	p54_tx(priv, skb);
	return 0;
}

int p54_set_groupfilter(struct p54_common *priv)
{
	struct p54_group_address_table *grp;
	struct sk_buff *skb;
	bool on = false;

	skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*grp),
			    P54_CONTROL_TYPE_GROUP_ADDRESS_TABLE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	grp = skb_put(skb, sizeof(*grp));

	on = !(priv->filter_flags & FIF_ALLMULTI) &&
	     (priv->mc_maclist_num > 0 &&
	      priv->mc_maclist_num <= MC_FILTER_ADDRESS_NUM);

	if (on) {
		grp->filter_enable = cpu_to_le16(1);
		grp->num_address = cpu_to_le16(priv->mc_maclist_num);
		memcpy(grp->mac_list, priv->mc_maclist, sizeof(grp->mac_list));
	} else {
		grp->filter_enable = cpu_to_le16(0);
		grp->num_address = cpu_to_le16(0);
		memset(grp->mac_list, 0, sizeof(grp->mac_list));
	}

	p54_tx(priv, skb);
	return 0;
}