/*
 * FM Driver for Connectivity chip of Texas Instruments.
 *
 * This sub-module of FM driver is common for FM RX and TX
 * functionality. This module is responsible for:
 * 1) Forming group of Channel-8 commands to perform particular
 *    functionality (e.g., frequency setting requires more than
 *    one Channel-8 command to be sent to the chip).
 * 2) Sending each Channel-8 command to the chip and reading
 *    response back over Shared Transport.
 * 3) Managing TX and RX Queues and Tasklets.
 * 4) Handling FM Interrupt packet and taking appropriate action.
 * 5) Loading FM firmware to the chip (common, FM TX, and FM RX
 *    firmware files based on mode selection)
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Raja Mani <raja_mani@ti.com>
 * Author: Manjunatha Halli <manjunatha_halli@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
#include <linux/ti_wilink_st.h>
#include "fmdrv_rx.h"
#include "fmdrv_tx.h"

/* Region info */
static struct region_info region_configs[] = {
	/* Europe/US */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 87500,	/* 87.5 MHz */
		.top_freq = 108000,	/* 108 MHz */
		.fm_band = 0,
	},
	/* Japan */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 76000,	/* 76 MHz */
		.top_freq = 90000,	/* 90 MHz */
		.fm_band = 1,
	},
};

/* Band selection */
static u8 default_radio_region;	/* Europe/US */
module_param(default_radio_region, byte, 0);
MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");

/* RDS buffer blocks */
static u32 default_rds_buf = 300;
module_param(default_rds_buf, uint, 0444);
MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");

/* Radio Nr */
static u32 radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio Nr");

/* FM irq handlers forward declaration */
static void fm_irq_send_flag_getcmd(struct fmdev *);
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
static void fm_irq_handle_hw_malfunction(struct fmdev *);
static void fm_irq_handle_rds_start(struct fmdev *);
static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
static void fm_irq_handle_rds_finish(struct fmdev *);
static void fm_irq_handle_tune_op_ended(struct fmdev *);
static void fm_irq_handle_power_enb(struct fmdev *);
static void fm_irq_handle_low_rssi_start(struct fmdev *);
static void fm_irq_afjump_set_pi(struct fmdev *);
static void fm_irq_handle_set_pi_resp(struct fmdev *);
static void fm_irq_afjump_set_pimask(struct fmdev *);
static void fm_irq_handle_set_pimask_resp(struct fmdev *);
static void fm_irq_afjump_setfreq(struct fmdev *);
static void fm_irq_handle_setfreq_resp(struct fmdev *);
static void fm_irq_afjump_enableint(struct fmdev *);
static void fm_irq_afjump_enableint_resp(struct fmdev *);
static void fm_irq_start_afjump(struct fmdev *);
static void fm_irq_handle_start_afjump_resp(struct fmdev *);
static void fm_irq_afjump_rd_freq(struct fmdev *);
static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
static void fm_irq_handle_low_rssi_finish(struct fmdev *);
static void fm_irq_send_intmsk_cmd(struct fmdev *);
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);

/*
 * When FM common module receives interrupt packet, following handlers
 * will be executed one after another to service the interrupt(s)
 */
enum fmc_irq_handler_index {
	FM_SEND_FLAG_GETCMD_IDX,
	FM_HANDLE_FLAG_GETCMD_RESP_IDX,

	/* HW malfunction irq handler */
	FM_HW_MAL_FUNC_IDX,

	/* RDS threshold reached irq handler */
	FM_RDS_START_IDX,
	FM_RDS_SEND_RDS_GETCMD_IDX,
	FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
	FM_RDS_FINISH_IDX,

	/* Tune operation ended irq handler */
	FM_HW_TUNE_OP_ENDED_IDX,

	/* TX power enable irq handler */
	FM_HW_POWER_ENB_IDX,

	/* Low RSSI irq handler */
	FM_LOW_RSSI_START_IDX,
	FM_AF_JUMP_SETPI_IDX,
	FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
	FM_AF_JUMP_SETPI_MASK_IDX,
	FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
	FM_AF_JUMP_SET_AF_FREQ_IDX,
	FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
	FM_AF_JUMP_ENABLE_INT_IDX,
	FM_AF_JUMP_ENABLE_INT_RESP_IDX,
	FM_AF_JUMP_START_AFJUMP_IDX,
	FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
	FM_AF_JUMP_RD_FREQ_IDX,
	FM_AF_JUMP_RD_FREQ_RESP_IDX,
	FM_LOW_RSSI_FINISH_IDX,

	/* Interrupt process post action */
	FM_SEND_INTMSK_CMD_IDX,
	FM_HANDLE_INTMSK_CMD_RESP_IDX,
};

/* FM interrupt handler table */
static int_handler_prototype int_handler_table[] = {
	fm_irq_send_flag_getcmd,
	fm_irq_handle_flag_getcmd_resp,
	fm_irq_handle_hw_malfunction,
	fm_irq_handle_rds_start,	/* RDS threshold reached irq handler */
	fm_irq_send_rdsdata_getcmd,
	fm_irq_handle_rdsdata_getcmd_resp,
	fm_irq_handle_rds_finish,
	fm_irq_handle_tune_op_ended,
	fm_irq_handle_power_enb,	/* TX power enable irq handler */
	fm_irq_handle_low_rssi_start,
	fm_irq_afjump_set_pi,
	fm_irq_handle_set_pi_resp,
	fm_irq_afjump_set_pimask,
	fm_irq_handle_set_pimask_resp,
	fm_irq_afjump_setfreq,
	fm_irq_handle_setfreq_resp,
	fm_irq_afjump_enableint,
	fm_irq_afjump_enableint_resp,
	fm_irq_start_afjump,
	fm_irq_handle_start_afjump_resp,
	fm_irq_afjump_rd_freq,
	fm_irq_afjump_rd_freq_resp,
	fm_irq_handle_low_rssi_finish,
	fm_irq_send_intmsk_cmd,		/* Interrupt process post action */
	fm_irq_handle_intmsk_cmd_resp
};

static long (*g_st_write) (struct sk_buff *skb);
static struct completion wait_for_fmdrv_reg_comp;

static inline void fm_irq_call(struct fmdev *fmdev)
{
	fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
}

/* Continue next function in interrupt handler table */
static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	fm_irq_call(fmdev);
}

/* Move to the given stage and arm the irq timer while waiting for the chip */
static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
}

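/*
 * Overview of the interrupt state machine implemented below: each handler
 * in int_handler_table either chains to the next stage directly via
 * fm_irq_call_stage(), or sends a Channel-8 command and arms the irq timer
 * via fm_irq_timeout_stage(). The corresponding *_resp handler (or
 * int_timeout_handler(), if no response arrives in time) then continues
 * the chain until the interrupt mask is restored by the final stage.
 */
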
#ifdef FM_DUMP_TXRX_PKT
/* To dump outgoing FM Channel-8 packets */
inline void dump_tx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_cmd_msg_hdr *cmd_hdr;

	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
	printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
	       fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
	       cmd_hdr->len, cmd_hdr->op,
	       cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);

	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n data(%d): ", cmd_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}

/* To dump incoming FM Channel-8 packets */
inline void dump_rx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_event_msg_hdr *evt_hdr;

	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
	       evt_hdr->hdr, evt_hdr->len,
	       evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
	       (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);

	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n data(%d): ", evt_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}
#endif

void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
{
	fmdev->rx.region = region_configs[region_to_set];
}

/*
 * FM common sub-module will schedule this tasklet whenever it receives
 * FM packet from ST driver.
 */
static void recv_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct fm_irq *irq_info;
	struct fm_event_msg_hdr *evt_hdr;
	struct sk_buff *skb;
	u8 num_fm_hci_cmds;
	unsigned long flags;

	fmdev = (struct fmdev *)arg;
	irq_info = &fmdev->irq_info;
	/* Process all packets in the RX queue */
	while ((skb = skb_dequeue(&fmdev->rx_q))) {
		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
			fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
			      skb, skb->len, sizeof(struct fm_event_msg_hdr));
			kfree_skb(skb);
			continue;
		}

		evt_hdr = (void *)skb->data;
		num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;

		/* FM interrupt packet? */
		if (evt_hdr->op == FM_INTERRUPT) {
			/* FM interrupt handler started already? */
			if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
				set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
				if (irq_info->stage != 0) {
					fmerr("Inval stage resetting to zero\n");
					irq_info->stage = 0;
				}

				/*
				 * Execute first function in interrupt handler
				 * table.
				 */
				irq_info->handlers[irq_info->stage](fmdev);
			} else {
				set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
			}
			kfree_skb(skb);
		}
		/* Anyone waiting for this with completion handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
			complete(fmdev->resp_comp);

			fmdev->resp_comp = NULL;
			atomic_set(&fmdev->tx_cnt, 1);
		}
		/* Is this for interrupt handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
			if (fmdev->resp_skb != NULL)
				fmerr("Response SKB ptr not NULL\n");

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

			/* Execute interrupt handler where state index points */
			irq_info->handlers[irq_info->stage](fmdev);

			kfree_skb(skb);
			atomic_set(&fmdev->tx_cnt, 1);
		} else {
			fmerr("Nobody claimed SKB(%p), purging\n", skb);
			kfree_skb(skb);
		}

		/*
		 * Check flow control field. If Num_FM_HCI_Commands field is
		 * not zero, schedule FM TX tasklet.
		 */
		if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
			if (!skb_queue_empty(&fmdev->tx_q))
				tasklet_schedule(&fmdev->tx_task);
	}
}

/* FM send tasklet: is scheduled when FM packet has to be sent to chip */
static void send_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct sk_buff *skb;
	int len;

	fmdev = (struct fmdev *)arg;

	if (!atomic_read(&fmdev->tx_cnt))
		return;

	/* Check if a timeout occurred for the last transmitted packet */
	if ((jiffies - fmdev->last_tx_jiffies) > FM_DRV_TX_TIMEOUT) {
		fmerr("TX timeout occurred\n");
		atomic_set(&fmdev->tx_cnt, 1);
	}

	/* Send queued FM TX packets */
	skb = skb_dequeue(&fmdev->tx_q);
	if (!skb)
		return;

	atomic_dec(&fmdev->tx_cnt);
	fmdev->pre_op = fm_cb(skb)->fm_op;

	if (fmdev->resp_comp != NULL)
		fmerr("Response completion handler is not NULL\n");

	fmdev->resp_comp = fm_cb(skb)->completion;

	/* Write FM packet to ST driver */
	len = g_st_write(skb);
	if (len < 0) {
		kfree_skb(skb);
		fmdev->resp_comp = NULL;
		fmerr("TX tasklet failed to send skb(%p)\n", skb);
		atomic_set(&fmdev->tx_cnt, 1);
	} else {
		fmdev->last_tx_jiffies = jiffies;
	}
}

/*
 * Queues FM Channel-8 packet to FM TX queue and schedules FM TX tasklet for
 * transmission
 */
static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		       int payload_len, struct completion *wait_completion)
{
	struct sk_buff *skb;
	struct fm_cmd_msg_hdr *hdr;
	int size;

	if (fm_op >= FM_INTERRUPT) {
		fmerr("Invalid fm opcode - %d\n", fm_op);
		return -EINVAL;
	}
	if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
		fmerr("Payload data is NULL during fw download\n");
		return -EINVAL;
	}
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
		size = FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
	else
		size = payload_len;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		fmerr("No memory to create new SKB\n");
		return -ENOMEM;
	}
	/*
	 * Don't fill FM header info for the commands which come from
	 * FM firmware file.
	 */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
	    test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
		/* Fill command header info */
		hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
		hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER;	/* 0x08 */

		/* 3 (fm_opcode, rd_wr, dlen) + payload len */
		hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;

		/* FM opcode */
		hdr->op = fm_op;

		/* read/write type */
		hdr->rd_wr = type;
		hdr->dlen = payload_len;
		fm_cb(skb)->fm_op = fm_op;

		/*
		 * If firmware download has finished and the command is
		 * not a read command then payload is != NULL - a write
		 * command with u16 payload - convert to be16
		 */
		if (payload != NULL)
			*(__be16 *)payload = cpu_to_be16(*(u16 *)payload);

	} else if (payload != NULL) {
		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
	}
	if (payload != NULL)
		skb_put_data(skb, payload, payload_len);

	fm_cb(skb)->completion = wait_completion;
	skb_queue_tail(&fmdev->tx_q, skb);
	tasklet_schedule(&fmdev->tx_task);

	return 0;
}

/* Sends FM Channel-8 command to the chip and waits for the response */
int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		 unsigned int payload_len, void *response, int *response_len)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *evt_hdr;
	unsigned long flags;
	int ret;

	init_completion(&fmdev->maintask_comp);
	ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
			  &fmdev->maintask_comp);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&fmdev->maintask_comp,
					 FM_DRV_TX_TIMEOUT)) {
		fmerr("Timeout(%d sec), didn't get reg completion signal from RX tasklet\n",
		      jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}
	if (!fmdev->resp_skb) {
		fmerr("Response SKB is missing\n");
		return -EFAULT;
	}
	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	evt_hdr = (void *)skb->data;
	if (evt_hdr->status != 0) {
		fmerr("Received event pkt status(%d) is not zero\n",
		      evt_hdr->status);
		kfree_skb(skb);
		return -EIO;
	}
	/* Send response data to caller */
	if (response != NULL && response_len != NULL && evt_hdr->dlen &&
	    evt_hdr->dlen <= payload_len) {
		/* Skip header info and copy only response data */
		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
		memcpy(response, skb->data, evt_hdr->dlen);
		*response_len = evt_hdr->dlen;
	} else if (response_len != NULL && evt_hdr->dlen == 0) {
		*response_len = 0;
	}
	kfree_skb(skb);

	return 0;
}

/* --- Helper functions used in FM interrupt handlers --- */
static inline int check_cmdresp_status(struct fmdev *fmdev,
				       struct sk_buff **skb)
{
	struct fm_event_msg_hdr *fm_evt_hdr;
	unsigned long flags;

	del_timer(&fmdev->irq_info.timer);

	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	*skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	fm_evt_hdr = (void *)(*skb)->data;
	if (fm_evt_hdr->status != 0) {
		fmerr("irq: opcode %x response status is not zero, initiating irq recovery process\n",
		      fm_evt_hdr->op);

		mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
		return -1;
	}

	return 0;
}

static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
{
	struct sk_buff *skb;

	if (!check_cmdresp_status(fmdev, &skb))
		fm_irq_call_stage(fmdev, stage);
}

/*
 * Interrupt process timeout handler.
 * One of the irq handlers did not get a proper response from the chip. So
 * take recovery action here. FM interrupts are disabled in the beginning of
 * interrupt process. Therefore reset stage index to re-enable default
 * interrupts. So that next interrupt will be processed as usual.
 */
static void int_timeout_handler(struct timer_list *t)
{
	struct fmdev *fmdev;
	struct fm_irq *fmirq;

	fmdbg("irq: timeout, trying to re-enable fm interrupts\n");
	fmdev = from_timer(fmdev, t, irq_info.timer);
	fmirq = &fmdev->irq_info;
	fmirq->retry++;

	if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
		/* Stop recovery action (interrupt reenable process) and
		 * reset stage index & retry count values */
		fmirq->stage = 0;
		fmirq->retry = 0;
		fmerr("Recovery action failed during irq processing, max retry reached\n");
		return;
	}
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

/* --------- FM interrupt handlers ------------ */
static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
{
	u16 flag;

	/* Send FLAG_GET command to know the source of the interrupt */
	if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
}

static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *fm_evt_hdr;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fm_evt_hdr = (void *)skb->data;
	if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);

	fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
	fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}

static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
		fmerr("irq: HW MAL int received - do nothing\n");

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
}

static void fm_irq_handle_rds_start(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
		fmdbg("irq: rds threshold reached\n");
		fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
{
	/* Send the command to read RDS data from the chip */
	if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
			 (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
		fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
}

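/*
 * RDS AF handling below: codes between FM_RDS_MIN_AF and FM_RDS_MAX_AF
 * (FM_RDS_MAX_AF_JAPAN for the Japan band) encode a real alternate
 * frequency of bot_freq + af * 100, in the same kHz units used for
 * bot_freq/top_freq above. Codes FM_RDS_1_AF_FOLLOWS..FM_RDS_25_AF_FOLLOWS
 * only announce how many AF entries will follow and reset the AF cache.
 */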
/* Keeps track of current RX channel AF (Alternate Frequency) */
static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
{
	struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
	u8 reg_idx = fmdev->rx.region.fm_band;
	u8 index;
	u32 freq;

	/* First AF indicates the number of AFs that follow. Reset the list */
	if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
		fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
		fmdev->rx.stat_info.afcache_size = 0;
		fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
		return;
	}

	if (af < FM_RDS_MIN_AF)
		return;
	if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
		return;
	if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
		return;

	freq = fmdev->rx.region.bot_freq + (af * 100);
	if (freq == fmdev->rx.freq) {
		fmdbg("Current freq(%d) is matching with received AF(%d)\n",
		      fmdev->rx.freq, freq);
		return;
	}
	/* Check whether the AF is already in the cache */
	for (index = 0; index < stat_info->afcache_size; index++) {
		if (stat_info->af_cache[index] == freq)
			break;
	}
	/* Reached the limit of the list - ignore the next AF */
	if (index == stat_info->af_list_max) {
		fmdbg("AF cache is full\n");
		return;
	}
	/*
	 * If we reached the end of the list then this AF is not
	 * in the list - add it.
	 */
	if (index == stat_info->afcache_size) {
		fmdbg("Storing AF %d to cache index %d\n", freq, index);
		stat_info->af_cache[index] = freq;
		stat_info->afcache_size++;
	}
}

/*
 * Converts RDS buffer data from big endian format
 * to little endian format.
 */
static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
				  struct fm_rdsdata_format *rds_format)
{
	u8 index = 0;
	u8 *rds_buff;

	/*
	 * Since in Orca the 2 RDS Data bytes are in little endian and
	 * in Dolphin they are in big endian, the parsing of the RDS data
	 * is chip dependent
	 */
	if (fmdev->asci_id != 0x6350) {
		rds_buff = &rds_format->data.groupdatabuff.buff[0];
		while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
			swap(rds_buff[index], rds_buff[index + 1]);
			index += 2;
		}
	}
}

static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_rdsdata_format rds_fmt;
	struct fm_rds *rds = &fmdev->rx.rds;
	unsigned long group_idx, flags;
	u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
	u8 type, blk_idx;
	u16 cur_picode;
	u32 rds_len;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	rds_data = skb->data;
	rds_len = skb->len;

	/* Parse the RDS data */
	while (rds_len >= FM_RDS_BLK_SIZE) {
		meta_data = rds_data[2];
		/* Get the type: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E */
		type = (meta_data & 0x07);

		/* Transform the blk type into index sequence (0, 1, 2, 3, 4) */
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		fmdbg("Block index:%d(%s)\n", blk_idx,
		      (meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");

		if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
			break;

		if (blk_idx > FM_RDS_BLK_IDX_D) {
			fmdbg("Block sequence mismatch\n");
			rds->last_blk_idx = -1;
			break;
		}

		/* Skip checkword (control) byte and copy only data byte */
		memcpy(&rds_fmt.data.groupdatabuff.buff[blk_idx * (FM_RDS_BLK_SIZE - 1)],
		       rds_data, (FM_RDS_BLK_SIZE - 1));

		rds->last_blk_idx = blk_idx;

		/* If completed a whole group then handle it */
		if (blk_idx == FM_RDS_BLK_IDX_D) {
			fmdbg("Good block received\n");
			fm_rdsparse_swapbytes(fmdev, &rds_fmt);

			/*
			 * Extract PI code and store in local cache.
			 * We need this during AF switch processing.
			 */
			cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
			if (fmdev->rx.stat_info.picode != cur_picode)
				fmdev->rx.stat_info.picode = cur_picode;

			fmdbg("picode:%d\n", cur_picode);

			group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			fmdbg("(fmdrv):Group:%ld%s\n", group_idx / 2,
			      (group_idx % 2) ? "B" : "A");

			group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
			}
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}

	/* Copy raw rds data to internal rds buffer */
	rds_data = skb->data;
	rds_len = skb->len;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
	while (rds_len > 0) {
		/*
		 * Fill RDS buffer as per V4L2 specification.
		 * Store control byte
		 */
		type = (rds_data[2] & 0x07);
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		tmpbuf[2] = blk_idx;		/* Offset name */
		tmpbuf[2] |= blk_idx << 3;	/* Received offset */

		/* Store data byte */
		tmpbuf[0] = rds_data[0];
		tmpbuf[1] = rds_data[1];

		memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
		rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;

		/* Check for overflow & start over */
		if (rds->wr_idx == rds->rd_idx) {
			fmdbg("RDS buffer overflow\n");
			rds->wr_idx = 0;
			rds->rd_idx = 0;
			break;
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

	/* Wakeup read queue */
	if (rds->wr_idx != rds->rd_idx)
		wake_up_interruptible(&rds->read_queue);

	fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
}

static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
}

static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) &
	    fmdev->irq_info.mask) {
		fmdbg("irq: tune ended/bandlimit reached\n");
		if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
			fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
		} else {
			complete(&fmdev->maintask_comp);
			fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
		}
	} else
		fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;

	fm_irq_call(fmdev);
}

static void fm_irq_handle_power_enb(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
		fmdbg("irq: Power Enabled/Disabled\n");
		complete(&fmdev->maintask_comp);
	}

	fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
}

/*
 * AF switch sequence: when the RSSI drops below the threshold and the AF
 * cache is not empty, the handlers below set the PI code and PI mask,
 * program the next AF from the cache, enable the FR interrupt, start a
 * tuner AF jump and, on the resulting tune-ended interrupt, read back the
 * frequency to check whether the jump succeeded. On failure the next AF
 * in the cache is tried.
 */
static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
{
	if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
	    (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
	    (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
	    (fmdev->rx.stat_info.afcache_size != 0)) {
		fmdbg("irq: rssi level has fallen below threshold level\n");

		/* Disable further low RSSI interrupts */
		fmdev->irq_info.mask &= ~FM_LEV_EVENT;

		fmdev->rx.afjump_idx = 0;
		fmdev->rx.freq_before_jump = fmdev->rx.freq;
		fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
{
	u16 payload;

	/* Set PI code - must be updated if the AF list is not empty */
	payload = fmdev->rx.stat_info.picode;
	if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
}

static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
}

/*
 * Set PI mask.
 * 0xFFFF = Enable PI code matching
 * 0x0000 = Disable PI code matching
 */
static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
{
	u16 payload;

	payload = 0x0000;
	if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
}

static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
}

static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
{
	u16 frq_index;
	u16 payload;

	fmdbg("Switch to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
	frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
		     fmdev->rx.region.bot_freq) / FM_FREQ_MUL;

	payload = frq_index;
	if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
}

static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
}

static void fm_irq_afjump_enableint(struct fmdev *fmdev)
{
	u16 payload;

	/* Enable FR (tuning operation ended) interrupt */
	payload = FM_FR_EVENT;
	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
}

static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
}

static void fm_irq_start_afjump(struct fmdev *fmdev)
{
	u16 payload;

	payload = FM_TUNER_AF_JUMP_MODE;
	if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
}

static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
	set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
	clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
{
	u16 payload;

	if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
}

static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	u16 read_freq;
	u32 curr_freq, jumped_freq;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&read_freq, skb->data, sizeof(read_freq));
	read_freq = be16_to_cpu((__force __be16)read_freq);
	curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);

	jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];

	/* If the frequency was changed the jump succeeded */
	if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
		fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
		fmdev->rx.freq = curr_freq;
		fm_rx_reset_rds_cache(fmdev);

		/* AF feature is on, enable low level RSSI interrupt */
		if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
			fmdev->irq_info.mask |= FM_LEV_EVENT;

		fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
	} else {		/* jump to the next freq in the AF list */
		fmdev->rx.afjump_idx++;

		/* If we reached the end of the list - stop searching */
		if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
			fmdbg("AF switch processing failed\n");
			fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
		} else {	/* AF list is not over - try next one */
			fmdbg("Trying next freq in AF cache\n");
			fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
		}
	}
	fm_irq_call(fmdev);
}

static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
{
	u16 payload;

	/* Re-enable FM interrupts */
	payload = fmdev->irq_info.mask;

	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
}

static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;
	/*
	 * This is last function in interrupt table to be executed.
	 * So, reset stage index to 0.
	 */
	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;

	/* Start processing any pending interrupt */
	if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
		fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
	else
		clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

/* Returns availability of RDS data in internal buffer */
int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
			      struct poll_table_struct *pts)
{
	poll_wait(file, &fmdev->rx.rds.read_queue, pts);
	if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
		return 0;

	return -EAGAIN;
}

/* Copies RDS data from internal buffer to user buffer */
int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
					u8 __user *buf, size_t count)
{
	u32 block_count;
	u8 tmpbuf[FM_RDS_BLK_SIZE];
	unsigned long flags;
	int ret;

	if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
					       (fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
		if (ret)
			return -EINTR;
	}

	/* Calculate block count from byte count */
	count /= FM_RDS_BLK_SIZE;
	block_count = 0;
	ret = 0;

	while (block_count < count) {
		spin_lock_irqsave(&fmdev->rds_buff_lock, flags);

		if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
			spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
			break;
		}
		memcpy(tmpbuf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
		       FM_RDS_BLK_SIZE);
		fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
		if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
			fmdev->rx.rds.rd_idx = 0;

		spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

		if (copy_to_user(buf, tmpbuf, FM_RDS_BLK_SIZE))
			break;

		block_count++;
		buf += FM_RDS_BLK_SIZE;
		ret += FM_RDS_BLK_SIZE;
	}
	return ret;
}

int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_freq(fmdev, freq_to_set);

	case FM_MODE_TX:
		return fm_tx_set_freq(fmdev, freq_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
{
	if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
		fmerr("RX frequency is not set\n");
		return -EPERM;
	}
	if (cur_tuned_frq == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		*cur_tuned_frq = fmdev->rx.freq;
		return 0;

	case FM_MODE_TX:
		*cur_tuned_frq = 0;	/* TODO: Change this later */
		return 0;

	default:
		return -EINVAL;
	}
}

int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_region(fmdev, region_to_set);

	case FM_MODE_TX:
		return fm_tx_set_region(fmdev, region_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_mute_mode(fmdev, mute_mode_toset);

	case FM_MODE_TX:
		return fm_tx_set_mute_mode(fmdev, mute_mode_toset);

	default:
		return -EINVAL;
	}
}

int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_stereo_mono(fmdev, mode);

	case FM_MODE_TX:
		return fm_tx_set_stereo_mono(fmdev, mode);

	default:
		return -EINVAL;
	}
}

int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_rds_mode(fmdev, rds_en_dis);

	case FM_MODE_TX:
		return fm_tx_set_rds_mode(fmdev, rds_en_dis);

	default:
		return -EINVAL;
	}
}

/* Sends power off command to the chip */
static int fm_power_down(struct fmdev *fmdev)
{
	u16 payload;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmdev->curr_fmmode == FM_MODE_OFF) {
		fmdbg("FM chip is already in OFF state\n");
		return 0;
	}

	payload = 0x0;
	ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			   sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return fmc_release(fmdev);
}

/* Reads init commands from the FM firmware file and loads them to the chip */
static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
{
	const struct firmware *fw_entry;
	struct bts_header *fw_header;
	struct bts_action *action;
	struct bts_action_delay *delay;
	u8 *fw_data;
	int ret, fw_len, cmd_cnt;

	cmd_cnt = 0;
	set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	ret = request_firmware(&fw_entry, fw_name,
			       &fmdev->radio_dev->dev);
	if (ret < 0) {
		fmerr("Unable to read firmware(%s) content\n", fw_name);
		return ret;
	}
	fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);

	fw_data = (void *)fw_entry->data;
	fw_len = fw_entry->size;

	fw_header = (struct bts_header *)fw_data;
	if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
		fmerr("%s not a legal TI firmware file\n", fw_name);
		ret = -EINVAL;
		goto rel_fw;
	}
	fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);

	/* Skip file header info, we already verified it */
	fw_data += sizeof(struct bts_header);
	fw_len -= sizeof(struct bts_header);

	while (fw_data && fw_len > 0) {
		action = (struct bts_action *)fw_data;

		switch (action->type) {
		case ACTION_SEND_COMMAND:	/* Send */
			ret = fmc_send_cmd(fmdev, 0, 0, action->data,
					   action->size, NULL, NULL);
			if (ret)
				goto rel_fw;

			cmd_cnt++;
			break;

		case ACTION_DELAY:	/* Delay */
			delay = (struct bts_action_delay *)action->data;
			mdelay(delay->msec);
			break;
		}

		fw_data += (sizeof(struct bts_action) + (action->size));
		fw_len -= (sizeof(struct bts_action) + (action->size));
	}
	fmdbg("Firmware commands(%d) loaded to chip\n", cmd_cnt);
rel_fw:
	release_firmware(fw_entry);
	clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	return ret;
}

/* Loads default RX configuration to the chip */
static int load_default_rx_configuration(struct fmdev *fmdev)
{
	int ret;

	ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
	if (ret < 0)
		return ret;

	return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
}

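/*
 * Power-up downloads two firmware scripts in TI .bts format: first the
 * common init script and then the RX or TX specific one. The file names
 * are built from the ASIC ID and version read back from the chip
 * (FM_FMC_FW_FILE_START/FM_RX_FW_FILE_START/FM_TX_FW_FILE_START plus a
 * "_<asic id>.<asic version>.bts" suffix).
 */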
/* Performs the FM power-on sequence */
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
	u16 payload;
	__be16 asic_id = 0, asic_ver = 0;
	int resp_len, ret;
	u8 fw_name[50];

	if (mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid firmware download option\n");
		return -EINVAL;
	}

	/*
	 * Initialize FM common module. FM GPIO toggling is
	 * taken care in Shared Transport driver.
	 */
	ret = fmc_prepare(fmdev);
	if (ret < 0) {
		fmerr("Unable to prepare FM Common\n");
		return ret;
	}

	payload = FM_ENABLE;
	if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			 sizeof(payload), NULL, NULL))
		goto rel;

	/* Allow the chip to settle down in Channel-8 mode */
	msleep(20);

	if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
			 sizeof(asic_id), &asic_id, &resp_len))
		goto rel;

	if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
			 sizeof(asic_ver), &asic_ver, &resp_len))
		goto rel;

	fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
	      be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	}
	sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
		FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	} else
		return ret;
rel:
	return fmc_release(fmdev);
}

/* Set FM Modes (TX, RX, OFF) */
int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
	int ret = 0;

	if (fm_mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid FM mode\n");
		return -EINVAL;
	}
	if (fmdev->curr_fmmode == fm_mode) {
		fmdbg("Already fm is in mode(%d)\n", fm_mode);
		return ret;
	}

	switch (fm_mode) {
	case FM_MODE_OFF:	/* OFF Mode */
		ret = fm_power_down(fmdev);
		if (ret < 0) {
			fmerr("Failed to set OFF mode\n");
			return ret;
		}
		break;

	case FM_MODE_TX:	/* TX Mode */
	case FM_MODE_RX:	/* RX Mode */
		/* Power down before switching to TX or RX mode */
		if (fmdev->curr_fmmode != FM_MODE_OFF) {
			ret = fm_power_down(fmdev);
			if (ret < 0) {
				fmerr("Failed to set OFF mode\n");
				return ret;
			}
			msleep(30);
		}
		ret = fm_power_up(fmdev, fm_mode);
		if (ret < 0) {
			fmerr("Failed to load firmware\n");
			return ret;
		}
	}
	fmdev->curr_fmmode = fm_mode;

	/* Set default configuration */
	if (fmdev->curr_fmmode == FM_MODE_RX) {
		fmdbg("Loading default rx configuration..\n");
		ret = load_default_rx_configuration(fmdev);
		if (ret < 0)
			fmerr("Failed to load default values\n");
	}

	return ret;
}

/* Returns current FM mode (TX, RX, OFF) */
int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
{
	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmmode == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	*fmmode = fmdev->curr_fmmode;
	return 0;
}

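/*
 * fm_st_receive() and fm_st_reg_comp_cb() below are the callbacks handed
 * to the Shared Transport (ST) driver via st_register() in fmc_prepare():
 * ST invokes fm_st_receive() for every received Channel-8 packet and
 * fm_st_reg_comp_cb() when an asynchronous registration completes.
 */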
/* Called by ST layer when FM packet is available */
static long fm_st_receive(void *arg, struct sk_buff *skb)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;

	if (skb == NULL) {
		fmerr("Invalid SKB received from ST\n");
		return -EFAULT;
	}

	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
		return -EINVAL;
	}

	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
	skb_queue_tail(&fmdev->rx_q, skb);
	tasklet_schedule(&fmdev->rx_task);

	return 0;
}

/*
 * Called by ST layer to indicate protocol registration completion
 * status.
 */
static void fm_st_reg_comp_cb(void *arg, int data)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;
	fmdev->streg_cbdata = data;
	complete(&wait_for_fmdrv_reg_comp);
}

/*
 * This function will be called from FM V4L2 open function.
 * Register with ST driver and initialize driver data.
 */
int fmc_prepare(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already up\n");
		return 0;
	}

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.recv = fm_st_receive;
	fm_st_proto.match_packet = NULL;
	fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
	fm_st_proto.write = NULL;	/* TI ST driver will fill write pointer */
	fm_st_proto.priv_data = fmdev;
	fm_st_proto.chnl_id = 0x08;
	fm_st_proto.max_frame_size = 0xff;
	fm_st_proto.hdr_len = 1;
	fm_st_proto.offset_len_in_hdr = 0;
	fm_st_proto.len_size = 1;
	fm_st_proto.reserve = 1;

	ret = st_register(&fm_st_proto);
	if (ret == -EINPROGRESS) {
		init_completion(&wait_for_fmdrv_reg_comp);
		fmdev->streg_cbdata = -EINPROGRESS;
		fmdbg("%s waiting for ST reg completion signal\n", __func__);

		if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
						 FM_ST_REG_TIMEOUT)) {
			fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
			      jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
			return -ETIMEDOUT;
		}
		if (fmdev->streg_cbdata != 0) {
			fmerr("ST reg comp CB called with error status %d\n",
			      fmdev->streg_cbdata);
			return -EAGAIN;
		}

		ret = 0;
	} else if (ret < 0) {
		fmerr("st_register failed %d\n", ret);
		return -EAGAIN;
	}

	if (fm_st_proto.write != NULL) {
		g_st_write = fm_st_proto.write;
	} else {
		fmerr("Failed to get ST write func pointer\n");
		ret = st_unregister(&fm_st_proto);
		if (ret < 0)
			fmerr("st_unregister failed %d\n", ret);
		return -EAGAIN;
	}

	spin_lock_init(&fmdev->rds_buff_lock);
	spin_lock_init(&fmdev->resp_skb_lock);

	/* Initialize TX queue and TX tasklet */
	skb_queue_head_init(&fmdev->tx_q);
	tasklet_init(&fmdev->tx_task, send_tasklet, (unsigned long)fmdev);

	/* Initialize RX Queue and RX tasklet */
	skb_queue_head_init(&fmdev->rx_q);
	tasklet_init(&fmdev->rx_task, recv_tasklet, (unsigned long)fmdev);

	fmdev->irq_info.stage = 0;
	atomic_set(&fmdev->tx_cnt, 1);
	fmdev->resp_comp = NULL;

	timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
	/* TODO: add FM_STIC_EVENT later */
	fmdev->irq_info.mask = FM_MAL_EVENT;

	/* Region info */
	fmdev->rx.region = region_configs[default_radio_region];

	fmdev->rx.mute_mode = FM_MUTE_OFF;
	fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
	fmdev->rx.rds.flag = FM_RDS_DISABLE;
	fmdev->rx.freq = FM_UNDEFINED_FREQ;
	fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
	fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
	fmdev->irq_info.retry = 0;

	fm_rx_reset_rds_cache(fmdev);
	init_waitqueue_head(&fmdev->rx.rds.read_queue);

	fm_rx_reset_station_info(fmdev);
	set_bit(FM_CORE_READY, &fmdev->flag);

	return ret;
}

/*
 * This function will be called from FM V4L2 release function.
 * Unregister from ST driver.
 */
int fmc_release(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already down\n");
		return 0;
	}
	/* Service pending read */
	wake_up_interruptible(&fmdev->rx.rds.read_queue);

	tasklet_kill(&fmdev->tx_task);
	tasklet_kill(&fmdev->rx_task);

	skb_queue_purge(&fmdev->tx_q);
	skb_queue_purge(&fmdev->rx_q);

	fmdev->resp_comp = NULL;
	fmdev->rx.freq = 0;

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.chnl_id = 0x08;

	ret = st_unregister(&fm_st_proto);

	if (ret < 0)
		fmerr("Failed to de-register FM from ST %d\n", ret);
	else
		fmdbg("Successfully unregistered from ST\n");

	clear_bit(FM_CORE_READY, &fmdev->flag);
	return ret;
}

/*
 * Module init function. Ask FM V4L module to register video device.
 * Allocate memory for FM driver context and RX RDS buffer.
 */
static int __init fm_drv_init(void)
{
	struct fmdev *fmdev = NULL;
	int ret = -ENOMEM;

	fmdbg("FM driver version %s\n", FM_DRV_VERSION);

	fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
	if (NULL == fmdev) {
		fmerr("Can't allocate operation structure memory\n");
		return ret;
	}
	fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
	fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
	if (NULL == fmdev->rx.rds.buff) {
		fmerr("Can't allocate rds ring buffer\n");
		goto rel_dev;
	}

	ret = fm_v4l2_init_video_device(fmdev, radio_nr);
	if (ret < 0)
		goto rel_rdsbuf;

	fmdev->irq_info.handlers = int_handler_table;
	fmdev->curr_fmmode = FM_MODE_OFF;
	fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
	fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
	return ret;

rel_rdsbuf:
	kfree(fmdev->rx.rds.buff);
rel_dev:
	kfree(fmdev);

	return ret;
}

/* Module exit function. Ask FM V4L module to unregister video device */
static void __exit fm_drv_exit(void)
{
	struct fmdev *fmdev = NULL;

	fmdev = fm_v4l2_deinit_video_device();
	if (fmdev != NULL) {
		kfree(fmdev->rx.rds.buff);
		kfree(fmdev);
	}
}

module_init(fm_drv_init);
module_exit(fm_drv_exit);

/* ------------- Module Info ------------- */
MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
MODULE_VERSION(FM_DRV_VERSION);
MODULE_LICENSE("GPL");