// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Digigram miXart soundcards
 *
 * low level interface with interrupt handling and mail box implementation
 *
 * Copyright (c) 2003 by Digigram <alsa@digigram.com>
 */

#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/io.h>

#include <sound/core.h>
#include "mixart.h"
#include "mixart_hwdep.h"
#include "mixart_core.h"


#define MSG_TIMEOUT_JIFFIES	((400 * HZ) / 1000)	/* 400 ms */

#define MSG_DESCRIPTOR_SIZE	0x24
#define MSG_HEADER_SIZE		(MSG_DESCRIPTOR_SIZE + 4)

#define MSG_DEFAULT_SIZE	512

#define MSG_TYPE_MASK		0x00000003	/* mask for the following types */
#define MSG_TYPE_NOTIFY		0	/* embedded -> driver (notification only, do not get_msg()!) */
#define MSG_TYPE_COMMAND	1	/* driver <-> embedded (a command has no answer) */
#define MSG_TYPE_REQUEST	2	/* driver -> embedded (a request gets an answer back) */
#define MSG_TYPE_ANSWER		3	/* embedded -> driver */
#define MSG_CANCEL_NOTIFY_MASK	0x80000000	/* this bit is set for a notification that has been canceled */


static int retrieve_msg_frame(struct mixart_mgr *mgr, u32 *msg_frame)
{
	/* read the message frame fifo */
	u32 headptr, tailptr;

	tailptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_POST_TAIL));
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_POST_HEAD));

	if (tailptr == headptr)
		return 0;	/* no message posted */

	if (tailptr < MSG_OUTBOUND_POST_STACK)
		return 0;	/* error */
	if (tailptr >= MSG_OUTBOUND_POST_STACK + MSG_BOUND_STACK_SIZE)
		return 0;	/* error */

	*msg_frame = readl_be(MIXART_MEM(mgr, tailptr));

	/* increment the tail index */
	tailptr += 4;
	if (tailptr >= MSG_OUTBOUND_POST_STACK + MSG_BOUND_STACK_SIZE)
		tailptr = MSG_OUTBOUND_POST_STACK;
	writel_be(tailptr, MIXART_MEM(mgr, MSG_OUTBOUND_POST_TAIL));

	return 1;
}

static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
		   u32 msg_frame_address)
{
	u32 headptr;
	u32 size;
	int err;
#ifndef __BIG_ENDIAN
	unsigned int i;
#endif

	mutex_lock(&mgr->msg_lock);
	err = 0;

	/* copy the message descriptor from the miXart to the driver */
	size = readl_be(MIXART_MEM(mgr, msg_frame_address));			/* size of descriptor + response */
	resp->message_id = readl_be(MIXART_MEM(mgr, msg_frame_address + 4));	/* dwMessageID */
	resp->uid.object_id = readl_be(MIXART_MEM(mgr, msg_frame_address + 8));	/* uidDest */
	resp->uid.desc = readl_be(MIXART_MEM(mgr, msg_frame_address + 12));	/* */

	if (size < MSG_DESCRIPTOR_SIZE || resp->size < size - MSG_DESCRIPTOR_SIZE) {
		err = -EINVAL;
		dev_err(&mgr->pci->dev,
			"problem with response size = %d\n", size);
		goto _clean_exit;
	}
	size -= MSG_DESCRIPTOR_SIZE;

	memcpy_fromio(resp->data, MIXART_MEM(mgr, msg_frame_address + MSG_HEADER_SIZE), size);
	resp->size = size;

	/* swap if necessary */
#ifndef __BIG_ENDIAN
	size /= 4;	/* size in u32 words */
	for (i = 0; i < size; i++)
		((u32 *)resp->data)[i] = be32_to_cpu(((__be32 *)resp->data)[i]);
#endif

	/*
	 * free the message frame address
	 */
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

	if (headptr < MSG_OUTBOUND_FREE_STACK ||
	    headptr >= MSG_OUTBOUND_FREE_STACK + MSG_BOUND_STACK_SIZE) {
		err = -EINVAL;
		goto _clean_exit;
	}

	/* give the address back to the outbound fifo */
	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* increment the outbound free head */
	headptr += 4;
	if (headptr >= MSG_OUTBOUND_FREE_STACK + MSG_BOUND_STACK_SIZE)
		headptr = MSG_OUTBOUND_FREE_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

 _clean_exit:
	mutex_unlock(&mgr->msg_lock);

	return err;
}
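
/*
 * Summary of the mailbox rings used above and below (the *_STACK offsets and
 * MSG_BOUND_STACK_SIZE are defined in mixart.h and not restated here):
 *
 *   MSG_OUTBOUND_POST_*  - frames posted by the embedded side, drained from
 *                          the tail by retrieve_msg_frame()
 *   MSG_OUTBOUND_FREE_*  - frames returned at the head by get_msg() once the
 *                          response has been copied out
 *   MSG_INBOUND_FREE_*   - free frames taken from the tail by send_msg()
 *   MSG_INBOUND_POST_*   - frames posted to the embedded side at the head by
 *                          send_msg()
 *
 * Each fifo entry holds one 32-bit frame address, so every index advances by
 * four and wraps back to its stack base, i.e. (illustrative only):
 *
 *	ptr += 4;
 *	if (ptr >= RING_STACK_BASE + MSG_BOUND_STACK_SIZE)
 *		ptr = RING_STACK_BASE;
 *
 * where RING_STACK_BASE stands for the respective *_STACK constant.
 */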

/*
 * send a message to miXart. return: the msg_frame used for this message
 */
/* call with mgr->msg_lock held! */
static int send_msg(struct mixart_mgr *mgr,
		    struct mixart_msg *msg,
		    int max_answersize,
		    int mark_pending,
		    u32 *msg_event)
{
	u32 headptr, tailptr;
	u32 msg_frame_address;
	int i;

	if (snd_BUG_ON(msg->size % 4))
		return -EINVAL;

	/* get a message frame address */
	tailptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_FREE_TAIL));
	headptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_FREE_HEAD));

	if (tailptr == headptr) {
		dev_err(&mgr->pci->dev, "error: no message frame available\n");
		return -EBUSY;
	}

	if (tailptr < MSG_INBOUND_FREE_STACK ||
	    tailptr >= MSG_INBOUND_FREE_STACK + MSG_BOUND_STACK_SIZE)
		return -EINVAL;

	msg_frame_address = readl_be(MIXART_MEM(mgr, tailptr));
	writel(0, MIXART_MEM(mgr, tailptr));	/* set the address to zero on this fifo position */

	/* increment the inbound free tail */
	tailptr += 4;
	if (tailptr >= MSG_INBOUND_FREE_STACK + MSG_BOUND_STACK_SIZE)
		tailptr = MSG_INBOUND_FREE_STACK;

	writel_be(tailptr, MIXART_MEM(mgr, MSG_INBOUND_FREE_TAIL));

	/* TODO: use memcpy_toio() with an intermediate buffer to copy the message */

	/* copy the message descriptor to the card memory */
	writel_be(msg->size + MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address));	/* size of descriptor + request */
	writel_be(msg->message_id, MIXART_MEM(mgr, msg_frame_address + 4));		/* dwMessageID */
	writel_be(msg->uid.object_id, MIXART_MEM(mgr, msg_frame_address + 8));		/* uidDest */
	writel_be(msg->uid.desc, MIXART_MEM(mgr, msg_frame_address + 12));		/* */
	writel_be(MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 16));	/* SizeHeader */
	writel_be(MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 20));	/* OffsetDLL_T16 */
	writel_be(msg->size, MIXART_MEM(mgr, msg_frame_address + 24));			/* SizeDLL_T16 */
	writel_be(MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 28));	/* OffsetDLL_DRV */
	writel_be(0, MIXART_MEM(mgr, msg_frame_address + 32));				/* SizeDLL_DRV */
	writel_be(MSG_DESCRIPTOR_SIZE + max_answersize, MIXART_MEM(mgr, msg_frame_address + 36)); /* dwExpectedAnswerSize */

	/* copy the message data to the card memory */
	for (i = 0; i < msg->size; i += 4)
		writel_be(*(u32 *)(msg->data + i), MIXART_MEM(mgr, MSG_HEADER_SIZE + msg_frame_address + i));

	if (mark_pending) {
		if (*msg_event) {
			/* the pending event is the notification we wait for! */
			mgr->pending_event = *msg_event;
		} else {
			/* the pending event is the answer we wait for (same address as the request)! */
			mgr->pending_event = msg_frame_address;

			/* copy the address back to the caller */
			*msg_event = msg_frame_address;
		}
	}

	/* mark the frame as a request (it will get an answer) */
	msg_frame_address |= MSG_TYPE_REQUEST;

	/* post the frame */
	headptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_POST_HEAD));

	if (headptr < MSG_INBOUND_POST_STACK ||
	    headptr >= MSG_INBOUND_POST_STACK + MSG_BOUND_STACK_SIZE)
		return -EINVAL;

	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* increment the inbound post head */
	headptr += 4;
	if (headptr >= MSG_INBOUND_POST_STACK + MSG_BOUND_STACK_SIZE)
		headptr = MSG_INBOUND_POST_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_INBOUND_POST_HEAD));

	return 0;
}
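
/*
 * Illustrative use of the blocking request/answer path below (a sketch only;
 * the real call sites live in the other miXart source files and the payload
 * layout here is hypothetical):
 *
 *	struct mixart_msg request;
 *	u32 status;
 *
 *	request.message_id = MSG_STREAM_START_INPUT_STAGE_PACKET;
 *	request.uid = (struct mixart_uid){0, 0};
 *	request.data = &start_args;		// hypothetical payload
 *	request.size = sizeof(start_args);	// must be a multiple of 4
 *	err = snd_mixart_send_msg(mgr, &request, sizeof(status), &status);
 *
 * send_msg() records the chosen frame address in mgr->pending_event, which is
 * what snd_mixart_threaded_irq() matches against before waking the sleeper.
 */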

int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request,
			int max_resp_size, void *resp_data)
{
	struct mixart_msg resp;
	u32 msg_frame = 0;	/* 0: we wait for the answer, not for a notification */
	int err;
	wait_queue_entry_t wait;
	long timeout;

	init_waitqueue_entry(&wait, current);

	mutex_lock(&mgr->msg_lock);
	/* send the message and mark the answer pending */
	err = send_msg(mgr, request, max_resp_size, 1, &msg_frame);
	if (err) {
		mutex_unlock(&mgr->msg_lock);
		return err;
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	mutex_unlock(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (!timeout) {
		/* error - no ack */
		dev_err(&mgr->pci->dev,
			"error: no response on msg %x\n", msg_frame);
		return -EIO;
	}

	/* retrieve the answer into the same struct mixart_msg */
	resp.message_id = 0;
	resp.uid = (struct mixart_uid){0, 0};
	resp.data = resp_data;
	resp.size = max_resp_size;

	err = get_msg(mgr, &resp, msg_frame);

	if (request->message_id != resp.message_id)
		dev_err(&mgr->pci->dev, "RESPONSE ERROR!\n");

	return err;
}


int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
				   struct mixart_msg *request, u32 notif_event)
{
	int err;
	wait_queue_entry_t wait;
	long timeout;

	if (snd_BUG_ON(!notif_event))
		return -EINVAL;
	if (snd_BUG_ON((notif_event & MSG_TYPE_MASK) != MSG_TYPE_NOTIFY))
		return -EINVAL;
	if (snd_BUG_ON(notif_event & MSG_CANCEL_NOTIFY_MASK))
		return -EINVAL;

	init_waitqueue_entry(&wait, current);

	mutex_lock(&mgr->msg_lock);
	/* send the message and mark the notification event pending */
	err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 1, &notif_event);
	if (err) {
		mutex_unlock(&mgr->msg_lock);
		return err;
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	mutex_unlock(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (!timeout) {
		/* error - no ack */
		dev_err(&mgr->pci->dev,
			"error: notification %x not received\n", notif_event);
		return -EIO;
	}

	return 0;
}
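
/*
 * Both blocking helpers above sleep in TASK_UNINTERRUPTIBLE for at most
 * MSG_TIMEOUT_JIFFIES (400 ms) and are woken by snd_mixart_threaded_irq()
 * when the frame address or notification stored in mgr->pending_event shows
 * up in the outbound fifo.  The non-blocking variant below never touches
 * pending_event: its answer is queued into mgr->msg_fifo by the interrupt
 * side and drained later by snd_mixart_process_msg().
 */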

int snd_mixart_send_msg_nonblock(struct mixart_mgr *mgr, struct mixart_msg *request)
{
	u32 message_frame;
	int err;

	/* just send the message (do not mark it as a pending one) */
	mutex_lock(&mgr->msg_lock);
	err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 0, &message_frame);
	mutex_unlock(&mgr->msg_lock);

	/* the answer will be handled later by snd_mixart_process_msg() */
	atomic_inc(&mgr->msg_processed);

	return err;
}


/* common buffer used by the interrupt handlers to send/receive messages */
static u32 mixart_msg_data[MSG_DEFAULT_SIZE / 4];


static void snd_mixart_process_msg(struct mixart_mgr *mgr)
{
	struct mixart_msg resp;
	u32 msg, addr, type;
	int err;

	while (mgr->msg_fifo_readptr != mgr->msg_fifo_writeptr) {
		msg = mgr->msg_fifo[mgr->msg_fifo_readptr];
		mgr->msg_fifo_readptr++;
		mgr->msg_fifo_readptr %= MSG_FIFO_SIZE;

		/* process the message ... */
		addr = msg & ~MSG_TYPE_MASK;
		type = msg & MSG_TYPE_MASK;

		switch (type) {
		case MSG_TYPE_ANSWER:
			/* answer to a message we did not wait for (snd_mixart_send_msg_nonblock) */
			resp.message_id = 0;
			resp.data = mixart_msg_data;
			resp.size = sizeof(mixart_msg_data);
			err = get_msg(mgr, &resp, addr);
			if (err < 0) {
				dev_err(&mgr->pci->dev,
					"error(%d) reading mf %x\n",
					err, msg);
				break;
			}

			switch (resp.message_id) {
			case MSG_STREAM_START_INPUT_STAGE_PACKET:
			case MSG_STREAM_START_OUTPUT_STAGE_PACKET:
			case MSG_STREAM_STOP_INPUT_STAGE_PACKET:
			case MSG_STREAM_STOP_OUTPUT_STAGE_PACKET:
				if (mixart_msg_data[0])
					dev_err(&mgr->pci->dev,
						"error MSG_STREAM_ST***_***PUT_STAGE_PACKET status=%x\n",
						mixart_msg_data[0]);
				break;
			default:
				dev_dbg(&mgr->pci->dev,
					"received mf(%x) : msg_id(%x) uid(%x, %x) size(%zd)\n",
					msg, resp.message_id, resp.uid.object_id, resp.uid.desc, resp.size);
				break;
			}
			break;
		case MSG_TYPE_NOTIFY:
			/* the msg contains no address! do not get_msg()! */
		case MSG_TYPE_COMMAND:
			/* get_msg() would be necessary */
		default:
			dev_err(&mgr->pci->dev,
				"doesn't know what to do with message %x\n",
				msg);
		}	/* switch type */

		/* decrement the counter of pending answers */
		atomic_dec(&mgr->msg_processed);

	}	/* while there is a msg in fifo */
}
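
/*
 * Interrupt handling is split in two stages: the hard handler below only
 * checks MIXART_OIDI, masks further interrupts and acknowledges the outbound
 * doorbell, then returns IRQ_WAKE_THREAD.  The actual mailbox draining is
 * done in snd_mixart_threaded_irq(), which may sleep (it takes the mgr->lock
 * and mgr->msg_lock mutexes and calls snd_pcm_period_elapsed()).
 */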

irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
{
	struct mixart_mgr *mgr = dev_id;
	u32 it_reg;

	it_reg = readl_le(MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET));
	if (!(it_reg & MIXART_OIDI)) {
		/* this device did not cause the interrupt */
		return IRQ_NONE;
	}

	/* mask all interrupts */
	writel_le(MIXART_HOST_ALL_INTERRUPT_MASKED, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));

	/* clear the outbound doorbell register */
	it_reg = readl(MIXART_REG(mgr, MIXART_PCI_ODBR_OFFSET));
	writel(it_reg, MIXART_REG(mgr, MIXART_PCI_ODBR_OFFSET));

	/* clear the interrupt */
	writel_le(MIXART_OIDI, MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET));

	return IRQ_WAKE_THREAD;
}

irqreturn_t snd_mixart_threaded_irq(int irq, void *dev_id)
{
	struct mixart_mgr *mgr = dev_id;
	int err;
	struct mixart_msg resp;
	u32 msg;

	mutex_lock(&mgr->lock);
	/* process the interrupt */
	while (retrieve_msg_frame(mgr, &msg)) {

		switch (msg & MSG_TYPE_MASK) {
		case MSG_TYPE_COMMAND:
			resp.message_id = 0;
			resp.data = mixart_msg_data;
			resp.size = sizeof(mixart_msg_data);
			err = get_msg(mgr, &resp, msg & ~MSG_TYPE_MASK);
			if (err < 0) {
				dev_err(&mgr->pci->dev,
					"interrupt: error(%d) reading mf %x\n",
					err, msg);
				break;
			}

			if (resp.message_id == MSG_SERVICES_TIMER_NOTIFY) {
				int i;
				struct mixart_timer_notify *notify;

				notify = (struct mixart_timer_notify *)mixart_msg_data;

				for (i = 0; i < notify->stream_count; i++) {

					u32 buffer_id = notify->streams[i].buffer_id;
					unsigned int chip_number = (buffer_id & MIXART_NOTIFY_CARD_MASK) >> MIXART_NOTIFY_CARD_OFFSET; /* card0 to 3 */
					unsigned int pcm_number = (buffer_id & MIXART_NOTIFY_PCM_MASK) >> MIXART_NOTIFY_PCM_OFFSET;    /* pcm0 to 3 */
					unsigned int sub_number = buffer_id & MIXART_NOTIFY_SUBS_MASK;		/* 0 to MIXART_PLAYBACK_STREAMS */
					unsigned int is_capture = ((buffer_id & MIXART_NOTIFY_CAPT_MASK) != 0);	/* playback == 0 / capture == 1 */

					struct snd_mixart *chip = mgr->chip[chip_number];
					struct mixart_stream *stream;

					if ((chip_number >= mgr->num_cards) || (pcm_number >= MIXART_PCM_TOTAL) || (sub_number >= MIXART_PLAYBACK_STREAMS)) {
						dev_err(&mgr->pci->dev,
							"error MSG_SERVICES_TIMER_NOTIFY buffer_id (%x) pos(%d)\n",
							buffer_id, notify->streams[i].sample_pos_low_part);
						break;
					}

					if (is_capture)
						stream = &chip->capture_stream[pcm_number];
					else
						stream = &chip->playback_stream[pcm_number][sub_number];

					if (stream->substream && (stream->status == MIXART_STREAM_STATUS_RUNNING)) {
						struct snd_pcm_runtime *runtime = stream->substream->runtime;
						int elapsed = 0;
						u64 sample_count = ((u64)notify->streams[i].sample_pos_high_part) << 32;

						sample_count |= notify->streams[i].sample_pos_low_part;

						while (1) {
							u64 new_elapse_pos = stream->abs_period_elapsed + runtime->period_size;

							if (new_elapse_pos > sample_count) {
								break;	/* while */
							} else {
								elapsed = 1;
								stream->buf_periods++;
								if (stream->buf_periods >= runtime->periods)
									stream->buf_periods = 0;

								stream->abs_period_elapsed = new_elapse_pos;
							}
						}
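						/*
						 * abs_period_elapsed now sits on the last period
						 * boundary not exceeding sample_count; the
						 * remainder below is the frame offset inside the
						 * current period.
						 */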
						stream->buf_period_frag = (u32)(sample_count - stream->abs_period_elapsed);

						if (elapsed) {
							mutex_unlock(&mgr->lock);
							snd_pcm_period_elapsed(stream->substream);
							mutex_lock(&mgr->lock);
						}
					}
				}
				break;
			}
			if (resp.message_id == MSG_SERVICES_REPORT_TRACES) {
				if (resp.size > 1) {
#ifndef __BIG_ENDIAN
					/* Traces are text: the swapped msg_data has to be swapped back! */
					int i;

					for (i = 0; i < (resp.size / 4); i++)
						((__be32 *)mixart_msg_data)[i] = cpu_to_be32(mixart_msg_data[i]);
#endif
					((char *)mixart_msg_data)[resp.size - 1] = 0;
					dev_dbg(&mgr->pci->dev,
						"MIXART TRACE : %s\n",
						(char *)mixart_msg_data);
				}
				break;
			}

			dev_dbg(&mgr->pci->dev, "command %x not handled\n",
				resp.message_id);
			break;

		case MSG_TYPE_NOTIFY:
			if (msg & MSG_CANCEL_NOTIFY_MASK) {
				msg &= ~MSG_CANCEL_NOTIFY_MASK;
				dev_err(&mgr->pci->dev,
					"canceled notification %x!\n", msg);
			}
			/* fall through */
		case MSG_TYPE_ANSWER:
			/* answer or notification to a message we are waiting for */
			mutex_lock(&mgr->msg_lock);
			if ((msg & ~MSG_TYPE_MASK) == mgr->pending_event) {
				wake_up(&mgr->msg_sleep);
				mgr->pending_event = 0;
			} else {
				/* answer to a message we didn't want to wait for */
				mgr->msg_fifo[mgr->msg_fifo_writeptr] = msg;
				mgr->msg_fifo_writeptr++;
				mgr->msg_fifo_writeptr %= MSG_FIFO_SIZE;
				snd_mixart_process_msg(mgr);
			}
			mutex_unlock(&mgr->msg_lock);
			break;
		case MSG_TYPE_REQUEST:
		default:
			dev_dbg(&mgr->pci->dev,
				"interrupt received request %x\n", msg);
			/* TODO: are there things to do here? */
			break;
		}	/* switch on msg type */
	}	/* while there are msgs */

	/* allow interrupts again */
	writel_le(MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));

	mutex_unlock(&mgr->lock);

	return IRQ_HANDLED;
}


void snd_mixart_init_mailbox(struct mixart_mgr *mgr)
{
	writel(0, MIXART_MEM(mgr, MSG_HOST_RSC_PROTECTION));
	writel(0, MIXART_MEM(mgr, MSG_AGENT_RSC_PROTECTION));

	/* allow the outbound message box to generate interrupts */
	if (mgr->irq >= 0)
		writel_le(MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));
}

void snd_mixart_exit_mailbox(struct mixart_mgr *mgr)
{
	/* no more interrupts on the outbound message box */
	writel_le(MIXART_HOST_ALL_INTERRUPT_MASKED, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));
}

void snd_mixart_reset_board(struct mixart_mgr *mgr)
{
	/* reset the miXart */
	writel_be(1, MIXART_REG(mgr, MIXART_BA1_BRUTAL_RESET_OFFSET));
}
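
/*
 * Note: the two handlers above are expected to be registered as a
 * hard/threaded pair by the probe code in mixart.c (not part of this file);
 * a sketch of such a registration, with assumed flags and naming, would be:
 *
 *	if (request_threaded_irq(pci->irq, snd_mixart_interrupt,
 *				 snd_mixart_threaded_irq, IRQF_SHARED,
 *				 KBUILD_MODNAME, mgr))
 *		return -EBUSY;
 *	mgr->irq = pci->irq;
 *
 * which is also why snd_mixart_init_mailbox() only unmasks the outbound
 * doorbell when mgr->irq is valid.
 */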