// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Digigram miXart soundcards
 *
 * low level interface with interrupt handling and mail box implementation
 *
 * Copyright (c) 2003 by Digigram <alsa@digigram.com>
 */

#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/io.h>

#include <sound/core.h>
#include "mixart.h"
#include "mixart_hwdep.h"
#include "mixart_core.h"


#define MSG_TIMEOUT_JIFFIES	((400 * HZ) / 1000) /* 400 ms */

#define MSG_DESCRIPTOR_SIZE	0x24
#define MSG_HEADER_SIZE		(MSG_DESCRIPTOR_SIZE + 4)

#define MSG_DEFAULT_SIZE	512

#define MSG_TYPE_MASK		0x00000003 /* mask for following types */
#define MSG_TYPE_NOTIFY		0 /* embedded -> driver (only notification, do not get_msg() !) */
#define MSG_TYPE_COMMAND	1 /* driver <-> embedded (a command has no answer) */
#define MSG_TYPE_REQUEST	2 /* driver -> embedded (request will get an answer back) */
#define MSG_TYPE_ANSWER		3 /* embedded -> driver */
#define MSG_CANCEL_NOTIFY_MASK	0x80000000 /* this bit is set for a notification that has been canceled */


static int retrieve_msg_frame(struct mixart_mgr *mgr, u32 *msg_frame)
{
	/* read the message frame fifo */
	u32 headptr, tailptr;

	tailptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_POST_TAIL));
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_POST_HEAD));

	if (tailptr == headptr)
		return 0; /* no message posted */

	if (tailptr < MSG_OUTBOUND_POST_STACK)
		return 0; /* error */
	if (tailptr >= MSG_OUTBOUND_POST_STACK + MSG_BOUND_STACK_SIZE)
		return 0; /* error */

	*msg_frame = readl_be(MIXART_MEM(mgr, tailptr));

	/* increment the tail index */
	tailptr += 4;
	if( tailptr >= (MSG_OUTBOUND_POST_STACK+MSG_BOUND_STACK_SIZE) )
		tailptr = MSG_OUTBOUND_POST_STACK;
	writel_be(tailptr, MIXART_MEM(mgr, MSG_OUTBOUND_POST_TAIL));

	return 1;
}

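/*
 * get_msg() - copy one message frame (descriptor + payload) from the card's
 * outbound area into *resp, byte-swapping the payload on little-endian hosts,
 * and hand the frame address back to the outbound free FIFO.
 */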
static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
		   u32 msg_frame_address )
{
	u32 headptr;
	u32 size;
	int err;
#ifndef __BIG_ENDIAN
	unsigned int i;
#endif

	err = 0;

	/* copy message descriptor from miXart to driver */
	size = readl_be(MIXART_MEM(mgr, msg_frame_address)); /* size of descriptor + response */
	resp->message_id = readl_be(MIXART_MEM(mgr, msg_frame_address + 4)); /* dwMessageID */
	resp->uid.object_id = readl_be(MIXART_MEM(mgr, msg_frame_address + 8)); /* uidDest */
	resp->uid.desc = readl_be(MIXART_MEM(mgr, msg_frame_address + 12)); /* */

	if( (size < MSG_DESCRIPTOR_SIZE) || (resp->size < (size - MSG_DESCRIPTOR_SIZE))) {
		err = -EINVAL;
		dev_err(&mgr->pci->dev,
			"problem with response size = %d\n", size);
		goto _clean_exit;
	}
	size -= MSG_DESCRIPTOR_SIZE;

	memcpy_fromio(resp->data, MIXART_MEM(mgr, msg_frame_address + MSG_HEADER_SIZE ), size);
	resp->size = size;

	/* swap if necessary */
#ifndef __BIG_ENDIAN
	size /= 4; /* u32 size */
	for(i=0; i < size; i++) {
		((u32*)resp->data)[i] = be32_to_cpu(((__be32*)resp->data)[i]);
	}
#endif

	/*
	 * free message frame address
	 */
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

	if( (headptr < MSG_OUTBOUND_FREE_STACK) || ( headptr >= (MSG_OUTBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE))) {
		err = -EINVAL;
		goto _clean_exit;
	}

	/* give address back to outbound fifo */
	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* increment the outbound free head */
	headptr += 4;
	if( headptr >= (MSG_OUTBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE) )
		headptr = MSG_OUTBOUND_FREE_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

 _clean_exit:
	return err;
}


/*
 * send a message to miXart. return: the msg_frame used for this message
 */
/* call with mgr->msg_lock held! */
static int send_msg( struct mixart_mgr *mgr,
		     struct mixart_msg *msg,
		     int max_answersize,
		     int mark_pending,
		     u32 *msg_event)
{
	u32 headptr, tailptr;
	u32 msg_frame_address;
	int i;

	if (snd_BUG_ON(msg->size % 4))
		return -EINVAL;

	/* get message frame address */
	tailptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_FREE_TAIL));
	headptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_FREE_HEAD));

	if (tailptr == headptr) {
		dev_err(&mgr->pci->dev, "error: no message frame available\n");
		return -EBUSY;
	}

	if( (tailptr < MSG_INBOUND_FREE_STACK) || (tailptr >= (MSG_INBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE))) {
		return -EINVAL;
	}

	msg_frame_address = readl_be(MIXART_MEM(mgr, tailptr));
	writel(0, MIXART_MEM(mgr, tailptr)); /* set address to zero on this fifo position */

	/* increment the inbound free tail */
	tailptr += 4;
	if( tailptr >= (MSG_INBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE) )
		tailptr = MSG_INBOUND_FREE_STACK;

	writel_be(tailptr, MIXART_MEM(mgr, MSG_INBOUND_FREE_TAIL));

	/* TODO : use memcpy_toio() with intermediate buffer to copy the message */

	/* copy message descriptor to card memory */
	writel_be( msg->size + MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address) ); /* size of descriptor + request */
	writel_be( msg->message_id, MIXART_MEM(mgr, msg_frame_address + 4) ); /* dwMessageID */
	writel_be( msg->uid.object_id, MIXART_MEM(mgr, msg_frame_address + 8) ); /* uidDest */
	writel_be( msg->uid.desc, MIXART_MEM(mgr, msg_frame_address + 12) ); /* */
	writel_be( MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 16) ); /* SizeHeader */
	writel_be( MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 20) ); /* OffsetDLL_T16 */
	writel_be( msg->size, MIXART_MEM(mgr, msg_frame_address + 24) ); /* SizeDLL_T16 */
	writel_be( MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 28) ); /* OffsetDLL_DRV */
	writel_be( 0, MIXART_MEM(mgr, msg_frame_address + 32) ); /* SizeDLL_DRV */
	writel_be( MSG_DESCRIPTOR_SIZE + max_answersize, MIXART_MEM(mgr, msg_frame_address + 36) ); /* dwExpectedAnswerSize */

	/* copy message data to card memory */
	for( i=0; i < msg->size; i+=4 ) {
		writel_be( *(u32*)(msg->data + i), MIXART_MEM(mgr, MSG_HEADER_SIZE + msg_frame_address + i) );
	}

	if( mark_pending ) {
		if( *msg_event ) {
			/* the pending event is the notification we wait for ! */
			mgr->pending_event = *msg_event;
		}
		else {
			/* the pending event is the answer we wait for (same address as the request)! */
			mgr->pending_event = msg_frame_address;

			/* copy address back to caller */
			*msg_event = msg_frame_address;
		}
	}

	/* mark the frame as a request (will have an answer) */
	msg_frame_address |= MSG_TYPE_REQUEST;

	/* post the frame */
	headptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_POST_HEAD));

	if( (headptr < MSG_INBOUND_POST_STACK) || (headptr >= (MSG_INBOUND_POST_STACK+MSG_BOUND_STACK_SIZE))) {
		return -EINVAL;
	}

	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* increment the inbound post head */
	headptr += 4;
	if( headptr >= (MSG_INBOUND_POST_STACK+MSG_BOUND_STACK_SIZE) )
		headptr = MSG_INBOUND_POST_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_INBOUND_POST_HEAD));

	return 0;
}

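/*
 * Send a request to the embedded side and sleep (uninterruptibly, up to
 * MSG_TIMEOUT_JIFFIES) until the threaded IRQ handler reports that the answer
 * frame has been posted; the answer is then copied into resp_data.
 */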
int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int max_resp_size, void *resp_data)
{
	struct mixart_msg resp;
	u32 msg_frame = 0; /* 0 means: do not wait for a notification, wait for the answer */
	int err;
	wait_queue_entry_t wait;
	long timeout;

	init_waitqueue_entry(&wait, current);

	mutex_lock(&mgr->msg_lock);
	/* send the message */
	err = send_msg(mgr, request, max_resp_size, 1, &msg_frame); /* send and mark the answer pending */
	if (err) {
		mutex_unlock(&mgr->msg_lock);
		return err;
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	mutex_unlock(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (! timeout) {
		/* error - no ack */
		dev_err(&mgr->pci->dev,
			"error: no response on msg %x\n", msg_frame);
		return -EIO;
	}

	/* retrieve the answer into the same struct mixart_msg */
	resp.message_id = 0;
	resp.uid = (struct mixart_uid){0,0};
	resp.data = resp_data;
	resp.size = max_resp_size;

	mutex_lock(&mgr->msg_lock);
	err = get_msg(mgr, &resp, msg_frame);
	mutex_unlock(&mgr->msg_lock);

	if( request->message_id != resp.message_id )
		dev_err(&mgr->pci->dev, "RESPONSE ERROR!\n");

	return err;
}

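/*
 * Send a request and sleep until the given notification event is reported by
 * the embedded side (or until MSG_TIMEOUT_JIFFIES expires).
 */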
int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
				   struct mixart_msg *request, u32 notif_event)
{
	int err;
	wait_queue_entry_t wait;
	long timeout;

	if (snd_BUG_ON(!notif_event))
		return -EINVAL;
	if (snd_BUG_ON((notif_event & MSG_TYPE_MASK) != MSG_TYPE_NOTIFY))
		return -EINVAL;
	if (snd_BUG_ON(notif_event & MSG_CANCEL_NOTIFY_MASK))
		return -EINVAL;

	init_waitqueue_entry(&wait, current);

	mutex_lock(&mgr->msg_lock);
	/* send the message */
	err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 1, &notif_event); /* send and mark the notification event pending */
	if(err) {
		mutex_unlock(&mgr->msg_lock);
		return err;
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	mutex_unlock(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (! timeout) {
		/* error - no ack */
		dev_err(&mgr->pci->dev,
			"error: notification %x not received\n", notif_event);
		return -EIO;
	}

	return 0;
}


int snd_mixart_send_msg_nonblock(struct mixart_mgr *mgr, struct mixart_msg *request)
{
	u32 message_frame;
	int err;

	/* just send the message (do not mark it as a pending one) */
	mutex_lock(&mgr->msg_lock);
	err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 0, &message_frame);
	mutex_unlock(&mgr->msg_lock);

	/* the answer will be handled by snd_mixart_process_msg() from the threaded IRQ */
	atomic_inc(&mgr->msg_processed);

	return err;
}

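/*
 * snd_mixart_process_msg() below drains the driver-side FIFO of answers to
 * non-blocking requests (see snd_mixart_send_msg_nonblock()); it is called
 * from the threaded IRQ handler with mgr->msg_lock held.
 */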
/* common buffer of interrupt to send/receive messages */
static u32 mixart_msg_data[MSG_DEFAULT_SIZE / 4];


static void snd_mixart_process_msg(struct mixart_mgr *mgr)
{
	struct mixart_msg resp;
	u32 msg, addr, type;
	int err;

	while (mgr->msg_fifo_readptr != mgr->msg_fifo_writeptr) {
		msg = mgr->msg_fifo[mgr->msg_fifo_readptr];
		mgr->msg_fifo_readptr++;
		mgr->msg_fifo_readptr %= MSG_FIFO_SIZE;

		/* process the message ... */
		addr = msg & ~MSG_TYPE_MASK;
		type = msg & MSG_TYPE_MASK;

		switch (type) {
		case MSG_TYPE_ANSWER:
			/* answer to a message we did not wait for (send_msg_nonblock) */
			resp.message_id = 0;
			resp.data = mixart_msg_data;
			resp.size = sizeof(mixart_msg_data);
			err = get_msg(mgr, &resp, addr);
			if( err < 0 ) {
				dev_err(&mgr->pci->dev,
					"error(%d) reading mf %x\n",
					err, msg);
				break;
			}

			switch(resp.message_id) {
			case MSG_STREAM_START_INPUT_STAGE_PACKET:
			case MSG_STREAM_START_OUTPUT_STAGE_PACKET:
			case MSG_STREAM_STOP_INPUT_STAGE_PACKET:
			case MSG_STREAM_STOP_OUTPUT_STAGE_PACKET:
				if(mixart_msg_data[0])
					dev_err(&mgr->pci->dev,
						"error MSG_STREAM_ST***_***PUT_STAGE_PACKET status=%x\n",
						mixart_msg_data[0]);
				break;
			default:
				dev_dbg(&mgr->pci->dev,
					"received mf(%x) : msg_id(%x) uid(%x, %x) size(%zd)\n",
					msg, resp.message_id, resp.uid.object_id, resp.uid.desc, resp.size);
				break;
			}
			break;
		case MSG_TYPE_NOTIFY:
			/* msg contains no address ! do not get_msg() ! */
		case MSG_TYPE_COMMAND:
			/* get_msg() necessary */
		default:
			dev_err(&mgr->pci->dev,
				"doesn't know what to do with message %x\n",
				msg);
		} /* switch type */

		/* decrement counter */
		atomic_dec(&mgr->msg_processed);

	} /* while there is a msg in fifo */
}


irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
{
	struct mixart_mgr *mgr = dev_id;
	u32 it_reg;

	it_reg = readl_le(MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET));
	if( !(it_reg & MIXART_OIDI) ) {
		/* this device did not cause the interrupt */
		return IRQ_NONE;
	}

	/* mask all interrupts */
	writel_le(MIXART_HOST_ALL_INTERRUPT_MASKED, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));

	/* clear the outbound doorbell register */
	it_reg = readl(MIXART_REG(mgr, MIXART_PCI_ODBR_OFFSET));
	writel(it_reg, MIXART_REG(mgr, MIXART_PCI_ODBR_OFFSET));

	/* clear interrupt */
	writel_le( MIXART_OIDI, MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET) );

	return IRQ_WAKE_THREAD;
}

irqreturn_t snd_mixart_threaded_irq(int irq, void *dev_id)
{
	struct mixart_mgr *mgr = dev_id;
	int err;
	struct mixart_msg resp;
	u32 msg;

	mutex_lock(&mgr->lock);
	/* process interrupt */
	while (retrieve_msg_frame(mgr, &msg)) {

		switch (msg & MSG_TYPE_MASK) {
		case MSG_TYPE_COMMAND:
			resp.message_id = 0;
			resp.data = mixart_msg_data;
			resp.size = sizeof(mixart_msg_data);
			err = get_msg(mgr, &resp, msg & ~MSG_TYPE_MASK);
			if( err < 0 ) {
				dev_err(&mgr->pci->dev,
					"interrupt: error(%d) reading mf %x\n",
					err, msg);
				break;
			}

			if(resp.message_id == MSG_SERVICES_TIMER_NOTIFY) {
				int i;
				struct mixart_timer_notify *notify;
				notify = (struct mixart_timer_notify *)mixart_msg_data;

				for(i=0; i<notify->stream_count; i++) {

					u32 buffer_id = notify->streams[i].buffer_id;
					unsigned int chip_number = (buffer_id & MIXART_NOTIFY_CARD_MASK) >> MIXART_NOTIFY_CARD_OFFSET; /* card0 to 3 */
					unsigned int pcm_number = (buffer_id & MIXART_NOTIFY_PCM_MASK ) >> MIXART_NOTIFY_PCM_OFFSET; /* pcm0 to 3 */
					unsigned int sub_number = buffer_id & MIXART_NOTIFY_SUBS_MASK; /* 0 to MIXART_PLAYBACK_STREAMS */
					unsigned int is_capture = ((buffer_id & MIXART_NOTIFY_CAPT_MASK) != 0); /* playback == 0 / capture == 1 */

					struct snd_mixart *chip = mgr->chip[chip_number];
					struct mixart_stream *stream;

					if ((chip_number >= mgr->num_cards) || (pcm_number >= MIXART_PCM_TOTAL) || (sub_number >= MIXART_PLAYBACK_STREAMS)) {
						dev_err(&mgr->pci->dev,
							"error MSG_SERVICES_TIMER_NOTIFY buffer_id (%x) pos(%d)\n",
							buffer_id, notify->streams[i].sample_pos_low_part);
						break;
					}

					if (is_capture)
						stream = &chip->capture_stream[pcm_number];
					else
						stream = &chip->playback_stream[pcm_number][sub_number];

					if (stream->substream && (stream->status == MIXART_STREAM_STATUS_RUNNING)) {
						struct snd_pcm_runtime *runtime = stream->substream->runtime;
						int elapsed = 0;
						u64 sample_count = ((u64)notify->streams[i].sample_pos_high_part) << 32;
						sample_count |= notify->streams[i].sample_pos_low_part;

						while (1) {
							u64 new_elapse_pos = stream->abs_period_elapsed + runtime->period_size;

							if (new_elapse_pos > sample_count) {
								break; /* while */
							}
							else {
								elapsed = 1;
								stream->buf_periods++;
								if (stream->buf_periods >= runtime->periods)
									stream->buf_periods = 0;

								stream->abs_period_elapsed = new_elapse_pos;
							}
						}
						stream->buf_period_frag = (u32)( sample_count - stream->abs_period_elapsed );

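						/*
						 * mgr->lock is released around snd_pcm_period_elapsed()
						 * so that the PCM core and its callbacks never run under
						 * the manager lock; it is re-taken before handling the
						 * remaining notifications.
						 */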
						if(elapsed) {
							mutex_unlock(&mgr->lock);
							snd_pcm_period_elapsed(stream->substream);
							mutex_lock(&mgr->lock);
						}
					}
				}
				break;
			}
			if(resp.message_id == MSG_SERVICES_REPORT_TRACES) {
				if(resp.size > 1) {
#ifndef __BIG_ENDIAN
					/* Traces are text: the swapped msg_data has to be swapped back ! */
					int i;
					for(i=0; i<(resp.size/4); i++) {
						((__be32*)mixart_msg_data)[i] = cpu_to_be32((mixart_msg_data)[i]);
					}
#endif
					((char*)mixart_msg_data)[resp.size - 1] = 0;
					dev_dbg(&mgr->pci->dev,
						"MIXART TRACE : %s\n",
						(char *)mixart_msg_data);
				}
				break;
			}

			dev_dbg(&mgr->pci->dev, "command %x not handled\n",
				resp.message_id);
			break;

		case MSG_TYPE_NOTIFY:
			if(msg & MSG_CANCEL_NOTIFY_MASK) {
				msg &= ~MSG_CANCEL_NOTIFY_MASK;
				dev_err(&mgr->pci->dev,
					"canceled notification %x !\n", msg);
			}
			fallthrough;
		case MSG_TYPE_ANSWER:
			/* answer or notification to a message we are waiting for */
			mutex_lock(&mgr->msg_lock);
			if( (msg & ~MSG_TYPE_MASK) == mgr->pending_event ) {
				wake_up(&mgr->msg_sleep);
				mgr->pending_event = 0;
			}
			/* answer to a message we didn't want to wait for */
			else {
				mgr->msg_fifo[mgr->msg_fifo_writeptr] = msg;
				mgr->msg_fifo_writeptr++;
				mgr->msg_fifo_writeptr %= MSG_FIFO_SIZE;
				snd_mixart_process_msg(mgr);
			}
			mutex_unlock(&mgr->msg_lock);
			break;
		case MSG_TYPE_REQUEST:
		default:
			dev_dbg(&mgr->pci->dev,
				"interrupt received request %x\n", msg);
			/* TODO : are there things to do here ? */
			break;
		} /* switch on msg type */
	} /* while there are msgs */

	/* allow interrupt again */
	writel_le( MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG( mgr, MIXART_PCI_OMIMR_OFFSET));

	mutex_unlock(&mgr->lock);

	return IRQ_HANDLED;
}


void snd_mixart_init_mailbox(struct mixart_mgr *mgr)
{
	writel( 0, MIXART_MEM( mgr, MSG_HOST_RSC_PROTECTION ) );
	writel( 0, MIXART_MEM( mgr, MSG_AGENT_RSC_PROTECTION ) );

	/* allow outbound messagebox to generate interrupts */
	if(mgr->irq >= 0) {
		writel_le( MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG( mgr, MIXART_PCI_OMIMR_OFFSET));
	}
	return;
}

void snd_mixart_exit_mailbox(struct mixart_mgr *mgr)
{
	/* no more interrupts on outbound messagebox */
	writel_le( MIXART_HOST_ALL_INTERRUPT_MASKED, MIXART_REG( mgr, MIXART_PCI_OMIMR_OFFSET));
	return;
}

void snd_mixart_reset_board(struct mixart_mgr *mgr)
{
	/* reset miXart */
	writel_be( 1, MIXART_REG(mgr, MIXART_BA1_BRUTAL_RESET_OFFSET) );
	return;
}