xref: /openbmc/qemu/hw/ufs/ufs.c (revision 4f7b1ecb)
1 /*
2  * QEMU Universal Flash Storage (UFS) Controller
3  *
4  * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
5  *
6  * Written by Jeuk Kim <jeuk20.kim@samsung.com>
7  *
8  * SPDX-License-Identifier: GPL-2.0-or-later
9  */
10 
11 /**
12  * Reference Specs: https://www.jedec.org/, 4.0
13  *
14  * Usage
15  * -----
16  *
17  * Add options:
18  *      -drive file=<file>,if=none,id=<drive_id>
19  *      -device ufs,serial=<serial>,id=<bus_name>, \
20  *              nutrs=<N[optional]>,nutmrs=<N[optional]>
21  *      -device ufs-lu,drive=<drive_id>,bus=<bus_name>
22  */
23 
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "migration/vmstate.h"
27 #include "scsi/constants.h"
28 #include "trace.h"
29 #include "ufs.h"
30 
31 /* The QEMU-UFS device follows spec version 4.0 */
32 #define UFS_SPEC_VER 0x0400
33 #define UFS_MAX_NUTRS 32
34 #define UFS_MAX_NUTMRS 8
35 #define UFS_MCQ_QCFGPTR 2
36 
37 static void ufs_exec_req(UfsRequest *req);
38 static void ufs_clear_req(UfsRequest *req);
39 
40 static inline uint64_t ufs_mcq_reg_addr(UfsHc *u, int qid)
41 {
42     /* Submission Queue MCQ Registers offset (400h) */
43     return (UFS_MCQ_QCFGPTR * 0x200) + qid * 0x40;
44 }
45 
46 static inline uint64_t ufs_mcq_op_reg_addr(UfsHc *u, int qid)
47 {
48     /* MCQ Operation & Runtime Registers offset (1000h) */
49     return UFS_MCQ_OPR_START + qid * 48;
50 }
51 
52 static inline uint64_t ufs_reg_size(UfsHc *u)
53 {
54     /* Total UFS HCI Register size in bytes */
55     return ufs_mcq_op_reg_addr(u, 0) + sizeof(u->mcq_op_reg);
56 }
57 
58 static inline bool ufs_is_mcq_reg(UfsHc *u, uint64_t addr, unsigned size)
59 {
60     uint64_t mcq_reg_addr = ufs_mcq_reg_addr(u, 0);
61     return (addr >= mcq_reg_addr &&
62             addr + size <= mcq_reg_addr + sizeof(u->mcq_reg));
63 }
64 
65 static inline bool ufs_is_mcq_op_reg(UfsHc *u, uint64_t addr, unsigned size)
66 {
67     uint64_t mcq_op_reg_addr = ufs_mcq_op_reg_addr(u, 0);
68     return (addr >= mcq_op_reg_addr &&
69             addr + size <= mcq_op_reg_addr + sizeof(u->mcq_op_reg));
70 }
71 
72 static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
73 {
74     hwaddr hi = addr + size - 1;
75 
76     if (hi < addr) {
77         return MEMTX_DECODE_ERROR;
78     }
79 
80     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
81         return MEMTX_DECODE_ERROR;
82     }
83 
84     return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
85 }
86 
87 static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
88                                   int size)
89 {
90     hwaddr hi = addr + size - 1;
91     if (hi < addr) {
92         return MEMTX_DECODE_ERROR;
93     }
94 
95     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
96         return MEMTX_DECODE_ERROR;
97     }
98 
99     return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
100 }
101 
102 static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
103 {
104     hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
105     hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
106 
107     return utrd_addr;
108 }
109 
110 static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
111 {
112     uint32_t cmd_desc_base_addr_lo =
113         le32_to_cpu(utrd->command_desc_base_addr_lo);
114     uint32_t cmd_desc_base_addr_hi =
115         le32_to_cpu(utrd->command_desc_base_addr_hi);
116 
117     return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
118 }
119 
120 static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
121 {
122     hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
123     uint32_t rsp_upiu_byte_off =
124         le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
125     return req_upiu_base_addr + rsp_upiu_byte_off;
126 }
127 
128 static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
129 {
130     UfsHc *u = req->hc;
131     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
132     MemTxResult ret;
133 
134     ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
135     if (ret) {
136         trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
137     }
138     return ret;
139 }
140 
141 static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
142 {
143     UfsHc *u = req->hc;
144     hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
145     UtpUpiuReq *req_upiu = &req->req_upiu;
146     uint32_t copy_size;
147     uint16_t data_segment_length;
148     MemTxResult ret;
149 
150     /*
151      * To know the size of the req_upiu, we need to read the
152      * data_segment_length in the header first.
153      */
154     ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
155                         sizeof(UtpUpiuHeader));
156     if (ret) {
157         trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
158         return ret;
159     }
160     data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);
161 
162     copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
163                 data_segment_length;
164 
165     if (copy_size > sizeof(req->req_upiu)) {
166         copy_size = sizeof(req->req_upiu);
167     }
168 
169     ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
170     if (ret) {
171         trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
172     }
173     return ret;
174 }
175 
176 static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
177 {
178     UfsHc *u = req->hc;
179     uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
180     uint16_t prdt_byte_off =
181         le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
182     uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
183     g_autofree UfshcdSgEntry *prd_entries = NULL;
184     hwaddr req_upiu_base_addr, prdt_base_addr;
185     int err;
186 
187     assert(!req->sg);
188 
189     if (prdt_size == 0) {
190         return MEMTX_OK;
191     }
192     prd_entries = g_new(UfshcdSgEntry, prdt_size);
193 
194     req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
195     prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
196 
197     err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
198     if (err) {
199         trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
200         return err;
201     }
202 
203     req->sg = g_malloc0(sizeof(QEMUSGList));
204     pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
205     req->data_len = 0;
206 
207     for (uint16_t i = 0; i < prdt_len; ++i) {
208         hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
209         uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
210         qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
211         req->data_len += data_byte_count;
212     }
213     return MEMTX_OK;
214 }
215 
216 static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
217 {
218     MemTxResult ret;
219 
220     /*
221      * In case of MCQ, UTRD has already been read from a SQ, so skip it.
222      */
223     if (!ufs_mcq_req(req)) {
224         ret = ufs_dma_read_utrd(req);
225         if (ret) {
226             return ret;
227         }
228     }
229 
230     ret = ufs_dma_read_req_upiu(req);
231     if (ret) {
232         return ret;
233     }
234 
235     ret = ufs_dma_read_prdt(req);
236     if (ret) {
237         return ret;
238     }
239 
240     return 0;
241 }
242 
243 static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
244 {
245     UfsHc *u = req->hc;
246     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
247     MemTxResult ret;
248 
249     ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
250     if (ret) {
251         trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
252     }
253     return ret;
254 }
255 
256 static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
257 {
258     UfsHc *u = req->hc;
259     hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
260     uint32_t rsp_upiu_byte_len =
261         le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
262     uint16_t data_segment_length =
263         be16_to_cpu(req->rsp_upiu.header.data_segment_length);
264     uint32_t copy_size = sizeof(UtpUpiuHeader) +
265                          UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
266                          data_segment_length;
267     MemTxResult ret;
268 
269     if (copy_size > rsp_upiu_byte_len) {
270         copy_size = rsp_upiu_byte_len;
271     }
272 
273     if (copy_size > sizeof(req->rsp_upiu)) {
274         copy_size = sizeof(req->rsp_upiu);
275     }
276 
277     ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
278     if (ret) {
279         trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
280     }
281     return ret;
282 }
283 
284 static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
285 {
286     MemTxResult ret;
287 
288     ret = ufs_dma_write_rsp_upiu(req);
289     if (ret) {
290         return ret;
291     }
292 
293     return ufs_dma_write_utrd(req);
294 }
295 
296 static void ufs_irq_check(UfsHc *u)
297 {
298     PCIDevice *pci = PCI_DEVICE(u);
299 
300     if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) {
301         trace_ufs_irq_raise();
302         pci_irq_assert(pci);
303     } else {
304         trace_ufs_irq_lower();
305         pci_irq_deassert(pci);
306     }
307 }
308 
309 static void ufs_process_db(UfsHc *u, uint32_t val)
310 {
311     DECLARE_BITMAP(doorbell, UFS_MAX_NUTRS);
312     uint32_t slot;
313     uint32_t nutrs = u->params.nutrs;
314     UfsRequest *req;
315 
316     val &= ~u->reg.utrldbr;
317     if (!val) {
318         return;
319     }
320 
321     doorbell[0] = val;
322     slot = find_first_bit(doorbell, nutrs);
323 
324     while (slot < nutrs) {
325         req = &u->req_list[slot];
326         if (req->state == UFS_REQUEST_ERROR) {
327             trace_ufs_err_utrl_slot_error(req->slot);
328             return;
329         }
330 
331         if (req->state != UFS_REQUEST_IDLE) {
332             trace_ufs_err_utrl_slot_busy(req->slot);
333             return;
334         }
335 
336         trace_ufs_process_db(slot);
337         req->state = UFS_REQUEST_READY;
338         slot = find_next_bit(doorbell, nutrs, slot + 1);
339     }
340 
341     qemu_bh_schedule(u->doorbell_bh);
342 }
343 
344 static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
345 {
346     trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
347                              u->reg.ucmdarg3);
348     /*
349      * Only the essential uic commands for running drivers on Linux and Windows
350      * are implemented.
351      */
352     switch (val) {
353     case UFS_UIC_CMD_DME_LINK_STARTUP:
354         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
355         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
356         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
357         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
358         break;
359     /* TODO: Revisit it when Power Management is implemented */
360     case UFS_UIC_CMD_DME_HIBER_ENTER:
361         u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
362         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
363         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
364         break;
365     case UFS_UIC_CMD_DME_HIBER_EXIT:
366         u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
367         u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
368         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
369         break;
370     default:
371         u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
372     }
373 
374     u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);
375 
376     ufs_irq_check(u);
377 }
378 
379 static void ufs_mcq_init_req(UfsHc *u, UfsRequest *req, UfsSq *sq)
380 {
381     memset(req, 0, sizeof(*req));
382 
383     req->hc = u;
384     req->state = UFS_REQUEST_IDLE;
385     req->slot = UFS_INVALID_SLOT;
386     req->sq = sq;
387 }
388 
389 static void ufs_mcq_process_sq(void *opaque)
390 {
391     UfsSq *sq = opaque;
392     UfsHc *u = sq->u;
393     UfsSqEntry sqe;
394     UfsRequest *req;
395     hwaddr addr;
396     uint16_t head = ufs_mcq_sq_head(u, sq->sqid);
397     int err;
398 
399     while (!(ufs_mcq_sq_empty(u, sq->sqid) || QTAILQ_EMPTY(&sq->req_list))) {
400         addr = sq->addr + head;
401         err = ufs_addr_read(sq->u, addr, (void *)&sqe, sizeof(sqe));
402         if (err) {
403             trace_ufs_err_dma_read_sq(sq->sqid, addr);
404             return;
405         }
406 
407         head = (head + sizeof(sqe)) % (sq->size * sizeof(sqe));
408         ufs_mcq_update_sq_head(u, sq->sqid, head);
409 
410         req = QTAILQ_FIRST(&sq->req_list);
411         QTAILQ_REMOVE(&sq->req_list, req, entry);
412 
413         ufs_mcq_init_req(sq->u, req, sq);
414         memcpy(&req->utrd, &sqe, sizeof(req->utrd));
415 
416         req->state = UFS_REQUEST_RUNNING;
417         ufs_exec_req(req);
418     }
419 }
420 
421 static void ufs_mcq_process_cq(void *opaque)
422 {
423     UfsCq *cq = opaque;
424     UfsHc *u = cq->u;
425     UfsRequest *req, *next;
426     MemTxResult ret;
427     uint32_t tail = ufs_mcq_cq_tail(u, cq->cqid);
428 
429     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next)
430     {
431         ufs_dma_write_rsp_upiu(req);
432 
433         req->cqe.utp_addr =
434             ((uint64_t)req->utrd.command_desc_base_addr_hi << 32ULL) |
435             req->utrd.command_desc_base_addr_lo;
436         req->cqe.utp_addr |= req->sq->sqid;
437         req->cqe.resp_len = req->utrd.response_upiu_length;
438         req->cqe.resp_off = req->utrd.response_upiu_offset;
439         req->cqe.prdt_len = req->utrd.prd_table_length;
440         req->cqe.prdt_off = req->utrd.prd_table_offset;
441         req->cqe.status = req->utrd.header.dword_2 & 0xf;
442         req->cqe.error = 0;
443 
444         ret = ufs_addr_write(u, cq->addr + tail, &req->cqe, sizeof(req->cqe));
445         if (ret) {
446             trace_ufs_err_dma_write_cq(cq->cqid, cq->addr + tail);
447         }
448         QTAILQ_REMOVE(&cq->req_list, req, entry);
449 
450         tail = (tail + sizeof(req->cqe)) % (cq->size * sizeof(req->cqe));
451         ufs_mcq_update_cq_tail(u, cq->cqid, tail);
452 
453         ufs_clear_req(req);
454         QTAILQ_INSERT_TAIL(&req->sq->req_list, req, entry);
455     }
456 
457     if (!ufs_mcq_cq_empty(u, cq->cqid)) {
458         u->mcq_op_reg[cq->cqid].cq_int.is =
459             FIELD_DP32(u->mcq_op_reg[cq->cqid].cq_int.is, CQIS, TEPS, 1);
460 
461         u->reg.is = FIELD_DP32(u->reg.is, IS, CQES, 1);
462         ufs_irq_check(u);
463     }
464 }
465 
466 static bool ufs_mcq_create_sq(UfsHc *u, uint8_t qid, uint32_t attr)
467 {
468     UfsMcqReg *reg = &u->mcq_reg[qid];
469     UfsSq *sq;
470     uint8_t cqid = FIELD_EX32(attr, SQATTR, CQID);
471 
472     if (qid >= u->params.mcq_maxq) {
473         trace_ufs_err_mcq_create_sq_invalid_sqid(qid);
474         return false;
475     }
476 
477     if (u->sq[qid]) {
478         trace_ufs_err_mcq_create_sq_already_exists(qid);
479         return false;
480     }
481 
482     if (!u->cq[cqid]) {
483         trace_ufs_err_mcq_create_sq_invalid_cqid(qid);
484         return false;
485     }
486 
487     sq = g_malloc0(sizeof(*sq));
488     sq->u = u;
489     sq->sqid = qid;
490     sq->cq = u->cq[cqid];
491     sq->addr = ((uint64_t)reg->squba << 32) | reg->sqlba;
492     sq->size = ((FIELD_EX32(attr, SQATTR, SIZE) + 1) << 2) / sizeof(UfsSqEntry);
493 
494     sq->bh = qemu_bh_new_guarded(ufs_mcq_process_sq, sq,
495                                  &DEVICE(u)->mem_reentrancy_guard);
496     sq->req = g_new0(UfsRequest, sq->size);
497     QTAILQ_INIT(&sq->req_list);
498     for (int i = 0; i < sq->size; i++) {
499         ufs_mcq_init_req(u, &sq->req[i], sq);
500         QTAILQ_INSERT_TAIL(&sq->req_list, &sq->req[i], entry);
501     }
502 
503     u->sq[qid] = sq;
504 
505     trace_ufs_mcq_create_sq(sq->sqid, sq->cq->cqid, sq->addr, sq->size);
506     return true;
507 }
508 
509 static bool ufs_mcq_delete_sq(UfsHc *u, uint8_t qid)
510 {
511     UfsSq *sq;
512 
513     if (qid >= u->params.mcq_maxq) {
514         trace_ufs_err_mcq_delete_sq_invalid_sqid(qid);
515         return false;
516     }
517 
518     if (!u->sq[qid]) {
519         trace_ufs_err_mcq_delete_sq_not_exists(qid);
520         return false;
521     }
522 
523     sq = u->sq[qid];
524 
525     qemu_bh_delete(sq->bh);
526     g_free(sq->req);
527     g_free(sq);
528     u->sq[qid] = NULL;
529     return true;
530 }
531 
532 static bool ufs_mcq_create_cq(UfsHc *u, uint8_t qid, uint32_t attr)
533 {
534     UfsMcqReg *reg = &u->mcq_reg[qid];
535     UfsCq *cq;
536 
537     if (qid >= u->params.mcq_maxq) {
538         trace_ufs_err_mcq_create_cq_invalid_cqid(qid);
539         return false;
540     }
541 
542     if (u->cq[qid]) {
543         trace_ufs_err_mcq_create_cq_already_exists(qid);
544         return false;
545     }
546 
547     cq = g_malloc0(sizeof(*cq));
548     cq->u = u;
549     cq->cqid = qid;
550     cq->addr = ((uint64_t)reg->cquba << 32) | reg->cqlba;
551     cq->size = ((FIELD_EX32(attr, CQATTR, SIZE) + 1) << 2) / sizeof(UfsCqEntry);
552 
553     cq->bh = qemu_bh_new_guarded(ufs_mcq_process_cq, cq,
554                                  &DEVICE(u)->mem_reentrancy_guard);
555     QTAILQ_INIT(&cq->req_list);
556 
557     u->cq[qid] = cq;
558 
559     trace_ufs_mcq_create_cq(cq->cqid, cq->addr, cq->size);
560     return true;
561 }
562 
563 static bool ufs_mcq_delete_cq(UfsHc *u, uint8_t qid)
564 {
565     UfsCq *cq;
566 
567     if (qid >= u->params.mcq_maxq) {
568         trace_ufs_err_mcq_delete_cq_invalid_cqid(qid);
569         return false;
570     }
571 
572     if (!u->cq[qid]) {
573         trace_ufs_err_mcq_delete_cq_not_exists(qid);
574         return false;
575     }
576 
577     for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
578         if (u->sq[i] && u->sq[i]->cq->cqid == qid) {
579             trace_ufs_err_mcq_delete_cq_sq_not_deleted(i, qid);
580             return false;
581         }
582     }
583 
584     cq = u->cq[qid];
585 
586     qemu_bh_delete(cq->bh);
587     g_free(cq);
588     u->cq[qid] = NULL;
589     return true;
590 }
591 
592 static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
593 {
594     switch (offset) {
595     case A_IS:
596         u->reg.is &= ~data;
597         ufs_irq_check(u);
598         break;
599     case A_IE:
600         u->reg.ie = data;
601         ufs_irq_check(u);
602         break;
603     case A_HCE:
604         if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
605             u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
606             u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
607         } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
608                    !FIELD_EX32(data, HCE, HCE)) {
609             u->reg.hcs = 0;
610             u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
611         }
612         break;
613     case A_UTRLBA:
614         u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
615         break;
616     case A_UTRLBAU:
617         u->reg.utrlbau = data;
618         break;
619     case A_UTRLDBR:
620         ufs_process_db(u, data);
621         u->reg.utrldbr |= data;
622         break;
623     case A_UTRLRSR:
624         u->reg.utrlrsr = data;
625         break;
626     case A_UTRLCNR:
627         u->reg.utrlcnr &= ~data;
628         break;
629     case A_UTMRLBA:
630         u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
631         break;
632     case A_UTMRLBAU:
633         u->reg.utmrlbau = data;
634         break;
635     case A_UICCMD:
636         ufs_process_uiccmd(u, data);
637         break;
638     case A_UCMDARG1:
639         u->reg.ucmdarg1 = data;
640         break;
641     case A_UCMDARG2:
642         u->reg.ucmdarg2 = data;
643         break;
644     case A_UCMDARG3:
645         u->reg.ucmdarg3 = data;
646         break;
647     case A_CONFIG:
648         u->reg.config = data;
649         break;
650     case A_MCQCONFIG:
651         u->reg.mcqconfig = data;
652         break;
653     case A_UTRLCLR:
654     case A_UTMRLDBR:
655     case A_UTMRLCLR:
656     case A_UTMRLRSR:
657         trace_ufs_err_unsupport_register_offset(offset);
658         break;
659     default:
660         trace_ufs_err_invalid_register_offset(offset);
661         break;
662     }
663 }
664 
665 static void ufs_write_mcq_reg(UfsHc *u, hwaddr offset, uint32_t data,
666                               unsigned size)
667 {
668     int qid = offset / sizeof(UfsMcqReg);
669     UfsMcqReg *reg = &u->mcq_reg[qid];
670 
671     switch (offset % sizeof(UfsMcqReg)) {
672     case A_SQATTR:
673         if (!FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
674             FIELD_EX32(data, SQATTR, SQEN)) {
675             if (!ufs_mcq_create_sq(u, qid, data)) {
676                 break;
677             }
678         } else if (FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
679                    !FIELD_EX32(data, SQATTR, SQEN)) {
680             if (!ufs_mcq_delete_sq(u, qid)) {
681                 break;
682             }
683         }
684         reg->sqattr = data;
685         break;
686     case A_SQLBA:
687         reg->sqlba = data;
688         break;
689     case A_SQUBA:
690         reg->squba = data;
691         break;
692     case A_SQCFG:
693         reg->sqcfg = data;
694         break;
695     case A_CQATTR:
696         if (!FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
697             FIELD_EX32(data, CQATTR, CQEN)) {
698             if (!ufs_mcq_create_cq(u, qid, data)) {
699                 break;
700             }
701         } else if (FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
702                    !FIELD_EX32(data, CQATTR, CQEN)) {
703             if (!ufs_mcq_delete_cq(u, qid)) {
704                 break;
705             }
706         }
707         reg->cqattr = data;
708         break;
709     case A_CQLBA:
710         reg->cqlba = data;
711         break;
712     case A_CQUBA:
713         reg->cquba = data;
714         break;
715     case A_CQCFG:
716         reg->cqcfg = data;
717         break;
718     case A_SQDAO:
719     case A_SQISAO:
720     case A_CQDAO:
721     case A_CQISAO:
722         trace_ufs_err_unsupport_register_offset(offset);
723         break;
724     default:
725         trace_ufs_err_invalid_register_offset(offset);
726         break;
727     }
728 }
729 
730 static void ufs_mcq_process_db(UfsHc *u, uint8_t qid, uint32_t db)
731 {
732     UfsSq *sq;
733 
734     if (qid >= u->params.mcq_maxq) {
735         trace_ufs_err_mcq_db_wr_invalid_sqid(qid);
736         return;
737     }
738 
739     sq = u->sq[qid];
740     if (sq->size * sizeof(UfsSqEntry) <= db) {
741         trace_ufs_err_mcq_db_wr_invalid_db(qid, db);
742         return;
743     }
744 
745     ufs_mcq_update_sq_tail(u, sq->sqid, db);
746     qemu_bh_schedule(sq->bh);
747 }
748 
749 static void ufs_write_mcq_op_reg(UfsHc *u, hwaddr offset, uint32_t data,
750                                  unsigned size)
751 {
752     int qid = offset / sizeof(UfsMcqOpReg);
753     UfsMcqOpReg *opr = &u->mcq_op_reg[qid];
754 
755     switch (offset % sizeof(UfsMcqOpReg)) {
756     case offsetof(UfsMcqOpReg, sq.tp):
757         if (opr->sq.tp != data) {
758             ufs_mcq_process_db(u, qid, data);
759         }
760         opr->sq.tp = data;
761         break;
762     case offsetof(UfsMcqOpReg, cq.hp):
763         opr->cq.hp = data;
764         ufs_mcq_update_cq_head(u, qid, data);
765         break;
766     case offsetof(UfsMcqOpReg, cq_int.is):
767         opr->cq_int.is &= ~data;
768         break;
769     default:
770         trace_ufs_err_invalid_register_offset(offset);
771         break;
772     }
773 }
774 
775 static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
776 {
777     UfsHc *u = (UfsHc *)opaque;
778     uint32_t *ptr;
779     uint64_t value;
780     uint64_t offset;
781 
782     if (addr + size <= sizeof(u->reg)) {
783         offset = addr;
784         ptr = (uint32_t *)&u->reg;
785     } else if (ufs_is_mcq_reg(u, addr, size)) {
786         offset = addr - ufs_mcq_reg_addr(u, 0);
787         ptr = (uint32_t *)&u->mcq_reg;
788     } else if (ufs_is_mcq_op_reg(u, addr, size)) {
789         offset = addr - ufs_mcq_op_reg_addr(u, 0);
790         ptr = (uint32_t *)&u->mcq_op_reg;
791     } else {
792         trace_ufs_err_invalid_register_offset(addr);
793         return 0;
794     }
795 
796     value = ptr[offset >> 2];
797     trace_ufs_mmio_read(addr, value, size);
798     return value;
799 }
800 
801 static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
802                            unsigned size)
803 {
804     UfsHc *u = (UfsHc *)opaque;
805 
806     trace_ufs_mmio_write(addr, data, size);
807 
808     if (addr + size <= sizeof(u->reg)) {
809         ufs_write_reg(u, addr, data, size);
810     } else if (ufs_is_mcq_reg(u, addr, size)) {
811         ufs_write_mcq_reg(u, addr - ufs_mcq_reg_addr(u, 0), data, size);
812     } else if (ufs_is_mcq_op_reg(u, addr, size)) {
813         ufs_write_mcq_op_reg(u, addr - ufs_mcq_op_reg_addr(u, 0), data, size);
814     } else {
815         trace_ufs_err_invalid_register_offset(addr);
816     }
817 }
818 
819 static const MemoryRegionOps ufs_mmio_ops = {
820     .read = ufs_mmio_read,
821     .write = ufs_mmio_write,
822     .endianness = DEVICE_LITTLE_ENDIAN,
823     .impl = {
824         .min_access_size = 4,
825         .max_access_size = 4,
826     },
827 };
828 
829 
830 void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
831                            uint8_t response, uint8_t scsi_status,
832                            uint16_t data_segment_length)
833 {
834     memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader));
835     req->rsp_upiu.header.trans_type = trans_type;
836     req->rsp_upiu.header.flags = flags;
837     req->rsp_upiu.header.response = response;
838     req->rsp_upiu.header.scsi_status = scsi_status;
839     req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
840 }
841 
842 static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
843 {
844     UfsHc *u = req->hc;
845     uint8_t lun = req->req_upiu.header.lun;
846 
847     UfsLu *lu = NULL;
848 
849     trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);
850 
851     if (!is_wlun(lun) && (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
852         trace_ufs_err_scsi_cmd_invalid_lun(lun);
853         return UFS_REQUEST_FAIL;
854     }
855 
856     switch (lun) {
857     case UFS_UPIU_REPORT_LUNS_WLUN:
858         lu = &u->report_wlu;
859         break;
860     case UFS_UPIU_UFS_DEVICE_WLUN:
861         lu = &u->dev_wlu;
862         break;
863     case UFS_UPIU_BOOT_WLUN:
864         lu = &u->boot_wlu;
865         break;
866     case UFS_UPIU_RPMB_WLUN:
867         lu = &u->rpmb_wlu;
868         break;
869     default:
870         lu = u->lus[lun];
871     }
872 
873     return lu->scsi_op(lu, req);
874 }
875 
876 static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
877 {
878     trace_ufs_exec_nop_cmd(req->slot);
879     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
880     return UFS_REQUEST_SUCCESS;
881 }
882 
883 /*
884  * This defines the permission of flags based on their IDN. There are some
885  * things that are declared read-only, which is inconsistent with the ufs spec,
886  * because we want to return an error for features that are not yet supported.
887  */
888 static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
889     [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
890     /* Write protection is not supported */
891     [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
892     [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
893     [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
894                                     UFS_QUERY_FLAG_CLEAR |
895                                     UFS_QUERY_FLAG_TOGGLE,
896     [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
897         UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
898         UFS_QUERY_FLAG_TOGGLE,
899     /* Purge Operation is not supported */
900     [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
901     /* Refresh Operation is not supported */
902     [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
903     /* Physical Resource Removal is not supported */
904     [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
905     [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
906     [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
907     /* Write Booster is not supported */
908     [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
909     [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
910     [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
911 };
912 
913 static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
914 {
915     if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
916         return UFS_QUERY_RESULT_INVALID_IDN;
917     }
918 
919     if (!(flag_permission[idn] & op)) {
920         if (op == UFS_QUERY_FLAG_READ) {
921             trace_ufs_err_query_flag_not_readable(idn);
922             return UFS_QUERY_RESULT_NOT_READABLE;
923         }
924         trace_ufs_err_query_flag_not_writable(idn);
925         return UFS_QUERY_RESULT_NOT_WRITEABLE;
926     }
927 
928     return UFS_QUERY_RESULT_SUCCESS;
929 }
930 
931 static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
932     /* booting is not supported */
933     [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
934     [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
935     [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
936         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
937     [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
938     [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
939     [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
940     [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
941         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
942     [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
943         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
944     [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
945     [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
946         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
947     [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
948     [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
949         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
950     [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
951         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
952     [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
953     [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
954     [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
955     [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
956     [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
957     [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
958         UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
959     [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
960     [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
961     [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
962     [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
963     [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
964     [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
965     [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
966     [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
967     [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
968     /* refresh operation is not supported */
969     [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
970     [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
971     [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
972 };
973 
974 static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
975 {
976     if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
977         return UFS_QUERY_RESULT_INVALID_IDN;
978     }
979 
980     if (!(attr_permission[idn] & op)) {
981         if (op == UFS_QUERY_ATTR_READ) {
982             trace_ufs_err_query_attr_not_readable(idn);
983             return UFS_QUERY_RESULT_NOT_READABLE;
984         }
985         trace_ufs_err_query_attr_not_writable(idn);
986         return UFS_QUERY_RESULT_NOT_WRITEABLE;
987     }
988 
989     return UFS_QUERY_RESULT_SUCCESS;
990 }
991 
992 static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
993 {
994     UfsHc *u = req->hc;
995     uint8_t idn = req->req_upiu.qr.idn;
996     uint32_t value;
997     QueryRespCode ret;
998 
999     ret = ufs_flag_check_idn_valid(idn, op);
1000     if (ret) {
1001         return ret;
1002     }
1003 
1004     if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
1005         value = 0;
1006     } else if (op == UFS_QUERY_FLAG_READ) {
1007         value = *(((uint8_t *)&u->flags) + idn);
1008     } else if (op == UFS_QUERY_FLAG_SET) {
1009         value = 1;
1010     } else if (op == UFS_QUERY_FLAG_CLEAR) {
1011         value = 0;
1012     } else if (op == UFS_QUERY_FLAG_TOGGLE) {
1013         value = *(((uint8_t *)&u->flags) + idn);
1014         value = !value;
1015     } else {
1016         trace_ufs_err_query_invalid_opcode(op);
1017         return UFS_QUERY_RESULT_INVALID_OPCODE;
1018     }
1019 
1020     *(((uint8_t *)&u->flags) + idn) = value;
1021     req->rsp_upiu.qr.value = cpu_to_be32(value);
1022     return UFS_QUERY_RESULT_SUCCESS;
1023 }
1024 
/*
 * Return the current value of attribute @idn in host byte order.
 *
 * Multi-byte attributes are kept big-endian in u->attributes (matching the
 * UPIU wire format), so those cases convert with be16/32_to_cpu here.
 * IDN validity is checked by the caller (ufs_attr_check_idn_valid());
 * anything that still falls through reads as 0.
 */
static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
{
    switch (idn) {
    case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
        return u->attributes.boot_lun_en;
    case UFS_QUERY_ATTR_IDN_POWER_MODE:
        return u->attributes.current_power_mode;
    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
        return u->attributes.active_icc_level;
    case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
        return u->attributes.out_of_order_data_en;
    case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
        return u->attributes.background_op_status;
    case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
        return u->attributes.purge_status;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
        return u->attributes.max_data_in_size;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
        return u->attributes.max_data_out_size;
    /* big-endian fields below are converted to host order */
    case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
        return be32_to_cpu(u->attributes.dyn_cap_needed);
    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
        return u->attributes.ref_clk_freq;
    case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
        return u->attributes.config_descr_lock;
    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
        return u->attributes.max_num_of_rtt;
    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
        return be16_to_cpu(u->attributes.exception_event_control);
    case UFS_QUERY_ATTR_IDN_EE_STATUS:
        return be16_to_cpu(u->attributes.exception_event_status);
    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
        return be32_to_cpu(u->attributes.seconds_passed);
    case UFS_QUERY_ATTR_IDN_CNTX_CONF:
        return be16_to_cpu(u->attributes.context_conf);
    case UFS_QUERY_ATTR_IDN_FFU_STATUS:
        return u->attributes.device_ffu_status;
    case UFS_QUERY_ATTR_IDN_PSA_STATE:
        return be32_to_cpu(u->attributes.psa_state);
    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
        return be32_to_cpu(u->attributes.psa_data_size);
    case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
        return u->attributes.ref_clk_gating_wait_time;
    case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
        return u->attributes.device_case_rough_temperaure;
    case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
        return u->attributes.device_too_high_temp_boundary;
    case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
        return u->attributes.device_too_low_temp_boundary;
    case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
        return u->attributes.throttling_status;
    case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
        return u->attributes.wb_buffer_flush_status;
    case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
        return u->attributes.available_wb_buffer_size;
    case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
        return u->attributes.wb_buffer_life_time_est;
    case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
        return be32_to_cpu(u->attributes.current_wb_buffer_size);
    case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
        return u->attributes.refresh_status;
    case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
        return u->attributes.refresh_freq;
    case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
        return u->attributes.refresh_unit;
    }
    return 0;
}
1093 
1094 static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
1095 {
1096     switch (idn) {
1097     case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
1098         u->attributes.active_icc_level = value;
1099         break;
1100     case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
1101         u->attributes.max_data_in_size = value;
1102         break;
1103     case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
1104         u->attributes.max_data_out_size = value;
1105         break;
1106     case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
1107         u->attributes.ref_clk_freq = value;
1108         break;
1109     case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
1110         u->attributes.max_num_of_rtt = value;
1111         break;
1112     case UFS_QUERY_ATTR_IDN_EE_CONTROL:
1113         u->attributes.exception_event_control = cpu_to_be16(value);
1114         break;
1115     case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
1116         u->attributes.seconds_passed = cpu_to_be32(value);
1117         break;
1118     case UFS_QUERY_ATTR_IDN_PSA_STATE:
1119         u->attributes.psa_state = value;
1120         break;
1121     case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
1122         u->attributes.psa_data_size = cpu_to_be32(value);
1123         break;
1124     }
1125 }
1126 
1127 static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
1128 {
1129     UfsHc *u = req->hc;
1130     uint8_t idn = req->req_upiu.qr.idn;
1131     uint32_t value;
1132     QueryRespCode ret;
1133 
1134     ret = ufs_attr_check_idn_valid(idn, op);
1135     if (ret) {
1136         return ret;
1137     }
1138 
1139     if (op == UFS_QUERY_ATTR_READ) {
1140         value = ufs_read_attr_value(u, idn);
1141     } else {
1142         value = be32_to_cpu(req->req_upiu.qr.value);
1143         ufs_write_attr_value(u, idn, value);
1144     }
1145 
1146     req->rsp_upiu.qr.value = cpu_to_be32(value);
1147     return UFS_QUERY_RESULT_SUCCESS;
1148 }
1149 
/*
 * Fixed unit descriptor for the RPMB well-known LU. RPMB has no backing
 * LU in this model, so it reports lu_enable = 0.
 * NOTE(review): descriptor_idn 2 is presumably the unit-descriptor IDN —
 * confirm against UFS_QUERY_DESC_IDN_UNIT.
 */
static const RpmbUnitDescriptor rpmb_unit_desc = {
    .length = sizeof(RpmbUnitDescriptor),
    .descriptor_idn = 2,
    .unit_index = UFS_UPIU_RPMB_WLUN,
    .lu_enable = 0,
};
1156 
1157 static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
1158 {
1159     UfsHc *u = req->hc;
1160     uint8_t lun = req->req_upiu.qr.index;
1161 
1162     if (lun != UFS_UPIU_RPMB_WLUN &&
1163         (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
1164         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
1165         return UFS_QUERY_RESULT_INVALID_INDEX;
1166     }
1167 
1168     if (lun == UFS_UPIU_RPMB_WLUN) {
1169         memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
1170     } else {
1171         memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
1172                sizeof(u->lus[lun]->unit_desc));
1173     }
1174 
1175     return UFS_QUERY_RESULT_SUCCESS;
1176 }
1177 
1178 static inline StringDescriptor manufacturer_str_desc(void)
1179 {
1180     StringDescriptor desc = {
1181         .length = 0x12,
1182         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1183     };
1184     desc.UC[0] = cpu_to_be16('R');
1185     desc.UC[1] = cpu_to_be16('E');
1186     desc.UC[2] = cpu_to_be16('D');
1187     desc.UC[3] = cpu_to_be16('H');
1188     desc.UC[4] = cpu_to_be16('A');
1189     desc.UC[5] = cpu_to_be16('T');
1190     return desc;
1191 }
1192 
1193 static inline StringDescriptor product_name_str_desc(void)
1194 {
1195     StringDescriptor desc = {
1196         .length = 0x22,
1197         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1198     };
1199     desc.UC[0] = cpu_to_be16('Q');
1200     desc.UC[1] = cpu_to_be16('E');
1201     desc.UC[2] = cpu_to_be16('M');
1202     desc.UC[3] = cpu_to_be16('U');
1203     desc.UC[4] = cpu_to_be16(' ');
1204     desc.UC[5] = cpu_to_be16('U');
1205     desc.UC[6] = cpu_to_be16('F');
1206     desc.UC[7] = cpu_to_be16('S');
1207     return desc;
1208 }
1209 
1210 static inline StringDescriptor product_rev_level_str_desc(void)
1211 {
1212     StringDescriptor desc = {
1213         .length = 0x0a,
1214         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1215     };
1216     desc.UC[0] = cpu_to_be16('0');
1217     desc.UC[1] = cpu_to_be16('0');
1218     desc.UC[2] = cpu_to_be16('0');
1219     desc.UC[3] = cpu_to_be16('1');
1220     return desc;
1221 }
1222 
/* Empty string descriptor (header only), returned for unset string indices. */
static const StringDescriptor null_str_desc = {
    .length = 0x02,
    .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
};
1227 
1228 static QueryRespCode ufs_read_string_desc(UfsRequest *req)
1229 {
1230     UfsHc *u = req->hc;
1231     uint8_t index = req->req_upiu.qr.index;
1232     StringDescriptor desc;
1233 
1234     if (index == u->device_desc.manufacturer_name) {
1235         desc = manufacturer_str_desc();
1236         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1237     } else if (index == u->device_desc.product_name) {
1238         desc = product_name_str_desc();
1239         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1240     } else if (index == u->device_desc.serial_number) {
1241         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1242     } else if (index == u->device_desc.oem_id) {
1243         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1244     } else if (index == u->device_desc.product_revision_level) {
1245         desc = product_rev_level_str_desc();
1246         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1247     } else {
1248         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
1249         return UFS_QUERY_RESULT_INVALID_INDEX;
1250     }
1251     return UFS_QUERY_RESULT_SUCCESS;
1252 }
1253 
1254 static inline InterconnectDescriptor interconnect_desc(void)
1255 {
1256     InterconnectDescriptor desc = {
1257         .length = sizeof(InterconnectDescriptor),
1258         .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
1259     };
1260     desc.bcd_unipro_version = cpu_to_be16(0x180);
1261     desc.bcd_mphy_version = cpu_to_be16(0x410);
1262     return desc;
1263 }
1264 
/*
 * Handle a READ_DESC query: copy the requested descriptor into the response
 * UPIU data area and fill in the echoed query fields.
 *
 * The response length is the smaller of the requested length and the
 * descriptor's own length byte (data[0], written by each case below).
 * On an invalid IDN the length is forced to 0.
 */
static QueryRespCode ufs_read_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    QueryRespCode status;
    uint8_t idn = req->req_upiu.qr.idn;
    uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
    InterconnectDescriptor desc;

    switch (idn) {
    case UFS_QUERY_DESC_IDN_DEVICE:
        memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_UNIT:
        status = ufs_read_unit_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_GEOMETRY:
        memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
               sizeof(u->geometry_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_INTERCONNECT: {
        desc = interconnect_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    }
    case UFS_QUERY_DESC_IDN_STRING:
        status = ufs_read_string_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_POWER:
        /* mocking of power descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_HEALTH:
        /* mocking of health descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    default:
        length = 0;
        trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
        status = UFS_QUERY_RESULT_INVALID_IDN;
    }

    /* Clamp to the descriptor's own length byte (first byte of the data) */
    if (length > req->rsp_upiu.qr.data[0]) {
        length = req->rsp_upiu.qr.data[0];
    }
    /* Echo the query parameters back in the response */
    req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
    req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
    req->rsp_upiu.qr.index = req->req_upiu.qr.index;
    req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
    req->rsp_upiu.qr.length = cpu_to_be16(length);

    return status;
}
1326 
1327 static QueryRespCode ufs_exec_query_read(UfsRequest *req)
1328 {
1329     QueryRespCode status;
1330     switch (req->req_upiu.qr.opcode) {
1331     case UFS_UPIU_QUERY_OPCODE_NOP:
1332         status = UFS_QUERY_RESULT_SUCCESS;
1333         break;
1334     case UFS_UPIU_QUERY_OPCODE_READ_DESC:
1335         status = ufs_read_desc(req);
1336         break;
1337     case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
1338         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
1339         break;
1340     case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
1341         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
1342         break;
1343     default:
1344         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1345         status = UFS_QUERY_RESULT_INVALID_OPCODE;
1346         break;
1347     }
1348 
1349     return status;
1350 }
1351 
1352 static QueryRespCode ufs_exec_query_write(UfsRequest *req)
1353 {
1354     QueryRespCode status;
1355     switch (req->req_upiu.qr.opcode) {
1356     case UFS_UPIU_QUERY_OPCODE_NOP:
1357         status = UFS_QUERY_RESULT_SUCCESS;
1358         break;
1359     case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
1360         /* write descriptor is not supported */
1361         status = UFS_QUERY_RESULT_NOT_WRITEABLE;
1362         break;
1363     case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
1364         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
1365         break;
1366     case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
1367         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
1368         break;
1369     case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
1370         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
1371         break;
1372     case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1373         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
1374         break;
1375     default:
1376         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1377         status = UFS_QUERY_RESULT_INVALID_OPCODE;
1378         break;
1379     }
1380 
1381     return status;
1382 }
1383 
1384 static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
1385 {
1386     uint8_t query_func = req->req_upiu.header.query_func;
1387     uint16_t data_segment_length;
1388     QueryRespCode status;
1389 
1390     trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
1391     if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
1392         status = ufs_exec_query_read(req);
1393     } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
1394         status = ufs_exec_query_write(req);
1395     } else {
1396         status = UFS_QUERY_RESULT_GENERAL_FAILURE;
1397     }
1398 
1399     data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
1400     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
1401                           data_segment_length);
1402 
1403     if (status != UFS_QUERY_RESULT_SUCCESS) {
1404         return UFS_REQUEST_FAIL;
1405     }
1406     return UFS_REQUEST_SUCCESS;
1407 }
1408 
1409 static void ufs_exec_req(UfsRequest *req)
1410 {
1411     UfsReqResult req_result;
1412 
1413     if (ufs_dma_read_upiu(req)) {
1414         return;
1415     }
1416 
1417     switch (req->req_upiu.header.trans_type) {
1418     case UFS_UPIU_TRANSACTION_NOP_OUT:
1419         req_result = ufs_exec_nop_cmd(req);
1420         break;
1421     case UFS_UPIU_TRANSACTION_COMMAND:
1422         req_result = ufs_exec_scsi_cmd(req);
1423         break;
1424     case UFS_UPIU_TRANSACTION_QUERY_REQ:
1425         req_result = ufs_exec_query_cmd(req);
1426         break;
1427     default:
1428         trace_ufs_err_invalid_trans_code(req->slot,
1429                                          req->req_upiu.header.trans_type);
1430         req_result = UFS_REQUEST_FAIL;
1431     }
1432 
1433     /*
1434      * The ufs_complete_req for scsi commands is handled by the
1435      * ufs_scsi_command_complete() callback function. Therefore, to avoid
1436      * duplicate processing, ufs_complete_req() is not called for scsi commands.
1437      */
1438     if (req_result != UFS_REQUEST_NO_COMPLETE) {
1439         ufs_complete_req(req, req_result);
1440     }
1441 }
1442 
1443 static void ufs_process_req(void *opaque)
1444 {
1445     UfsHc *u = opaque;
1446     UfsRequest *req;
1447     int slot;
1448 
1449     for (slot = 0; slot < u->params.nutrs; slot++) {
1450         req = &u->req_list[slot];
1451 
1452         if (req->state != UFS_REQUEST_READY) {
1453             continue;
1454         }
1455         trace_ufs_process_req(slot);
1456         req->state = UFS_REQUEST_RUNNING;
1457 
1458         ufs_exec_req(req);
1459     }
1460 }
1461 
1462 void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
1463 {
1464     UfsHc *u = req->hc;
1465     assert(req->state == UFS_REQUEST_RUNNING);
1466 
1467     if (req_result == UFS_REQUEST_SUCCESS) {
1468         req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
1469     } else {
1470         req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
1471     }
1472 
1473     req->state = UFS_REQUEST_COMPLETE;
1474 
1475     if (ufs_mcq_req(req)) {
1476         trace_ufs_mcq_complete_req(req->sq->sqid);
1477         QTAILQ_INSERT_TAIL(&req->sq->cq->req_list, req, entry);
1478         qemu_bh_schedule(req->sq->cq->bh);
1479     } else {
1480         trace_ufs_complete_req(req->slot);
1481         qemu_bh_schedule(u->complete_bh);
1482     }
1483 }
1484 
1485 static void ufs_clear_req(UfsRequest *req)
1486 {
1487     if (req->sg != NULL) {
1488         qemu_sglist_destroy(req->sg);
1489         g_free(req->sg);
1490         req->sg = NULL;
1491         req->data_len = 0;
1492     }
1493 
1494     memset(&req->utrd, 0, sizeof(req->utrd));
1495     memset(&req->req_upiu, 0, sizeof(req->req_upiu));
1496     memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
1497 }
1498 
1499 static void ufs_sendback_req(void *opaque)
1500 {
1501     UfsHc *u = opaque;
1502     UfsRequest *req;
1503     int slot;
1504 
1505     for (slot = 0; slot < u->params.nutrs; slot++) {
1506         req = &u->req_list[slot];
1507 
1508         if (req->state != UFS_REQUEST_COMPLETE) {
1509             continue;
1510         }
1511 
1512         if (ufs_dma_write_upiu(req)) {
1513             req->state = UFS_REQUEST_ERROR;
1514             continue;
1515         }
1516 
1517         /*
1518          * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
1519          * supported
1520          */
1521         if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
1522             le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
1523             u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
1524         }
1525 
1526         u->reg.utrldbr &= ~(1 << slot);
1527         u->reg.utrlcnr |= (1 << slot);
1528 
1529         trace_ufs_sendback_req(req->slot);
1530 
1531         ufs_clear_req(req);
1532         req->state = UFS_REQUEST_IDLE;
1533     }
1534 
1535     ufs_irq_check(u);
1536 }
1537 
1538 static bool ufs_check_constraints(UfsHc *u, Error **errp)
1539 {
1540     if (u->params.nutrs > UFS_MAX_NUTRS) {
1541         error_setg(errp, "nutrs must be less than or equal to %d",
1542                    UFS_MAX_NUTRS);
1543         return false;
1544     }
1545 
1546     if (u->params.nutmrs > UFS_MAX_NUTMRS) {
1547         error_setg(errp, "nutmrs must be less than or equal to %d",
1548                    UFS_MAX_NUTMRS);
1549         return false;
1550     }
1551 
1552     if (u->params.mcq_maxq >= UFS_MAX_MCQ_QNUM) {
1553         error_setg(errp, "mcq-maxq must be less than %d", UFS_MAX_MCQ_QNUM);
1554         return false;
1555     }
1556 
1557     return true;
1558 }
1559 
/*
 * PCI wiring: expose the UFS register file as 32-bit memory BAR 0 and
 * allocate the legacy INTx interrupt line.
 */
static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
    pci_config_set_prog_interface(pci_conf, 0x1);

    memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
                          u->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
    u->irq = pci_allocate_irq(pci_dev);
}
1572 
1573 static void ufs_init_state(UfsHc *u)
1574 {
1575     u->req_list = g_new0(UfsRequest, u->params.nutrs);
1576 
1577     for (int i = 0; i < u->params.nutrs; i++) {
1578         u->req_list[i].hc = u;
1579         u->req_list[i].slot = i;
1580         u->req_list[i].sg = NULL;
1581         u->req_list[i].state = UFS_REQUEST_IDLE;
1582     }
1583 
1584     u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
1585                                          &DEVICE(u)->mem_reentrancy_guard);
1586     u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
1587                                          &DEVICE(u)->mem_reentrancy_guard);
1588 
1589     if (u->params.mcq) {
1590         memset(u->sq, 0, sizeof(u->sq));
1591         memset(u->cq, 0, sizeof(u->cq));
1592     }
1593 }
1594 
/*
 * Initialize the host controller registers and the mocked device-side
 * descriptors/attributes/flags to their reset values.
 */
static void ufs_init_hc(UfsHc *u)
{
    uint32_t cap = 0;
    uint32_t mcqconfig = 0;
    uint32_t mcqcap = 0;

    /* BAR size rounded up to a power of two for the MMIO region */
    u->reg_size = pow2ceil(ufs_reg_size(u));

    memset(&u->reg, 0, sizeof(u->reg));
    memset(&u->mcq_reg, 0, sizeof(u->mcq_reg));
    memset(&u->mcq_op_reg, 0, sizeof(u->mcq_op_reg));
    /* CAP: NUTRS/NUTMRS are encoded as "value - 1" */
    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
    cap = FIELD_DP32(cap, CAP, RTT, 2);
    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
    cap = FIELD_DP32(cap, CAP, 64AS, 1);
    cap = FIELD_DP32(cap, CAP, OODDS, 0);
    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
    cap = FIELD_DP32(cap, CAP, CS, 0);
    cap = FIELD_DP32(cap, CAP, LSDBS, 1);
    cap = FIELD_DP32(cap, CAP, MCQS, u->params.mcq);
    u->reg.cap = cap;

    if (u->params.mcq) {
        mcqconfig = FIELD_DP32(mcqconfig, MCQCONFIG, MAC, 0x1f);
        u->reg.mcqconfig = mcqconfig;

        /* MCQCAP: MAXQ is encoded as "value - 1" */
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, MAXQ, u->params.mcq_maxq - 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, RRP, 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, QCFGPTR, UFS_MCQ_QCFGPTR);
        u->reg.mcqcap = mcqcap;

        /*
         * Lay out each queue's operation/runtime register sub-blocks
         * back-to-back: SQ, SQ-interrupt, CQ, CQ-interrupt.
         */
        for (int i = 0; i < ARRAY_SIZE(u->mcq_reg); i++) {
            uint64_t addr = ufs_mcq_op_reg_addr(u, i);
            u->mcq_reg[i].sqdao = addr;
            u->mcq_reg[i].sqisao = addr + sizeof(UfsMcqSqReg);
            addr += sizeof(UfsMcqSqReg);
            u->mcq_reg[i].cqdao = addr + sizeof(UfsMcqSqIntReg);
            addr += sizeof(UfsMcqSqIntReg);
            u->mcq_reg[i].cqisao = addr + sizeof(UfsMcqCqReg);
        }
    }
    u->reg.ver = UFS_SPEC_VER;

    /* Device descriptor: values below are the fixed identity of this model */
    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
    u->device_desc.length = sizeof(DeviceDescriptor);
    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
    u->device_desc.device_sub_class = 0x01;
    u->device_desc.number_lu = 0x00;
    u->device_desc.number_wlu = 0x04;
    /* TODO: Revisit it when Power Management is implemented */
    u->device_desc.init_power_mode = 0x01; /* Active Mode */
    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
    /* String descriptor indices served by ufs_read_string_desc() */
    u->device_desc.manufacturer_name = 0x00;
    u->device_desc.product_name = 0x01;
    u->device_desc.serial_number = 0x02;
    u->device_desc.oem_id = 0x03;
    u->device_desc.ud_0_base_offset = 0x16;
    u->device_desc.ud_config_p_length = 0x1A;
    u->device_desc.device_rtt_cap = 0x02;
    u->device_desc.queue_depth = u->params.nutrs;
    u->device_desc.product_revision_level = 0x04;

    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
    u->geometry_desc.length = sizeof(GeometryDescriptor);
    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
    u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
    u->geometry_desc.max_in_buffer_size = 0x8;
    u->geometry_desc.max_out_buffer_size = 0x8;
    u->geometry_desc.rpmb_read_write_size = 0x40;
    u->geometry_desc.data_ordering =
        0x0; /* out-of-order data transfer is not supported */
    u->geometry_desc.max_context_id_number = 0x5;
    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);

    memset(&u->attributes, 0, sizeof(u->attributes));
    u->attributes.max_data_in_size = 0x08;
    u->attributes.max_data_out_size = 0x08;
    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
    /* configure descriptor is not supported */
    u->attributes.config_descr_lock = 0x01;
    u->attributes.max_num_of_rtt = 0x02;

    memset(&u->flags, 0, sizeof(u->flags));
    u->flags.permanently_disable_fw_update = 1;
}
1685 
/*
 * PCI realize callback: validate device properties, initialize controller
 * and mocked-device state, expose the MMIO BAR, and create the well-known
 * logical units.
 */
static void ufs_realize(PCIDevice *pci_dev, Error **errp)
{
    UfsHc *u = UFS(pci_dev);

    if (!ufs_check_constraints(u, errp)) {
        return;
    }

    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
              u->parent_obj.qdev.id);

    ufs_init_state(u);
    ufs_init_hc(u);
    ufs_init_pci(u, pci_dev);

    /* Well-known LUs (W-LUNs) required by the UFS device model */
    ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN);
    ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN);
    ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN);
    ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN);
}
1706 
/*
 * PCI exit callback: tear down the bottom halves, per-slot request state,
 * and any MCQ submission/completion queues the guest created.
 */
static void ufs_exit(PCIDevice *pci_dev)
{
    UfsHc *u = UFS(pci_dev);

    qemu_bh_delete(u->doorbell_bh);
    qemu_bh_delete(u->complete_bh);

    /* Free any scatter/gather lists still attached to request slots */
    for (int i = 0; i < u->params.nutrs; i++) {
        ufs_clear_req(&u->req_list[i]);
    }
    g_free(u->req_list);

    for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
        if (u->sq[i]) {
            ufs_mcq_delete_sq(u, i);
        }
    }
    for (int i = 0; i < ARRAY_SIZE(u->cq); i++) {
        if (u->cq[i]) {
            ufs_mcq_delete_cq(u, i);
        }
    }
}
1730 
/* User-configurable device properties (see ufs_check_constraints()). */
static Property ufs_props[] = {
    DEFINE_PROP_STRING("serial", UfsHc, params.serial),
    /* number of UTP transfer request slots, up to UFS_MAX_NUTRS */
    DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
    /* number of UTP task management request slots, up to UFS_MAX_NUTMRS */
    DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
    /* enable Multi-Circular Queue operation */
    DEFINE_PROP_BOOL("mcq", UfsHc, params.mcq, false),
    /* maximum number of MCQ queues, must be below UFS_MAX_MCQ_QNUM */
    DEFINE_PROP_UINT8("mcq-maxq", UfsHc, params.mcq_maxq, 2),
    DEFINE_PROP_END_OF_LIST(),
};
1739 
/* No migratable state is implemented yet; mark the device unmigratable. */
static const VMStateDescription ufs_vmstate = {
    .name = "ufs",
    .unmigratable = 1,
};
1744 
/* QOM class init for the UFS controller PCI device. */
static void ufs_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = ufs_realize;
    pc->exit = ufs_exit;
    pc->vendor_id = PCI_VENDOR_ID_REDHAT;
    pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
    pc->class_id = PCI_CLASS_STORAGE_UFS;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Universal Flash Storage";
    device_class_set_props(dc, ufs_props);
    dc->vmsd = &ufs_vmstate;
}
1761 
1762 static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
1763                                   Error **errp)
1764 {
1765     if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) {
1766         error_setg(errp, "%s cannot be connected to ufs-bus",
1767                    object_get_typename(OBJECT(qdev)));
1768         return false;
1769     }
1770 
1771     return true;
1772 }
1773 
1774 static char *ufs_bus_get_dev_path(DeviceState *dev)
1775 {
1776     BusState *bus = qdev_get_parent_bus(dev);
1777 
1778     return qdev_get_dev_path(bus->parent);
1779 }
1780 
/* QOM class init for the UFS bus type. */
static void ufs_bus_class_init(ObjectClass *class, void *data)
{
    BusClass *bc = BUS_CLASS(class);
    bc->get_dev_path = ufs_bus_get_dev_path;
    bc->check_address = ufs_bus_check_address;
}
1787 
/* Type registration for the UFS controller (a PCIe device). */
static const TypeInfo ufs_info = {
    .name = TYPE_UFS,
    .parent = TYPE_PCI_DEVICE,
    .class_init = ufs_class_init,
    .instance_size = sizeof(UfsHc),
    .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};
1795 
/* Type registration for the UFS bus that ufs-lu devices attach to. */
static const TypeInfo ufs_bus_info = {
    .name = TYPE_UFS_BUS,
    .parent = TYPE_BUS,
    .class_init = ufs_bus_class_init,
    .class_size = sizeof(UfsBusClass),
    .instance_size = sizeof(UfsBus),
};
1803 
/* Register the UFS controller and UFS bus QOM types with QEMU. */
static void ufs_register_types(void)
{
    type_register_static(&ufs_info);
    type_register_static(&ufs_bus_info);
}

type_init(ufs_register_types)
1811