/*
 * QEMU Universal Flash Storage (UFS) Controller
 *
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
 *
 * Written by Jeuk Kim <jeuk20.kim@samsung.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

/**
 * Reference Specs: https://www.jedec.org/, version 3.1
 *
 * Usage
 * -----
 *
 * Add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device ufs,serial=<serial>,id=<bus_name>, \
 *              nutrs=<N[optional]>,nutmrs=<N[optional]>
 *      -device ufs-lu,drive=<drive_id>,bus=<bus_name>
 */
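/*
 * For example (illustrative values only; the image path, drive id, serial
 * and bus name below are arbitrary placeholders):
 *      -drive file=ufs.img,if=none,id=drv0
 *      -device ufs,serial=foo,id=ufs0
 *      -device ufs-lu,drive=drv0,bus=ufs0
 */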

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "trace.h"
#include "ufs.h"

/* The QEMU-UFS device follows spec version 3.1 */
#define UFS_SPEC_VER 0x0310
#define UFS_MAX_NUTRS 32
#define UFS_MAX_NUTMRS 8

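/*
 * Guest DMA helpers. Transfers that wrap around the end of the address
 * space are rejected, as are addresses above 4GiB when the 64-bit
 * addressing capability (CAP.64AS) is not set.
 */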
static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (hi < addr) {
        return MEMTX_DECODE_ERROR;
    }

    if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
        return MEMTX_DECODE_ERROR;
    }

    return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
}

static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
                                  int size)
{
    hwaddr hi = addr + size - 1;
    if (hi < addr) {
        return MEMTX_DECODE_ERROR;
    }

    if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
        return MEMTX_DECODE_ERROR;
    }

    return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
}

static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
{
    hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
    hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);

    return utrd_addr;
}

static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
{
    uint32_t cmd_desc_base_addr_lo =
        le32_to_cpu(utrd->command_desc_base_addr_lo);
    uint32_t cmd_desc_base_addr_hi =
        le32_to_cpu(utrd->command_desc_base_addr_hi);

    return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
}

static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
{
    hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
    uint32_t rsp_upiu_byte_off =
        le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
    return req_upiu_base_addr + rsp_upiu_byte_off;
}

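/* Fetch the UTP Transfer Request Descriptor for this slot from the UTRL. */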
static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
    MemTxResult ret;

    ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
    if (ret) {
        trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
    }
    return ret;
}

static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
    UtpUpiuReq *req_upiu = &req->req_upiu;
    uint32_t copy_size;
    uint16_t data_segment_length;
    MemTxResult ret;

    /*
     * To know the size of the req_upiu, we need to read the
     * data_segment_length in the header first.
     */
    ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
                        sizeof(UtpUpiuHeader));
    if (ret) {
        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
        return ret;
    }
    data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);

    copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
                data_segment_length;

    ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
    if (ret) {
        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
    }
    return ret;
}

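/*
 * Read the PRDT referenced by the UTRD and build a QEMUSGList describing
 * the guest data buffers of the request.
 */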
static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
    uint16_t prdt_byte_off =
        le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
    uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
    g_autofree UfshcdSgEntry *prd_entries = NULL;
    hwaddr req_upiu_base_addr, prdt_base_addr;
    int err;

    assert(!req->sg);

    if (prdt_size == 0) {
        return MEMTX_OK;
    }
    prd_entries = g_new(UfshcdSgEntry, prdt_len);

    req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
    prdt_base_addr = req_upiu_base_addr + prdt_byte_off;

    err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
    if (err) {
        trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
        return err;
    }

    req->sg = g_malloc0(sizeof(QEMUSGList));
    pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
    req->data_len = 0;

    for (uint16_t i = 0; i < prdt_len; ++i) {
        hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
        uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
        qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
        req->data_len += data_byte_count;
    }
    return MEMTX_OK;
}

static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
{
    MemTxResult ret;

    ret = ufs_dma_read_utrd(req);
    if (ret) {
        return ret;
    }

    ret = ufs_dma_read_req_upiu(req);
    if (ret) {
        return ret;
    }

    ret = ufs_dma_read_prdt(req);
    if (ret) {
        return ret;
    }

    return MEMTX_OK;
}

static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
    MemTxResult ret;

    ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
    if (ret) {
        trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
    }
    return ret;
}

static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
    uint32_t rsp_upiu_byte_len =
        le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
    uint16_t data_segment_length =
        be16_to_cpu(req->rsp_upiu.header.data_segment_length);
    uint32_t copy_size = sizeof(UtpUpiuHeader) +
                         UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
                         data_segment_length;
    MemTxResult ret;

    if (copy_size > rsp_upiu_byte_len) {
        copy_size = rsp_upiu_byte_len;
    }

    ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
    if (ret) {
        trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
    }
    return ret;
}

static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
{
    MemTxResult ret;

    ret = ufs_dma_write_rsp_upiu(req);
    if (ret) {
        return ret;
    }

    return ufs_dma_write_utrd(req);
}

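/* Assert or deassert the PCI interrupt according to IS masked by IE. */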
static void ufs_irq_check(UfsHc *u)
{
    PCIDevice *pci = PCI_DEVICE(u);

    if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) {
        trace_ufs_irq_raise();
        pci_irq_assert(pci);
    } else {
        trace_ufs_irq_lower();
        pci_irq_deassert(pci);
    }
}

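/*
 * Handle a UTRLDBR write: mark the requests behind newly rung doorbell
 * bits as ready and schedule the doorbell bottom half to process them.
 */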
static void ufs_process_db(UfsHc *u, uint32_t val)
{
    DECLARE_BITMAP(doorbell, UFS_MAX_NUTRS);
    uint32_t slot;
    uint32_t nutrs = u->params.nutrs;
    UfsRequest *req;

    val &= ~u->reg.utrldbr;
    if (!val) {
        return;
    }

    doorbell[0] = val;
    slot = find_first_bit(doorbell, nutrs);

    while (slot < nutrs) {
        req = &u->req_list[slot];
        if (req->state == UFS_REQUEST_ERROR) {
            trace_ufs_err_utrl_slot_error(req->slot);
            return;
        }

        if (req->state != UFS_REQUEST_IDLE) {
            trace_ufs_err_utrl_slot_busy(req->slot);
            return;
        }

        trace_ufs_process_db(slot);
        req->state = UFS_REQUEST_READY;
        slot = find_next_bit(doorbell, nutrs, slot + 1);
    }

    qemu_bh_schedule(u->doorbell_bh);
}

static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
{
    trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
                             u->reg.ucmdarg3);
    /*
     * Only the UIC commands essential for running the Linux and Windows
     * drivers are implemented.
     */
    switch (val) {
    case UFS_UIC_CMD_DME_LINK_STARTUP:
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    /* TODO: Revisit it when Power Management is implemented */
    case UFS_UIC_CMD_DME_HIBER_ENTER:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    case UFS_UIC_CMD_DME_HIBER_EXIT:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    default:
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
    }

    u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);

    ufs_irq_check(u);
}

static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
{
    switch (offset) {
    case A_IS:
        u->reg.is &= ~data;
        ufs_irq_check(u);
        break;
    case A_IE:
        u->reg.ie = data;
        ufs_irq_check(u);
        break;
    case A_HCE:
        if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
            u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
        } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
                   !FIELD_EX32(data, HCE, HCE)) {
            u->reg.hcs = 0;
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
        }
        break;
    case A_UTRLBA:
        u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
        break;
    case A_UTRLBAU:
        u->reg.utrlbau = data;
        break;
    case A_UTRLDBR:
        ufs_process_db(u, data);
        u->reg.utrldbr |= data;
        break;
    case A_UTRLRSR:
        u->reg.utrlrsr = data;
        break;
    case A_UTRLCNR:
        u->reg.utrlcnr &= ~data;
        break;
    case A_UTMRLBA:
        u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
        break;
    case A_UTMRLBAU:
        u->reg.utmrlbau = data;
        break;
    case A_UICCMD:
        ufs_process_uiccmd(u, data);
        break;
    case A_UCMDARG1:
        u->reg.ucmdarg1 = data;
        break;
    case A_UCMDARG2:
        u->reg.ucmdarg2 = data;
        break;
    case A_UCMDARG3:
        u->reg.ucmdarg3 = data;
        break;
    case A_UTRLCLR:
    case A_UTMRLDBR:
    case A_UTMRLCLR:
    case A_UTMRLRSR:
        trace_ufs_err_unsupport_register_offset(offset);
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}

static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;
    uint8_t *ptr = (uint8_t *)&u->reg;
    uint64_t value;

    if (addr > sizeof(u->reg) - size) {
        trace_ufs_err_invalid_register_offset(addr);
        return 0;
    }

    value = *(uint32_t *)(ptr + addr);
    trace_ufs_mmio_read(addr, value, size);
    return value;
}

static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;

    if (addr > sizeof(u->reg) - size) {
        trace_ufs_err_invalid_register_offset(addr);
        return;
    }

    trace_ufs_mmio_write(addr, data, size);
    ufs_write_reg(u, addr, data, size);
}

static const MemoryRegionOps ufs_mmio_ops = {
    .read = ufs_mmio_read,
    .write = ufs_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

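/*
 * Build the response UPIU header from the request UPIU header, overriding
 * the transaction type, flags, response, SCSI status and data segment
 * length fields.
 */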
void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
                           uint8_t response, uint8_t scsi_status,
                           uint16_t data_segment_length)
{
    memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader));
    req->rsp_upiu.header.trans_type = trans_type;
    req->rsp_upiu.header.flags = flags;
    req->rsp_upiu.header.response = response;
    req->rsp_upiu.header.scsi_status = scsi_status;
    req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
}

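/*
 * Dispatch a SCSI command UPIU to the addressed logical unit, including
 * the four well-known logical units.
 */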
static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint8_t lun = req->req_upiu.header.lun;

    UfsLu *lu = NULL;

    trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);

    if (!is_wlun(lun) && (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
        trace_ufs_err_scsi_cmd_invalid_lun(lun);
        return UFS_REQUEST_FAIL;
    }

    switch (lun) {
    case UFS_UPIU_REPORT_LUNS_WLUN:
        lu = &u->report_wlu;
        break;
    case UFS_UPIU_UFS_DEVICE_WLUN:
        lu = &u->dev_wlu;
        break;
    case UFS_UPIU_BOOT_WLUN:
        lu = &u->boot_wlu;
        break;
    case UFS_UPIU_RPMB_WLUN:
        lu = &u->rpmb_wlu;
        break;
    default:
        lu = u->lus[lun];
    }

    return lu->scsi_op(lu, req);
}

static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
{
    trace_ufs_exec_nop_cmd(req->slot);
    ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
    return UFS_REQUEST_SUCCESS;
}

/*
 * This defines the permitted operations for each flag, keyed by its IDN. Some
 * flags are declared read-only here even though the UFS spec allows writing
 * them, so that writes to features that are not yet supported return an error.
 */
static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
    [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
    /* Write protection is not supported */
    [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
                                    UFS_QUERY_FLAG_CLEAR |
                                    UFS_QUERY_FLAG_TOGGLE,
    [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
        UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
        UFS_QUERY_FLAG_TOGGLE,
    /* Purge Operation is not supported */
    [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Refresh Operation is not supported */
    [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Physical Resource Removal is not supported */
    [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
    /* Write Booster is not supported */
    [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
};

static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
{
    if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
        return UFS_QUERY_RESULT_INVALID_IDN;
    }

    if (!(flag_permission[idn] & op)) {
        if (op == UFS_QUERY_FLAG_READ) {
            trace_ufs_err_query_flag_not_readable(idn);
            return UFS_QUERY_RESULT_NOT_READABLE;
        }
        trace_ufs_err_query_flag_not_writable(idn);
        return UFS_QUERY_RESULT_NOT_WRITEABLE;
    }

    return UFS_QUERY_RESULT_SUCCESS;
}

static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
    /* booting is not supported */
    [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    /* refresh operation is not supported */
    [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
};

static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
{
    if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
        return UFS_QUERY_RESULT_INVALID_IDN;
    }

    if (!(attr_permission[idn] & op)) {
        if (op == UFS_QUERY_ATTR_READ) {
            trace_ufs_err_query_attr_not_readable(idn);
            return UFS_QUERY_RESULT_NOT_READABLE;
        }
        trace_ufs_err_query_attr_not_writable(idn);
        return UFS_QUERY_RESULT_NOT_WRITEABLE;
    }

    return UFS_QUERY_RESULT_SUCCESS;
}

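/*
 * Handle flag query requests (READ/SET/CLEAR/TOGGLE). fDeviceInit always
 * reads back as zero because device initialization completes immediately.
 */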
static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
{
    UfsHc *u = req->hc;
    uint8_t idn = req->req_upiu.qr.idn;
    uint32_t value;
    QueryRespCode ret;

    ret = ufs_flag_check_idn_valid(idn, op);
    if (ret) {
        return ret;
    }

    if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
        value = 0;
    } else if (op == UFS_QUERY_FLAG_READ) {
        value = *(((uint8_t *)&u->flags) + idn);
    } else if (op == UFS_QUERY_FLAG_SET) {
        value = 1;
    } else if (op == UFS_QUERY_FLAG_CLEAR) {
        value = 0;
    } else if (op == UFS_QUERY_FLAG_TOGGLE) {
        value = *(((uint8_t *)&u->flags) + idn);
        value = !value;
    } else {
        trace_ufs_err_query_invalid_opcode(op);
        return UFS_QUERY_RESULT_INVALID_OPCODE;
    }

    *(((uint8_t *)&u->flags) + idn) = value;
    req->rsp_upiu.qr.value = cpu_to_be32(value);
    return UFS_QUERY_RESULT_SUCCESS;
}

static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
{
    switch (idn) {
    case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
        return u->attributes.boot_lun_en;
    case UFS_QUERY_ATTR_IDN_POWER_MODE:
        return u->attributes.current_power_mode;
    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
        return u->attributes.active_icc_level;
    case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
        return u->attributes.out_of_order_data_en;
    case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
        return u->attributes.background_op_status;
    case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
        return u->attributes.purge_status;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
        return u->attributes.max_data_in_size;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
        return u->attributes.max_data_out_size;
    case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
        return be32_to_cpu(u->attributes.dyn_cap_needed);
    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
        return u->attributes.ref_clk_freq;
    case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
        return u->attributes.config_descr_lock;
    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
        return u->attributes.max_num_of_rtt;
    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
        return be16_to_cpu(u->attributes.exception_event_control);
    case UFS_QUERY_ATTR_IDN_EE_STATUS:
        return be16_to_cpu(u->attributes.exception_event_status);
    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
        return be32_to_cpu(u->attributes.seconds_passed);
    case UFS_QUERY_ATTR_IDN_CNTX_CONF:
        return be16_to_cpu(u->attributes.context_conf);
    case UFS_QUERY_ATTR_IDN_FFU_STATUS:
        return u->attributes.device_ffu_status;
    case UFS_QUERY_ATTR_IDN_PSA_STATE:
        return be32_to_cpu(u->attributes.psa_state);
    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
        return be32_to_cpu(u->attributes.psa_data_size);
    case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
        return u->attributes.ref_clk_gating_wait_time;
    case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
        return u->attributes.device_case_rough_temperaure;
    case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
        return u->attributes.device_too_high_temp_boundary;
    case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
        return u->attributes.device_too_low_temp_boundary;
    case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
        return u->attributes.throttling_status;
    case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
        return u->attributes.wb_buffer_flush_status;
    case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
        return u->attributes.available_wb_buffer_size;
    case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
        return u->attributes.wb_buffer_life_time_est;
    case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
        return be32_to_cpu(u->attributes.current_wb_buffer_size);
    case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
        return u->attributes.refresh_status;
    case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
        return u->attributes.refresh_freq;
    case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
        return u->attributes.refresh_unit;
    }
    return 0;
}

static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
{
    switch (idn) {
    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
        u->attributes.active_icc_level = value;
        break;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
        u->attributes.max_data_in_size = value;
        break;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
        u->attributes.max_data_out_size = value;
        break;
    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
        u->attributes.ref_clk_freq = value;
        break;
    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
        u->attributes.max_num_of_rtt = value;
        break;
    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
        u->attributes.exception_event_control = cpu_to_be16(value);
        break;
    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
        u->attributes.seconds_passed = cpu_to_be32(value);
        break;
    case UFS_QUERY_ATTR_IDN_PSA_STATE:
        u->attributes.psa_state = value;
        break;
    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
        u->attributes.psa_data_size = cpu_to_be32(value);
        break;
    }
}

static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
{
    UfsHc *u = req->hc;
    uint8_t idn = req->req_upiu.qr.idn;
    uint32_t value;
    QueryRespCode ret;

    ret = ufs_attr_check_idn_valid(idn, op);
    if (ret) {
        return ret;
    }

    if (op == UFS_QUERY_ATTR_READ) {
        value = ufs_read_attr_value(u, idn);
    } else {
        value = be32_to_cpu(req->req_upiu.qr.value);
        ufs_write_attr_value(u, idn, value);
    }

    req->rsp_upiu.qr.value = cpu_to_be32(value);
    return UFS_QUERY_RESULT_SUCCESS;
}

static const RpmbUnitDescriptor rpmb_unit_desc = {
    .length = sizeof(RpmbUnitDescriptor),
    .descriptor_idn = 2,
    .unit_index = UFS_UPIU_RPMB_WLUN,
    .lu_enable = 0,
};

static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint8_t lun = req->req_upiu.qr.index;

    if (lun != UFS_UPIU_RPMB_WLUN &&
        (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
        trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
        return UFS_QUERY_RESULT_INVALID_INDEX;
    }

    if (lun == UFS_UPIU_RPMB_WLUN) {
        memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
    } else {
        memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
               sizeof(u->lus[lun]->unit_desc));
    }

    return UFS_QUERY_RESULT_SUCCESS;
}

static inline StringDescriptor manufacturer_str_desc(void)
{
    StringDescriptor desc = {
        .length = 0x12,
        .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
    };
    desc.UC[0] = cpu_to_be16('R');
    desc.UC[1] = cpu_to_be16('E');
    desc.UC[2] = cpu_to_be16('D');
    desc.UC[3] = cpu_to_be16('H');
    desc.UC[4] = cpu_to_be16('A');
    desc.UC[5] = cpu_to_be16('T');
    return desc;
}

static inline StringDescriptor product_name_str_desc(void)
{
    StringDescriptor desc = {
        .length = 0x22,
        .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
    };
    desc.UC[0] = cpu_to_be16('Q');
    desc.UC[1] = cpu_to_be16('E');
    desc.UC[2] = cpu_to_be16('M');
    desc.UC[3] = cpu_to_be16('U');
    desc.UC[4] = cpu_to_be16(' ');
    desc.UC[5] = cpu_to_be16('U');
    desc.UC[6] = cpu_to_be16('F');
    desc.UC[7] = cpu_to_be16('S');
    return desc;
}

static inline StringDescriptor product_rev_level_str_desc(void)
{
    StringDescriptor desc = {
        .length = 0x0a,
        .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
    };
    desc.UC[0] = cpu_to_be16('0');
    desc.UC[1] = cpu_to_be16('0');
    desc.UC[2] = cpu_to_be16('0');
    desc.UC[3] = cpu_to_be16('1');
    return desc;
}

static const StringDescriptor null_str_desc = {
    .length = 0x02,
    .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
};

static QueryRespCode ufs_read_string_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint8_t index = req->req_upiu.qr.index;
    StringDescriptor desc;

    if (index == u->device_desc.manufacturer_name) {
        desc = manufacturer_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else if (index == u->device_desc.product_name) {
        desc = product_name_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else if (index == u->device_desc.serial_number) {
        memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
    } else if (index == u->device_desc.oem_id) {
        memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
    } else if (index == u->device_desc.product_revision_level) {
        desc = product_rev_level_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else {
        trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
        return UFS_QUERY_RESULT_INVALID_INDEX;
    }
    return UFS_QUERY_RESULT_SUCCESS;
}

static inline InterconnectDescriptor interconnect_desc(void)
{
    InterconnectDescriptor desc = {
        .length = sizeof(InterconnectDescriptor),
        .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
    };
    desc.bcd_unipro_version = cpu_to_be16(0x180);
    desc.bcd_mphy_version = cpu_to_be16(0x410);
    return desc;
}

static QueryRespCode ufs_read_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    QueryRespCode status;
    uint8_t idn = req->req_upiu.qr.idn;
    uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
    InterconnectDescriptor desc;

    switch (idn) {
    case UFS_QUERY_DESC_IDN_DEVICE:
        memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_UNIT:
        status = ufs_read_unit_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_GEOMETRY:
        memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
               sizeof(u->geometry_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_INTERCONNECT: {
        desc = interconnect_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    }
    case UFS_QUERY_DESC_IDN_STRING:
        status = ufs_read_string_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_POWER:
        /* mocking of power descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_HEALTH:
        /* mocking of health descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    default:
        length = 0;
        trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
        status = UFS_QUERY_RESULT_INVALID_IDN;
    }

    if (length > req->rsp_upiu.qr.data[0]) {
        length = req->rsp_upiu.qr.data[0];
    }
    req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
    req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
    req->rsp_upiu.qr.index = req->req_upiu.qr.index;
    req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
    req->rsp_upiu.qr.length = cpu_to_be16(length);

    return status;
}

static QueryRespCode ufs_exec_query_read(UfsRequest *req)
{
    QueryRespCode status;
    switch (req->req_upiu.qr.opcode) {
    case UFS_UPIU_QUERY_OPCODE_NOP:
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_UPIU_QUERY_OPCODE_READ_DESC:
        status = ufs_read_desc(req);
        break;
    case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
        status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
        break;
    case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
        status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
        break;
    default:
        trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
        status = UFS_QUERY_RESULT_INVALID_OPCODE;
        break;
    }

    return status;
}

static QueryRespCode ufs_exec_query_write(UfsRequest *req)
{
    QueryRespCode status;
    switch (req->req_upiu.qr.opcode) {
    case UFS_UPIU_QUERY_OPCODE_NOP:
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
        /* write descriptor is not supported */
        status = UFS_QUERY_RESULT_NOT_WRITEABLE;
        break;
    case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
        status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
        break;
    case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
        status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
        break;
    case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
        status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
        break;
    case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
        status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
        break;
    default:
        trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
        status = UFS_QUERY_RESULT_INVALID_OPCODE;
        break;
    }

    return status;
}

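/*
 * Execute a query request UPIU and build the query response UPIU with the
 * resulting status and data segment length.
 */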
static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
{
    uint8_t query_func = req->req_upiu.header.query_func;
    uint16_t data_segment_length;
    QueryRespCode status;

    trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
    if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
        status = ufs_exec_query_read(req);
    } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
        status = ufs_exec_query_write(req);
    } else {
        status = UFS_QUERY_RESULT_GENERAL_FAILURE;
    }

    data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
    ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
                          data_segment_length);

    if (status != UFS_QUERY_RESULT_SUCCESS) {
        return UFS_REQUEST_FAIL;
    }
    return UFS_REQUEST_SUCCESS;
}

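/*
 * Fetch the UTRD, request UPIU and PRDT of a ready request and dispatch it
 * according to its transaction type.
 */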
static void ufs_exec_req(UfsRequest *req)
{
    UfsReqResult req_result;

    if (ufs_dma_read_upiu(req)) {
        return;
    }

    switch (req->req_upiu.header.trans_type) {
    case UFS_UPIU_TRANSACTION_NOP_OUT:
        req_result = ufs_exec_nop_cmd(req);
        break;
    case UFS_UPIU_TRANSACTION_COMMAND:
        req_result = ufs_exec_scsi_cmd(req);
        break;
    case UFS_UPIU_TRANSACTION_QUERY_REQ:
        req_result = ufs_exec_query_cmd(req);
        break;
    default:
        trace_ufs_err_invalid_trans_code(req->slot,
                                         req->req_upiu.header.trans_type);
        req_result = UFS_REQUEST_FAIL;
    }

    /*
     * For SCSI commands, ufs_complete_req() is invoked from the
     * ufs_scsi_command_complete() callback, so it is not called here in order
     * to avoid completing the request twice.
     */
    if (req_result != UFS_REQUEST_NO_COMPLETE) {
        ufs_complete_req(req, req_result);
    }
}

static void ufs_process_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_READY) {
            continue;
        }
        trace_ufs_process_req(slot);
        req->state = UFS_REQUEST_RUNNING;

        ufs_exec_req(req);
    }
}

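/*
 * Record the overall command status (OCS) in the UTRD and hand the request
 * over to the completion bottom half.
 */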
void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
{
    UfsHc *u = req->hc;
    assert(req->state == UFS_REQUEST_RUNNING);

    if (req_result == UFS_REQUEST_SUCCESS) {
        req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
    } else {
        req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
    }

    trace_ufs_complete_req(req->slot);
    req->state = UFS_REQUEST_COMPLETE;
    qemu_bh_schedule(u->complete_bh);
}

static void ufs_clear_req(UfsRequest *req)
{
    if (req->sg != NULL) {
        qemu_sglist_destroy(req->sg);
        g_free(req->sg);
        req->sg = NULL;
        req->data_len = 0;
    }

    memset(&req->utrd, 0, sizeof(req->utrd));
    memset(&req->req_upiu, 0, sizeof(req->req_upiu));
    memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
}

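/*
 * Write the response UPIU and UTRD of completed requests back to guest
 * memory, update the doorbell and completion notification registers and
 * raise an interrupt if needed.
 */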
static void ufs_sendback_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_COMPLETE) {
            continue;
        }

        if (ufs_dma_write_upiu(req)) {
            req->state = UFS_REQUEST_ERROR;
            continue;
        }

        /*
         * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
         * supported
         */
        if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
            le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
            u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
        }

        u->reg.utrldbr &= ~(1 << slot);
        u->reg.utrlcnr |= (1 << slot);

        trace_ufs_sendback_req(req->slot);

        ufs_clear_req(req);
        req->state = UFS_REQUEST_IDLE;
    }

    ufs_irq_check(u);
}

static bool ufs_check_constraints(UfsHc *u, Error **errp)
{
    if (u->params.nutrs > UFS_MAX_NUTRS) {
        error_setg(errp, "nutrs must be less than or equal to %d",
                   UFS_MAX_NUTRS);
        return false;
    }

    if (u->params.nutmrs > UFS_MAX_NUTMRS) {
        error_setg(errp, "nutmrs must be less than or equal to %d",
                   UFS_MAX_NUTMRS);
        return false;
    }

    return true;
}

static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x1);

    memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
                          u->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
    u->irq = pci_allocate_irq(pci_dev);
}

static void ufs_init_state(UfsHc *u)
{
    u->req_list = g_new0(UfsRequest, u->params.nutrs);

    for (int i = 0; i < u->params.nutrs; i++) {
        u->req_list[i].hc = u;
        u->req_list[i].slot = i;
        u->req_list[i].sg = NULL;
        u->req_list[i].state = UFS_REQUEST_IDLE;
    }

    u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);
    u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);
}

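/*
 * Set the reset values of the MMIO registers and the device, geometry,
 * attribute and flag values reported through query requests.
 */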
static void ufs_init_hc(UfsHc *u)
{
    uint32_t cap = 0;

    u->reg_size = pow2ceil(sizeof(UfsReg));

    memset(&u->reg, 0, sizeof(u->reg));
    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
    cap = FIELD_DP32(cap, CAP, RTT, 2);
    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
    cap = FIELD_DP32(cap, CAP, 64AS, 1);
    cap = FIELD_DP32(cap, CAP, OODDS, 0);
    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
    cap = FIELD_DP32(cap, CAP, CS, 0);
    u->reg.cap = cap;
    u->reg.ver = UFS_SPEC_VER;

    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
    u->device_desc.length = sizeof(DeviceDescriptor);
    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
    u->device_desc.device_sub_class = 0x01;
    u->device_desc.number_lu = 0x00;
    u->device_desc.number_wlu = 0x04;
    /* TODO: Revisit it when Power Management is implemented */
    u->device_desc.init_power_mode = 0x01; /* Active Mode */
    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
    u->device_desc.manufacturer_name = 0x00;
    u->device_desc.product_name = 0x01;
    u->device_desc.serial_number = 0x02;
    u->device_desc.oem_id = 0x03;
    u->device_desc.ud_0_base_offset = 0x16;
    u->device_desc.ud_config_p_length = 0x1A;
    u->device_desc.device_rtt_cap = 0x02;
    u->device_desc.queue_depth = u->params.nutrs;
    u->device_desc.product_revision_level = 0x04;

    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
    u->geometry_desc.length = sizeof(GeometryDescriptor);
    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
    u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
    u->geometry_desc.max_in_buffer_size = 0x8;
    u->geometry_desc.max_out_buffer_size = 0x8;
    u->geometry_desc.rpmb_read_write_size = 0x40;
    u->geometry_desc.data_ordering =
        0x0; /* out-of-order data transfer is not supported */
    u->geometry_desc.max_context_id_number = 0x5;
    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);

    memset(&u->attributes, 0, sizeof(u->attributes));
    u->attributes.max_data_in_size = 0x08;
    u->attributes.max_data_out_size = 0x08;
    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
    /* configure descriptor is not supported */
    u->attributes.config_descr_lock = 0x01;
    u->attributes.max_num_of_rtt = 0x02;

    memset(&u->flags, 0, sizeof(u->flags));
    u->flags.permanently_disable_fw_update = 1;
}

static void ufs_realize(PCIDevice *pci_dev, Error **errp)
{
    UfsHc *u = UFS(pci_dev);

    if (!ufs_check_constraints(u, errp)) {
        return;
    }

    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
              u->parent_obj.qdev.id);

    ufs_init_state(u);
    ufs_init_hc(u);
    ufs_init_pci(u, pci_dev);

    ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN);
    ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN);
    ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN);
    ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN);
}

static void ufs_exit(PCIDevice *pci_dev)
{
    UfsHc *u = UFS(pci_dev);

    qemu_bh_delete(u->doorbell_bh);
    qemu_bh_delete(u->complete_bh);

    for (int i = 0; i < u->params.nutrs; i++) {
        ufs_clear_req(&u->req_list[i]);
    }
    g_free(u->req_list);
}

static Property ufs_props[] = {
    DEFINE_PROP_STRING("serial", UfsHc, params.serial),
    DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
    DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription ufs_vmstate = {
    .name = "ufs",
    .unmigratable = 1,
};

static void ufs_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = ufs_realize;
    pc->exit = ufs_exit;
    pc->vendor_id = PCI_VENDOR_ID_REDHAT;
    pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
    pc->class_id = PCI_CLASS_STORAGE_UFS;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Universal Flash Storage";
    device_class_set_props(dc, ufs_props);
    dc->vmsd = &ufs_vmstate;
}

static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
                                  Error **errp)
{
    if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) {
        error_setg(errp, "%s cannot be connected to ufs-bus",
                   object_get_typename(OBJECT(qdev)));
        return false;
    }

    return true;
}

static char *ufs_bus_get_dev_path(DeviceState *dev)
{
    BusState *bus = qdev_get_parent_bus(dev);

    return qdev_get_dev_path(bus->parent);
}

static void ufs_bus_class_init(ObjectClass *class, void *data)
{
    BusClass *bc = BUS_CLASS(class);
    bc->get_dev_path = ufs_bus_get_dev_path;
    bc->check_address = ufs_bus_check_address;
}

static const TypeInfo ufs_info = {
    .name = TYPE_UFS,
    .parent = TYPE_PCI_DEVICE,
    .class_init = ufs_class_init,
    .instance_size = sizeof(UfsHc),
    .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};

static const TypeInfo ufs_bus_info = {
    .name = TYPE_UFS_BUS,
    .parent = TYPE_BUS,
    .class_init = ufs_bus_class_init,
    .class_size = sizeof(UfsBusClass),
    .instance_size = sizeof(UfsBus),
};

static void ufs_register_types(void)
{
    type_register_static(&ufs_info);
    type_register_static(&ufs_bus_info);
}

type_init(ufs_register_types)