xref: /openbmc/qemu/hw/ufs/ufs.c (revision 7c85332a)
1 /*
2  * QEMU Universal Flash Storage (UFS) Controller
3  *
4  * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
5  *
6  * Written by Jeuk Kim <jeuk20.kim@samsung.com>
7  *
8  * SPDX-License-Identifier: GPL-2.0-or-later
9  */
10 
11 /**
12  * Reference Specs: https://www.jedec.org/, 4.0
13  *
14  * Usage
15  * -----
16  *
17  * Add options:
18  *      -drive file=<file>,if=none,id=<drive_id>
19  *      -device ufs,serial=<serial>,id=<bus_name>, \
20  *              nutrs=<N[optional]>,nutmrs=<N[optional]>
21  *      -device ufs-lu,drive=<drive_id>,bus=<bus_name>
22  */
23 
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "migration/vmstate.h"
27 #include "scsi/constants.h"
28 #include "trace.h"
29 #include "ufs.h"
30 
31 /* The QEMU-UFS device follows spec version 4.0 */
32 #define UFS_SPEC_VER 0x0400
33 #define UFS_MAX_NUTRS 32
34 #define UFS_MAX_NUTMRS 8
35 #define UFS_MCQ_QCFGPTR 2
36 
37 static void ufs_exec_req(UfsRequest *req);
38 static void ufs_clear_req(UfsRequest *req);
39 
static inline uint64_t ufs_mcq_reg_addr(UfsHc *u, int qid)
{
    /*
     * MCQ queue-configuration registers start at 400h
     * (UFS_MCQ_QCFGPTR * 200h), one 40h-byte window per queue.
     */
    const uint64_t base = UFS_MCQ_QCFGPTR * 0x200;

    return base + (uint64_t)qid * 0x40;
}
45 
static inline uint64_t ufs_mcq_op_reg_addr(UfsHc *u, int qid)
{
    /*
     * MCQ operation & runtime registers start at 1000h
     * (UFS_MCQ_OPR_START), 48 bytes per queue.
     */
    return UFS_MCQ_OPR_START + (uint64_t)qid * 48;
}
51 
static inline uint64_t ufs_reg_size(UfsHc *u)
{
    /* The register file ends right after the MCQ operation registers. */
    uint64_t end = ufs_mcq_op_reg_addr(u, 0) + sizeof(u->mcq_op_reg);

    return end;
}
57 
/* Does [addr, addr + size) fall entirely inside the MCQ config registers? */
static inline bool ufs_is_mcq_reg(UfsHc *u, uint64_t addr, unsigned size)
{
    uint64_t start, end;

    if (!u->params.mcq) {
        return false;
    }

    start = ufs_mcq_reg_addr(u, 0);
    end = start + sizeof(u->mcq_reg);
    return addr >= start && addr + size <= end;
}
70 
/* Does [addr, addr + size) fall entirely inside the MCQ op registers? */
static inline bool ufs_is_mcq_op_reg(UfsHc *u, uint64_t addr, unsigned size)
{
    uint64_t start, end;

    if (!u->params.mcq) {
        return false;
    }

    start = ufs_mcq_op_reg_addr(u, 0);
    end = start + sizeof(u->mcq_op_reg);
    return addr >= start && addr + size <= end;
}
83 
/*
 * DMA-read @size bytes at bus address @addr into @buf.
 *
 * Returns MEMTX_DECODE_ERROR if the range wraps around the address space,
 * or if it crosses the 4 GiB boundary while the controller does not
 * advertise 64-bit addressing (CAP.64AS).
 */
static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    /* Overflow: the end of the range wrapped past the start. */
    if (hi < addr) {
        return MEMTX_DECODE_ERROR;
    }

    /* Without CAP.64AS, reject any access above 4 GiB. */
    if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
        return MEMTX_DECODE_ERROR;
    }

    return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
}
98 
ufs_addr_write(UfsHc * u,hwaddr addr,const void * buf,int size)99 static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
100                                   int size)
101 {
102     hwaddr hi = addr + size - 1;
103     if (hi < addr) {
104         return MEMTX_DECODE_ERROR;
105     }
106 
107     if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
108         return MEMTX_DECODE_ERROR;
109     }
110 
111     return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
112 }
113 
ufs_get_utrd_addr(UfsHc * u,uint32_t slot)114 static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
115 {
116     hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
117     hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
118 
119     return utrd_addr;
120 }
121 
ufs_get_req_upiu_base_addr(const UtpTransferReqDesc * utrd)122 static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
123 {
124     uint32_t cmd_desc_base_addr_lo =
125         le32_to_cpu(utrd->command_desc_base_addr_lo);
126     uint32_t cmd_desc_base_addr_hi =
127         le32_to_cpu(utrd->command_desc_base_addr_hi);
128 
129     return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
130 }
131 
ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc * utrd)132 static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
133 {
134     hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
135     uint32_t rsp_upiu_byte_off =
136         le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
137     return req_upiu_base_addr + rsp_upiu_byte_off;
138 }
139 
ufs_dma_read_utrd(UfsRequest * req)140 static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
141 {
142     UfsHc *u = req->hc;
143     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
144     MemTxResult ret;
145 
146     ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
147     if (ret) {
148         trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
149     }
150     return ret;
151 }
152 
/*
 * DMA-read the request UPIU for @req from the command descriptor.
 *
 * The UPIU is variable-sized (header + transaction-specific fields +
 * data segment); the copy is clamped to the in-memory buffer size.
 */
static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
    UtpUpiuReq *req_upiu = &req->req_upiu;
    uint32_t copy_size;
    uint16_t data_segment_length;
    MemTxResult ret;

    /*
     * To know the size of the req_upiu, we need to read the
     * data_segment_length in the header first.
     */
    ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
                        sizeof(UtpUpiuHeader));
    if (ret) {
        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
        return ret;
    }
    /* data_segment_length is big-endian on the wire. */
    data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);

    copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
                data_segment_length;

    /* Clamp the guest-supplied length to our buffer size. */
    if (copy_size > sizeof(req->req_upiu)) {
        copy_size = sizeof(req->req_upiu);
    }

    /* Re-read the whole UPIU (header included) in one transfer. */
    ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
    if (ret) {
        trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
    }
    return ret;
}
187 
ufs_dma_read_prdt(UfsRequest * req)188 static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
189 {
190     UfsHc *u = req->hc;
191     uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
192     uint16_t prdt_byte_off =
193         le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
194     uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
195     g_autofree UfshcdSgEntry *prd_entries = NULL;
196     hwaddr req_upiu_base_addr, prdt_base_addr;
197     int err;
198 
199     assert(!req->sg);
200 
201     if (prdt_size == 0) {
202         return MEMTX_OK;
203     }
204     prd_entries = g_new(UfshcdSgEntry, prdt_size);
205 
206     req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
207     prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
208 
209     err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
210     if (err) {
211         trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
212         return err;
213     }
214 
215     req->sg = g_malloc0(sizeof(QEMUSGList));
216     pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
217     req->data_len = 0;
218 
219     for (uint16_t i = 0; i < prdt_len; ++i) {
220         hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
221         uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
222         qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
223         req->data_len += data_byte_count;
224     }
225     return MEMTX_OK;
226 }
227 
ufs_dma_read_upiu(UfsRequest * req)228 static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
229 {
230     MemTxResult ret;
231 
232     /*
233      * In case of MCQ, UTRD has already been read from a SQ, so skip it.
234      */
235     if (!ufs_mcq_req(req)) {
236         ret = ufs_dma_read_utrd(req);
237         if (ret) {
238             return ret;
239         }
240     }
241 
242     ret = ufs_dma_read_req_upiu(req);
243     if (ret) {
244         return ret;
245     }
246 
247     ret = ufs_dma_read_prdt(req);
248     if (ret) {
249         return ret;
250     }
251 
252     return 0;
253 }
254 
ufs_dma_write_utrd(UfsRequest * req)255 static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
256 {
257     UfsHc *u = req->hc;
258     hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
259     MemTxResult ret;
260 
261     ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
262     if (ret) {
263         trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
264     }
265     return ret;
266 }
267 
ufs_dma_write_rsp_upiu(UfsRequest * req)268 static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
269 {
270     UfsHc *u = req->hc;
271     hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
272     uint32_t rsp_upiu_byte_len =
273         le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
274     uint16_t data_segment_length =
275         be16_to_cpu(req->rsp_upiu.header.data_segment_length);
276     uint32_t copy_size = sizeof(UtpUpiuHeader) +
277                          UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
278                          data_segment_length;
279     MemTxResult ret;
280 
281     if (copy_size > rsp_upiu_byte_len) {
282         copy_size = rsp_upiu_byte_len;
283     }
284 
285     if (copy_size > sizeof(req->rsp_upiu)) {
286         copy_size = sizeof(req->rsp_upiu);
287     }
288 
289     ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
290     if (ret) {
291         trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
292     }
293     return ret;
294 }
295 
ufs_dma_write_upiu(UfsRequest * req)296 static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
297 {
298     MemTxResult ret;
299 
300     ret = ufs_dma_write_rsp_upiu(req);
301     if (ret) {
302         return ret;
303     }
304 
305     return ufs_dma_write_utrd(req);
306 }
307 
/* Re-evaluate the PCI interrupt line from IS & IE. */
static void ufs_irq_check(UfsHc *u)
{
    PCIDevice *pci = PCI_DEVICE(u);
    bool pending = (u->reg.is & UFS_INTR_MASK) & u->reg.ie;

    if (pending) {
        trace_ufs_irq_raise();
        pci_irq_assert(pci);
    } else {
        trace_ufs_irq_lower();
        pci_irq_deassert(pci);
    }
}
320 
/*
 * Handle a write to the legacy UTRLDBR doorbell: mark each newly rung
 * slot READY and schedule the doorbell bottom half, which performs the
 * actual execution.
 */
static void ufs_process_db(UfsHc *u, uint32_t val)
{
    DECLARE_BITMAP(doorbell, UFS_MAX_NUTRS);
    uint32_t slot;
    uint32_t nutrs = u->params.nutrs;
    UfsRequest *req;

    /* Only consider bits that are not already set in the register. */
    val &= ~u->reg.utrldbr;
    if (!val) {
        return;
    }

    doorbell[0] = val;
    slot = find_first_bit(doorbell, nutrs);

    while (slot < nutrs) {
        req = &u->req_list[slot];
        /* Stop at the first slot that is not ready to accept work. */
        if (req->state == UFS_REQUEST_ERROR) {
            trace_ufs_err_utrl_slot_error(req->slot);
            return;
        }

        if (req->state != UFS_REQUEST_IDLE) {
            trace_ufs_err_utrl_slot_busy(req->slot);
            return;
        }

        trace_ufs_process_db(slot);
        req->state = UFS_REQUEST_READY;
        slot = find_next_bit(doorbell, nutrs, slot + 1);
    }

    qemu_bh_schedule(u->doorbell_bh);
}
355 
/*
 * Execute a UIC command written to the UICCMD register.  The result code
 * is returned in UCMDARG2 and completion is signalled through IS.UCCS.
 */
static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
{
    trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
                             u->reg.ucmdarg3);
    /*
     * Only the essential uic commands for running drivers on Linux and Windows
     * are implemented.
     */
    switch (val) {
    case UFS_UIC_CMD_DME_LINK_STARTUP:
        /* Link is up: device present, both request lists ready. */
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    /* TODO: Revisit it when Power Management is implemented */
    case UFS_UIC_CMD_DME_HIBER_ENTER:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    case UFS_UIC_CMD_DME_HIBER_EXIT:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    default:
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
    }

    /* UIC command completion status. */
    u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);

    ufs_irq_check(u);
}
390 
/* Reset @req to a clean idle state bound to submission queue @sq. */
static void ufs_mcq_init_req(UfsHc *u, UfsRequest *req, UfsSq *sq)
{
    memset(req, 0, sizeof(*req));
    req->hc = u;
    req->sq = sq;
    req->slot = UFS_INVALID_SLOT;
    req->state = UFS_REQUEST_IDLE;
}
400 
/*
 * Bottom half for an MCQ submission queue: consume SQ entries between
 * head and tail and start executing them, while free UfsRequest objects
 * remain on the queue's free list.
 */
static void ufs_mcq_process_sq(void *opaque)
{
    UfsSq *sq = opaque;
    UfsHc *u = sq->u;
    UfsSqEntry sqe;
    UfsRequest *req;
    hwaddr addr;
    uint16_t head = ufs_mcq_sq_head(u, sq->sqid);
    int err;

    while (!(ufs_mcq_sq_empty(u, sq->sqid) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->addr + head;
        err = ufs_addr_read(sq->u, addr, (void *)&sqe, sizeof(sqe));
        if (err) {
            trace_ufs_err_dma_read_sq(sq->sqid, addr);
            return;
        }

        /* Advance the head byte offset, wrapping at the ring size. */
        head = (head + sizeof(sqe)) % (sq->size * sizeof(sqe));
        ufs_mcq_update_sq_head(u, sq->sqid, head);

        /* Take a free request object for this SQ entry. */
        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);

        ufs_mcq_init_req(sq->u, req, sq);
        /* For MCQ, the SQ entry itself serves as the UTRD. */
        memcpy(&req->utrd, &sqe, sizeof(req->utrd));

        req->state = UFS_REQUEST_RUNNING;
        ufs_exec_req(req);
    }
}
432 
/*
 * Bottom half for an MCQ completion queue: write a CQ entry for each
 * finished request, recycle the request onto its SQ's free list, and
 * raise the CQ interrupt if the queue is non-empty afterwards.
 */
static void ufs_mcq_process_cq(void *opaque)
{
    UfsCq *cq = opaque;
    UfsHc *u = cq->u;
    UfsRequest *req, *next;
    MemTxResult ret;
    uint32_t tail = ufs_mcq_cq_tail(u, cq->cqid);

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next)
    {
        /* Write the response UPIU before publishing the CQ entry. */
        ufs_dma_write_rsp_upiu(req);

        /* Build the CQ entry from the request's UTRD fields. */
        req->cqe.utp_addr =
            ((uint64_t)req->utrd.command_desc_base_addr_hi << 32ULL) |
            req->utrd.command_desc_base_addr_lo;
        req->cqe.utp_addr |= req->sq->sqid;
        req->cqe.resp_len = req->utrd.response_upiu_length;
        req->cqe.resp_off = req->utrd.response_upiu_offset;
        req->cqe.prdt_len = req->utrd.prd_table_length;
        req->cqe.prdt_off = req->utrd.prd_table_offset;
        /* Overall command status lives in the low nibble of dword_2. */
        req->cqe.status = req->utrd.header.dword_2 & 0xf;
        req->cqe.error = 0;

        ret = ufs_addr_write(u, cq->addr + tail, &req->cqe, sizeof(req->cqe));
        if (ret) {
            trace_ufs_err_dma_write_cq(cq->cqid, cq->addr + tail);
        }
        QTAILQ_REMOVE(&cq->req_list, req, entry);

        /* Advance the tail byte offset, wrapping at the ring size. */
        tail = (tail + sizeof(req->cqe)) % (cq->size * sizeof(req->cqe));
        ufs_mcq_update_cq_tail(u, cq->cqid, tail);

        /* Return the request to its submission queue's free list. */
        ufs_clear_req(req);
        QTAILQ_INSERT_TAIL(&req->sq->req_list, req, entry);
    }

    if (!ufs_mcq_cq_empty(u, cq->cqid)) {
        u->mcq_op_reg[cq->cqid].cq_int.is =
            FIELD_DP32(u->mcq_op_reg[cq->cqid].cq_int.is, CQIS, TEPS, 1);

        u->reg.is = FIELD_DP32(u->reg.is, IS, CQES, 1);
        ufs_irq_check(u);
    }
}
477 
/*
 * Create MCQ submission queue @qid from the SQATTR register value @attr.
 * Returns true on success, false (with a trace) on any validation error.
 */
static bool ufs_mcq_create_sq(UfsHc *u, uint8_t qid, uint32_t attr)
{
    UfsMcqReg *reg = &u->mcq_reg[qid];
    UfsSq *sq;
    /*
     * NOTE(review): cqid is guest-controlled via SQATTR.CQID; confirm the
     * field width cannot index past the bounds of u->cq.
     */
    uint8_t cqid = FIELD_EX32(attr, SQATTR, CQID);

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_create_sq_invalid_sqid(qid);
        return false;
    }

    if (u->sq[qid]) {
        trace_ufs_err_mcq_create_sq_already_exists(qid);
        return false;
    }

    /* The SQ must be bound to an already-created CQ. */
    if (!u->cq[cqid]) {
        trace_ufs_err_mcq_create_sq_invalid_cqid(qid);
        return false;
    }

    sq = g_malloc0(sizeof(*sq));
    sq->u = u;
    sq->sqid = qid;
    sq->cq = u->cq[cqid];
    /* Base address is SQUBA:SQLBA; SQATTR.SIZE is in 4-byte units. */
    sq->addr = ((uint64_t)reg->squba << 32) | reg->sqlba;
    sq->size = ((FIELD_EX32(attr, SQATTR, SIZE) + 1) << 2) / sizeof(UfsSqEntry);

    sq->bh = qemu_bh_new_guarded(ufs_mcq_process_sq, sq,
                                 &DEVICE(u)->mem_reentrancy_guard);
    /* Pre-allocate one request object per SQ entry on a free list. */
    sq->req = g_new0(UfsRequest, sq->size);
    QTAILQ_INIT(&sq->req_list);
    for (int i = 0; i < sq->size; i++) {
        ufs_mcq_init_req(u, &sq->req[i], sq);
        QTAILQ_INSERT_TAIL(&sq->req_list, &sq->req[i], entry);
    }

    u->sq[qid] = sq;

    trace_ufs_mcq_create_sq(sq->sqid, sq->cq->cqid, sq->addr, sq->size);
    return true;
}
520 
/* Tear down MCQ submission queue @qid.  Returns true on success. */
static bool ufs_mcq_delete_sq(UfsHc *u, uint8_t qid)
{
    UfsSq *sq;

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_delete_sq_invalid_sqid(qid);
        return false;
    }

    sq = u->sq[qid];
    if (!sq) {
        trace_ufs_err_mcq_delete_sq_not_exists(qid);
        return false;
    }

    qemu_bh_delete(sq->bh);
    g_free(sq->req);
    g_free(sq);
    u->sq[qid] = NULL;
    return true;
}
543 
/*
 * Create MCQ completion queue @qid from the CQATTR register value @attr.
 * Returns true on success.
 */
static bool ufs_mcq_create_cq(UfsHc *u, uint8_t qid, uint32_t attr)
{
    UfsMcqReg *reg = &u->mcq_reg[qid];
    UfsCq *cq;

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_create_cq_invalid_cqid(qid);
        return false;
    }

    if (u->cq[qid]) {
        trace_ufs_err_mcq_create_cq_already_exists(qid);
        return false;
    }

    cq = g_new0(UfsCq, 1);
    cq->u = u;
    cq->cqid = qid;
    /* Base address is CQUBA:CQLBA; CQATTR.SIZE is in 4-byte units. */
    cq->addr = ((uint64_t)reg->cquba << 32) | reg->cqlba;
    cq->size = ((FIELD_EX32(attr, CQATTR, SIZE) + 1) << 2) / sizeof(UfsCqEntry);

    cq->bh = qemu_bh_new_guarded(ufs_mcq_process_cq, cq,
                                 &DEVICE(u)->mem_reentrancy_guard);
    QTAILQ_INIT(&cq->req_list);

    u->cq[qid] = cq;

    trace_ufs_mcq_create_cq(cq->cqid, cq->addr, cq->size);
    return true;
}
574 
/* Tear down MCQ completion queue @qid.  Returns true on success. */
static bool ufs_mcq_delete_cq(UfsHc *u, uint8_t qid)
{
    UfsCq *cq;
    int i;

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_delete_cq_invalid_cqid(qid);
        return false;
    }

    cq = u->cq[qid];
    if (!cq) {
        trace_ufs_err_mcq_delete_cq_not_exists(qid);
        return false;
    }

    /* Refuse to delete the CQ while any SQ still targets it. */
    for (i = 0; i < ARRAY_SIZE(u->sq); i++) {
        if (u->sq[i] && u->sq[i]->cq->cqid == qid) {
            trace_ufs_err_mcq_delete_cq_sq_not_deleted(i, qid);
            return false;
        }
    }

    qemu_bh_delete(cq->bh);
    g_free(cq);
    u->cq[qid] = NULL;
    return true;
}
603 
/*
 * Handle a 32-bit write to the legacy UFSHCI register file at byte
 * @offset.  Unsupported/read-only registers are only traced.
 */
static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
{
    switch (offset) {
    case A_IS:
        /* Interrupt status is write-1-to-clear. */
        u->reg.is &= ~data;
        ufs_irq_check(u);
        break;
    case A_IE:
        u->reg.ie = data;
        ufs_irq_check(u);
        break;
    case A_HCE:
        /* Enabling the controller sets UCRDY; disabling clears HCS. */
        if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
            u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
        } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
                   !FIELD_EX32(data, HCE, HCE)) {
            u->reg.hcs = 0;
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
        }
        break;
    case A_UTRLBA:
        u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
        break;
    case A_UTRLBAU:
        u->reg.utrlbau = data;
        break;
    case A_UTRLDBR:
        /* Process newly rung slots before latching the doorbell bits. */
        ufs_process_db(u, data);
        u->reg.utrldbr |= data;
        break;
    case A_UTRLRSR:
        u->reg.utrlrsr = data;
        break;
    case A_UTRLCNR:
        /* Completion notification is write-1-to-clear. */
        u->reg.utrlcnr &= ~data;
        break;
    case A_UTMRLBA:
        u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
        break;
    case A_UTMRLBAU:
        u->reg.utmrlbau = data;
        break;
    case A_UICCMD:
        ufs_process_uiccmd(u, data);
        break;
    case A_UCMDARG1:
        u->reg.ucmdarg1 = data;
        break;
    case A_UCMDARG2:
        u->reg.ucmdarg2 = data;
        break;
    case A_UCMDARG3:
        u->reg.ucmdarg3 = data;
        break;
    case A_CONFIG:
        u->reg.config = data;
        break;
    case A_MCQCONFIG:
        u->reg.mcqconfig = data;
        break;
    case A_UTRLCLR:
    case A_UTMRLDBR:
    case A_UTMRLCLR:
    case A_UTMRLRSR:
        /* Task-management and list-clear registers are not implemented. */
        trace_ufs_err_unsupport_register_offset(offset);
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
676 
/*
 * Handle a 32-bit write to the MCQ queue-configuration registers.
 * @offset is relative to the start of the MCQ register bank; each queue
 * owns one UfsMcqReg-sized window.
 */
static void ufs_write_mcq_reg(UfsHc *u, hwaddr offset, uint32_t data,
                              unsigned size)
{
    int qid = offset / sizeof(UfsMcqReg);
    UfsMcqReg *reg = &u->mcq_reg[qid];

    switch (offset % sizeof(UfsMcqReg)) {
    case A_SQATTR:
        /* A 0->1 transition of SQEN creates the SQ; 1->0 deletes it. */
        if (!FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
            FIELD_EX32(data, SQATTR, SQEN)) {
            if (!ufs_mcq_create_sq(u, qid, data)) {
                break;
            }
        } else if (FIELD_EX32(reg->sqattr, SQATTR, SQEN) &&
                   !FIELD_EX32(data, SQATTR, SQEN)) {
            if (!ufs_mcq_delete_sq(u, qid)) {
                break;
            }
        }
        reg->sqattr = data;
        break;
    case A_SQLBA:
        reg->sqlba = data;
        break;
    case A_SQUBA:
        reg->squba = data;
        break;
    case A_SQCFG:
        reg->sqcfg = data;
        break;
    case A_CQATTR:
        /* A 0->1 transition of CQEN creates the CQ; 1->0 deletes it. */
        if (!FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
            FIELD_EX32(data, CQATTR, CQEN)) {
            if (!ufs_mcq_create_cq(u, qid, data)) {
                break;
            }
        } else if (FIELD_EX32(reg->cqattr, CQATTR, CQEN) &&
                   !FIELD_EX32(data, CQATTR, CQEN)) {
            if (!ufs_mcq_delete_cq(u, qid)) {
                break;
            }
        }
        reg->cqattr = data;
        break;
    case A_CQLBA:
        reg->cqlba = data;
        break;
    case A_CQUBA:
        reg->cquba = data;
        break;
    case A_CQCFG:
        reg->cqcfg = data;
        break;
    case A_SQDAO:
    case A_SQISAO:
    case A_CQDAO:
    case A_CQISAO:
        /* Writes to the queue offset registers are not supported. */
        trace_ufs_err_unsupport_register_offset(offset);
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
741 
/*
 * Handle an MCQ submission-queue tail-doorbell write: @db is the new SQ
 * tail byte offset written by the driver.
 */
static void ufs_mcq_process_db(UfsHc *u, uint8_t qid, uint32_t db)
{
    UfsSq *sq;

    if (qid >= u->params.mcq_maxq) {
        trace_ufs_err_mcq_db_wr_invalid_sqid(qid);
        return;
    }

    sq = u->sq[qid];
    /*
     * Guard against a doorbell write for a queue that was never created
     * (SQATTR.SQEN clear): the previous code dereferenced a NULL sq here.
     */
    if (!sq) {
        trace_ufs_err_mcq_db_wr_invalid_sqid(qid);
        return;
    }

    /* The tail offset must lie within the ring. */
    if (sq->size * sizeof(UfsSqEntry) <= db) {
        trace_ufs_err_mcq_db_wr_invalid_db(qid, db);
        return;
    }

    ufs_mcq_update_sq_tail(u, sq->sqid, db);
    qemu_bh_schedule(sq->bh);
}
760 
/*
 * Handle a 32-bit write to the MCQ operation & runtime registers.
 * @offset is relative to the start of the bank; each queue owns one
 * UfsMcqOpReg-sized window.
 */
static void ufs_write_mcq_op_reg(UfsHc *u, hwaddr offset, uint32_t data,
                                 unsigned size)
{
    int qid = offset / sizeof(UfsMcqOpReg);
    UfsMcqOpReg *opr = &u->mcq_op_reg[qid];

    switch (offset % sizeof(UfsMcqOpReg)) {
    case offsetof(UfsMcqOpReg, sq.tp):
        /* SQ tail-pointer doorbell: only act when the value changes. */
        if (opr->sq.tp != data) {
            ufs_mcq_process_db(u, qid, data);
        }
        opr->sq.tp = data;
        break;
    case offsetof(UfsMcqOpReg, cq.hp):
        opr->cq.hp = data;
        ufs_mcq_update_cq_head(u, qid, data);
        break;
    case offsetof(UfsMcqOpReg, cq_int.is):
        /* CQ interrupt status is write-1-to-clear. */
        opr->cq_int.is &= ~data;
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
786 
/*
 * MMIO read: dispatch @addr to the legacy, MCQ-config, or MCQ-op
 * register bank and return the containing 32-bit word.  Out-of-range
 * reads return 0.
 */
static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;
    uint32_t *ptr;
    uint64_t value;
    uint64_t offset;

    if (addr + size <= sizeof(u->reg)) {
        offset = addr;
        ptr = (uint32_t *)&u->reg;
    } else if (ufs_is_mcq_reg(u, addr, size)) {
        offset = addr - ufs_mcq_reg_addr(u, 0);
        ptr = (uint32_t *)&u->mcq_reg;
    } else if (ufs_is_mcq_op_reg(u, addr, size)) {
        offset = addr - ufs_mcq_op_reg_addr(u, 0);
        ptr = (uint32_t *)&u->mcq_op_reg;
    } else {
        trace_ufs_err_invalid_register_offset(addr);
        return 0;
    }

    /* ufs_mmio_ops restricts accesses to 4 bytes, so a word index works. */
    value = ptr[offset >> 2];
    trace_ufs_mmio_read(addr, value, size);
    return value;
}
812 
/* MMIO write: dispatch @addr to the register bank that contains it. */
static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;

    trace_ufs_mmio_write(addr, data, size);

    if (addr + size <= sizeof(u->reg)) {
        ufs_write_reg(u, addr, data, size);
        return;
    }
    if (ufs_is_mcq_reg(u, addr, size)) {
        ufs_write_mcq_reg(u, addr - ufs_mcq_reg_addr(u, 0), data, size);
        return;
    }
    if (ufs_is_mcq_op_reg(u, addr, size)) {
        ufs_write_mcq_op_reg(u, addr - ufs_mcq_op_reg_addr(u, 0), data, size);
        return;
    }
    trace_ufs_err_invalid_register_offset(addr);
}
830 
/* MMIO callbacks for the UFS HCI register space; accesses are 32-bit. */
static const MemoryRegionOps ufs_mmio_ops = {
    .read = ufs_mmio_read,
    .write = ufs_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
840 
841 
/*
 * Fill in the response UPIU header for @req.  The request header is
 * copied first so fields such as the task tag and LUN are echoed back.
 */
void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, uint8_t flags,
                           uint8_t response, uint8_t scsi_status,
                           uint16_t data_segment_length)
{
    UtpUpiuHeader *hdr = &req->rsp_upiu.header;

    memcpy(hdr, &req->req_upiu.header, sizeof(*hdr));
    hdr->trans_type = trans_type;
    hdr->flags = flags;
    hdr->response = response;
    hdr->scsi_status = scsi_status;
    /* data_segment_length is big-endian on the wire. */
    hdr->data_segment_length = cpu_to_be16(data_segment_length);
}
853 
/* Echo the query identification fields back into the response UPIU. */
void ufs_build_query_response(UfsRequest *req)
{
    req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
    req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
    req->rsp_upiu.qr.index = req->req_upiu.qr.index;
    req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
}
861 
ufs_exec_scsi_cmd(UfsRequest * req)862 static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
863 {
864     UfsHc *u = req->hc;
865     uint8_t lun = req->req_upiu.header.lun;
866 
867     UfsLu *lu = NULL;
868 
869     trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);
870 
871     if (!is_wlun(lun) && (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
872         trace_ufs_err_scsi_cmd_invalid_lun(lun);
873         return UFS_REQUEST_FAIL;
874     }
875 
876     switch (lun) {
877     case UFS_UPIU_REPORT_LUNS_WLUN:
878         lu = &u->report_wlu;
879         break;
880     case UFS_UPIU_UFS_DEVICE_WLUN:
881         lu = &u->dev_wlu;
882         break;
883     case UFS_UPIU_BOOT_WLUN:
884         lu = &u->boot_wlu;
885         break;
886     case UFS_UPIU_RPMB_WLUN:
887         lu = &u->rpmb_wlu;
888         break;
889     default:
890         lu = u->lus[lun];
891     }
892 
893     return lu->scsi_op(lu, req);
894 }
895 
ufs_exec_nop_cmd(UfsRequest * req)896 static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
897 {
898     trace_ufs_exec_nop_cmd(req->slot);
899     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
900     return UFS_REQUEST_SUCCESS;
901 }
902 
/*
 * This defines the permission of flags based on their IDN. There are some
 * things that are declared read-only, which is inconsistent with the ufs spec,
 * because we want to return an error for features that are not yet supported.
 *
 * Consulted by ufs_flag_check_idn_valid() before any flag query is served.
 */
static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
    [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
    /* Write protection is not supported */
    [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
                                    UFS_QUERY_FLAG_CLEAR |
                                    UFS_QUERY_FLAG_TOGGLE,
    [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
        UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
        UFS_QUERY_FLAG_TOGGLE,
    /* Purge Operation is not supported */
    [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Refresh Operation is not supported */
    [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Physical Resource Removal is not supported */
    [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
    /* Write Booster is not supported */
    [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
};
932 
ufs_flag_check_idn_valid(uint8_t idn,int op)933 static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
934 {
935     if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
936         return UFS_QUERY_RESULT_INVALID_IDN;
937     }
938 
939     if (!(flag_permission[idn] & op)) {
940         if (op == UFS_QUERY_FLAG_READ) {
941             trace_ufs_err_query_flag_not_readable(idn);
942             return UFS_QUERY_RESULT_NOT_READABLE;
943         }
944         trace_ufs_err_query_flag_not_writable(idn);
945         return UFS_QUERY_RESULT_NOT_WRITEABLE;
946     }
947 
948     return UFS_QUERY_RESULT_SUCCESS;
949 }
950 
/*
 * Per-IDN bitmask of query attribute operations the device accepts.
 * Entries not listed default to 0 (neither readable nor writable).
 */
static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
    /* booting is not supported */
    [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
    /* wSecondsPassed is write-only per this model */
    [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    /* refresh operation is not supported */
    [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
};
993 
ufs_attr_check_idn_valid(uint8_t idn,int op)994 static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
995 {
996     if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
997         return UFS_QUERY_RESULT_INVALID_IDN;
998     }
999 
1000     if (!(attr_permission[idn] & op)) {
1001         if (op == UFS_QUERY_ATTR_READ) {
1002             trace_ufs_err_query_attr_not_readable(idn);
1003             return UFS_QUERY_RESULT_NOT_READABLE;
1004         }
1005         trace_ufs_err_query_attr_not_writable(idn);
1006         return UFS_QUERY_RESULT_NOT_WRITEABLE;
1007     }
1008 
1009     return UFS_QUERY_RESULT_SUCCESS;
1010 }
1011 
ufs_exec_query_flag(UfsRequest * req,int op)1012 static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
1013 {
1014     UfsHc *u = req->hc;
1015     uint8_t idn = req->req_upiu.qr.idn;
1016     uint32_t value;
1017     QueryRespCode ret;
1018 
1019     ret = ufs_flag_check_idn_valid(idn, op);
1020     if (ret) {
1021         return ret;
1022     }
1023 
1024     if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
1025         value = 0;
1026     } else if (op == UFS_QUERY_FLAG_READ) {
1027         value = *(((uint8_t *)&u->flags) + idn);
1028     } else if (op == UFS_QUERY_FLAG_SET) {
1029         value = 1;
1030     } else if (op == UFS_QUERY_FLAG_CLEAR) {
1031         value = 0;
1032     } else if (op == UFS_QUERY_FLAG_TOGGLE) {
1033         value = *(((uint8_t *)&u->flags) + idn);
1034         value = !value;
1035     } else {
1036         trace_ufs_err_query_invalid_opcode(op);
1037         return UFS_QUERY_RESULT_INVALID_OPCODE;
1038     }
1039 
1040     *(((uint8_t *)&u->flags) + idn) = value;
1041     req->rsp_upiu.qr.value = cpu_to_be32(value);
1042     return UFS_QUERY_RESULT_SUCCESS;
1043 }
1044 
/*
 * Return the current value of attribute @idn in host byte order.
 *
 * Multi-byte fields in u->attributes are stored big-endian, so they are
 * converted with be16_to_cpu()/be32_to_cpu() here; single-byte fields
 * are returned as-is.  Unknown IDNs read as 0.
 */
static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
{
    switch (idn) {
    case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
        return u->attributes.boot_lun_en;
    case UFS_QUERY_ATTR_IDN_POWER_MODE:
        return u->attributes.current_power_mode;
    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
        return u->attributes.active_icc_level;
    case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
        return u->attributes.out_of_order_data_en;
    case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
        return u->attributes.background_op_status;
    case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
        return u->attributes.purge_status;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
        return u->attributes.max_data_in_size;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
        return u->attributes.max_data_out_size;
    case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
        return be32_to_cpu(u->attributes.dyn_cap_needed);
    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
        return u->attributes.ref_clk_freq;
    case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
        return u->attributes.config_descr_lock;
    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
        return u->attributes.max_num_of_rtt;
    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
        return be16_to_cpu(u->attributes.exception_event_control);
    case UFS_QUERY_ATTR_IDN_EE_STATUS:
        return be16_to_cpu(u->attributes.exception_event_status);
    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
        return be32_to_cpu(u->attributes.seconds_passed);
    case UFS_QUERY_ATTR_IDN_CNTX_CONF:
        return be16_to_cpu(u->attributes.context_conf);
    case UFS_QUERY_ATTR_IDN_FFU_STATUS:
        return u->attributes.device_ffu_status;
    case UFS_QUERY_ATTR_IDN_PSA_STATE:
        return be32_to_cpu(u->attributes.psa_state);
    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
        return be32_to_cpu(u->attributes.psa_data_size);
    case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
        return u->attributes.ref_clk_gating_wait_time;
    case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
        return u->attributes.device_case_rough_temperaure;
    case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
        return u->attributes.device_too_high_temp_boundary;
    case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
        return u->attributes.device_too_low_temp_boundary;
    case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
        return u->attributes.throttling_status;
    case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
        return u->attributes.wb_buffer_flush_status;
    case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
        return u->attributes.available_wb_buffer_size;
    case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
        return u->attributes.wb_buffer_life_time_est;
    case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
        return be32_to_cpu(u->attributes.current_wb_buffer_size);
    case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
        return u->attributes.refresh_status;
    case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
        return u->attributes.refresh_freq;
    case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
        return u->attributes.refresh_unit;
    }
    return 0;
}
1113 
ufs_write_attr_value(UfsHc * u,uint8_t idn,uint32_t value)1114 static QueryRespCode ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
1115 {
1116     switch (idn) {
1117     case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
1118         if (value > UFS_QUERY_ATTR_ACTIVE_ICC_MAXVALUE) {
1119             return UFS_QUERY_RESULT_INVALID_VALUE;
1120         }
1121         u->attributes.active_icc_level = value;
1122         break;
1123     case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
1124         u->attributes.max_data_in_size = value;
1125         break;
1126     case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
1127         u->attributes.max_data_out_size = value;
1128         break;
1129     case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
1130         u->attributes.ref_clk_freq = value;
1131         break;
1132     case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
1133         u->attributes.max_num_of_rtt = value;
1134         break;
1135     case UFS_QUERY_ATTR_IDN_EE_CONTROL:
1136         u->attributes.exception_event_control = cpu_to_be16(value);
1137         break;
1138     case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
1139         u->attributes.seconds_passed = cpu_to_be32(value);
1140         break;
1141     case UFS_QUERY_ATTR_IDN_PSA_STATE:
1142         u->attributes.psa_state = value;
1143         break;
1144     case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
1145         u->attributes.psa_data_size = cpu_to_be32(value);
1146         break;
1147     }
1148     return UFS_QUERY_RESULT_SUCCESS;
1149 }
1150 
ufs_exec_query_attr(UfsRequest * req,int op)1151 static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
1152 {
1153     UfsHc *u = req->hc;
1154     uint8_t idn = req->req_upiu.qr.idn;
1155     uint32_t value;
1156     QueryRespCode ret;
1157 
1158     ret = ufs_attr_check_idn_valid(idn, op);
1159     if (ret) {
1160         return ret;
1161     }
1162 
1163     if (op == UFS_QUERY_ATTR_READ) {
1164         value = ufs_read_attr_value(u, idn);
1165         ret = UFS_QUERY_RESULT_SUCCESS;
1166     } else {
1167         value = req->req_upiu.qr.value;
1168         ret = ufs_write_attr_value(u, idn, value);
1169     }
1170     req->rsp_upiu.qr.value = cpu_to_be32(value);
1171     return ret;
1172 }
1173 
/* Minimal RPMB well-known-LU unit descriptor; the LU is not enabled. */
static const RpmbUnitDescriptor rpmb_unit_desc = {
    .length = sizeof(RpmbUnitDescriptor),
    .descriptor_idn = 2,
    .unit_index = UFS_UPIU_RPMB_WLUN,
    .lu_enable = 0,
};
1180 
ufs_read_unit_desc(UfsRequest * req)1181 static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
1182 {
1183     UfsHc *u = req->hc;
1184     uint8_t lun = req->req_upiu.qr.index;
1185 
1186     if (lun != UFS_UPIU_RPMB_WLUN &&
1187         (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
1188         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
1189         return UFS_QUERY_RESULT_INVALID_INDEX;
1190     }
1191 
1192     if (lun == UFS_UPIU_RPMB_WLUN) {
1193         memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
1194     } else {
1195         memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
1196                sizeof(u->lus[lun]->unit_desc));
1197     }
1198 
1199     return UFS_QUERY_RESULT_SUCCESS;
1200 }
1201 
manufacturer_str_desc(void)1202 static inline StringDescriptor manufacturer_str_desc(void)
1203 {
1204     StringDescriptor desc = {
1205         .length = 0x12,
1206         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1207     };
1208     desc.UC[0] = cpu_to_be16('R');
1209     desc.UC[1] = cpu_to_be16('E');
1210     desc.UC[2] = cpu_to_be16('D');
1211     desc.UC[3] = cpu_to_be16('H');
1212     desc.UC[4] = cpu_to_be16('A');
1213     desc.UC[5] = cpu_to_be16('T');
1214     return desc;
1215 }
1216 
product_name_str_desc(void)1217 static inline StringDescriptor product_name_str_desc(void)
1218 {
1219     StringDescriptor desc = {
1220         .length = 0x22,
1221         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1222     };
1223     desc.UC[0] = cpu_to_be16('Q');
1224     desc.UC[1] = cpu_to_be16('E');
1225     desc.UC[2] = cpu_to_be16('M');
1226     desc.UC[3] = cpu_to_be16('U');
1227     desc.UC[4] = cpu_to_be16(' ');
1228     desc.UC[5] = cpu_to_be16('U');
1229     desc.UC[6] = cpu_to_be16('F');
1230     desc.UC[7] = cpu_to_be16('S');
1231     return desc;
1232 }
1233 
product_rev_level_str_desc(void)1234 static inline StringDescriptor product_rev_level_str_desc(void)
1235 {
1236     StringDescriptor desc = {
1237         .length = 0x0a,
1238         .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
1239     };
1240     desc.UC[0] = cpu_to_be16('0');
1241     desc.UC[1] = cpu_to_be16('0');
1242     desc.UC[2] = cpu_to_be16('0');
1243     desc.UC[3] = cpu_to_be16('1');
1244     return desc;
1245 }
1246 
/* Empty string descriptor: just the length byte and the type IDN. */
static const StringDescriptor null_str_desc = {
    .length = 0x02,
    .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
};
1251 
ufs_read_string_desc(UfsRequest * req)1252 static QueryRespCode ufs_read_string_desc(UfsRequest *req)
1253 {
1254     UfsHc *u = req->hc;
1255     uint8_t index = req->req_upiu.qr.index;
1256     StringDescriptor desc;
1257 
1258     if (index == u->device_desc.manufacturer_name) {
1259         desc = manufacturer_str_desc();
1260         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1261     } else if (index == u->device_desc.product_name) {
1262         desc = product_name_str_desc();
1263         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1264     } else if (index == u->device_desc.serial_number) {
1265         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1266     } else if (index == u->device_desc.oem_id) {
1267         memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
1268     } else if (index == u->device_desc.product_revision_level) {
1269         desc = product_rev_level_str_desc();
1270         memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
1271     } else {
1272         trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
1273         return UFS_QUERY_RESULT_INVALID_INDEX;
1274     }
1275     return UFS_QUERY_RESULT_SUCCESS;
1276 }
1277 
interconnect_desc(void)1278 static inline InterconnectDescriptor interconnect_desc(void)
1279 {
1280     InterconnectDescriptor desc = {
1281         .length = sizeof(InterconnectDescriptor),
1282         .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
1283     };
1284     desc.bcd_unipro_version = cpu_to_be16(0x180);
1285     desc.bcd_mphy_version = cpu_to_be16(0x410);
1286     return desc;
1287 }
1288 
/*
 * Handle a READ DESCRIPTOR query request.
 *
 * Copies the requested descriptor into the response data area and sets
 * the response length to min(requested length, actual descriptor
 * length); per the UFS descriptor layout, data[0] is the descriptor's
 * own length byte.  Only selector 0 is supported.
 */
static QueryRespCode ufs_read_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    QueryRespCode status;
    uint8_t idn = req->req_upiu.qr.idn;
    uint8_t selector = req->req_upiu.qr.selector;
    uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
    InterconnectDescriptor desc;
    if (selector != 0) {
        return UFS_QUERY_RESULT_INVALID_SELECTOR;
    }
    switch (idn) {
    case UFS_QUERY_DESC_IDN_DEVICE:
        memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_UNIT:
        status = ufs_read_unit_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_GEOMETRY:
        memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
               sizeof(u->geometry_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_INTERCONNECT: {
        desc = interconnect_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    }
    case UFS_QUERY_DESC_IDN_STRING:
        status = ufs_read_string_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_POWER:
        /* mocking of power descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_HEALTH:
        /* mocking of health descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    default:
        length = 0;
        trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
        status = UFS_QUERY_RESULT_INVALID_IDN;
    }

    /* Clamp the returned length to the descriptor's own length byte */
    if (length > req->rsp_upiu.qr.data[0]) {
        length = req->rsp_upiu.qr.data[0];
    }
    req->rsp_upiu.qr.length = cpu_to_be16(length);

    return status;
}
1349 
ufs_exec_query_read(UfsRequest * req)1350 static QueryRespCode ufs_exec_query_read(UfsRequest *req)
1351 {
1352     QueryRespCode status;
1353     switch (req->req_upiu.qr.opcode) {
1354     case UFS_UPIU_QUERY_OPCODE_NOP:
1355         status = UFS_QUERY_RESULT_SUCCESS;
1356         break;
1357     case UFS_UPIU_QUERY_OPCODE_READ_DESC:
1358         status = ufs_read_desc(req);
1359         break;
1360     case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
1361         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
1362         break;
1363     case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
1364         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
1365         break;
1366     default:
1367         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1368         status = UFS_QUERY_RESULT_INVALID_OPCODE;
1369         break;
1370     }
1371 
1372     return status;
1373 }
1374 
ufs_exec_query_write(UfsRequest * req)1375 static QueryRespCode ufs_exec_query_write(UfsRequest *req)
1376 {
1377     QueryRespCode status;
1378     switch (req->req_upiu.qr.opcode) {
1379     case UFS_UPIU_QUERY_OPCODE_NOP:
1380         status = UFS_QUERY_RESULT_SUCCESS;
1381         break;
1382     case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
1383         /* write descriptor is not supported */
1384         status = UFS_QUERY_RESULT_NOT_WRITEABLE;
1385         break;
1386     case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
1387         status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
1388         break;
1389     case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
1390         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
1391         break;
1392     case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
1393         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
1394         break;
1395     case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1396         status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
1397         break;
1398     default:
1399         trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1400         status = UFS_QUERY_RESULT_INVALID_OPCODE;
1401         break;
1402     }
1403 
1404     return status;
1405 }
1406 
ufs_exec_query_cmd(UfsRequest * req)1407 static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
1408 {
1409     uint8_t query_func = req->req_upiu.header.query_func;
1410     uint16_t data_segment_length;
1411     QueryRespCode status;
1412 
1413     trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
1414     if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
1415         status = ufs_exec_query_read(req);
1416     } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
1417         status = ufs_exec_query_write(req);
1418     } else {
1419         status = UFS_QUERY_RESULT_GENERAL_FAILURE;
1420     }
1421 
1422     data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
1423     ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
1424                           data_segment_length);
1425     ufs_build_query_response(req);
1426 
1427     if (status != UFS_QUERY_RESULT_SUCCESS) {
1428         return UFS_REQUEST_FAIL;
1429     }
1430     return UFS_REQUEST_SUCCESS;
1431 }
1432 
/* Fetch the request UPIU, dispatch it by transaction type, complete it. */
static void ufs_exec_req(UfsRequest *req)
{
    UfsReqResult result;

    if (ufs_dma_read_upiu(req)) {
        return;
    }

    switch (req->req_upiu.header.trans_type) {
    case UFS_UPIU_TRANSACTION_NOP_OUT:
        result = ufs_exec_nop_cmd(req);
        break;
    case UFS_UPIU_TRANSACTION_COMMAND:
        result = ufs_exec_scsi_cmd(req);
        break;
    case UFS_UPIU_TRANSACTION_QUERY_REQ:
        result = ufs_exec_query_cmd(req);
        break;
    default:
        trace_ufs_err_invalid_trans_code(req->slot,
                                         req->req_upiu.header.trans_type);
        result = UFS_REQUEST_FAIL;
        break;
    }

    /*
     * SCSI commands complete asynchronously via the
     * ufs_scsi_command_complete() callback and return
     * UFS_REQUEST_NO_COMPLETE, so they must not be completed again here.
     */
    if (result != UFS_REQUEST_NO_COMPLETE) {
        ufs_complete_req(req, result);
    }
}
1466 
ufs_process_req(void * opaque)1467 static void ufs_process_req(void *opaque)
1468 {
1469     UfsHc *u = opaque;
1470     UfsRequest *req;
1471     int slot;
1472 
1473     for (slot = 0; slot < u->params.nutrs; slot++) {
1474         req = &u->req_list[slot];
1475 
1476         if (req->state != UFS_REQUEST_READY) {
1477             continue;
1478         }
1479         trace_ufs_process_req(slot);
1480         req->state = UFS_REQUEST_RUNNING;
1481 
1482         ufs_exec_req(req);
1483     }
1484 }
1485 
/*
 * Mark @req complete and schedule its completion bottom half.
 *
 * The overall command status (OCS) in UTRD dword_2 is coarse: any
 * failure is reported as UFS_OCS_INVALID_CMD_TABLE_ATTR.  MCQ requests
 * are queued on their completion queue and drained by the CQ's bottom
 * half; legacy requests are drained by ufs_sendback_req() via
 * u->complete_bh.
 */
void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
{
    UfsHc *u = req->hc;
    /* Only a request in RUNNING state may complete */
    assert(req->state == UFS_REQUEST_RUNNING);

    if (req_result == UFS_REQUEST_SUCCESS) {
        req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
    } else {
        req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
    }

    req->state = UFS_REQUEST_COMPLETE;

    if (ufs_mcq_req(req)) {
        trace_ufs_mcq_complete_req(req->sq->sqid);
        QTAILQ_INSERT_TAIL(&req->sq->cq->req_list, req, entry);
        qemu_bh_schedule(req->sq->cq->bh);
    } else {
        trace_ufs_complete_req(req->slot);
        qemu_bh_schedule(u->complete_bh);
    }
}
1508 
/* Reset a request slot: zero its descriptors and drop its SG list. */
static void ufs_clear_req(UfsRequest *req)
{
    memset(&req->utrd, 0, sizeof(req->utrd));
    memset(&req->req_upiu, 0, sizeof(req->req_upiu));
    memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));

    if (req->sg) {
        qemu_sglist_destroy(req->sg);
        g_free(req->sg);
        req->sg = NULL;
        req->data_len = 0;
    }
}
1522 
/*
 * Completion bottom half for legacy (non-MCQ) requests.
 *
 * For each slot in COMPLETE state: DMA the response UPIU and UTRD back
 * to the guest, raise the UTRCS interrupt bit when required, clear the
 * doorbell bit, set the completion-notification bit, and recycle the
 * slot.  Finally re-evaluate the interrupt line.
 */
static void ufs_sendback_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_COMPLETE) {
            continue;
        }

        /* Leave the slot in ERROR state if write-back DMA fails */
        if (ufs_dma_write_upiu(req)) {
            req->state = UFS_REQUEST_ERROR;
            continue;
        }

        /*
         * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
         * supported
         */
        if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
            le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
            u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
        }

        /* Clear the doorbell bit and flag completion for this slot */
        u->reg.utrldbr &= ~(1 << slot);
        u->reg.utrlcnr |= (1 << slot);

        trace_ufs_sendback_req(req->slot);

        ufs_clear_req(req);
        req->state = UFS_REQUEST_IDLE;
    }

    ufs_irq_check(u);
}
1561 
/*
 * Validate user-supplied device properties before realize.
 * Reports the first violated constraint through @errp and returns
 * false; returns true when all properties are within bounds.
 */
static bool ufs_check_constraints(UfsHc *u, Error **errp)
{
    if (u->params.nutrs > UFS_MAX_NUTRS) {
        error_setg(errp, "nutrs must be less than or equal to %d",
                   UFS_MAX_NUTRS);
        return false;
    }

    if (u->params.nutmrs > UFS_MAX_NUTMRS) {
        error_setg(errp, "nutmrs must be less than or equal to %d",
                   UFS_MAX_NUTMRS);
        return false;
    }

    if (u->params.mcq_maxq >= UFS_MAX_MCQ_QNUM) {
        error_setg(errp, "mcq-maxq must be less than %d", UFS_MAX_MCQ_QNUM);
        return false;
    }

    return true;
}
1583 
/*
 * Set up the PCI side of the controller: legacy INTx pin, programming
 * interface, and the register MMIO window at BAR 0.
 * u->reg_size is computed earlier by ufs_init_hc().
 */
static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x1);

    memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
                          u->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
    u->irq = pci_allocate_irq(pci_dev);
}
1596 
/*
 * Allocate and reset per-slot request state, create the doorbell and
 * completion bottom halves, and clear the MCQ queue arrays when MCQ is
 * enabled.
 */
static void ufs_init_state(UfsHc *u)
{
    u->req_list = g_new0(UfsRequest, u->params.nutrs);

    for (int i = 0; i < u->params.nutrs; i++) {
        UfsRequest *req = &u->req_list[i];

        req->hc = u;
        req->slot = i;
        req->sg = NULL;
        req->state = UFS_REQUEST_IDLE;
    }

    u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);
    u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);

    if (u->params.mcq) {
        memset(u->sq, 0, sizeof(u->sq));
        memset(u->cq, 0, sizeof(u->cq));
    }
}
1618 
/*
 * Initialize the host controller register file and the mock device
 * descriptors, attributes and flags to their reset defaults.
 */
static void ufs_init_hc(UfsHc *u)
{
    uint32_t cap = 0;
    uint32_t mcqconfig = 0;
    uint32_t mcqcap = 0;

    /* Round up so the MMIO window is a power of two */
    u->reg_size = pow2ceil(ufs_reg_size(u));

    memset(&u->reg, 0, sizeof(u->reg));
    memset(&u->mcq_reg, 0, sizeof(u->mcq_reg));
    memset(&u->mcq_op_reg, 0, sizeof(u->mcq_op_reg));
    /* CAP: slot counts are encoded as value minus one */
    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
    cap = FIELD_DP32(cap, CAP, RTT, 2);
    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
    cap = FIELD_DP32(cap, CAP, 64AS, 1);
    cap = FIELD_DP32(cap, CAP, OODDS, 0);
    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
    cap = FIELD_DP32(cap, CAP, CS, 0);
    cap = FIELD_DP32(cap, CAP, LSDBS, 1);
    cap = FIELD_DP32(cap, CAP, MCQS, u->params.mcq);
    u->reg.cap = cap;

    if (u->params.mcq) {
        mcqconfig = FIELD_DP32(mcqconfig, MCQCONFIG, MAC, 0x1f);
        u->reg.mcqconfig = mcqconfig;

        mcqcap = FIELD_DP32(mcqcap, MCQCAP, MAXQ, u->params.mcq_maxq - 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, RRP, 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, QCFGPTR, UFS_MCQ_QCFGPTR);
        u->reg.mcqcap = mcqcap;

        /*
         * Lay out each queue's operation registers back to back:
         * SQ regs, SQ interrupt regs, CQ regs, CQ interrupt regs.
         */
        for (int i = 0; i < ARRAY_SIZE(u->mcq_reg); i++) {
            uint64_t addr = ufs_mcq_op_reg_addr(u, i);
            u->mcq_reg[i].sqdao = addr;
            u->mcq_reg[i].sqisao = addr + sizeof(UfsMcqSqReg);
            addr += sizeof(UfsMcqSqReg);
            u->mcq_reg[i].cqdao = addr + sizeof(UfsMcqSqIntReg);
            addr += sizeof(UfsMcqSqIntReg);
            u->mcq_reg[i].cqisao = addr + sizeof(UfsMcqCqReg);
        }
    }
    u->reg.ver = UFS_SPEC_VER;

    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
    u->device_desc.length = sizeof(DeviceDescriptor);
    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
    u->device_desc.device_sub_class = 0x01;
    u->device_desc.number_lu = 0x00;
    u->device_desc.number_wlu = 0x04;
    /* TODO: Revisit it when Power Management is implemented */
    u->device_desc.init_power_mode = 0x01; /* Active Mode */
    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
    /* String descriptor indices served by ufs_read_string_desc() */
    u->device_desc.manufacturer_name = 0x00;
    u->device_desc.product_name = 0x01;
    u->device_desc.serial_number = 0x02;
    u->device_desc.oem_id = 0x03;
    u->device_desc.ud_0_base_offset = 0x16;
    u->device_desc.ud_config_p_length = 0x1A;
    u->device_desc.device_rtt_cap = 0x02;
    u->device_desc.queue_depth = u->params.nutrs;
    u->device_desc.product_revision_level = 0x04;

    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
    u->geometry_desc.length = sizeof(GeometryDescriptor);
    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
    u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
    u->geometry_desc.max_in_buffer_size = 0x8;
    u->geometry_desc.max_out_buffer_size = 0x8;
    u->geometry_desc.rpmb_read_write_size = 0x40;
    u->geometry_desc.data_ordering =
        0x0; /* out-of-order data transfer is not supported */
    u->geometry_desc.max_context_id_number = 0x5;
    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);

    memset(&u->attributes, 0, sizeof(u->attributes));
    u->attributes.max_data_in_size = 0x08;
    u->attributes.max_data_out_size = 0x08;
    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
    /* configure descriptor is not supported */
    u->attributes.config_descr_lock = 0x01;
    u->attributes.max_num_of_rtt = 0x02;

    memset(&u->flags, 0, sizeof(u->flags));
    u->flags.permanently_disable_fw_update = 1;
}
1709 
/*
 * PCI realize callback: validate properties, create the UFS bus,
 * initialize controller state/registers/BAR, and instantiate the four
 * well-known logical units.
 */
static void ufs_realize(PCIDevice *pci_dev, Error **errp)
{
    UfsHc *u = UFS(pci_dev);

    if (!ufs_check_constraints(u, errp)) {
        return;
    }

    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
              u->parent_obj.qdev.id);

    ufs_init_state(u);
    ufs_init_hc(u);
    ufs_init_pci(u, pci_dev);

    /* Well-known LUs: REPORT LUNS, device, boot and RPMB */
    ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN);
    ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN);
    ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN);
    ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN);
}
1730 
/*
 * PCI exit callback: tear down bottom halves, per-slot request state,
 * and any MCQ submission/completion queues created at runtime.
 */
static void ufs_exit(PCIDevice *pci_dev)
{
    UfsHc *u = UFS(pci_dev);

    qemu_bh_delete(u->doorbell_bh);
    qemu_bh_delete(u->complete_bh);

    /* Release scatter/gather lists still attached to request slots */
    for (int i = 0; i < u->params.nutrs; i++) {
        ufs_clear_req(&u->req_list[i]);
    }
    g_free(u->req_list);

    for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
        if (u->sq[i]) {
            ufs_mcq_delete_sq(u, i);
        }
    }
    for (int i = 0; i < ARRAY_SIZE(u->cq); i++) {
        if (u->cq[i]) {
            ufs_mcq_delete_cq(u, i);
        }
    }
}
1754 
1755 static Property ufs_props[] = {
1756     DEFINE_PROP_STRING("serial", UfsHc, params.serial),
1757     DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
1758     DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
1759     DEFINE_PROP_BOOL("mcq", UfsHc, params.mcq, false),
1760     DEFINE_PROP_UINT8("mcq-maxq", UfsHc, params.mcq_maxq, 2),
1761     DEFINE_PROP_END_OF_LIST(),
1762 };
1763 
/*
 * VMState description: the device is explicitly marked unmigratable,
 * so live migration with a ufs device attached is refused.
 */
static const VMStateDescription ufs_vmstate = {
    .name = "ufs",
    .unmigratable = 1,
};
1768 
/*
 * QOM class initializer: install the PCI lifecycle callbacks and PCI IDs,
 * then fill in the generic device-class metadata (category, description,
 * properties and migration state).
 */
static void ufs_class_init(ObjectClass *oc, void *data)
{
    PCIDeviceClass *pci_class = PCI_DEVICE_CLASS(oc);
    DeviceClass *device_class = DEVICE_CLASS(oc);

    pci_class->realize = ufs_realize;
    pci_class->exit = ufs_exit;
    pci_class->vendor_id = PCI_VENDOR_ID_REDHAT;
    pci_class->device_id = PCI_DEVICE_ID_REDHAT_UFS;
    pci_class->class_id = PCI_CLASS_STORAGE_UFS;

    set_bit(DEVICE_CATEGORY_STORAGE, device_class->categories);
    device_class->desc = "Universal Flash Storage";
    device_class_set_props(device_class, ufs_props);
    device_class->vmsd = &ufs_vmstate;
}
1785 
ufs_bus_check_address(BusState * qbus,DeviceState * qdev,Error ** errp)1786 static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
1787                                   Error **errp)
1788 {
1789     if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) {
1790         error_setg(errp, "%s cannot be connected to ufs-bus",
1791                    object_get_typename(OBJECT(qdev)));
1792         return false;
1793     }
1794 
1795     return true;
1796 }
1797 
/*
 * Device-path hook for the UFS bus: a LU's canonical path is the path of
 * the host controller that owns its parent bus.
 */
static char *ufs_bus_get_dev_path(DeviceState *dev)
{
    BusState *parent_bus = qdev_get_parent_bus(dev);

    return qdev_get_dev_path(parent_bus->parent);
}
1804 
/*
 * QOM class initializer for the UFS bus: install the bus-level hooks for
 * device-path construction and attach-time type checking.
 */
static void ufs_bus_class_init(ObjectClass *class, void *data)
{
    BusClass *bus_class = BUS_CLASS(class);

    bus_class->check_address = ufs_bus_check_address;
    bus_class->get_dev_path = ufs_bus_get_dev_path;
}
1811 
/*
 * QOM type description for the UFS host controller: a PCI device that
 * advertises itself as a PCI Express function.
 */
static const TypeInfo ufs_info = {
    .name = TYPE_UFS,
    .parent = TYPE_PCI_DEVICE,
    .class_init = ufs_class_init,
    .instance_size = sizeof(UfsHc),
    .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};
1819 
/* QOM type description for the UFS bus that ufs-lu devices attach to. */
static const TypeInfo ufs_bus_info = {
    .name = TYPE_UFS_BUS,
    .parent = TYPE_BUS,
    .class_init = ufs_bus_class_init,
    .class_size = sizeof(UfsBusClass),
    .instance_size = sizeof(UfsBus),
};
1827 
/* Register the controller and bus types with the QOM type system. */
static void ufs_register_types(void)
{
    type_register_static(&ufs_info);
    type_register_static(&ufs_bus_info);
}

/* Run ufs_register_types() during QEMU's type-registration init phase. */
type_init(ufs_register_types)
1835