// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2023 Intel Corporation
 */
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"

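/*
 * Set up the early ("WRT") debug destination in the PRPH scratch.
 * With ini/TLV debug disabled, a driver-allocated DRAM monitor buffer
 * is used; otherwise the destination (SMEM, NPK or DRAM fragment 0)
 * comes from the DBGC1 allocation TLV. The matching EDBG flags are
 * OR'ed into *control_flags, plus EARLY_DEBUG_EN if any destination
 * was configured.
 */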
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
			      struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
			      u32 *control_flags)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 dbg_flags = 0;

	if (!iwl_trans_dbg_ini_valid(trans)) {
		struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (fw_mon->size) {
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;

			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM buffer destination\n");

			dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
			dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
		}

		goto out;
	}

	fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];

	switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
	case IWL_FW_INI_LOCATION_SRAM_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying SMEM buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_NPK_PATH:
		dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
		IWL_DEBUG_FW(trans,
			     "WRT: Applying NPK buffer destination\n");
		break;

	case IWL_FW_INI_LOCATION_DRAM_PATH:
		if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
			struct iwl_dram_data *frag =
				&trans->dbg.fw_mon_ini[alloc_id].frags[0];
			dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
			dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
			dbg_cfg->hwm_size = cpu_to_le32(frag->size);
			dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (debug_token_config=%u)\n",
				     trans->dbg.ucode_preset);
			IWL_DEBUG_FW(trans,
				     "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
				     alloc_id,
				     trans->dbg.fw_mon_ini[alloc_id].num_frags);
		}
		break;
	default:
		IWL_ERR(trans, "WRT: Invalid buffer destination\n");
	}
out:
	if (dbg_flags)
		*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}

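/*
 * Allocate and fill the gen3 context info structures in coherent DMA
 * memory (PRPH scratch, PRPH info and context info), load the firmware
 * sections into DRAM, copy the IML, then point the device at the
 * context info and trigger the firmware self load.
 */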
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
				 const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info_gen3 *ctxt_info_gen3;
	struct iwl_prph_scratch *prph_scratch;
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	struct iwl_prph_info *prph_info;
	u32 control_flags = 0;
	int ret;
	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
			      trans->cfg->min_txq_size);

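	/* translate the configured RX buffer size into control flags;
	 * 2K needs no flag, IWL_AMSDU_DEF is not valid at this point
	 */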
	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_DEF:
		return -EINVAL;
	case IWL_AMSDU_2K:
		break;
	case IWL_AMSDU_4K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;
		break;
	case IWL_AMSDU_12K:
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
		/* if firmware supports the ext size, tell it */
		control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K;
		break;
	}

	/* Allocate prph scratch */
	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
					  &trans_pcie->prph_scratch_dma_addr,
					  GFP_KERNEL);
	if (!prph_scratch)
		return -ENOMEM;

	prph_sc_ctrl = &prph_scratch->ctrl_cfg;

	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);

	control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
	control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;

	if (trans->trans_cfg->imr_enabled)
		control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
		cpu_to_le64(trans_pcie->rxq->bd_dma);

	iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
				      &control_flags);
	prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);

	/* initialize the Step equalizer data */
	prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
	prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
	if (ret)
		goto err_free_prph_scratch;

	/* Allocate prph information
	 * currently we don't assign anything to the prph info, but it will
	 * get assigned later
	 *
	 * We also use the second half of this page to give the device some
	 * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
	 * use this, but the hardware still reads/writes there and we can't let
	 * it go do that with a NULL pointer.
	 */
	BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
	prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
				       &trans_pcie->prph_info_dma_addr,
				       GFP_KERNEL);
	if (!prph_info) {
		ret = -ENOMEM;
		goto err_free_prph_scratch;
	}

	/* Allocate context info */
	ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
					    sizeof(*ctxt_info_gen3),
					    &trans_pcie->ctxt_info_dma_addr,
					    GFP_KERNEL);
	if (!ctxt_info_gen3) {
		ret = -ENOMEM;
		goto err_free_prph_info;
	}

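	/* point the device at the scratch/info areas and the queue rings */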
	ctxt_info_gen3->prph_info_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr);
	ctxt_info_gen3->prph_scratch_base_addr =
		cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
	ctxt_info_gen3->prph_scratch_size =
		cpu_to_le32(sizeof(*prph_scratch));
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
		cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
	ctxt_info_gen3->mcr_base_addr =
		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	ctxt_info_gen3->mtr_size =
		cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
	ctxt_info_gen3->mcr_size =
		cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));

	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
	trans_pcie->prph_info = prph_info;
	trans_pcie->prph_scratch = prph_scratch;

	/* Allocate IML */
	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
					     &trans_pcie->iml_dma_addr,
					     GFP_KERNEL);
	if (!trans_pcie->iml) {
		ret = -ENOMEM;
		goto err_free_ctxt_info;
	}

	memcpy(trans_pcie->iml, trans->iml, trans->iml_len);

	iwl_enable_fw_load_int_ctx_info(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_ADDR,
		    trans_pcie->ctxt_info_dma_addr);
	iwl_write64(trans, CSR_IML_DATA_ADDR,
		    trans_pcie->iml_dma_addr);
	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);

	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
		    CSR_AUTO_FUNC_BOOT_ENA);

	return 0;

err_free_ctxt_info:
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
	dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
			  trans_pcie->prph_info_dma_addr);

err_free_prph_scratch:
	dma_free_coherent(trans->dev,
			  sizeof(*prph_scratch),
			  prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	return ret;
}

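/*
 * Free the gen3 context info structures. With alive=true only the IML
 * copy and the firmware image DRAM are released; the context info,
 * PRPH scratch and PRPH info stay until the device is torn down.
 */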
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->iml) {
		dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
				  trans_pcie->iml_dma_addr);
		trans_pcie->iml_dma_addr = 0;
		trans_pcie->iml = NULL;
	}

	iwl_pcie_ctxt_info_free_fw_img(trans);

	if (alive)
		return;

	if (!trans_pcie->ctxt_info_gen3)
		return;

	/* ctxt_info_gen3 and prph_scratch were kept until now since they
	 * are still needed for the PNVM load after alive
	 */
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info_gen3 = NULL;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
			  trans_pcie->prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	trans_pcie->prph_scratch_dma_addr = 0;
	trans_pcie->prph_scratch = NULL;

	/* prph_info is needed for the entire lifetime, free it only here */
	dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
			  trans_pcie->prph_info_dma_addr);
	trans_pcie->prph_info_dma_addr = 0;
	trans_pcie->prph_info = NULL;
}

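/*
 * Copy an unfragmented PNVM image (exactly two payload chunks) into a
 * single coherent DMA block, back to back.
 */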
static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
					       const struct iwl_pnvm_image *pnvm_data,
					       struct iwl_dram_data *dram)
{
	u32 len, len0, len1;

	if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) {
		IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n",
			     pnvm_data->n_chunks);
		return -EINVAL;
	}

	len0 = pnvm_data->chunks[0].len;
	len1 = pnvm_data->chunks[1].len;
	if (len1 > 0xFFFFFFFF - len0) {
		IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n");
		return -EINVAL;
	}
	len = len0 + len1;

	dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
							    &dram->physical);
	if (!dram->block) {
		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
		return -ENOMEM;
	}

	dram->size = len;
	memcpy(dram->block, pnvm_data->chunks[0].data, len0);
	memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);

	return 0;
}

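/*
 * Copy a fragmented PNVM image: one coherent DMA region per payload
 * chunk, plus a descriptor array holding the DMA address of each
 * region for the firmware to walk.
 */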
static int iwl_pcie_load_payloads_segments
				(struct iwl_trans *trans,
				 struct iwl_dram_regions *dram_regions,
				 const struct iwl_pnvm_image *pnvm_data)
{
	struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
	struct iwl_prph_scrath_mem_desc_addr_array *addresses;
	const void *data;
	u32 len;
	int i;

	/* allocate and init DRAM descriptors array */
	len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array);
	desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent
						(trans,
						 len,
						 &desc_dram->physical);
	if (!desc_dram->block) {
		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n");
		return -ENOMEM;
	}
	desc_dram->size = len;
	memset(desc_dram->block, 0, len);

	/* allocate DRAM region for each payload */
	dram_regions->n_regions = 0;
	for (i = 0; i < pnvm_data->n_chunks; i++) {
		len = pnvm_data->chunks[i].len;
		data = pnvm_data->chunks[i].data;

		if (iwl_pcie_ctxt_info_alloc_dma(trans,
						 data,
						 len,
						 cur_payload_dram)) {
			iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
							      trans->dev);
			return -ENOMEM;
		}

		dram_regions->n_regions++;
		cur_payload_dram++;
	}

	/* fill desc with the DRAM payloads addresses */
	addresses = desc_dram->block;
	for (i = 0; i < pnvm_data->n_chunks; i++) {
		addresses->mem_descs[i] =
			cpu_to_le64(dram_regions->drams[i].physical);
	}

	return 0;
}

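/*
 * Load the PNVM payloads into DRAM, once per device life; the DRAM
 * addresses are handed to the firmware later, in
 * iwl_trans_pcie_ctx_info_gen3_set_pnvm().
 */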
int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
					   const struct iwl_pnvm_image *pnvm_payloads,
					   const struct iwl_ucode_capabilities *capa)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
	int ret = 0;

	/* only allocate the DRAM if not allocated yet */
	if (trans->pnvm_loaded)
		return 0;

	if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
		return -EBUSY;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	if (!pnvm_payloads->n_chunks) {
		IWL_DEBUG_FW(trans, "no payloads\n");
		return -EINVAL;
	}

	/* save payloads in several DRAM sections */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		ret = iwl_pcie_load_payloads_segments(trans,
						      dram_regions,
						      pnvm_payloads);
		if (!ret)
			trans->pnvm_loaded = true;
	} else {
		/* save only in one DRAM section */
		ret = iwl_pcie_load_payloads_continuously
						(trans,
						 pnvm_payloads,
						 &dram_regions->drams[0]);
		if (!ret) {
			dram_regions->n_regions = 1;
			trans->pnvm_loaded = true;
		}
	}

	return ret;
}

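/* sum of all payload region sizes; the descriptor region is not counted */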
static inline size_t
iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
{
	size_t total_size = 0;
	int i;

	for (i = 0; i < dram_regions->n_regions; i++)
		total_size += dram_regions->drams[i].size;

	return total_size;
}

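/* fragmented image: the firmware gets the descriptor array's address */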
static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

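/* unfragmented image: the firmware gets the single DRAM block directly */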
static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;

	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
		cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
	prph_sc_ctrl->pnvm_cfg.pnvm_size =
		cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}

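/*
 * Publish the previously loaded PNVM image to the firmware via the
 * PRPH scratch, choosing the fragmented or continuous layout to match
 * what the firmware advertises.
 */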
void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
					   const struct iwl_ucode_capabilities *capa)
{
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
		iwl_pcie_set_pnvm_segments(trans);
	else
		iwl_pcie_set_continuous_pnvm(trans);
}

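/*
 * Load the reduce-power tables into DRAM; this mirrors the PNVM flow
 * above, but fills reduced_tables_data/reduce_power_cfg instead.
 */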
int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
						   const struct iwl_pnvm_image *payloads,
						   const struct iwl_ucode_capabilities *capa)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
	int ret = 0;

	/* only allocate the DRAM if not allocated yet */
	if (trans->reduce_power_loaded)
		return 0;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
		return -EBUSY;

	if (!payloads->n_chunks) {
		IWL_DEBUG_FW(trans, "no payloads\n");
		return -EINVAL;
	}

	/* save payloads in several DRAM sections */
	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
		ret = iwl_pcie_load_payloads_segments(trans,
						      dram_regions,
						      payloads);
		if (!ret)
			trans->reduce_power_loaded = true;
	} else {
		/* save only in one DRAM section */
		ret = iwl_pcie_load_payloads_continuously
						(trans,
						 payloads,
						 &dram_regions->drams[0]);
		if (!ret) {
			dram_regions->n_regions = 1;
			trans->reduce_power_loaded = true;
		}
	}

	return ret;
}

static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(iwl_dram_regions_size(dram_regions));
}

static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
}

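/*
 * Publish the reduce-power tables to the firmware via the PRPH
 * scratch, fragmented or continuous to match the firmware capability.
 */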
void
iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
					      const struct iwl_ucode_capabilities *capa)
{
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
		iwl_pcie_set_reduce_power_segments(trans);
	else
		iwl_pcie_set_continuous_reduce_power(trans);
}