// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "../habanalabs.h"
#include "../../include/hw_ip/mmu/mmu_general.h"

#include <linux/slab.h>

#define MMU_V1_MAX_HOPS	(MMU_HOP4 + 1)

static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);

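/*
 * get_pgt_info - find the pgt_info of a shadow hop page table
 *
 * @ctx: pointer to the context structure
 * @hop_addr: shadow address of the hop page table
 *
 * Looks up the hop in the per-context shadow hash. Returns NULL if the
 * shadow address is not tracked there (e.g. the preallocated hop0 tables).
 */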
static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = NULL;

	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
				(unsigned long) hop_addr)
		if (hop_addr == pgt_info->shadow_addr)
			break;

	return pgt_info;
}

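/*
 * _free_hop - release a hop page table using an already-resolved pgt_info
 *
 * Returns the physical page table to the device page-table pool, removes
 * the hop from the shadow hash and frees its shadow copy.
 */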
static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
	struct hl_device *hdev = ctx->hdev;

	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
			hdev->asic_prop.mmu_hop_table_size);
	hash_del(&pgt_info->node);
	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
	kfree(pgt_info);
}

static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);

	_free_hop(ctx, pgt_info);
}

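/*
 * alloc_hop - allocate a new hop page table
 *
 * Allocates a physical page table from the device pool together with a
 * zeroed host-side shadow copy, and tracks the pair in the shadow hash.
 * Returns the shadow address of the new hop, or ULLONG_MAX on failure.
 */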
static u64 alloc_hop(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pgt_info *pgt_info;
	u64 phys_addr, shadow_addr;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return ULLONG_MAX;

	phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
					prop->mmu_hop_table_size);
	if (!phys_addr) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_add_err;
	}

	shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
						GFP_KERNEL);
	if (!shadow_addr)
		goto shadow_err;

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = shadow_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

	return shadow_addr;

shadow_err:
	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
			prop->mmu_hop_table_size);
pool_add_err:
	kfree(pgt_info);

	return ULLONG_MAX;
}

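/*
 * hop0 tables are not allocated dynamically: both the physical tables and
 * their shadow copies are preallocated per ASID as contiguous arrays, so
 * the per-context hop0 is found by simple indexing with the ASID.
 */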
static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
	return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static void flush(struct hl_ctx *ctx)
{
	/* flush all writes from all cores to reach PCI */
	mb();
	ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
}

/* transform the value to physical address when writing to H/W */
static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	/*
	 * The value to write is actually the address of the next shadow hop +
	 * flags at the 12 LSBs.
	 * Hence in order to get the value to write to the physical PTE, we
	 * clear the 12 LSBs and translate the shadow hop to its associated
	 * physical hop, and add back the original 12 LSBs.
	 */
	u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
				(val & FLAGS_MASK);

	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					phys_val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* do not transform the value to physical address when writing to H/W */
static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
					u64 val)
{
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					get_phys_addr(ctx, shadow_pte_addr),
					val);
	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

/* clear the whole PTE, which drops the last and present bits */
static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
	/* no need to transform the value to physical address */
	write_final_pte(ctx, pte_addr, 0);
}

static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}

/*
 * put_pte - decrement the num of ptes and free the hop if possible
 *
 * @ctx: pointer to the context structure
 * @hop_addr: addr of the hop
 *
 * This function returns the number of ptes left in this hop. If the number
 * is 0, it means the hop was freed.
 */
static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		_free_hop(ctx, pgt_info);

	return num_of_ptes_left;
}

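/*
 * get_hop_pte_addr - compute the shadow address of a PTE within a hop
 *
 * The VA bits relevant to the given hop are extracted with the per-hop
 * mask/shift pair and used as a PTE index into the hop page table.
 */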
static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
					u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
{
	u64 mask, shift;

	mask = mmu_prop->hop_masks[hop_idx];
	shift = mmu_prop->hop_shifts[hop_idx];
	return hop_addr_arr[hop_idx] +
			ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}

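/*
 * get_alloc_next_hop_addr - get the next hop, allocating it if not present
 *
 * If @curr_pte does not point to a valid next hop, a new hop is allocated
 * and *is_new_hop is set accordingly. Returns the shadow address of the
 * next hop, or ULLONG_MAX if the allocation fails.
 */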
static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
						bool *is_new_hop)
{
	u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

	if (hop_addr == ULLONG_MAX) {
		hop_addr = alloc_hop(ctx);
		*is_new_hop = (hop_addr != ULLONG_MAX);
	}

	return hop_addr;
}

/* translates shadow address inside hop to a physical address */
static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
	u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
	u64 shadow_hop_addr = shadow_addr & ~page_mask;
	u64 pte_offset = shadow_addr & page_mask;
	u64 phys_hop_addr;

	if (shadow_hop_addr != get_hop0_addr(ctx))
		phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
	else
		phys_hop_addr = get_phys_hop0_addr(ctx);

	return phys_hop_addr + pte_offset;
}

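/*
 * dram_default_mapping_init - map the whole DRAM range to the default page
 *
 * @ctx: pointer to the context structure
 *
 * For ASICs with DRAM virtual memory and default page mapping enabled,
 * build a fixed hop0->hop1->hop2->hop3 chain in which every hop3 PTE
 * points at the default DRAM page, so that accesses to unmapped DRAM
 * addresses land on that page. The kernel ASID is skipped.
 */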
static int dram_default_mapping_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr, pte_val;
	int rc, i, j, hop3_allocated = 0;

	if ((!prop->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return 0;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, HOP_PTE_ENTRIES_512);

	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;

	ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
	if (!ctx->dram_default_hops)
		return -ENOMEM;

	hop0_addr = get_hop0_addr(ctx);

	hop1_addr = alloc_hop(ctx);
	if (hop1_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 1\n");
		rc = -ENOMEM;
		goto hop1_err;
	}

	ctx->dram_default_hops[total_hops - 1] = hop1_addr;

	hop2_addr = alloc_hop(ctx);
	if (hop2_addr == ULLONG_MAX) {
		dev_err(hdev->dev, "failed to alloc hop 2\n");
		rc = -ENOMEM;
		goto hop2_err;
	}

	ctx->dram_default_hops[total_hops - 2] = hop2_addr;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		ctx->dram_default_hops[i] = alloc_hop(ctx);
		if (ctx->dram_default_hops[i] == ULLONG_MAX) {
			dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
			rc = -ENOMEM;
			goto hop3_err;
		}
		hop3_allocated++;
	}

	/* need only pte 0 in hops 0 and 1 */
	pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop0_addr, pte_val);

	pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
	write_pte(ctx, hop1_addr, pte_val);
	get_pte(ctx, hop1_addr);

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
				PAGE_PRESENT_MASK;
		write_pte(ctx, hop2_pte_addr, pte_val);
		get_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
			LAST_MASK | PAGE_PRESENT_MASK;

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
			write_final_pte(ctx, hop3_pte_addr, pte_val);
			get_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	flush(ctx);

	return 0;

hop3_err:
	for (i = 0 ; i < hop3_allocated ; i++)
		free_hop(ctx, ctx->dram_default_hops[i]);

	free_hop(ctx, hop2_addr);
hop2_err:
	free_hop(ctx, hop1_addr);
hop1_err:
	kfree(ctx->dram_default_hops);

	return rc;
}

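/*
 * dram_default_mapping_fini - tear down the default DRAM mapping
 *
 * @ctx: pointer to the context structure
 *
 * Walks the hop chain built by dram_default_mapping_init() in reverse,
 * clearing the PTEs and dropping the reference counts so that all the
 * dynamically allocated hops are freed.
 */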
static void dram_default_mapping_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
		hop2_pte_addr, hop3_pte_addr;
	int i, j;

	if ((!prop->dram_supports_virtual_memory) ||
			(!hdev->dram_default_page_mapping) ||
			(ctx->asid == HL_KERNEL_ASID_ID))
		return;

	num_of_hop3 = prop->dram_size_for_default_page_mapping;
	do_div(num_of_hop3, prop->dram_page_size);
	do_div(num_of_hop3, HOP_PTE_ENTRIES_512);

	hop0_addr = get_hop0_addr(ctx);
	/* add hop1 and hop2 */
	total_hops = num_of_hop3 + 2;
	hop1_addr = ctx->dram_default_hops[total_hops - 1];
	hop2_addr = ctx->dram_default_hops[total_hops - 2];

	for (i = 0 ; i < num_of_hop3 ; i++) {
		hop3_pte_addr = ctx->dram_default_hops[i];
		for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
			clear_pte(ctx, hop3_pte_addr);
			put_pte(ctx, ctx->dram_default_hops[i]);
			hop3_pte_addr += HL_PTE_SIZE;
		}
	}

	hop2_pte_addr = hop2_addr;
	for (i = 0 ; i < num_of_hop3 ; i++) {
		clear_pte(ctx, hop2_pte_addr);
		put_pte(ctx, hop2_addr);
		hop2_pte_addr += HL_PTE_SIZE;
	}

	clear_pte(ctx, hop1_addr);
	put_pte(ctx, hop1_addr);
	clear_pte(ctx, hop0_addr);

	kfree(ctx->dram_default_hops);

	flush(ctx);
}

/**
 * hl_mmu_v1_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for the page tables.
 *
 * Return: 0 for success, non-zero for failure.
 */
static int hl_mmu_v1_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	hdev->mmu_priv.dr.mmu_pgt_pool =
			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);

	if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
			prop->mmu_hop0_tables_total_size,
			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
			-1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
		goto err_pool_add;
	}

	hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
										GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		rc = -ENOMEM;
		goto err_pool_add;
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

	return rc;
}

/**
 * hl_mmu_v1_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
static void hl_mmu_v1_fini(struct hl_device *hdev)
{
	/* MMU H/W fini was already done in device hw_fini() */

	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
		gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

		/* Make sure that if we arrive here again without init being
		 * called, we won't cause a kernel panic. This can happen, for
		 * example, if we fail during the hard reset code at certain
		 * points
		 */
		hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
	}
}

/**
 * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize the shadow hash that holds all the page-table hops related to
 * this context, and create the DRAM default page mapping if needed.
 * Return: 0 on success, non-zero otherwise.
 */
static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
{
	hash_init(ctx->mmu_shadow_hash);
	return dram_default_mapping_init(ctx);
}

/*
 * hl_mmu_v1_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free the DRAM default page mapping hops
 * - Free any pgts which were not freed yet
 */
static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	struct hlist_node *tmp;
	int i;

	dram_default_mapping_fini(ctx);

	if (!hash_empty(ctx->mmu_shadow_hash))
		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
			ctx->asid);

	hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
		dev_err_ratelimited(hdev->dev,
			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
		_free_hop(ctx, pgt_info);
	}
}

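/*
 * hl_mmu_v1_unmap - unmap a page from the device MMU
 *
 * @ctx: pointer to the context structure
 * @virt_addr: device virtual address to unmap
 * @is_dram_addr: whether the address resides in DRAM
 *
 * Walks the hop tables for @virt_addr and clears the final PTE. For DRAM
 * addresses with default page mapping enabled, the PTE is restored to
 * point at the default page instead. Hops whose PTE count drops to zero
 * are freed. Returns 0 on success, negative errno if the address is not
 * mapped.
 */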
static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
				u64 virt_addr, bool is_dram_addr)
{
	u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_huge, clear_hop3 = true;
	int hop_idx;

	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
		if (hop_idx == MMU_HOP0) {
			hop_addr[hop_idx] = get_hop0_addr(ctx);
		} else {
			hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
			if (hop_addr[hop_idx] == ULLONG_MAX)
				goto not_mapped;
		}

		hop_pte_addr[hop_idx] =
				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);

		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
	}

	is_huge = curr_pte & mmu_prop->last_mask;

	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
		return -EFAULT;
	}

	if (!is_huge) {
		hop_idx = MMU_HOP4;
		hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
		if (hop_addr[hop_idx] == ULLONG_MAX)
			goto not_mapped;

		hop_pte_addr[hop_idx] =
				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
		clear_hop3 = false;
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
				HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
					PAGE_PRESENT_MASK;
		if (curr_pte == default_pte) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		if (!(curr_pte & PAGE_PRESENT_MASK)) {
			dev_err(hdev->dev,
				"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
					virt_addr);
			goto not_mapped;
		}

		hop_idx = MMU_HOP3;
		write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
		put_pte(ctx, hop_addr[hop_idx]);
	} else {
		if (!(curr_pte & PAGE_PRESENT_MASK))
			goto not_mapped;

		if (hop_addr[MMU_HOP4])
			clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
		else
			clear_pte(ctx, hop_pte_addr[MMU_HOP3]);

		if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
			clear_hop3 = true;

		if (!clear_hop3)
			goto mapped;

		for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
			clear_pte(ctx, hop_pte_addr[hop_idx]);

			if (hop_idx == MMU_HOP0)
				break;

			if (put_pte(ctx, hop_addr[hop_idx]))
				goto mapped;
		}
	}

mapped:
	return 0;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
		virt_addr);

	return -EINVAL;
}

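/*
 * hl_mmu_v1_map - map a page in the device MMU
 *
 * @ctx: pointer to the context structure
 * @virt_addr: device virtual address of the page
 * @phys_addr: physical address of the page
 * @page_size: size of the page being mapped
 * @is_dram_addr: whether the address resides in DRAM
 *
 * Walks (and allocates, if needed) the hop tables for @virt_addr - four
 * hop levels for huge pages, five otherwise - and writes the final PTE.
 * On failure, any newly allocated hops are freed. Returns 0 on success,
 * negative errno otherwise.
 */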
static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
			u32 page_size, bool is_dram_addr)
{
	u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false};
	int num_hops, hop_idx, prev_hop, rc = -ENOMEM;

	/*
	 * This mapping function can map a page or a huge page. For huge page
	 * there are only 3 hops rather than 4. Currently the DRAM allocation
	 * uses huge pages only but user memory could have been allocated with
	 * one of the two page sizes. Since this is common code for all the
	 * three cases, we need this huge page check.
	 */
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (page_size == prop->pmmu_huge.page_size) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	}

	num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS;

	for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
		if (hop_idx == MMU_HOP0) {
			hop_addr[hop_idx] = get_hop0_addr(ctx);
		} else {
			hop_addr[hop_idx] =
					get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
			if (hop_addr[hop_idx] == ULLONG_MAX)
				goto err;
		}

		hop_pte_addr[hop_idx] =
				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
	}

	if (hdev->dram_default_page_mapping && is_dram_addr) {
		u64 default_pte = (prop->mmu_dram_default_page_addr &
					HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
						PAGE_PRESENT_MASK;

		if (curr_pte != default_pte) {
			dev_err(hdev->dev,
				"DRAM: mapping already exists for virt_addr 0x%llx\n",
					virt_addr);
			rc = -EINVAL;
			goto err;
		}

		for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
			if (hop_new[hop_idx]) {
				dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n");
				rc = -EFAULT;
				goto err;
			}
		}
	} else if (curr_pte & PAGE_PRESENT_MASK) {
		dev_err(hdev->dev,
			"mapping already exists for virt_addr 0x%llx\n",
				virt_addr);

		for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++)
			dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx,
					*(u64 *) (uintptr_t) hop_pte_addr[hop_idx],
					hop_pte_addr[hop_idx]);

		rc = -EINVAL;
		goto err;
	}

	curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
			| PAGE_PRESENT_MASK;

	write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);

	for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
		prev_hop = hop_idx - 1;

		if (hop_new[hop_idx]) {
			curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
			write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
			if (hop_idx != MMU_HOP1)
				get_pte(ctx, hop_addr[prev_hop]);
		}
	}

	get_pte(ctx, hop_addr[num_hops - 1]);

	return 0;

err:
	/* hop_new[] has only num_hops valid entries, so start the cleanup
	 * from the deepest hop that could have been allocated
	 */
	for (hop_idx = num_hops - 1; hop_idx > MMU_HOP0; hop_idx--) {
		if (hop_new[hop_idx])
			free_hop(ctx, hop_addr[hop_idx]);
	}

	return rc;
}

/*
 * hl_mmu_v1_swap_out - mark all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 */
static void hl_mmu_v1_swap_out(struct hl_ctx *ctx)
{

}

/*
 * hl_mmu_v1_swap_in - mark all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 */
static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
{

}

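/*
 * hl_mmu_v1_get_tlb_info - walk the hop tables and report the translation
 *
 * @ctx: pointer to the context structure
 * @virt_addr: device virtual address to resolve
 * @hops: filled with the hop/PTE addresses and values along the walk
 *
 * Classifies @virt_addr as a DRAM, PMMU or huge-PMMU address to pick the
 * MMU properties, then reads the physical hop tables until a last (leaf)
 * PTE is found. Returns -EINVAL for an unrecognized address range and
 * -EFAULT if the walk hits a missing or non-present hop.
 */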
static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
				struct hl_mmu_hop_info *hops)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge;
	int i, used_hops;

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);
	is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
						prop->pmmu.start_addr,
						prop->pmmu.end_addr);
	is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
						prop->pmmu_huge.page_size,
						prop->pmmu_huge.start_addr,
						prop->pmmu_huge.end_addr);
	if (is_dram_addr) {
		mmu_prop = &prop->dmmu;
		is_huge = true;
	} else if (is_pmmu_addr) {
		mmu_prop = &prop->pmmu;
		is_huge = false;
	} else if (is_pmmu_h_addr) {
		mmu_prop = &prop->pmmu_huge;
		is_huge = true;
	} else {
		return -EINVAL;
	}

	used_hops = mmu_prop->num_hops;

	/* huge pages use fewer hops */
	if (is_huge)
		used_hops--;

	hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
	hops->hop_info[0].hop_pte_addr =
			hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
					hops->hop_info[0].hop_addr, virt_addr);
	hops->hop_info[0].hop_pte_val =
			hdev->asic_funcs->read_pte(hdev,
						hops->hop_info[0].hop_pte_addr);

	for (i = 1 ; i < used_hops ; i++) {
		hops->hop_info[i].hop_addr =
			hl_mmu_get_next_hop_addr(ctx,
					hops->hop_info[i - 1].hop_pte_val);
		if (hops->hop_info[i].hop_addr == ULLONG_MAX)
			return -EFAULT;

		hops->hop_info[i].hop_pte_addr =
				hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
						hops->hop_info[i].hop_addr,
						virt_addr);
		hops->hop_info[i].hop_pte_val =
				hdev->asic_funcs->read_pte(hdev,
						hops->hop_info[i].hop_pte_addr);

		if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
			return -EFAULT;

		if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
			break;
	}

	/* if we passed over all hops then no last hop was found */
	if (i == mmu_prop->num_hops)
		return -EFAULT;

	if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
		return -EFAULT;

	hops->used_hops = i + 1;

	return 0;
}

/*
 * hl_mmu_v1_set_funcs - set the MMU functions for working with MMU v1
 *
 * @hdev: pointer to the device structure
 * @mmu: pointer to the MMU functions structure to fill
 */
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
	mmu->init = hl_mmu_v1_init;
	mmu->fini = hl_mmu_v1_fini;
	mmu->ctx_init = hl_mmu_v1_ctx_init;
	mmu->ctx_fini = hl_mmu_v1_ctx_fini;
	mmu->map = hl_mmu_v1_map;
	mmu->unmap = hl_mmu_v1_unmap;
	mmu->flush = flush;
	mmu->swap_out = hl_mmu_v1_swap_out;
	mmu->swap_in = hl_mmu_v1_swap_in;
	mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
}