// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

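/* Lifecycle of an xdp_rxq_info: a freshly zeroed struct is NEW,
 * xdp_rxq_info_reg() moves it to REGISTERED and xdp_rxq_info_unreg()
 * back to UNREGISTERED. UNUSED marks queues the driver promises never
 * to register, see xdp_rxq_info_unused().
 */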
#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
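/* Allocator IDs are handed out cyclically from [MEM_ID_MIN, MEM_ID_MAX)
 * by __mem_id_cyclic_get() below; id 0 is reserved to mean "no allocator
 * registered", see xdp_unreg_mem_model().
 */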
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

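/* Disconnect callback handed to page_pool via page_pool_use_xdp_mem():
 * removes every table entry pointing at @allocator, retrying the walk
 * when the rhashtable is resized underneath us (-EAGAIN).
 */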
static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index, unsigned int napi_id)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			/* Setup-time check: page_pool requires an allocator */
			return ERR_PTR(-EINVAL);
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
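
/* Illustrative driver-side sketch (not part of this file): the setup order
 * a driver follows for a page_pool backed RX queue. The function name and
 * the page_pool parameter values are hypothetical; xdp_rxq_info_reg(),
 * page_pool_create(), xdp_rxq_info_reg_mem_model(), page_pool_destroy()
 * and xdp_rxq_info_unreg() are the real APIs used.
 */
static int example_setup_rxq(struct net_device *dev, struct device *dma_dev,
			     struct xdp_rxq_info *rxq, u32 qid,
			     struct page_pool **pp_out)
{
	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP,	/* let page_pool handle DMA mapping */
		.order = 0,			/* order-0 pages, as __xdp_return() assumes */
		.pool_size = 256,		/* illustrative value */
		.nid = NUMA_NO_NODE,
		.dev = dma_dev,
		.dma_dir = DMA_FROM_DEVICE,
	};
	struct page_pool *pp;
	int err;

	/* 1) Register the RX queue info: state becomes REG_STATE_REGISTERED */
	err = xdp_rxq_info_reg(rxq, dev, qid, 0);
	if (err)
		return err;

	/* 2) Create the page_pool backing this queue */
	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		xdp_rxq_info_unreg(rxq);
		return PTR_ERR(pp);
	}

	/* 3) Attach the allocator: __xdp_reg_mem_model() assigns mem.id and
	 *    registers mem_allocator_disconnect() as the pool's disconnect
	 *    callback.
	 */
	err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pp);
	if (err) {
		page_pool_destroy(pp);
		xdp_rxq_info_unreg(rxq);
		return err;
	}

	*pp_out = pp;
	return 0;
}

/* Teardown of the sketch above is a single xdp_rxq_info_unreg(rxq): for
 * MEM_TYPE_PAGE_POOL, xdp_unreg_mem_model() destroys the page_pool itself.
 */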

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full) it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing faster recycling of
 * xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 struct xdp_buff *xdp)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order-0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared, to save a lookup if the next bulk has the same mem.id */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
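
/* Illustrative driver-side sketch (not part of this file): a TX completion
 * path using the bulk return API above. The function name, frame array and
 * count are hypothetical; xdp_frame_bulk_init() is the helper from
 * <net/xdp.h>, and rcu_read_lock() spans the loop as required by
 * xdp_return_frame_bulk().
 */
static void example_complete_xdp_tx(struct xdp_frame **frames, int n)
{
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);	/* bq.xa = NULL, bq.count = 0 */

	rcu_read_lock();		/* protects the mem_id_ht lookups */
	for (i = 0; i < n; i++)
		xdp_return_frame_bulk(frames[i], &bq);
	xdp_flush_frame_bulk(&bq);	/* flush any partially filled bulk */
	rcu_read_unlock();
}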

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
				      n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	unsigned int headroom, frame_size;
	void *hard_start;

	/* Part of the headroom was reserved for the xdp_frame struct */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* The memory backing the xdp_frame data already has room reserved
	 * for build_skb() to place the skb_shared_info in the tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
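
/* Illustrative sketch (not part of this file): a driver or XDP_REDIRECT
 * target that ends up with an xdp_frame can convert it with the helper
 * above and hand the result to the stack. The function name is
 * hypothetical; xdp_build_skb_from_frame(), xdp_return_frame() and
 * netif_receive_skb() are the real APIs used.
 */
static void example_frame_to_stack(struct xdp_frame *xdpf,
				   struct net_device *dev)
{
	struct sk_buff *skb;

	skb = xdp_build_skb_from_frame(xdpf, dev);
	if (unlikely(!skb)) {
		xdp_return_frame(xdpf);	/* conversion failed, recycle the frame */
		return;
	}

	netif_receive_skb(skb);		/* skb->protocol and skb->dev set above */
}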
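/* Copy an xdp_frame (struct, headroom and data) into a freshly allocated
 * order-0 page; the clone owns its own page and uses MEM_TYPE_PAGE_ORDER0.
 */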
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}