/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

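/* Complete set of flow dissector keys the driver can offload. Flows
 * using any key outside this set are rejected.
 */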
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

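/* All tunnel related dissector keys a flow may use. */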
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

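/* Minimum subset of tunnel dissector keys which must all be present
 * once any tunnel key is used in a match.
 */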
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

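/* Build a control message carrying the flow metadata, unmasked key,
 * mask and action data, and hand it to the firmware via the control
 * channel. The metadata lengths are converted to long words around the
 * copy, as that is the unit the firmware expects, then restored.
 */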
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as the firmware expects lengths in
	 * 4-byte words (a shift by NFP_FL_LW_SIZ).
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as the rest of the driver expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

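/* Return true if the flow matches on any field above the MAC layer:
 * L3 addresses, L4 ports or ICMP.
 */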
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

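/* Inspect the TC flower match and work out which NFP key layers are
 * needed to represent it, along with the total key size. Matches the
 * firmware cannot handle are rejected with -EOPNOTSUPP.
 */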
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tunnel dissector key is used, the required subset of
	 * tunnel keys must also be present.
	 */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;
	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

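		/* The tunnel type is chosen from the well-known
		 * destination UDP port of the encapsulation header.
		 */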
		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded to an egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other IP proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

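/* Allocate a flow payload plus buffers for the unmasked key, the mask
 * and the action data. Key and mask buffers are sized from the
 * calculated key layer; the action buffer gets the maximum size.
 */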
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Adds a new flow to the driver's flow hash table and offloads its
 * match and action payload to the firmware.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	int err;

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);

	/* The flow payload is freed when the flower rule is destroyed;
	 * only the temporary key layer info is released here.
	 */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Removes a flow from the driver's flow hash table and frees the
 * match and action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_fl_payload *nfp_flow;
	int err;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

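	/* Fall through: on both the success and the error path the flow
	 * is unlinked from the table and its buffers are freed.
	 */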
err_free_flow:
	hash_del_rcu(&nfp_flow->link);
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @flow:	TC flower classifier offload structure.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
{
	struct nfp_fl_payload *nfp_flow;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
	if (!nfp_flow)
		return -EINVAL;

	spin_lock_bh(&nfp_flow->lock);
	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
			      nfp_flow->stats.pkts, nfp_flow->stats.used);

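	/* Stats accumulate in the driver since the last read, so clear
	 * them once they have been reported to TC.
	 */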
	nfp_flow->stats.pkts = 0;
	nfp_flow->stats.bytes = 0;
	spin_unlock_bh(&nfp_flow->lock);

	return 0;
}

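/* Dispatch a TC flower command (replace/destroy/stats) for a repr.
 * Only 802.3 protocols on chain 0 can be offloaded.
 */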
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol) ||
	    flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, flower);
	}

	return -EOPNOTSUPP;
}

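/* Egress offload callback: flows offloaded via this callback are
 * handled as egress rules and must carry a tunnel match.
 */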
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_can_offload(repr->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

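/* Ingress block callback: flows added directly on the repr netdev. */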
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_can_offload(repr->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

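/* Bind or unbind the flower offload callback for a repr block. Only
 * clsact ingress blocks are supported.
 */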
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

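/* Entry point for TC setup requests on a repr netdev. */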
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
569