/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
 *    public, protected, and private methods.
 */
#include "remote_node_table.h"
#include "remote_node_context.h"

/**
 * sci_remote_node_table_get_group_index()
 * @remote_node_table: This is the remote node index table from which the
 *    selection will be made.
 * @group_table_index: This is the index to the group table from which to
 *    search for an available selection.
 *
 * This routine will find, in absolute bit terms, the position of the first
 * available bit in the specified group table.  It returns the absolute bit
 * position for an available group, or
 * SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if no group is available.
 */
static u32 sci_remote_node_table_get_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u32 dword_index;
	u32 *group_table;
	u32 bit_index;

	group_table = remote_node_table->remote_node_groups[group_table_index];

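	/*
	 * Scan the group table one dword at a time and return the absolute
	 * bit position of the first available (set) bit.
	 */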
	for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
		if (group_table[dword_index] != 0) {
			for (bit_index = 0; bit_index < 32; bit_index++) {
				if ((group_table[dword_index] & (1 << bit_index)) != 0) {
					return (dword_index * 32) + bit_index;
				}
			}
		}
	}

	return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
}

/**
 * sci_remote_node_table_clear_group_index()
 * @remote_node_table: This is the remote node table in which to clear the
 *    selector.
 * @group_table_index: This is the remote node selector in which the change
 *    will be made.
 * @group_index: This is the bit index in the table to be modified.
 *
 * This method will clear the group index entry in the specified group index
 * table.
 */
static void sci_remote_node_table_clear_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index,
	u32 group_index)
{
	u32 dword_index;
	u32 bit_index;
	u32 *group_table;

	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));

	dword_index = group_index / 32;
	bit_index   = group_index % 32;
	group_table = remote_node_table->remote_node_groups[group_table_index];

	group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
}

/**
 * sci_remote_node_table_set_group_index()
 * @remote_node_table: This is the remote node table in which to set the
 *    selector.
 * @group_table_index: This is the remote node selector in which the change
 *    will be made.
 * @group_index: This is the bit position in the table to be modified.
 *
 * This method will set the group index bit entry in the specified group index
 * table.
 */
static void sci_remote_node_table_set_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index,
	u32 group_index)
{
	u32 dword_index;
	u32 bit_index;
	u32 *group_table;

	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));

	dword_index = group_index / 32;
	bit_index   = group_index % 32;
	group_table = remote_node_table->remote_node_groups[group_table_index];

	group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
}

/**
 * sci_remote_node_table_set_node_index()
 * @remote_node_table: This is the remote node table in which to modify
 *    the remote node availability.
 * @remote_node_index: This is the remote node index that is being returned to
 *    the table.
 *
 * This method will set the remote node as available in the remote node
 * allocation table.
 */
static void sci_remote_node_table_set_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 slot_normalized;
	u32 slot_position;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
		);

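	/*
	 * Each dword of available_remote_nodes tracks
	 * SCIC_SDS_REMOTE_NODES_PER_DWORD remote nodes, arranged as groups of
	 * SCU_STP_REMOTE_NODE_COUNT nodes per 4-bit slot.  slot_normalized is
	 * the bit offset of the node's group slot within the dword (the
	 * sizeof(u32) factor is the 4-bit slot width), and slot_position is
	 * the node's bit within that slot.
	 */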
	dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
	slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;

	remote_node_table->available_remote_nodes[dword_location] |=
		1 << (slot_normalized + slot_position);
}

/**
 * sci_remote_node_table_clear_node_index()
 * @remote_node_table: This is the remote node table from which to clear
 *    the available remote node bit.
 * @remote_node_index: This is the remote node index which is to be cleared
 *    from the table.
 *
 * This method clears the remote node index from the table of available remote
 * nodes.
 */
static void sci_remote_node_table_clear_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 slot_position;
	u32 slot_normalized;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
		);

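	/* Locate the node's bit within its 4-bit group slot so it can be cleared. */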
	dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
	slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;

	remote_node_table->available_remote_nodes[dword_location] &=
		~(1 << (slot_normalized + slot_position));
}

/**
 * sci_remote_node_table_clear_group()
 * @remote_node_table: The remote node table from which the slot will be
 *    cleared.
 * @group_index: The index for the slot that is to be cleared.
 *
 * This method clears the entire table slot at the specified slot index.
 */
static void sci_remote_node_table_clear_group(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

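	/* Clear all of the group's node bits within its 4-bit slot. */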
	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	remote_node_table->available_remote_nodes[dword_location] = dword_value;
}

/*
 * sci_remote_node_table_set_group()
 *
 * This method sets an entire remote node group in the remote node table.
 */
static void sci_remote_node_table_set_group(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

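	/* Mark every node in the group's 4-bit slot as available. */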
	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	remote_node_table->available_remote_nodes[dword_location] = dword_value;
}

/**
 * sci_remote_node_table_get_group_value()
 * @remote_node_table: This is the remote node table for which the group
 *    value is to be returned.
 * @group_index: This is the group index to use to find the group value.
 *
 * This method will return the group value for the specified group index, i.e.
 * the bit values at the specified remote node group index.
 */
static u8 sci_remote_node_table_get_group_value(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

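	/* Extract the group's node bits from its 4-bit slot and shift them down. */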
	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	dword_value = dword_value >> (dword_remainder * 4);

	return (u8)dword_value;
}

/**
 * sci_remote_node_table_initialize()
 * @remote_node_table: The remote node table which is to be initialized.
 * @remote_node_entries: The number of entries to put in the table.
 *
 * This method will initialize the remote node table for use.
 */
void sci_remote_node_table_initialize(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_entries)
{
	u32 index;

	/*
	 * Initialize the raw data.  We could improve the speed by only
	 * initializing those entries that are actually going to be used. */
	memset(
		remote_node_table->available_remote_nodes,
		0x00,
		sizeof(remote_node_table->available_remote_nodes)
		);

	memset(
		remote_node_table->remote_node_groups,
		0x00,
		sizeof(remote_node_table->remote_node_groups)
		);

	/* Initialize the available remote node sets */
	remote_node_table->available_nodes_array_size = (u16)
							(remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
							+ ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);


	/* Initialize each full DWORD to a FULL SET of remote nodes */
	for (index = 0; index < remote_node_entries; index++) {
		sci_remote_node_table_set_node_index(remote_node_table, index);
	}

	remote_node_table->group_array_size = (u16)
					      (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
					      + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);

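	/*
	 * Group table 2 holds groups with all SCU_STP_REMOTE_NODE_COUNT nodes
	 * available, group table 1 holds groups with two available nodes, and
	 * group table 0 holds groups with a single available node.
	 */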
	for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
		/*
		 * These are all guaranteed to be full slot values so fill them in the
		 * available sets of 3 remote nodes */
		sci_remote_node_table_set_group_index(remote_node_table, 2, index);
	}

	/* Now fill in any remainders that we may find */
	if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
		sci_remote_node_table_set_group_index(remote_node_table, 1, index);
	} else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
		sci_remote_node_table_set_group_index(remote_node_table, 0, index);
	}
}

/**
 * sci_remote_node_table_allocate_single_remote_node()
 * @remote_node_table: The remote node table from which to allocate a
 *    remote node.
 * @group_table_index: The group index that is to be used for the search.
 *
 * This method will allocate a single RNi from the remote node table.  The
 * group table index determines from which remote node group table to search.
 * This search may fail, in which case another group node table can be
 * specified.  The function is designed to allow a search of the available
 * single remote node group up to the triple remote node group.  If an entry
 * is found in the specified table, the remote node is removed and the remote
 * node groups are updated.  The return value is the RNi, or an invalid remote
 * node context if an RNi cannot be found.
 */
static u16 sci_remote_node_table_allocate_single_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u8 index;
	u8 group_value;
	u32 group_index;
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	group_index = sci_remote_node_table_get_group_index(
		remote_node_table, group_table_index);

	/* Proceed only if an available group was found in the specified group table */
	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
		group_value = sci_remote_node_table_get_group_value(
			remote_node_table, group_index);

		for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
			if (((1 << index) & group_value) != 0) {
				/* We have selected a bit, now clear it */
				remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
							  + index);

				sci_remote_node_table_clear_group_index(
					remote_node_table, group_table_index, group_index
					);

				sci_remote_node_table_clear_node_index(
					remote_node_table, remote_node_index
					);

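				/*
				 * The group now has one fewer available node,
				 * so move it down to the next lower group
				 * table.  A group with no remaining nodes is
				 * not listed in any group table.
				 */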
				if (group_table_index > 0) {
					sci_remote_node_table_set_group_index(
						remote_node_table, group_table_index - 1, group_index
						);
				}

				break;
			}
		}
	}

	return remote_node_index;
}

/**
 * sci_remote_node_table_allocate_triple_remote_node()
 * @remote_node_table: This is the remote node table from which to allocate the
 *    remote node entries.
 * @group_table_index: This is the group table index which must equal two (2)
 *    for this operation.
 *
 * This method will allocate three consecutive remote node context entries.  If
 * there are no remaining triple entries the function will return a failure.
 * The return value is the remote node index that represents three consecutive
 * remote node entries, or an invalid remote node context if none can be found.
 */
static u16 sci_remote_node_table_allocate_triple_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u32 group_index;
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	group_index = sci_remote_node_table_get_group_index(
		remote_node_table, group_table_index);

	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
		remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;

		sci_remote_node_table_clear_group_index(
			remote_node_table, group_table_index, group_index
			);

		sci_remote_node_table_clear_group(
			remote_node_table, group_index
			);
	}

	return remote_node_index;
}

/**
 * sci_remote_node_table_allocate_remote_node()
 * @remote_node_table: This is the remote node table from which the remote node
 *    allocation is to take place.
 * @remote_node_count: This is the remote node count, which is one of
 *    SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
 *
 * This method will allocate a remote node that matches the remote node count
 * specified by the caller.  Valid values for the remote node count are
 * SCU_SSP_REMOTE_NODE_COUNT(1) and SCU_STP_REMOTE_NODE_COUNT(3).  The return
 * value is the remote node index that was allocated, or an invalid remote
 * node context.
 */
u16 sci_remote_node_table_allocate_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count)
{
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

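	/*
	 * For a single node, search the group tables from the least populated
	 * groups (table 0) up to the fully available groups (table 2), which
	 * keeps full groups free for triple allocations as long as possible.
	 */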
	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
		remote_node_index =
			sci_remote_node_table_allocate_single_remote_node(
				remote_node_table, 0);

		if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			remote_node_index =
				sci_remote_node_table_allocate_single_remote_node(
					remote_node_table, 1);
		}

		if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			remote_node_index =
				sci_remote_node_table_allocate_single_remote_node(
					remote_node_table, 2);
		}
	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
		remote_node_index =
			sci_remote_node_table_allocate_triple_remote_node(
				remote_node_table, 2);
	}

	return remote_node_index;
}

/**
 * sci_remote_node_table_release_single_remote_node()
 * @remote_node_table: This is the remote node table from which the remote node
 *    release is to take place.
 * @remote_node_index: This is the remote node index that is being released.
 *
 * This method will free a single remote node index back to the remote node
 * table.  This routine will update the remote node groups.
 */
static void sci_remote_node_table_release_single_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u16 remote_node_index)
{
	u32 group_index;
	u8 group_value;

	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;

	group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);

	/*
	 * Assert that we are not trying to add an entry to a slot that is already
	 * full. */
	BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);

	if (group_value == 0x00) {
		/*
		 * There are no entries in this slot so it must be added to the single
		 * slot table. */
		sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
	} else if ((group_value & (group_value - 1)) == 0) {
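		/* (group_value & (group_value - 1)) == 0 means exactly one bit is set. */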
		/*
		 * There is only one entry in this slot so it must be moved from the
		 * single slot table to the dual slot table */
		sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
		sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
	} else {
		/*
		 * There are two entries in the slot so it must be moved from the dual
		 * slot table to the triple slot table. */
		sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
		sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
	}

	sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
}

/**
 * sci_remote_node_table_release_triple_remote_node()
 * @remote_node_table: This is the remote node table to which the remote node
 *    index is to be freed.
 * @remote_node_index: This is the remote node index that is being released.
 *
 * This method will release a group of three consecutive remote nodes back to
 * the pool of free remote nodes.
 */
static void sci_remote_node_table_release_triple_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u16 remote_node_index)
{
	u32 group_index;

	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;

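	/* Return the group to the triple group table and mark all of its nodes available. */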
	sci_remote_node_table_set_group_index(
		remote_node_table, 2, group_index
		);

	sci_remote_node_table_set_group(remote_node_table, group_index);
}

/**
 * sci_remote_node_table_release_remote_node_index()
 * @remote_node_table: The remote node table to which the remote node index is
 *    to be freed.
 * @remote_node_count: This is the count of consecutive remote nodes that are
 *    to be freed.
 * @remote_node_index: This is the remote node index that is being released.
 *
 * This method will release the remote node index back into the remote node
 * table free pool.
 */
void sci_remote_node_table_release_remote_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count,
	u16 remote_node_index)
{
	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
		sci_remote_node_table_release_single_remote_node(
			remote_node_table, remote_node_index);
	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
		sci_remote_node_table_release_triple_remote_node(
			remote_node_table, remote_node_index);
	}
}