/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * This file contains the implementation of the sci_remote_node_table
 *    public, protected, and private methods.
 */
#include "remote_node_table.h"
#include "remote_node_context.h"

/**
 *
 * @remote_node_table: This is the remote node index table from which the
 *    selection will be made.
 * @group_table_index: This is the index of the group table in which to
 *    search for an available entry.
 *
 * This routine scans the selected group table and returns the absolute bit
 * position of the first available (set) bit, i.e. the index of an available
 * remote node group.  It returns SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if
 * no group is available in that table.
 */
static u32 sci_remote_node_table_get_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u32 dword_index;
	u32 *group_table;
	u32 bit_index;

	group_table = remote_node_table->remote_node_groups[group_table_index];

	for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
		if (group_table[dword_index] != 0) {
			for (bit_index = 0; bit_index < 32; bit_index++) {
				if ((group_table[dword_index] & (1 << bit_index)) != 0) {
					return (dword_index * 32) + bit_index;
				}
			}
		}
	}

	return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
}
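
/*
 * Worked example of the scan above: with group_table[0] == 0x00000000 and
 * group_table[1] == 0x00000008, the empty dword is skipped and the routine
 * returns (1 * 32) + 3 == 35 as the available group index.
 */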

/**
 *
 * @remote_node_table: [out] This is the remote node table in which to clear
 *    the selector.
 * @group_table_index: This is the remote node selector in which the change
 *    will be made.
 * @group_index: This is the bit index in the table to be modified.
 *
 * This method will clear the group index entry in the specified group index
 * table.
 */
static void sci_remote_node_table_clear_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index,
	u32 group_index)
{
	u32 dword_index;
	u32 bit_index;
	u32 *group_table;

	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));

	dword_index = group_index / 32;
	bit_index   = group_index % 32;
	group_table = remote_node_table->remote_node_groups[group_table_index];

	group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
}

/**
 *
 * @remote_node_table: [out] This is the remote node table in which to set
 *    the selector.
 * @group_table_index: This is the remote node selector in which the change
 *    will be made.
 * @group_index: This is the bit position in the table to be modified.
 *
 * This method will set the group index bit entry in the specified group
 * index table.
 */
static void sci_remote_node_table_set_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index,
	u32 group_index)
{
	u32 dword_index;
	u32 bit_index;
	u32 *group_table;

	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));

	dword_index = group_index / 32;
	bit_index   = group_index % 32;
	group_table = remote_node_table->remote_node_groups[group_table_index];

	group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
}

/**
 *
 * @remote_node_table: [out] This is the remote node table in which to modify
 *    the remote node availability.
 * @remote_node_index: This is the remote node index that is being returned to
 *    the table.
 *
 * This method will mark the remote node index as available in the remote node
 * allocation table.
 */
static void sci_remote_node_table_set_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 slot_normalized;
	u32 slot_position;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
		);

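	/*
	 * Layout note: each dword of available_remote_nodes is treated as
	 * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD slots of SCU_STP_REMOTE_NODE_COUNT
	 * remote nodes.  Assuming the usual values (eight slots of three nodes,
	 * one nibble per slot), the sizeof(u32) multiplier below evaluates to 4,
	 * the bit width of one slot.
	 */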
	dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
	slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;

	remote_node_table->available_remote_nodes[dword_location] |=
		1 << (slot_normalized + slot_position);
}

/**
 *
 * @remote_node_table: [out] This is the remote node table from which to clear
 *    the available remote node bit.
 * @remote_node_index: This is the remote node index which is to be cleared
 *    from the table.
 *
 * This method clears the remote node index from the table of available remote
 * nodes.
 */
static void sci_remote_node_table_clear_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 slot_position;
	u32 slot_normalized;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location  = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
	slot_position   = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;

	remote_node_table->available_remote_nodes[dword_location] &=
		~(1 << (slot_normalized + slot_position));
}

/**
 *
 * @remote_node_table: [out] The remote node table from which the slot will be
 *    cleared.
 * @group_index: The index of the slot that is to be cleared.
 *
 * This method clears the entire table slot at the specified slot index.
 */
static void sci_remote_node_table_clear_group(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	remote_node_table->available_remote_nodes[dword_location] = dword_value;
}

/**
 *
 * @remote_node_table: The remote node table in which to set the group.
 * @group_index: The index of the remote node group to set.
 *
 * This method sets an entire remote node group in the remote node table.
 */
static void sci_remote_node_table_set_group(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	remote_node_table->available_remote_nodes[dword_location] = dword_value;
}

/**
 *
 * @remote_node_table: This is the remote node table for which the group
 *    value is to be returned.
 * @group_index: This is the group index to use to find the group value.
 *
 * This method returns the group value for the specified group index, i.e.
 * the bit values at the specified remote node group index.
 */
static u8 sci_remote_node_table_get_group_value(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	dword_location  = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	dword_value = dword_value >> (dword_remainder * 4);

	return (u8)dword_value;
}
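
/*
 * Worked example (assuming eight 4-bit slots per dword): for group_index 5
 * the value returned above is the nibble at bits 20..23 of
 * available_remote_nodes[0]; a value of 0x7 means all three remote nodes in
 * group 5 are currently available.
 */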

/**
 *
 * @remote_node_table: [out] The remote node table which is to be initialized.
 * @remote_node_entries: The number of entries to put in the table.
 *
 * This method will initialize the remote node table for use.
 */
void sci_remote_node_table_initialize(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_entries)
{
	u32 index;

	/*
	 * Initialize the raw data.  We could improve the speed by only
	 * initializing those entries that are actually going to be used. */
	memset(
		remote_node_table->available_remote_nodes,
		0x00,
		sizeof(remote_node_table->available_remote_nodes)
		);

	memset(
		remote_node_table->remote_node_groups,
		0x00,
		sizeof(remote_node_table->remote_node_groups)
		);

	/* Initialize the available remote node sets */
	remote_node_table->available_nodes_array_size = (u16)
							(remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
							+ ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);


	/* Initialize each full DWORD to a FULL SET of remote nodes */
	for (index = 0; index < remote_node_entries; index++) {
		sci_remote_node_table_set_node_index(remote_node_table, index);
	}

	remote_node_table->group_array_size = (u16)
					      (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
					      + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);

	for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
		/*
		 * These are all guaranteed to be full slot values, so fill them in
		 * the available sets of 3 remote nodes */
		sci_remote_node_table_set_group_index(remote_node_table, 2, index);
	}

	/* Now fill in any remainders that we may find */
	if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
		sci_remote_node_table_set_group_index(remote_node_table, 1, index);
	} else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
		sci_remote_node_table_set_group_index(remote_node_table, 0, index);
	}
}
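
/*
 * Illustrative sizing example (hypothetical entry count, assuming 24 remote
 * nodes per dword and 3 nodes per group): for remote_node_entries == 64,
 * available_nodes_array_size rounds up to 3 dwords and group_array_size
 * rounds up to 1 dword; groups 0..20 are placed in the triple-node
 * selector (2), and the single remaining node becomes group 21 in the
 * single-node selector (0).
 */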

/**
 *
 * @remote_node_table: [out] The remote node table from which to allocate a
 *    remote node.
 * @group_table_index: The group table index that is to be used for the search.
 *
 * This method will allocate a single RNi from the remote node table.  The
 * table index determines which remote node group table to search.  This
 * search may fail, in which case another group table can be specified.  The
 * function is designed to allow a search from the available single remote
 * node group up to the triple remote node group.  If an entry is found in the
 * specified table the remote node is removed and the remote node groups are
 * updated.  It returns the RNi value, or an invalid remote node context index
 * if no RNi can be found.
 */
static u16 sci_remote_node_table_allocate_single_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u8 index;
	u8 group_value;
	u32 group_index;
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	group_index = sci_remote_node_table_get_group_index(
		remote_node_table, group_table_index);

	/* An available group was found in the requested table selector */
	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
		group_value = sci_remote_node_table_get_group_value(
			remote_node_table, group_index);

		for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
			if (((1 << index) & group_value) != 0) {
				/* We have selected a bit now clear it */
				remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
							  + index);

				sci_remote_node_table_clear_group_index(
					remote_node_table, group_table_index, group_index
					);

				sci_remote_node_table_clear_node_index(
					remote_node_table, remote_node_index
					);

				if (group_table_index > 0) {
					sci_remote_node_table_set_group_index(
						remote_node_table, group_table_index - 1, group_index
						);
				}

				break;
			}
		}
	}

	return remote_node_index;
}

/**
 *
 * @remote_node_table: This is the remote node table from which to allocate the
 *    remote node entries.
 * @group_table_index: This is the group table index, which must equal two (2)
 *    for this operation.
 *
 * This method will allocate three consecutive remote node context entries.  If
 * there are no remaining triple entries the function will return a failure.
 * It returns the remote node index that represents three consecutive remote
 * node entries, or an invalid remote node context index if none can be found.
 */
static u16 sci_remote_node_table_allocate_triple_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u32 group_index;
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	group_index = sci_remote_node_table_get_group_index(
		remote_node_table, group_table_index);

	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
		remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;

		sci_remote_node_table_clear_group_index(
			remote_node_table, group_table_index, group_index
			);

		sci_remote_node_table_clear_group(
			remote_node_table, group_index
			);
	}

	return remote_node_index;
}

/**
 *
 * @remote_node_table: This is the remote node table from which the remote node
 *    allocation is to take place.
 * @remote_node_count: This is the remote node count, which is one of
 *    SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
 *
 * This method will allocate a remote node that matches the remote node count
 * specified by the caller.  Valid values for the remote node count are
 * SCU_SSP_REMOTE_NODE_COUNT(1) and SCU_STP_REMOTE_NODE_COUNT(3).  It returns
 * the allocated remote node index, or an invalid remote node context index if
 * the allocation fails.
 */
u16 sci_remote_node_table_allocate_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count)
{
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
		remote_node_index =
			sci_remote_node_table_allocate_single_remote_node(
				remote_node_table, 0);

		if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			remote_node_index =
				sci_remote_node_table_allocate_single_remote_node(
					remote_node_table, 1);
		}

		if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			remote_node_index =
				sci_remote_node_table_allocate_single_remote_node(
					remote_node_table, 2);
		}
	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
		remote_node_index =
			sci_remote_node_table_allocate_triple_remote_node(
				remote_node_table, 2);
	}

	return remote_node_index;
}
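
/*
 * Illustrative usage sketch (hypothetical call site and variable names, not
 * the actual caller in this driver):
 *
 *	u16 rni = sci_remote_node_table_allocate_remote_node(
 *			rnt, SCU_SSP_REMOTE_NODE_COUNT);
 *
 *	if (rni != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
 *		... program the remote node context at index rni ...
 *		sci_remote_node_table_release_remote_node_index(
 *			rnt, SCU_SSP_REMOTE_NODE_COUNT, rni);
 *	}
 */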

/**
 *
 * @remote_node_table: [out] The remote node table to which the remote node
 *    index is to be returned.
 * @remote_node_index: The remote node index that is being returned.
 *
 * This method will free a single remote node index back to the remote node
 * table.  This routine will update the remote node groups.
 */
static void sci_remote_node_table_release_single_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u16 remote_node_index)
{
	u32 group_index;
	u8 group_value;

	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;

	group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);

	/*
	 * Assert that we are not trying to add an entry to a slot that is already
	 * full. */
	BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);

	if (group_value == 0x00) {
		/*
		 * There are no entries in this slot so it must be added to the single
		 * slot table. */
		sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
	} else if ((group_value & (group_value - 1)) == 0) {
		/*
		 * There is only one entry in this slot (group_value is a non-zero
		 * power of two, i.e. exactly one bit is set) so it must be moved from
		 * the single slot table to the dual slot table */
		sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
		sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
	} else {
		/*
		 * There are two entries in the slot so it must be moved from the dual
		 * slot table to the triple slot table. */
		sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
		sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
	}

	sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
}
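
/*
 * Worked example of the release above: if the group's availability mask was
 * 0x01 (exactly one node free) before the call, the group moves from the
 * single-node selector (0) to the dual-node selector (1), and the released
 * node's bit is then set in available_remote_nodes.
 */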

/**
 *
 * @remote_node_table: This is the remote node table to which the remote node
 *    index is to be freed.
 * @remote_node_index: The starting remote node index of the group that is
 *    being returned.
 *
 * This method will release a group of three consecutive remote nodes back to
 * the free remote nodes.
 */
static void sci_remote_node_table_release_triple_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u16 remote_node_index)
{
	u32 group_index;

	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;

	sci_remote_node_table_set_group_index(
		remote_node_table, 2, group_index
		);

	sci_remote_node_table_set_group(remote_node_table, group_index);
}

/**
 *
 * @remote_node_table: The remote node table to which the remote node index is
 *    to be freed.
 * @remote_node_count: This is the count of consecutive remote nodes that are
 *    to be freed.
 * @remote_node_index: The remote node index that is being released.
 *
 * This method will release the remote node index back into the remote node
 * table free pool.
 */
void sci_remote_node_table_release_remote_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count,
	u16 remote_node_index)
{
	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
		sci_remote_node_table_release_single_remote_node(
			remote_node_table, remote_node_index);
	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
		sci_remote_node_table_release_triple_remote_node(
			remote_node_table, remote_node_index);
	}
}