channel_mgmt.c: 057841713cfff62b4485cdd2b245f05b7ea3ba16 (before) vs. 7415aea6072bab15969b6c3c5b2a193d88095326 (after); changed lines are marked with - and +.
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT

--- 585 unchanged lines hidden ---

		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
-		channel->target_vp = hv_context.vp_index[0];
+		channel->target_vp = hv_cpu_number_to_vp_number(0);
		return;
	}

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */
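Both before and after, this early-return path binds the channel to CPU 0; the change is that target_vp is now obtained through hv_cpu_number_to_vp_number() instead of indexing the vmbus-private hv_context.vp_index[] array. The helper itself is not part of this file; below is a minimal sketch of what it is assumed to look like, if the CPU-to-VP mapping lives in a globally visible table (the hv_vp_index name is an assumption, not taken from this diff):

	/* Sketch only: assumes a global CPU -> Hyper-V virtual processor (VP)
	 * table that is filled in once per CPU during early Hyper-V setup. */
	extern u32 *hv_vp_index;

	static inline int hv_cpu_number_to_vp_number(int cpu_number)
	{
		return hv_vp_index[cpu_number];
	}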
--- 67 unchanged lines hidden ---

			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
-	channel->target_vp = hv_context.vp_index[cur_cpu];
+	channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
}
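The second call site follows the same pattern as the first: the channel records the guest-side CPU number and the corresponding hypervisor-side VP number next to each other. A hypothetical helper, not part of the patch, that captures the pairing both sites share:

	/* Illustrative only: keep a channel's Linux CPU id and Hyper-V VP id in sync. */
	static void vmbus_channel_set_target(struct vmbus_channel *channel, u32 cpu)
	{
		channel->target_cpu = cpu;				/* guest scheduler's view */
		channel->target_vp = hv_cpu_number_to_vp_number(cpu);	/* hypervisor's view */
	}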
static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;

--- 519 unchanged lines hidden ---

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

-	cur_cpu = hv_context.vp_index[get_cpu()];
-	put_cpu();
+	cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

--- 57 unchanged lines hidden ---
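Besides adopting the new helper, the last hunk also drops the get_cpu()/put_cpu() pair: the old code disabled preemption while it indexed the driver-private per-CPU table, while the new code simply asks for the current CPU number. A sketch contrasting the two idioms (the hv_context.vp_index field is taken from the left-hand side of the diff; declarations added for context):

	int cpu, cur_cpu;

	/* Old idiom: hold off preemption while reading the per-driver table. */
	cpu = get_cpu();		/* preempt_disable() + smp_processor_id() */
	cur_cpu = hv_context.vp_index[cpu];
	put_cpu();			/* preempt_enable() */

	/* New idiom: a single lookup keyed by the current CPU number. */
	cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());

The value only steers which sub-channel is preferred; the tail of the loop is elided here, but the function appears to fall back to a default channel when no sub-channel matches, so a CPU number that goes stale right after the lookup is presumably harmless.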