kvm_main.c: diff from revision 26b8345abc75a7404716864710930407b7d873f9 (old) to revision ed922739c9199bf515a3e7fec3e319ce1edeef2a (new). Lines added in the new revision are prefixed with "+", lines removed from the old revision with "-"; unchanged context is unmarked.
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.

--- 498 unchanged lines hidden ---

 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

+/* Iterate over each memslot intersecting [start, last] (inclusive) range */
+#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	      \
+	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
+	     node;							      \
+	     node = interval_tree_iter_next(node, start, last))

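The new macro above is a thin wrapper around the kernel's generic interval tree (include/linux/interval_tree.h), which keeps inclusive [start, last] ranges in an rb_root_cached and returns only nodes that overlap the queried interval. A minimal, self-contained sketch of that API as used here (illustrative only; the ranges/r1/r2 names are made up and are not from this file):

#include <linux/interval_tree.h>
#include <linux/printk.h>

static struct rb_root_cached ranges = RB_ROOT_CACHED;

static void interval_tree_demo(void)
{
	static struct interval_tree_node r1 = { .start = 0x1000, .last = 0x1fff };
	static struct interval_tree_node r2 = { .start = 0x5000, .last = 0x5fff };
	struct interval_tree_node *node;

	interval_tree_insert(&r1, &ranges);
	interval_tree_insert(&r2, &ranges);

	/* Visits only r1: the query [0x1800, 0x2800] never reaches 0x5000. */
	for (node = interval_tree_iter_first(&ranges, 0x1800, 0x2800);
	     node;
	     node = interval_tree_iter_next(node, 0x1800, 0x2800))
		pr_info("overlap: [%lx, %lx]\n", node->start, node->last);
}
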
static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

+	if (WARN_ON_ONCE(range->end <= range->start))
+		return 0;
+
	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		struct interval_tree_node *node;
+
		slots = __kvm_memslots(kvm, i);
-		kvm_for_each_memslot(slot, slots) {
+		kvm_for_each_memslot_in_hva_range(node, slots,
+						  range->start, range->end - 1) {
			unsigned long hva_start, hva_end;

+			slot = container_of(node, struct kvm_memory_slot, hva_node);
			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));
-			if (hva_start >= hva_end)
-				continue;

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.pte = range->pte;
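
A note on the endpoint conventions in the hunk above (editor's illustration, not part of the patch): range->end is exclusive while the interval tree stores inclusive last values, hence the lookup over [range->start, range->end - 1]. Because the tree only returns memslots that actually overlap that interval, the clamped window below can no longer be empty, which is why the old "if (hva_start >= hva_end) continue;" check is dropped. A small standalone sketch of that argument, using a hypothetical helper name:

#include <linux/minmax.h>

static bool clamped_window_nonempty(unsigned long start, unsigned long end,	     /* [start, end) */
				    unsigned long slot_start, unsigned long slot_last) /* inclusive */
{
	unsigned long hva_start, hva_end;

	/* Overlap test the interval tree applies against [start, end - 1]. */
	if (slot_start > end - 1 || slot_last < start)
		return false;	/* such a slot is never visited by the loop above */

	hva_start = max(start, slot_start);
	hva_end = min(end, slot_last + 1);
	return hva_start < hva_end;	/* always true for slots the tree returns */
}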

--- 319 unchanged lines hidden ---

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		return NULL;

+	slots->hva_tree = RB_ROOT_CACHED;
	hash_init(slots->id_hash);

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)

--- 388 unchanged lines hidden ---

	return 0;
}

static void kvm_replace_memslot(struct kvm_memslots *slots,
				struct kvm_memory_slot *old,
				struct kvm_memory_slot *new)
{
	/*
-	 * Remove the old memslot from the hash list, copying the node data
-	 * would corrupt the list.
+	 * Remove the old memslot from the hash list and interval tree, copying
+	 * the node data would corrupt the structures.
	 */
	if (old) {
		hash_del(&old->id_node);
+		interval_tree_remove(&old->hva_node, &slots->hva_tree);

		if (!new)
			return;

		/* Copy the source *data*, not the pointer, to the destination. */
		*new = *old;
+	} else {
+		/* If @old is NULL, initialize @new's hva range. */
+		new->hva_node.start = new->userspace_addr;
+		new->hva_node.last = new->userspace_addr +
+				     (new->npages << PAGE_SHIFT) - 1;
	}

	/* (Re)Add the new memslot. */
	hash_add(slots->id_hash, &new->id_node, new->id);
+	interval_tree_insert(&new->hva_node, &slots->hva_tree);
}

static void kvm_shift_memslot(struct kvm_memslots *slots, int dst, int src)
{
	struct kvm_memory_slot *mslots = slots->memslots;

	kvm_replace_memslot(slots, &mslots[src], &mslots[dst]);
}
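
The fields manipulated above are declared in include/linux/kvm_host.h, which is not part of this file's diff. A sketch of what the usage here implies about those structures (types suffixed _sketch are illustrative; only the index-related members are shown, everything else is elided):

#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct kvm_memory_slot_sketch {
	struct hlist_node id_node;		/* hashed by slot id into slots->id_hash */
	struct interval_tree_node hva_node;	/* covers [userspace_addr, userspace_addr + npages * PAGE_SIZE - 1] */
	unsigned long npages;
	unsigned long userspace_addr;
	/* base_gfn, flags, arch data, ... elided */
};

struct kvm_memslots_sketch {
	struct rb_root_cached hva_tree;		/* interval tree over each slot's hva_node */
	/* id_hash, the memslots array, used_slots, ... elided */
};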

--- 14 unchanged lines hidden ---

		return;

	slots->used_slots--;

	if (atomic_read(&slots->last_used_slot) >= slots->used_slots)
		atomic_set(&slots->last_used_slot, 0);

	/*
-	 * Remove the to-be-deleted memslot from the list _before_ shifting
+	 * Remove the to-be-deleted memslot from the list/tree _before_ shifting
	 * the trailing memslots forward, its data will be overwritten.
	 * Defer the (somewhat pointless) copying of the memslot until after
	 * the last slot has been shifted to avoid overwriting said last slot.
	 */
	kvm_replace_memslot(slots, oldslot, NULL);

	for (i = oldslot - mslots; i < slots->used_slots; i++)
		kvm_shift_memslot(slots, i, i + 1);

--- 10 unchanged lines hidden ---

}

/*
 * Move a changed memslot backwards in the array by shifting existing slots
 * with a higher GFN toward the front of the array. Note, the changed memslot
 * itself is not preserved in the array, i.e. not swapped at this time, only
 * its new index into the array is tracked. Returns the changed memslot's
 * current index into the memslots array.
- * The memslot at the returned index will not be in @slots->id_hash by then.
+ * The memslot at the returned index will not be in @slots->hva_tree or
+ * @slots->id_hash by then.
 * @memslot is a detached struct with desired final data of the changed slot.
 */
static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
					    struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	struct kvm_memory_slot *oldslot = id_to_memslot(slots, memslot->id);
	int i;

	if (!oldslot || !slots->used_slots)
		return -1;

	/*
-	 * Delete the slot from the hash table before sorting the remaining
-	 * slots, the slot's data may be overwritten when copying slots as part
-	 * of the sorting process. update_memslots() will unconditionally
-	 * rewrite the entire slot and re-add it to the hash table.
+	 * Delete the slot from the hash table and interval tree before sorting
+	 * the remaining slots, the slot's data may be overwritten when copying
+	 * slots as part of the sorting process. update_memslots() will
+	 * unconditionally rewrite and re-add the entire slot.
	 */
	kvm_replace_memslot(slots, oldslot, NULL);

	/*
	 * Move the target memslot backward in the array by shifting existing
	 * memslots with a higher GFN (than the target memslot) towards the
	 * front of the array.
	 */

--- 9 unchanged lines hidden ---

}

/*
 * Move a changed memslot forwards in the array by shifting existing slots with
 * a lower GFN toward the back of the array. Note, the changed memslot itself
 * is not preserved in the array, i.e. not swapped at this time, only its new
 * index into the array is tracked. Returns the changed memslot's final index
 * into the memslots array.
- * The memslot at the returned index will not be in @slots->id_hash by then.
+ * The memslot at the returned index will not be in @slots->hva_tree or
+ * @slots->id_hash by then.
 * @memslot is a detached struct with desired final data of the new or
 * changed slot.
- * Assumes that the memslot at @start index is not in @slots->id_hash.
+ * Assumes that the memslot at @start index is not in @slots->hva_tree or
+ * @slots->id_hash.
 */
static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
					   struct kvm_memory_slot *memslot,
					   int start)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

--- 176 unchanged lines hidden ---

	new_size = kvm_memslots_size(old->used_slots);

	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
	if (unlikely(!slots))
		return NULL;

	memcpy(slots, old, kvm_memslots_size(old->used_slots));

+	slots->hva_tree = RB_ROOT_CACHED;
	hash_init(slots->id_hash);
-	kvm_for_each_memslot(memslot, slots)
+	kvm_for_each_memslot(memslot, slots) {
+		interval_tree_insert(&memslot->hva_node, &slots->hva_tree);
		hash_add(slots->id_hash, &memslot->id_node, memslot->id);
+	}

	return slots;
}

static void kvm_copy_memslots_arch(struct kvm_memslots *to,
				   struct kvm_memslots *from)
{
	int i;

--- 4149 unchanged lines hidden ---
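
One closing observation on the kvm_dup_memslots() hunk above (editor's note, not text from the patch): after the memcpy(), the copied hva_tree root and hash buckets would still point at nodes embedded in the old slots allocation, so both indexes are rebuilt over the new array instead of being copied. A generic sketch of why a flat copy is not enough, with made-up names:

#include <linux/rbtree.h>
#include <linux/string.h>

struct holder_sketch {
	struct rb_root_cached tree;	/* nodes are embedded in some owner's array */
};

static void copy_needs_rebuild(struct holder_sketch *dst, const struct holder_sketch *src)
{
	memcpy(dst, src, sizeof(*dst));
	/*
	 * dst->tree.rb_root.rb_node still points at rb_node structures owned
	 * by src's array; walking or erasing through dst would touch src's
	 * memory.  Resetting the root and re-inserting every copied element,
	 * as kvm_dup_memslots() does above, relinks the tree entirely within
	 * the new allocation.
	 */
	dst->tree = RB_ROOT_CACHED;
}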