--- qed.c (fae25ac7bd6d64724c415027262a532531decc48)
+++ qed.c (fb18de21e01b4c37cd4aa074bb65a0c441e01fb3)

 /*
  * QEMU Enhanced Disk Format
  *
  * Copyright IBM, Corp. 2010
  *
  * Authors:
  *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
  *  Anthony Liguori <aliguori@us.ibm.com>

--- 944 unchanged lines hidden (view full) ---

             qed_start_need_check_timer(s);
         }
     }
 }

 /**
  * Update L1 table with new L2 table offset and write it out
  */
-static void qed_aio_write_l1_update(void *opaque, int ret)
+static int qed_aio_write_l1_update(QEDAIOCB *acb)
 {
-    QEDAIOCB *acb = opaque;
     BDRVQEDState *s = acb_to_s(acb);
     CachedL2Table *l2_table = acb->request.l2_table;
     uint64_t l2_offset = l2_table->offset;
-    int index;
+    int index, ret;

-    if (ret) {
-        qed_aio_complete(acb, ret);
-        return;
-    }
-
     index = qed_l1_index(s, acb->cur_pos);
     s->l1_table->offsets[index] = l2_table->offset;

     ret = qed_write_l1_table(s, index, 1);

     /* Commit the current L2 table to the cache */
     qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

     /* This is guaranteed to succeed because we just committed the entry to the
      * cache.
      */
     acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
     assert(acb->request.l2_table != NULL);

-    qed_aio_next_io(acb, ret);
+    return ret;
 }

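The hunk above removes the callback-style signature: qed_aio_write_l1_update() no longer receives an error code through a ret parameter and no longer completes the request itself via qed_aio_complete(); it now returns the result of qed_write_l1_table() and leaves error handling to its caller. As a rough, self-contained sketch of that conversion pattern (all names below are illustrative stand-ins, not part of qed.c):

#include <stdio.h>

/* Hypothetical request state, standing in for QEDAIOCB. */
struct request {
    int done;
    int status;
};

/* Before: the step acts as a completion callback.  It receives the previous
 * step's error code and finishes the request itself on failure. */
static void update_table_cb(struct request *req, int ret)
{
    if (ret) {
        req->done = 1;              /* complete the request with the error */
        req->status = ret;
        return;
    }
    /* ... perform the update, then chain into the next step ... */
}

/* After: the step only reports its own result; deciding what to do about a
 * failure moves to the caller, as in the new qed_aio_write_l1_update(). */
static int update_table(struct request *req)
{
    (void)req;
    /* ... perform the update ... */
    return 0;                       /* 0 on success, negative errno on error */
}

int main(void)
{
    struct request req = {0};
    int ret;

    update_table_cb(&req, -5);      /* old style: error consumed inside */
    printf("callback style: done=%d status=%d\n", req.done, req.status);

    ret = update_table(&req);       /* new style: caller checks the result */
    printf("return-code style: ret=%d\n", ret);
    return 0;
}
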
 /**
  * Update L2 table with new cluster offsets and write them out
  */
 static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
 {

--- 12 unchanged lines hidden (view full) ---

     index = qed_l2_index(s, acb->cur_pos);
     qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                         offset);

     if (need_alloc) {
         /* Write out the whole new L2 table */
         ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
-        qed_aio_write_l1_update(acb, ret);
+        if (ret) {
+            goto err;
+        }
+        ret = qed_aio_write_l1_update(acb);
+        qed_aio_next_io(acb, ret);
+
     } else {
         /* Write out only the updated part of the L2 table */
         ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                  false);
         qed_aio_next_io(acb, ret);
     }
     return;

--- 633 unchanged lines hidden ---
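In the second hunk the caller takes over the error handling that the callback used to perform: the result of qed_write_l2_table() is checked before chaining into qed_aio_write_l1_update(), failures jump to an err label (defined outside the lines shown here), and the final code is handed on through qed_aio_next_io(). The following self-contained sketch shows the general check-then-goto early-exit pattern this relies on; the names are illustrative only and do not come from qed.c.

#include <stdio.h>

/* Stand-ins for two fallible steps: return 0 on success, negative on error. */
static int write_l2_step(int fail) { return fail ? -5 : 0; }
static int write_l1_step(void)     { return 0; }

static int allocate_cluster(int fail_l2)
{
    int ret;

    ret = write_l2_step(fail_l2);
    if (ret) {
        goto err;                   /* bail out before touching the L1 step */
    }

    ret = write_l1_step();
    if (ret) {
        goto err;
    }
    return 0;

err:
    /* single error path: report (or clean up) once, then propagate the code */
    fprintf(stderr, "allocation failed: %d\n", ret);
    return ret;
}

int main(void)
{
    printf("success path -> %d\n", allocate_cluster(0));
    printf("failure path -> %d\n", allocate_cluster(1));
    return 0;
}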