/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_read_bytes(ndns, offset, buf, n);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_write_bytes(ndns, offset, buf, n);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb));
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}
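/*
 * Note on ordering in btt_info_write() above: the backup info block
 * (info2off, placed at the end of the arena by alloc_arena()) is written
 * before the primary copy at infooff, presumably so that an interrupted
 * update still leaves at least one intact copy of the superblock on media.
 */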

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	WARN_ON(!super);
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping &= MAP_LBA_MASK;

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		WARN_ONCE(1, "Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
	e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
	ze = (z_flag << 1) + e_flag;
	postmap = raw_mapping & MAP_LBA_MASK;

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}
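/*
 * Quick reference for the map entry flag encoding handled by
 * btt_map_write()/btt_map_read() above, where Z is the trim/zero flag and
 * E is the error flag, stored in the flag bits above MAP_LBA_MASK of a
 * 32-bit map entry:
 *
 *   Z E   meaning on read
 *   0 0   initial (never written) - return the identity mapping lba -> lba
 *   0 1   error entry             - caller sees *error set
 *   1 0   zeroed/trimmed entry    - caller sees *trim set
 *   1 1   normal entry            - postmap LBA is valid
 */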

static int btt_log_read_pair(struct arena_info *arena, u32 lane,
			struct log_entry *ent)
{
	WARN_ON(!ent);
	return arena_read_bytes(arena,
			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
			2 * LOG_ENT_SIZE);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * It also updates the sequence number in this old entry to
 * make it the 'new' one if the mark_flag is set.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct log_entry *ent)
{
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (ent[0].seq == 0) {
		ent[0].seq = cpu_to_le32(1);
		return 0;
	}

	if (ent[0].seq == ent[1].seq)
		return -EINVAL;
	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
		return -EINVAL;

	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
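/*
 * Worked example for btt_log_get_old() above: the on-media sequence number
 * is a small counter that cycles 1 -> 2 -> 3 -> 1 (see the wrap-around in
 * btt_flog_write(); 0 means the slot has never been written). For a pair of
 * slots holding {2, 3}, the slot with 2 is older; for a wrapped pair {3, 1},
 * the slot with 3 is older because 1 is the successor of 3. That is why a
 * difference of exactly one picks the smaller value as old, and any other
 * difference picks the larger value as old.
 */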

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_entry log[2];

	ret = btt_log_read_pair(arena, lane, log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(log);
	if (old_ent < 0 || old_ent > 1) {
		dev_info(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, log[0].seq, log[1].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent)
{
	int ret;
	/*
	 * Ignore the padding in log_entry for calculating log_half.
	 * The entry is 'committed' when we write the sequence number,
	 * and we want to ensure that that is the last thing written.
	 * We don't bother writing the padding as that would be extra
	 * media wear and write amplification
	 */
	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
	void *src = ent;

	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	arena->freelist[lane].block = le32_to_cpu(ent->old_map);

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	int ret;
	u32 i;
	struct log_entry log, zerolog;

	memset(&zerolog, 0, sizeof(zerolog));

	for (i = 0; i < arena->nfree; i++) {
		log.lba = cpu_to_le32(i);
		log.old_map = cpu_to_le32(arena->external_nlba + i);
		log.new_map = cpu_to_le32(arena->external_nlba + i);
		log.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &log);
		if (ret)
			return ret;
		ret = __btt_log_write(arena, i, 1, &zerolog);
		if (ret)
			return ret;
	}

	return 0;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0);
			if (ret)
				return ret;
		}

	}

	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}
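/*
 * On-media layout of a single arena, as implied by the offset arithmetic in
 * alloc_arena() below and mirrored by parse_arena_meta():
 *
 *   | info block | data blocks | BTT map | BTT log | info block copy |
 *   ^ infooff    ^ dataoff     ^ mapoff  ^ logoff  ^ info2off
 *
 * The info blocks are one BTT_PG_SIZE page each, and the map and log sizes
 * are rounded up to BTT_PG_SIZE.
 */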

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
			INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = 1;
	arena->version_minor = 1;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
				BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

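/*
 * Arenas are chained through nextoff: a non-zero nextoff records the size
 * of the current arena (and hence the offset of the next one), while
 * nextoff == 0 marks the final arena. discover_arenas() below walks this
 * chain for an existing BTT, and create_arenas() builds it when carving a
 * fresh namespace into ARENA_MAX_SIZE pieces.
 */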
static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_info(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

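/*
 * Reader/writer coordination via the RTT (read tracking table): a reader in
 * btt_read_pg() publishes the postmap block it is about to read in
 * rtt[lane], then re-reads the map entry to make sure the block was not
 * reassigned in the meantime. A writer in btt_write_pg() spins until no rtt
 * slot references the free block it is about to overwrite, so a block is
 * never rewritten while a read of it is still in flight.
 */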
94541cd8b70SVishal Verma bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len); 94641cd8b70SVishal Verma } 94741cd8b70SVishal Verma 94841cd8b70SVishal Verma return ret; 94941cd8b70SVishal Verma } 95041cd8b70SVishal Verma 95141cd8b70SVishal Verma #else /* CONFIG_BLK_DEV_INTEGRITY */ 95241cd8b70SVishal Verma static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip, 95341cd8b70SVishal Verma struct arena_info *arena, u32 postmap, int rw) 95441cd8b70SVishal Verma { 95541cd8b70SVishal Verma return 0; 95641cd8b70SVishal Verma } 95741cd8b70SVishal Verma #endif 95841cd8b70SVishal Verma 95941cd8b70SVishal Verma static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip, 96041cd8b70SVishal Verma struct page *page, unsigned int off, sector_t sector, 96141cd8b70SVishal Verma unsigned int len) 9625212e11fSVishal Verma { 9635212e11fSVishal Verma int ret = 0; 9645212e11fSVishal Verma int t_flag, e_flag; 9655212e11fSVishal Verma struct arena_info *arena = NULL; 9665212e11fSVishal Verma u32 lane = 0, premap, postmap; 9675212e11fSVishal Verma 9685212e11fSVishal Verma while (len) { 9695212e11fSVishal Verma u32 cur_len; 9705212e11fSVishal Verma 9715212e11fSVishal Verma lane = nd_region_acquire_lane(btt->nd_region); 9725212e11fSVishal Verma 9735212e11fSVishal Verma ret = lba_to_arena(btt, sector, &premap, &arena); 9745212e11fSVishal Verma if (ret) 9755212e11fSVishal Verma goto out_lane; 9765212e11fSVishal Verma 9775212e11fSVishal Verma cur_len = min(btt->sector_size, len); 9785212e11fSVishal Verma 9795212e11fSVishal Verma ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag); 9805212e11fSVishal Verma if (ret) 9815212e11fSVishal Verma goto out_lane; 9825212e11fSVishal Verma 9835212e11fSVishal Verma /* 9845212e11fSVishal Verma * We loop to make sure that the post map LBA didn't change 9855212e11fSVishal Verma * from under us between writing the RTT and doing the actual 9865212e11fSVishal Verma * read. 
		 */
		while (1) {
			u32 new_map;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &t_flag,
						&e_flag);
			if (ret)
				goto out_rtt;

			if (postmap == new_map)
				break;

			postmap = new_map;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret)
			goto out_rtt;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

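/*
 * Write path ordering in btt_write_pg() below: new data is written to a
 * free block first, then the flog entry (old and new postmap for this
 * premap LBA) is committed, and only then is the map entry updated. If the
 * map update is interrupted, btt_freelist_init() replays the flog and
 * finishes the map write on the next startup.
 */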
10695212e11fSVishal Verma } 10705212e11fSVishal Verma 10715212e11fSVishal Verma new_postmap = arena->freelist[lane].block; 10725212e11fSVishal Verma 10735212e11fSVishal Verma /* Wait if the new block is being read from */ 10745212e11fSVishal Verma for (i = 0; i < arena->nfree; i++) 10755212e11fSVishal Verma while (arena->rtt[i] == (RTT_VALID | new_postmap)) 10765212e11fSVishal Verma cpu_relax(); 10775212e11fSVishal Verma 10785212e11fSVishal Verma 10795212e11fSVishal Verma if (new_postmap >= arena->internal_nlba) { 10805212e11fSVishal Verma ret = -EIO; 10815212e11fSVishal Verma goto out_lane; 108241cd8b70SVishal Verma } 108341cd8b70SVishal Verma 108441cd8b70SVishal Verma ret = btt_data_write(arena, new_postmap, page, off, cur_len); 10855212e11fSVishal Verma if (ret) 10865212e11fSVishal Verma goto out_lane; 10875212e11fSVishal Verma 108841cd8b70SVishal Verma if (bip) { 108941cd8b70SVishal Verma ret = btt_rw_integrity(btt, bip, arena, new_postmap, 109041cd8b70SVishal Verma WRITE); 109141cd8b70SVishal Verma if (ret) 109241cd8b70SVishal Verma goto out_lane; 109341cd8b70SVishal Verma } 109441cd8b70SVishal Verma 10955212e11fSVishal Verma lock_map(arena, premap); 10965212e11fSVishal Verma ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL); 10975212e11fSVishal Verma if (ret) 10985212e11fSVishal Verma goto out_map; 10995212e11fSVishal Verma if (old_postmap >= arena->internal_nlba) { 11005212e11fSVishal Verma ret = -EIO; 11015212e11fSVishal Verma goto out_map; 11025212e11fSVishal Verma } 11035212e11fSVishal Verma 11045212e11fSVishal Verma log.lba = cpu_to_le32(premap); 11055212e11fSVishal Verma log.old_map = cpu_to_le32(old_postmap); 11065212e11fSVishal Verma log.new_map = cpu_to_le32(new_postmap); 11075212e11fSVishal Verma log.seq = cpu_to_le32(arena->freelist[lane].seq); 11085212e11fSVishal Verma sub = arena->freelist[lane].sub; 11095212e11fSVishal Verma ret = btt_flog_write(arena, lane, sub, &log); 11105212e11fSVishal Verma if (ret) 11115212e11fSVishal Verma goto out_map; 11125212e11fSVishal Verma 11135212e11fSVishal Verma ret = btt_map_write(arena, premap, new_postmap, 0, 0); 11145212e11fSVishal Verma if (ret) 11155212e11fSVishal Verma goto out_map; 11165212e11fSVishal Verma 11175212e11fSVishal Verma unlock_map(arena, premap); 11185212e11fSVishal Verma nd_region_release_lane(btt->nd_region, lane); 11195212e11fSVishal Verma 11205212e11fSVishal Verma len -= cur_len; 11215212e11fSVishal Verma off += cur_len; 11225212e11fSVishal Verma sector += btt->sector_size >> SECTOR_SHIFT; 11235212e11fSVishal Verma } 11245212e11fSVishal Verma 11255212e11fSVishal Verma return 0; 11265212e11fSVishal Verma 11275212e11fSVishal Verma out_map: 11285212e11fSVishal Verma unlock_map(arena, premap); 11295212e11fSVishal Verma out_lane: 11305212e11fSVishal Verma nd_region_release_lane(btt->nd_region, lane); 11315212e11fSVishal Verma return ret; 11325212e11fSVishal Verma } 11335212e11fSVishal Verma 113441cd8b70SVishal Verma static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, 113541cd8b70SVishal Verma struct page *page, unsigned int len, unsigned int off, 1136*c11f0c0bSJens Axboe bool is_write, sector_t sector) 11375212e11fSVishal Verma { 11385212e11fSVishal Verma int ret; 11395212e11fSVishal Verma 1140*c11f0c0bSJens Axboe if (!is_write) { 114141cd8b70SVishal Verma ret = btt_read_pg(btt, bip, page, off, sector, len); 11425212e11fSVishal Verma flush_dcache_page(page); 11435212e11fSVishal Verma } else { 11445212e11fSVishal Verma flush_dcache_page(page); 114541cd8b70SVishal Verma ret = 
btt_write_pg(btt, bip, sector, page, off, len); 11465212e11fSVishal Verma } 11475212e11fSVishal Verma 11485212e11fSVishal Verma return ret; 11495212e11fSVishal Verma } 11505212e11fSVishal Verma 1151dece1635SJens Axboe static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) 11525212e11fSVishal Verma { 115341cd8b70SVishal Verma struct bio_integrity_payload *bip = bio_integrity(bio); 11545212e11fSVishal Verma struct btt *btt = q->queuedata; 11555212e11fSVishal Verma struct bvec_iter iter; 1156f0dc089cSDan Williams unsigned long start; 11575212e11fSVishal Verma struct bio_vec bvec; 1158abf54548SMike Christie int err = 0; 1159f0dc089cSDan Williams bool do_acct; 11605212e11fSVishal Verma 116141cd8b70SVishal Verma /* 116241cd8b70SVishal Verma * bio_integrity_enabled also checks if the bio already has an 116341cd8b70SVishal Verma * integrity payload attached. If it does, we *don't* do a 116441cd8b70SVishal Verma * bio_integrity_prep here - the payload has been generated by 116541cd8b70SVishal Verma * another kernel subsystem, and we just pass it through. 116641cd8b70SVishal Verma */ 116741cd8b70SVishal Verma if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 11684246a0b6SChristoph Hellwig bio->bi_error = -EIO; 116941cd8b70SVishal Verma goto out; 117041cd8b70SVishal Verma } 117141cd8b70SVishal Verma 1172f0dc089cSDan Williams do_acct = nd_iostat_start(bio, &start); 11735212e11fSVishal Verma bio_for_each_segment(bvec, bio, iter) { 11745212e11fSVishal Verma unsigned int len = bvec.bv_len; 11755212e11fSVishal Verma 11765212e11fSVishal Verma BUG_ON(len > PAGE_SIZE); 11775212e11fSVishal Verma /* Make sure len is in multiples of sector size. */ 11785212e11fSVishal Verma /* XXX is this right? */ 11795212e11fSVishal Verma BUG_ON(len < btt->sector_size); 11805212e11fSVishal Verma BUG_ON(len % btt->sector_size); 11815212e11fSVishal Verma 118241cd8b70SVishal Verma err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, 1183*c11f0c0bSJens Axboe op_is_write(bio_op(bio)), iter.bi_sector); 11845212e11fSVishal Verma if (err) { 11855212e11fSVishal Verma dev_info(&btt->nd_btt->dev, 11865212e11fSVishal Verma "io error in %s sector %lld, len %d,\n", 1187abf54548SMike Christie (op_is_write(bio_op(bio))) ? 
"WRITE" : 1188abf54548SMike Christie "READ", 11895212e11fSVishal Verma (unsigned long long) iter.bi_sector, len); 11904246a0b6SChristoph Hellwig bio->bi_error = err; 1191f0dc089cSDan Williams break; 11925212e11fSVishal Verma } 11935212e11fSVishal Verma } 1194f0dc089cSDan Williams if (do_acct) 1195f0dc089cSDan Williams nd_iostat_end(bio, start); 11965212e11fSVishal Verma 11975212e11fSVishal Verma out: 11984246a0b6SChristoph Hellwig bio_endio(bio); 1199dece1635SJens Axboe return BLK_QC_T_NONE; 12005212e11fSVishal Verma } 12015212e11fSVishal Verma 12025212e11fSVishal Verma static int btt_rw_page(struct block_device *bdev, sector_t sector, 1203*c11f0c0bSJens Axboe struct page *page, bool is_write) 12045212e11fSVishal Verma { 12055212e11fSVishal Verma struct btt *btt = bdev->bd_disk->private_data; 12065212e11fSVishal Verma 1207*c11f0c0bSJens Axboe btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector); 1208*c11f0c0bSJens Axboe page_endio(page, is_write, 0); 12095212e11fSVishal Verma return 0; 12105212e11fSVishal Verma } 12115212e11fSVishal Verma 12125212e11fSVishal Verma 12135212e11fSVishal Verma static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo) 12145212e11fSVishal Verma { 12155212e11fSVishal Verma /* some standard values */ 12165212e11fSVishal Verma geo->heads = 1 << 6; 12175212e11fSVishal Verma geo->sectors = 1 << 5; 12185212e11fSVishal Verma geo->cylinders = get_capacity(bd->bd_disk) >> 11; 12195212e11fSVishal Verma return 0; 12205212e11fSVishal Verma } 12215212e11fSVishal Verma 12225212e11fSVishal Verma static const struct block_device_operations btt_fops = { 12235212e11fSVishal Verma .owner = THIS_MODULE, 12245212e11fSVishal Verma .rw_page = btt_rw_page, 12255212e11fSVishal Verma .getgeo = btt_getgeo, 122658138820SDan Williams .revalidate_disk = nvdimm_revalidate_disk, 12275212e11fSVishal Verma }; 12285212e11fSVishal Verma 12295212e11fSVishal Verma static int btt_blk_init(struct btt *btt) 12305212e11fSVishal Verma { 12315212e11fSVishal Verma struct nd_btt *nd_btt = btt->nd_btt; 12325212e11fSVishal Verma struct nd_namespace_common *ndns = nd_btt->ndns; 12335212e11fSVishal Verma 12345212e11fSVishal Verma /* create a new disk and request queue for btt */ 12355212e11fSVishal Verma btt->btt_queue = blk_alloc_queue(GFP_KERNEL); 12365212e11fSVishal Verma if (!btt->btt_queue) 12375212e11fSVishal Verma return -ENOMEM; 12385212e11fSVishal Verma 12395212e11fSVishal Verma btt->btt_disk = alloc_disk(0); 12405212e11fSVishal Verma if (!btt->btt_disk) { 12415212e11fSVishal Verma blk_cleanup_queue(btt->btt_queue); 12425212e11fSVishal Verma return -ENOMEM; 12435212e11fSVishal Verma } 12445212e11fSVishal Verma 12455212e11fSVishal Verma nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name); 12465212e11fSVishal Verma btt->btt_disk->first_minor = 0; 12475212e11fSVishal Verma btt->btt_disk->fops = &btt_fops; 12485212e11fSVishal Verma btt->btt_disk->private_data = btt; 12495212e11fSVishal Verma btt->btt_disk->queue = btt->btt_queue; 12505212e11fSVishal Verma btt->btt_disk->flags = GENHD_FL_EXT_DEVT; 12515212e11fSVishal Verma 12525212e11fSVishal Verma blk_queue_make_request(btt->btt_queue, btt_make_request); 12535212e11fSVishal Verma blk_queue_logical_block_size(btt->btt_queue, btt->sector_size); 12545212e11fSVishal Verma blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX); 12555212e11fSVishal Verma blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY); 12565212e11fSVishal Verma queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); 12575212e11fSVishal Verma 
	btt->btt_queue->queuedata = btt;

	set_capacity(btt->btt_disk, 0);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt: device with BTT geometry and backing device info
 * @rawsize: raw size in bytes of the backing device
 * @lbasize: lba size of the backing device
 * @uuid: A uuid for the backing device - this is stored on media
 * @nd_region: parent region of the backing device; its lanes are used to
 *		serialize parallel requests
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
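	/*
	 * For example, an lbasize of 4096 or 4104 selects 4096-byte logical
	 * sectors, while 512, 520 or 528 selects 512-byte logical sectors;
	 * any lbasize bytes beyond the sector size are assumed here to be
	 * per-sector metadata, set up as integrity metadata in btt_blk_init().
	 */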
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in discover_arenas: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_info(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

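/*
 * btt_fini() below undoes btt_init(): it tears down the block device, frees
 * the arenas and removes the per-btt debugfs directory. The struct btt
 * itself was allocated with devm_kzalloc() and is released automatically by
 * the driver core when the device goes away.
 */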
/**
 * btt_fini - de-initialize a BTT
 * @btt: the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev), ARENA_MIN_SIZE + SZ_4K);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);