/*
 * Copyright (c) 2014-2016 Christoph Hellwig.
 */
#include <linux/exportfs.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/pr.h>

#include <linux/nfsd/debug.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include "blocklayoutxdr.h"
#include "pnfs.h"

#define NFSDDBG_FACILITY	NFSDDBG_PNFS


static __be32
nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
		struct nfsd4_layoutget *args)
{
	struct nfsd4_layout_seg *seg = &args->lg_seg;
	struct super_block *sb = inode->i_sb;
	u32 block_size = (1 << inode->i_blkbits);
	struct pnfs_block_extent *bex;
	struct iomap iomap;
	u32 device_generation = 0;
	int error;

	if (seg->offset & (block_size - 1)) {
		dprintk("pnfsd: I/O misaligned\n");
		goto out_layoutunavailable;
	}

	/*
	 * Some clients barf on non-zero block numbers for NONE or INVALID
	 * layouts, so make sure to zero the whole structure.
	 */
	error = -ENOMEM;
	bex = kzalloc(sizeof(*bex), GFP_KERNEL);
	if (!bex)
		goto out_error;
	args->lg_content = bex;

	error = sb->s_export_op->map_blocks(inode, seg->offset, seg->length,
					    &iomap, seg->iomode != IOMODE_READ,
					    &device_generation);
	if (error) {
		if (error == -ENXIO)
			goto out_layoutunavailable;
		goto out_error;
	}

	if (iomap.length < args->lg_minlength) {
		dprintk("pnfsd: extent smaller than minlength\n");
		goto out_layoutunavailable;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (seg->iomode == IOMODE_READ)
			bex->es = PNFS_BLOCK_READ_DATA;
		else
			bex->es = PNFS_BLOCK_READWRITE_DATA;
		bex->soff = (iomap.blkno << 9);
		break;
	case IOMAP_UNWRITTEN:
		if (seg->iomode & IOMODE_RW) {
			/*
			 * Crack monkey special case from section 2.3.1.
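			 * ("Section 2.3.1" presumably refers to the pNFS
			 * block layout spec, RFC 5663: a client that sent a
			 * zero lg_minlength may not cope with being handed an
			 * INVALID_DATA extent, so refuse the layout instead.)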
			 */
			if (args->lg_minlength == 0) {
				dprintk("pnfsd: no soup for you!\n");
				goto out_layoutunavailable;
			}

			bex->es = PNFS_BLOCK_INVALID_DATA;
			bex->soff = (iomap.blkno << 9);
			break;
		}
		/*FALLTHRU*/
	case IOMAP_HOLE:
		if (seg->iomode == IOMODE_READ) {
			bex->es = PNFS_BLOCK_NONE_DATA;
			break;
		}
		/*FALLTHRU*/
	case IOMAP_DELALLOC:
	default:
		WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
		goto out_layoutunavailable;
	}

	error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation);
	if (error)
		goto out_error;
	bex->foff = iomap.offset;
	bex->len = iomap.length;

	seg->offset = iomap.offset;
	seg->length = iomap.length;

	dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es);
	return 0;

out_error:
	seg->length = 0;
	return nfserrno(error);
out_layoutunavailable:
	seg->length = 0;
	return nfserr_layoutunavailable;
}

static __be32
nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
		struct iomap *iomaps, int nr_iomaps)
{
	loff_t new_size = lcp->lc_last_wr + 1;
	struct iattr iattr = { .ia_valid = 0 };
	int error;

	if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
	    timespec_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
		lcp->lc_mtime = current_fs_time(inode->i_sb);
	iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
	iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;

	if (new_size > i_size_read(inode)) {
		iattr.ia_valid |= ATTR_SIZE;
		iattr.ia_size = new_size;
	}

	error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
			nr_iomaps, &iattr);
	kfree(iomaps);
	return nfserrno(error);
}

#ifdef CONFIG_NFSD_BLOCKLAYOUT
static int
nfsd4_block_get_device_info_simple(struct super_block *sb,
		struct nfsd4_getdeviceinfo *gdp)
{
	struct pnfs_block_deviceaddr *dev;
	struct pnfs_block_volume *b;

	dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
		      sizeof(struct pnfs_block_volume), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	gdp->gd_device = dev;

	dev->nr_volumes = 1;
	b = &dev->volumes[0];

	b->type = PNFS_BLOCK_VOLUME_SIMPLE;
	b->simple.sig_len = PNFS_BLOCK_UUID_LEN;
	return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len,
			&b->simple.offset);
}

static __be32
nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	if (sb->s_bdev != sb->s_bdev->bd_contains)
		return nfserr_inval;
	return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp));
}

static __be32
nfsd4_block_proc_layoutcommit(struct inode *inode,
		struct nfsd4_layoutcommit *lcp)
{
	struct iomap *iomaps;
	int nr_iomaps;

	nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
			lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
	if (nr_iomaps < 0)
		return nfserrno(nr_iomaps);

	return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}

const struct nfsd4_layout_ops bl_layout_ops = {
	/*
	 * Pretend that we send notification to the client.  This is a blatant
	 * lie to force recent Linux clients to cache our device IDs.
	 * We rarely ever change the device ID, so the harm of leaking deviceids
	 * for a while isn't too bad.
	 * Unfortunately RFC5661 is a complete mess in this regard, but I
	 * filed errata 4119 for this a while ago, and hopefully the Linux
	 * client will eventually start caching deviceids without this again.
	 */
	.notify_types		=
			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
	.proc_getdeviceinfo	= nfsd4_block_proc_getdeviceinfo,
	.encode_getdeviceinfo	= nfsd4_block_encode_getdeviceinfo,
	.proc_layoutget		= nfsd4_block_proc_layoutget,
	.encode_layoutget	= nfsd4_block_encode_layoutget,
	.proc_layoutcommit	= nfsd4_block_proc_layoutcommit,
};
#endif /* CONFIG_NFSD_BLOCKLAYOUT */

#ifdef CONFIG_NFSD_SCSILAYOUT
static int nfsd4_scsi_identify_device(struct block_device *bdev,
		struct pnfs_block_volume *b)
{
	struct request_queue *q = bdev->bd_disk->queue;
	struct request *rq;
	size_t bufflen = 252, len, id_len;
	u8 *buf, *d, type, assoc;
	int error;

	buf = kzalloc(bufflen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq)) {
		error = -ENOMEM;
		goto out_free_buf;
	}
	blk_rq_set_block_pc(rq);

	error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL);
	if (error)
		goto out_put_request;

	/* INQUIRY with the EVPD bit set, asking for VPD page 0x83. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = 0x83;
	rq->cmd[3] = bufflen >> 8;
	rq->cmd[4] = bufflen & 0xff;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	error = blk_execute_rq(rq->q, NULL, rq, 1);
	if (error) {
		pr_err("pNFS: INQUIRY 0x83 failed with: %x\n",
			rq->errors);
		goto out_put_request;
	}

	len = (buf[2] << 8) + buf[3] + 4;
	if (len > bufflen) {
		pr_err("pNFS: INQUIRY 0x83 response invalid (len = %zd)\n",
			len);
		error = -EIO;
		goto out_put_request;
	}

	/*
	 * Walk the designation descriptors.  Each descriptor starts with a
	 * four byte header: byte 1 carries the association and designator
	 * type, byte 3 the designator length, and the designator itself
	 * follows at byte 4.
	 */
	for (d = buf + 4; d < buf + len; d += id_len + 4) {
		id_len = d[3];
		type = d[1] & 0xf;
		assoc = (d[1] >> 4) & 0x3;

		/*
		 * We only care about EUI-64 and NAA designator types
		 * with LU association.
		 */
		if (assoc != 0x00)
			continue;
		if (type != 0x02 && type != 0x03)
			continue;
		if (id_len != 8 && id_len != 12 && id_len != 16)
			continue;

		b->scsi.code_set = PS_CODE_SET_BINARY;
		b->scsi.designator_type = type == 0x02 ?
			PS_DESIGNATOR_EUI64 : PS_DESIGNATOR_NAA;
		b->scsi.designator_len = id_len;
		memcpy(b->scsi.designator, d + 4, id_len);

		/*
		 * If we found an 8 or 12 byte descriptor, continue on to
		 * see if a 16 byte one is available.  If we find a
		 * 16 byte descriptor we're done.
		 */
		if (id_len == 16)
			break;
	}

out_put_request:
	blk_put_request(rq);
out_free_buf:
	kfree(buf);
	return error;
}

#define NFSD_MDS_PR_KEY		0x0100000000000000ULL

/*
 * We use the client ID as a unique key for the reservations.
 * This allows us to easily fence a client when recalls fail.
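 *
 * For example (hypothetical values): a client whose clientid has
 * cl_boot 0x5f3e1a2b and cl_id 0x0000002a gets the reservation key
 * 0x5f3e1a2b0000002a, while the MDS itself always registers with
 * NFSD_MDS_PR_KEY.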
 */
static u64 nfsd4_scsi_pr_key(struct nfs4_client *clp)
{
	return ((u64)clp->cl_clientid.cl_boot << 32) | clp->cl_clientid.cl_id;
}

static int
nfsd4_block_get_device_info_scsi(struct super_block *sb,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	struct pnfs_block_deviceaddr *dev;
	struct pnfs_block_volume *b;
	const struct pr_ops *ops;
	int error;

	dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) +
		      sizeof(struct pnfs_block_volume), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	gdp->gd_device = dev;

	dev->nr_volumes = 1;
	b = &dev->volumes[0];

	b->type = PNFS_BLOCK_VOLUME_SCSI;
	b->scsi.pr_key = nfsd4_scsi_pr_key(clp);

	error = nfsd4_scsi_identify_device(sb->s_bdev, b);
	if (error)
		return error;

	ops = sb->s_bdev->bd_disk->fops->pr_ops;
	if (!ops) {
		pr_err("pNFS: device %s does not support PRs.\n",
			sb->s_id);
		return -EINVAL;
	}

	error = ops->pr_register(sb->s_bdev, 0, NFSD_MDS_PR_KEY, true);
	if (error) {
		pr_err("pNFS: failed to register key for device %s.\n",
			sb->s_id);
		return -EINVAL;
	}

	error = ops->pr_reserve(sb->s_bdev, NFSD_MDS_PR_KEY,
			PR_EXCLUSIVE_ACCESS_REG_ONLY, 0);
	if (error) {
		pr_err("pNFS: failed to reserve device %s.\n",
			sb->s_id);
		return -EINVAL;
	}

	return 0;
}

static __be32
nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	if (sb->s_bdev != sb->s_bdev->bd_contains)
		return nfserr_inval;
	return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp));
}

static __be32
nfsd4_scsi_proc_layoutcommit(struct inode *inode,
		struct nfsd4_layoutcommit *lcp)
{
	struct iomap *iomaps;
	int nr_iomaps;

	nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
			lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
	if (nr_iomaps < 0)
		return nfserrno(nr_iomaps);

	return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}

static void
nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct block_device *bdev = ls->ls_file->f_path.mnt->mnt_sb->s_bdev;

	bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
			nfsd4_scsi_pr_key(clp), 0, true);
}

const struct nfsd4_layout_ops scsi_layout_ops = {
	/*
	 * Pretend that we send notification to the client.  This is a blatant
	 * lie to force recent Linux clients to cache our device IDs.
	 * We rarely ever change the device ID, so the harm of leaking deviceids
	 * for a while isn't too bad.  Unfortunately RFC5661 is a complete mess
	 * in this regard, but I filed errata 4119 for this a while ago, and
	 * hopefully the Linux client will eventually start caching deviceids
	 * without this again.
	 */
	.notify_types		=
			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
	.proc_getdeviceinfo	= nfsd4_scsi_proc_getdeviceinfo,
	.encode_getdeviceinfo	= nfsd4_block_encode_getdeviceinfo,
	.proc_layoutget		= nfsd4_block_proc_layoutget,
	.encode_layoutget	= nfsd4_block_encode_layoutget,
	.proc_layoutcommit	= nfsd4_scsi_proc_layoutcommit,
	.fence_client		= nfsd4_scsi_fence_client,
};
#endif /* CONFIG_NFSD_SCSILAYOUT */
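
/*
 * Note: both layout types above rely on the exporting filesystem providing
 * export_operations hooks: ->map_blocks and ->commit_blocks for the extent
 * handling in LAYOUTGET/LAYOUTCOMMIT, and ->get_uuid for the simple block
 * volume signature.  The prototypes below are only a sketch inferred from
 * the call sites in this file; see include/linux/exportfs.h for the
 * authoritative definitions.
 *
 *	int (*get_uuid)(struct super_block *sb, u8 *buf, u32 *len,
 *			u64 *offset);
 *	int (*map_blocks)(struct inode *inode, loff_t offset, u64 len,
 *			  struct iomap *iomap, bool write,
 *			  u32 *device_generation);
 *	int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
 *			     int nr_iomaps, struct iattr *iattr);
 */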