/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

static void __scm_free_rq(struct scm_request *scmrq)
{
        struct aob_rq_header *aobrq = to_aobrq(scmrq);

        free_page((unsigned long) scmrq->aob);
        free_page((unsigned long) scmrq->aidaw);
        __scm_free_rq_cluster(scmrq);
        kfree(aobrq);
}

static void scm_free_rqs(void)
{
        struct list_head *iter, *safe;
        struct scm_request *scmrq;

        spin_lock_irq(&list_lock);
        list_for_each_safe(iter, safe, &inactive_requests) {
                scmrq = list_entry(iter, struct scm_request, list);
                list_del(&scmrq->list);
                __scm_free_rq(scmrq);
        }
        spin_unlock_irq(&list_lock);
}

static int __scm_alloc_rq(void)
{
        struct aob_rq_header *aobrq;
        struct scm_request *scmrq;

        aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
        if (!aobrq)
                return -ENOMEM;

        scmrq = (void *) aobrq->data;
        scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
        scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
        if (!scmrq->aob || !scmrq->aidaw) {
                __scm_free_rq(scmrq);
                return -ENOMEM;
        }

        if (__scm_alloc_rq_cluster(scmrq)) {
                __scm_free_rq(scmrq);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&scmrq->list);
        spin_lock_irq(&list_lock);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irq(&list_lock);

        return 0;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
        int ret = 0;

        while (nrqs-- && !ret)
                ret = __scm_alloc_rq();

        return ret;
}

static struct scm_request *scm_request_fetch(void)
{
        struct scm_request *scmrq = NULL;

        spin_lock(&list_lock);
        if (list_empty(&inactive_requests))
                goto out;
        scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
        list_del(&scmrq->list);
out:
        spin_unlock(&list_lock);
        return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
        return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static void scm_request_prepare(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct bio_vec bv;

        msb->bs = MSB_BS_4K;
        scmrq->aob->request.msb_count = 1;
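        /*
         * blk_rq_pos() reports the request start in 512-byte sectors;
         * shifting by 9 yields the byte offset within the SCM increment.
         * A single msb describes the whole request, with the indirect
         * aidaw list holding one data address per segment.
         */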
        msb->scm_addr = scmdev->address +
                ((u64) blk_rq_pos(scmrq->request) << 9);
        msb->oc = (rq_data_dir(scmrq->request) == READ) ?
                MSB_OC_READ : MSB_OC_WRITE;
        msb->flags |= MSB_FLAG_IDA;
        msb->data_addr = (u64) aidaw;

        rq_for_each_segment(bv, scmrq->request, iter) {
                WARN_ON(bv.bv_offset);
                msb->blk_count += bv.bv_len >> 12;
                aidaw->data_addr = (u64) page_address(bv.bv_page);
                aidaw++;
        }
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
                                    struct scm_request *scmrq,
                                    struct request *req)
{
        struct aob_rq_header *aobrq = to_aobrq(scmrq);
        struct aob *aob = scmrq->aob;

        memset(aob, 0, sizeof(*aob));
        memset(scmrq->aidaw, 0, PAGE_SIZE);
        aobrq->scmdev = bdev->scmdev;
        aob->request.cmd_code = ARQB_CMD_MOVE;
        aob->request.data = (u64) aobrq;
        scmrq->request = req;
        scmrq->bdev = bdev;
        scmrq->retries = 4;
        scmrq->error = 0;
        scm_request_cluster_init(scmrq);
}

static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
        if (atomic_read(&bdev->queued_reqs)) {
                /* Queue restart is triggered by the next interrupt. */
                return;
        }
        blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

void scm_request_requeue(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;

        scm_release_cluster(scmrq);
        blk_requeue_request(bdev->rq, scmrq->request);
        atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
        scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;

        scm_release_cluster(scmrq);
        blk_end_request_all(scmrq->request, scmrq->error);
        atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
}

static void scm_blk_request(struct request_queue *rq)
{
        struct scm_device *scmdev = rq->queuedata;
        struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
        struct scm_request *scmrq;
        struct request *req;
        int ret;

        while ((req = blk_peek_request(rq))) {
                if (req->cmd_type != REQ_TYPE_FS) {
                        blk_start_request(req);
                        blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
                        blk_end_request_all(req, -EIO);
                        continue;
                }

                if (!scm_permit_request(bdev, req)) {
                        scm_ensure_queue_restart(bdev);
                        return;
                }
                scmrq = scm_request_fetch();
                if (!scmrq) {
                        SCM_LOG(5, "no request");
                        scm_ensure_queue_restart(bdev);
                        return;
                }
                scm_request_init(bdev, scmrq, req);
                if (!scm_reserve_cluster(scmrq)) {
                        SCM_LOG(5, "cluster busy");
                        scm_request_done(scmrq);
                        return;
                }
                if (scm_need_cluster_request(scmrq)) {
                        atomic_inc(&bdev->queued_reqs);
                        blk_start_request(req);
                        scm_initiate_cluster_request(scmrq);
                        return;
                }
                scm_request_prepare(scmrq);
                atomic_inc(&bdev->queued_reqs);
                blk_start_request(req);

                ret = eadm_start_aob(scmrq->aob);
                if (ret) {
                        SCM_LOG(5, "no subchannel");
                        scm_request_requeue(scmrq);
                        return;
                }
        }
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
        struct aob *aob = scmrq->aob;

        if (scmrq->error == -ETIMEDOUT)
                SCM_LOG(1, "Request timeout");
        else {
                SCM_LOG(1, "Request error");
                SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
        }
        if (scmrq->retries)
                SCM_LOG(1, "Retry request");
        else
                pr_err("An I/O operation to SCM failed with rc=%d\n",
                       scmrq->error);
}
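
/*
 * Completion callback for an AOB started via eadm_start_aob().  Runs in
 * interrupt context, so only record the result, put the request on the
 * per-device finished list and leave the actual completion handling to
 * the tasklet.
 */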
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
        struct scm_request *scmrq = data;
        struct scm_blk_dev *bdev = scmrq->bdev;

        scmrq->error = error;
        if (error)
                __scmrq_log_error(scmrq);

        spin_lock(&bdev->lock);
        list_add_tail(&scmrq->list, &bdev->finished_requests);
        spin_unlock(&bdev->lock);
        tasklet_hi_schedule(&bdev->tasklet);
}

static void scm_blk_handle_error(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        if (scmrq->error != -EIO)
                goto restart;

        /* For -EIO the response block is valid. */
        switch (scmrq->aob->response.eqc) {
        case EQC_WR_PROHIBIT:
                spin_lock_irqsave(&bdev->lock, flags);
                if (bdev->state != SCM_WR_PROHIBIT)
                        pr_info("%lx: Write access to the SCM increment is suspended\n",
                                (unsigned long) bdev->scmdev->address);
                bdev->state = SCM_WR_PROHIBIT;
                spin_unlock_irqrestore(&bdev->lock, flags);
                goto requeue;
        default:
                break;
        }

restart:
        if (!eadm_start_aob(scmrq->aob))
                return;

requeue:
        spin_lock_irqsave(&bdev->rq_lock, flags);
        scm_request_requeue(scmrq);
        spin_unlock_irqrestore(&bdev->rq_lock, flags);
}

static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
        struct scm_request *scmrq;
        unsigned long flags;

        spin_lock_irqsave(&bdev->lock, flags);
        while (!list_empty(&bdev->finished_requests)) {
                scmrq = list_first_entry(&bdev->finished_requests,
                                         struct scm_request, list);
                list_del(&scmrq->list);
                spin_unlock_irqrestore(&bdev->lock, flags);

                if (scmrq->error && scmrq->retries-- > 0) {
                        scm_blk_handle_error(scmrq);

                        /* Request restarted or requeued, handle next. */
                        spin_lock_irqsave(&bdev->lock, flags);
                        continue;
                }

                if (scm_test_cluster_request(scmrq)) {
                        scm_cluster_request_irq(scmrq);
                        spin_lock_irqsave(&bdev->lock, flags);
                        continue;
                }

                scm_request_finish(scmrq);
                spin_lock_irqsave(&bdev->lock, flags);
        }
        spin_unlock_irqrestore(&bdev->lock, flags);
        /* Look out for more requests. */
        blk_run_queue(bdev->rq);
}

static const struct block_device_operations scm_blk_devops = {
        .owner = THIS_MODULE,
};
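
/*
 * Set up the block device for one SCM increment: initialize the per-device
 * locks, lists and tasklet, create the request queue with a 4K logical
 * block size, allocate the gendisk and register it.  Disk names run from
 * scma..scmz and scmaa..scmzz, which limits the driver to 702 devices.
 */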
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
        struct request_queue *rq;
        int len, ret = -ENOMEM;
        unsigned int devindex, nr_max_blk;

        devindex = atomic_inc_return(&nr_devices) - 1;
        /* scma..scmz + scmaa..scmzz */
        if (devindex > 701) {
                ret = -ENODEV;
                goto out;
        }

        bdev->scmdev = scmdev;
        bdev->state = SCM_OPER;
        spin_lock_init(&bdev->rq_lock);
        spin_lock_init(&bdev->lock);
        INIT_LIST_HEAD(&bdev->finished_requests);
        atomic_set(&bdev->queued_reqs, 0);
        tasklet_init(&bdev->tasklet,
                     (void (*)(unsigned long)) scm_blk_tasklet,
                     (unsigned long) bdev);

        rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
        if (!rq)
                goto out;

        bdev->rq = rq;
        nr_max_blk = min(scmdev->nr_max_block,
                         (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

        blk_queue_logical_block_size(rq, 1 << 12);
        blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
        blk_queue_max_segments(rq, nr_max_blk);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
        scm_blk_dev_cluster_setup(bdev);

        bdev->gendisk = alloc_disk(SCM_NR_PARTS);
        if (!bdev->gendisk)
                goto out_queue;

        rq->queuedata = scmdev;
        bdev->gendisk->driverfs_dev = &scmdev->dev;
        bdev->gendisk->private_data = scmdev;
        bdev->gendisk->fops = &scm_blk_devops;
        bdev->gendisk->queue = rq;
        bdev->gendisk->major = scm_major;
        bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

        len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
        if (devindex > 25) {
                len += snprintf(bdev->gendisk->disk_name + len,
                                DISK_NAME_LEN - len, "%c",
                                'a' + (devindex / 26) - 1);
                devindex = devindex % 26;
        }
        snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
                 'a' + devindex);

        /* 512 byte sectors */
        set_capacity(bdev->gendisk, scmdev->size >> 9);
        add_disk(bdev->gendisk);
        return 0;

out_queue:
        blk_cleanup_queue(rq);
out:
        atomic_dec(&nr_devices);
        return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
        tasklet_kill(&bdev->tasklet);
        del_gendisk(bdev->gendisk);
        blk_cleanup_queue(bdev->gendisk->queue);
        put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
        unsigned long flags;

        spin_lock_irqsave(&bdev->lock, flags);
        if (bdev->state == SCM_WR_PROHIBIT)
                pr_info("%lx: Write access to the SCM increment is restored\n",
                        (unsigned long) bdev->scmdev->address);
        bdev->state = SCM_OPER;
        spin_unlock_irqrestore(&bdev->lock, flags);
}
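
/*
 * Module initialization: register a dynamic block major, preallocate
 * nr_requests request structures, set up the s390 debug feature and
 * finally register the scm device driver via scm_drv_init().
 */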
static int __init scm_blk_init(void)
{
        int ret = -EINVAL;

        if (!scm_cluster_size_valid())
                goto out;

        ret = register_blkdev(0, "scm");
        if (ret < 0)
                goto out;

        scm_major = ret;
        ret = scm_alloc_rqs(nr_requests);
        if (ret)
                goto out_free;

        scm_debug = debug_register("scm_log", 16, 1, 16);
        if (!scm_debug) {
                ret = -ENOMEM;
                goto out_free;
        }

        debug_register_view(scm_debug, &debug_hex_ascii_view);
        debug_set_level(scm_debug, 2);

        ret = scm_drv_init();
        if (ret)
                goto out_dbf;

        return ret;

out_dbf:
        debug_unregister(scm_debug);
out_free:
        scm_free_rqs();
        unregister_blkdev(scm_major, "scm");
out:
        return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
        scm_drv_cleanup();
        debug_unregister(scm_debug);
        scm_free_rqs();
        unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);