static void
flashcache_write(struct cache_c *dmc, struct bio *bio)
{
        int index;
        int res;
        struct cacheblock *cacheblk;
        int queued;

        spin_lock_irq(&dmc->cache_spin_lock);
        res = flashcache_lookup(dmc, bio, &index);
        /*
         * If cache hit and !BUSY, simply redirty page.
         * If cache hit and BUSY, must wait for IO in prog to complete.
         * If cache miss and found a block to recycle, we need to
         * (a) invalidate any partial hits,
         * (b) write to cache.
         */
        if (res != -1) {
                /* Found a slot: either a hit or a block to recycle */
                cacheblk = &dmc->cache[index];
                if ((cacheblk->cache_state & VALID) &&
                    (cacheblk->dbn == bio->bi_sector)) {
                        /* Cache Hit */
                        flashcache_write_hit(dmc, bio, index);
                } else {
                        /* Cache Miss, found block to recycle */
                        flashcache_write_miss(dmc, bio, index);
                }
                return;
        }
        /*
         * No room in the set. We cannot write to the cache and have to
         * send the request to disk. Before we do that, we must check
         * for potential invalidations !
         */
        queued = flashcache_inval_blocks(dmc, bio);
        spin_unlock_irq(&dmc->cache_spin_lock);
        if (queued) {
                if (unlikely(queued < 0))
                        flashcache_bio_endio(bio, -EIO);
                return;
        }
        /* Start uncached IO */
        flashcache_start_uncached_io(dmc, bio);
        flashcache_clean_set(dmc, hash_block(dmc, bio->bi_sector));
}
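The hash_block() call above and the index / dmc->assoc expressions seen later both resolve a cache set: the blocks of one set occupy assoc consecutive slots in the flat dmc->cache[] array, so integer division by the associativity recovers the set number from a block index. A minimal userspace sketch of that arithmetic, assuming the 512-way default associativity; hash_block_sketch() is an illustrative stand-in, not flashcache's actual hash:

#include <stdio.h>
#include <stdint.h>

#define ASSOC 512       /* blocks per set; flashcache's default associativity */

/*
 * Illustrative set mapping. The real hash_block() derives the set from
 * the dbn using the cache's block and set-size shifts, but the shape is
 * the same: dbn -> set, and set * ASSOC is the first slot of that set.
 */
static unsigned long hash_block_sketch(uint64_t dbn, unsigned long num_sets)
{
        return (unsigned long)((dbn / ASSOC) % num_sets);
}

int main(void)
{
        unsigned long num_sets = 1024;
        uint64_t dbn = 123456789;
        unsigned long set = hash_block_sketch(dbn, num_sets);
        unsigned long start = set * ASSOC;      /* first slot of the set */
        unsigned long index = start + 17;       /* slot picked by lookup */

        /* index / assoc recovers the set, as in the flashcache_clean_set() calls */
        printf("set=%lu slots=[%lu..%lu] index/assoc=%lu\n",
               set, start, start + ASSOC - 1, index / ASSOC);
        return 0;
}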
static void
flashcache_write_hit(struct cache_c *dmc, struct bio *bio, int index)
{
        struct cacheblock *cacheblk;
        struct pending_job *pjob;
        struct kcached_job *job;

        cacheblk = &dmc->cache[index];
        if (!(cacheblk->cache_state & BLOCK_IO_INPROG) && (cacheblk->head == NULL)) {
                if (cacheblk->cache_state & DIRTY)
                        dmc->dirty_write_hits++;
                dmc->write_hits++;
                cacheblk->cache_state |= CACHEWRITEINPROG;
                spin_unlock_irq(&dmc->cache_spin_lock);
                job = new_kcached_job(dmc, bio, index);
                if (unlikely(sysctl_flashcache_error_inject & WRITE_HIT_JOB_ALLOC_FAIL)) {
                        if (job)
                                flashcache_free_cache_job(job);
                        job = NULL;
                        sysctl_flashcache_error_inject &= ~WRITE_HIT_JOB_ALLOC_FAIL;
                }
                if (unlikely(job == NULL)) {
                        /*
                         * We have a write hit, and can't allocate a job.
                         * Since we dropped the spinlock, we have to drain any
                         * pending jobs.
                         */
                        DMERR("flashcache: Write (hit) failed ! Can't allocate memory for cache IO, block %lu",
                              cacheblk->dbn);
                        flashcache_bio_endio(bio, -EIO);
                        spin_lock_irq(&dmc->cache_spin_lock);
                        flashcache_free_pending_jobs(dmc, cacheblk, -EIO);
                        cacheblk->cache_state &= ~(BLOCK_IO_INPROG);
                        spin_unlock_irq(&dmc->cache_spin_lock);
                } else {
                        job->action = WRITECACHE; /* Write data to the cache device */
                        DPRINTK("Queue job for %llu", bio->bi_sector);
                        atomic_inc(&dmc->nr_jobs);
                        dmc->ssd_writes++;
                        dm_io_async_bvec(1, &job->cache, WRITE,
                                         bio->bi_io_vec + bio->bi_idx,
                                         flashcache_io_callback, job);
                        flashcache_unplug_device(dmc->cache_dev->bdev);
                        flashcache_clean_set(dmc, index / dmc->assoc);
                }
        } else {
                pjob = flashcache_alloc_pending_job(dmc);
                if (unlikely(sysctl_flashcache_error_inject & WRITE_HIT_PENDING_JOB_ALLOC_FAIL)) {
                        if (pjob) {
                                flashcache_free_pending_job(pjob);
                                pjob = NULL;
                        }
                        sysctl_flashcache_error_inject &= ~WRITE_HIT_PENDING_JOB_ALLOC_FAIL;
                }
                if (unlikely(pjob == NULL))
                        flashcache_bio_endio(bio, -EIO);
                else
                        flashcache_enq_pending(dmc, bio, index, WRITECACHE, pjob);
                spin_unlock_irq(&dmc->cache_spin_lock);
        }
}
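When the hit block is busy (BLOCK_IO_INPROG set, or waiters already queued on cacheblk->head), the write cannot proceed immediately: flashcache_enq_pending() parks it on the block until the in-flight IO completes. A sketch of that parking pattern, using illustrative types rather than flashcache's own (pending_io, block_state, and enq_pending_sketch are stand-ins):

#include <stdlib.h>
#include <stdio.h>

struct pending_io {
        struct pending_io *next;
        int action;             /* e.g. WRITECACHE */
        void *bio;              /* the deferred request */
};

struct block_state {
        int io_inprog;                  /* mirrors BLOCK_IO_INPROG */
        struct pending_io *head;        /* waiters, like cacheblk->head */
};

/* Park a request on the block until its in-flight IO completes. */
static int enq_pending_sketch(struct block_state *blk, void *bio, int action)
{
        struct pending_io *p = malloc(sizeof(*p));

        if (p == NULL)
                return -1;      /* caller fails the bio with -EIO */
        p->bio = bio;
        p->action = action;
        p->next = blk->head;    /* LIFO for brevity; replay order is a detail */
        blk->head = p;
        return 0;
}

int main(void)
{
        struct block_state blk = { .io_inprog = 1, .head = NULL };
        int dummy_bio;

        if (enq_pending_sketch(&blk, &dummy_bio, 1) == 0)
                printf("request parked behind in-progress IO\n");
        return 0;
}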
        case WRITECACHE:
                DPRINTK("flashcache_io_callback: WRITECACHE %d",
                        index);
                spin_lock_irqsave(&dmc->cache_spin_lock, flags);
                if (unlikely(sysctl_flashcache_error_inject & WRITECACHE_ERROR)) {
                        job->error = error = -EIO;
                        sysctl_flashcache_error_inject &= ~WRITECACHE_ERROR;
                }
                VERIFY(cacheblk->cache_state & CACHEWRITEINPROG);
                if (likely(error == 0)) {
#ifdef FLASHCACHE_DO_CHECKSUMS
                        dmc->checksum_store++;
                        spin_unlock_irqrestore(&dmc->cache_spin_lock, flags);
                        flashcache_store_checksum(job);
                        /*
                         * We need to update the metadata on a DIRTY->DIRTY as well
                         * since we save the checksums.
                         */
                        push_md_io(job);
                        schedule_work(&_kcached_wq);
                        return;
#else
                        spin_unlock_irqrestore(&dmc->cache_spin_lock, flags);
                        /* Only do cache metadata update on a non-DIRTY->DIRTY transition */
                        if ((cacheblk->cache_state & DIRTY) == 0) {
                                push_md_io(job);
                                schedule_work(&_kcached_wq);
                                return;
                        }
#endif
                } else {
                        dmc->ssd_write_errors++;
                        spin_unlock_irqrestore(&dmc->cache_spin_lock, flags);
                }
                flashcache_bio_endio(bio, error);
                break;
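The interesting branch here is the DIRTY check. Once a block is already DIRTY, the on-SSD metadata already records it as dirty, so a DIRTY->DIRTY overwrite can complete the bio without another metadata write; only a clean->DIRTY transition has to go through push_md_io() first (or every write, when checksums are stored in the metadata). A compilable sketch of just that decision, with checksums standing in for FLASHCACHE_DO_CHECKSUMS:

#include <stdio.h>

enum next_step { COMPLETE_BIO, WRITE_METADATA_FIRST };

/* The completion-path decision above, flattened into one function. */
static enum next_step after_cache_write(int error, int block_dirty, int checksums)
{
        if (error)
                return COMPLETE_BIO;            /* fail the bio, no md update */
        if (checksums)
                return WRITE_METADATA_FIRST;    /* checksums live in the md */
        if (!block_dirty)
                return WRITE_METADATA_FIRST;    /* clean -> DIRTY must hit md */
        return COMPLETE_BIO;                    /* DIRTY -> DIRTY: md already dirty */
}

int main(void)
{
        printf("clean block, no error: %s\n",
               after_cache_write(0, 0, 0) == WRITE_METADATA_FIRST ?
               "metadata first" : "complete bio");
        printf("dirty block, no error: %s\n",
               after_cache_write(0, 1, 0) == WRITE_METADATA_FIRST ?
               "metadata first" : "complete bio");
        return 0;
}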
void
push_md_io(struct kcached_job *job)
{
        push(&_md_io_jobs, job);
}
        process_jobs(&_md_io_jobs, flashcache_md_write);
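So push_md_io() only appends the job to the global _md_io_jobs list; the kcached work item later drains that list and hands each job to flashcache_md_write(). A userspace sketch of this push/process pattern, assuming (as the code suggests) a lock-protected FIFO, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct job {
        struct job *next;
        int id;                 /* payload stand-in */
};

static struct job *head, *tail;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* push(): append under the lock; cheap enough for completion context. */
static void push_sketch(struct job *j)
{
        pthread_mutex_lock(&list_lock);
        j->next = NULL;
        if (tail)
                tail->next = j;
        else
                head = j;
        tail = j;
        pthread_mutex_unlock(&list_lock);
}

/* process_jobs(): drain the queue, handing each job to fn. */
static void process_jobs_sketch(void (*fn)(struct job *))
{
        for (;;) {
                struct job *j;

                pthread_mutex_lock(&list_lock);
                j = head;
                if (j) {
                        head = j->next;
                        if (head == NULL)
                                tail = NULL;
                }
                pthread_mutex_unlock(&list_lock);
                if (j == NULL)
                        break;
                fn(j);  /* flashcache_md_write() in the real code */
        }
}

static void md_write_sketch(struct job *j)
{
        printf("metadata write for job %d\n", j->id);
        free(j);
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct job *j = malloc(sizeof(*j));

                j->id = i;
                push_sketch(j);
        }
        process_jobs_sketch(md_write_sketch);
        return 0;
}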
static void
flashcache_write_miss(struct cache_c *dmc, struct bio *bio, int index)
{
        struct cacheblock *cacheblk;
        struct kcached_job *job;
        int queued;

        cacheblk = &dmc->cache[index];
        queued = flashcache_inval_blocks(dmc, bio);
        if (queued) {
                if (unlikely(queued < 0))
                        flashcache_bio_endio(bio, -EIO);
                spin_unlock_irq(&dmc->cache_spin_lock);
                return;
        }
        if (cacheblk->cache_state & VALID)
                dmc->wr_replace++;
        else
                dmc->cached_blocks++;
        cacheblk->cache_state = VALID | CACHEWRITEINPROG;
        cacheblk->dbn = bio->bi_sector;
        spin_unlock_irq(&dmc->cache_spin_lock);
        job = new_kcached_job(dmc, bio, index);
        if (unlikely(sysctl_flashcache_error_inject & WRITE_MISS_JOB_ALLOC_FAIL)) {
                if (job)
                        flashcache_free_cache_job(job);
                job = NULL;
                sysctl_flashcache_error_inject &= ~WRITE_MISS_JOB_ALLOC_FAIL;
        }
        if (unlikely(job == NULL)) {
                /*
                 * We have a write miss, and can't allocate a job.
                 * Since we dropped the spinlock, we have to drain any
                 * pending jobs.
                 */
                DMERR("flashcache: Write (miss) failed ! Can't allocate memory for cache IO, block %lu",
                      cacheblk->dbn);
                flashcache_bio_endio(bio, -EIO);
                spin_lock_irq(&dmc->cache_spin_lock);
                dmc->cached_blocks--;
                cacheblk->cache_state &= ~VALID;
                cacheblk->cache_state |= INVALID;
                flashcache_free_pending_jobs(dmc, cacheblk, -EIO);
                cacheblk->cache_state &= ~(BLOCK_IO_INPROG);
                spin_unlock_irq(&dmc->cache_spin_lock);
        } else {
                job->action = WRITECACHE;
                atomic_inc(&dmc->nr_jobs);
                dmc->ssd_writes++;
                dm_io_async_bvec(1, &job->cache, WRITE,
                                 bio->bi_io_vec + bio->bi_idx,
                                 flashcache_io_callback, job);
                flashcache_unplug_device(dmc->cache_dev->bdev);
                flashcache_clean_set(dmc, index / dmc->assoc);
        }
}
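Note the unwind when new_kcached_job() fails: the block was already claimed as VALID | CACHEWRITEINPROG and the spinlock dropped, so the error path must retake the lock, undo the accounting, flip the block back to INVALID, and fail any waiters that queued in the window. A small sketch of just the state-bit manipulation (the flag values here are illustrative stand-ins; the real definitions live in flashcache's headers):

#include <stdio.h>

/*
 * Cache block state bits as used above. The values are illustrative;
 * in particular the real BLOCK_IO_INPROG ORs together all of the
 * read/write in-progress flags, not just CACHEWRITEINPROG.
 */
#define INVALID                 0x01
#define VALID                   0x02
#define DIRTY                   0x04
#define CACHEWRITEINPROG        0x10
#define BLOCK_IO_INPROG         CACHEWRITEINPROG

int main(void)
{
        /* The claim taken in flashcache_write_miss() before unlocking. */
        unsigned int state = VALID | CACHEWRITEINPROG;

        /*
         * The unwind when new_kcached_job() fails: back to INVALID with
         * the in-progress bits cleared, so the slot is reusable and any
         * queued waiters can be failed cleanly.
         */
        state &= ~VALID;
        state |= INVALID;
        state &= ~BLOCK_IO_INPROG;

        printf("state after unwind: 0x%x\n", state);    /* 0x1 == INVALID */
        return 0;
}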