/*
 * Handle a READ (or READA) bio: look the block up in the cache and
 * dispatch to the hit / miss / uncached paths.
 *
 * Locking: the cache spinlock is acquired here.  On a genuine hit,
 * flashcache_read_hit() is called WITH the lock held and releases it
 * itself; every other path releases the lock in this function before
 * starting any IO.
 */
static void
flashcache_read(struct cache_c *dmc, struct bio *bio)
{
	int index;
	int res;
	struct cacheblock *cacheblk;
	int queued;

	DPRINTK("Got a %s for %llu %u bytes)",
		(bio_rw(bio) == READ ? "READ":"READA"),
		bio->bi_sector, bio->bi_size);

	spin_lock_irq(&dmc->cache_spin_lock);
	res = flashcache_lookup(dmc, bio, &index);
	/*
	 * Handle Cache Hit case first.
	 * We need to handle 2 cases, BUSY and !BUSY. If BUSY, we enqueue the
	 * bio for later.
	 */
	if (res > 0) {
		cacheblk = &dmc->cache[index];
		if ((cacheblk->cache_state & VALID) &&
		    (cacheblk->dbn == bio->bi_sector)) {
			/* Genuine VALID hit: read_hit() drops the spinlock. */
			flashcache_read_hit(dmc, bio, index);
			return;
		}
	}
	/*
	 * In all cases except for a cache hit (and VALID), test for potential
	 * invalidations that we need to do.
	 */
	queued = flashcache_inval_blocks(dmc, bio);
	if (queued) {
		/* queued < 0: invalidation could not be queued; fail the bio. */
		if (unlikely(queued < 0))
			flashcache_bio_endio(bio, -EIO);
		spin_unlock_irq(&dmc->cache_spin_lock);
		return;
	}
	if (res == -1 || flashcache_uncacheable(dmc)) {
		/* No room or non-cacheable */
		spin_unlock_irq(&dmc->cache_spin_lock);
		DPRINTK("Cache read: Block %llu(%lu):%s",
			bio->bi_sector, bio->bi_size, "CACHE MISS & NO ROOM");
		/* No free slot in the target set: kick off cleaning of it. */
		if (res == -1)
			flashcache_clean_set(dmc, hash_block(dmc, bio->bi_sector));
		/* Start uncached IO */
		flashcache_start_uncached_io(dmc, bio);
		return;
	}
	/*
	 * (res == INVALID) Cache Miss
	 * And we found cache blocks to replace
	 * Claim the cache blocks before giving up the spinlock
	 */
	if (dmc->cache[index].cache_state & VALID)
		dmc->replace++;		/* evicting a valid block */
	else
		dmc->cached_blocks++;	/* filling a previously empty slot */
	dmc->cache[index].cache_state = VALID | DISKREADINPROG;
	dmc->cache[index].dbn = bio->bi_sector;
	spin_unlock_irq(&dmc->cache_spin_lock);

	DPRINTK("Cache read: Block %llu(%lu), index = %d:%s",
		bio->bi_sector, bio->bi_size, index, "CACHE MISS & REPLACE");
	flashcache_read_miss(dmc, bio, index);
}
/*
 * Service a read that hit a VALID cache block.
 *
 * Called with the cache spinlock held (taken by flashcache_read());
 * the lock is released on every path before this function returns.
 * If the block is busy (IO in progress, or pending jobs already queued
 * on it), the bio is parked on the block's pending queue instead of
 * being serviced now.
 */
static void
flashcache_read_hit(struct cache_c *dmc, struct bio* bio, int index)
{
	struct cacheblock *cacheblk;
	struct pending_job *pjob;

	cacheblk = &dmc->cache[index];
	if (!(cacheblk->cache_state & BLOCK_IO_INPROG) && (cacheblk->head == NULL)) {
		struct kcached_job *job;

		/* Mark the block busy before dropping the spinlock. */
		cacheblk->cache_state |= CACHEREADINPROG;
		dmc->read_hits++;
		spin_unlock_irq(&dmc->cache_spin_lock);
		DPRINTK("Cache read: Block %llu(%lu), index = %d:%s",
			bio->bi_sector, bio->bi_size, index, "CACHE HIT");
		job = new_kcached_job(dmc, bio, index);
		/* Error injection: force the job allocation to "fail" once. */
		if (unlikely(sysctl_flashcache_error_inject & READ_HIT_JOB_ALLOC_FAIL)) {
			if (job)
				flashcache_free_cache_job(job);
			job = NULL;
			sysctl_flashcache_error_inject &= ~READ_HIT_JOB_ALLOC_FAIL;
		}
		if (unlikely(job == NULL)) {
			/*
			 * We have a read hit, and can't allocate a job.
			 * Since we dropped the spinlock, we have to drain any
			 * pending jobs.
			 */
			DMERR("flashcache: Read (hit) failed ! Can't allocate memory for cache IO, block %lu",
			      cacheblk->dbn);
			flashcache_bio_endio(bio, -EIO);
			spin_lock_irq(&dmc->cache_spin_lock);
			flashcache_free_pending_jobs(dmc, cacheblk, -EIO);
			cacheblk->cache_state &= ~(BLOCK_IO_INPROG);
			spin_unlock_irq(&dmc->cache_spin_lock);
		} else {
			job->action = READCACHE; /* Fetch data from cache */
			atomic_inc(&dmc->nr_jobs);
			dmc->ssd_reads++;
			dm_io_async_bvec(1, &job->cache, READ,
					 bio->bi_io_vec + bio->bi_idx,
					 flashcache_io_callback, job);
			flashcache_unplug_device(dmc->cache_dev->bdev);
		}
	} else {
		/* Block is busy: queue this bio behind the in-flight IO. */
		pjob = flashcache_alloc_pending_job(dmc);
		/* Error injection: force the pending-job allocation to "fail". */
		if (unlikely(sysctl_flashcache_error_inject & READ_HIT_PENDING_JOB_ALLOC_FAIL)) {
			if (pjob) {
				flashcache_free_pending_job(pjob);
				pjob = NULL;
			}
			sysctl_flashcache_error_inject &= ~READ_HIT_PENDING_JOB_ALLOC_FAIL;
		}
		if (pjob == NULL)
			/* Can't even queue it: fail the bio. */
			flashcache_bio_endio(bio, -EIO);
		else
			flashcache_enq_pending(dmc, bio, index, READCACHE, pjob);
		spin_unlock_irq(&dmc->cache_spin_lock);
	}
}
	/*
	 * NOTE(review): fragment of the flashcache_io_callback() switch
	 * (the enclosing function appears elsewhere in this chunk).
	 * READCACHE: the SSD read that serviced a cache hit completed;
	 * optionally verify the checksum, then complete the bio.
	 */
	case READCACHE:
		DPRINTK("flashcache_io_callback: READCACHE %d",
			index);
		spin_lock_irqsave(&dmc->cache_spin_lock, flags);
		/* Error injection: pretend the SSD read failed. */
		if (unlikely(sysctl_flashcache_error_inject & READCACHE_ERROR)) {
			job->error = error = -EIO;
			sysctl_flashcache_error_inject &= ~READCACHE_ERROR;
		}
		VERIFY(cacheblk->cache_state & CACHEREADINPROG);
		spin_unlock_irqrestore(&dmc->cache_spin_lock, flags);
		if (unlikely(error))
			dmc->ssd_read_errors++;
#ifdef FLASHCACHE_DO_CHECKSUMS
		/* Validate the block checksum before handing data back. */
		if (likely(error == 0)) {
			if (flashcache_validate_checksum(job)) {
				DMERR("flashcache_io_callback: Checksum mismatch at disk offset %lu",
				      job->disk.sector);
				error = -EIO;
			}
		}
#endif
		flashcache_bio_endio(bio, error);
		break;
1180static void 1181flashcache_read_miss(struct cache_c *dmc, struct bio* bio, 1182 int index) 1183{ 1184 struct kcached_job *job; 1185 struct cacheblock *cacheblk = &dmc->cache[index]; 1186 1187 job = new_kcached_job(dmc, bio, index); 1188 if (unlikely(sysctl_flashcache_error_inject & READ_MISS_JOB_ALLOC_FAIL)) { 1189 if (job) 1190 flashcache_free_cache_job(job); 1191 job = NULL; 1192 sysctl_flashcache_error_inject &= ~READ_MISS_JOB_ALLOC_FAIL; 1193 } 1194 if (unlikely(job == NULL)) { 1195 /* 1196 * We have a read miss, and can't allocate a job. 1197 * Since we dropped the spinlock, we have to drain any 1198 * pending jobs. 1199 */ 1200 DMERR("flashcache: Read (miss) failed ! Can't allocate memory for cache IO, block %lu", 1201 cacheblk->dbn); 1202 flashcache_bio_endio(bio, -EIO); 1203 spin_lock_irq(&dmc->cache_spin_lock); 1204 dmc->cached_blocks--; 1205 cacheblk->cache_state &= ~VALID; 1206 cacheblk->cache_state |= INVALID; 1207 flashcache_free_pending_jobs(dmc, cacheblk, -EIO); 1208 cacheblk->cache_state &= ~(BLOCK_IO_INPROG); 1209 spin_unlock_irq(&dmc->cache_spin_lock); 1210 } else { 1211 job->action = READDISK; /* Fetch data from the source device */ 1212 atomic_inc(&dmc->nr_jobs); 1213 dmc->disk_reads++; 1214 dm_io_async_bvec(1, &job->disk, READ, 1215 bio->bi_io_vec + bio->bi_idx, 1216 flashcache_io_callback, job); 1217 flashcache_clean_set(dmc, index / dmc->assoc); 1218 } 1219}
/*
 * Completion callback for cache IOs issued via dm_io_async_bvec().
 * Dispatches on job->action.
 *
 * NOTE(review): only the READDISK and READFILL arms of the switch are
 * visible in this chunk (the READCACHE arm and the remainder of the
 * function lie outside it); the code below is an incomplete fragment.
 */
void
flashcache_io_callback(unsigned long error, void *context)
{
	struct kcached_job *job = (struct kcached_job *) context;
	struct cache_c *dmc = job->dmc;
	struct bio *bio;
	unsigned long flags;
	int index = job->index;
	struct cacheblock *cacheblk = &dmc->cache[index];

	VERIFY(index != -1);
	bio = job->bio;
	VERIFY(bio != NULL);
	if (error)
		DMERR("flashcache_io_callback: io error %ld block %lu action %d",
		      error, job->disk.sector, job->action);
	job->error = error;
	switch (job->action) {
	case READDISK:
		/* Disk read for a miss completed. */
		DPRINTK("flashcache_io_callback: READDISK %d",
			index);
		spin_lock_irqsave(&dmc->cache_spin_lock, flags);
		/* Error injection: pretend the disk read failed. */
		if (unlikely(sysctl_flashcache_error_inject & READDISK_ERROR)) {
			job->error = error = -EIO;
			sysctl_flashcache_error_inject &= ~READDISK_ERROR;
		}
		VERIFY(cacheblk->cache_state & DISKREADINPROG);
		spin_unlock_irqrestore(&dmc->cache_spin_lock, flags);
		if (likely(error == 0)) {
			/* Kick off the write to the cache */
			job->action = READFILL;
			flashcache_enqueue_readfill(dmc, job);
			return;
		} else {
			dmc->disk_read_errors++;
			flashcache_bio_endio(bio, error);
		}
		break;
	case READFILL:
		/* SSD write that populated the cache block completed. */
		DPRINTK("flashcache_io_callback: READFILL %d",
			index);
		spin_lock_irqsave(&dmc->cache_spin_lock, flags);
		/* Error injection: pretend the cache fill write failed. */
		if (unlikely(sysctl_flashcache_error_inject & READFILL_ERROR)) {
			job->error = error = -EIO;
			sysctl_flashcache_error_inject &= ~READFILL_ERROR;
		}
		if (unlikely(error))
			dmc->ssd_write_errors++;
		VERIFY(cacheblk->cache_state & DISKREADINPROG);
		spin_unlock_irqrestore(&dmc->cache_spin_lock, flags);
		flashcache_bio_endio(bio, error);
		break;