From: Mikulas Patocka <mpatocka@redhat.com>
Date: Tue, 19 Aug 2008 16:46:06 -0400
Subject: [md] dm-snapshots: race condition and data corruption
Message-id: Pine.LNX.4.64.0808191640370.3053@hs20-bc2-1.build.redhat.com
O-Subject: [PATCH 1/2 RHEL 5.3] bz459337 Race condition and data corruption in dm-snapshots
Bugzilla: 459337
RH-Acked-by: Alasdair G Kergon <agk@redhat.com>

Hi

This is the first patch (of 2 total; both must be applied to fix the bug)
for the known data corruption in dm-snapshots.

This patch adds tracking of in-progress I/O to dm-snapshot. It creates the
infrastructure for the next patch, which actually fixes the bug.

testing: I compiled it on my workstation and ran the testcases from
http://people.redhat.com/mpatocka/testcases/
Before the two patches were applied, the testcases
testcase-read-snapshot-vs-write-origin-1.c and
testcase-read-snapshot-vs-write-origin-2.c reported data corruption. After
the patches were applied, the testcases pass.

upstream commit: cd45daffd1f7b53aac0835b23e97f814ec3f10dc (2.6.27-rc1)

dm snapshot: track snapshot reads

Whenever a snapshot read gets mapped through to the origin, track it in a
per-snapshot hash table indexed by chunk number, using memory allocated
from a new per-snapshot mempool.

We need to track these reads to avoid race conditions which will be fixed
by patches that follow.
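As an aside for reviewers, the tracking scheme is easy to see in
isolation: a small fixed-size hash table indexed by chunk number, with an
entry added when a read is mapped through to the origin and removed when
it completes. Below is a minimal user-space sketch of that idea. It is an
illustration only, not the patch's code: the names are invented, a pthread
mutex stands in for the IRQ-safe spinlock, malloc/free for the mempool,
and a plain singly-linked list for the kernel hlist.

	#include <stdio.h>
	#include <stdlib.h>
	#include <pthread.h>

	typedef unsigned long long chunk_t;

	#define TRACKED_CHUNK_HASH_SIZE 16
	#define TRACKED_CHUNK_HASH(x) \
		((unsigned long)(x) & (TRACKED_CHUNK_HASH_SIZE - 1))

	struct tracked_chunk {
		struct tracked_chunk *next;
		chunk_t chunk;
	};

	static struct tracked_chunk *buckets[TRACKED_CHUNK_HASH_SIZE];
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Called when a read is mapped through to the origin. */
	static struct tracked_chunk *track_chunk(chunk_t chunk)
	{
		struct tracked_chunk *c = malloc(sizeof(*c));
		unsigned long h = TRACKED_CHUNK_HASH(chunk);

		if (!c)
			return NULL;

		c->chunk = chunk;

		pthread_mutex_lock(&lock);
		c->next = buckets[h];	/* add at head of the bucket */
		buckets[h] = c;
		pthread_mutex_unlock(&lock);

		return c;
	}

	/* Called when the read completes. */
	static void stop_tracking_chunk(struct tracked_chunk *c)
	{
		unsigned long h = TRACKED_CHUNK_HASH(c->chunk);
		struct tracked_chunk **p;

		pthread_mutex_lock(&lock);
		for (p = &buckets[h]; *p; p = &(*p)->next) {
			if (*p == c) {
				*p = c->next;	/* unlink this entry */
				break;
			}
		}
		pthread_mutex_unlock(&lock);

		free(c);
	}

	int main(void)
	{
		struct tracked_chunk *c = track_chunk(42);

		if (!c)
			return 1;
		printf("chunk %llu tracked in bucket %lu\n",
		       c->chunk, TRACKED_CHUNK_HASH(c->chunk));
		stop_tracking_chunk(c);
		return 0;
	}

The kernel version below differs mainly in allocating from a mempool with
GFP_NOIO (so tracking a read cannot get stuck waiting for memory in the
I/O path) and in using spin_lock_irqsave(), because stop_tracking_chunk()
is reached from the bio end_io path.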
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1a33aea..2c360a1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -39,6 +39,11 @@
  */
 #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
 
+/*
+ * The size of the mempool used to track chunks in use.
+ */
+#define MIN_IOS 256
+
 struct workqueue_struct *ksnapd;
 static void flush_queued_bios(void *data);
 
@@ -92,6 +97,42 @@ static kmem_cache_t *exception_cache;
 static kmem_cache_t *pending_cache;
 static mempool_t *pending_pool;
 
+struct dm_snap_tracked_chunk {
+	struct hlist_node node;
+	chunk_t chunk;
+};
+
+static struct kmem_cache *tracked_chunk_cache;
+
+static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
+						 chunk_t chunk)
+{
+	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
+							GFP_NOIO);
+	unsigned long flags;
+
+	c->chunk = chunk;
+
+	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+	hlist_add_head(&c->node,
+		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
+	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+	return c;
+}
+
+static void stop_tracking_chunk(struct dm_snapshot *s,
+				struct dm_snap_tracked_chunk *c)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+	hlist_del(&c->node);
+	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+	mempool_free(c, s->tracked_chunk_pool);
+}
+
 /*
  * One of these per registered origin, held in the snapshot_origins hash
@@ -491,6 +532,7 @@ static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct dm_snapshot *s;
+	int i;
 	int r = -EINVAL;
 	char persistent;
 	char *origin_path;
@@ -573,11 +615,24 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad5;
 	}
 
+	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
+							 tracked_chunk_cache);
+	if (!s->tracked_chunk_pool) {
+		ti->error = "Could not allocate tracked_chunk mempool for "
+			    "tracking reads";
+		goto bad6;
+	}
+
+	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
+
+	spin_lock_init(&s->tracked_chunk_lock);
+
 	/* Metadata must only be loaded into one table at once */
 	r = s->store.read_metadata(&s->store);
 	if (r < 0) {
 		ti->error = "Failed to read snapshot metadata";
-		goto bad6;
+		goto bad_load_and_register;
 	} else if (r > 0) {
 		s->valid = 0;
 		DMWARN("Snapshot is marked invalid.");
@@ -591,7 +646,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	if (register_snapshot(s)) {
 		r = -EINVAL;
 		ti->error = "Cannot register snapshot origin";
-		goto bad6;
+		goto bad_load_and_register;
 	}
 
 	ti->private = s;
@@ -599,6 +654,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	return 0;
 
+ bad_load_and_register:
+	mempool_destroy(s->tracked_chunk_pool);
+
  bad6:
 	kcopyd_client_destroy(s->kcopyd_client);
 
@@ -622,6 +680,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 static void snapshot_dtr(struct dm_target *ti)
 {
+#ifdef CONFIG_DM_DEBUG
+	int i;
+#endif
 	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
 
 	flush_workqueue(ksnapd);
@@ -630,6 +691,13 @@ static void snapshot_dtr(struct dm_target *ti)
 	/* After this returns there can be no new kcopyd jobs. */
 	unregister_snapshot(s);
 
+#ifdef CONFIG_DM_DEBUG
+	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
+#endif
+
+	mempool_destroy(s->tracked_chunk_pool);
+
 	kcopyd_client_destroy(s->kcopyd_client);
 
 	exit_exception_table(&s->pending, pending_cache);
@@ -979,14 +1047,10 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 			start_copy(pe);
 			goto out;
 		}
-	} else
-		/*
-		 * FIXME: this read path scares me because we
-		 * always use the origin when we have a pending
-		 * exception. However I can't think of a
-		 * situation where this is wrong - ejt.
-		 */
+	} else {
 		bio->bi_bdev = s->origin->bdev;
+		map_context->ptr = track_chunk(s, chunk);
+	}
 
  out_unlock:
 	up_write(&s->lock);
@@ -994,6 +1058,18 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
 	return r;
 }
 
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+			   int error, union map_info *map_context)
+{
+	struct dm_snapshot *s = ti->private;
+	struct dm_snap_tracked_chunk *c = map_context->ptr;
+
+	if (c)
+		stop_tracking_chunk(s, c);
+
+	return 0;
+}
+
 static void snapshot_resume(struct dm_target *ti)
 {
 	struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
@@ -1274,6 +1350,7 @@ static struct target_type snapshot_target = {
 	.ctr = snapshot_ctr,
 	.dtr = snapshot_dtr,
 	.map = snapshot_map,
+	.end_io = snapshot_end_io,
 	.resume = snapshot_resume,
 	.status = snapshot_status,
 };
@@ -1321,11 +1398,22 @@ static int __init dm_snapshot_init(void)
 		goto bad4;
 	}
 
+	tracked_chunk_cache =
+		kmem_cache_create("dm-tracked-chunk",
+				  sizeof(struct dm_snap_tracked_chunk),
+				  __alignof__(struct dm_snap_tracked_chunk),
+				  0, NULL, NULL);
+	if (!tracked_chunk_cache) {
+		DMERR("Couldn't create cache to track chunks in use.");
+		r = -ENOMEM;
+		goto bad5;
+	}
+
 	pending_pool = mempool_create_slab_pool(128, pending_cache);
 	if (!pending_pool) {
 		DMERR("Couldn't create pending pool.");
 		r = -ENOMEM;
-		goto bad5;
+		goto bad_pending_pool;
 	}
 
 	ksnapd = create_singlethread_workqueue("ksnapd");
@@ -1339,6 +1427,8 @@
  bad6:
 	mempool_destroy(pending_pool);
 
+ bad_pending_pool:
+	kmem_cache_destroy(tracked_chunk_cache);
  bad5:
 	kmem_cache_destroy(pending_cache);
  bad4:
@@ -1370,6 +1460,7 @@ static void __exit dm_snapshot_exit(void)
 	mempool_destroy(pending_pool);
 	kmem_cache_destroy(pending_cache);
 	kmem_cache_destroy(exception_cache);
+	kmem_cache_destroy(tracked_chunk_cache);
 }
 
 /* Module hooks */
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index cc02695..1cb1b2a 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -132,6 +132,10 @@ struct exception_store {
 	void *context;
 };
 
+#define DM_TRACKED_CHUNK_HASH_SIZE	16
+#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
+					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
+
 struct dm_snapshot {
 	uint64_t features;
 	struct rw_semaphore lock;
@@ -177,6 +181,11 @@ struct dm_snapshot {
 
 	/* The on disk metadata handler */
 	struct exception_store store;
+
+	/* Chunks with outstanding reads */
+	mempool_t *tracked_chunk_pool;
+	spinlock_t tracked_chunk_lock;
+	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
 };
 
 /*
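A note on where this is heading: once reads mapped through to the origin
are tracked, deciding whether a given chunk still has a read in flight is
a walk of one hash bucket under the same lock. Roughly, and only as a
sketch (the real helper is introduced by the second patch; the name below
is merely indicative):

	static int chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
	{
		struct dm_snap_tracked_chunk *c;
		struct hlist_node *hn;
		unsigned long flags;
		int found = 0;

		spin_lock_irqsave(&s->tracked_chunk_lock, flags);

		/* Scan only the one bucket this chunk can hash to. */
		hlist_for_each_entry(c, hn,
		    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)],
		    node) {
			if (c->chunk == chunk) {
				found = 1;
				break;
			}
		}

		spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

		return found;
	}

The origin-write path can then wait for such in-flight reads to drain
before copying a chunk, which is what closes the read-vs-write race.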