/*
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *               2000 - 2002 Heinz Mauelshagen, Sistina Software
 *
 * LVM snapshot driver is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * LVM snapshot driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

/*
 * Changelog
 *
 *    05/07/2000 - implemented persistent snapshot support
 *    23/11/2000 - used cpu_to_le64 rather than my own macro
 *    25/01/2001 - Put LockPage back in
 *    01/02/2001 - A dropped snapshot is now set as inactive
 *    14/02/2001 - tidied debug statements
 *    19/02/2001 - changed rawio calls to pass in preallocated buffer_heads
 *    26/02/2001 - introduced __brw_kiovec to remove a lot of conditional
 *                 code (HM)
 *    07/03/2001 - fixed COW exception table not persistent on 2.2 (HM)
 *    12/03/2001 - lvm_pv_get_number changes:
 *                 o made it static
 *                 o renamed it to _pv_get_number
 *                 o pv number is returned in new uint * arg
 *                 o -1 returned on error
 *                 lvm_snapshot_fill_COW_table has a return value too.
 *    15/10/2001 - fix snapshot alignment problem [CM]
 *               - fix snapshot full oops (always check lv_block_exception) [CM]
 *    26/06/2002 - support for new list_move macro [patch@luckynet.dynu.com]
 *    26/07/2002 - removed conditional list_move macro because we will
 *                 discontinue LVM1 before 2.6 anyway
 *    27/08/2003 - fixed unsafe list handling in lvm_find_exception_table() [HM]
 */

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/iobuf.h>
#include <linux/lvm.h>
#include <linux/devfs_fs_kernel.h>

#include "lvm-internal.h"

static char *lvm_snap_version __attribute__ ((unused)) =
    "LVM " LVM_RELEASE_NAME " snapshot code (" LVM_RELEASE_DATE ")\n";

extern const char *const lvm_name;
extern int lvm_blocksizes[];

void lvm_snapshot_release(lv_t *);

static int _write_COW_table_block(vg_t * vg, lv_t * lv, int idx,
				  const char **reason);
static void _disable_snapshot(vg_t * vg, lv_t * lv);

static inline int __brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
			       kdev_t dev, unsigned long b[], int size,
			       lv_t * lv)
{
	return brw_kiovec(rw, nr, iovec, dev, b, size);
}

static int _pv_get_number(vg_t * vg, kdev_t rdev, uint * pvn)
{
	uint p;

	for (p = 0; p < vg->pv_max; p++) {
		if (vg->pv[p] == NULL)
			continue;

		if (vg->pv[p]->pv_dev == rdev)
			break;
	}

	if (p >= vg->pv_max) {
		/* bad news, the snapshot COW table is probably corrupt */
		printk(KERN_ERR
		       "%s -- _pv_get_number failed for rdev = %u\n",
		       lvm_name, rdev);
		return -1;
	}

	*pvn = vg->pv[p]->pv_number;
	return 0;
}

#define hashfn(dev,block,mask,chunk_size) \
	((HASHDEV(dev)^((block)/(chunk_size))) & (mask))

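/*
 * Illustrative example (not in the original source): with a chunk size of
 * 64 sectors and a 256-bucket table (mask 0xff), a lookup for sector 8200
 * hashes to bucket (HASHDEV(dev) ^ (8200 / 64)) & 0xff, i.e. the device
 * hash XORed with chunk number 128, masked to the table size.  All sectors
 * of one chunk on one device therefore land in the same bucket.
 */
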
static inline lv_block_exception_t *lvm_find_exception_table(kdev_t
							     org_dev,
							     unsigned long
							     org_start,
							     lv_t * lv)
{
	struct list_head *hash_table = lv->lv_snapshot_hash_table, *next, *n;
	unsigned long mask = lv->lv_snapshot_hash_mask;
	int chunk_size = lv->lv_chunk_size;
	lv_block_exception_t *ret;

	hash_table =
	    &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
	ret = NULL;

	/* safe iteration: remember the next element before moving entries */
	for (next = hash_table->next, n = next->next; next != hash_table;
	     next = n, n = next->next) {
		lv_block_exception_t *exception;

		exception = list_entry(next, lv_block_exception_t, hash);
		if (exception->rsector_org == org_start &&
		    exception->rdev_org == org_dev) {
			/* move the hit to the front of its bucket */
			list_move(next, hash_table);
			ret = exception;
			break;
		}
	}

	return ret;
}

inline void lvm_hash_link(lv_block_exception_t * exception,
			  kdev_t org_dev, unsigned long org_start,
			  lv_t * lv)
{
	struct list_head *hash_table = lv->lv_snapshot_hash_table;
	unsigned long mask = lv->lv_snapshot_hash_mask;
	int chunk_size = lv->lv_chunk_size;

	if (!hash_table)
		BUG();
	hash_table =
	    &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
	list_add(&exception->hash, hash_table);
}

/*
 * Determine if we already have a snapshot chunk for this block.
 * Return: 1 if the chunk already exists
 *         0 if we need to COW this block and allocate a new chunk
 *        -1 if the snapshot was disabled because it ran out of space
 *
 * We need to be holding at least a read lock on lv->lv_lock.
 */
int lvm_snapshot_remap_block(kdev_t * org_dev, unsigned long *org_sector,
			     unsigned long pe_start, lv_t * lv)
{
	int ret;
	unsigned long pe_off, pe_adjustment, __org_start;
	kdev_t __org_dev;
	int chunk_size = lv->lv_chunk_size;
	lv_block_exception_t *exception;

	if (!lv->lv_block_exception)
		return -1;

	pe_off = pe_start % chunk_size;
	pe_adjustment = (*org_sector - pe_off) % chunk_size;
	__org_start = *org_sector - pe_adjustment;
	__org_dev = *org_dev;
	ret = 0;
	exception = lvm_find_exception_table(__org_dev, __org_start, lv);
	if (exception) {
		*org_dev = exception->rdev_new;
		*org_sector = exception->rsector_new + pe_adjustment;
		ret = 1;
	}
	return ret;
}

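/*
 * Illustrative caller sketch (hypothetical, not part of this file): the
 * request remap path is expected to use this function roughly as
 *
 *	kdev_t dev = lv->lv_dev;
 *	unsigned long sec = rsector;
 *
 *	switch (lvm_snapshot_remap_block(&dev, &sec, pe_start, lv)) {
 *	case  1: ...read the already-copied chunk at (dev, sec)...
 *	case  0: ...chunk not remapped yet; read the origin device...
 *	case -1: ...snapshot has been dropped; fail the request...
 *	}
 */
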
void lvm_drop_snapshot(vg_t * vg, lv_t * lv_snap, const char *reason)
{
	kdev_t last_dev;
	int i;

	/* no exception storage space available for this snapshot
	   or error on this snapshot --> release it */
	invalidate_buffers(lv_snap->lv_dev);

	/* wipe the snapshot since it's inconsistent now */
	_disable_snapshot(vg, lv_snap);

	for (i = last_dev = 0; i < lv_snap->lv_remap_ptr; i++) {
		if (lv_snap->lv_block_exception[i].rdev_new != last_dev) {
			last_dev = lv_snap->lv_block_exception[i].rdev_new;
			invalidate_buffers(last_dev);
		}
	}

	lvm_snapshot_release(lv_snap);
	lv_snap->lv_status &= ~LV_ACTIVE;

	printk(KERN_INFO
	       "%s -- giving up to snapshot %s on %s: %s\n",
	       lvm_name, lv_snap->lv_snapshot_org->lv_name,
	       lv_snap->lv_name, reason);
}

static inline int lvm_snapshot_prepare_blocks(unsigned long *blocks,
					      unsigned long start,
					      int nr_sectors,
					      int blocksize)
{
	int i, sectors_per_block, nr_blocks;

	sectors_per_block = blocksize / SECTOR_SIZE;

	if (start & (sectors_per_block - 1))
		return 0;

	nr_blocks = nr_sectors / sectors_per_block;
	start /= sectors_per_block;

	for (i = 0; i < nr_blocks; i++)
		blocks[i] = start++;

	return 1;
}

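/*
 * Worked example (illustrative numbers): with blocksize 4096 and
 * SECTOR_SIZE 512, sectors_per_block is 8.  A chunk starting at sector 992
 * for 16 sectors is 8-sector aligned (992 & 7 == 0), so nr_blocks = 2 and
 * blocks[] = { 124, 125 }.  An unaligned start (e.g. sector 993) returns 0
 * and the caller falls back to the "couldn't prepare kiovec blocks" path.
 */
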
inline int lvm_get_blksize(kdev_t dev)
{
	int correct_size = BLOCK_SIZE, i, major;

	major = MAJOR(dev);
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(dev)];
		if (i)
			correct_size = i;
	}

	return correct_size;
}

#ifdef DEBUG_SNAPSHOT
static inline void invalidate_snap_cache(unsigned long start,
					 unsigned long nr, kdev_t dev)
{
	struct buffer_head *bh;
	int sectors_per_block, i, blksize, minor;

	minor = MINOR(dev);
	blksize = lvm_blocksizes[minor];
	sectors_per_block = blksize >> 9;
	nr /= sectors_per_block;
	start /= sectors_per_block;

	for (i = 0; i < nr; i++) {
		bh = get_hash_table(dev, start++, blksize);
		if (bh)
			bforget(bh);
	}
}
#endif

int lvm_snapshot_fill_COW_page(vg_t * vg, lv_t * lv_snap)
{
	int id = 0, is = lv_snap->lv_remap_ptr;
	ulong blksize_snap;
	lv_COW_table_disk_t *lv_COW_table = (lv_COW_table_disk_t *)
	    page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);

	if (is == 0)
		return 0;

	is--;
	blksize_snap =
	    lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
	is -= is % (blksize_snap / sizeof(lv_COW_table_disk_t));

	memset(lv_COW_table, 0, blksize_snap);
	for (; is < lv_snap->lv_remap_ptr; is++, id++) {
		/* store new COW_table entry */
		lv_block_exception_t *be =
		    lv_snap->lv_block_exception + is;
		uint pvn;

		if (_pv_get_number(vg, be->rdev_org, &pvn))
			goto bad;

		lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
		lv_COW_table[id].pv_org_rsector =
		    cpu_to_le64(be->rsector_org);

		if (_pv_get_number(vg, be->rdev_new, &pvn))
			goto bad;

		lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
		lv_COW_table[id].pv_snap_rsector =
		    cpu_to_le64(be->rsector_new);
	}

	return 0;

      bad:
	printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed",
	       lvm_name);
	return -1;
}

/*
 * writes a COW exception table sector to disk (HM)
 *
 * We need to hold a write lock on lv_snap->lv_lock.
 */
int lvm_write_COW_table_block(vg_t * vg, lv_t * lv_snap)
{
	int r;
	const char *err;

	if ((r = _write_COW_table_block(vg, lv_snap,
					lv_snap->lv_remap_ptr - 1, &err)))
		lvm_drop_snapshot(vg, lv_snap, err);
	return r;
}

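/*
 * On-disk layout sketch (illustrative figures): each COW table entry is an
 * lv_COW_table_disk_t holding four little-endian 64-bit fields
 * (pv_org_number, pv_org_rsector, pv_snap_number, pv_snap_rsector), i.e.
 * 32 bytes.  A 4096-byte block therefore carries 128 entries, and entry
 * idx of a PE lands in block (idx % entries_per_pe) / 128 of that PE's
 * COW table area.
 */
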
/*
 * copy on write handler for one snapshot logical volume
 *
 * read the original blocks and store them on the snapshot volume's
 * exception storage; if no exception storage space is free any longer,
 * release the snapshot.
 *
 * this routine gets called for each _first_ write to a physical chunk.
 *
 * We need to hold a write lock on lv_snap->lv_lock.  It is assumed that
 * lv->lv_block_exception is non-NULL (checked by lvm_snapshot_remap_block())
 * when this function is called.
 */
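/*
 * Worked example of the chunk arithmetic below (illustrative numbers):
 * with chunk_size = 16 sectors, org_pe_start = 384 and a faulting write at
 * org_phys_sector = 1000, pe_off = 384 % 16 = 0, so
 * org_start = 1000 - ((1000 - 0) % 16) = 992, the chunk-aligned start;
 * the whole 16-sector chunk 992..1007 is copied before the write is
 * allowed through.
 */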
int lvm_snapshot_COW(kdev_t org_phys_dev,
		     unsigned long org_phys_sector,
		     unsigned long org_pe_start,
		     unsigned long org_virt_sector,
		     vg_t * vg, lv_t * lv_snap)
{
	const char *reason;
	unsigned long org_start, snap_start, snap_phys_dev, virt_start,
	    pe_off;
	unsigned long phys_start;
	int idx = lv_snap->lv_remap_ptr, chunk_size =
	    lv_snap->lv_chunk_size;
	struct kiobuf *iobuf = lv_snap->lv_iobuf;
	unsigned long *blocks = iobuf->blocks;
	int blksize_snap, blksize_org, min_blksize, max_blksize;
	int max_sectors, nr_sectors;

	/* check if we are out of snapshot space */
	if (idx >= lv_snap->lv_remap_end)
		goto fail_out_of_space;

	/* calculate physical boundaries of source chunk */
	pe_off = org_pe_start % chunk_size;
	org_start =
	    org_phys_sector - ((org_phys_sector - pe_off) % chunk_size);
	virt_start = org_virt_sector - (org_phys_sector - org_start);

	/* calculate physical boundaries of destination chunk */
	snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
	snap_start = lv_snap->lv_block_exception[idx].rsector_new;

#ifdef DEBUG_SNAPSHOT
	printk(KERN_INFO
	       "%s -- COW: "
	       "org %s faulting %lu start %lu, snap %s start %lu, "
	       "size %d, pe_start %lu pe_off %lu, virt_sec %lu\n",
	       lvm_name,
	       kdevname(org_phys_dev), org_phys_sector, org_start,
	       kdevname(snap_phys_dev), snap_start,
	       chunk_size, org_pe_start, pe_off, org_virt_sector);
#endif

	blksize_org = lvm_sectsize(org_phys_dev);
	blksize_snap = lvm_sectsize(snap_phys_dev);
	max_blksize = max(blksize_org, blksize_snap);
	min_blksize = min(blksize_org, blksize_snap);
	max_sectors = KIO_MAX_SECTORS * (min_blksize >> 9);

	if (chunk_size % (max_blksize >> 9))
		goto fail_blksize;

	/* Don't change org_start, we need it to fill in the exception table */
	phys_start = org_start;

	while (chunk_size) {
		nr_sectors = min(chunk_size, max_sectors);
		chunk_size -= nr_sectors;

		iobuf->length = nr_sectors << 9;

		if (!lvm_snapshot_prepare_blocks(blocks, phys_start,
						 nr_sectors, blksize_org))
			goto fail_prepare;

		if (__brw_kiovec(READ, 1, &iobuf, org_phys_dev, blocks,
				 blksize_org,
				 lv_snap) != (nr_sectors << 9))
			goto fail_raw_read;

		if (!lvm_snapshot_prepare_blocks(blocks, snap_start,
						 nr_sectors, blksize_snap))
			goto fail_prepare;

		if (__brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, blocks,
				 blksize_snap,
				 lv_snap) != (nr_sectors << 9))
			goto fail_raw_write;

		phys_start += nr_sectors;
		snap_start += nr_sectors;
	}

#ifdef DEBUG_SNAPSHOT
	/* invalidate the logical snapshot buffer cache */
	invalidate_snap_cache(virt_start, lv_snap->lv_chunk_size,
			      lv_snap->lv_dev);
#endif

	/* the original chunk is now stored on the snapshot volume
	   so update the exception table */
	lv_snap->lv_block_exception[idx].rdev_org = org_phys_dev;
	lv_snap->lv_block_exception[idx].rsector_org = org_start;

	lvm_hash_link(lv_snap->lv_block_exception + idx,
		      org_phys_dev, org_start, lv_snap);
	lv_snap->lv_remap_ptr = idx + 1;
	if (lv_snap->lv_snapshot_use_rate > 0) {
		if (lv_snap->lv_remap_ptr * 100 / lv_snap->lv_remap_end >=
		    lv_snap->lv_snapshot_use_rate)
			wake_up_interruptible(&lv_snap->lv_snapshot_wait);
	}
	return 0;

	/* slow path */
      out:
	lvm_drop_snapshot(vg, lv_snap, reason);
	return 1;

      fail_out_of_space:
	reason = "out of space";
	goto out;
      fail_raw_read:
	reason = "read error";
	goto out;
      fail_raw_write:
	reason = "write error";
	goto out;
      fail_blksize:
	reason = "blocksize error";
	goto out;

      fail_prepare:
	reason = "couldn't prepare kiovec blocks "
	    "(start probably isn't block aligned)";
	goto out;
}

int lvm_snapshot_alloc_iobuf_pages(struct kiobuf *iobuf, int sectors)
{
	int bytes, nr_pages, err, i;

	bytes = sectors * SECTOR_SIZE;
	nr_pages = (bytes + ~PAGE_MASK) >> PAGE_SHIFT;
	err = expand_kiobuf(iobuf, nr_pages);
	if (err)
		goto out;

	err = -ENOMEM;
	iobuf->locked = 1;
	iobuf->nr_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out;

		iobuf->maplist[i] = page;
		LockPage(page);
		iobuf->nr_pages++;
	}
	iobuf->offset = 0;

	err = 0;

      out:
	return err;
}

/* allow the hash buckets to use at most 2% of physical memory */
static int calc_max_buckets(void)
{
	unsigned long mem;

	mem = num_physpages << PAGE_SHIFT;
	mem /= 100;
	mem *= 2;
	mem /= sizeof(struct list_head);

	return mem;
}

int lvm_snapshot_alloc_hash_table(lv_t * lv)
{
	int err;
	unsigned long buckets, max_buckets, size;
	struct list_head *hash;

	buckets = lv->lv_remap_end;
	max_buckets = calc_max_buckets();
	buckets = min(buckets, max_buckets);
	/* round down to a power of two so buckets - 1 works as a mask */
	while (buckets & (buckets - 1))
		buckets &= (buckets - 1);

	size = buckets * sizeof(struct list_head);

	err = -ENOMEM;
	hash = vmalloc(size);
	lv->lv_snapshot_hash_table = hash;

	if (!hash)
		goto out;
	lv->lv_snapshot_hash_table_size = size;

	lv->lv_snapshot_hash_mask = buckets - 1;
	while (buckets--)
		INIT_LIST_HEAD(hash + buckets);
	err = 0;
      out:
	return err;
}

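/*
 * Example (illustrative): clearing the lowest set bit repeatedly turns
 * buckets = 1000 (0x3e8) into 512 (0x200), the largest power of two not
 * above it, so lv_snapshot_hash_mask becomes 511 and hashfn() can mask
 * instead of taking a modulo.
 */
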
int lvm_snapshot_alloc(lv_t * lv_snap)
{
	int ret;

	/* allocate kiovec to do chunk io */
	ret = alloc_kiovec(1, &lv_snap->lv_iobuf);
	if (ret)
		goto out;

	ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_iobuf,
					     KIO_MAX_SECTORS);
	if (ret)
		goto out_free_kiovec;

	/* allocate kiovec to do exception table io */
	ret = alloc_kiovec(1, &lv_snap->lv_COW_table_iobuf);
	if (ret)
		goto out_free_kiovec;

	ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_COW_table_iobuf,
					     PAGE_SIZE / SECTOR_SIZE);
	if (ret)
		goto out_free_both_kiovecs;

	ret = lvm_snapshot_alloc_hash_table(lv_snap);
	if (ret)
		goto out_free_both_kiovecs;

      out:
	return ret;

      out_free_both_kiovecs:
	unmap_kiobuf(lv_snap->lv_COW_table_iobuf);
	free_kiovec(1, &lv_snap->lv_COW_table_iobuf);
	lv_snap->lv_COW_table_iobuf = NULL;

      out_free_kiovec:
	unmap_kiobuf(lv_snap->lv_iobuf);
	free_kiovec(1, &lv_snap->lv_iobuf);
	lv_snap->lv_iobuf = NULL;
	vfree(lv_snap->lv_snapshot_hash_table);
	lv_snap->lv_snapshot_hash_table = NULL;
	goto out;
}

void lvm_snapshot_release(lv_t * lv)
{
	if (lv->lv_block_exception) {
		vfree(lv->lv_block_exception);
		lv->lv_block_exception = NULL;
	}
	if (lv->lv_snapshot_hash_table) {
		vfree(lv->lv_snapshot_hash_table);
		lv->lv_snapshot_hash_table = NULL;
		lv->lv_snapshot_hash_table_size = 0;
	}
	if (lv->lv_iobuf) {
		kiobuf_wait_for_io(lv->lv_iobuf);
		unmap_kiobuf(lv->lv_iobuf);
		free_kiovec(1, &lv->lv_iobuf);
		lv->lv_iobuf = NULL;
	}
	if (lv->lv_COW_table_iobuf) {
		kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
		unmap_kiobuf(lv->lv_COW_table_iobuf);
		free_kiovec(1, &lv->lv_COW_table_iobuf);
		lv->lv_COW_table_iobuf = NULL;
	}
}

static int _write_COW_table_block(vg_t * vg, lv_t * lv_snap,
				  int idx, const char **reason)
{
	int blksize_snap;
	int end_of_table;
	int idx_COW_table;
	uint pvn;
	ulong snap_pe_start, COW_table_sector_offset,
	    COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
	ulong blocks[1];
	kdev_t snap_phys_dev;
	lv_block_exception_t *be;
	struct kiobuf *COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
	lv_COW_table_disk_t *lv_COW_table =
	    (lv_COW_table_disk_t *) page_address(lv_snap->
						 lv_COW_table_iobuf->
						 maplist[0]);

	COW_chunks_per_pe = LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg, lv_snap);
	COW_entries_per_pe = LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg, lv_snap);

	/* get physical address of destination chunk */
	snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
	snap_pe_start =
	    lv_snap->lv_block_exception[idx -
					(idx %
					 COW_entries_per_pe)].rsector_new -
	    lv_snap->lv_chunk_size;

	blksize_snap = lvm_sectsize(snap_phys_dev);

	COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t);
	idx_COW_table = idx % COW_entries_per_pe % COW_entries_per_block;

	if (idx_COW_table == 0)
		memset(lv_COW_table, 0, blksize_snap);

	/* sector offset into the on disk COW table */
	COW_table_sector_offset =
	    (idx % COW_entries_per_pe) / (SECTOR_SIZE /
					  sizeof(lv_COW_table_disk_t));

	/* COW table block to write next */
	blocks[0] =
	    (snap_pe_start +
	     COW_table_sector_offset) >> (blksize_snap >> 10);

	/* store new COW_table entry */
	be = lv_snap->lv_block_exception + idx;
	if (_pv_get_number(vg, be->rdev_org, &pvn))
		goto fail_pv_get_number;

	lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
	lv_COW_table[idx_COW_table].pv_org_rsector =
	    cpu_to_le64(be->rsector_org);
	if (_pv_get_number(vg, snap_phys_dev, &pvn))
		goto fail_pv_get_number;

	lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
	lv_COW_table[idx_COW_table].pv_snap_rsector =
	    cpu_to_le64(be->rsector_new);

	COW_table_iobuf->length = blksize_snap;
	/* COW_table_iobuf->nr_pages = 1; */

	if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
			 blocks, blksize_snap, lv_snap) != blksize_snap)
		goto fail_raw_write;

	/* initialization of next COW exception table block with zeroes */
	end_of_table = idx % COW_entries_per_pe == COW_entries_per_pe - 1;
	if (idx_COW_table % COW_entries_per_block ==
	    COW_entries_per_block - 1 || end_of_table) {
		/* don't go beyond the end */
		if (idx + 1 >= lv_snap->lv_remap_end)
			goto out;

		memset(lv_COW_table, 0, blksize_snap);

		if (end_of_table) {
			idx++;

			/* get physical address of next destination chunk */
			snap_phys_dev =
			    lv_snap->lv_block_exception[idx].rdev_new;
			snap_pe_start =
			    lv_snap->lv_block_exception[idx -
							(idx %
							 COW_entries_per_pe)].
			    rsector_new - lv_snap->lv_chunk_size;
			blksize_snap = lvm_sectsize(snap_phys_dev);
			blocks[0] = snap_pe_start >> (blksize_snap >> 10);
		} else
			blocks[0]++;

		if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
				 blocks, blksize_snap, lv_snap) !=
		    blksize_snap)
			goto fail_raw_write;
	}

      out:
	return 0;

      fail_raw_write:
	*reason = "write error";
	return 1;

      fail_pv_get_number:
	*reason = "_pv_get_number failed";
	return 1;
}

/*
 * This function is a bit of a hack; we need to ensure that the
 * snapshot is never made active again, because it will surely be
 * corrupt.  At the moment we do not have access to the LVM metadata
 * from within the kernel.  So we set the first exception to point to
 * sector 1 (which will always be within the metadata, and as such
 * invalid).  User land tools will check for this when they are asked
 * to activate the snapshot and prevent this from happening.
 */
static void _disable_snapshot(vg_t * vg, lv_t * lv)
{
	const char *err;

	lv->lv_block_exception[0].rsector_org =
	    LVM_SNAPSHOT_DROPPED_SECTOR;
	if (_write_COW_table_block(vg, lv, 0, &err) < 0) {
		printk(KERN_ERR "%s -- couldn't disable snapshot: %s\n",
		       lvm_name, err);
	}
}