--- linux/include/linux/lvm.h.orig Fri Jan 24 07:57:33 2003 +++ linux/include/linux/lvm.h Thu Mar 6 20:34:36 2003 @@ -80,8 +80,8 @@ #ifndef _LVM_H_INCLUDE #define _LVM_H_INCLUDE -#define LVM_RELEASE_NAME "1.0.5+" -#define LVM_RELEASE_DATE "22/07/2002" +#define LVM_RELEASE_NAME "1.0.7" +#define LVM_RELEASE_DATE "28/03/2003" #define _LVM_KERNEL_H_VERSION "LVM "LVM_RELEASE_NAME" ("LVM_RELEASE_DATE")" @@ -94,7 +94,7 @@ #define LVM_TOTAL_RESET #ifdef __KERNEL__ -#undef LVM_HD_NAME /* display nice names in /proc/partitions */ +#undef LVM_HD_NAME /* display nice names in /proc/partitions */ /* lots of debugging output (see driver source) #define DEBUG_LVM_GET_INFO @@ -118,7 +118,7 @@ causes problems on some platforms. It's not nice but then neither is the alternative. */ struct list_head { - struct list_head *next, *prev; + struct list_head *next, *prev; }; #define __KERNEL__ #include @@ -258,9 +258,9 @@ #define LVM_PE_T_MAX ( ( 1 << ( sizeof ( uint16_t) * 8)) - 2) #define LVM_LV_SIZE_MAX(a) ( ( long long) LVM_PE_T_MAX * (a)->pe_size > ( long long) 1024*1024/SECTOR_SIZE*1024*1024 ? ( long long) 1024*1024/SECTOR_SIZE*1024*1024 : ( long long) LVM_PE_T_MAX * (a)->pe_size) -#define LVM_MIN_PE_SIZE ( 8192L / SECTOR_SIZE) /* 8 KB in sectors */ +#define LVM_MIN_PE_SIZE ( 8192L / SECTOR_SIZE) /* 8 KB in sectors */ #define LVM_MAX_PE_SIZE ( 16L * 1024L * 1024L / SECTOR_SIZE * 1024) /* 16GB in sectors */ -#define LVM_DEFAULT_PE_SIZE ( 4096L * 1024 / SECTOR_SIZE) /* 4 MB in sectors */ +#define LVM_DEFAULT_PE_SIZE ( 32768L * 1024 / SECTOR_SIZE) /* 32 MB in sectors */ #define LVM_DEFAULT_STRIPE_SIZE 16L /* 16 KB */ #define LVM_MIN_STRIPE_SIZE ( PAGE_SIZE/SECTOR_SIZE) /* PAGESIZE in sectors */ #define LVM_MAX_STRIPE_SIZE ( 512L * 1024 / SECTOR_SIZE) /* 512 KB in sectors */ @@ -416,9 +416,9 @@ typedef struct lv_block_exception_v1 { struct list_head hash; uint32_t rsector_org; - kdev_t rdev_org; + kdev_t rdev_org; uint32_t rsector_new; - kdev_t rdev_new; + kdev_t rdev_new; } lv_block_exception_t; /* disk stored pe information */ @@ -462,7 +462,7 @@ uint pe_stale; /* for future use */ pe_disk_t *pe; /* HM */ struct block_device *bd; - char pv_uuid[UUID_LEN+1]; + char pv_uuid[UUID_LEN + 1]; #ifndef __KERNEL__ uint32_t pe_start; /* in sectors */ @@ -473,7 +473,7 @@ /* disk */ typedef struct pv_disk_v2 { uint8_t id[2]; /* Identifier */ - uint16_t version; /* HM lvm version */ + uint16_t version; /* HM lvm version */ lvm_disk_data_t pv_on_disk; lvm_disk_data_t vg_on_disk; lvm_disk_data_t pv_uuidlist_on_disk; @@ -486,14 +486,14 @@ uint32_t pv_number; uint32_t pv_status; uint32_t pv_allocatable; - uint32_t pv_size; /* HM */ + uint32_t pv_size; /* HM */ uint32_t lv_cur; uint32_t pe_size; uint32_t pe_total; uint32_t pe_allocated; - + /* new in struct version 2 */ - uint32_t pe_start; /* in sectors */ + uint32_t pe_start; /* in sectors */ } pv_disk_t; @@ -567,8 +567,8 @@ uint32_t lv_snapshot_hash_table_size; uint32_t lv_snapshot_hash_mask; wait_queue_head_t lv_snapshot_wait; - int lv_snapshot_use_rate; - struct vg_v3 *vg; + int lv_snapshot_use_rate; + struct vg_v3 *vg; uint lv_allocated_snapshot_le; #else @@ -582,14 +582,14 @@ uint8_t vg_name[NAME_LEN]; uint32_t lv_access; uint32_t lv_status; - uint32_t lv_open; /* HM */ - uint32_t lv_dev; /* HM */ + uint32_t lv_open; /* HM */ + uint32_t lv_dev; /* HM */ uint32_t lv_number; /* HM */ uint32_t lv_mirror_copies; /* for future use */ uint32_t lv_recovery; /* " */ uint32_t lv_schedule; /* " */ uint32_t lv_size; - uint32_t lv_snapshot_minor;/* minor number of original */ + 
uint32_t lv_snapshot_minor; /* minor number of original */ uint16_t lv_chunk_size; /* chunk size of snapshot */ uint16_t dummy; uint32_t lv_allocated_le; @@ -626,7 +626,7 @@ struct proc_dir_entry *proc; pv_t *pv[ABS_MAX_PV + 1]; /* physical volume struct pointers */ lv_t *lv[ABS_MAX_LV + 1]; /* logical volume struct pointers */ - char vg_uuid[UUID_LEN+1]; /* volume group UUID */ + char vg_uuid[UUID_LEN + 1]; /* volume group UUID */ #ifdef __KERNEL__ struct proc_dir_entry *vg_dir_pde; struct proc_dir_entry *lv_subdir_pde; @@ -640,20 +640,20 @@ /* disk */ typedef struct vg_disk_v2 { uint8_t vg_uuid[UUID_LEN]; /* volume group UUID */ - uint8_t vg_name_dummy[NAME_LEN-UUID_LEN]; /* rest of v1 VG name */ + uint8_t vg_name_dummy[NAME_LEN - UUID_LEN]; /* rest of v1 VG name */ uint32_t vg_number; /* volume group number */ uint32_t vg_access; /* read/write */ uint32_t vg_status; /* active or not */ - uint32_t lv_max; /* maximum logical volumes */ - uint32_t lv_cur; /* current logical volumes */ - uint32_t lv_open; /* open logical volumes */ - uint32_t pv_max; /* maximum physical volumes */ - uint32_t pv_cur; /* current physical volumes FU */ - uint32_t pv_act; /* active physical volumes */ + uint32_t lv_max; /* maximum logical volumes */ + uint32_t lv_cur; /* current logical volumes */ + uint32_t lv_open; /* open logical volumes */ + uint32_t pv_max; /* maximum physical volumes */ + uint32_t pv_cur; /* current physical volumes FU */ + uint32_t pv_act; /* active physical volumes */ uint32_t dummy; uint32_t vgda; /* volume group descriptor arrays FU */ - uint32_t pe_size; /* physical extent size in sectors */ - uint32_t pe_total; /* total of physical extents */ + uint32_t pe_size; /* physical extent size in sectors */ + uint32_t pe_total; /* total of physical extents */ uint32_t pe_allocated; /* allocated physical extents */ uint32_t pvg_total; /* physical volume groups FU */ } vg_disk_t; @@ -712,40 +712,44 @@ /* Request structure LV_SNAPSHOT_USE_RATE */ typedef struct { - int block; - int rate; + int block; + int rate; } lv_snapshot_use_rate_req_t; /* useful inlines */ -static inline ulong round_up(ulong n, ulong size) { +static inline ulong round_up(ulong n, ulong size) +{ size--; return (n + size) & ~size; } -static inline ulong div_up(ulong n, ulong size) { +static inline ulong div_up(ulong n, ulong size) +{ return round_up(n, size) / size; } /* FIXME: nasty capital letters */ -static int inline LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg_t *vg, lv_t *lv) { +static int inline LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg_t * vg, lv_t * lv) +{ return vg->pe_size / lv->lv_chunk_size; } -static int inline LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg_t *vg, lv_t *lv) { +static int inline LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg_t * vg, lv_t * lv) +{ ulong chunks = vg->pe_size / lv->lv_chunk_size; ulong entry_size = sizeof(lv_COW_table_disk_t); ulong chunk_size = lv->lv_chunk_size * SECTOR_SIZE; ulong entries = (vg->pe_size * SECTOR_SIZE) / - (entry_size + chunk_size); + (entry_size + chunk_size); - if(chunks < 2) + if (chunks < 2) return 0; - for(; entries; entries--) - if((div_up(entries * entry_size, chunk_size) + entries) <= - chunks) + for (; entries; entries--) + if ((div_up(entries * entry_size, chunk_size) + entries) <= + chunks) break; return entries; @@ -753,4 +757,3 @@ #endif /* #ifndef _LVM_H_INCLUDE */ - --- linux/drivers/md/lvm.c.orig Sun Mar 2 15:26:38 2003 +++ linux/drivers/md/lvm.c Thu Mar 6 20:34:36 2003 @@ -8,7 +8,8 @@ * January-March,May,July,September,October 1999 * January,February,July,September-November 2000 * 
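/*
 * Aside: a minimal stand-alone sketch of the arithmetic in
 * LVM_GET_COW_TABLE_ENTRIES_PER_PE() above -- how many COW-table
 * entries plus data chunks fit into one physical extent.  The inputs
 * used here (32 MB extent, 64 KB chunks, a 32-byte on-disk exception
 * entry) are assumptions chosen only to make the numbers visible.
 */
#include <stdio.h>

typedef unsigned long ulong;

#define SECTOR_SIZE 512UL

static ulong round_up(ulong n, ulong size)      /* size: power of two */
{
        size--;
        return (n + size) & ~size;
}

static ulong div_up(ulong n, ulong size)
{
        return round_up(n, size) / size;
}

/* pe_size and chunk_sectors are both given in 512-byte sectors */
static ulong cow_entries_per_pe(ulong pe_size, ulong chunk_sectors,
                                ulong entry_size)
{
        ulong chunks = pe_size / chunk_sectors;
        ulong chunk_bytes = chunk_sectors * SECTOR_SIZE;
        ulong entries = (pe_size * SECTOR_SIZE) / (entry_size + chunk_bytes);

        if (chunks < 2)
                return 0;

        /* shrink until the table blocks plus the chunks fit in the PE */
        for (; entries; entries--)
                if ((div_up(entries * entry_size, chunk_bytes) + entries)
                    <= chunks)
                        break;

        return entries;
}

int main(void)
{
        ulong pe_size = 32768UL * 1024 / SECTOR_SIZE;   /* 32 MB default PE */
        ulong chunk = 64UL * 1024 / SECTOR_SIZE;        /* assumed 64 KB */

        printf("COW entries per PE: %lu\n",
               cow_entries_per_pe(pe_size, chunk, 32UL /* assumed */));
        return 0;
}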
January-May,June,October 2001 - * May-July 2002 + * May-August 2002 + * February 2003 * * * LVM driver is free software; you can redistribute it and/or modify @@ -220,6 +221,12 @@ * - support HDIO_GETGEO_BIG ioctl * 05/07/2002 - fixed OBO error on vg array access [benh@kernel.crashing.org] * 22/07/2002 - streamlined blk_ioctl() call + * 14/08/2002 - stored fs handle in lvm_do_lv_rename + * [kaoru@bsd.tnes.nec.co.jp] + * 06/02/2003 - fix persistent snapshot extend/reduce bug in + * lvm_do_lv_extend_reduce() [dalestephenson@mac.com] + * 04/03/2003 - snapshot extend/reduce memory leak + * - VG PE counter wrong [dalestephenson@mac.com] * */ @@ -283,13 +290,14 @@ /* * External function prototypes */ -static int lvm_make_request_fn(request_queue_t*, int, struct buffer_head*); +static int lvm_make_request_fn(request_queue_t *, int, + struct buffer_head *); static int lvm_blk_ioctl(struct inode *, struct file *, uint, ulong); static int lvm_blk_open(struct inode *, struct file *); static int lvm_blk_close(struct inode *, struct file *); -static int lvm_get_snapshot_use_rate(lv_t *lv_ptr, void *arg); +static int lvm_get_snapshot_use_rate(lv_t * lv_ptr, void *arg); static int lvm_user_bmap(struct inode *, struct lv_bmap *); static int lvm_chr_open(struct inode *, struct file *); @@ -319,13 +327,13 @@ static int lvm_do_lv_extend_reduce(int, char *, lv_t *); static int lvm_do_lv_remove(int, char *, int); static int lvm_do_lv_rename(vg_t *, lv_req_t *, lv_t *); -static int lvm_do_lv_status_byname(vg_t *r, void *); +static int lvm_do_lv_status_byname(vg_t * r, void *); static int lvm_do_lv_status_byindex(vg_t *, void *); static int lvm_do_lv_status_bydev(vg_t *, void *); -static int lvm_do_pe_lock_unlock(vg_t *r, void *); +static int lvm_do_pe_lock_unlock(vg_t * r, void *); -static int lvm_do_pv_change(vg_t*, void*); +static int lvm_do_pv_change(vg_t *, void *); static int lvm_do_pv_status(vg_t *, void *); static int lvm_do_pv_flush(void *); @@ -335,15 +343,15 @@ static int lvm_do_vg_rename(vg_t *, void *); static int lvm_do_vg_remove(int); static void lvm_geninit(struct gendisk *); -static void __update_hardsectsize(lv_t *lv); +static void __update_hardsectsize(lv_t * lv); static void _queue_io(struct buffer_head *bh, int rw); static struct buffer_head *_dequeue_io(void); static void _flush_io(struct buffer_head *bh); -static int _open_pv(pv_t *pv); -static void _close_pv(pv_t *pv); +static int _open_pv(pv_t * pv); +static void _close_pv(pv_t * pv); static unsigned long _sectors_to_k(unsigned long sect); @@ -354,7 +362,8 @@ /* variables */ -char *lvm_version = "LVM version "LVM_RELEASE_NAME"("LVM_RELEASE_DATE")"; +char *lvm_version = + "LVM version " LVM_RELEASE_NAME "(" LVM_RELEASE_DATE ")"; ushort lvm_iop_version = LVM_DRIVER_IOP_VERSION; int loadtime = 0; const char *const lvm_name = LVM_NAME; @@ -397,19 +406,18 @@ struct file_operations lvm_chr_fops = { - owner: THIS_MODULE, - open: lvm_chr_open, - release: lvm_chr_close, - ioctl: lvm_chr_ioctl, + owner:THIS_MODULE, + open:lvm_chr_open, + release:lvm_chr_close, + ioctl:lvm_chr_ioctl, }; /* block device operations structure needed for 2.3.38? 
and above */ -struct block_device_operations lvm_blk_dops = -{ - owner: THIS_MODULE, - open: lvm_blk_open, - release: lvm_blk_close, - ioctl: lvm_blk_ioctl, +struct block_device_operations lvm_blk_dops = { + .owner = THIS_MODULE, + .open = lvm_blk_open, + .release = lvm_blk_close, + .ioctl = lvm_blk_ioctl, }; @@ -419,15 +427,14 @@ static int lvm_hardsectsizes[MAX_LV]; static int lvm_size[MAX_LV]; -static struct gendisk lvm_gendisk = -{ - major: MAJOR_NR, - major_name: LVM_NAME, - minor_shift: 0, - max_p: 1, - part: lvm_hd_struct, - sizes: lvm_size, - nr_real: MAX_LV, +static struct gendisk lvm_gendisk = { + .major = MAJOR_NR, + .major_name = LVM_NAME, + .minor_shift = 0, + .max_p = 1, + .part = lvm_hd_struct, + .sizes = lvm_size, + .nr_real = MAX_LV, }; @@ -464,7 +471,8 @@ lvm_hd_name_ptr = lvm_hd_name; #endif - blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), lvm_make_request_fn); + blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), + lvm_make_request_fn); /* initialise the pe lock */ @@ -482,7 +490,7 @@ #endif return 0; -} /* lvm_init() */ +} /* lvm_init() */ /* * cleanup... @@ -515,11 +523,12 @@ lvm_fin_fs(); #ifdef MODULE - printk(KERN_INFO "%s -- Module successfully deactivated\n", lvm_name); + printk(KERN_INFO "%s -- Module successfully deactivated\n", + lvm_name); #endif return; -} /* lvm_cleanup() */ +} /* lvm_cleanup() */ /* * support function to initialize lvm variables @@ -549,7 +558,7 @@ } return; -} /* lvm_init_vars() */ +} /* lvm_init_vars() */ /******************************************************************** @@ -572,13 +581,15 @@ minor, VG_CHR(minor), MODE_TO_STR(file->f_mode), lock); /* super user validation */ - if (!capable(CAP_SYS_ADMIN)) return -EACCES; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; /* Group special file open */ - if (VG_CHR(minor) > MAX_VG) return -ENXIO; + if (VG_CHR(minor) > MAX_VG) + return -ENXIO; spin_lock(&lvm_lock); - if(lock == current->pid) + if (lock == current->pid) _lock_open_count++; spin_unlock(&lvm_lock); @@ -587,7 +598,7 @@ MOD_INC_USE_COUNT; return 0; -} /* lvm_chr_open() */ +} /* lvm_chr_open() */ /* @@ -604,16 +615,19 @@ uint extendable, l, v; void *arg = (void *) a; lv_t lv; - vg_t* vg_ptr = vg[VG_CHR(minor)]; + vg_t *vg_ptr = vg[VG_CHR(minor)]; /* otherwise cc will complain about unused variables */ (void) lvm_lock; - P_IOCTL("chr MINOR: %d command: 0x%X arg: %p VG#: %d mode: %s%s\n", - minor, command, arg, VG_CHR(minor), MODE_TO_STR(file->f_mode)); + P_IOCTL + ("chr MINOR: %d command: 0x%X arg: %p VG#: %d mode: %s%s\n", + minor, command, arg, VG_CHR(minor), + MODE_TO_STR(file->f_mode)); #ifdef LVM_TOTAL_RESET - if (lvm_reset_spindown > 0) return -EACCES; + if (lvm_reset_spindown > 0) + return -EACCES; #endif /* Main command switch */ @@ -625,7 +639,8 @@ case LVM_GET_IOP_VERSION: /* check lvm version to ensure driver/tools+lib interoperability */ - if (copy_to_user(arg, &lvm_iop_version, sizeof(ushort)) != 0) + if (copy_to_user(arg, &lvm_iop_version, sizeof(ushort)) != + 0) return -EFAULT; return 0; @@ -634,7 +649,8 @@ /* lock reset function */ lvm_reset_spindown = 1; for (v = 0; v < ABS_MAX_VG; v++) { - if (vg[v] != NULL) lvm_do_vg_remove(v); + if (vg[v] != NULL) + lvm_do_vg_remove(v); } #ifdef MODULE @@ -642,28 +658,28 @@ MOD_INC_USE_COUNT; while (GET_USE_COUNT(&__this_module) > 1) MOD_DEC_USE_COUNT; -#endif /* MODULE */ +#endif /* MODULE */ lock = 0; /* release lock */ wake_up_interruptible(&lvm_wait); return 0; -#endif /* LVM_TOTAL_RESET */ +#endif /* LVM_TOTAL_RESET */ case LE_REMAP: /* remap a logical extent (after 
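/*
 * Aside: the two hunks above replace the old GNU "field: value" struct
 * initializers with C99 designated initializers.  A minimal stand-alone
 * sketch of the same idiom, using made-up types so it builds outside
 * the kernel:
 */
#include <stdio.h>

struct demo_ops {
        int (*open)(void);
        int (*release)(void);
        void *owner;
};

static int demo_open(void)    { return 0; }
static int demo_release(void) { return 1; }

/* order no longer matters and unnamed members are implicitly zeroed */
static struct demo_ops demo_dops = {
        .owner   = NULL,
        .open    = demo_open,
        .release = demo_release,
};

int main(void)
{
        printf("open() -> %d, release() -> %d\n",
               demo_dops.open(), demo_dops.release());
        return 0;
}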
moving the physical extent) */ - return lvm_do_le_remap(vg_ptr,arg); + return lvm_do_le_remap(vg_ptr, arg); case PE_LOCK_UNLOCK: /* lock/unlock i/o to a physical extent to move it to another physical volume (move's done in user space's pvmove) */ - return lvm_do_pe_lock_unlock(vg_ptr,arg); + return lvm_do_pe_lock_unlock(vg_ptr, arg); case VG_CREATE_OLD: /* create a VGDA */ return lvm_do_vg_create(arg, minor); case VG_CREATE: - /* create a VGDA, assume VG number is filled in */ + /* create a VGDA, assume VG number is filled in */ return lvm_do_vg_create(arg, -1); case VG_EXTEND: @@ -685,8 +701,10 @@ case VG_SET_EXTENDABLE: /* set/clear extendability flag of volume group */ - if (vg_ptr == NULL) return -ENXIO; - if (copy_from_user(&extendable, arg, sizeof(extendable)) != 0) + if (vg_ptr == NULL) + return -ENXIO; + if (copy_from_user(&extendable, arg, sizeof(extendable)) != + 0) return -EFAULT; if (extendable == VG_EXTENDABLE || @@ -695,13 +713,15 @@ vg_ptr->vg_status |= VG_EXTENDABLE; else vg_ptr->vg_status &= ~VG_EXTENDABLE; - } else return -EINVAL; + } else + return -EINVAL; return 0; case VG_STATUS: /* get volume group data (only the vg_t struct) */ - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_to_user(arg, vg_ptr, sizeof(vg_t)) != 0) return -EFAULT; return 0; @@ -734,21 +754,26 @@ case LV_REMOVE: case LV_RENAME: /* create, extend, reduce, remove or rename a logical volume */ - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(&lv_req, arg, sizeof(lv_req)) != 0) return -EFAULT; if (command != LV_REMOVE) { - if (copy_from_user(&lv, lv_req.lv, sizeof(lv_t)) != 0) + if (copy_from_user(&lv, lv_req.lv, sizeof(lv_t)) != + 0) return -EFAULT; } switch (command) { case LV_CREATE: - return lvm_do_lv_create(minor, lv_req.lv_name, &lv); + return lvm_do_lv_create(minor, lv_req.lv_name, + &lv); case LV_EXTEND: case LV_REDUCE: - return lvm_do_lv_extend_reduce(minor, lv_req.lv_name, &lv); + return lvm_do_lv_extend_reduce(minor, + lv_req.lv_name, + &lv); case LV_REMOVE: return lvm_do_lv_remove(minor, lv_req.lv_name, -1); @@ -776,12 +801,12 @@ case PV_CHANGE: /* change a physical volume */ - return lvm_do_pv_change(vg_ptr,arg); + return lvm_do_pv_change(vg_ptr, arg); case PV_STATUS: /* get physical volume data (pv_t structure only) */ - return lvm_do_pv_status(vg_ptr,arg); + return lvm_do_pv_status(vg_ptr, arg); case PV_FLUSH: @@ -797,7 +822,7 @@ } return 0; -} /* lvm_chr_ioctl */ +} /* lvm_chr_ioctl */ /* @@ -815,12 +840,14 @@ } #endif - if (lvm_chr_open_count > 0) lvm_chr_open_count--; + if (lvm_chr_open_count > 0) + lvm_chr_open_count--; spin_lock(&lvm_lock); - if(lock == current->pid) { - if(!_lock_open_count) { - P_DEV("chr_close: unlocking LVM for pid %d\n", lock); + if (lock == current->pid) { + if (!_lock_open_count) { + P_DEV("chr_close: unlocking LVM for pid %d\n", + lock); lock = 0; wake_up_interruptible(&lvm_wait); } else @@ -831,7 +858,7 @@ MOD_DEC_USE_COUNT; return 0; -} /* lvm_chr_close() */ +} /* lvm_chr_close() */ @@ -851,7 +878,8 @@ vg_t *vg_ptr = vg[VG_BLK(minor)]; P_DEV("blk_open MINOR: %d VG#: %d LV#: %d mode: %s%s\n", - minor, VG_BLK(minor), LV_BLK(minor), MODE_TO_STR(file->f_mode)); + minor, VG_BLK(minor), LV_BLK(minor), + MODE_TO_STR(file->f_mode)); #ifdef LVM_TOTAL_RESET if (lvm_reset_spindown > 0) @@ -861,26 +889,27 @@ if (vg_ptr != NULL && (vg_ptr->vg_status & VG_ACTIVE) && (lv_ptr = vg_ptr->lv[LV_BLK(minor)]) != NULL && - LV_BLK(minor) >= 0 && - LV_BLK(minor) < vg_ptr->lv_max) { + LV_BLK(minor) >= 
0 && LV_BLK(minor) < vg_ptr->lv_max) { /* Check parallel LV spindown (LV remove) */ - if (lv_ptr->lv_status & LV_SPINDOWN) return -EPERM; + if (lv_ptr->lv_status & LV_SPINDOWN) + return -EPERM; /* Check inactive LV and open for read/write */ /* We need to be able to "read" an inactive LV to re-activate it again */ if ((file->f_mode & FMODE_WRITE) && (!(lv_ptr->lv_status & LV_ACTIVE))) - return -EPERM; + return -EPERM; if (!(lv_ptr->lv_access & LV_WRITE) && (file->f_mode & FMODE_WRITE)) return -EACCES; - /* be sure to increment VG counter */ - if (lv_ptr->lv_open == 0) vg_ptr->lv_open++; + /* be sure to increment VG counter */ + if (lv_ptr->lv_open == 0) + vg_ptr->lv_open++; lv_ptr->lv_open++; MOD_INC_USE_COUNT; @@ -890,10 +919,10 @@ return 0; } return -ENXIO; -} /* lvm_blk_open() */ +} /* lvm_blk_open() */ /* Deliver "hard disk geometry" */ -static int _hdio_getgeo(ulong a, lv_t *lv_ptr, int what) +static int _hdio_getgeo(ulong a, lv_t * lv_ptr, int what) { int ret = 0; uchar heads = 128; @@ -901,34 +930,34 @@ ulong start = 0; uint cylinders; - while ( heads * sectors > lv_ptr->lv_size) { + while (heads * sectors > lv_ptr->lv_size) { heads >>= 1; sectors >>= 1; } cylinders = lv_ptr->lv_size / heads / sectors; switch (what) { - case 0: + case 0: { struct hd_geometry *hd = (struct hd_geometry *) a; if (put_user(heads, &hd->heads) || - put_user(sectors, &hd->sectors) || - put_user((ushort) cylinders, &hd->cylinders) || + put_user(sectors, &hd->sectors) || + put_user((ushort) cylinders, &hd->cylinders) || put_user(start, &hd->start)) return -EFAULT; break; } #ifdef HDIO_GETGEO_BIG - case 1: + case 1: { struct hd_big_geometry *hd = - (struct hd_big_geometry *) a; + (struct hd_big_geometry *) a; if (put_user(heads, &hd->heads) || - put_user(sectors, &hd->sectors) || - put_user(cylinders, &hd->cylinders) || + put_user(sectors, &hd->sectors) || + put_user(cylinders, &hd->cylinders) || put_user(start, &hd->start)) return -EFAULT; break; @@ -960,91 +989,92 @@ LV_BLK(minor), MODE_TO_STR(file->f_mode)); switch (cmd) { - case BLKRASET: - /* set read ahead for block device */ - ret = blk_ioctl(dev, cmd, a); - if (ret) - return ret; - lv_ptr->lv_read_ahead = (long) a; - LVM_CORRECT_READ_AHEAD(lv_ptr->lv_read_ahead); - break; - - case HDIO_GETGEO: + case BLKRASET: + /* set read ahead for block device */ + ret = blk_ioctl(dev, cmd, a); + if (ret) + return ret; + lv_ptr->lv_read_ahead = (long) a; + LVM_CORRECT_READ_AHEAD(lv_ptr->lv_read_ahead); + break; + + case HDIO_GETGEO: #ifdef HDIO_GETGEO_BIG - case HDIO_GETGEO_BIG: + case HDIO_GETGEO_BIG: #endif - /* get disk geometry */ - P_IOCTL("%s -- lvm_blk_ioctl -- HDIO_GETGEO\n", - lvm_name); - if (!a) - return -EINVAL; - - switch (cmd) { - case HDIO_GETGEO: - return _hdio_getgeo(a, lv_ptr, 0); + /* get disk geometry */ + P_IOCTL("%s -- lvm_blk_ioctl -- HDIO_GETGEO\n", lvm_name); + if (!a) + return -EINVAL; + + switch (cmd) { + case HDIO_GETGEO: + return _hdio_getgeo(a, lv_ptr, 0); #ifdef HDIO_GETGEO_BIG - case HDIO_GETGEO_BIG: - return _hdio_getgeo(a, lv_ptr, 1); + case HDIO_GETGEO_BIG: + return _hdio_getgeo(a, lv_ptr, 1); #endif - } - - case LV_BMAP: - /* turn logical block into (dev_t, block). non privileged. 
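/*
 * Aside: a stand-alone sketch of the fake geometry that _hdio_getgeo()
 * above reports for a logical volume.  lv_size is in 512-byte sectors;
 * the sample size in main() is an arbitrary assumption.
 */
#include <stdio.h>

typedef unsigned long ulong;

static void lv_fake_geometry(ulong lv_size, unsigned *heads,
                             unsigned *sectors, unsigned *cylinders)
{
        unsigned h = 128, s = 128;

        /* halve heads/sectors until the geometry fits the LV size */
        while ((ulong) h * s > lv_size) {
                h >>= 1;
                s >>= 1;
        }
        *heads = h;
        *sectors = s;
        *cylinders = lv_size / h / s;
}

int main(void)
{
        unsigned h, s, c;
        ulong lv_size = 4UL * 1024 * 1024 * 2;  /* 4 GB LV, in sectors */

        lv_fake_geometry(lv_size, &h, &s, &c);
        printf("heads=%u sectors=%u cylinders=%u\n", h, s, c);
        return 0;
}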
*/ - /* don't bmap a snapshot, since the mapping can change */ - if (lv_ptr->lv_access & LV_SNAPSHOT) - return -EPERM; - - return lvm_user_bmap(inode, (struct lv_bmap *) arg); - - case LV_SET_ACCESS: - /* set access flags of a logical volume */ - if (!capable(CAP_SYS_ADMIN)) return -EACCES; - - down_write(&lv_ptr->lv_lock); - lv_ptr->lv_access = (ulong) arg; - up_write(&lv_ptr->lv_lock); - - if ( lv_ptr->lv_access & LV_WRITE) - set_device_ro(lv_ptr->lv_dev, 0); - else - set_device_ro(lv_ptr->lv_dev, 1); - break; - - - case LV_SET_ALLOCATION: - /* set allocation flags of a logical volume */ - if (!capable(CAP_SYS_ADMIN)) return -EACCES; - down_write(&lv_ptr->lv_lock); - lv_ptr->lv_allocation = (ulong) arg; - up_write(&lv_ptr->lv_lock); - break; - - case LV_SET_STATUS: - /* set status flags of a logical volume */ - if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (!((ulong) arg & LV_ACTIVE) && lv_ptr->lv_open > 1) - return -EPERM; - down_write(&lv_ptr->lv_lock); - lv_ptr->lv_status = (ulong) arg; - up_write(&lv_ptr->lv_lock); - break; - - case LV_SNAPSHOT_USE_RATE: - return lvm_get_snapshot_use_rate(lv_ptr, arg); - - default: - /* Handle rest here */ - ret = blk_ioctl(dev, cmd, a); - if (ret) - printk(KERN_WARNING - "%s -- lvm_blk_ioctl: unknown " - "cmd 0x%x\n", - lvm_name, cmd); - return ret; + } + + case LV_BMAP: + /* turn logical block into (dev_t, block). non privileged. */ + /* don't bmap a snapshot, since the mapping can change */ + if (lv_ptr->lv_access & LV_SNAPSHOT) + return -EPERM; + + return lvm_user_bmap(inode, (struct lv_bmap *) arg); + + case LV_SET_ACCESS: + /* set access flags of a logical volume */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + down_write(&lv_ptr->lv_lock); + lv_ptr->lv_access = (ulong) arg; + up_write(&lv_ptr->lv_lock); + + if (lv_ptr->lv_access & LV_WRITE) + set_device_ro(lv_ptr->lv_dev, 0); + else + set_device_ro(lv_ptr->lv_dev, 1); + break; + + + case LV_SET_ALLOCATION: + /* set allocation flags of a logical volume */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + down_write(&lv_ptr->lv_lock); + lv_ptr->lv_allocation = (ulong) arg; + up_write(&lv_ptr->lv_lock); + break; + + case LV_SET_STATUS: + /* set status flags of a logical volume */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (!((ulong) arg & LV_ACTIVE) && lv_ptr->lv_open > 1) + return -EPERM; + down_write(&lv_ptr->lv_lock); + lv_ptr->lv_status = (ulong) arg; + up_write(&lv_ptr->lv_lock); + break; + + case LV_SNAPSHOT_USE_RATE: + return lvm_get_snapshot_use_rate(lv_ptr, arg); + + default: + /* Handle rest here */ + ret = blk_ioctl(dev, cmd, a); + if (ret) + printk(KERN_WARNING + "%s -- lvm_blk_ioctl: unknown " + "cmd 0x%x\n", lvm_name, cmd); + return ret; } return 0; -} /* lvm_blk_ioctl() */ +} /* lvm_blk_ioctl() */ /* @@ -1059,15 +1089,16 @@ P_DEV("blk_close MINOR: %d VG#: %d LV#: %d\n", minor, VG_BLK(minor), LV_BLK(minor)); - if (lv_ptr->lv_open == 1) vg_ptr->lv_open--; + if (lv_ptr->lv_open == 1) + vg_ptr->lv_open--; lv_ptr->lv_open--; MOD_DEC_USE_COUNT; return 0; -} /* lvm_blk_close() */ +} /* lvm_blk_close() */ -static int lvm_get_snapshot_use_rate(lv_t *lv, void *arg) +static int lvm_get_snapshot_use_rate(lv_t * lv, void *arg) { lv_snapshot_use_rate_req_t lv_rate_req; @@ -1122,20 +1153,20 @@ if (get_user(block, &user_result->lv_block)) return -EFAULT; - memset(&bh,0,sizeof bh); + memset(&bh, 0, sizeof bh); bh.b_blocknr = block; bh.b_dev = bh.b_rdev = inode->i_rdev; bh.b_size = lvm_get_blksize(bh.b_dev); bh.b_rsector = block * (bh.b_size >> 9); bh.b_end_io = NULL; - if 
((err = lvm_map(&bh, READ)) < 0) { + if ((err = lvm_map(&bh, READ)) < 0) { printk("lvm map failed: %d\n", err); return -EINVAL; } return put_user(kdev_t_to_nr(bh.b_rdev), &user_result->lv_dev) || - put_user(bh.b_rsector/(bh.b_size>>9), &user_result->lv_block) ? - -EFAULT : 0; + put_user(bh.b_rsector / (bh.b_size >> 9), + &user_result->lv_block) ? -EFAULT : 0; } @@ -1144,7 +1175,8 @@ * (see init_module/lvm_init) */ static void __remap_snapshot(kdev_t rdev, ulong rsector, - ulong pe_start, lv_t *lv, vg_t *vg) { + ulong pe_start, lv_t * lv, vg_t * vg) +{ /* copy a chunk from the origin to a snapshot device */ down_write(&lv->lv_lock); @@ -1159,7 +1191,8 @@ } static inline void _remap_snapshot(kdev_t rdev, ulong rsector, - ulong pe_start, lv_t *lv, vg_t *vg) { + ulong pe_start, lv_t * lv, vg_t * vg) +{ int r; /* check to see if this chunk is already in the snapshot */ @@ -1176,7 +1209,8 @@ /* * extents destined for a pe that is on the move should be deferred */ -static inline int _should_defer(kdev_t pv, ulong sector, uint32_t pe_size) { +static inline int _should_defer(kdev_t pv, ulong sector, uint32_t pe_size) +{ return ((pe_lock_req.lock == LOCK_PE) && (pv == pe_lock_req.data.pv_dev) && (sector >= pe_lock_req.data.pv_offset) && @@ -1223,34 +1257,32 @@ goto bad; } - if ((rw == WRITE || rw == WRITEA) && - !(lv->lv_access & LV_WRITE)) { + if ((rw == WRITE || rw == WRITEA) && !(lv->lv_access & LV_WRITE)) { printk(KERN_CRIT "%s - lvm_map: ll_rw_blk write for readonly LV %s\n", lvm_name, lv->lv_name); goto bad; } - P_MAP("%s - lvm_map minor: %d *rdev: %s *rsector: %lu size:%lu\n", - lvm_name, minor, - kdevname(bh->b_rdev), - rsector_org, size); + P_MAP + ("%s - lvm_map minor: %d *rdev: %s *rsector: %lu size:%lu\n", + lvm_name, minor, kdevname(bh->b_rdev), rsector_org, size); if (rsector_org + size > lv->lv_size) { printk(KERN_ALERT "%s - lvm_map access beyond end of device; *rsector: " - "%lu or size: %lu wrong for minor: %2d\n", - lvm_name, rsector_org, size, minor); + "%lu or size: %lu wrong for minor: %2d\n", + lvm_name, rsector_org, size, minor); goto bad; } - if (lv->lv_stripes < 2) { /* linear mapping */ + if (lv->lv_stripes < 2) { /* linear mapping */ /* get the index */ index = rsector_org / vg_this->pe_size; pe_start = lv->lv_current_pe[index].pe; rsector_map = lv->lv_current_pe[index].pe + - (rsector_org % vg_this->pe_size); + (rsector_org % vg_this->pe_size); rdev_map = lv->lv_current_pe[index].dev; P_MAP("lv_current_pe[%ld].pe: %d rdev: %s rsector:%ld\n", @@ -1263,22 +1295,23 @@ stripe_length = vg_this->pe_size * lv->lv_stripes; stripe_index = (rsector_org % stripe_length) / - lv->lv_stripesize; + lv->lv_stripesize; index = rsector_org / stripe_length + - (stripe_index % lv->lv_stripes) * - (lv->lv_allocated_le / lv->lv_stripes); + (stripe_index % lv->lv_stripes) * + (lv->lv_allocated_le / lv->lv_stripes); pe_start = lv->lv_current_pe[index].pe; rsector_map = lv->lv_current_pe[index].pe + - (rsector_org % stripe_length) - - (stripe_index % lv->lv_stripes) * lv->lv_stripesize - - stripe_index / lv->lv_stripes * - (lv->lv_stripes - 1) * lv->lv_stripesize; + (rsector_org % stripe_length) - + (stripe_index % lv->lv_stripes) * lv->lv_stripesize - + stripe_index / lv->lv_stripes * + (lv->lv_stripes - 1) * lv->lv_stripesize; rdev_map = lv->lv_current_pe[index].dev; P_MAP("lv_current_pe[%ld].pe: %d rdev: %s rsector:%ld\n" "stripe_length: %ld stripe_index: %ld\n", - index, lv->lv_current_pe[index].pe, kdevname(rdev_map), - rsector_map, stripe_length, stripe_index); + index, 
lv->lv_current_pe[index].pe, + kdevname(rdev_map), rsector_map, stripe_length, + stripe_index); } /* @@ -1287,8 +1320,8 @@ * we need to queue this request, because this is in the fast path. */ if (rw == WRITE || rw == WRITEA) { - if(_defer_extent(bh, rw, rdev_map, - rsector_map, vg_this->pe_size)) { + if (_defer_extent(bh, rw, rdev_map, + rsector_map, vg_this->pe_size)) { up_read(&lv->lv_lock); return 0; @@ -1299,15 +1332,15 @@ lv->lv_current_pe[index].reads++; /* statistic */ /* snapshot volume exception handling on physical device address base */ - if (!(lv->lv_access & (LV_SNAPSHOT|LV_SNAPSHOT_ORG))) + if (!(lv->lv_access & (LV_SNAPSHOT | LV_SNAPSHOT_ORG))) goto out; - if (lv->lv_access & LV_SNAPSHOT) { /* remap snapshot */ + if (lv->lv_access & LV_SNAPSHOT) { /* remap snapshot */ if (lvm_snapshot_remap_block(&rdev_map, &rsector_map, pe_start, lv) < 0) goto bad; - } else if (rw == WRITE || rw == WRITEA) { /* snapshot origin */ + } else if (rw == WRITE || rw == WRITEA) { /* snapshot origin */ lv_t *snap; /* start with first snapshot and loop through all of @@ -1321,22 +1354,22 @@ /* Serializes the COW with the accesses to the snapshot device */ _remap_snapshot(rdev_map, rsector_map, - pe_start, snap, vg_this); + pe_start, snap, vg_this); } } - out: + out: bh->b_rdev = rdev_map; bh->b_rsector = rsector_map; up_read(&lv->lv_lock); return 1; - bad: + bad: if (bh->b_end_io) - buffer_IO_error(bh); + buffer_IO_error(bh); up_read(&lv->lv_lock); return -1; -} /* lvm_map() */ +} /* lvm_map() */ /* @@ -1368,9 +1401,8 @@ /* * make request function */ -static int lvm_make_request_fn(request_queue_t *q, - int rw, - struct buffer_head *bh) +static int lvm_make_request_fn(request_queue_t * q, + int rw, struct buffer_head *bh) { return (lvm_map(bh, rw) <= 0) ? 0 : 1; } @@ -1386,7 +1418,7 @@ */ static int lvm_do_lock_lvm(void) { -lock_try_again: + lock_try_again: spin_lock(&lvm_lock); if (lock != 0 && lock != current->pid) { P_DEV("lvm_do_lock_lvm: locked by pid %d ...\n", lock); @@ -1404,19 +1436,20 @@ P_DEV("lvm_do_lock_lvm: locking LVM for pid %d\n", lock); spin_unlock(&lvm_lock); return 0; -} /* lvm_do_lock_lvm */ +} /* lvm_do_lock_lvm */ /* * character device support function lock/unlock physical extend */ -static int lvm_do_pe_lock_unlock(vg_t *vg_ptr, void *arg) +static int lvm_do_pe_lock_unlock(vg_t * vg_ptr, void *arg) { pe_lock_req_t new_lock; struct buffer_head *bh; uint p; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(&new_lock, arg, sizeof(new_lock)) != 0) return -EFAULT; @@ -1427,7 +1460,8 @@ new_lock.data.pv_dev == vg_ptr->pv[p]->pv_dev) break; } - if (p == vg_ptr->pv_max) return -ENXIO; + if (p == vg_ptr->pv_max) + return -ENXIO; /* * this sync releaves memory pressure to lessen the @@ -1478,12 +1512,13 @@ /* * character device support function logical extend remap */ -static int lvm_do_le_remap(vg_t *vg_ptr, void *arg) +static int lvm_do_le_remap(vg_t * vg_ptr, void *arg) { uint l, le; lv_t *lv_ptr; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(&le_remap_req, arg, sizeof(le_remap_req_t)) != 0) return -EFAULT; @@ -1515,7 +1550,7 @@ } } return -ENXIO; -} /* lvm_do_le_remap() */ +} /* lvm_do_le_remap() */ /* @@ -1529,7 +1564,7 @@ vg_t *vg_ptr; lv_t **snap_lv_ptr; - if ((vg_ptr = kmalloc(sizeof(vg_t),GFP_KERNEL)) == NULL) { + if ((vg_ptr = kmalloc(sizeof(vg_t), GFP_KERNEL)) == NULL) { printk(KERN_CRIT "%s -- VG_CREATE: kmalloc error VG at line %d\n", lvm_name, __LINE__); @@ -1537,8 +1572,9 @@ } 
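/*
 * Aside: a stand-alone sketch of the linear branch of lvm_map() above.
 * The logical sector is split into an extent index and an offset, and
 * lv_current_pe[] supplies the physical start sector on the underlying
 * device.  The extent table and sector numbers below are made up for
 * illustration only.
 */
#include <stdio.h>

typedef unsigned long ulong;

struct pe_entry {
        ulong pe;       /* physical start sector of this extent */
        int dev;        /* stand-in for the kdev_t of the PV */
};

/* map a logical sector to (dev, physical sector) on a linear LV */
static void linear_map(const struct pe_entry *pe_table, ulong pe_size,
                       ulong rsector_org, int *rdev_map, ulong *rsector_map)
{
        ulong index = rsector_org / pe_size;

        *rdev_map = pe_table[index].dev;
        *rsector_map = pe_table[index].pe + (rsector_org % pe_size);
}

int main(void)
{
        /* assumed layout: two 32 MB extents of 65536 sectors each */
        const struct pe_entry lv_current_pe[] = {
                { .pe = 384,   .dev = 1 },
                { .pe = 65920, .dev = 2 },
        };
        ulong rsector_map;
        int rdev_map;

        linear_map(lv_current_pe, 65536, 70000, &rdev_map, &rsector_map);
        printf("logical 70000 -> dev %d, sector %lu\n",
               rdev_map, rsector_map);
        return 0;
}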
/* get the volume group structure */ if (copy_from_user(vg_ptr, arg, sizeof(vg_t)) != 0) { - P_IOCTL("lvm_do_vg_create ERROR: copy VG ptr %p (%d bytes)\n", - arg, sizeof(vg_t)); + P_IOCTL + ("lvm_do_vg_create ERROR: copy VG ptr %p (%d bytes)\n", + arg, sizeof(vg_t)); kfree(vg_ptr); return -EFAULT; } @@ -1572,7 +1608,7 @@ if (vg_ptr->lv_max > ABS_MAX_LV) { printk(KERN_WARNING - "%s -- Can't activate VG: ABS_MAX_LV too small for %u\n", + "%s -- Can't activate VG: ABS_MAX_LV too small for %u\n", lvm_name, vg_ptr->lv_max); kfree(vg_ptr); return -EPERM; @@ -1590,7 +1626,7 @@ /* user space address */ if ((pvp = vg_ptr->pv[p]) != NULL) { ret = lvm_do_pv_create(pvp, vg_ptr, p); - if ( ret != 0) { + if (ret != 0) { lvm_do_vg_remove(minor); return ret; } @@ -1598,7 +1634,7 @@ } size = vg_ptr->lv_max * sizeof(lv_t *); - if ((snap_lv_ptr = vmalloc ( size)) == NULL) { + if ((snap_lv_ptr = vmalloc(size)) == NULL) { printk(KERN_CRIT "%s -- VG_CREATE: vmalloc error snapshot LVs at line %d\n", lvm_name, __LINE__); @@ -1614,12 +1650,13 @@ /* user space address */ if ((lvp = vg_ptr->lv[l]) != NULL) { if (copy_from_user(&lv, lvp, sizeof(lv_t)) != 0) { - P_IOCTL("ERROR: copying LV ptr %p (%d bytes)\n", - lvp, sizeof(lv_t)); + P_IOCTL + ("ERROR: copying LV ptr %p (%d bytes)\n", + lvp, sizeof(lv_t)); lvm_do_vg_remove(minor); return -EFAULT; } - if ( lv.lv_access & LV_SNAPSHOT) { + if (lv.lv_access & LV_SNAPSHOT) { snap_lv_ptr[ls] = lvp; vg_ptr->lv[l] = NULL; ls++; @@ -1659,24 +1696,26 @@ vg_ptr->vg_status |= VG_ACTIVE; return 0; -} /* lvm_do_vg_create() */ +} /* lvm_do_vg_create() */ /* * character device support function VGDA extend */ -static int lvm_do_vg_extend(vg_t *vg_ptr, void *arg) +static int lvm_do_vg_extend(vg_t * vg_ptr, void *arg) { int ret = 0; uint p; pv_t *pv_ptr; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (vg_ptr->pv_cur < vg_ptr->pv_max) { for (p = 0; p < vg_ptr->pv_max; p++) { - if ( ( pv_ptr = vg_ptr->pv[p]) == NULL) { + if ((pv_ptr = vg_ptr->pv[p]) == NULL) { ret = lvm_do_pv_create(arg, vg_ptr, p); - if ( ret != 0) return ret; + if (ret != 0) + return ret; pv_ptr = vg_ptr->pv[p]; vg_ptr->pe_total += pv_ptr->pe_total; return 0; @@ -1684,26 +1723,28 @@ } } return -EPERM; -} /* lvm_do_vg_extend() */ +} /* lvm_do_vg_extend() */ /* * character device support function VGDA reduce */ -static int lvm_do_vg_reduce(vg_t *vg_ptr, void *arg) { +static int lvm_do_vg_reduce(vg_t * vg_ptr, void *arg) +{ uint p; pv_t *pv_ptr; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(pv_name, arg, sizeof(pv_name)) != 0) return -EFAULT; for (p = 0; p < vg_ptr->pv_max; p++) { pv_ptr = vg_ptr->pv[p]; if (pv_ptr != NULL && - strcmp(pv_ptr->pv_name, - pv_name) == 0) { - if (pv_ptr->lv_cur > 0) return -EPERM; + strcmp(pv_ptr->pv_name, pv_name) == 0) { + if (pv_ptr->lv_cur > 0) + return -EPERM; lvm_do_pv_remove(vg_ptr, p); /* Make PV pointer array contiguous */ for (; p < vg_ptr->pv_max - 1; p++) @@ -1713,55 +1754,56 @@ } } return -ENXIO; -} /* lvm_do_vg_reduce */ +} /* lvm_do_vg_reduce */ /* * character device support function VG rename */ -static int lvm_do_vg_rename(vg_t *vg_ptr, void *arg) +static int lvm_do_vg_rename(vg_t * vg_ptr, void *arg) { int l = 0, p = 0, len = 0; - char vg_name[NAME_LEN] = { 0,}; - char lv_name[NAME_LEN] = { 0,}; + char vg_name[NAME_LEN] = { 0, }; + char lv_name[NAME_LEN] = { 0, }; char *ptr = NULL; lv_t *lv_ptr = NULL; pv_t *pv_ptr = NULL; /* If the VG doesn't exist in the kernel then just exit */ - if (!vg_ptr) 
return 0; + if (!vg_ptr) + return 0; if (copy_from_user(vg_name, arg, sizeof(vg_name)) != 0) return -EFAULT; lvm_fs_remove_vg(vg_ptr); - strncpy ( vg_ptr->vg_name, vg_name, sizeof ( vg_name)-1); - for ( l = 0; l < vg_ptr->lv_max; l++) - { - if ((lv_ptr = vg_ptr->lv[l]) == NULL) continue; - memset (lv_ptr->vg_name, 0, sizeof (*vg_name)); - strncpy(lv_ptr->vg_name, vg_name, sizeof ( vg_name)); + strncpy(vg_ptr->vg_name, vg_name, sizeof(vg_name) - 1); + for (l = 0; l < vg_ptr->lv_max; l++) { + if ((lv_ptr = vg_ptr->lv[l]) == NULL) + continue; + memset(lv_ptr->vg_name, 0, sizeof(*vg_name)); + strncpy(lv_ptr->vg_name, vg_name, sizeof(vg_name)); ptr = strrchr(lv_ptr->lv_name, '/'); ptr = ptr ? ptr + 1 : lv_ptr->lv_name; - strncpy(lv_name, ptr, sizeof ( lv_name)); + strncpy(lv_name, ptr, sizeof(lv_name)); len = sizeof(LVM_DIR_PREFIX); strcpy(lv_ptr->lv_name, LVM_DIR_PREFIX); strncat(lv_ptr->lv_name, vg_name, NAME_LEN - len); - strcat (lv_ptr->lv_name, "/"); + strcat(lv_ptr->lv_name, "/"); len += strlen(vg_name) + 1; strncat(lv_ptr->lv_name, lv_name, NAME_LEN - len); } - for ( p = 0; p < vg_ptr->pv_max; p++) - { - if ( (pv_ptr = vg_ptr->pv[p]) == NULL) continue; + for (p = 0; p < vg_ptr->pv_max; p++) { + if ((pv_ptr = vg_ptr->pv[p]) == NULL) + continue; strncpy(pv_ptr->vg_name, vg_name, NAME_LEN); } lvm_fs_create_vg(vg_ptr); /* Need to add PV entries */ - for ( p = 0; p < vg_ptr->pv_act; p++) { + for (p = 0; p < vg_ptr->pv_act; p++) { pv_t *pv_ptr = vg_ptr->pv[p]; if (pv_ptr) @@ -1769,18 +1811,18 @@ } /* Need to add LV entries */ - for ( l = 0; l < vg_ptr->lv_max; l++) { + for (l = 0; l < vg_ptr->lv_max; l++) { lv_t *lv_ptr = vg_ptr->lv[l]; if (!lv_ptr) continue; lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de = - lvm_fs_create_lv(vg_ptr, lv_ptr); + lvm_fs_create_lv(vg_ptr, lv_ptr); } return 0; -} /* lvm_do_vg_rename */ +} /* lvm_do_vg_rename */ /* @@ -1792,7 +1834,8 @@ vg_t *vg_ptr = vg[VG_CHR(minor)]; pv_t *pv_ptr; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; #ifdef LVM_TOTAL_RESET if (vg_ptr->lv_open > 0 && lvm_reset_spindown == 0) @@ -1843,20 +1886,21 @@ MOD_DEC_USE_COUNT; return 0; -} /* lvm_do_vg_remove() */ +} /* lvm_do_vg_remove() */ /* * character device support function physical volume create */ -static int lvm_do_pv_create(pv_t *pvp, vg_t *vg_ptr, ulong p) { +static int lvm_do_pv_create(pv_t * pvp, vg_t * vg_ptr, ulong p) +{ pv_t *pv; int err; if (!vg_ptr) return -ENXIO; - pv = kmalloc(sizeof(pv_t),GFP_KERNEL); + pv = kmalloc(sizeof(pv_t), GFP_KERNEL); if (pv == NULL) { printk(KERN_CRIT "%s -- PV_CREATE: kmalloc error PV at line %d\n", @@ -1867,8 +1911,9 @@ memset(pv, 0, sizeof(*pv)); if (copy_from_user(pv, pvp, sizeof(pv_t)) != 0) { - P_IOCTL("lvm_do_pv_create ERROR: copy PV ptr %p (%d bytes)\n", - pvp, sizeof(pv_t)); + P_IOCTL + ("lvm_do_pv_create ERROR: copy PV ptr %p (%d bytes)\n", + pvp, sizeof(pv_t)); kfree(pv); return -EFAULT; } @@ -1889,13 +1934,14 @@ vg_ptr->pv[p] = pv; return 0; -} /* lvm_do_pv_create() */ +} /* lvm_do_pv_create() */ /* * character device support function physical volume remove */ -static int lvm_do_pv_remove(vg_t *vg_ptr, ulong p) { +static int lvm_do_pv_remove(vg_t * vg_ptr, ulong p) +{ pv_t *pv = vg_ptr->pv[p]; lvm_fs_remove_pv(vg_ptr, pv); @@ -1913,7 +1959,7 @@ } -static void __update_hardsectsize(lv_t *lv) +static void __update_hardsectsize(lv_t * lv) { int max_hardsectsize = 0, hardsectsize = 0; int p; @@ -1925,9 +1971,10 @@ if (max_hardsectsize == 0) max_hardsectsize = hardsectsize; else if (hardsectsize != max_hardsectsize) { 
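/*
 * Aside: a much simplified sketch of what the surrounding
 * __update_hardsectsize() converges on -- when the devices an LV spans
 * disagree on their hardware sector size, the LV is registered with the
 * largest size found.  The sample sizes are assumptions.
 */
#include <stdio.h>

static int lv_pick_hardsectsize(const int *dev_sectsize, int ndevs)
{
        int max = 0, i;

        for (i = 0; i < ndevs; i++)
                if (dev_sectsize[i] > max)
                        max = dev_sectsize[i];  /* keep the largest */

        return max;
}

int main(void)
{
        const int sizes[] = { 512, 512, 2048 }; /* assumed mixed PVs */

        printf("registered hard sector size: %d\n",
               lv_pick_hardsectsize(sizes, 3));
        return 0;
}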
- P_DEV("%s PV[%d] (%s) sector size %d, not %d\n", - lv->lv_name, p, kdevname(pv->pv_dev), - hardsectsize, max_hardsectsize); + P_DEV + ("%s PV[%d] (%s) sector size %d, not %d\n", + lv->lv_name, p, kdevname(pv->pv_dev), + hardsectsize, max_hardsectsize); break; } } @@ -1937,12 +1984,14 @@ if (hardsectsize != max_hardsectsize) { int le; for (le = 0; le < lv->lv_allocated_le; le++) { - hardsectsize = lvm_sectsize(lv->lv_current_pe[le].dev); + hardsectsize = + lvm_sectsize(lv->lv_current_pe[le].dev); if (hardsectsize > max_hardsectsize) { - P_DEV("%s LE[%d] (%s) blocksize %d not %d\n", - lv->lv_name, le, - kdevname(lv->lv_current_pe[le].dev), - hardsectsize, max_hardsectsize); + P_DEV + ("%s LE[%d] (%s) blocksize %d not %d\n", + lv->lv_name, le, + kdevname(lv->lv_current_pe[le].dev), + hardsectsize, max_hardsectsize); max_hardsectsize = hardsectsize; } } @@ -1952,7 +2001,9 @@ (lv->lv_status & LV_ACTIVE)) { int e; for (e = 0; e < lv->lv_remap_end; e++) { - hardsectsize = lvm_sectsize(lv->lv_block_exception[e].rdev_new); + hardsectsize = + lvm_sectsize(lv->lv_block_exception[e]. + rdev_new); if (hardsectsize > max_hardsectsize) max_hardsectsize = hardsectsize; } @@ -1969,7 +2020,7 @@ /* * character device support function logical volume create */ -static int lvm_do_lv_create(int minor, char *lv_name, lv_t *lv) +static int lvm_do_lv_create(int minor, char *lv_name, lv_t * lv) { int e, ret, l, le, l_new, p, size, activate = 1; ulong lv_status_save; @@ -1997,14 +2048,18 @@ else { for (l = 0; l < vg_ptr->lv_max; l++) { if (vg_ptr->lv[l] == NULL) - if (l_new == -1) l_new = l; + if (l_new == -1) + l_new = l; } } - if (l_new == -1) return -EPERM; - else l = l_new; + if (l_new == -1) + return -EPERM; + else + l = l_new; - if ((lv_ptr = kmalloc(sizeof(lv_t),GFP_KERNEL)) == NULL) {; - printk(KERN_CRIT "%s -- LV_CREATE: kmalloc error LV at line %d\n", + if ((lv_ptr = kmalloc(sizeof(lv_t), GFP_KERNEL)) == NULL) {; + printk(KERN_CRIT + "%s -- LV_CREATE: kmalloc error LV at line %d\n", lvm_name, __LINE__); return -ENOMEM; } @@ -2036,8 +2091,7 @@ if ((lv_ptr->lv_current_pe = vmalloc(size)) == NULL) { printk(KERN_CRIT "%s -- LV_CREATE: vmalloc error LV_CURRENT_PE of %d Byte " - "at line %d\n", - lvm_name, size, __LINE__); + "at line %d\n", lvm_name, size, __LINE__); P_KFREE("%s -- kfree %d\n", lvm_name, __LINE__); kfree(lv_ptr); vg_ptr->lv[l] = NULL; @@ -2066,7 +2120,9 @@ lv_ptr->lv_snapshot_org = vg_ptr->lv[LV_BLK(lv_ptr->lv_snapshot_minor)]; if (lv_ptr->lv_snapshot_org != NULL) { - size = lv_ptr->lv_remap_end * sizeof(lv_block_exception_t); + size = + lv_ptr->lv_remap_end * + sizeof(lv_block_exception_t); if (!size) { printk(KERN_WARNING @@ -2076,29 +2132,32 @@ return -EINVAL; } - if ((lv_ptr->lv_block_exception = vmalloc(size)) == NULL) { + if ((lv_ptr->lv_block_exception = + vmalloc(size)) == NULL) { printk(KERN_CRIT "%s -- lvm_do_lv_create: vmalloc error LV_BLOCK_EXCEPTION " "of %d byte at line %d\n", lvm_name, size, __LINE__); - P_KFREE("%s -- kfree %d\n", lvm_name, - __LINE__); + P_KFREE("%s -- kfree %d\n", + lvm_name, __LINE__); kfree(lv_ptr); vg_ptr->lv[l] = NULL; return -ENOMEM; } - if (copy_from_user(lv_ptr->lv_block_exception, lvbe, size)) { + if (copy_from_user + (lv_ptr->lv_block_exception, lvbe, + size)) { vfree(lv_ptr->lv_block_exception); kfree(lv_ptr); vg_ptr->lv[l] = NULL; return -EFAULT; } - if(lv_ptr->lv_block_exception[0].rsector_org == - LVM_SNAPSHOT_DROPPED_SECTOR) - { + if (lv_ptr->lv_block_exception[0]. 
+ rsector_org == + LVM_SNAPSHOT_DROPPED_SECTOR) { printk(KERN_WARNING - "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n", + "%s -- lvm_do_lv_create: snapshot has been dropped and will not be activated\n", lvm_name); activate = 0; } @@ -2112,36 +2171,54 @@ which can be the original logical volume */ lv_ptr = vg_ptr->lv[l]; /* now lv_ptr points to our new last snapshot logical volume */ - lv_ptr->lv_current_pe = lv_ptr->lv_snapshot_org->lv_current_pe; - lv_ptr->lv_allocated_snapshot_le = lv_ptr->lv_allocated_le; - lv_ptr->lv_allocated_le = lv_ptr->lv_snapshot_org->lv_allocated_le; - lv_ptr->lv_current_le = lv_ptr->lv_snapshot_org->lv_current_le; - lv_ptr->lv_size = lv_ptr->lv_snapshot_org->lv_size; - lv_ptr->lv_stripes = lv_ptr->lv_snapshot_org->lv_stripes; - lv_ptr->lv_stripesize = lv_ptr->lv_snapshot_org->lv_stripesize; + lv_ptr->lv_current_pe = + lv_ptr->lv_snapshot_org->lv_current_pe; + lv_ptr->lv_allocated_snapshot_le = + lv_ptr->lv_allocated_le; + lv_ptr->lv_allocated_le = + lv_ptr->lv_snapshot_org-> + lv_allocated_le; + lv_ptr->lv_current_le = + lv_ptr->lv_snapshot_org->lv_current_le; + lv_ptr->lv_size = + lv_ptr->lv_snapshot_org->lv_size; + lv_ptr->lv_stripes = + lv_ptr->lv_snapshot_org->lv_stripes; + lv_ptr->lv_stripesize = + lv_ptr->lv_snapshot_org->lv_stripesize; /* Update the VG PE(s) used by snapshot reserve space. */ - vg_ptr->pe_allocated += lv_ptr->lv_allocated_snapshot_le; + vg_ptr->pe_allocated += + lv_ptr->lv_allocated_snapshot_le; - if ((ret = lvm_snapshot_alloc(lv_ptr)) != 0) - { + if ((ret = + lvm_snapshot_alloc(lv_ptr)) != 0) { vfree(lv_ptr->lv_block_exception); kfree(lv_ptr); vg_ptr->lv[l] = NULL; return ret; } - for ( e = 0; e < lv_ptr->lv_remap_ptr; e++) - lvm_hash_link (lv_ptr->lv_block_exception + e, - lv_ptr->lv_block_exception[e].rdev_org, - lv_ptr->lv_block_exception[e].rsector_org, lv_ptr); + for (e = 0; e < lv_ptr->lv_remap_ptr; e++) + lvm_hash_link(lv_ptr-> + lv_block_exception + + e, + lv_ptr-> + lv_block_exception + [e].rdev_org, + lv_ptr-> + lv_block_exception + [e].rsector_org, + lv_ptr); /* need to fill the COW exception table data into the page for disk i/o */ - if(lvm_snapshot_fill_COW_page(vg_ptr, lv_ptr)) { + if (lvm_snapshot_fill_COW_page + (vg_ptr, lv_ptr)) { kfree(lv_ptr); vg_ptr->lv[l] = NULL; return -EINVAL; } - init_waitqueue_head(&lv_ptr->lv_snapshot_wait); + init_waitqueue_head(&lv_ptr-> + lv_snapshot_wait); } else { kfree(lv_ptr); vg_ptr->lv[l] = NULL; @@ -2152,7 +2229,7 @@ vg_ptr->lv[l] = NULL; return -EINVAL; } - } /* if ( vg[VG_CHR(minor)]->lv[l]->lv_access & LV_SNAPSHOT) */ + } /* if ( vg[VG_CHR(minor)]->lv[l]->lv_access & LV_SNAPSHOT) */ lv_ptr = vg_ptr->lv[l]; lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].start_sect = 0; @@ -2180,23 +2257,24 @@ down_write(&org->lv_lock); org->lv_access |= LV_SNAPSHOT_ORG; - lv_ptr->lv_access &= ~LV_SNAPSHOT_ORG; /* this can only hide an userspace bug */ + lv_ptr->lv_access &= ~LV_SNAPSHOT_ORG; /* this can only hide an userspace bug */ /* Link in the list of snapshot volumes */ - for (last = org; last->lv_snapshot_next; last = last->lv_snapshot_next); + for (last = org; last->lv_snapshot_next; + last = last->lv_snapshot_next); lv_ptr->lv_snapshot_prev = last; last->lv_snapshot_next = lv_ptr; up_write(&org->lv_lock); } /* activate the logical volume */ - if(activate) + if (activate) lv_ptr->lv_status |= LV_ACTIVE; else lv_ptr->lv_status &= ~LV_ACTIVE; - if ( lv_ptr->lv_access & LV_WRITE) + if (lv_ptr->lv_access & LV_WRITE) set_device_ro(lv_ptr->lv_dev, 0); else 
set_device_ro(lv_ptr->lv_dev, 1); @@ -2210,7 +2288,7 @@ lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de = lvm_fs_create_lv(vg_ptr, lv_ptr); return 0; -} /* lvm_do_lv_create() */ +} /* lvm_do_lv_create() */ /* @@ -2233,7 +2311,8 @@ } } } - if (l == vg_ptr->lv_max) return -ENXIO; + if (l == vg_ptr->lv_max) + return -ENXIO; lv_ptr = vg_ptr->lv[l]; #ifdef LVM_TOTAL_RESET @@ -2253,14 +2332,15 @@ if (lv_ptr->lv_access & LV_SNAPSHOT) { /* - * Atomically make the snapshot invisible + * Atomically make the the snapshot invisible * to the original lv before playing with it. */ - lv_t * org = lv_ptr->lv_snapshot_org; + lv_t *org = lv_ptr->lv_snapshot_org; down_write(&org->lv_lock); /* remove this snapshot logical volume from the chain */ - lv_ptr->lv_snapshot_prev->lv_snapshot_next = lv_ptr->lv_snapshot_next; + lv_ptr->lv_snapshot_prev->lv_snapshot_next = + lv_ptr->lv_snapshot_next; if (lv_ptr->lv_snapshot_next != NULL) { lv_ptr->lv_snapshot_next->lv_snapshot_prev = lv_ptr->lv_snapshot_prev; @@ -2299,7 +2379,7 @@ vg_lv_map[MINOR(lv_ptr->lv_dev)].lv_number = -1; /* correct the PE count in PVs if this is not a snapshot - logical volume */ + logical volume */ if (!(lv_ptr->lv_access & LV_SNAPSHOT)) { /* only if this is no snapshot logical volume because we share the lv_current_pe[] structs with the @@ -2320,13 +2400,15 @@ vg_ptr->lv[l] = NULL; vg_ptr->lv_cur--; return 0; -} /* lvm_do_lv_remove() */ +} /* lvm_do_lv_remove() */ /* * logical volume extend / reduce */ -static int __extend_reduce_snapshot(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv) { +static int __extend_reduce_snapshot(vg_t * vg_ptr, lv_t * old_lv, + lv_t * new_lv) +{ ulong size; lv_block_exception_t *lvbe; @@ -2357,7 +2439,8 @@ return 0; } -static int __extend_reduce(vg_t *vg_ptr, lv_t *old_lv, lv_t *new_lv) { +static int __extend_reduce(vg_t * vg_ptr, lv_t * old_lv, lv_t * new_lv) +{ ulong size, l, p, end; pe_t *pe; @@ -2373,7 +2456,7 @@ /* get the PE structures from user space */ if (copy_from_user(pe, new_lv->lv_current_pe, size)) { - if(old_lv->lv_access & LV_SNAPSHOT) + if (old_lv->lv_access & LV_SNAPSHOT) vfree(new_lv->lv_snapshot_hash_table); vfree(pe); return -EFAULT; @@ -2398,7 +2481,7 @@ vg_ptr->pe_allocated++; for (p = 0; p < vg_ptr->pv_cur; p++) { if (vg_ptr->pv[p]->pv_dev == - new_lv->lv_current_pe[l].dev) { + new_lv->lv_current_pe[l].dev) { vg_ptr->pv[p]->pe_allocated++; break; } @@ -2410,25 +2493,30 @@ end = min(old_lv->lv_current_le, new_lv->lv_current_le); for (l = 0; l < end; l++) { new_lv->lv_current_pe[l].reads += - old_lv->lv_current_pe[l].reads; + old_lv->lv_current_pe[l].reads; new_lv->lv_current_pe[l].writes += - old_lv->lv_current_pe[l].writes; + old_lv->lv_current_pe[l].writes; } } else { /* striped logical volume */ - uint i, j, source, dest, end, old_stripe_size, new_stripe_size; + uint i, j, source, dest, end, old_stripe_size, + new_stripe_size; - old_stripe_size = old_lv->lv_allocated_le / old_lv->lv_stripes; - new_stripe_size = new_lv->lv_allocated_le / new_lv->lv_stripes; + old_stripe_size = + old_lv->lv_allocated_le / old_lv->lv_stripes; + new_stripe_size = + new_lv->lv_allocated_le / new_lv->lv_stripes; end = min(old_stripe_size, new_stripe_size); for (i = source = dest = 0; i < new_lv->lv_stripes; i++) { for (j = 0; j < end; j++) { new_lv->lv_current_pe[dest + j].reads += - old_lv->lv_current_pe[source + j].reads; + old_lv->lv_current_pe[source + + j].reads; new_lv->lv_current_pe[dest + j].writes += - old_lv->lv_current_pe[source + j].writes; + old_lv->lv_current_pe[source + + j].writes; } source += 
old_stripe_size; dest += new_stripe_size; @@ -2438,7 +2526,7 @@ return 0; } -static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t *new_lv) +static int lvm_do_lv_extend_reduce(int minor, char *lv_name, lv_t * new_lv) { int r; ulong l, e, size; @@ -2453,7 +2541,8 @@ return -EINVAL; for (l = 0; l < vg_ptr->lv_max; l++) - if (vg_ptr->lv[l] && !strcmp(vg_ptr->lv[l]->lv_name, lv_name)) + if (vg_ptr->lv[l] + && !strcmp(vg_ptr->lv[l]->lv_name, lv_name)) break; if (l == vg_ptr->lv_max) @@ -2464,43 +2553,48 @@ if (old_lv->lv_access & LV_SNAPSHOT) { /* only perform this operation on active snapshots */ if (old_lv->lv_status & LV_ACTIVE) - r = __extend_reduce_snapshot(vg_ptr, old_lv, new_lv); + r = __extend_reduce_snapshot(vg_ptr, old_lv, + new_lv); else r = -EPERM; } else r = __extend_reduce(vg_ptr, old_lv, new_lv); - if(r) + if (r) return r; - /* copy relevent fields */ + /* copy relevant fields */ down_write(&old_lv->lv_lock); - if(new_lv->lv_access & LV_SNAPSHOT) { + if (new_lv->lv_access & LV_SNAPSHOT) { size = (new_lv->lv_remap_end > old_lv->lv_remap_end) ? - old_lv->lv_remap_ptr : new_lv->lv_remap_end; + old_lv->lv_remap_ptr : new_lv->lv_remap_end; size *= sizeof(lv_block_exception_t); memcpy(new_lv->lv_block_exception, old_lv->lv_block_exception, size); + vfree(old_lv->lv_block_exception); + vfree(old_lv->lv_snapshot_hash_table); old_lv->lv_remap_end = new_lv->lv_remap_end; old_lv->lv_block_exception = new_lv->lv_block_exception; old_lv->lv_snapshot_hash_table = - new_lv->lv_snapshot_hash_table; + new_lv->lv_snapshot_hash_table; old_lv->lv_snapshot_hash_table_size = - new_lv->lv_snapshot_hash_table_size; + new_lv->lv_snapshot_hash_table_size; old_lv->lv_snapshot_hash_mask = - new_lv->lv_snapshot_hash_mask; + new_lv->lv_snapshot_hash_mask; - for (e = 0; e < new_lv->lv_remap_ptr; e++) + for (e = 0; e < old_lv->lv_remap_ptr; e++) lvm_hash_link(new_lv->lv_block_exception + e, - new_lv->lv_block_exception[e].rdev_org, - new_lv->lv_block_exception[e].rsector_org, - new_lv); + new_lv->lv_block_exception[e]. + rdev_org, + new_lv->lv_block_exception[e]. + rsector_org, new_lv); + vg_ptr->pe_allocated -= old_lv->lv_allocated_le; + vg_ptr->pe_allocated += new_lv->lv_allocated_le; } else { - vfree(old_lv->lv_current_pe); vfree(old_lv->lv_snapshot_hash_table); @@ -2509,24 +2603,26 @@ old_lv->lv_current_le = new_lv->lv_current_le; old_lv->lv_current_pe = new_lv->lv_current_pe; lvm_gendisk.part[MINOR(old_lv->lv_dev)].nr_sects = - old_lv->lv_size; + old_lv->lv_size; lvm_size[MINOR(old_lv->lv_dev)] = old_lv->lv_size >> 1; if (old_lv->lv_access & LV_SNAPSHOT_ORG) { lv_t *snap; - for(snap = old_lv->lv_snapshot_next; snap; - snap = snap->lv_snapshot_next) { + for (snap = old_lv->lv_snapshot_next; snap; + snap = snap->lv_snapshot_next) { down_write(&snap->lv_lock); - snap->lv_current_pe = old_lv->lv_current_pe; + snap->lv_current_pe = + old_lv->lv_current_pe; snap->lv_allocated_le = - old_lv->lv_allocated_le; - snap->lv_current_le = old_lv->lv_current_le; + old_lv->lv_allocated_le; + snap->lv_current_le = + old_lv->lv_current_le; snap->lv_size = old_lv->lv_size; - lvm_gendisk.part[MINOR(snap->lv_dev)].nr_sects - = old_lv->lv_size; + lvm_gendisk.part[MINOR(snap->lv_dev)]. 
+ nr_sects = old_lv->lv_size; lvm_size[MINOR(snap->lv_dev)] = - old_lv->lv_size >> 1; + old_lv->lv_size >> 1; __update_hardsectsize(snap); up_write(&snap->lv_lock); } @@ -2537,13 +2633,13 @@ up_write(&old_lv->lv_lock); return 0; -} /* lvm_do_lv_extend_reduce() */ +} /* lvm_do_lv_extend_reduce() */ /* * character device support function logical volume status by name */ -static int lvm_do_lv_status_byname(vg_t *vg_ptr, void *arg) +static int lvm_do_lv_status_byname(vg_t * vg_ptr, void *arg) { uint l; lv_status_byname_req_t lv_status_byname_req; @@ -2551,137 +2647,166 @@ void *saved_ptr2; lv_t *lv_ptr; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(&lv_status_byname_req, arg, sizeof(lv_status_byname_req_t)) != 0) return -EFAULT; - if (lv_status_byname_req.lv == NULL) return -EINVAL; + if (lv_status_byname_req.lv == NULL) + return -EINVAL; for (l = 0; l < vg_ptr->lv_max; l++) { if ((lv_ptr = vg_ptr->lv[l]) != NULL && strcmp(lv_ptr->lv_name, lv_status_byname_req.lv_name) == 0) { - /* Save usermode pointers */ - if (copy_from_user(&saved_ptr1, &lv_status_byname_req.lv->lv_current_pe, sizeof(void*)) != 0) + /* Save usermode pointers */ + if (copy_from_user + (&saved_ptr1, + &lv_status_byname_req.lv->lv_current_pe, + sizeof(void *)) != 0) + return -EFAULT; + if (copy_from_user + (&saved_ptr2, + &lv_status_byname_req.lv->lv_block_exception, + sizeof(void *)) != 0) return -EFAULT; - if (copy_from_user(&saved_ptr2, &lv_status_byname_req.lv->lv_block_exception, sizeof(void*)) != 0) - return -EFAULT; - if (copy_to_user(lv_status_byname_req.lv, - lv_ptr, - sizeof(lv_t)) != 0) + if (copy_to_user(lv_status_byname_req.lv, + lv_ptr, sizeof(lv_t)) != 0) return -EFAULT; if (saved_ptr1 != NULL) { if (copy_to_user(saved_ptr1, lv_ptr->lv_current_pe, lv_ptr->lv_allocated_le * - sizeof(pe_t)) != 0) + sizeof(pe_t)) != 0) return -EFAULT; } /* Restore usermode pointers */ - if (copy_to_user(&lv_status_byname_req.lv->lv_current_pe, &saved_ptr1, sizeof(void*)) != 0) - return -EFAULT; + if (copy_to_user + (&lv_status_byname_req.lv->lv_current_pe, + &saved_ptr1, sizeof(void *)) != 0) + return -EFAULT; return 0; } } return -ENXIO; -} /* lvm_do_lv_status_byname() */ +} /* lvm_do_lv_status_byname() */ /* * character device support function logical volume status by index */ -static int lvm_do_lv_status_byindex(vg_t *vg_ptr,void *arg) +static int lvm_do_lv_status_byindex(vg_t * vg_ptr, void *arg) { lv_status_byindex_req_t lv_status_byindex_req; void *saved_ptr1; void *saved_ptr2; lv_t *lv_ptr; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(&lv_status_byindex_req, arg, sizeof(lv_status_byindex_req)) != 0) return -EFAULT; if (lv_status_byindex_req.lv == NULL) return -EINVAL; - if ( ( lv_ptr = vg_ptr->lv[lv_status_byindex_req.lv_index]) == NULL) + if ((lv_ptr = vg_ptr->lv[lv_status_byindex_req.lv_index]) == NULL) return -ENXIO; /* Save usermode pointers */ - if (copy_from_user(&saved_ptr1, &lv_status_byindex_req.lv->lv_current_pe, sizeof(void*)) != 0) - return -EFAULT; - if (copy_from_user(&saved_ptr2, &lv_status_byindex_req.lv->lv_block_exception, sizeof(void*)) != 0) - return -EFAULT; + if (copy_from_user + (&saved_ptr1, &lv_status_byindex_req.lv->lv_current_pe, + sizeof(void *)) != 0) + return -EFAULT; + if (copy_from_user + (&saved_ptr2, &lv_status_byindex_req.lv->lv_block_exception, + sizeof(void *)) != 0) + return -EFAULT; - if (copy_to_user(lv_status_byindex_req.lv, lv_ptr, sizeof(lv_t)) != 0) + if 
(copy_to_user(lv_status_byindex_req.lv, lv_ptr, sizeof(lv_t)) != + 0) return -EFAULT; if (saved_ptr1 != NULL) { if (copy_to_user(saved_ptr1, lv_ptr->lv_current_pe, lv_ptr->lv_allocated_le * - sizeof(pe_t)) != 0) + sizeof(pe_t)) != 0) return -EFAULT; } /* Restore usermode pointers */ - if (copy_to_user(&lv_status_byindex_req.lv->lv_current_pe, &saved_ptr1, sizeof(void *)) != 0) - return -EFAULT; + if (copy_to_user + (&lv_status_byindex_req.lv->lv_current_pe, &saved_ptr1, + sizeof(void *)) != 0) + return -EFAULT; return 0; -} /* lvm_do_lv_status_byindex() */ +} /* lvm_do_lv_status_byindex() */ /* * character device support function logical volume status by device number */ -static int lvm_do_lv_status_bydev(vg_t * vg_ptr, void * arg) { +static int lvm_do_lv_status_bydev(vg_t * vg_ptr, void *arg) +{ int l; lv_status_bydev_req_t lv_status_bydev_req; void *saved_ptr1; void *saved_ptr2; lv_t *lv_ptr; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(&lv_status_bydev_req, arg, sizeof(lv_status_bydev_req)) != 0) return -EFAULT; - for ( l = 0; l < vg_ptr->lv_max; l++) { - if ( vg_ptr->lv[l] == NULL) continue; - if ( vg_ptr->lv[l]->lv_dev == lv_status_bydev_req.dev) break; + for (l = 0; l < vg_ptr->lv_max; l++) { + if (vg_ptr->lv[l] == NULL) + continue; + if (vg_ptr->lv[l]->lv_dev == lv_status_bydev_req.dev) + break; } - if ( l == vg_ptr->lv_max) return -ENXIO; + if (l == vg_ptr->lv_max) + return -ENXIO; lv_ptr = vg_ptr->lv[l]; /* Save usermode pointers */ - if (copy_from_user(&saved_ptr1, &lv_status_bydev_req.lv->lv_current_pe, sizeof(void*)) != 0) - return -EFAULT; - if (copy_from_user(&saved_ptr2, &lv_status_bydev_req.lv->lv_block_exception, sizeof(void*)) != 0) - return -EFAULT; + if (copy_from_user + (&saved_ptr1, &lv_status_bydev_req.lv->lv_current_pe, + sizeof(void *)) != 0) + return -EFAULT; + if (copy_from_user + (&saved_ptr2, &lv_status_bydev_req.lv->lv_block_exception, + sizeof(void *)) != 0) + return -EFAULT; - if (copy_to_user(lv_status_bydev_req.lv, lv_ptr, sizeof(lv_t)) != 0) + if (copy_to_user(lv_status_bydev_req.lv, lv_ptr, sizeof(lv_t)) != + 0) return -EFAULT; if (saved_ptr1 != NULL) { if (copy_to_user(saved_ptr1, lv_ptr->lv_current_pe, lv_ptr->lv_allocated_le * - sizeof(pe_t)) != 0) + sizeof(pe_t)) != 0) return -EFAULT; } /* Restore usermode pointers */ - if (copy_to_user(&lv_status_bydev_req.lv->lv_current_pe, &saved_ptr1, sizeof(void *)) != 0) - return -EFAULT; + if (copy_to_user + (&lv_status_bydev_req.lv->lv_current_pe, &saved_ptr1, + sizeof(void *)) != 0) + return -EFAULT; return 0; -} /* lvm_do_lv_status_bydev() */ +} /* lvm_do_lv_status_bydev() */ /* * character device support function rename a logical volume */ -static int lvm_do_lv_rename(vg_t *vg_ptr, lv_req_t *lv_req, lv_t *lv) +static int lvm_do_lv_rename(vg_t * vg_ptr, lv_req_t * lv_req, lv_t * lv) { int l = 0; int ret = 0; @@ -2690,33 +2815,36 @@ if (!vg_ptr) return -ENXIO; - for (l = 0; l < vg_ptr->lv_max; l++) - { - if ( (lv_ptr = vg_ptr->lv[l]) == NULL) continue; - if (lv_ptr->lv_dev == lv->lv_dev) - { + for (l = 0; l < vg_ptr->lv_max; l++) { + if ((lv_ptr = vg_ptr->lv[l]) == NULL) + continue; + if (lv_ptr->lv_dev == lv->lv_dev) { lvm_fs_remove_lv(vg_ptr, lv_ptr); - strncpy(lv_ptr->lv_name, lv_req->lv_name, NAME_LEN); - lvm_fs_create_lv(vg_ptr, lv_ptr); + strncpy(lv_ptr->lv_name, lv_req->lv_name, + NAME_LEN); + lvm_gendisk.part[MINOR(lv_ptr->lv_dev)].de = + lvm_fs_create_lv(vg_ptr, lv_ptr); break; } } - if (l == vg_ptr->lv_max) ret = -ENODEV; + if (l == vg_ptr->lv_max) 
+ ret = -ENODEV; return ret; -} /* lvm_do_lv_rename */ +} /* lvm_do_lv_rename */ /* * character device support function physical volume change */ -static int lvm_do_pv_change(vg_t *vg_ptr, void *arg) +static int lvm_do_pv_change(vg_t * vg_ptr, void *arg) { uint p; pv_t *pv_ptr; struct block_device *bd; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(&pv_change_req, arg, sizeof(pv_change_req)) != 0) return -EFAULT; @@ -2724,8 +2852,7 @@ for (p = 0; p < vg_ptr->pv_max; p++) { pv_ptr = vg_ptr->pv[p]; if (pv_ptr != NULL && - strcmp(pv_ptr->pv_name, - pv_change_req.pv_name) == 0) { + strcmp(pv_ptr->pv_name, pv_change_req.pv_name) == 0) { bd = pv_ptr->bd; if (copy_from_user(pv_ptr, @@ -2741,17 +2868,18 @@ } } return -ENXIO; -} /* lvm_do_pv_change() */ +} /* lvm_do_pv_change() */ /* * character device support function get physical volume status */ -static int lvm_do_pv_status(vg_t *vg_ptr, void *arg) +static int lvm_do_pv_status(vg_t * vg_ptr, void *arg) { uint p; pv_t *pv_ptr; - if (vg_ptr == NULL) return -ENXIO; + if (vg_ptr == NULL) + return -ENXIO; if (copy_from_user(&pv_status_req, arg, sizeof(pv_status_req)) != 0) return -EFAULT; @@ -2759,17 +2887,15 @@ for (p = 0; p < vg_ptr->pv_max; p++) { pv_ptr = vg_ptr->pv[p]; if (pv_ptr != NULL && - strcmp(pv_ptr->pv_name, - pv_status_req.pv_name) == 0) { + strcmp(pv_ptr->pv_name, pv_status_req.pv_name) == 0) { if (copy_to_user(pv_status_req.pv, - pv_ptr, - sizeof(pv_t)) != 0) + pv_ptr, sizeof(pv_t)) != 0) return -EFAULT; return 0; } } return -ENXIO; -} /* lvm_do_pv_status() */ +} /* lvm_do_pv_status() */ /* @@ -2811,13 +2937,15 @@ hardsect_size[MAJOR_NR] = lvm_hardsectsizes; return; -} /* lvm_gen_init() */ +} /* lvm_gen_init() */ /* Must have down_write(_pe_lock) when we enqueue buffers */ -static void _queue_io(struct buffer_head *bh, int rw) { - if (bh->b_reqnext) BUG(); +static void _queue_io(struct buffer_head *bh, int rw) +{ + if (bh->b_reqnext) + BUG(); bh->b_reqnext = _pe_requests; _pe_requests = bh; } @@ -2855,14 +2983,15 @@ /* * we must open the pv's before we use them */ -static int _open_pv(pv_t *pv) { +static int _open_pv(pv_t * pv) +{ int err; struct block_device *bd; if (!(bd = bdget(kdev_t_to_nr(pv->pv_dev)))) return -ENOMEM; - err = blkdev_get(bd, FMODE_READ|FMODE_WRITE, 0, BDEV_FILE); + err = blkdev_get(bd, FMODE_READ | FMODE_WRITE, 0, BDEV_FILE); if (err) return err; @@ -2870,7 +2999,8 @@ return 0; } -static void _close_pv(pv_t *pv) { +static void _close_pv(pv_t * pv) +{ if (pv) { struct block_device *bdev = pv->bd; pv->bd = NULL; @@ -2882,7 +3012,7 @@ static unsigned long _sectors_to_k(unsigned long sect) { - if(SECTOR_SIZE > 1024) { + if (SECTOR_SIZE > 1024) { return sect * (SECTOR_SIZE / 1024); } --- linux/drivers/md/lvm-internal.h.orig Fri Jan 24 07:57:44 2003 +++ linux/drivers/md/lvm-internal.h Thu Mar 6 20:34:36 2003 @@ -50,7 +50,7 @@ extern struct file_operations lvm_chr_fops; #ifndef uchar -typedef unsigned char uchar; +typedef unsigned char uchar; #endif extern struct block_device_operations lvm_blk_dops; @@ -89,24 +89,24 @@ int lvm_get_blksize(kdev_t); int lvm_snapshot_alloc(lv_t *); int lvm_snapshot_fill_COW_page(vg_t *, lv_t *); -int lvm_snapshot_COW(kdev_t, ulong, ulong, ulong, vg_t *vg, lv_t *); +int lvm_snapshot_COW(kdev_t, ulong, ulong, ulong, vg_t * vg, lv_t *); int lvm_snapshot_remap_block(kdev_t *, ulong *, ulong, lv_t *); void lvm_snapshot_release(lv_t *); int lvm_write_COW_table_block(vg_t *, lv_t *); void lvm_hash_link(lv_block_exception_t *, kdev_t, ulong, lv_t 
*); int lvm_snapshot_alloc_hash_table(lv_t *); -void lvm_drop_snapshot(vg_t *vg, lv_t *, const char *); +void lvm_drop_snapshot(vg_t * vg, lv_t *, const char *); /* lvm_fs.c */ void lvm_init_fs(void); void lvm_fin_fs(void); -void lvm_fs_create_vg(vg_t *vg_ptr); -void lvm_fs_remove_vg(vg_t *vg_ptr); -devfs_handle_t lvm_fs_create_lv(vg_t *vg_ptr, lv_t *lv); -void lvm_fs_remove_lv(vg_t *vg_ptr, lv_t *lv); -void lvm_fs_create_pv(vg_t *vg_ptr, pv_t *pv); -void lvm_fs_remove_pv(vg_t *vg_ptr, pv_t *pv); +void lvm_fs_create_vg(vg_t * vg_ptr); +void lvm_fs_remove_vg(vg_t * vg_ptr); +devfs_handle_t lvm_fs_create_lv(vg_t * vg_ptr, lv_t * lv); +void lvm_fs_remove_lv(vg_t * vg_ptr, lv_t * lv); +void lvm_fs_create_pv(vg_t * vg_ptr, pv_t * pv); +void lvm_fs_remove_pv(vg_t * vg_ptr, pv_t * pv); #endif --- linux/drivers/md/lvm-snap.c.orig Tue Jan 7 15:50:31 2003 +++ linux/drivers/md/lvm-snap.c Thu Mar 6 20:34:36 2003 @@ -42,6 +42,8 @@ * 15/10/2001 - fix snapshot alignment problem [CM] * - fix snapshot full oops (always check lv_block_exception) [CM] * 26/06/2002 - support for new list_move macro [patch@luckynet.dynu.com] + * 26/07/2002 - removed conditional list_move macro because we will + * discontinue LVM1 before 2.6 anyway * */ @@ -57,7 +59,8 @@ #include "lvm-internal.h" -static char *lvm_snap_version __attribute__ ((unused)) = "LVM "LVM_RELEASE_NAME" snapshot code ("LVM_RELEASE_DATE")\n"; +static char *lvm_snap_version __attribute__ ((unused)) = + "LVM " LVM_RELEASE_NAME " snapshot code (" LVM_RELEASE_DATE ")\n"; extern const char *const lvm_name; @@ -65,19 +68,20 @@ void lvm_snapshot_release(lv_t *); -static int _write_COW_table_block(vg_t *vg, lv_t *lv, int idx, +static int _write_COW_table_block(vg_t * vg, lv_t * lv, int idx, const char **reason); -static void _disable_snapshot(vg_t *vg, lv_t *lv); +static void _disable_snapshot(vg_t * vg, lv_t * lv); static inline int __brw_kiovec(int rw, int nr, struct kiobuf *iovec[], kdev_t dev, unsigned long b[], int size, - lv_t *lv) { + lv_t * lv) +{ return brw_kiovec(rw, nr, iovec, dev, b, size); } -static int _pv_get_number(vg_t * vg, kdev_t rdev, uint *pvn) +static int _pv_get_number(vg_t * vg, kdev_t rdev, uint * pvn) { uint p; for (p = 0; p < vg->pv_max; p++) { @@ -104,34 +108,32 @@ #define hashfn(dev,block,mask,chunk_size) \ ((HASHDEV(dev)^((block)/(chunk_size))) & (mask)) -static inline lv_block_exception_t * -lvm_find_exception_table(kdev_t org_dev, unsigned long org_start, lv_t * lv) +static inline lv_block_exception_t *lvm_find_exception_table(kdev_t + org_dev, + unsigned long + org_start, + lv_t * lv) { - struct list_head * hash_table = lv->lv_snapshot_hash_table, * next; + struct list_head *hash_table = lv->lv_snapshot_hash_table, *next; unsigned long mask = lv->lv_snapshot_hash_mask; int chunk_size = lv->lv_chunk_size; - lv_block_exception_t * ret; + lv_block_exception_t *ret; int i = 0; - hash_table = &hash_table[hashfn(org_dev, org_start, mask, chunk_size)]; + hash_table = + &hash_table[hashfn(org_dev, org_start, mask, chunk_size)]; ret = NULL; - for (next = hash_table->next; next != hash_table; next = next->next) - { - lv_block_exception_t * exception; + for (next = hash_table->next; next != hash_table; + next = next->next) { + lv_block_exception_t *exception; exception = list_entry(next, lv_block_exception_t, hash); if (exception->rsector_org == org_start && - exception->rdev_org == org_dev) - { - if (i) - { + exception->rdev_org == org_dev) { + if (i) { /* fun, isn't it? 
:) */ -#ifdef list_move - list_move(next, hash_table); -#else list_del(next); list_add(next, hash_table); -#endif } ret = exception; break; @@ -145,13 +147,14 @@ kdev_t org_dev, unsigned long org_start, lv_t * lv) { - struct list_head * hash_table = lv->lv_snapshot_hash_table; + struct list_head *hash_table = lv->lv_snapshot_hash_table; unsigned long mask = lv->lv_snapshot_hash_mask; int chunk_size = lv->lv_chunk_size; if (!hash_table) BUG(); - hash_table = &hash_table[hashfn(org_dev, org_start, mask, chunk_size)]; + hash_table = + &hash_table[hashfn(org_dev, org_start, mask, chunk_size)]; list_add(&exception->hash, hash_table); } @@ -163,26 +166,25 @@ * * We need to be holding at least a read lock on lv->lv_lock. */ -int lvm_snapshot_remap_block(kdev_t * org_dev, unsigned long * org_sector, +int lvm_snapshot_remap_block(kdev_t * org_dev, unsigned long *org_sector, unsigned long pe_start, lv_t * lv) { int ret; unsigned long pe_off, pe_adjustment, __org_start; kdev_t __org_dev; int chunk_size = lv->lv_chunk_size; - lv_block_exception_t * exception; + lv_block_exception_t *exception; if (!lv->lv_block_exception) return -1; pe_off = pe_start % chunk_size; - pe_adjustment = (*org_sector-pe_off) % chunk_size; + pe_adjustment = (*org_sector - pe_off) % chunk_size; __org_start = *org_sector - pe_adjustment; __org_dev = *org_dev; ret = 0; exception = lvm_find_exception_table(__org_dev, __org_start, lv); - if (exception) - { + if (exception) { *org_dev = exception->rdev_new; *org_sector = exception->rsector_new + pe_adjustment; ret = 1; @@ -190,7 +192,7 @@ return ret; } -void lvm_drop_snapshot(vg_t *vg, lv_t *lv_snap, const char *reason) +void lvm_drop_snapshot(vg_t * vg, lv_t * lv_snap, const char *reason) { kdev_t last_dev; int i; @@ -203,7 +205,7 @@ _disable_snapshot(vg, lv_snap); for (i = last_dev = 0; i < lv_snap->lv_remap_ptr; i++) { - if ( lv_snap->lv_block_exception[i].rdev_new != last_dev) { + if (lv_snap->lv_block_exception[i].rdev_new != last_dev) { last_dev = lv_snap->lv_block_exception[i].rdev_new; invalidate_buffers(last_dev); } @@ -214,14 +216,14 @@ printk(KERN_INFO "%s -- giving up to snapshot %s on %s: %s\n", - lvm_name, lv_snap->lv_snapshot_org->lv_name, lv_snap->lv_name, - reason); + lvm_name, lv_snap->lv_snapshot_org->lv_name, + lv_snap->lv_name, reason); } static inline int lvm_snapshot_prepare_blocks(unsigned long *blocks, - unsigned long start, - int nr_sectors, - int blocksize) + unsigned long start, + int nr_sectors, + int blocksize) { int i, sectors_per_block, nr_blocks; @@ -244,8 +246,7 @@ int correct_size = BLOCK_SIZE, i, major; major = MAJOR(dev); - if (blksize_size[major]) - { + if (blksize_size[major]) { i = blksize_size[major][MINOR(dev)]; if (i) correct_size = i; @@ -254,10 +255,10 @@ } #ifdef DEBUG_SNAPSHOT -static inline void invalidate_snap_cache(unsigned long start, unsigned long nr, - kdev_t dev) +static inline void invalidate_snap_cache(unsigned long start, + unsigned long nr, kdev_t dev) { - struct buffer_head * bh; + struct buffer_head *bh; int sectors_per_block, i, blksize, minor; minor = MINOR(dev); @@ -266,8 +267,7 @@ nr /= sectors_per_block; start /= sectors_per_block; - for (i = 0; i < nr; i++) - { + for (i = 0; i < nr; i++) { bh = get_hash_table(dev, start++, blksize); if (bh) bforget(bh); @@ -280,40 +280,44 @@ { int id = 0, is = lv_snap->lv_remap_ptr; ulong blksize_snap; - lv_COW_table_disk_t * lv_COW_table = (lv_COW_table_disk_t *) - page_address(lv_snap->lv_COW_table_iobuf->maplist[0]); + lv_COW_table_disk_t *lv_COW_table = (lv_COW_table_disk_t *) + 
page_address(lv_snap->lv_COW_table_iobuf->maplist[0]); if (is == 0) return 0; is--; blksize_snap = - lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new); + lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new); is -= is % (blksize_snap / sizeof(lv_COW_table_disk_t)); memset(lv_COW_table, 0, blksize_snap); - for ( ; is < lv_snap->lv_remap_ptr; is++, id++) { + for (; is < lv_snap->lv_remap_ptr; is++, id++) { /* store new COW_table entry */ - lv_block_exception_t *be = lv_snap->lv_block_exception + is; + lv_block_exception_t *be = + lv_snap->lv_block_exception + is; uint pvn; if (_pv_get_number(vg, be->rdev_org, &pvn)) goto bad; lv_COW_table[id].pv_org_number = cpu_to_le64(pvn); - lv_COW_table[id].pv_org_rsector = cpu_to_le64(be->rsector_org); + lv_COW_table[id].pv_org_rsector = + cpu_to_le64(be->rsector_org); if (_pv_get_number(vg, be->rdev_new, &pvn)) goto bad; lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn); - lv_COW_table[id].pv_snap_rsector = cpu_to_le64(be->rsector_new); + lv_COW_table[id].pv_snap_rsector = + cpu_to_le64(be->rsector_new); } return 0; - bad: - printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", lvm_name); + bad: + printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed", + lvm_name); return -1; } @@ -323,12 +327,12 @@ * * We need to hold a write lock on lv_snap->lv_lock. */ -int lvm_write_COW_table_block(vg_t * vg, lv_t *lv_snap) +int lvm_write_COW_table_block(vg_t * vg, lv_t * lv_snap) { int r; const char *err; - if((r = _write_COW_table_block(vg, lv_snap, - lv_snap->lv_remap_ptr - 1, &err))) + if ((r = _write_COW_table_block(vg, lv_snap, + lv_snap->lv_remap_ptr - 1, &err))) lvm_drop_snapshot(vg, lv_snap, err); return r; } @@ -349,13 +353,15 @@ unsigned long org_phys_sector, unsigned long org_pe_start, unsigned long org_virt_sector, - vg_t *vg, lv_t* lv_snap) + vg_t * vg, lv_t * lv_snap) { - const char * reason; - unsigned long org_start, snap_start, snap_phys_dev, virt_start, pe_off; + const char *reason; + unsigned long org_start, snap_start, snap_phys_dev, virt_start, + pe_off; unsigned long phys_start; - int idx = lv_snap->lv_remap_ptr, chunk_size = lv_snap->lv_chunk_size; - struct kiobuf * iobuf = lv_snap->lv_iobuf; + int idx = lv_snap->lv_remap_ptr, chunk_size = + lv_snap->lv_chunk_size; + struct kiobuf *iobuf = lv_snap->lv_iobuf; unsigned long *blocks = iobuf->blocks; int blksize_snap, blksize_org, min_blksize, max_blksize; int max_sectors, nr_sectors; @@ -366,7 +372,8 @@ /* calculate physical boundaries of source chunk */ pe_off = org_pe_start % chunk_size; - org_start = org_phys_sector - ((org_phys_sector-pe_off) % chunk_size); + org_start = + org_phys_sector - ((org_phys_sector - pe_off) % chunk_size); virt_start = org_virt_sector - (org_phys_sector - org_start); /* calculate physical boundaries of destination chunk */ @@ -381,25 +388,22 @@ lvm_name, kdevname(org_phys_dev), org_phys_sector, org_start, kdevname(snap_phys_dev), snap_start, - chunk_size, - org_pe_start, pe_off, - org_virt_sector); + chunk_size, org_pe_start, pe_off, org_virt_sector); #endif blksize_org = lvm_sectsize(org_phys_dev); blksize_snap = lvm_sectsize(snap_phys_dev); max_blksize = max(blksize_org, blksize_snap); min_blksize = min(blksize_org, blksize_snap); - max_sectors = KIO_MAX_SECTORS * (min_blksize>>9); + max_sectors = KIO_MAX_SECTORS * (min_blksize >> 9); - if (chunk_size % (max_blksize>>9)) + if (chunk_size % (max_blksize >> 9)) goto fail_blksize; /* Don't change org_start, we need it to fill in the exception table */ phys_start = org_start; - while 
(chunk_size) - { + while (chunk_size) { nr_sectors = min(chunk_size, max_sectors); chunk_size -= nr_sectors; @@ -410,7 +414,8 @@ goto fail_prepare; if (__brw_kiovec(READ, 1, &iobuf, org_phys_dev, blocks, - blksize_org, lv_snap) != (nr_sectors<<9)) + blksize_org, + lv_snap) != (nr_sectors << 9)) goto fail_raw_read; if (!lvm_snapshot_prepare_blocks(blocks, snap_start, @@ -418,7 +423,8 @@ goto fail_prepare; if (__brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, blocks, - blksize_snap, lv_snap) != (nr_sectors<<9)) + blksize_snap, + lv_snap) != (nr_sectors << 9)) goto fail_raw_write; phys_start += nr_sectors; @@ -440,53 +446,55 @@ org_phys_dev, org_start, lv_snap); lv_snap->lv_remap_ptr = idx + 1; if (lv_snap->lv_snapshot_use_rate > 0) { - if (lv_snap->lv_remap_ptr * 100 / lv_snap->lv_remap_end >= lv_snap->lv_snapshot_use_rate) + if (lv_snap->lv_remap_ptr * 100 / lv_snap->lv_remap_end >= + lv_snap->lv_snapshot_use_rate) wake_up_interruptible(&lv_snap->lv_snapshot_wait); } return 0; /* slow path */ -out: + out: lvm_drop_snapshot(vg, lv_snap, reason); return 1; -fail_out_of_space: + fail_out_of_space: reason = "out of space"; goto out; -fail_raw_read: + fail_raw_read: reason = "read error"; goto out; -fail_raw_write: + fail_raw_write: reason = "write error"; goto out; -fail_blksize: + fail_blksize: reason = "blocksize error"; goto out; -fail_prepare: + fail_prepare: reason = "couldn't prepare kiovec blocks " - "(start probably isn't block aligned)"; + "(start probably isn't block aligned)"; goto out; } -int lvm_snapshot_alloc_iobuf_pages(struct kiobuf * iobuf, int sectors) +int lvm_snapshot_alloc_iobuf_pages(struct kiobuf *iobuf, int sectors) { int bytes, nr_pages, err, i; bytes = sectors * SECTOR_SIZE; nr_pages = (bytes + ~PAGE_MASK) >> PAGE_SHIFT; err = expand_kiobuf(iobuf, nr_pages); - if (err) goto out; + if (err) + goto out; err = -ENOMEM; iobuf->locked = 1; iobuf->nr_pages = 0; - for (i = 0; i < nr_pages; i++) - { - struct page * page; + for (i = 0; i < nr_pages; i++) { + struct page *page; page = alloc_page(GFP_KERNEL); - if (!page) goto out; + if (!page) + goto out; iobuf->maplist[i] = page; LockPage(page); @@ -496,7 +504,7 @@ err = 0; -out: + out: return err; } @@ -516,13 +524,13 @@ { int err; unsigned long buckets, max_buckets, size; - struct list_head * hash; + struct list_head *hash; buckets = lv->lv_remap_end; max_buckets = calc_max_buckets(); buckets = min(buckets, max_buckets); - while (buckets & (buckets-1)) - buckets &= (buckets-1); + while (buckets & (buckets - 1)) + buckets &= (buckets - 1); size = buckets * sizeof(struct list_head); @@ -534,11 +542,11 @@ goto out; lv->lv_snapshot_hash_table_size = size; - lv->lv_snapshot_hash_mask = buckets-1; + lv->lv_snapshot_hash_mask = buckets - 1; while (buckets--) - INIT_LIST_HEAD(hash+buckets); + INIT_LIST_HEAD(hash + buckets); err = 0; -out: + out: return err; } @@ -548,33 +556,39 @@ /* allocate kiovec to do chunk io */ ret = alloc_kiovec(1, &lv_snap->lv_iobuf); - if (ret) goto out; + if (ret) + goto out; - max_sectors = KIO_MAX_SECTORS << (PAGE_SHIFT-9); + max_sectors = KIO_MAX_SECTORS << (PAGE_SHIFT - 9); - ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_iobuf, max_sectors); - if (ret) goto out_free_kiovec; + ret = + lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_iobuf, max_sectors); + if (ret) + goto out_free_kiovec; /* allocate kiovec to do exception table io */ ret = alloc_kiovec(1, &lv_snap->lv_COW_table_iobuf); - if (ret) goto out_free_kiovec; + if (ret) + goto out_free_kiovec; ret = 
lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_COW_table_iobuf, - PAGE_SIZE/SECTOR_SIZE); - if (ret) goto out_free_both_kiovecs; + PAGE_SIZE / SECTOR_SIZE); + if (ret) + goto out_free_both_kiovecs; ret = lvm_snapshot_alloc_hash_table(lv_snap); - if (ret) goto out_free_both_kiovecs; + if (ret) + goto out_free_both_kiovecs; -out: + out: return ret; -out_free_both_kiovecs: + out_free_both_kiovecs: unmap_kiobuf(lv_snap->lv_COW_table_iobuf); free_kiovec(1, &lv_snap->lv_COW_table_iobuf); lv_snap->lv_COW_table_iobuf = NULL; -out_free_kiovec: + out_free_kiovec: unmap_kiobuf(lv_snap->lv_iobuf); free_kiovec(1, &lv_snap->lv_iobuf); lv_snap->lv_iobuf = NULL; @@ -585,27 +599,23 @@ void lvm_snapshot_release(lv_t * lv) { - if (lv->lv_block_exception) - { + if (lv->lv_block_exception) { vfree(lv->lv_block_exception); lv->lv_block_exception = NULL; } - if (lv->lv_snapshot_hash_table) - { + if (lv->lv_snapshot_hash_table) { vfree(lv->lv_snapshot_hash_table); lv->lv_snapshot_hash_table = NULL; lv->lv_snapshot_hash_table_size = 0; } - if (lv->lv_iobuf) - { - kiobuf_wait_for_io(lv->lv_iobuf); + if (lv->lv_iobuf) { + kiobuf_wait_for_io(lv->lv_iobuf); unmap_kiobuf(lv->lv_iobuf); free_kiovec(1, &lv->lv_iobuf); lv->lv_iobuf = NULL; } - if (lv->lv_COW_table_iobuf) - { - kiobuf_wait_for_io(lv->lv_COW_table_iobuf); + if (lv->lv_COW_table_iobuf) { + kiobuf_wait_for_io(lv->lv_COW_table_iobuf); unmap_kiobuf(lv->lv_COW_table_iobuf); free_kiovec(1, &lv->lv_COW_table_iobuf); lv->lv_COW_table_iobuf = NULL; @@ -613,55 +623,67 @@ } -static int _write_COW_table_block(vg_t *vg, lv_t *lv_snap, - int idx, const char **reason) { +static int _write_COW_table_block(vg_t * vg, lv_t * lv_snap, + int idx, const char **reason) +{ int blksize_snap; int end_of_table; int idx_COW_table; uint pvn; ulong snap_pe_start, COW_table_sector_offset, - COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block; + COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block; ulong blocks[1]; kdev_t snap_phys_dev; lv_block_exception_t *be; struct kiobuf *COW_table_iobuf = lv_snap->lv_COW_table_iobuf; - lv_COW_table_disk_t * lv_COW_table = - ( lv_COW_table_disk_t *) page_address(lv_snap->lv_COW_table_iobuf->maplist[0]); + lv_COW_table_disk_t *lv_COW_table = + (lv_COW_table_disk_t *) page_address(lv_snap-> + lv_COW_table_iobuf-> + maplist[0]); COW_chunks_per_pe = LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg, lv_snap); COW_entries_per_pe = LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg, lv_snap); /* get physical addresse of destination chunk */ snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new; - snap_pe_start = lv_snap->lv_block_exception[idx - (idx % COW_entries_per_pe)].rsector_new - lv_snap->lv_chunk_size; + snap_pe_start = + lv_snap->lv_block_exception[idx - + (idx % + COW_entries_per_pe)].rsector_new - + lv_snap->lv_chunk_size; blksize_snap = lvm_sectsize(snap_phys_dev); - COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t); - idx_COW_table = idx % COW_entries_per_pe % COW_entries_per_block; + COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t); + idx_COW_table = idx % COW_entries_per_pe % COW_entries_per_block; - if ( idx_COW_table == 0) memset(lv_COW_table, 0, blksize_snap); + if (idx_COW_table == 0) + memset(lv_COW_table, 0, blksize_snap); /* sector offset into the on disk COW table */ - COW_table_sector_offset = (idx % COW_entries_per_pe) / (SECTOR_SIZE / sizeof(lv_COW_table_disk_t)); - - /* COW table block to write next */ - blocks[0] = (snap_pe_start + COW_table_sector_offset) >> (blksize_snap >> 10); + COW_table_sector_offset 
= + (idx % COW_entries_per_pe) / (SECTOR_SIZE / + sizeof(lv_COW_table_disk_t)); + + /* COW table block to write next */ + blocks[0] = + (snap_pe_start + + COW_table_sector_offset) >> (blksize_snap >> 10); /* store new COW_table entry */ be = lv_snap->lv_block_exception + idx; - if(_pv_get_number(vg, be->rdev_org, &pvn)) + if (_pv_get_number(vg, be->rdev_org, &pvn)) goto fail_pv_get_number; lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn); lv_COW_table[idx_COW_table].pv_org_rsector = - cpu_to_le64(be->rsector_org); - if(_pv_get_number(vg, snap_phys_dev, &pvn)) + cpu_to_le64(be->rsector_org); + if (_pv_get_number(vg, snap_phys_dev, &pvn)) goto fail_pv_get_number; lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn); lv_COW_table[idx_COW_table].pv_snap_rsector = - cpu_to_le64(be->rsector_new); + cpu_to_le64(be->rsector_new); COW_table_iobuf->length = blksize_snap; /* COW_table_iobuf->nr_pages = 1; */ @@ -672,36 +694,42 @@ /* initialization of next COW exception table block with zeroes */ end_of_table = idx % COW_entries_per_pe == COW_entries_per_pe - 1; - if (idx_COW_table % COW_entries_per_block == COW_entries_per_block - 1 || end_of_table) - { + if (idx_COW_table % COW_entries_per_block == + COW_entries_per_block - 1 || end_of_table) { /* don't go beyond the end */ - if (idx + 1 >= lv_snap->lv_remap_end) goto out; + if (idx + 1 >= lv_snap->lv_remap_end) + goto out; memset(lv_COW_table, 0, blksize_snap); - if (end_of_table) - { + if (end_of_table) { idx++; - snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new; - snap_pe_start = lv_snap->lv_block_exception[idx - (idx % COW_entries_per_pe)].rsector_new - lv_snap->lv_chunk_size; + snap_phys_dev = + lv_snap->lv_block_exception[idx].rdev_new; + snap_pe_start = + lv_snap->lv_block_exception[idx - + (idx % + COW_entries_per_pe)]. + rsector_new - lv_snap->lv_chunk_size; blksize_snap = lvm_sectsize(snap_phys_dev); blocks[0] = snap_pe_start >> (blksize_snap >> 10); - } else blocks[0]++; + } else + blocks[0]++; if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev, - blocks, blksize_snap, lv_snap) != - blksize_snap) + blocks, blksize_snap, lv_snap) != + blksize_snap) goto fail_raw_write; } -out: + out: return 0; -fail_raw_write: + fail_raw_write: *reason = "write error"; return 1; -fail_pv_get_number: + fail_pv_get_number: *reason = "_pv_get_number failed"; return 1; } @@ -717,10 +745,12 @@ * to activate the snapshot and prevent this from happening. 
*/ -static void _disable_snapshot(vg_t *vg, lv_t *lv) { +static void _disable_snapshot(vg_t * vg, lv_t * lv) +{ const char *err; - lv->lv_block_exception[0].rsector_org = LVM_SNAPSHOT_DROPPED_SECTOR; - if(_write_COW_table_block(vg, lv, 0, &err) < 0) { + lv->lv_block_exception[0].rsector_org = + LVM_SNAPSHOT_DROPPED_SECTOR; + if (_write_COW_table_block(vg, lv, 0, &err) < 0) { printk(KERN_ERR "%s -- couldn't disable snapshot: %s\n", lvm_name, err); } --- linux/drivers/md/lvm-fs.c.orig Tue Jan 7 15:50:31 2003 +++ linux/drivers/md/lvm-fs.c Thu Mar 6 20:34:36 2003 @@ -59,9 +59,9 @@ static int _proc_read_global(char *page, char **start, off_t off, int count, int *eof, void *data); -static int _vg_info(vg_t *vg_ptr, char *buf); -static int _lv_info(vg_t *vg_ptr, lv_t *lv_ptr, char *buf); -static int _pv_info(pv_t *pv_ptr, char *buf); +static int _vg_info(vg_t * vg_ptr, char *buf); +static int _lv_info(vg_t * vg_ptr, lv_t * lv_ptr, char *buf); +static int _pv_info(pv_t * pv_ptr, char *buf); static void _show_uuid(const char *src, char *b, char *e); @@ -78,65 +78,72 @@ /* inline functions */ /* public interface */ -void __init lvm_init_fs() { +void __init lvm_init_fs() +{ struct proc_dir_entry *pde; /* User-space has already registered this */ #if 0 - lvm_devfs_handle = devfs_register( - 0 , "lvm", 0, LVM_CHAR_MAJOR, 0, - S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, - &lvm_chr_fops, NULL); + lvm_devfs_handle = devfs_register(0, "lvm", 0, LVM_CHAR_MAJOR, 0, + S_IFCHR | S_IRUSR | S_IWUSR | + S_IRGRP, &lvm_chr_fops, NULL); #endif lvm_proc_dir = create_proc_entry(LVM_DIR, S_IFDIR, &proc_root); if (lvm_proc_dir) { - lvm_proc_vg_subdir = create_proc_entry(LVM_VG_SUBDIR, S_IFDIR, - lvm_proc_dir); + lvm_proc_vg_subdir = + create_proc_entry(LVM_VG_SUBDIR, S_IFDIR, + lvm_proc_dir); pde = create_proc_entry(LVM_GLOBAL, S_IFREG, lvm_proc_dir); - if ( pde != NULL) pde->read_proc = _proc_read_global; + if (pde != NULL) + pde->read_proc = _proc_read_global; } } -void lvm_fin_fs() { +void lvm_fin_fs() +{ #if 0 - devfs_unregister (lvm_devfs_handle); + devfs_unregister(lvm_devfs_handle); #endif remove_proc_entry(LVM_GLOBAL, lvm_proc_dir); remove_proc_entry(LVM_VG_SUBDIR, lvm_proc_dir); remove_proc_entry(LVM_DIR, &proc_root); } -void lvm_fs_create_vg(vg_t *vg_ptr) { +void lvm_fs_create_vg(vg_t * vg_ptr) +{ struct proc_dir_entry *pde; if (!vg_ptr) return; vg_devfs_handle[vg_ptr->vg_number] = - devfs_mk_dir(0, vg_ptr->vg_name, NULL); + devfs_mk_dir(0, vg_ptr->vg_name, NULL); - ch_devfs_handle[vg_ptr->vg_number] = devfs_register( - vg_devfs_handle[vg_ptr->vg_number] , "group", - DEVFS_FL_DEFAULT, LVM_CHAR_MAJOR, vg_ptr->vg_number, - S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, - &lvm_chr_fops, NULL); + ch_devfs_handle[vg_ptr->vg_number] = + devfs_register(vg_devfs_handle[vg_ptr->vg_number], "group", + DEVFS_FL_DEFAULT, LVM_CHAR_MAJOR, + vg_ptr->vg_number, + S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, + &lvm_chr_fops, NULL); vg_ptr->vg_dir_pde = create_proc_entry(vg_ptr->vg_name, S_IFDIR, lvm_proc_vg_subdir); - if((pde = create_proc_entry("group", S_IFREG, vg_ptr->vg_dir_pde))) { + if ((pde = + create_proc_entry("group", S_IFREG, vg_ptr->vg_dir_pde))) { pde->read_proc = _proc_read_vg; pde->data = vg_ptr; } vg_ptr->lv_subdir_pde = - create_proc_entry(LVM_LV_SUBDIR, S_IFDIR, vg_ptr->vg_dir_pde); + create_proc_entry(LVM_LV_SUBDIR, S_IFDIR, vg_ptr->vg_dir_pde); vg_ptr->pv_subdir_pde = - create_proc_entry(LVM_PV_SUBDIR, S_IFDIR, vg_ptr->vg_dir_pde); + create_proc_entry(LVM_PV_SUBDIR, S_IFDIR, vg_ptr->vg_dir_pde); } -void lvm_fs_remove_vg(vg_t 
*vg_ptr) { +void lvm_fs_remove_vg(vg_t * vg_ptr) +{ int i; if (!vg_ptr) @@ -146,18 +153,20 @@ ch_devfs_handle[vg_ptr->vg_number] = NULL; /* remove lv's */ - for(i = 0; i < vg_ptr->lv_max; i++) - if(vg_ptr->lv[i]) lvm_fs_remove_lv(vg_ptr, vg_ptr->lv[i]); + for (i = 0; i < vg_ptr->lv_max; i++) + if (vg_ptr->lv[i]) + lvm_fs_remove_lv(vg_ptr, vg_ptr->lv[i]); /* must not remove directory before leaf nodes */ devfs_unregister(vg_devfs_handle[vg_ptr->vg_number]); vg_devfs_handle[vg_ptr->vg_number] = NULL; /* remove pv's */ - for(i = 0; i < vg_ptr->pv_max; i++) - if(vg_ptr->pv[i]) lvm_fs_remove_pv(vg_ptr, vg_ptr->pv[i]); + for (i = 0; i < vg_ptr->pv_max; i++) + if (vg_ptr->pv[i]) + lvm_fs_remove_pv(vg_ptr, vg_ptr->pv[i]); - if(vg_ptr->vg_dir_pde) { + if (vg_ptr->vg_dir_pde) { remove_proc_entry(LVM_LV_SUBDIR, vg_ptr->vg_dir_pde); vg_ptr->lv_subdir_pde = NULL; @@ -172,13 +181,15 @@ } -static inline const char *_basename(const char *str) { +static inline const char *_basename(const char *str) +{ const char *name = strrchr(str, '/'); name = name ? name + 1 : str; return name; } -devfs_handle_t lvm_fs_create_lv(vg_t *vg_ptr, lv_t *lv) { +devfs_handle_t lvm_fs_create_lv(vg_t * vg_ptr, lv_t * lv) +{ struct proc_dir_entry *pde; const char *name; @@ -187,21 +198,24 @@ name = _basename(lv->lv_name); - lv_devfs_handle[MINOR(lv->lv_dev)] = devfs_register( - vg_devfs_handle[vg_ptr->vg_number], name, - DEVFS_FL_DEFAULT, LVM_BLK_MAJOR, MINOR(lv->lv_dev), - S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP, - &lvm_blk_dops, NULL); - - if(vg_ptr->lv_subdir_pde && - (pde = create_proc_entry(name, S_IFREG, vg_ptr->lv_subdir_pde))) { + lv_devfs_handle[MINOR(lv->lv_dev)] = + devfs_register(vg_devfs_handle[vg_ptr->vg_number], name, + DEVFS_FL_DEFAULT, LVM_BLK_MAJOR, + MINOR(lv->lv_dev), + S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP, + &lvm_blk_dops, NULL); + + if (vg_ptr->lv_subdir_pde && + (pde = + create_proc_entry(name, S_IFREG, vg_ptr->lv_subdir_pde))) { pde->read_proc = _proc_read_lv; pde->data = lv; } return lv_devfs_handle[MINOR(lv->lv_dev)]; } -void lvm_fs_remove_lv(vg_t *vg_ptr, lv_t *lv) { +void lvm_fs_remove_lv(vg_t * vg_ptr, lv_t * lv) +{ if (!vg_ptr || !lv) return; @@ -209,51 +223,55 @@ devfs_unregister(lv_devfs_handle[MINOR(lv->lv_dev)]); lv_devfs_handle[MINOR(lv->lv_dev)] = NULL; - if(vg_ptr->lv_subdir_pde) { + if (vg_ptr->lv_subdir_pde) { const char *name = _basename(lv->lv_name); remove_proc_entry(name, vg_ptr->lv_subdir_pde); } } -static inline void _make_pv_name(const char *src, char *b, char *e) { +static inline void _make_pv_name(const char *src, char *b, char *e) +{ int offset = strlen(LVM_DIR_PREFIX); - if(strncmp(src, LVM_DIR_PREFIX, offset)) + if (strncmp(src, LVM_DIR_PREFIX, offset)) offset = 0; e--; src += offset; - while(*src && (b != e)) { + while (*src && (b != e)) { *b++ = (*src == '/') ? 
'_' : *src; src++; } *b = '\0'; } -void lvm_fs_create_pv(vg_t *vg_ptr, pv_t *pv) { +void lvm_fs_create_pv(vg_t * vg_ptr, pv_t * pv) +{ struct proc_dir_entry *pde; char name[NAME_LEN]; if (!vg_ptr || !pv) return; - if(!vg_ptr->pv_subdir_pde) + if (!vg_ptr->pv_subdir_pde) return; _make_pv_name(pv->pv_name, name, name + sizeof(name)); - if((pde = create_proc_entry(name, S_IFREG, vg_ptr->pv_subdir_pde))) { + if ((pde = + create_proc_entry(name, S_IFREG, vg_ptr->pv_subdir_pde))) { pde->read_proc = _proc_read_pv; pde->data = pv; } } -void lvm_fs_remove_pv(vg_t *vg_ptr, pv_t *pv) { +void lvm_fs_remove_pv(vg_t * vg_ptr, pv_t * pv) +{ char name[NAME_LEN]; if (!vg_ptr || !pv) return; - if(!vg_ptr->pv_subdir_pde) + if (!vg_ptr->pv_subdir_pde) return; _make_pv_name(pv->pv_name, name, name + sizeof(name)); @@ -262,7 +280,8 @@ static int _proc_read_vg(char *page, char **start, off_t off, - int count, int *eof, void *data) { + int count, int *eof, void *data) +{ int sz = 0; vg_t *vg_ptr = data; char uuid[NAME_LEN]; @@ -279,9 +298,11 @@ sz += sprintf(page + sz, "PV max: %u\n", vg_ptr->pv_max); sz += sprintf(page + sz, "PV current: %u\n", vg_ptr->pv_cur); sz += sprintf(page + sz, "PV active: %u\n", vg_ptr->pv_act); - sz += sprintf(page + sz, "PE size: %u\n", vg_ptr->pe_size / 2); + sz += + sprintf(page + sz, "PE size: %u\n", vg_ptr->pe_size / 2); sz += sprintf(page + sz, "PE total: %u\n", vg_ptr->pe_total); - sz += sprintf(page + sz, "PE allocated: %u\n", vg_ptr->pe_allocated); + sz += + sprintf(page + sz, "PE allocated: %u\n", vg_ptr->pe_allocated); _show_uuid(vg_ptr->vg_uuid, uuid, uuid + sizeof(uuid)); sz += sprintf(page + sz, "uuid: %s\n", uuid); @@ -290,7 +311,8 @@ } static int _proc_read_lv(char *page, char **start, off_t off, - int count, int *eof, void *data) { + int count, int *eof, void *data) +{ int sz = 0; lv_t *lv = data; @@ -301,7 +323,7 @@ sz += sprintf(page + sz, "number: %u\n", lv->lv_number); sz += sprintf(page + sz, "open: %u\n", lv->lv_open); sz += sprintf(page + sz, "allocation: %u\n", lv->lv_allocation); - if(lv->lv_stripes > 1) { + if (lv->lv_stripes > 1) { sz += sprintf(page + sz, "stripes: %u\n", lv->lv_stripes); sz += sprintf(page + sz, "stripesize: %u\n", @@ -314,7 +336,8 @@ } static int _proc_read_pv(char *page, char **start, off_t off, - int count, int *eof, void *data) { + int count, int *eof, void *data) +{ int sz = 0; pv_t *pv = data; char uuid[NAME_LEN]; @@ -329,7 +352,7 @@ sz += sprintf(page + sz, "PE total: %u\n", pv->pe_total); sz += sprintf(page + sz, "PE allocated: %u\n", pv->pe_allocated); sz += sprintf(page + sz, "device: %02u:%02u\n", - MAJOR(pv->pv_dev), MINOR(pv->pv_dev)); + MAJOR(pv->pv_dev), MINOR(pv->pv_dev)); _show_uuid(pv->pv_uuid, uuid, uuid + sizeof(uuid)); sz += sprintf(page + sz, "uuid: %s\n", uuid); @@ -337,13 +360,15 @@ return sz; } -static int _proc_read_global(char *page, char **start, off_t pos, int count, - int *eof, void *data) { +static int _proc_read_global(char *page, char **start, off_t pos, + int count, int *eof, void *data) +{ #define LVM_PROC_BUF ( i == 0 ? 
dummy_buf : &buf[sz]) - int c, i, l, p, v, vg_counter, pv_counter, lv_counter, lv_open_counter, - lv_open_total, pe_t_bytes, hash_table_bytes, lv_block_exception_t_bytes, seconds; + int c, i, l, p, v, vg_counter, pv_counter, lv_counter, + lv_open_counter, lv_open_total, pe_t_bytes, hash_table_bytes, + lv_block_exception_t_bytes, seconds; static off_t sz; off_t sz_last; static char *buf = NULL; @@ -359,12 +384,12 @@ lvm_name, pos, count); #endif - if(pos != 0 && buf != NULL) + if (pos != 0 && buf != NULL) goto out; - sz_last = vg_counter = pv_counter = lv_counter = lv_open_counter = \ - lv_open_total = pe_t_bytes = hash_table_bytes = \ - lv_block_exception_t_bytes = 0; + sz_last = vg_counter = pv_counter = lv_counter = lv_open_counter = + lv_open_total = pe_t_bytes = hash_table_bytes = + lv_block_exception_t_bytes = 0; /* get some statistics */ for (v = 0; v < ABS_MAX_VG; v++) { @@ -374,14 +399,26 @@ lv_counter += vg_ptr->lv_cur; if (vg_ptr->lv_cur > 0) { for (l = 0; l < vg[v]->lv_max; l++) { - if ((lv_ptr = vg_ptr->lv[l]) != NULL) { - pe_t_bytes += lv_ptr->lv_allocated_le; - hash_table_bytes += lv_ptr->lv_snapshot_hash_table_size; - if (lv_ptr->lv_block_exception != NULL) - lv_block_exception_t_bytes += lv_ptr->lv_remap_end; + if ((lv_ptr = + vg_ptr->lv[l]) != NULL) { + pe_t_bytes += + lv_ptr-> + lv_allocated_le; + hash_table_bytes += + lv_ptr-> + lv_snapshot_hash_table_size; + if (lv_ptr-> + lv_block_exception != + NULL) + lv_block_exception_t_bytes + += + lv_ptr-> + lv_remap_end; if (lv_ptr->lv_open > 0) { lv_open_counter++; - lv_open_total += lv_ptr->lv_open; + lv_open_total += + lv_ptr-> + lv_open; } } } @@ -403,8 +440,7 @@ 2nd to fill the malloced buffer */ for (i = 0; i < 2; i++) { sz = 0; - sz += sprintf(LVM_PROC_BUF, - "LVM " + sz += sprintf(LVM_PROC_BUF, "LVM " #ifdef MODULE "module" #else @@ -422,8 +458,7 @@ lv_open_counter == 1 ? "" : "s"); if (lv_open_total > 0) sz += sprintf(LVM_PROC_BUF, - " %d times)\n", - lv_open_total); + " %d times)\n", lv_open_total); else sz += sprintf(LVM_PROC_BUF, ")"); sz += sprintf(LVM_PROC_BUF, @@ -431,7 +466,8 @@ vg_counter * sizeof(vg_t) + pv_counter * sizeof(pv_t) + lv_counter * sizeof(lv_t) + - pe_t_bytes + hash_table_bytes + lv_block_exception_t_bytes + sz_last, + pe_t_bytes + hash_table_bytes + + lv_block_exception_t_bytes + sz_last, lvm_iop_version); seconds = CURRENT_TIME - loadtime; @@ -445,46 +481,70 @@ } sz += sprintf(LVM_PROC_BUF, "%d:%02d:%02d active\n", (seconds % 86400) / 3600, - (seconds % 3600) / 60, - seconds % 60); + (seconds % 3600) / 60, seconds % 60); if (vg_counter > 0) { for (v = 0; v < ABS_MAX_VG; v++) { /* volume group */ if ((vg_ptr = vg[v]) != NULL) { - sz += _vg_info(vg_ptr, LVM_PROC_BUF); + sz += + _vg_info(vg_ptr, LVM_PROC_BUF); /* physical volumes */ sz += sprintf(LVM_PROC_BUF, "\n PV%s ", - vg_ptr->pv_cur == 1 ? ": " : "s:"); + vg_ptr->pv_cur == + 1 ? ": " : "s:"); c = 0; - for (p = 0; p < vg_ptr->pv_max; p++) { - if ((pv_ptr = vg_ptr->pv[p]) != NULL) { - sz += _pv_info(pv_ptr, LVM_PROC_BUF); + for (p = 0; p < vg_ptr->pv_max; + p++) { + if ((pv_ptr = + vg_ptr->pv[p]) != + NULL) { + sz += + _pv_info + (pv_ptr, + LVM_PROC_BUF); c++; - if (c < vg_ptr->pv_cur) - sz += sprintf(LVM_PROC_BUF, - "\n "); + if (c < + vg_ptr->pv_cur) + sz += + sprintf + (LVM_PROC_BUF, + "\n "); } } /* logical volumes */ sz += sprintf(LVM_PROC_BUF, "\n LV%s ", - vg_ptr->lv_cur == 1 ? ": " : "s:"); + vg_ptr->lv_cur == + 1 ? 
": " : "s:"); c = 0; - for (l = 0; l < vg_ptr->lv_max; l++) { - if ((lv_ptr = vg_ptr->lv[l]) != NULL) { - sz += _lv_info(vg_ptr, lv_ptr, LVM_PROC_BUF); + for (l = 0; l < vg_ptr->lv_max; + l++) { + if ((lv_ptr = + vg_ptr->lv[l]) != + NULL) { + sz += + _lv_info + (vg_ptr, + lv_ptr, + LVM_PROC_BUF); c++; - if (c < vg_ptr->lv_cur) - sz += sprintf(LVM_PROC_BUF, - "\n "); + if (c < + vg_ptr->lv_cur) + sz += + sprintf + (LVM_PROC_BUF, + "\n "); } } - if (vg_ptr->lv_cur == 0) sz += sprintf(LVM_PROC_BUF, "none"); + if (vg_ptr->lv_cur == 0) + sz += + sprintf(LVM_PROC_BUF, + "none"); sz += sprintf(LVM_PROC_BUF, "\n"); } } @@ -495,14 +555,15 @@ unlock_kernel(); if (buf == NULL) { sz = 0; - return sprintf(page, "%s - vmalloc error at line %d\n", + return sprintf(page, + "%s - vmalloc error at line %d\n", lvm_name, __LINE__); } } sz_last = sz; } - out: + out: if (pos > sz - 1) { lock_kernel(); vfree(buf); @@ -522,11 +583,13 @@ /* * provide VG info for proc filesystem use (global) */ -static int _vg_info(vg_t *vg_ptr, char *buf) { +static int _vg_info(vg_t * vg_ptr, char *buf) +{ int sz = 0; char inactive_flag = ' '; - if (!(vg_ptr->vg_status & VG_ACTIVE)) inactive_flag = 'I'; + if (!(vg_ptr->vg_status & VG_ACTIVE)) + inactive_flag = 'I'; sz = sprintf(buf, "\nVG: %c%s [%d PV, %d LV/%d open] " " PE Size: %d KB\n" @@ -537,13 +600,13 @@ vg_ptr->pv_cur, vg_ptr->lv_cur, vg_ptr->lv_open, - vg_ptr->pe_size >> 1, + vg_ptr->pe_size >> 1, vg_ptr->pe_size * vg_ptr->pe_total >> 1, vg_ptr->pe_total, vg_ptr->pe_allocated * vg_ptr->pe_size >> 1, - vg_ptr->pe_allocated, + vg_ptr->pe_allocated, (vg_ptr->pe_total - vg_ptr->pe_allocated) * - vg_ptr->pe_size >> 1, + vg_ptr->pe_size >> 1, vg_ptr->pe_total - vg_ptr->pe_allocated); return sz; } @@ -552,10 +615,11 @@ /* * provide LV info for proc filesystem use (global) */ -static int _lv_info(vg_t *vg_ptr, lv_t *lv_ptr, char *buf) { +static int _lv_info(vg_t * vg_ptr, lv_t * lv_ptr, char *buf) +{ int sz = 0; char inactive_flag = 'A', allocation_flag = ' ', - stripes_flag = ' ', rw_flag = ' ', *basename; + stripes_flag = ' ', rw_flag = ' ', *basename; if (!(lv_ptr->lv_status & LV_ACTIVE)) inactive_flag = 'I'; @@ -568,35 +632,33 @@ stripes_flag = 'L'; if (lv_ptr->lv_stripes > 1) stripes_flag = 'S'; - sz += sprintf(buf+sz, + sz += sprintf(buf + sz, "[%c%c%c%c", inactive_flag, - rw_flag, - allocation_flag, - stripes_flag); + rw_flag, allocation_flag, stripes_flag); if (lv_ptr->lv_stripes > 1) - sz += sprintf(buf+sz, "%-2d", - lv_ptr->lv_stripes); + sz += sprintf(buf + sz, "%-2d", lv_ptr->lv_stripes); else - sz += sprintf(buf+sz, " "); + sz += sprintf(buf + sz, " "); /* FIXME: use _basename */ basename = strrchr(lv_ptr->lv_name, '/'); - if ( basename == 0) basename = lv_ptr->lv_name; - else basename++; - sz += sprintf(buf+sz, "] %-25s", basename); + if (basename == 0) + basename = lv_ptr->lv_name; + else + basename++; + sz += sprintf(buf + sz, "] %-25s", basename); if (strlen(basename) > 25) - sz += sprintf(buf+sz, + sz += sprintf(buf + sz, "\n "); - sz += sprintf(buf+sz, "%9d /%-6d ", + sz += sprintf(buf + sz, "%9d /%-6d ", lv_ptr->lv_size >> 1, lv_ptr->lv_size / vg_ptr->pe_size); if (lv_ptr->lv_open == 0) - sz += sprintf(buf+sz, "close"); + sz += sprintf(buf + sz, "close"); else - sz += sprintf(buf+sz, "%dx open", - lv_ptr->lv_open); + sz += sprintf(buf + sz, "%dx open", lv_ptr->lv_open); return sz; } @@ -605,7 +667,8 @@ /* * provide PV info for proc filesystem use (global) */ -static int _pv_info(pv_t *pv, char *buf) { +static int _pv_info(pv_t * pv, char *buf) +{ int sz = 0; 
char inactive_flag = 'A', allocation_flag = ' '; char *pv_name = NULL; @@ -615,9 +678,11 @@ allocation_flag = 'A'; if (!(pv->pv_allocatable & PV_ALLOCATABLE)) allocation_flag = 'N'; - pv_name = strchr(pv->pv_name+1,'/'); - if ( pv_name == 0) pv_name = pv->pv_name; - else pv_name++; + pv_name = strchr(pv->pv_name + 1, '/'); + if (pv_name == 0) + pv_name = pv->pv_name; + else + pv_name++; sz = sprintf(buf, "[%c%c] %-21s %8d /%-6d " "%8d /%-6d %8d /%-6d", @@ -629,17 +694,17 @@ pv->pe_allocated * pv->pe_size >> 1, pv->pe_allocated, (pv->pe_total - pv->pe_allocated) * - pv->pe_size >> 1, - pv->pe_total - pv->pe_allocated); + pv->pe_size >> 1, pv->pe_total - pv->pe_allocated); return sz; } -static void _show_uuid(const char *src, char *b, char *e) { +static void _show_uuid(const char *src, char *b, char *e) +{ int i; e--; - for(i = 0; *src && (b != e); i++) { - if(i && !(i & 0x3)) + for (i = 0; *src && (b != e); i++) { + if (i && !(i & 0x3)) *b++ = '-'; *b++ = *src++; }
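
Not part of the patch: a minimal user-space sketch, under assumed values, of the chunk-alignment arithmetic that lvm_snapshot_remap_block() (reformatted in the lvm-snap.c hunks above) performs before its exception-table lookup. The chunk size, PE start, request sector and remapped sector below are made-up example numbers, not values taken from the driver.

/* Sketch only: mirrors the pe_off/pe_adjustment arithmetic shown above. */
#include <stdio.h>

int main(void)
{
	unsigned long chunk_size = 64;     /* sectors per snapshot chunk (assumed) */
	unsigned long pe_start = 100;      /* PE start sector on the PV (assumed) */
	unsigned long org_sector = 455;    /* sector of the incoming request (assumed) */
	unsigned long rsector_new = 10240; /* rsector_new of a matching exception (assumed) */

	/* the chunk grid is anchored at pe_off within the PE, not at sector 0 */
	unsigned long pe_off = pe_start % chunk_size;
	unsigned long pe_adjustment = (org_sector - pe_off) % chunk_size;
	unsigned long org_start = org_sector - pe_adjustment;

	printf("exception lookup key: chunk start %lu (offset %lu into the chunk)\n",
	       org_start, pe_adjustment);
	printf("remapped sector if the chunk was copied: %lu\n",
	       rsector_new + pe_adjustment);
	return 0;
}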
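
Likewise illustrative only: the hash-table sizing loop in lvm_snapshot_alloc_hash_table() (just reindented above) clears the lowest set bit until a single bit remains, i.e. it rounds the bucket count down to a power of two so that buckets - 1 can be stored as lv_snapshot_hash_mask. A stand-alone demonstration with an arbitrary starting count:

#include <stdio.h>

/* Round a bucket count down to a power of two, as the kernel loop does. */
static unsigned long round_down_pow2(unsigned long buckets)
{
	while (buckets & (buckets - 1))
		buckets &= (buckets - 1);	/* clear the lowest set bit */
	return buckets;
}

int main(void)
{
	unsigned long buckets = 1000;	/* arbitrary example count */

	buckets = round_down_pow2(buckets);
	printf("buckets = %lu, hash mask = 0x%lx\n", buckets, buckets - 1);
	/* prints: buckets = 512, hash mask = 0x1ff */
	return 0;
}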
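
Finally, a user-space rendering of the _show_uuid() loop at the end of the lvm-fs.c hunk: a '-' separator is emitted before every fourth character of the raw UUID, within the caller-supplied [b, e) buffer. The trailing NUL, the wrapper name and the sample input are assumptions added here to keep the sketch self-contained; the tail of that function is not shown in the hunk above.

#include <stdio.h>

static void show_uuid(const char *src, char *b, char *e)
{
	int i;

	e--;					/* keep one byte for the terminator */
	for (i = 0; *src && (b != e); i++) {
		if (i && !(i & 0x3))
			*b++ = '-';		/* dash before every 4th character */
		*b++ = *src++;
	}
	*b = '\0';				/* assumed terminator, not shown in the hunk */
}

int main(void)
{
	char out[64];

	show_uuid("ABCDEFGHIJKLMNOP", out, out + sizeof(out));
	printf("%s\n", out);			/* ABCD-EFGH-IJKL-MNOP */
	return 0;
}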