# for i in vdb vdc vdd vde; do parted -s /dev/$i mklabel gpt; parted -s /dev/$i mkpart $i 1 100%; parted -s /dev/$i set 1 lvm on; done
# parted -l | grep lvm
출력 예시:
1 1049kB 137GB 137GB vdd lvm 1 1049kB 137GB 137GB vdb lvm 1 1049kB 137GB 137GB vde lvm 1 1049kB 137GB 137GB vdc lvm
# pvcreate /dev/vdb1 /dev/vdc1 /dev/vdd1 /dev/vde1
출력 예시:
Physical volume "/dev/vdb1" successfully created. Physical volume "/dev/vdc1" successfully created. Physical volume "/dev/vdd1" successfully created. Physical volume "/dev/vde1" successfully created. Creating devices file /etc/lvm/devices/system.devices
# pvdisplay -s
출력 예시:
Device "/dev/vdb1" has a capacity of <128.00 GiB Device "/dev/vdc1" has a capacity of <128.00 GiB Device "/dev/vdd1" has a capacity of <128.00 GiB Device "/dev/vde1" has a capacity of <128.00 GiB
# vgcreate vg_raid /dev/vdb1 /dev/vdc1 /dev/vdd1 /dev/vde1
출력 예시:
Volume group "vg_raid" successfully created
# vgdisplay -s
출력 예시:
"vg_raid" 511.98 GiB [0 used / 511.98 GiB free]
# lvcreate --type raid0 -L 20G -n lv_raid0 vg_raid /dev/vdb1 /dev/vdc1
출력 예시:
Using default stripesize 64.00 KiB. Logical volume "lv_raid0" created.
# lvcreate --type raid1 -L 20G -n lv_raid1 vg_raid /dev/vdb1 /dev/vdc1
출력 예시:
Logical volume "lv_raid1" created.
# lvcreate --type raid5 -L 20G -n lv_raid5 vg_raid /dev/vdb1 /dev/vdc1 /dev/vdd1
출력 예시:
Using default stripesize 64.00 KiB. Logical volume "lv_raid5" created.
# lvcreate --type raid10 -L 20G -n lv_raid10 vg_raid /dev/vdb1 /dev/vdc1 /dev/vdd1 /dev/vde1
출력 예시:
Using default stripesize 64.00 KiB. Logical volume "lv_raid10" created.
# lvdisplay -m /dev/vg_raid/lv_raid0
출력 예시:
--- Logical volume --- LV Path /dev/vg_raid/lv_raid0 LV Name lv_raid0 VG Name vg_raid LV UUID a3QcAT-0jcx-59Fz-auJA-AB5a-KErm-MOD5Gb LV Write Access read/write LV Creation host, time KVM01, 2025-09-06 15:31:53 +0900 LV Status available # open 0 LV Size 20.00 GiB Current LE 5120 Segments 1 Allocation inherit Read ahead sectors auto - currently set to 512 Block device 252:2 --- Segments --- Logical extents 0 to 5119: Type raid0 Monitoring not monitored Raid Data LV 0 Logical volume lv_raid0_rimage_0 Logical extents 0 to 2559 Raid Data LV 1 Logical volume lv_raid0_rimage_1 Logical extents 0 to 2559
# lvs -a -o name,devices vg_raid | grep lv_raid0
출력 예시:
lv_raid0 lv_raid0_rimage_0(0),lv_raid0_rimage_1(0) [lv_raid0_rimage_0] /dev/vdb1(0) [lv_raid0_rimage_1] /dev/vdc1(0)
# lvdisplay -m /dev/vg_raid/lv_raid1
출력 예시:
--- Logical volume --- LV Path /dev/vg_raid/lv_raid1 LV Name lv_raid1 VG Name vg_raid LV UUID FU2uZl-yXSA-Ibey-n8gZ-UF4h-7tBG-gX8GgV LV Write Access read/write LV Creation host, time KVM01, 2025-09-06 15:32:26 +0900 LV Status available # open 0 LV Size 20.00 GiB Current LE 5120 Mirrored volumes 2 Segments 1 Allocation inherit Read ahead sectors auto - currently set to 256 Block device 252:7 --- Segments --- Logical extents 0 to 5119: Type raid1 Monitoring monitored Raid Data LV 0 Logical volume lv_raid1_rimage_0 Logical extents 0 to 5119 Raid Data LV 1 Logical volume lv_raid1_rimage_1 Logical extents 0 to 5119 Raid Metadata LV 0 lv_raid1_rmeta_0 Raid Metadata LV 1 lv_raid1_rmeta_1
# lvs -a -o name,devices vg_raid | grep lv_raid1_
출력 예시:
lv_raid1 lv_raid1_rimage_0(0),lv_raid1_rimage_1(0) [lv_raid1_rimage_0] /dev/vdb1(2561) [lv_raid1_rimage_1] /dev/vdc1(2561) [lv_raid1_rmeta_0] /dev/vdb1(2560) [lv_raid1_rmeta_1] /dev/vdc1(2560)
# lvdisplay -m /dev/vg_raid/lv_raid5
출력 예시:
--- Logical volume --- LV Path /dev/vg_raid/lv_raid5 LV Name lv_raid5 VG Name vg_raid LV UUID djmAxC-fh3X-mvru-IPoy-6gsx-RTox-isqmjc LV Write Access read/write LV Creation host, time KVM01, 2025-09-06 15:32:38 +0900 LV Status available # open 0 LV Size 20.00 GiB Current LE 5120 Segments 1 Allocation inherit Read ahead sectors auto - currently set to 768 Block device 252:14 --- Segments --- Logical extents 0 to 5119: Type raid5 Monitoring monitored Raid Data LV 0 Logical volume lv_raid5_rimage_0 Logical extents 0 to 2559 Raid Data LV 1 Logical volume lv_raid5_rimage_1 Logical extents 0 to 2559 Raid Data LV 2 Logical volume lv_raid5_rimage_2 Logical extents 0 to 2559 Raid Metadata LV 0 lv_raid5_rmeta_0 Raid Metadata LV 1 lv_raid5_rmeta_1 Raid Metadata LV 2 lv_raid5_rmeta_2
# lvs -a -o name,devices vg_raid | grep lv_raid5_
출력 예시:
lv_raid5 lv_raid5_rimage_0(0),lv_raid5_rimage_1(0),lv_raid5_rimage_2(0) [lv_raid5_rimage_0] /dev/vdb1(7682) [lv_raid5_rimage_1] /dev/vdc1(7682) [lv_raid5_rimage_2] /dev/vdd1(1) [lv_raid5_rmeta_0] /dev/vdb1(7681) [lv_raid5_rmeta_1] /dev/vdc1(7681) [lv_raid5_rmeta_2] /dev/vdd1(0)
# lvdisplay -m /dev/vg_raid/lv_raid10
출력 예시:
--- Logical volume --- LV Path /dev/vg_raid/lv_raid10 LV Name lv_raid10 VG Name vg_raid LV UUID fXRwoA-xFo6-O2Lt-x6Q9-V1SL-nCqP-jZe2vU LV Write Access read/write LV Creation host, time KVM01, 2025-09-06 15:32:49 +0900 LV Status available # open 0 LV Size 20.00 GiB Current LE 5120 Mirrored volumes 4 Segments 1 Allocation inherit Read ahead sectors auto - currently set to 1024 Block device 252:23 --- Segments --- Logical extents 0 to 5119: Type raid10 Monitoring monitored Raid Data LV 0 Logical volume lv_raid10_rimage_0 Logical extents 0 to 5119 Raid Data LV 1 Logical volume lv_raid10_rimage_1 Logical extents 0 to 5119 Raid Data LV 2 Logical volume lv_raid10_rimage_2 Logical extents 0 to 5119 Raid Data LV 3 Logical volume lv_raid10_rimage_3 Logical extents 0 to 5119 Raid Metadata LV 0 lv_raid10_rmeta_0 Raid Metadata LV 1 lv_raid10_rmeta_1 Raid Metadata LV 2 lv_raid10_rmeta_2 Raid Metadata LV 3 lv_raid10_rmeta_3
# lvs -a -o name,devices vg_raid | grep lv_raid10_
출력 예시:
lv_raid10 lv_raid10_rimage_0(0),lv_raid10_rimage_1(0),lv_raid10_rimage_2(0),lv_raid10_rimage_3(0) [lv_raid10_rimage_0] /dev/vdb1(10243) [lv_raid10_rimage_1] /dev/vdc1(10243) [lv_raid10_rimage_2] /dev/vdd1(2562) [lv_raid10_rimage_3] /dev/vde1(1) [lv_raid10_rmeta_0] /dev/vdb1(10242) [lv_raid10_rmeta_1] /dev/vdc1(10242) [lv_raid10_rmeta_2] /dev/vdd1(2561) [lv_raid10_rmeta_3] /dev/vde1(0)
# mkfs.ext4 /dev/vg_raid/lv_raid0
# mkfs.ext4 /dev/vg_raid/lv_raid1
# mkfs.ext4 /dev/vg_raid/lv_raid5
# mkfs.ext4 /dev/vg_raid/lv_raid10
# mkdir /mnt/{raid0,raid1,raid5,raid10}
# mount /dev/vg_raid/lv_raid0 /mnt/raid0
# mount /dev/vg_raid/lv_raid1 /mnt/raid1
# mount /dev/vg_raid/lv_raid5 /mnt/raid5
# mount /dev/vg_raid/lv_raid10 /mnt/raid10
# df -Th | grep raid
출력 예시:
/dev/mapper/vg_raid-lv_raid0 ext4 20G 24K 19G 1% /mnt/raid0 /dev/mapper/vg_raid-lv_raid1 ext4 20G 24K 19G 1% /mnt/raid1 /dev/mapper/vg_raid-lv_raid5 ext4 20G 24K 19G 1% /mnt/raid5 /dev/mapper/vg_raid-lv_raid10 ext4 20G 24K 19G 1% /mnt/raid10
# lvs -a -o name,devices vg_raid | grep lv_raid1_
출력 예시:
lv_raid1 lv_raid1_rimage_0(0),lv_raid1_rimage_1(0) [lv_raid1_rimage_0] /dev/vdb1(2561) [lv_raid1_rimage_1] /dev/vdc1(2561) [lv_raid1_rmeta_0] /dev/vdb1(2560) [lv_raid1_rmeta_1] /dev/vdc1(2560)
장애가 발생했다고 가정한 PV(/dev/vdb1)를 다른 PV로 교체합니다.
# lvconvert --replace /dev/vdb1 vg_raid/lv_raid1
# lvs -a -o name,devices vg_raid | grep lv_raid1_
출력 예시:
lv_raid1 lv_raid1_rimage_0(0),lv_raid1_rimage_1(0) [lv_raid1_rimage_0] /dev/vdd1(5123) [lv_raid1_rimage_1] /dev/vdc1(2561) [lv_raid1_rmeta_0] /dev/vdd1(5122) [lv_raid1_rmeta_1] /dev/vdc1(2560)
https://youtu.be/pt9qhawl8LY 1. 개요 리눅스 서버에서는 시스템 시간(OS 시간) 과 하드웨어 시간(RTC, Real-Time Clock) 을 모두 관리할 수 있습니다. 운영체제의…
https://youtu.be/iPdHGXh7DUg 1. 개요 서버 운영 시 시스템 시간이 올바르게 설정되어 있지 않으면 로그 분석, 모니터링,…
https://youtu.be/F06CS8Encr8 1. 개요 LVM 환경에서 캐시 LV까지 포함된 볼륨 그룹(VG) 을 깨끗하게 삭제하는 과정을 다룹니다. umount…
https://youtu.be/dxnMCfpe_f8 1. 개요 LVM Cache (lvconvert cache) 기능을 활용하여 저속 디스크(vdb1) 위에 생성한 LV를 고속 디스크(vdc1)기반 Cache Pool로 가속하는…
https://youtu.be/8MelvWg-1Js 1. 개요 LVM 환경에서 Thin Pool과 Thin Volume을 생성하고, ext4·XFS 파일시스템으로 마운트한 뒤, lvs 명령어로 사용 현황을 확인하는 과정을 다룹니다.…
https://youtu.be/8jrMVfosV6Y 1. 개요 LVM 환경에서 pvmove를 사용해 /dev/vdb1 → /dev/vdc1로 데이터를 무중단(온라인) 이관하는 과정을 다룹니다. 이관 전/후 LV–PV 매핑…