
Linux multipath questions

Published: 2023-09-10 16:40:23

1. Multipath aggregation on RHEL 5.9: how can I tell whether the configuration and the path aggregation succeeded? (The storage is an HP EVA4400.)

It looks like the configuration succeeded, and the earlier reply was quite professional.
Normally, after multipath is configured on Linux, fdisk -l still shows the duplicate disks: you should see many /dev/sd* devices plus the newly created /dev/dm-* devices (each corresponding to an mpath* map). This is different from Windows, where, as I recall, the duplicate disks are no longer visible once multipathing is configured.
If the array presents 7 LUNs there should be 7 dm-* devices, so it is odd that you are seeing 8.
Also note: you should use the /dev/mapper/mpath* devices (the devices virtualized by multipath) for partitioning and other operations. The /dev/dm-* nodes are for the software's internal use and should not be used directly.
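As a quick check (a minimal sketch; the map name mpath0 is only an example), count the aggregated maps and look at the member paths of one of them:
# multipath -ll | grep -c "^mpath"    #number of aggregated maps; it should normally equal the LUN count
# multipath -ll mpath0                #one map with its member paths, e.g. 2 or 4 [active][ready] entries
# ls /dev/mapper/mpath*               #these are the devices to partition and use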

2. What is multipath storage on Linux?

Viewing HDS storage multipath on Linux
Determine, in Red Hat, which storage space needs to be allocated. In this example the space to be allocated is multipath storage presented to the servers from an HDS AMS2000: sddlmad is the space to be allocated on ycdb1, and sddlmah is the space to be allocated on ycdb2. Details follow:
Check the environment
# rpm -qa|grep device-mapper
device-mapper-event-1.02.32-1.el5
device-mapper-multipath-0.4.7-30.el5
device-mapper-1.02.32-1.el5
# rpm -qa|grep lvm2
lvm2-2.02.46-8.el5
Check the space
#fdisk -l
Disk /dev/sddlmad: 184.2 GB, 184236900352 bytes
255 heads, 63 sectors/track, 22398 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

Disk /dev/sddlmah: 184.2 GB, 184236900352 bytes
255 heads, 63 sectors/track, 22398 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Check the storage
#cd /opt/DynamicLinkManager/bin/
#./dlnkmgr view -lu
Product : AMS
SerialNumber : 83041424
LUs : 8
iLU HDevName Device PathID Status
0000 sddlmaa /dev/sdb 000000 Online
/dev/sdj 000008 Online
/dev/sdr 000016 Online
/dev/sdz 000017 Online

0001 sddlmab /dev/sdc 000001 Online
/dev/sdk 000009 Online
/dev/sds 000018 Online
/dev/sdaa 000019 Online
0002 sddlmac /dev/sdd 000002 Online
/dev/sdl 000010 Online
/dev/sdt 000020 Online
/dev/sdab 000021 Online
0003 sddlmad /dev/sde 000003 Online
/dev/sdm 000011 Online
/dev/sdu 000022 Online
/dev/sdac 000023 Online
0004 sddlmae /dev/sdf 000004 Online
/dev/sdn 000012 Online
/dev/sdv 000024 Online
/dev/sdad 000025 Online
0005 sddlmaf /dev/sdg 000005 Online
/dev/sdo 000013 Online
/dev/sdw 000026 Online
/dev/sdae 000027 Online
0006 sddlmag /dev/sdh 000006 Online
/dev/sdp 000014 Online
/dev/sdx 000028 Online
/dev/sdaf 000029 Online
0007 sddlmah /dev/sdi 000007 Online
/dev/sdq 000015 Online
/dev/sdy 000030 Online
/dev/sdag 000031 Online
##############################################################
4. Modifying lvm.conf
To use LVM correctly, its filter needs to be modified:
#cd /etc/lvm
#vi lvm.conf
# By default we accept every block device
# filter = [ "a/.*/" ]
filter = [ "a|sddlm[a-p][a-p]|.*|","r|dev/sd|" ]
Example:

[root@bsrunbak etc]# ls -l lvm*

[root@bsrunbak etc]# cd lvm
[root@bsrunbak lvm]# ls
archive backup cache lvm.conf
[root@bsrunbak lvm]# more lvm.conf

[root@bsrunbak lvm]# pvs

Last login: Fri Jul 10 11:17:21 2015 from 172.17.99.198
[root@bsrunserver1 ~]#
[root@bsrunserver1 ~]#
[root@bsrunserver1 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda4 30G 8.8G 20G 32% /
tmpfs 95G 606M 94G 1% /dev/shm
/dev/sda2 194M 33M 151M 18% /boot
/dev/sda1 200M 260K 200M 1% /boot/efi
/dev/mapper/datavg-oraclelv
50G 31G 17G 65% /oracle
172.16.110.25:/Tbackup
690G 553G 102G 85% /Tbackup
/dev/mapper/tmpvg-oradatalv
345G 254G 74G 78% /oradata
/dev/mapper/datavg-lvodc
5.0G 665M 4.1G 14% /odc
[root@bsrunserver1 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda5 datavg lvm2 a-- 208.06g 153.06g
/dev/sddlmba tmpvg lvm2 a-- 200.00g 49.99g
/dev/sddlmbb tmpvg lvm2 a-- 200.00g 0
[root@bsrunserver1 ~]# cd /etc/lvm
[root@bsrunserver1 lvm]# more lvm.conf
# Don't have more than one filter line active at once: only one gets
used.

# Run vgscan after you change this parameter to ensure that
# the cache file gets regenerated (see below).
# If it doesn't do what you expect, check the output of 'vgscan -vvvv'.

# By default we accept every block device:
# filter = [ "a/.*/" ]

# Exclude the cdrom drive
# filter = [ "r|/dev/cdrom|" ]

# When testing I like to work with just loopback devices:
# filter = [ "a/loop/", "r/.*/" ]

# Or maybe all loops and ide drives except hdc:
# filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]

# Use anchors if you want to be really specific
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
filter = [ "a|/dev/sddlm.*|", "a|^/dev/sda5$|", "r|.*|" ]

[root@bsrunserver1 lvm]# df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/sda4 30963708 9178396 20212448 32% /
tmpfs 99105596 620228 98485368 1% /dev/shm
/dev/sda2 198337 33546 154551 18% /boot
/dev/sda1 204580 260 204320 1% /boot/efi
/dev/mapper/datavg-oraclelv
51606140 31486984 17497716 65% /oracle
172.16.110.25:/Tbackup
722486368 579049760 106736448 85% /Tbackup
/dev/mapper/tmpvg-oradatalv
361243236 266027580 76865576 78% /oradata
/dev/mapper/datavg-lvodc
5160576 680684 4217748 14% /odc
[root@bsrunserver1 lvm]#
You have new mail in /var/spool/mail/root
[root@bsrunserver1 lvm]#
[root@bsrunserver1 lvm]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda5 datavg lvm2 a-- 208.06g 153.06g
/dev/sddlmba tmpvg lvm2 a-- 200.00g 49.99g
/dev/sddlmbb tmpvg lvm2 a-- 200.00g 0
[root@bsrunserver1 lvm]#
Go to the HDLM command directory:
[root@bsrunbak lvm]# cd /opt/D*/bin
or
[root@bsrunbak bin]# pwd
/opt/DynamicLinkManager/bin
Display the HDS storage LUs:
[root@bsrunbak lvm]# ./dlnkmgr view -lu

3. How to use the Linux built-in multipath (DM)

I. What multipathing means
Multipathing, as the name suggests, means there is more than one path to choose from. In a SAN or IP-SAN environment, fibre-channel switches are added between the hosts and the storage, which raises the switching speed and efficiency between them; a single path is neither sufficient nor safe and stable. Multipathing exists to solve the problem of getting from the host to the disks in the fastest and most efficient way. It mainly provides the following functions:
Failover and failback
Load balancing of I/O traffic
Disk virtualization
Multipathing used to be something the storage vendors took care of, but later it was split out and sold separately.
The architecture is basically: storage array, multipath software, fibre-channel switch, host, host operating system.

II. multipath on Linux
1. Check whether it is already installed

[root@web2 multipath]# rpm -qa|grep device
device-mapper-1.02.39-1.el5
device-mapper-1.02.39-1.el5
device-mapper-multipath-0.4.7-34.el5
device-mapper-event-1.02.39-1.el5
[root@web2 multipath]#

2. Installation

rpm -ivh device-mapper-1.02.39-1.el5.rpm #install the device-mapper package
rpm -ivh device-mapper-multipath-0.4.7-34.el5.rpm #install the multipath package

Also enable it at boot:
chkconfig --level 2345 multipathd on #set multipathd to start automatically at boot
lsmod |grep dm_multipath #check that the installation is working

3. Configuration

# on the default devices.
blacklist {
devnode "^(ram|raw|loop|fd|md|dm-|sr|sr|scd|st)[0-9]*"
devnode "^hd[a-z]"
}
devices {
device {
vendor "HP"
path_grouping_policy multibus
features "1 queue_if_no_path"
path_checker readsector0
failback immediate
}
}

The complete configuration is as follows:

blacklist {
devnode "^sda"
}

defaults {
user_friendly_names no
}

multipaths {
multipath {
wwid
alias iscsi-dm0
path_grouping_policy multibus
path_checker tur
path_selector "round-robin 0"
}
multipath {
wwid
alias iscsi-dm1
path_grouping_policy multibus
path_checker tur
path_selector "round-robin 0"
}
multipath {
wwid
alias iscsi-dm2
path_grouping_policy multibus
path_checker tur
path_selector "round-robin 0"
}
multipath {
wwid
alias iscsi-dm3
path_grouping_policy multibus
path_checker tur
path_selector "round-robin 0"
}
}

devices {
device {
vendor "iSCSI-Enterprise"
proct "Virtual disk"
path_grouping_policy multibus
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
path_checker readsector0
path_selector "round-robin 0"
}
}
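After saving /etc/multipath.conf, the configuration can be applied and checked roughly as follows (a hedged sketch; the aliases shown are the ones defined above):
# service multipathd restart          #re-read the configuration
# multipath -F                        #flush the existing multipath maps
# multipath -v2                       #rebuild the maps from the current paths
# multipath -ll                       #verify the topology and the iscsi-dm0 ... iscsi-dm3 aliases
# ls /dev/mapper/                     #the aliased devices appear here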
4. Commands

[root@web2 ~]# multipath -h
multipath-tools v0.4.7 (03/12, 2006)
Usage: multipath [-v level] [-d] [-h|-l|-ll|-f|-F|-r]
[-p failover|multibus|group_by_serial|group_by_prio]
[device]

-v level verbosity level
0 no output
1 print created devmap names only
2 default verbosity
3 print debug information
-h print this usage text
-b file bindings file location
-d dry run, do not create or update devmaps
-l show multipath topology (sysfs and DM info)
-ll show multipath topology (maximum info)
-f flush a multipath device map
-F flush all multipath device maps
-r force devmap reload
-p policy force all maps to specified policy :
failover 1 path per priority group
multibus all paths in 1 priority group
group_by_serial 1 priority group per serial
group_by_prio 1 priority group per priority lvl
group_by_node_name 1 priority group per target node

device limit scope to the device's multipath
(udev-style $DEVNAME reference, eg /dev/sdb
or major:minor or a device map name)
[root@web2 ~]#

5. Starting and stopping

# /etc/init.d/multipathd start #start the multipath service
service multipathd start
service multipathd restart
service multipathd stop

6. How to obtain the WWID

1.
[root@vxfs01 ~]# cat /var/lib/multipath/bindings
# Multipath bindings, Version : 1.0
# NOTE: this file is automatically maintained by the multipath program.
# You should not need to edit this file in normal circumstances.
#
# Format:
# alias wwid
#
mpath0
mpath1
mpath2
mpath3
mpath4

2.
[root@vxfs01 ~]# multipath -v3 |grep 3600
sdb: uid = (callout)
sdc: uid = (callout)
sdd: uid = (callout)
sde: uid = (callout)
1:0:0:0 sdb 8:16 0 [undef][ready] DGC,RAI
1:0:1:0 sdc 8:32 1 [undef][ready] DGC,RAI
2:0:0:0 sdd 8:48 1 [undef][ready] DGC,RAI
2:0:1:0 sde 8:64 0 [undef][ready] DGC,RAI
Found matching wwid [] in bindings file.
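A further option (a sketch using the same scsi_id call that appears in the getuid_callout above; /dev/sdb is only an example path) is to query one path's WWID directly:
# /sbin/scsi_id -g -u -s /block/sdb    #prints the WWID of /dev/sdb (RHEL 5 sysfs-style syntax)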

More detailed write-ups:
http://zhumeng8337797.blog.163.com/blog/static/1007689142013416111534352/
http://blog.csdn.net/wuweilong/article/details/14184097
Official Red Hat documentation:
http://www.prudentwoo.com/wp-content/uploads/downloads/2013/11/Red_Hat_Enterprise_Linux-5-DM_Multipath-en-US.pdf
http://www.prudentwoo.com/wp-content/uploads/downloads/2013/11/Red_Hat_Enterprise_Linux-5-DM_Multipath-zh-CN.pdf
http://www.prudentwoo.com/wp-content/uploads/downloads/2013/11/Red_Hat_Enterprise_Linux-6-DM_Multipath-en-US.pdf
http://www.prudentwoo.com/wp-content/uploads/downloads/2013/11/Red_Hat_Enterprise_Linux-6-DM_Multipath-zh-CN.pdf

4. How to rename Linux multipath (mpath) devices

Simple multipath configuration on Linux
1. Enabling multipath:
(1) Start the multipathd service:
#service multipathd start
or
#/etc/init.d/multipathd start
(2) Edit the multipath configuration file /etc/multipath.conf:
a. By default every device is in multipath's blacklist, so even with the multipathd service started and the kernel module loaded, multipath will not aggregate any paths. Find the following 3 lines and comment them out (add a # at the start of each line):
#devnode_blacklist {
#    devnode "*"
#}
b. By default, after multipath creates a dm device it also creates a symbolic link under /dev/mapper/ named after the disk's WWID, pointing to that dm device. To get mpath* devices instead, enable the user_friendly_names option by removing the comments from the following 3 lines (delete the leading #):
defaults {
    user_friendly_names yes
}
(3) Restart the multipathd service (the service should be restarted after every change to multipath.conf).
(4) Scan the disks:
#multipath -v2
After running the command above, the aggregated dm devices appear in the system, and the corresponding device nodes are created under /dev/mapper/ and /dev/mpath/.
Check the multipath topology:
#multipath -ll
Another important file is /var/lib/multipath/bindings, which records the mapping between each disk's alias and its WWID. A typical entry looks like:
mpath0

(5) One thing to watch out for: multipath also creates dm devices for local disks, so the local disks should be added to the blacklist in multipath.conf, for example:
devnode_blacklist {
    wwid
    devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
    devnode "^hd[a-z]"
}
As shown above, a local disk can be blacklisted either by its WWID or by its device node name.
2. Fixing the names of multipath devices:
Bind each WWID to an alias one-to-one to fix the multipath device names. The aliased devices are created under /dev/mapper/, and it is those devices that should be used directly.
(1) All disk WWIDs can be obtained from /var/lib/multipath/bindings. Once an alias has been chosen for each disk, add the corresponding entries to the multipaths section of /etc/multipath.conf. For example, to name the disk with one WWID etl01 and the disk with another WWID etl02, the configuration looks like this:
multipaths {
    multipath {
        wwid
        alias etl01
    }
    multipath {
        wwid
        alias etl02
    }
}
(2) After the configuration is complete, restart the multipathd service and clear the existing multipath maps with:
#multipath -F
Then rescan the devices with multipath -v2; device files matching the aliases are created under /dev/mapper/.
#ls /dev/mapper/
control  etl01  etl02
(3) If several servers have exactly the same storage paths and you want the same disk to get the same device name on every server, configure the alias bindings on one server and then copy the contents of the multipaths { } section to the other servers; the devices under /dev/mapper/ will then be consistent across all of them.
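For example (a sketch only; the host name server2 is hypothetical, and you may prefer to copy just the multipaths { } block rather than the whole file):
# scp /etc/multipath.conf server2:/etc/multipath.conf    #copy the configuration to the second server
# ssh server2 "service multipathd restart; multipath -F; multipath -v2; ls /dev/mapper/"    #re-apply and verify there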

5. multipath: the underlying storage LUN has been enlarged; how do I grow the filesystem?

When a Linux server is connected to shared storage through multipath and a filesystem runs out of space, there are several ways to extend it:

1. Keep the existing PV: enlarge the original LUN on the array, then grow the LV and the filesystem.

2. Add a new PV to the VG, then grow the LV and the filesystem.

The steps below cover scenario 1 (although personally I would recommend scenario 2, adding a new PV):
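For reference, scenario 2 looks roughly like this (a hedged sketch; mpathN, datavg and oraclelv are hypothetical names, and resize2fs assumes an ext3/ext4 filesystem):
# multipath -ll                                  #confirm the new LUN shows up as a multipath device, e.g. mpathN
# pvcreate /dev/mapper/mpathN                    #initialize the new multipath device as a PV
# vgextend datavg /dev/mapper/mpathN             #add the new PV to the volume group
# lvextend -l +100%FREE /dev/datavg/oraclelv     #grow the LV, here taking all free space in the VG
# resize2fs /dev/datavg/oraclelv                 #grow the filesystem online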

Environment

If you have this specific scenario, you can use the following steps:

Note: if these LVs are part of a clustered VG, steps 1 and 2 need to be performed on all nodes.

1) Update block devices

Note: This step needs to be run against every sd device that maps to that LUN. When using multipath, there will be more than one; use multipath -ll to see which paths belong to each aggregated volume.

2) Update multipath device

Example: (see the command sketch after step 6)

3) Resize the physical volume, which will also resize the volume group

4) Resize your logical volume (the below command takes all available space in the vg)

5) Resize your filesystem

6) Verify vg, lv and filesystem extension has worked appropriately
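A hedged sketch of steps 1-6 (sdX, mpathN, testvg/testlv and an ext3/ext4 filesystem are all assumed placeholders; substitute your own devices and names):
# echo 1 > /sys/block/sdX/device/rescan          #1) rescan every sd path that maps to the resized LUN
# multipathd -k"resize map mpathN"               #2) tell multipathd that the multipath map has grown
# pvresize /dev/mapper/mpathN                    #3) resize the PV, which also resizes the VG
# lvextend -l +100%FREE /dev/testvg/testlv       #4) grow the LV with all available space in the VG
# resize2fs /dev/testvg/testlv                   #5) grow the filesystem
# vgs; lvs; df -h                                #6) verify that the VG, LV and filesystem have grown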

Simulate enlarging the LUN on the storage side so that testlv can be grown

Check the multipath status on the client

Rescan the storage on the client

Update the aggregated (multipath) device

Update the PV size

Update the LV size

Update the filesystem size

6. How to configure multipathing on a Linux system

Linux multipathing means that, besides a single host-to-disk connection, the host also reaches the same storage over additional connections (for example through the SAN or network), forming a one-to-many path relationship between host and storage. These multiple paths are what make disk virtualization possible.

1. Install the multipath packages:
device-mapper-1.02.67-2.el5
device-mapper-event-1.02.67-2.el5
device-mapper-multipath-0.4.7-48.el5
[root@RKDB01 Server]# rpm -ivh device-mapper-1.02.67-2.el5.x86_64.rpm
warning: device-mapper-1.02.67-2.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 37017186
Preparing...                ########################################### [100%]
package device-mapper-1.02.67-2.el5.x86_64 is already installed
[root@RKDB01 Server]# rpm -ivh device-mapper-event-1.02.67-2.el5.x86_64.rpm
warning: device-mapper-event-1.02.67-2.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 37017186
Preparing...                ########################################### [100%]
package device-mapper-event-1.02.67-2.el5.x86_64 is already installed
[root@RKDB01 Server]# rpm -ivh device-mapper-multipath-0.4.7-48.el5.x86_64.rpm
warning: device-mapper-multipath-0.4.7-48.el5.x86_64.rpm: Header V3 DSA signature: NOKEY, key ID 37017186
Preparing...                ########################################### [100%]
package device-mapper-multipath-0.4.7-48.el5.x86_64 is already installed
2. Enable it at boot and check that the packages are working:
chkconfig --level 345 multipathd on
lsmod |grep dm_multipath
[root@RKDB01 Server]# chkconfig --level 345 multipathd on
[root@RKDB01 Server]# lsmod |grep dm_multipath
dm_multipath 58969 0
scsi_dh 42561 1 dm_multipath
dm_mod 102417 4 dm_mirror,dm_multipath,dm_raid45,dm_log
[root@RKDB01 Server]#
3. Configure multipathd so that it works properly: edit /etc/multipath.conf and enable the following content:
defaults {
udev_dir /dev
polling_interval 10
selector "round-robin 0"
path_grouping_policy multibus
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
prio_callout none
path_checker readsector0
rr_min_io 100
max_fds 8192
rr_weight priorities
failback immediate
no_path_retry fail
user_friendly_names yes
}
blacklist {
wwid 26353900f02796769
devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
devnode "^hd[a-z]"
}
4. And comment out the following content:
#blacklist {
# devnode "*"
#}
#defaults {
# user_friendly_names yes
#}
5. After that, run the following commands to discover the multipath devices:
[root@RKDB01 Server]# modprobe dm-multipath
[root@RKDB01 Server]# multipath -F
[root@RKDB01 Server]# modprobe dm-round-robin
[root@RKDB01 Server]# service multipathd restart
Stopping the multipathd daemon: [ OK ]
Starting the multipathd daemon: [ OK ]
[root@RKDB01 Server]# multipath -v2
[root@RKDB01 Server]# multipath -v2
[root@RKDB01 Server]# multipath -ll
mpath1 () dm-0 TOYOU,NetStor_iSUM510
[size=3.3T][features=0][hwhandler=0][rw]
\_ round-robin 0 [prio=2][enabled]
\_ 1:0:0:0 sdb 8:16 [failed][ready]
\_ 1:0:1:0 sdc 8:32 [failed][ready]
[root@RKDB01 Server]#
6. After rebooting the server, the multipath information can be seen:
[root@RKDB01 ~]# ll /dev/mapper/
total 0
crw------- 1 root root 10, 60 11-05 22:35 control
brw-rw---- 1 root disk 253, 0 11-05 22:35 mpath1
brw-rw---- 1 root disk 253, 1 11-05 22:35 mpath2
[root@RKDB01 ~]# multipath -ll
mpath2 () dm-1 TOYOU,NetStor_iSUM510
[size=3.2T][features=0][hwhandler=0][rw]
\_ round-robin 0 [prio=2][active]
\_ 1:0:0:1 sdc 8:32 [active][ready]
\_ 1:0:1:1 sde 8:64 [active][ready]
mpath1 () dm-0 TOYOU,NetStor_iSUM510
[size=20G][features=0][hwhandler=0][rw]
\_ round-robin 0 [prio=2][active]
\_ 1:0:0:0 sdb 8:16 [active][ready]
\_ 1:0:1:0 sdd 8:48 [active][ready]
7. fdisk shows that two devices, dm-0 and dm-1, have been created; they are exactly the multipath devices built from sdc/sde and sdb/sdd above:
[root@RKDB01 ~]# fdisk -l
Disk /dev/sda: 299.4 GB, 299439751168 bytes
255 heads, 63 sectors/track, 36404 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 38 305203+ 83 Linux
/dev/sda2 39 13092 104856255 83 Linux
/dev/sda3 13093 19619 52428127+ 83 Linux
/dev/sda4 19620 36404 134825512+ 5 Extended
/dev/sda5 19620 26146 52428096 83 Linux
/dev/sda6 26147 28757 20972826 83 Linux
/dev/sda7 28758 30324 12586896 82 Linux swap / Solaris
/dev/sda8 30325 36404 48837568+ 83 Linux
Disk /dev/sdb: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdb doesn't contain a valid partition table
Disk /dev/sdc: 3568.4 GB, 3568429957120 bytes
255 heads, 63 sectors/track, 433836 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdc doesn't contain a valid partition table
Disk /dev/sdd: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdd doesn't contain a valid partition table
Disk /dev/sde: 3568.4 GB, 3568429957120 bytes
255 heads, 63 sectors/track, 433836 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sde doesn't contain a valid partition table
Disk /dev/dm-0: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/dm-0 doesn't contain a valid partition table
Disk /dev/dm-1: 3568.4 GB, 3568429957120 bytes
255 heads, 63 sectors/track, 433836 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/dm-1 doesn't contain a valid partition table
Disk /dev/sdf: 4009 MB, 4009754624 bytes
255 heads, 63 sectors/track, 487 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdf4 * 1 488 3915744+ b W95 FAT32
Partition 4 has different physical/logical endings:
phys=(486, 254, 63) logical=(487, 125, 22)
[root@RKDB01 ~]#
8. The multipath mapping information can also be seen in the /dev/mapper directory:
[root@RKDB01 ~]# ll /dev/mapper/
total 0
crw------- 1 root root 10, 60 11-06 00:49 control
brw-rw---- 1 root disk 253, 2 11-06 00:49 data-data001
brw-rw---- 1 root disk 253, 0 11-06 00:49 mpath1
brw-rw---- 1 root disk 253, 1 11-06 00:49 mpath2
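To actually put the aggregated devices to use, operate on /dev/mapper/mpath* rather than on /dev/dm-* or the individual /dev/sd* paths. A minimal sketch (the VG/LV names datavg2/datalv, the ext3 filesystem and the /data mount point are hypothetical):
# pvcreate /dev/mapper/mpath2                    #create a PV on the aggregated device
# vgcreate datavg2 /dev/mapper/mpath2            #create a volume group on it
# lvcreate -n datalv -l 100%FREE datavg2         #create an LV using all the space
# mkfs.ext3 /dev/datavg2/datalv                  #make a filesystem
# mount /dev/datavg2/datalv /data                #mount it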
