
VI. Networking

After the Docker service is installed, each host gets a NIC named docker0 by default, with the IP address 172.17.0.1/16, and three different network types are created.

	[root@gbase8c_private ~]# ifconfig docker0
	docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
			inet 172.17.0.1  netmask 255.255.0.0  broadcast 172.17.255.255
			ether 02:42:94:9c:bc:27  txqueuelen 0  (Ethernet)
			RX packets 0  bytes 0 (0.0 B)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 0  bytes 0 (0.0 B)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
	
	[root@gbase8c_private ~]# docker network ls
	NETWORK ID          NAME                DRIVER              SCOPE
	3eba936a2271        bridge              bridge              local
	19023a4d913b        host                host                local
	1a79176b52c5        none                null                local

1 Highly available web service with Docker and load balancing

1.1 Install and configure keepalived

  • 1.1.1 Install and configure on Server1
	ifconfig enp0s3:1 192.168.56.100 broadcast 192.168.56.255 netmask 255.255.255.0 up
	yum install keepalived -y
	cat /etc/keepalived/keepalived.conf
vrrp_instance MAKE_VIP_INT {
    state MASTER    # mark this node as the MASTER
    interface enp0s3    # NIC to bind, as reported by ifconfig
    virtual_router_id 1      # VRRP router id of this instance, similar to a cluster id
    priority 100    # priority; the MASTER must be higher than the BACKUP
    advert_int 1	# advertisement interval, 1s
    unicast_src_ip 192.168.56.199
    unicast_peer {
        192.168.56.200
    }
    authentication {
        auth_type PASS
	auth_pass 1111
    }
    virtual_ipaddress {	# configure the LVS VIP
        192.168.56.100/24 dev enp0s3 label enp0s3:1
    }
}

	[root@gbase8c_private keepalived]# systemctl restart keepalived && systemctl enable keepalived 
	Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
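    A quick sanity check that the VIP actually came up on the MASTER (interface name taken from the config above; a minimal sketch):
	ip addr show enp0s3 | grep 192.168.56.100
	systemctl status keepalived --no-pager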
  • 1.1.2 Install and configure on Server2
	yum install keepalived -y
	cat /etc/keepalived/keepalived.conf
vrrp_instance MAKE_VIP_INT {
    state BACKUP    # mark this node as the BACKUP
    interface enp0s3    # NIC to bind, as reported by ifconfig
    virtual_router_id 1      # VRRP router id; must match the MASTER's
    priority 50    # priority; lower than the MASTER
    advert_int 1	# advertisement interval, 1s
    unicast_src_ip 192.168.56.200
    unicast_peer {
        192.168.56.199
    }
    authentication {
        auth_type PASS
	auth_pass 1111
    }
    virtual_ipaddress {	# configure the LVS VIP
        192.168.56.100/24 dev enp0s3 label enp0s3:1
    }
}
	[root@gbase8c_1 keepalived]# systemctl restart keepalived && systemctl enable keepalived
	Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
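    To exercise failover, stopping keepalived on Server1 should move the VIP to Server2 within a few advertisement intervals; a minimal sketch:
	# on Server1
	systemctl stop keepalived
	# on Server2: the VIP should now be present
	ip addr show enp0s3 | grep 192.168.56.100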

1.2 Install and configure HAProxy

  • 1.2.1 Configure kernel parameters on each server
    This lets a process bind to an IP that is not currently assigned to the host, so HAProxy can bind the VIP on the BACKUP node as well (a persistence sketch follows).
    sysctl -w net.ipv4.ip_nonlocal_bind=1
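    To persist the setting across reboots (assuming the standard /etc/sysctl.conf location):
	echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
	sysctl -p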
  • 1.2.2 Install and configure HAProxy on Server1
	yum install haproxy -y
	cat /etc/haproxy/haproxy.cfg
global
maxconn 100000
uid 99
gid 99
daemon 
nbproc 1
log 127.0.0.1 local0 info 

defaults
option http-keep-alive
#option forwardfor
maxconn 100000
mode tcp
timeout connect 500000ms
timeout client 500000ms
timeout server 500000ms

listen stats
  mode http
  bind 0.0.0.0:9999
  stats enable
  log global
  stats uri /haproxy-stats
  stats auth haadmin:q1w2e3r4ys
 
#===================================
frontend docker_nginx_web
    bind 192.168.56.100:80
	mode http
	default_backend docker_nginx_hosts
	
backend docker_nginx_hosts
	mode http
	#balance source
	balance roundrobin
	server  192.168.56.199 192.168.56.199:81 check inter 2000 fall 3 rise 5
	server 	192.168.56.200 192.168.56.200:81 check inter 2000 fall 3 rise 5 
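    Before restarting, the configuration can be syntax-checked with HAProxy's check mode:
	haproxy -c -f /etc/haproxy/haproxy.cfg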
  • 1.2.3 Install and configure HAProxy on Server2
	yum install haproxy -y
	cat /etc/haproxy/haproxy.cfg
global
maxconn 100000
uid 99
gid 99
daemon 
nbproc 1
log 127.0.0.1 local0 info 

defaults
option http-keep-alive
#option forwardfor
maxconn 100000
mode tcp
timeout connect 500000ms
timeout client 500000ms
timeout server 500000ms

listen stats
  mode http
  bind 0.0.0.0:9999
  stats enable
  log global
  stats uri /haproxy-stats
  stats auth haadmin:q1w2e3r4ys
 
#===================================
frontend docker_nginx_web
    bind 192.168.56.100:80
	mode http
	default_backend docker_nginx_hosts
	
backend docker_nginx_hosts
	mode http
	#balance source
	balance roundrobin
	server  192.168.56.199 192.168.56.199:81 check inter 2000 fall 3 rise 5
	server 	192.168.56.200 192.168.56.200:81 check inter 2000 fall 3 rise 5 
  • 1.2.4 Start HAProxy on each server
	systemctl enable haproxy
	systemctl restart haproxy
	[root@gbase8c_1 haproxy]# systemctl enable haproxy
	Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
	[root@gbase8c_1 haproxy]# systemctl restart haproxy
	[root@gbase8c_1 haproxy]# systemctl status haproxy.service 
	● haproxy.service - HAProxy Load Balancer
	Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
	Active: active (running) since 四 2023-12-14 22:04:42 CST; 7s ago
	Main PID: 3873 (haproxy-systemd)
		Tasks: 3
	Memory: 1.5M
	CGroup: /system.slice/haproxy.service
			├─3873 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
			├─3876 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
			└─3877 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
	
	12月 14 22:04:42 gbase8c_1 systemd[1]: Started HAProxy Load Balancer.
	12月 14 22:04:42 gbase8c_1 haproxy-systemd-wrapper[3873]: haproxy-systemd-wrapper: executing /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

1.3 Start the Nginx containers and verify

  • 1.3.1 Start the Nginx container on Server1
    Start a container from the local Nginx image and publish a port; the default protocol is TCP.
	docker rm -f `docker ps -a -q`
	docker run --name nginx-web1 -d -p81:80  jack/nginx-1.22.1:v1 nginx
	ss -tnl
  • 1.3.2 Start the Nginx container on Server2
	docker rm -f `docker ps -a -q`
	docker run --name nginx-web1 -d -p81:80  jack/nginx-1.22.1:v1 nginx
	ss -tnl
  • 1.3.3 Verify web access
    192.168.56.199:81
    192.168.56.100:80
    Log:
	[root@gbase8c_1 ~]# ss -tnl
	State      Recv-Q Send-Q    Local Address:Port     Peer Address:Port                 
	LISTEN     0      128                   *:9999                *:*                     
	LISTEN     0      128      192.168.56.100:80                  *:*                  
	LISTEN     0      128                   *:81                  *:*           
	[root@gbase8c_1 ~]# ip addr
	2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
		link/ether 08:00:27:3d:53:56 brd ff:ff:ff:ff:ff:ff
		inet 192.168.56.200/24 brd 192.168.56.255 scope global noprefixroute enp0s3
		valid_lft forever preferred_lft forever
		inet 192.168.56.100/24 scope global secondary enp0s3:1
		valid_lft forever preferred_lft forever
		inet6 fe80::858f:968b:2bfe:f3c0/64 scope link noprefixroute 
		valid_lft forever preferred_lft forever
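    To verify load balancing end to end, hit the VIP repeatedly; with roundrobin the two backends answer in turn (visible if they serve distinguishable pages). A minimal sketch from any machine on 192.168.56.0/24:
	for i in 1 2 3 4; do curl -s http://192.168.56.100/ | head -n 1; done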

2 Interconnecting containers

2.1 Interconnection by container name

Containers on the same host can reach one another by a custom container name. For example, a site might serve static pages with Nginx and dynamic pages with Tomcat; because a container's internal IP is assigned dynamically at startup (DHCP-like), a fixed custom name is much more stable than an internal IP, which makes name-based access well suited to this scenario.

  • 2.1.1 Create the first container
    Create the first container; its name will be referenced later.
	docker run --name nginx-1 -d -p 8801:80 jack/nginx-1.22.1:v1 nginx
  • 2.1.2 View the current hosts file
    Log:
	[root@gbase8c_private ~]# docker run --name nginx-1 -d -p 8801:80 jack/nginx-1.22.1:v1 nginx
	a405a33a8b859396fd03f5be52014a32f6b13a3e63552ed3a9e3c87b002772a4
	[root@gbase8c_private ~]# docker ps -a
	CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS                           NAMES
	a405a33a8b85        jack/nginx-1.22.1:v1   "nginx"             7 seconds ago       Up 6 seconds        443/tcp, 0.0.0.0:8801->80/tcp   nginx-1
	[root@gbase8c_private ~]# docker exec -it a405a33a8b85 bash
	[root@a405a33a8b85 /]# cat /etc/hosts
	127.0.0.1	localhost
	::1	localhost ip6-localhost ip6-loopback
	fe00::0	ip6-localnet
	ff00::0	ip6-mcastprefix
	ff02::1	ip6-allnodes
	ff02::2	ip6-allrouters
	172.17.0.3	a405a33a8b85
  • 2.1.3 Create the second container
	docker run -d --name nginx-2 --link nginx-1 -p 8802:80 jack/nginx-1.22.1:v1 nginx
  • 2.1.4 View the second container's hosts file
	[root@gbase8c_private ~]# docker run -d --name nginx-2 --link nginx-1 -p 8802:80 jack/nginx-1.22.1:v1 nginx
	f08679e144deb92bc138bc8688e0eeafda4b501468a82aa51291cf187ddc3634
	[root@gbase8c_private ~]# docker ps
	CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS                           NAMES
	f08679e144de        jack/nginx-1.22.1:v1   "nginx"             58 seconds ago      Up 58 seconds       443/tcp, 0.0.0.0:8802->80/tcp   nginx-2
	a405a33a8b85        jack/nginx-1.22.1:v1   "nginx"             3 minutes ago       Up 3 minutes        443/tcp, 0.0.0.0:8801->80/tcp   nginx-1
	[root@gbase8c_private ~]# docker exec -it f08679e144de bash
	[root@f08679e144de /]# cat /etc/hosts
	127.0.0.1	localhost
	::1	localhost ip6-localhost ip6-loopback
	fe00::0	ip6-localnet
	ff00::0	ip6-mcastprefix
	ff02::1	ip6-allnodes
	ff02::2	ip6-allrouters
	172.17.0.3	nginx-1 a405a33a8b85  # name and ID of the first container; added only locally, not on the peer
	172.17.0.4	f08679e144de
  • 2.1.5 Verify connectivity
	[root@f08679e144de /]# ping nginx-1
	PING nginx-1 (172.17.0.3) 56(84) bytes of data.
	64 bytes from nginx-1 (172.17.0.3): icmp_seq=1 ttl=64 time=0.075 ms
	64 bytes from nginx-1 (172.17.0.3): icmp_seq=2 ttl=64 time=0.112 ms
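    Note that --link is a legacy Docker feature; on a user-defined network (see section 3 below), container names resolve automatically through Docker's embedded DNS with no linking required. A minimal sketch, with web-net as a hypothetical network name (assumes the nginx-1/nginx-2 containers above have been removed first):
	docker network create web-net
	docker run -d --name nginx-1 --network web-net -p 8801:80 jack/nginx-1.22.1:v1 nginx
	docker run -d --name nginx-2 --network web-net -p 8802:80 jack/nginx-1.22.1:v1 nginx
	docker exec -it nginx-2 ping -c 2 nginx-1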

2.2 Interconnection via a custom container alias

In the previous step, the custom container name may change later, and once it does, every program that calls the service by that name must be updated as well; calls that still use the old name will fail, and changing them every time is tedious. A custom alias solves this: the container name can change freely as long as the alias stays the same.
Command format:

	docker run -d --name <new-container-name> --link <target-container-name>:<alias> -p <host-port>:<container-port> <image> <command>
  • 2.2.1 Create the third container
	docker run -d --name nginx-3 --link nginx-1:custom_vm_name -p 8803:80 jack/nginx-1.22.1:v1 nginx
  • 2.2.2 View the hosts file
  • 2.2.3 Verify communication via the alias
    Log:
	[root@gbase8c_private ~]# docker run -d --name nginx-3 --link nginx-1:custom_vm_name -p 8803:80 jack/nginx-1.22.1:v1 nginx
	d514b2b69550017b7ff4450c226103eab6c4c57a84a6ed370c831006fec1cddb
	[root@gbase8c_private ~]# docker ps
	CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS                           NAMES
	d514b2b69550        jack/nginx-1.22.1:v1   "nginx"             3 seconds ago       Up 3 seconds        443/tcp, 0.0.0.0:8803->80/tcp   nginx-3
	f08679e144de        jack/nginx-1.22.1:v1   "nginx"             9 minutes ago       Up 9 minutes        443/tcp, 0.0.0.0:8802->80/tcp   nginx-2
	a405a33a8b85        jack/nginx-1.22.1:v1   "nginx"             11 minutes ago      Up 11 minutes       443/tcp, 0.0.0.0:8801->80/tcp   nginx-1
	[root@gbase8c_private ~]# docker exec -it d514b2b69550 bash
	[root@d514b2b69550 /]# cat /etc/hosts
	127.0.0.1	localhost
	::1	localhost ip6-localhost ip6-loopback
	fe00::0	ip6-localnet
	ff00::0	ip6-mcastprefix
	ff02::1	ip6-allnodes
	ff02::2	ip6-allrouters
	172.17.0.3	custom_vm_name a405a33a8b85 nginx-1
	172.17.0.2	d514b2b69550
	[root@d514b2b69550 /]# ping custom_vm_name
	PING custom_vm_name (172.17.0.3) 56(84) bytes of data.
	64 bytes from custom_vm_name (172.17.0.3): icmp_seq=1 ttl=64 time=0.128 ms
	64 bytes from custom_vm_name (172.17.0.3): icmp_seq=2 ttl=64 time=0.123 ms
	64 bytes from custom_vm_name (172.17.0.3): icmp_seq=3 ttl=64 time=0.111 ms
	^C
	--- custom_vm_name ping statistics ---
	3 packets transmitted, 3 received, 0% packet loss, time 2003ms
	rtt min/avg/max/mdev = 0.111/0.120/0.128/0.014 ms
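    Legacy --link also injects connection environment variables derived from the alias (uppercased, with non-alphanumeric characters mapped to underscores); a quick way to inspect them, as a sketch:
	docker exec nginx-3 env | grep -i custom_vm_name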

2.3 Cross-host interconnection over the network

Containers on the same host can communicate with each other directly, but how can a container on another host be reached?

  • 2.3.1 Docker network types
    Running docker network ls shows three network types; each type's behavior is described below.
    Bridge mode: selected with --net=bridge; when no mode is specified, bridge is the default.
    Log: view the current Docker networks
	[root@gbase8c_private ~]# docker network list
	NETWORK ID          NAME                DRIVER              SCOPE
	#bridge: bridged; each container gets its own IP
	e211f71bba4d        bridge              bridge              local
	#host: no separate IP; uses the host's IP and listens on the host's ports
	19023a4d913b        host                host                local
	#none: no networking
	1a79176b52c5        none                null                local
  • 2.3.1.1 Host mode
    Host mode: selected with --net=host.
    A container started in host mode does not create its own virtual NIC; it uses the host's NIC and IP address directly, so the IP information seen inside the container is the host's, and the container is accessed via the host IP plus the container port. Other resources, such as the filesystem and processes, remain isolated from the host.
    This mode offers the best network performance, but containers cannot listen on the same port, so it suits workloads whose ports are fixed.
    Host mode does not support port mapping; any -p port mappings are ignored (Docker prints a warning).
    Log:
	#first confirm port 80 on the host is free, then start a new container in host network mode
	#docker run -d --name net_host --net=host jack/nginx-1.22.1:v1 nginx
	[root@gbase8c_private ~]# docker run -d --name net_host --net=host jack/nginx-1.22.1:v1 nginx
	ba8e9ab9e2f604c92518733673fcda90f8084e35f41944b8ef1167167c792b8c
	[root@gbase8c_private ~]# docker ps -a
	CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS               NAMES
	ba8e9ab9e2f6        jack/nginx-1.22.1:v1   "nginx"             3 seconds ago       Up 2 seconds                            net_host
	[root@gbase8c_private ~]# docker exec -it ba8e9ab9e2f6 bash
	[root@gbase8c_private /]# hostname
	gbase8c_private
	#verify the network information
	[root@gbase8c_private /]# ifconfig
	br-69052870abe7: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
			inet 172.18.0.1  netmask 255.255.0.0  broadcast 172.18.255.255
			ether 02:42:68:19:1c:77  txqueuelen 0  (Ethernet)
			RX packets 3225  bytes 579415 (565.8 KiB)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 312  bytes 27961 (27.3 KiB)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
	
	docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
			inet 172.17.0.1  netmask 255.255.0.0  broadcast 172.17.255.255
			inet6 fe80::42:99ff:fe40:a53e  prefixlen 64  scopeid 0x20<link>
			ether 02:42:99:40:a5:3e  txqueuelen 0  (Ethernet)
			RX packets 1669  bytes 104806 (102.3 KiB)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 3268  bytes 237614 (232.0 KiB)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
	
	enp0s3: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
			inet 192.168.56.199  netmask 255.255.255.0  broadcast 192.168.56.255
			inet6 fe80::9b58:c5b7:cb7d:fea8  prefixlen 64  scopeid 0x20<link>
			ether 08:00:27:05:6c:a7  txqueuelen 1000  (Ethernet)
			RX packets 22842  bytes 1813943 (1.7 MiB)
	#verify access
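    With host mode there is no port mapping: nginx in the container answers directly on the host's IP. A quick check from another machine (sketch):
	curl -I http://192.168.56.199/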
  • 2.3.1.2 None mode
    None mode: selected with --net=none.
    With none mode, Docker performs no network configuration at all: the container has no NIC, no IP, and no routes, so by default it cannot communicate with the outside world. NICs and IPs must be added and configured by hand, so this mode is rarely used.
    Log:
	[root@gbase8c_private ~]# docker run -d --name net_none --net=none  jack/nginx-1.22.1:v1 nginx
	ddb39e82ca080aa1cf5d07e13671698b38e1b6a41d38e3556ab8a9d68483a102
	[root@gbase8c_private ~]# docker ps -a
	CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS               NAMES
	ddb39e82ca08        jack/nginx-1.22.1:v1   "nginx"             3 seconds ago       Up 2 seconds                            net_none
	[root@gbase8c_private ~]# docker exec -it ddb39e82ca08 bash
	[root@ddb39e82ca08 /]# ifconfig
	lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
			inet 127.0.0.1  netmask 255.0.0.0
			loop  txqueuelen 1000  (Local Loopback)
			RX packets 0  bytes 0 (0.0 B)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 0  bytes 0 (0.0 B)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  • 2.3.1.3 Container mode
    Container mode: selected with --net=container:<name-or-id>.
    A container created in this mode shares the network of an existing container rather than the host's: it creates no NIC and configures no IP of its own, sharing the specified container's IP and port range instead, so its ports must not clash with the other container's. Apart from the network, the filesystem and process information remain isolated; processes in the two containers can communicate over the shared lo interface.
    It simply reuses the other container's network and is rarely used.
    Log:
	#docker run -d --name nginx-web1 jack/nginx-1.22.1:v1 nginx
	#docker run -it --name net_container --net=container:nginx-web1  jack/nginx-1.22.1:v1 bash
	[root@gbase8c_private ~]# docker run -d --name nginx-web1 jack/nginx-1.22.1:v1 nginx
	e858dc3d3ca7e940d9b4b40343b1bb8238487eeefd9cf2b39436e265533ad19d
	[root@gbase8c_private ~]# docker run -it --name net_container --net=container:nginx-web1  jack/nginx-1.22.1:v1 bash 
	[root@e858dc3d3ca7 /]# ifconfig
	eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
			inet 172.17.0.2  netmask 255.255.0.0  broadcast 172.17.255.255
			ether 02:42:ac:11:00:02  txqueuelen 0  (Ethernet)
			RX packets 8  bytes 656 (656.0 B)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 0  bytes 0 (0.0 B)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
	
	lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
			inet 127.0.0.1  netmask 255.0.0.0
			loop  txqueuelen 1000  (Local Loopback)
			RX packets 0  bytes 0 (0.0 B)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 0  bytes 0 (0.0 B)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
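    Because the two containers share one network namespace, nginx running in nginx-web1 is reachable from net_container over the shared loopback; a quick check (assuming curl is available in the image):
	curl -I http://127.0.0.1/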
  • 2.3.1.4 Bridge mode
    Bridge is Docker's default mode (used when no mode is specified) and the most commonly used. Each container created in this mode gets its own IP and network configuration and is attached to a virtual bridge through which it communicates with the outside world.
	docker network inspect bridge
	docker run -d --name net_bridge  jack/nginx-1.22.1:v1 nginx
	[root@gbase8c_private ~]# docker run -d --name net_bridge  jack/nginx-1.22.1:v1 nginx
	98d6e3abcde5e3017ae38ddf0524ccbb4fcd5638371d0154549889ce38d8a646
	[root@gbase8c_private ~]# docker ps
	CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS               NAMES
	98d6e3abcde5        jack/nginx-1.22.1:v1   "nginx"             2 seconds ago       Up 1 second         80/tcp, 443/tcp     net_bridge
	[root@gbase8c_private ~]# docker exec -it 98d6e3abcde5 bash
	[root@98d6e3abcde5 /]# ifconfig
	eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
			inet 172.17.0.3  netmask 255.255.0.0  broadcast 172.17.255.255
			ether 02:42:ac:11:00:03  txqueuelen 0  (Ethernet)
			RX packets 8  bytes 656 (656.0 B)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 0  bytes 0 (0.0 B)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
	lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
			inet 127.0.0.1  netmask 255.0.0.0
			loop  txqueuelen 1000  (Local Loopback)
			RX packets 0  bytes 0 (0.0 B)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 0  bytes 0 (0.0 B)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  • 2.3.2 A simple cross-host interconnection
    Cross-host interconnection means a container on host A can reach a container on host B, provided the hosts themselves can reach each other over the network; the containers then reach each other through their hosts. The implementation is simply a network route on each host. For complex or large networks, Google's open-source Kubernetes (k8s) can be used instead.
  • 2.3.2.1 Change each host's subnet
    Docker's default subnet is 172.17.0.0/16 and is identical on every host, so before routing can work each host must be given a distinct subnet.
  • 2.3.2.2 Change Server A's subnet
	vim /usr/lib/systemd/system/docker.service
	ExecStart=/usr/bin/dockerd --bip=10.10.0.1/24
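    The same setting can also be applied through /etc/docker/daemon.json instead of editing the unit file (do not set --bip in both places; Docker rejects conflicting settings). A sketch:
	cat > /etc/docker/daemon.json <<-'EOF'
	{
	  "bip": "10.10.0.1/24"
	}
	EOF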
  • 2.3.2.3 Restart the Docker service and verify the NIC
	systemctl daemon-reload
	systemctl restart docker

Log:

	[root@gbase8c_private ~]# ifconfig
	docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
			inet 10.10.0.1  netmask 255.255.255.0  broadcast 10.10.0.255
			inet6 fe80::42:99ff:fe40:a53e  prefixlen 64  scopeid 0x20<link>
			ether 02:42:99:40:a5:3e  txqueuelen 0  (Ethernet)
			RX packets 1669  bytes 104806 (102.3 KiB)
			RX errors 0  dropped 0  overruns 0  frame 0
			TX packets 3268  bytes 237614 (232.0 KiB)
			TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  • 2.3.2.4 Change Server B's subnet
	vim /usr/lib/systemd/system/docker.service
	ExecStart=/usr/bin/dockerd --bip=10.10.1.1/24
  • 2.3.2.5 Restart Docker on Server B and verify the NIC
	systemctl daemon-reload
	systemctl restart docker

Log:

	docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 10.10.1.1  netmask 255.255.255.0  broadcast 10.10.1.255
        ether 02:42:c8:c5:b6:c1  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  • 2.3.2.6 Start one instance on each host
	Server1:
	docker run -d --name test-net-vm jack/nginx-1.22.1:v1 nginx
	Server2:
	docker run -d --name test-net-vm jack/nginx-1.22.1:v1 nginx

Log: verify the IPs

	Server1:
	[root@4a92f5fffe6f /]# ifconfig
	eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
			inet 10.10.0.2  netmask 255.255.255.0  broadcast 10.10.0.255
	Server2:
	[root@4c872e5e9ddf /]# ifconfig
	eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
			inet 10.10.1.2  netmask 255.255.255.0  broadcast 10.10.1.255
  • 2.3.2.7 Add static routes
    Add a static route on each host, with the peer host's IP as the gateway.
Server1:
	iptables -A FORWARD -s 192.168.56.0/24 -j ACCEPT
	route add -net 10.10.1.0/24 gw 192.168.56.200
	Log:
	[root@gbase8c_private ~]# iptables -A FORWARD -s 192.168.56.0/24 -j ACCEPT
	[root@gbase8c_private ~]# route add -net 10.10.1.0/24 gw 192.168.56.200
	[root@gbase8c_private ~]# route -n
	Kernel IP routing table
	Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
	10.10.1.0       192.168.56.200  255.255.255.0   UG    0      0        0 enp0s3
	[root@gbase8c_private ~]# ping 10.10.1.2
	PING 10.10.1.2 (10.10.1.2) 56(84) bytes of data.
	64 bytes from 10.10.1.2: icmp_seq=1 ttl=63 time=0.378 ms
	64 bytes from 10.10.1.2: icmp_seq=2 ttl=63 time=1.21 ms
	64 bytes from 10.10.1.2: icmp_seq=3 ttl=63 time=0.886 ms
	^C
	--- 10.10.1.2 ping statistics ---
	3 packets transmitted, 3 received, 0% packet loss, time 2002ms
	rtt min/avg/max/mdev = 0.378/0.827/1.218/0.346 ms
Server2:
	iptables -A FORWARD -s 192.168.56.0/24 -j ACCEPT
	route add -net 10.10.0.0/24 gw 192.168.56.199
	Log:
	[root@gbase8c_1 ~]# iptables -A FORWARD -s 192.168.56.0/24 -j ACCEPT
	[root@gbase8c_1 ~]# route add -net 10.10.0.0/24 gw 192.168.56.199
	[root@gbase8c_1 ~]# route -n
	Kernel IP routing table
	Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
	10.10.0.0       192.168.56.199  255.255.255.0   UG    0      0        0 enp0s3
	[root@gbase8c_1 ~]# ping 10.10.0.2
	PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
	64 bytes from 10.10.0.2: icmp_seq=1 ttl=63 time=0.553 ms
	64 bytes from 10.10.0.2: icmp_seq=2 ttl=63 time=1.09 ms
	^C
	--- 10.10.0.2 ping statistics ---
	2 packets transmitted, 2 received, 0% packet loss, time 1001ms
	rtt min/avg/max/mdev = 0.553/0.823/1.094/0.272 ms
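    Note that route add does not survive a reboot; on CentOS 7 the route can be persisted per interface (a sketch, assuming interface enp0s3; mirror it on Server2 for 10.10.0.0/24):
	# on Server1
	echo "10.10.1.0/24 via 192.168.56.200" > /etc/sysconfig/network-scripts/route-enp0s3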
  • 2.3.2.8 Packet capture analysis
	tcpdump -i eth0 -vnn icmp
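    Capturing on the destination host's bridge while pinging from the remote container shows the forwarded ICMP arriving with the container's source address, e.g. on Server2 (sketch):
	tcpdump -i docker0 -vnn icmp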
  • 2.3.2.9 Test container-to-container connectivity
    Container on Server1:
	[root@gbase8c_private ~]# docker exec -it 4a92f5fffe6f bash
	[root@4a92f5fffe6f /]# ping  10.10.1.2
	PING 10.10.1.2 (10.10.1.2) 56(84) bytes of data.
	64 bytes from 10.10.1.2: icmp_seq=1 ttl=62 time=0.267 ms
	64 bytes from 10.10.1.2: icmp_seq=2 ttl=62 time=0.351 ms
	^C
	--- 10.10.1.2 ping statistics ---
	2 packets transmitted, 2 received, 0% packet loss, time 1000ms
	rtt min/avg/max/mdev = 0.267/0.309/0.351/0.042 ms
Container on Server2:
	[root@gbase8c_1 ~]# docker exec -it 4c872e5e9ddf bash
	[root@4c872e5e9ddf /]# ping 10.10.0.2
	PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
	64 bytes from 10.10.0.2: icmp_seq=1 ttl=62 time=0.502 ms
	64 bytes from 10.10.0.2: icmp_seq=2 ttl=62 time=0.330 ms
	^C
	--- 10.10.0.2 ping statistics ---
	2 packets transmitted, 2 received, 0% packet loss, time 1000ms
	rtt min/avg/max/mdev = 0.330/0.416/0.502/0.086 ms

3 Creating a custom network

Custom networks can be created with the docker CLI, with a custom IP address range, gateway, and other settings.

3.1 Create a custom Docker network

	docker network create --help
	docker network create -d bridge --subnet 172.27.0.0/21 --gateway 172.27.0.1 mydocker-net

Log:

	[root@gbase8c_private ~]# docker network ls
	NETWORK ID          NAME                DRIVER              SCOPE
	6da39a5a1ab9        bridge              bridge              local
	69052870abe7        harbor_harbor       bridge              local
	19023a4d913b        host                host                local
	1a79176b52c5        none                null                local
	[root@gbase8c_private ~]# docker network create --help
	
	Usage:	docker network create [OPTIONS] NETWORK
	
	Create a network
	
	Options:
		--attachable           Enable manual container attachment
		--aux-address map      Auxiliary IPv4 or IPv6 addresses used by Network driver (default map[])
		--config-from string   The network from which copying the configuration
		--config-only          Create a configuration only network
	-d, --driver string        Driver to manage the Network (default "bridge")
		--gateway strings      IPv4 or IPv6 Gateway for the master subnet
		--ingress              Create swarm routing-mesh network
		--internal             Restrict external access to the network
		--ip-range strings     Allocate container ip from a sub-range
		--ipam-driver string   IP Address Management Driver (default "default")
		--ipam-opt map         Set IPAM driver specific options (default map[])
		--ipv6                 Enable IPv6 networking
		--label list           Set metadata on a network
	-o, --opt map              Set driver specific options (default map[])
		--scope string         Control the network's scope
		--subnet strings       Subnet in CIDR format that represents a network segment
	[root@gbase8c_private ~]# docker network create -d bridge --subnet 172.27.0.0/21 --gateway 172.27.0.1 mydocker-net
	c8f96b62282f63b08833bc82763f1c2354e78a37e8e8b4c0fe4d564354c8fcb3
	[root@gbase8c_private ~]# docker network ls
	NETWORK ID          NAME                DRIVER              SCOPE
	6da39a5a1ab9        bridge              bridge              local
	69052870abe7        harbor_harbor       bridge              local
	19023a4d913b        host                host                local
	c8f96b62282f        mydocker-net        bridge              local
	1a79176b52c5        none                null                local

3.2 Run a container on the custom network

	docker run -it --name c1 --network mydocker-net centos

Log:

	[root@gbase8c_private ~]# docker run -it --name c1 --network mydocker-net centos:latest
	[root@ccc8e1215ac6 /]# ip addr
	1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
		link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
		inet 127.0.0.1/8 scope host lo
		valid_lft forever preferred_lft forever
	11: eth0@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
		link/ether 02:42:ac:1b:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
		inet 172.27.0.2/21 brd 172.27.7.255 scope global eth0
		valid_lft forever preferred_lft forever
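On a user-defined network, Docker's embedded DNS resolves container names automatically, so a second container can reach c1 by name; a minimal sketch with a hypothetical container c2:

	docker run -it --name c2 --network mydocker-net centos:latest ping -c 2 c1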

3.3 Current iptables rules

Log:

	[root@gbase8c_private ~]# iptables -vnl
	iptables v1.4.21: unknown option "-vnl"
	Try `iptables -h' or 'iptables --help' for more information.
	[root@gbase8c_private ~]# iptables -vnL
	......
	Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
	pkts bytes target     prot opt in     out     source               destination         
		7   812 ACCEPT     all  --  *      br-c8f96b62282f  0.0.0.0/0            0.0.0.0/0            ctstate RELATED,ESTABLISHED
		0     0 DOCKER     all  --  *      br-c8f96b62282f  0.0.0.0/0            0.0.0.0/0           
		9   722 ACCEPT     all  --  br-c8f96b62282f !br-c8f96b62282f  0.0.0.0/0            0.0.0.0/0           
		0     0 ACCEPT     all  --  br-c8f96b62282f br-c8f96b62282f  0.0.0.0/0            0.0.0.0/0           
	Chain DOCKER-ISOLATION-STAGE-1 (1 references)
	pkts bytes target     prot opt in     out     source               destination         
		9   722 DOCKER-ISOLATION-STAGE-2  all  --  br-c8f96b62282f !br-c8f96b62282f  0.0.0.0/0            0.0.0.0/0   
	
	Chain DOCKER-ISOLATION-STAGE-2 (3 references)
	pkts bytes target     prot opt in     out     source               destination         
		0     0 DROP       all  --  *      br-c8f96b62282f  0.0.0.0/0            0.0.0.0/0    
	......
	[root@gbase8c_private ~]# iptables -t nat -vnL
	......
	pkts bytes target     prot opt in     out     source               destination         
		3   194 MASQUERADE  all  --  *      !br-c8f96b62282f  172.27.0.0/21        0.0.0.0/0           
	pkts bytes target     prot opt in     out     source               destination         
		0     0 RETURN     all  --  br-c8f96b62282f *       0.0.0.0/0            0.0.0.0/0 
	......		

3.4 Communicating with containers on the default network

There is now the docker0 network (172.17.0.0/16) and the custom mydocker-net network (172.27.0.0/21), each running a number of containers. How can containers on these two networks communicate with each other? First dump the current rules:

	iptables-save > iptables-rule.txt

Then comment out the DROP rules in the isolation chains in the exported file:
Log:

	......
	#-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
	#-A DOCKER-ISOLATION-STAGE-2 -o br-69052870abe7 -j DROP
	......
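Hand-editing the isolation chains is fragile, since Docker rewrites its rules whenever the daemon restarts. Where it fits, the supported alternative is to attach a container to the second network so it is reachable on both:

	docker network connect mydocker-net <container-name>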

3.5 Reload iptables and verify communication

Reload the iptables rules:

	iptables-restore < iptables-rule.txt

Log:

	[root@ccc8e1215ac6 /]# ip addr
	13: eth0@if14: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
		link/ether 02:42:ac:1b:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
		inet 172.27.0.2/21 brd 172.27.7.255 scope global eth0
		valid_lft forever preferred_lft forever
	[root@ccc8e1215ac6 /]# ping 10.10.0.2
	PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
	^C
	--- 10.10.0.2 ping statistics ---
	8 packets transmitted, 0 received, 100% packet loss, time 7001ms
	
	[root@ccc8e1215ac6 /]# ping 10.10.0.2
	PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
	64 bytes from 10.10.0.2: icmp_seq=1 ttl=63 time=0.057 ms
	64 bytes from 10.10.0.2: icmp_seq=2 ttl=63 time=0.059 ms
	64 bytes from 10.10.0.2: icmp_seq=3 ttl=63 time=0.062 ms
	^C
	--- 10.10.0.2 ping statistics ---
	3 packets transmitted, 3 received, 0% packet loss, time 2041ms
	rtt min/avg/max/mdev = 0.057/0.059/0.062/0.006 ms

Original article: https://blog.csdn.net/qq_29567379/article/details/135795658
