Lab-37: Linux container (lxc) networking

I briefly touched on Linux container network types in Lab-36. In this lab I will go over the various methods to virtualize a Linux container's network. Linux containers support these types of network virtualization:

  1. empty
  2. phys
  3. veth
  4. vlan
  5. macvlan, which supports these modes:
    1. bridge
    2. vepa
    3. private

You can read more in the Linux container manual pages here

Prerequisite

Install Linux containers by following Lab-36. In this lab I am using CentOS 7

Linux container (lxc) configuration file

lxc supports two types of configuration files. Since we are working on networking I will concentrate on the network settings only:

  1. Default configuration file: In my OS this file is located at /etc/lxc/default.conf. It contains the default network settings, which containers inherit. This is the default network setting on my system:
#cat default.conf
lxc.network.type = veth
lxc.network.link = virbr0
lxc.network.flags = up
  2. Container configuration file: Each container has its own configuration file; in my OS it is located at /var/lib/lxc/<container name>/config. When a container starts it uses this config file to set up its runtime environment, including the network. The file inherits its network settings from default.conf. This is what the configuration file looks like for my container lxc_cn1:
# Template used to create this container: /usr/share/lxc/templates/lxc-ubuntu
# Parameters passed to the template:
# For additional config options, please look at lxc.container.conf(5)

# Common configuration
lxc.include = /usr/share/lxc/config/ubuntu.common.conf

# Container specific configuration
lxc.rootfs = /var/lib/lxc/lxc_cn1/rootfs
lxc.mount = /var/lib/lxc/lxc_cn1/fstab
lxc.utsname = lxc_cn1
lxc.arch = amd64


# Network configuration
lxc.network.type = veth
lxc.network.hwaddr = 00:16:3e:43:aa:ba
lxc.network.flags = up
lxc.network.link = virbr0

In my exercises I will be updating this file to create different network types.

I will perform these connectivity tests wherever applicable:

  1. Ping test from container to container
  2. Ping test from host to container and container to host
  3. Ping to external system

Below is the lab topology. I am using Linux servers connected through the physical interface enp0s29f7u1. I will create the Linux containers on the machine labeled 'host machine'.

[Image: linux_container_6 - lab topology]

lxc.network.type = empty

In this mode no network interface is created inside the container (other than loopback). This option can be used when a container is standalone and doesn't need network connectivity.
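
Conceptually this is the same as a bare network namespace, which also starts with only a loopback device. A quick way to see the same behavior outside lxc (a minimal sketch using iproute2; demo_ns is just a throwaway name):

//a fresh namespace contains only "lo", just like an empty-type container
#ip netns add demo_ns
#ip netns exec demo_ns ip address
#ip netns delete demo_ns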

Create a container

#lxc-create -t ubuntu -n empty_cn1

Update container configuration file /var/lib/lxc/empty_cn1/config

# Network configuration
lxc.network.type = empty
lxc.network.flags = up

Start the container and check the network settings

[root@localhost]# lxc-start -n empty_cn1 -d

[root@localhost]# lxc-info -n empty_cn1
Name:           empty_cn1
State:          RUNNING
PID:            11825
CPU use:        0.20 seconds
BlkIO use:      0 bytes
Memory use:     3.07 MiB
KMem use:       0 bytes
Link:           virbr0
 TX bytes:      3.01 MiB
 RX bytes:      7.31 KiB
 Total bytes:   3.01 MiB


//check container network setting. No interface created other than loopback
[root@localhost]# lxc-attach -n empty_cn1 /sbin/ip address
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever

lxc.network.type = phys

This option links the host interface specified in lxc.network.link to the container. To use this option you need an unused interface that can be assigned to the container. Once the interface is assigned to the container, it disappears from the host's interface list.
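
Under the hood this is roughly equivalent to moving the device into the container's network namespace with iproute2. A sketch, assuming 11825 is the container's PID as reported by lxc-info:

//move the physical NIC into the network namespace of process 11825;
//the device vanishes from the host until the namespace goes away
#ip link set enp0s29f7u1 netns 11825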

[Image: linux_container_4 - phys topology]

Create a container

[root@localhost ~]# lxc-create -t ubuntu -n phys_cn1
Checking cache download in /var/cache/lxc/precise/rootfs-amd64 ...
Copy /var/cache/lxc/precise/rootfs-amd64 to /var/lib/lxc/phys_cn1/rootfs ...
Copying rootfs to /var/lib/lxc/phys_cn1/rootfs ...
Generating locales...
  en_US.UTF-8... up-to-date
Generation complete.
Creating SSH2 RSA key; this may take some time ...
Creating SSH2 DSA key; this may take some time ...
Creating SSH2 ECDSA key; this may take some time ...
Timezone in container is not configured. Adjust it manually.

##
# The default user is 'ubuntu' with password 'ubuntu'!
# Use the 'sudo' command to run tasks as root in the container.
##

Edit the container config file /var/lib/lxc/phys_cn1/config. These are the fields I updated:

lxc.network.link = enp0s29f7u1
lxc.network.type = phys
lxc.network.ipv4 = 192.168.2.1/24

The full network section now looks like this:

# Network configuration
lxc.network.type = phys
lxc.network.hwaddr = 00:16:3e:bf:a5:4e
lxc.network.flags = up
lxc.network.link = enp0s29f7u1
lxc.network.ipv4 = 192.168.2.1/24

Start the container and check the network settings

[root@localhost]# lxc-start -n phys_cn1 -d
[root@localhost]# lxc-info -n phys_cn1
Name:           phys_cn1
State:          RUNNING
PID:            23805
IP:             192.168.2.1
CPU use:        0.21 seconds
BlkIO use:      0 bytes
Memory use:     3.10 MiB
KMem use:       0 bytes
Link:           enp0s29f7u1
 Total bytes:   0 bytes
[root@localhost phys_cn1]#

//network interface disappeared from host
[root@localhost ~]# ifconfig enp0s29f7u1
enp0s29f7u1: error fetching interface information: Device not found

Log in to the container and perform ping tests to the host and to the external system

[root@localhost]# lxc-console -n phys_cn1

//interface enp0s29f7u1 linked to the container; IP address and MAC assigned
//from the container configuration file
ubuntu@phys_cn1:~$ ifconfig
enp0s29f7u1 Link encap:Ethernet  HWaddr 00:16:3e:bf:a5:4e
          inet addr:192.168.2.1  Bcast:192.168.2.255  Mask:255.255.255.0
          inet6 addr: fe80::216:3eff:febf:a54e/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:9422 errors:0 dropped:0 overruns:0 frame:0
          TX packets:12999 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:934818 (934.8 KB)  TX bytes:2522162 (2.5 MB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)


//ping to external system passed
ubuntu@phys_cn1:~$ ping 192.168.2.101
PING 192.168.2.101 (192.168.2.101) 56(84) bytes of data.
64 bytes from 192.168.2.101: icmp_req=1 ttl=64 time=0.699 ms
64 bytes from 192.168.2.101: icmp_req=2 ttl=64 time=0.217 ms
64 bytes from 192.168.2.101: icmp_req=3 ttl=64 time=0.204 ms
^C
--- 192.168.2.101 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.204/0.373/0.699/0.230 ms

ubuntu@phys_cn1:~$ ip route
192.168.2.0/24 dev enp0s29f7u1  proto kernel  scope link  src 192.168.2.1
ubuntu@phys_cn1:~$

Ping container from host

//ping to container from host failed
[root@localhost ~]# ping 192.168.2.1
PING 192.168.2.1 (192.168.2.1) 56(84) bytes of data.
^C
--- 192.168.2.1 ping statistics ---
4 packets transmitted, 0 received, +4 errors, 100% packet loss, time 3004ms

Stop container

//stop container
[root@localhost phys_cn1]# lxc-stop -n phys_cn1

//host interface reappeared 
[root@localhost phys_cn1]# ifconfig enp0s29f7u1
enp0s29f7u1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.2.100  netmask 255.255.255.0  broadcast 192.168.2.255
        inet6 fe80::250:b6ff:fe19:4165  prefixlen 64  scopeid 0x20
        ether 00:50:b6:19:41:65  txqueuelen 1000  (Ethernet)
        RX packets 9427  bytes 935162 (913.2 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 13025  bytes 2525292 (2.4 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lxc.network.type = vlan

In this mode a vlan interface is created on top of the interface specified by lxc.network.link and assigned to the container. The vlan identifier is specified by the option lxc.network.vlan.id

In this mode the container can communicate with external systems but can't communicate with the host
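
To see what lxc is doing here, the manual equivalent with iproute2 looks roughly like this (a sketch; the interface name and vlan id match the config used below):

//create an 802.1Q sub-interface with vlan id 100 on the physical NIC
#ip link add link enp0s29f7u1 name enp0s29f7u1.100 type vlan id 100
#ip link set enp0s29f7u1.100 up
//lxc then moves this sub-interface into the container's network namespace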

[Image: linux_container_5 - vlan topology]

Create a container vlan_cn1

[root@localhost]# lxc-create -t ubuntu -n vlan_cn1
Checking cache download in /var/cache/lxc/precise/rootfs-amd64 ...
Copy /var/cache/lxc/precise/rootfs-amd64 to /var/lib/lxc/vlan_cn1/rootfs ...
Copying rootfs to /var/lib/lxc/vlan_cn1/rootfs ...
Generating locales...
  en_US.UTF-8... up-to-date
Generation complete.
Creating SSH2 RSA key; this may take some time ...
Creating SSH2 DSA key; this may take some time ...
Creating SSH2 ECDSA key; this may take some time ...
Timezone in container is not configured. Adjust it manually.

##
# The default user is 'ubuntu' with password 'ubuntu'!
# Use the 'sudo' command to run tasks as root in the container.
##

[root@localhost]# lxc-info -n vlan_cn1
Name:           vlan_cn1
State:          STOPPED
[root@localhost]#

Edit the container configuration file. These are the fields I updated:

lxc.network.type= vlan
lxc.network.link = enp0s29f7u1
lxc.network.vlan.id = 100
lxc.network.ipv4 = 192.168.2.1/24

The full network section now looks like this:

# Network configuration
lxc.network.type = vlan
lxc.network.vlan.id = 100
lxc.network.hwaddr = 00:16:3e:e2:d6:19
lxc.network.flags = up
lxc.network.link = enp0s29f7u1
lxc.network.ipv4 = 192.168.2.1/24

Start container

[root@localhost]# lxc-start -n vlan_cn1 -d
[root@localhost]# lxc-info -n vlan_cn1
Name:           vlan_cn1
State:          RUNNING
PID:            24472
IP:             192.168.2.1
CPU use:        0.21 seconds
BlkIO use:      0 bytes
Memory use:     3.70 MiB
KMem use:       0 bytes
Link:           enp0s29f7u1
 TX bytes:      913.24 KiB
 RX bytes:      2.41 MiB
 Total bytes:   3.30 MiB
[root@localhost vlan_cn1]#

To test connectivity we need to create a vlan interface on the external system. To create a vlan interface, install vconfig:

[root@localhost ~]# yum install epel-release
[root@localhost ~]# yum install vconfig

//add vlan interface with vlan.id=100 on external system
[root@localhost ~]# vconfig add enp0s29f7u1 100
WARNING:  Could not open /proc/net/vlan/config.  Maybe you need to load the 8021q module, or maybe you are not using PROCFS??
Added VLAN with VID == 100 to IF -:enp0s29f7u1:-

//a vlan interface with vlan.id=100 created
[root@localhost ~]# ip addr
5: enp0s29f7u1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:50:b6:19:41:63 brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.101/24 scope global enp0s29f7u1
       valid_lft forever preferred_lft forever
    inet6 fe80::250:b6ff:fe19:4163/64 scope link
       valid_lft forever preferred_lft forever
6: enp0s29f7u1.100@enp0s29f7u1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN
    link/ether 00:50:b6:19:41:63 brd ff:ff:ff:ff:ff:ff

//remove ip from base interface 
[root@localhost ~]# ip addr del 192.168.2.101/24 dev enp0s29f7u1

//add ip to vlan interface
[root@localhost ~]# ip addr add 192.168.2.101/24 dev enp0s29f7u1.100

[root@localhost ~]# ifconfig enp0s29f7u1.100
enp0s29f7u1.100: flags=4098<BROADCAST,MULTICAST>  mtu 1500
        inet 192.168.2.101  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:50:b6:19:41:63  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@localhost ~]# ifconfig enp0s29f7u1.100 up

[root@localhost ~]# ifconfig enp0s29f7u1.100
enp0s29f7u1.100: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.2.101  netmask 255.255.255.0  broadcast 0.0.0.0
        inet6 fe80::250:b6ff:fe19:4163  prefixlen 64  scopeid 0x20
        ether 00:50:b6:19:41:63  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@localhost ~]# ip addr
5: enp0s29f7u1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:50:b6:19:41:63 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::250:b6ff:fe19:4163/64 scope link
       valid_lft forever preferred_lft forever
6: enp0s29f7u1.100@enp0s29f7u1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
    link/ether 00:50:b6:19:41:63 brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.101/24 scope global enp0s29f7u1.100
       valid_lft forever preferred_lft forever
    inet6 fe80::250:b6ff:fe19:4163/64 scope link
       valid_lft forever preferred_lft forever

Log in to the container and ping the external system

ubuntu@vlan_cn1:~$ ifconfig
eth0      Link encap:Ethernet  HWaddr 00:16:3e:e2:d6:19
          inet addr:192.168.2.1  Bcast:192.168.2.255  Mask:255.255.255.0
          inet6 addr: fe80::216:3eff:fee2:d619/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:268 errors:0 dropped:0 overruns:0 frame:0
          TX packets:914 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:21956 (21.9 KB)  TX bytes:81868 (81.8 KB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:709 errors:0 dropped:0 overruns:0 frame:0
          TX packets:709 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1
          RX bytes:78800 (78.8 KB)  TX bytes:78800 (78.8 KB)

//ping to external system passed
ubuntu@vlan_cn1:~$ ping 192.168.2.101
PING 192.168.2.101 (192.168.2.101) 56(84) bytes of data.
64 bytes from 192.168.2.101: icmp_req=1 ttl=64 time=0.387 ms
64 bytes from 192.168.2.101: icmp_req=2 ttl=64 time=0.393 ms
64 bytes from 192.168.2.101: icmp_req=3 ttl=64 time=0.325 ms
64 bytes from 192.168.2.101: icmp_req=4 ttl=64 time=0.223 ms
^C
--- 192.168.2.101 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 2999ms
rtt min/avg/max/mdev = 0.223/0.332/0.393/0.068 ms
ubuntu@vlan_cn1:~$
Check the incoming packets on the external system. As can be seen, packets are received with vlan.id=100

//the 802.1Q tag shows up in the hex dump as "8100 0064" (0x64 = 100)
[root@localhost ~]# tcpdump -i enp0s29f7u1 -XX
tcpdump: WARNING: enp0s29f7u1: no IPv4 address assigned
tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on enp0s29f7u1, link-type EN10MB (Ethernet), capture size 65535 bytes
23:51:38.940412 IP 192.168.2.1 > 192.168.2.101: ICMP echo request, id 305, seq 115, length 64
        0x0000:  0050 b619 4163 0016 3ee2 d619 8100 0064  .P..Ac..>......d
        0x0010:  0800 4500 0054 5423 4000 4001 60cf c0a8  ..E..TT#@.@.`...
        0x0020:  0201 c0a8 0265 0800 6ddb 0131 0073 0250  .....e..m..1.s.P
        0x0030:  9558 0000 0000 2305 0f00 0000 0000 1011  .X....#.........
        0x0040:  1213 1415 1617 1819 1a1b 1c1d 1e1f 2021  ...............!
        0x0050:  2223 2425 2627 2829 2a2b 2c2d 2e2f 3031  "#$%&'()*+,-./01
        0x0060:  3233 3435 3637                           234567
23:51:38.940438 IP 192.168.2.101 > 192.168.2.1: ICMP echo reply, id 305, seq 115, length 64
        0x0000:  0016 3ee2 d619 0050 b619 4163 8100 0064  ..>....P..Ac...d
        0x0010:  0800 4500 0054 9f4b 0000 4001 55a7 c0a8  ..E..T.K..@.U...
        0x0020:  0265 c0a8 0201 0000 75db 0131 0073 0250  .e......u..1.s.P
        0x0030:  9558 0000 0000 2305 0f00 0000 0000 1011  .X....#.........
        0x0040:  1213 1415 1617 1819 1a1b 1c1d 1e1f 2021  ...............!
        0x0050:  2223 2425 2627 2829 2a2b 2c2d 2e2f 3031  "#$%&'()*+,-./01
        0x0060:  3233 3435 3637                           234567

Ping container from host

//ping from host to container failed
[root@localhost ~]# ping 192.168.2.1
PING 192.168.2.1 (192.168.2.1) 56(84) bytes of data.
From 166.127.6.242 icmp_seq=1 Time to live exceeded
From 166.127.6.242 icmp_seq=2 Time to live exceeded
From 166.127.6.242 icmp_seq=3 Time to live exceeded
From 166.127.6.242 icmp_seq=4 Time to live exceeded
From 166.127.6.242 icmp_seq=5 Time to live exceeded
^C
--- 192.168.2.1 ping statistics ---
5 packets transmitted, 0 received, +5 errors, 100% packet loss, time 4006ms

[root@localhost ~]#

lxc.network.type = veth

In this mode a virtual ethernet pair device is created, with one side assigned to the container and the other side attached to a bridge specified by the lxc.network.link option.

In this mode containers can communicate with each other, the host can communicate with the containers, and the containers can communicate with external systems.
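
For reference, the manual equivalent of what lxc does at container start looks roughly like this (a sketch; veth_host/veth_cont are hypothetical names, lxc generates random ones like vethMFY0GP):

//create a veth pair and attach the host side to the bridge
#ip link add veth_host type veth peer name veth_cont
#brctl addif lxcbr0 veth_host
#ip link set veth_host up
//lxc moves veth_cont into the container and brings it up as eth0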

In the topology diagram below, the host machine is connected to the external Linux server via the physical interface enp0s29f7u1. This interface is linked to the bridge lxcbr0. No IP is needed on enp0s29f7u1 itself.

[Image: linux_container_3 - veth topology]

First we need to create a Linux bridge (lxcbr0), assign an IP address to the bridge (in the same subnet as the containers) and link the host machine's physical interface (enp0s29f7u1) to it. I am using the brctl command to perform these tasks

//create bridge lxcbr0
[root@localhost]# sudo brctl addbr lxcbr0
 
//assign IP address to bridge
[root@localhost ~]# ip addr add 192.168.2.100/24 dev lxcbr0

[root@localhost ~]# ifconfig lxcbr0
 lxcbr0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
         inet 192.168.2.100  netmask 255.255.255.0  broadcast 0.0.0.0
         inet6 fe80::9c7e:c7ff:fe64:be71  prefixlen 64  scopeid 0x20
         ether fe:01:92:6f:88:21  txqueuelen 1000  (Ethernet)
         RX packets 1554  bytes 505536 (493.6 KiB)
         RX errors 0  dropped 0  overruns 0  frame 0
         TX packets 19  bytes 3403 (3.3 KiB)
         TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
 
//link physical host interface to bridge
[root@localhost ~]# brctl addif lxcbr0 enp0s29f7u1

//brctl show lists the interfaces associated with each bridge
[root@localhost ~]# brctl show
 bridge name     bridge id               STP enabled     interfaces
 lxcbr0          8000.0050b6194165       no              enp0s29f7u1
 virbr0          8000.000000000000       no

//no IP needed on physical interface
[root@localhost ~]# ifconfig enp0s29f7u1
enp0s29f7u1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        ether 00:50:b6:19:41:65  txqueuelen 1000  (Ethernet)
        RX packets 9422  bytes 934818 (912.9 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 12589  bytes 2384139 (2.2 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Create two containers named veth_cn1 & veth_cn2

#lxc-create -t ubuntu -n veth_cn1
#lxc-create -t ubuntu -n veth_cn2

Edit the container configuration files. Container configuration files are located at /var/lib/lxc/<container name>/config. Note: remember I am using CentOS; the path on your machine may differ if you are not using CentOS

Container veth_cn1 configuration file; these are the network fields I updated:

# Network configuration
 lxc.network.type = veth
 lxc.network.hwaddr = 00:16:3e:0a:3b:cc
 lxc.network.flags = up
 lxc.network.link = lxcbr0
 lxc.network.ipv4 = 192.168.2.1/24

Container veth_cn2 configuration file; these are the network fields I updated:

# Network configuration
 lxc.network.type = veth
 lxc.network.hwaddr = 00:16:3e:9b:66:cf
 lxc.network.flags = up
 lxc.network.link = lxcbr0
 lxc.network.ipv4 = 192.168.2.2/24

Start both containers

[root@localhost]# lxc-start -n veth_cn1 -d

[root@localhost]# lxc-start -n veth_cn2 -d

//as you can see, two new veth interfaces were created and attached to the
//bridge (lxcbr0); the other end of each interface is connected to a container
[root@localhost]# sudo brctl show
bridge name     bridge id               STP enabled     interfaces
lxcbr0          8000.0050b6194165       no              enp0s29f7u1
                                                        veth2GN030
                                                        vethMFY0GP
virbr0          8000.000000000000       no

//check veth interfaces
[root@localhost ~]# ifconfig veth2GN030
veth2GN030: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet6 fe80::fc01:92ff:fe6f:8821  prefixlen 64  scopeid 0x20
        ether fe:01:92:6f:88:21  txqueuelen 1000  (Ethernet)
        RX packets 882  bytes 295072 (288.1 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 937  bytes 307987 (300.7 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@localhost ~]# ifconfig vethMFY0GP
vethMFY0GP: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet6 fe80::fca6:3ff:fe2d:cbb5  prefixlen 64  scopeid 0x20
        ether fe:a6:03:2d:cb:b5  txqueuelen 1000  (Ethernet)
        RX packets 899  bytes 302950 (295.8 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 897  bytes 297597 (290.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Log in to the first container veth_cn1 using lxc-console (username/password: ubuntu/ubuntu) and perform ping tests

[root@localhost ~]# lxc-console -n veth_cn1

ubuntu@veth_cn1:~$ ifconfig
 eth0      Link encap:Ethernet  HWaddr 00:16:3e:0a:3b:cc
           inet addr:192.168.2.1  Bcast:192.168.2.255  Mask:255.255.255.0
           inet6 addr: fe80::216:3eff:fe0a:3bcc/64 Scope:Link
           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
           RX packets:814 errors:0 dropped:0 overruns:0 frame:0
           TX packets:767 errors:0 dropped:0 overruns:0 carrier:0
           collisions:0 txqueuelen:1000
           RX bytes:273205 (273.2 KB)  TX bytes:260226 (260.2 KB)
 
 lo        Link encap:Local Loopback
           inet addr:127.0.0.1  Mask:255.0.0.0
           inet6 addr: ::1/128 Scope:Host
           UP LOOPBACK RUNNING  MTU:65536  Metric:1
           RX packets:16 errors:0 dropped:0 overruns:0 frame:0
           TX packets:16 errors:0 dropped:0 overruns:0 carrier:0
           collisions:0 txqueuelen:1
           RX bytes:1184 (1.1 KB)  TX bytes:1184 (1.1 KB)
 
//ping to bridge interface ip
ubuntu@veth_cn1:~$ ping 192.168.2.100
 PING 192.168.2.100 (192.168.2.100) 56(84) bytes of data.
 64 bytes from 192.168.2.100: icmp_req=1 ttl=64 time=0.098 ms
 64 bytes from 192.168.2.100: icmp_req=2 ttl=64 time=0.040 ms
 64 bytes from 192.168.2.100: icmp_req=3 ttl=64 time=0.043 ms
 64 bytes from 192.168.2.100: icmp_req=4 ttl=64 time=0.041 ms
 ^C
 --- 192.168.2.100 ping statistics ---
 4 packets transmitted, 4 received, 0% packet loss, time 3000ms
 rtt min/avg/max/mdev = 0.040/0.055/0.098/0.025 ms

//ping to container veth_cn2
ubuntu@veth_cn1:~$ ping 192.168.2.2
 PING 192.168.2.2 (192.168.2.2) 56(84) bytes of data.
 64 bytes from 192.168.2.2: icmp_req=1 ttl=64 time=0.092 ms
 64 bytes from 192.168.2.2: icmp_req=2 ttl=64 time=0.045 ms
 ^C
 --- 192.168.2.2 ping statistics ---
 2 packets transmitted, 2 received, 0% packet loss, time 1000ms
 rtt min/avg/max/mdev = 0.045/0.068/0.092/0.024 ms

//ping to external system
ubuntu@veth_cn1:~$ ping 192.168.2.101
 PING 192.168.2.101 (192.168.2.101) 56(84) bytes of data.
 64 bytes from 192.168.2.101: icmp_req=1 ttl=64 time=0.565 ms
 64 bytes from 192.168.2.101: icmp_req=2 ttl=64 time=0.326 ms
 64 bytes from 192.168.2.101: icmp_req=3 ttl=64 time=0.378 ms
 ^C
 --- 192.168.2.101 ping statistics ---
 3 packets transmitted, 3 received, 0% packet loss, time 1999ms
 rtt min/avg/max/mdev = 0.326/0.423/0.565/0.102 ms
 ubuntu@veth_cn1:~$

Ping from host to containers

//ping to first container veth_cn1
[root@localhost ~]# ping 192.168.2.1
PING 192.168.2.1 (192.168.2.1) 56(84) bytes of data.
64 bytes from 192.168.2.1: icmp_seq=1 ttl=64 time=0.053 ms
64 bytes from 192.168.2.1: icmp_seq=2 ttl=64 time=0.036 ms
64 bytes from 192.168.2.1: icmp_seq=3 ttl=64 time=0.036 ms
^C
--- 192.168.2.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.036/0.041/0.053/0.010 ms


//ping to second container veth_cn2
[root@localhost ~]# ping 192.168.2.2
PING 192.168.2.2 (192.168.2.2) 56(84) bytes of data.
64 bytes from 192.168.2.2: icmp_seq=1 ttl=64 time=0.082 ms
64 bytes from 192.168.2.2: icmp_seq=2 ttl=64 time=0.039 ms
64 bytes from 192.168.2.2: icmp_seq=3 ttl=64 time=0.037 ms
^C
--- 192.168.2.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.037/0.052/0.082/0.022 ms
[root@localhost ~]#

Ping from external system to container veth_cn1 and veth_cn2

//ping to first container veth_cn1
[root@localhost ~]# ping 192.168.2.1
 PING 192.168.2.1 (192.168.2.1) 56(84) bytes of data.
 64 bytes from 192.168.2.1: icmp_seq=1 ttl=64 time=0.417 ms
 64 bytes from 192.168.2.1: icmp_seq=2 ttl=64 time=0.327 ms
 ^C
 --- 192.168.2.1 ping statistics ---
 2 packets transmitted, 2 received, 0% packet loss, time 1000ms
 rtt min/avg/max/mdev = 0.327/0.372/0.417/0.045 ms
 
//ping to second container veth_cn2
[root@localhost ~]# ping 192.168.2.2
 PING 192.168.2.2 (192.168.2.2) 56(84) bytes of data.
 64 bytes from 192.168.2.2: icmp_seq=1 ttl=64 time=0.614 ms
 64 bytes from 192.168.2.2: icmp_seq=2 ttl=64 time=0.393 ms
 64 bytes from 192.168.2.2: icmp_seq=3 ttl=64 time=0.341 ms
 ^C
 --- 192.168.2.2 ping statistics ---
 3 packets transmitted, 3 received, 0% packet loss, time 2000ms
 rtt min/avg/max/mdev = 0.341/0.449/0.614/0.119 ms

lxc.network.type = macvlan

macvlan supports these three modes:

  1. bridge
  2. VEPA (Virtual Ethernet Port Aggregator)
  3. private

Let’s try these modes one by one

macvlan.mode = bridge

In macvlan bridge mode containers can communicate with each other, containers can communicate with external systems, and external systems can communicate with the containers. But containers do not have network access to the host. In this mode you are creating logical network interfaces on top of the host's physical interface.
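
For reference, the manual equivalent with iproute2 looks roughly like this (a sketch; mv0 is a hypothetical name):

//create a macvlan sub-interface in bridge mode on the physical NIC
#ip link add link enp0s29f7u1 name mv0 type macvlan mode bridge
#ip link set mv0 up
//frames between macvlan interfaces on the same parent are switched in the
//kernel, but frames to the parent interface itself are not, which is why
//the containers cannot reach the host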

Below is the topology diagram to test macvlan bridge mode. The external system is a Linux server directly connected to the host's physical interface and configured in the same subnet as the containers.

[Image: linux_container_7 - macvlan bridge topology]

Create two containers

lxc-create -t ubuntu -n macvlan_cn1
lxc-create -t ubuntu -n macvlan_cn2

Edit the configuration files for the containers.

Container macvlan_cn1 file: /var/lib/lxc/macvlan_cn1/config; these are the network fields I updated:

# Network configuration
lxc.network.type = macvlan
lxc.network.macvlan.mode = bridge
lxc.network.hwaddr = 00:16:3e:56:95:80
lxc.network.flags = up
lxc.network.link = enp0s29f7u1
lxc.network.ipv4 = 192.168.2.1/24

Container macvlan_cn2 file: /var/lib/lxc/macvlan_cn2/config; these are the network fields I updated:

# Network configuration
lxc.network.type = macvlan
lxc.network.macvlan.mode = bridge
lxc.network.hwaddr = 00:16:3e:cb:f5:71
lxc.network.flags = up
lxc.network.link = enp0s29f7u1
lxc.network.ipv4 = 192.168.2.2/24

Start both containers

#lxc-start -n macvlan_cn1 -d
#lxc-start -n macvlan_cn2 -d

[root@localhost]# lxc-info -n macvlan_cn1
Name:           macvlan_cn1
State:          RUNNING
PID:            8140
IP:             192.168.2.1
CPU use:        0.20 seconds
BlkIO use:      0 bytes
Memory use:     3.95 MiB
KMem use:       0 bytes
Link:           enp0s29f7u1
 TX bytes:      1.11 KiB
 RX bytes:      98.12 KiB
 Total bytes:   99.23 KiB

[root@localhost]# lxc-info -n macvlan_cn2
Name:           macvlan_cn2
State:          RUNNING
PID:            8353
IP:             192.168.2.2
CPU use:        0.20 seconds
BlkIO use:      0 bytes
Memory use:     3.72 MiB
KMem use:       0 bytes
Link:           enp0s29f7u1
 TX bytes:      1.11 KiB
 RX bytes:      98.87 KiB
 Total bytes:   99.98 KiB

Log in to the first container macvlan_cn1 and perform a ping test.

[root@localhost ~]# lxc-console -n macvlan_cn1

ubuntu@macvlan_cn1:~$ ifconfig
eth0      Link encap:Ethernet  HWaddr 00:16:3e:56:95:80
          inet addr:192.168.2.1  Bcast:192.168.2.255  Mask:255.255.255.0
          inet6 addr: fe80::216:3eff:fe56:9580/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:12 errors:0 dropped:0 overruns:0 frame:0
          TX packets:21 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:4272 (4.2 KB)  TX bytes:5094 (5.0 KB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

ubuntu@macvlan_cn1:~$ ip route
192.168.2.0/24 dev eth0  proto kernel  scope link  src 192.168.2.1

//ping to container 2 passed
ubuntu@macvlan_cn1:~$ ping 192.168.2.2
PING 192.168.2.2 (192.168.2.2) 56(84) bytes of data.
64 bytes from 192.168.2.2: icmp_req=1 ttl=64 time=0.086 ms
64 bytes from 192.168.2.2: icmp_req=2 ttl=64 time=0.038 ms
64 bytes from 192.168.2.2: icmp_req=3 ttl=64 time=0.038 ms
^C
--- 192.168.2.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.038/0.054/0.086/0.022 ms

Log in to the second container and perform a ping test to the first container

ubuntu@macvlan_cn2:~$ ifconfig
eth0      Link encap:Ethernet  HWaddr 00:16:3e:cb:f5:71
          inet addr:192.168.2.2  Bcast:192.168.2.255  Mask:255.255.255.0
          inet6 addr: fe80::216:3eff:fecb:f571/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:26 errors:0 dropped:0 overruns:0 frame:0
          TX packets:34 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:7868 (7.8 KB)  TX bytes:8208 (8.2 KB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

//ping to first container ip passed
ubuntu@macvlan_cn2:~$ ping 192.168.2.1
PING 192.168.2.1 (192.168.2.1) 56(84) bytes of data.
64 bytes from 192.168.2.1: icmp_req=1 ttl=64 time=0.055 ms
64 bytes from 192.168.2.1: icmp_req=2 ttl=64 time=0.038 ms
64 bytes from 192.168.2.1: icmp_req=3 ttl=64 time=0.038 ms
64 bytes from 192.168.2.1: icmp_req=4 ttl=64 time=0.037 ms
^C
--- 192.168.2.1 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3000ms
rtt min/avg/max/mdev = 0.037/0.042/0.055/0.007 ms

ubuntu@macvlan_cn2:~$ ip route
192.168.2.0/24 dev eth0  proto kernel  scope link  src 192.168.2.2

Ping test from host to containers

//ping from host to container 1 failed
[root@localhost ~]# ping 192.168.2.1
PING 192.168.2.1 (192.168.2.1) 56(84) bytes of data.
^C
--- 192.168.2.1 ping statistics ---
3 packets transmitted, 0 received, 100% packet loss, time 1999ms

//ping from host to container 2 failed
[root@localhost ~]# ping 192.168.2.2
PING 192.168.2.2 (192.168.2.2) 56(84) bytes of data.
^C
--- 192.168.2.2 ping statistics ---
3 packets transmitted, 0 received, 100% packet loss, time 1999ms

Ping to the external machine: log in to a container and ping the external system's IP address

//ping from the second container to the external system passed
ubuntu@macvlan_cn2:~$ ping 192.168.2.101
PING 192.168.2.101 (192.168.2.101) 56(84) bytes of data.
64 bytes from 192.168.2.101: icmp_req=1 ttl=64 time=0.460 ms
64 bytes from 192.168.2.101: icmp_req=2 ttl=64 time=0.433 ms
64 bytes from 192.168.2.101: icmp_req=3 ttl=64 time=0.316 ms
^C
--- 192.168.2.101 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.316/0.403/0.460/0.062 ms
ubuntu@macvlan_cn2:~$

//ping from the first container to the external system passed
ubuntu@macvlan_cn1:~$ ping 192.168.2.101
PING 192.168.2.101 (192.168.2.101) 56(84) bytes of data.
64 bytes from 192.168.2.101: icmp_req=1 ttl=64 time=0.774 ms
64 bytes from 192.168.2.101: icmp_req=2 ttl=64 time=0.329 ms
64 bytes from 192.168.2.101: icmp_req=3 ttl=64 time=0.218 ms
^C
--- 192.168.2.101 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.218/0.440/0.774/0.240 ms
ubuntu@macvlan_cn1:~$

tcpdump on external machine

[root@]# tcpdump -i enp0s29f7u1
tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on enp0s29f7u1, link-type EN10MB (Ethernet), capture size 65535 bytes
23:50:13.477433 IP 0.0.0.0.bootpc > 255.255.255.255.bootps: BOOTP/DHCP, Request from 00:16:3e:56:95:80 (oui Unknown), length 300
23:50:14.071784 IP 192.168.2.1 > 192.168.2.101: ICMP echo request, id 306, seq 9, length 64
23:50:14.071806 IP 192.168.2.101 > 192.168.2.1: ICMP echo reply, id 306, seq 9, length 64
23:50:15.071807 IP 192.168.2.1 > 192.168.2.101: ICMP echo request, id 306, seq 10, length 64
23:50:15.071828 IP 192.168.2.101 > 192.168.2.1: ICMP echo reply, id 306, seq 10, length 64

macvlan.mode = vepa (Virtual Ethernet Port Aggregator)

In this mode containers only have access to the external network; they have no access to the host or to the other containers.
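
In vepa mode every frame is sent out the parent interface, even frames destined for another macvlan on the same parent; local delivery only works if the adjacent switch supports reflective relay (hairpin mode) and sends the frames back. The manual equivalent looks roughly like this (a sketch; mv0 is a hypothetical name):

//create a macvlan sub-interface in vepa mode on the physical NIC
#ip link add link enp0s29f7u1 name mv0 type macvlan mode vepa
#ip link set mv0 up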

[Image: linux_container_8 - macvlan vepa topology]

Edit the configuration files for our containers (macvlan_cn1 & macvlan_cn2)

macvlan_cn1 container file: /var/lib/lxc/macvlan_cn1/config

# Network configuration
lxc.network.type = macvlan
lxc.network.macvlan.mode = vepa
lxc.network.hwaddr = 00:16:3e:56:95:80
lxc.network.flags = up
lxc.network.link = enp0s29f7u1
lxc.network.ipv4 = 192.168.2.1/24

macvlan_cn2 container file:/var/lib/lxc/macvlan_cn2/config

# Network configuration
lxc.network.type = macvlan
lxc.network.macvlan.mode = vepa
lxc.network.hwaddr = 00:16:3e:cb:f5:71
lxc.network.flags = up
lxc.network.link = enp0s29f7u1
lxc.network.ipv4 = 192.168.2.2/24

Start both containers

#lxc-start -n macvlan_cn1 -d
#lxc-start -n macvlan_cn2 -d

[root@localhost ~]# lxc-info -n macvlan_cn1
Name:           macvlan_cn1
State:          RUNNING
PID:            19204
IP:             192.168.2.1
CPU use:        0.21 seconds
BlkIO use:      0 bytes
Memory use:     5.98 MiB
KMem use:       0 bytes
Link:           enp0s29f7u1
 TX bytes:      891.69 KiB
 RX bytes:      2.00 MiB
 Total bytes:   2.87 MiB
[root@localhost ~]#

[root@localhost ~]# lxc-info -n macvlan_cn2
Name:           macvlan_cn2
State:          RUNNING
PID:            19382
IP:             192.168.2.2
CPU use:        0.20 seconds
BlkIO use:      0 bytes
Memory use:     3.71 MiB
KMem use:       0 bytes
Link:           enp0s29f7u1
 TX bytes:      891.69 KiB
 RX bytes:      2.00 MiB
 Total bytes:   2.87 MiB
[root@localhost ~]#

Log in and perform a ping test from container macvlan_cn1 to macvlan_cn2 (login/password: ubuntu/ubuntu)

As you can see, the ping test between the containers failed

[root@localhost ~]# lxc-console -n macvlan_cn1

ubuntu@macvlan_cn1:~$ ifconfig
eth0      Link encap:Ethernet  HWaddr 00:16:3e:56:95:80
          inet addr:192.168.2.1  Bcast:192.168.2.255  Mask:255.255.255.0
          inet6 addr: fe80::216:3eff:fe56:9580/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:21 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:0 (0.0 B)  TX bytes:5094 (5.0 KB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)

ubuntu@macvlan_cn1:~$ ip route
192.168.2.0/24 dev eth0  proto kernel  scope link  src 192.168.2.1

//ping from container 1 to container 2 failed
ubuntu@macvlan_cn1:~$ ping 192.168.2.2
PING 192.168.2.2 (192.168.2.2) 56(84) bytes of data.
From 192.168.2.1 icmp_seq=1 Destination Host Unreachable
From 192.168.2.1 icmp_seq=2 Destination Host Unreachable
From 192.168.2.1 icmp_seq=3 Destination Host Unreachable
From 192.168.2.1 icmp_seq=4 Destination Host Unreachable
^C
--- 192.168.2.2 ping statistics ---
5 packets transmitted, 0 received, +4 errors, 100% packet loss, time 4001ms
pipe 4

Perform a ping test from container macvlan_cn2 to macvlan_cn1. As you can see, the ping test failed

ubuntu@macvlan_cn2:~$ ifconfig
eth0      Link encap:Ethernet  HWaddr 00:16:3e:cb:f5:71
          inet addr:192.168.2.2  Bcast:192.168.2.255  Mask:255.255.255.0
          inet6 addr: fe80::216:3eff:fecb:f571/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:32 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:0 (0.0 B)  TX bytes:8856 (8.8 KB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:16 errors:0 dropped:0 overruns:0 frame:0
          TX packets:16 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1
          RX bytes:1184 (1.1 KB)  TX bytes:1184 (1.1 KB)

ubuntu@macvlan_cn2:~$ ip route
192.168.2.0/24 dev eth0  proto kernel  scope link  src 192.168.2.2

//ping test from container 2 to container 1 failed
ubuntu@macvlan_cn2:~$ ping 192.168.2.1
PING 192.168.2.1 (192.168.2.1) 56(84) bytes of data.
From 192.168.2.2 icmp_seq=1 Destination Host Unreachable
From 192.168.2.2 icmp_seq=2 Destination Host Unreachable
From 192.168.2.2 icmp_seq=3 Destination Host Unreachable
From 192.168.2.2 icmp_seq=4 Destination Host Unreachable
^C
--- 192.168.2.1 ping statistics ---
4 packets transmitted, 0 received, +4 errors, 100% packet loss, time 3000ms
pipe 4

Ping from the container to the external system.

As you can see, the ping test passed

[root@localhost ~]# lxc-console -n macvlan_cn2

Connected to tty 1
Type <Ctrl+a q> to exit the console, <Ctrl+a Ctrl+a> to enter Ctrl+a itself

//ping to external system passed
ubuntu@macvlan_cn2:~$ ping 192.168.2.101
PING 192.168.2.101 (192.168.2.101) 56(84) bytes of data.
64 bytes from 192.168.2.101: icmp_req=1 ttl=64 time=0.373 ms
64 bytes from 192.168.2.101: icmp_req=2 ttl=64 time=0.271 ms
64 bytes from 192.168.2.101: icmp_req=3 ttl=64 time=0.384 ms
64 bytes from 192.168.2.101: icmp_req=4 ttl=64 time=0.265 ms
64 bytes from 192.168.2.101: icmp_req=5 ttl=64 time=0.400 ms
64 bytes from 192.168.2.101: icmp_req=6 ttl=64 time=0.398 ms
64 bytes from 192.168.2.101: icmp_req=7 ttl=64 time=0.347 ms
64 bytes from 192.168.2.101: icmp_req=8 ttl=64 time=0.381 ms
^C
--- 192.168.2.101 ping statistics ---
8 packets transmitted, 8 received, 0% packet loss, time 6999ms
rtt min/avg/max/mdev = 0.265/0.352/0.400/0.053 ms

Ping from the container to the host. As you can see, the ping failed

[root@localhost macvlan_cn1]# ifconfig enp0s29f7u1
enp0s29f7u1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.2.100  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:50:b6:19:41:65  txqueuelen 1000  (Ethernet)
        RX packets 9392  bytes 932640 (910.7 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 11896  bytes 2158529 (2.0 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

//ping from container to host failed
ubuntu@macvlan_cn2:~$ ping 192.168.2.100
PING 192.168.2.100 (192.168.2.100) 56(84) bytes of data.
From 192.168.2.2 icmp_seq=1 Destination Host Unreachable
From 192.168.2.2 icmp_seq=2 Destination Host Unreachable
From 192.168.2.2 icmp_seq=3 Destination Host Unreachable
From 192.168.2.2 icmp_seq=4 Destination Host Unreachable
^C
--- 192.168.2.100 ping statistics ---
4 packets transmitted, 0 received, +4 errors, 100% packet loss, time 3000ms
pipe 4


Lab-36: Linux containers (lxc)

The last three labs, Lab-33, Lab-34 & Lab-35, talked about Docker containers. As mentioned, Docker containers use Linux containers in the background. So what are Linux containers? In this lab I will give a brief introduction to Linux containers: how to spin up a Linux container, and so on.

More reading on Linux containers here


Prerequisite:

I am using CentOS 7 for this lab (release 7.3.1611, as shown below)

My machine specifications:


//OS release
[root@localhost ~]# cat /etc/*release
CentOS Linux release 7.3.1611 (Core)
NAME="CentOS Linux"
VERSION="7 (Core)"
ID="centos"
ID_LIKE="rhel fedora"
VERSION_ID="7"
PRETTY_NAME="CentOS Linux 7 (Core)"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:centos:centos:7"
HOME_URL="https://www.centos.org/"
BUG_REPORT_URL="https://bugs.centos.org/"

CENTOS_MANTISBT_PROJECT="CentOS-7"
CENTOS_MANTISBT_PROJECT_VERSION="7"

REDHAT_SUPPORT_PRODUCT="centos"
REDHAT_SUPPORT_PRODUCT_VERSION="7"

CentOS Linux release 7.3.1611 (Core)
CentOS Linux release 7.3.1611 (Core)
[root@localhost ~]#

//Check kernel version
[root@localhost ~]# uname -r
3.10.0-514.2.2.el7.x86_64
[root@localhost ~]#

Install Linux container (lxc) and dependencies


$yum update -y
$yum install epel-release

//Install dependencies
$yum install debootstrap perl libvirt bridge-utils
$yum install lxc lxc-templates

//start service
$systemctl start lxc.service
$systemctl start libvirtd 

Check Linux container service status


[root@localhost ~]# systemctl status lxc.service
● lxc.service - LXC Container Initialization and Autoboot Code
   Loaded: loaded (/usr/lib/systemd/system/lxc.service; disabled; vendor preset: disabled)
   Active: active (exited) since Fri 2017-01-27 18:05:26 EST; 16h ago
  Process: 4452 ExecStart=/usr/libexec/lxc/lxc-autostart-helper start (code=exited, status=0/SUCCESS)
  Process: 4444 ExecStartPre=/usr/libexec/lxc/lxc-devsetup (code=exited, status=0/SUCCESS)
 Main PID: 4452 (code=exited, status=0/SUCCESS)

Jan 27 18:05:26 localhost.localdomain systemd[1]: Starting LXC Container Initialization and Autoboot Code...
Jan 27 18:05:26 localhost.localdomain lxc-devsetup[4444]: Creating /dev/.lxc
Jan 27 18:05:26 localhost.localdomain lxc-devsetup[4444]: /dev is devtmpfs
Jan 27 18:05:26 localhost.localdomain lxc-devsetup[4444]: Creating /dev/.lxc/user
Jan 27 18:05:26 localhost.localdomain lxc-autostart-helper[4452]: Starting LXC autoboot containers:  [  OK  ]
Jan 27 18:05:26 localhost.localdomain systemd[1]: Started LXC Container Initialization and Autoboot Code.

Check the lxc configuration. Important things to note here are:

  1. Namespaces are enabled
  2. Veth pair device: enabled. We will be using the default veth network type
  3. Control groups are enabled

//Check lxc configuration
[root@localhost ~]# lxc-checkconfig
Kernel configuration not found at /proc/config.gz; searching...
Kernel configuration found at /boot/config-3.10.0-514.2.2.el7.x86_64
--- Namespaces ---
Namespaces: enabled
Utsname namespace: enabled
Ipc namespace: enabled
Pid namespace: enabled
User namespace: enabled
Network namespace: enabled
Multiple /dev/pts instances: enabled

--- Control groups ---
Cgroup: enabled
Cgroup clone_children flag: enabled
Cgroup device: enabled
Cgroup sched: enabled
Cgroup cpu account: enabled
Cgroup memory controller: enabled
Cgroup cpuset: enabled

--- Misc ---
Veth pair device: enabled
Macvlan: enabled
Vlan: enabled
Bridges: enabled
Advanced netfilter: enabled
CONFIG_NF_NAT_IPV4: enabled
CONFIG_NF_NAT_IPV6: enabled
CONFIG_IP_NF_TARGET_MASQUERADE: enabled
CONFIG_IP6_NF_TARGET_MASQUERADE: enabled
CONFIG_NETFILTER_XT_TARGET_CHECKSUM: enabled

--- Checkpoint/Restore ---
checkpoint restore: enabled
CONFIG_FHANDLE: enabled
CONFIG_EVENTFD: enabled
CONFIG_EPOLL: enabled
CONFIG_UNIX_DIAG: enabled
CONFIG_INET_DIAG: enabled
CONFIG_PACKET_DIAG: enabled
CONFIG_NETLINK_DIAG: enabled
File capabilities: enabled

Note : Before booting a new kernel, you can check its configuration
usage : CONFIG=/path/to/config /bin/lxc-checkconfig

[root@localhost ~]#

Procedure:

Create a container. Let's create a container with an Ubuntu system

lxc-create -t ubuntu -n ubuntu_container

-n: Name of the container

-t: Container template

lxc comes with ready-made templates for various systems. You can check the templates available on your system here:


[root@localhost]# ls /usr/share/lxc/templates
lxc-alpine    lxc-archlinux  lxc-centos  lxc-debian    lxc-fedora  lxc-openmandriva  lxc-oracle  lxc-sshd    lxc-ubuntu-cloud
lxc-altlinux  lxc-busybox    lxc-cirros  lxc-download  lxc-gentoo  lxc-opensuse      lxc-plamo   lxc-ubuntu
//create container using ubuntu system. Container user/password ubuntu/ubuntu
[root@localhost ~]# lxc-create -t ubuntu -n ubuntu_container

<truncated>
##
# The default user is 'ubuntu' with password 'ubuntu'!
# Use the 'sudo' command to run tasks as root in the container.
##

Check the container status using lxc-info


[root@localhost ~]# lxc-info --name ubuntu_container
Name:           ubuntu_container
State:          STOPPED
[root@localhost ~]#

Start the container.

lxc-start -n <container name> -d

-d: run the container in the background

Make sure the Linux bridge virbr0 exists; if not, create it (a sketch follows after the ifconfig output below), because by default Linux containers use the Linux bridge virbr0. Check the network settings for the container we created in the file /var/lib/lxc/ubuntu_container/config. This is what my container's network settings look like

lxc.network.type = veth
lxc.network.link = virbr0
lxc.network.flags = up

lxc.network.type: lxc supports these network types:

  1. veth – a virtual ethernet interface is created inside the container and the other side of the pair is attached to the bridge specified in lxc.network.link
  2. empty – only a loopback interface is created inside the container
  3. vlan – a vlan interface is linked with the interface specified by the lxc.network.link and assigned to the container. The vlan identifier is specified with the option lxc.network.vlan.id
  4. phys – assigns a physical interface to the container
  5. macvlan – MAC-address-based vlan

In this lab I am using default network type veth.

Note: if you would like to change the default network behavior you can do so by editing the default file /etc/lxc/default.conf


[root@localhost lxc]# cat /etc/lxc/default.conf
lxc.network.type = veth
lxc.network.link = virbr0
lxc.network.flags = up

[root@localhost lxc]# ifconfig virbr0
virbr0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.0.10  netmask 255.255.255.0  broadcast 192.168.0.255
        inet6 fe80::a8e8:3fff:fe5d:c8f0  prefixlen 64  scopeid 0x20<link>
        ether fe:e7:4b:69:2c:0f  txqueuelen 1000  (Ethernet)
        RX packets 2376  bytes 775152 (756.9 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 56  bytes 7486 (7.3 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
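
If virbr0 is missing on your system (libvirtd normally creates it), you can create it by hand with the same tools, a minimal sketch:

//create the bridge, give it an IP and bring it up
#brctl addbr virbr0
#ip addr add 192.168.0.10/24 dev virbr0
#ip link set virbr0 up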

//start the lxc container; -d means run in the background
[root@localhost ~]# lxc-start -n ubuntu_container -d
[root@localhost ~]# lxc-info -n ubuntu_container
Name:           ubuntu_container
State:          RUNNING
PID:            5317
CPU use:        0.21 seconds
BlkIO use:      0 bytes
Memory use:     3.75 MiB
KMem use:       0 bytes
Link:           vethXMQ8QW
 TX bytes:      1.57 KiB
 RX bytes:      578 bytes
 Total bytes:   2.13 KiB
[root@localhost ~]#

//container network interface vethXMQ8QW attached to Linux bridge virbr0 
[root@localhost ~]# sudo brctl show
bridge name     bridge id               STP enabled     interfaces
virbr0          8000.fee74b692c0f       no              vethXMQ8QW

[root@localhost ~]# ifconfig vethXMQ8QW
vethXMQ8QW: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet6 fe80::fce7:4bff:fe69:2c0f  prefixlen 64  scopeid 0x20<link>
        ether fe:e7:4b:69:2c:0f  txqueuelen 1000  (Ethernet)
        RX packets 16  bytes 3384 (3.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 8  bytes 648 (648.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@localhost lxc]# ps -ef | grep ubuntu_container
root      5307     1  0 Jan27 ?        00:00:00 lxc-start -n ubuntu_container -d
root      7859  7011  0 10:40 pts/6    00:00:00 grep --color=auto ubuntu_container
[root@localhost lxc]#

lxc creates a configuration file for each container. Let's check the configuration file of our container (ubuntu_container)


[root@localhost]# cat /var/lib/lxc/ubuntu_container/config
# Template used to create this container: /usr/share/lxc/templates/lxc-ubuntu
# Parameters passed to the template:
# For additional config options, please look at lxc.container.conf(5)

# Common configuration
lxc.include = /usr/share/lxc/config/ubuntu.common.conf

# Container specific configuration
lxc.rootfs = /var/lib/lxc/ubuntu_container/rootfs
lxc.mount = /var/lib/lxc/ubuntu_container/fstab
lxc.utsname = ubuntu_container
lxc.arch = amd64

# Network configuration
lxc.network.type = veth
lxc.network.hwaddr = 00:16:3e:9b:3c:8d
lxc.network.flags = up
lxc.network.link = virbr0
[root@localhost ubuntu_container]#

You can attach to your container and run commands. This is a quick way to check your container

lxc-attach -n <container name> [<command>]


[root@localhost]# lxc-attach -n ubuntu_container /sbin/ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
16: eth0@if17: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP qlen 1000
    link/ether 00:16:3e:9b:3c:8d brd ff:ff:ff:ff:ff:ff
       valid_lft forever preferred_lft forever
    inet6 fe80::216:3eff:fe9b:3c8d/64 scope link
       valid_lft forever preferred_lft forever
[root@localhost ubuntu_container]#

Connect to the container using the lxc-console command (user/password: ubuntu/ubuntu). To terminate the session use Ctrl+a, q

lxc-console -n ubuntu_container


[root@localhost ~]# lxc-console -n ubuntu_container

Connected to tty 1
Type <Ctrl+a q> to exit the console, <Ctrl+a Ctrl+a> to enter Ctrl+a itself


ubuntu@ubuntu_container:~$ ifconfig
eth0      Link encap:Ethernet  HWaddr 00:16:3e:9b:3c:8d
          inet6 addr: fe80::216:3eff:fe9b:3c8d/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:8 errors:0 dropped:0 overruns:0 frame:0
          TX packets:9492 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:648 (648.0 B)  TX bytes:3242376 (3.2 MB)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:25 errors:0 dropped:0 overruns:0 frame:0
          TX packets:25 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1
          RX bytes:2080 (2.0 KB)  TX bytes:2080 (2.0 KB)

Freeze and unfreeze to stop and start the processes inside the container


[root@localhost ~]# lxc-info -n ubuntu_container
Name:           ubuntu_container
State:          RUNNING
PID:            5317
CPU use:        21.22 seconds
BlkIO use:      171.33 MiB
Memory use:     91.39 MiB
KMem use:       0 bytes
Link:           vethXMQ8QW
 TX bytes:      3.12 MiB
 RX bytes:      648 bytes
 Total bytes:   3.12 MiB

[root@localhost ~]# lxc-freeze -n ubuntu_container
[root@localhost ~]# lxc-info -n ubuntu_container
Name:           ubuntu_container
State:          FROZEN
PID:            5317
CPU use:        21.22 seconds
BlkIO use:      171.33 MiB
Memory use:     91.39 MiB
KMem use:       0 bytes
Link:           vethXMQ8QW
 TX bytes:      3.12 MiB
 RX bytes:      648 bytes
 Total bytes:   3.12 MiB

[root@localhost ~]# lxc-unfreeze -n ubuntu_container
[root@localhost ~]# lxc-info -n ubuntu_container
Name:           ubuntu_container
State:          RUNNING
PID:            5317
CPU use:        21.22 seconds
BlkIO use:      171.33 MiB
Memory use:     91.39 MiB
KMem use:       0 bytes
Link:           vethXMQ8QW
 TX bytes:      3.12 MiB
 RX bytes:      648 bytes
 Total bytes:   3.12 MiB

Delete the container using lxc-destroy. You must stop a running container before you issue the destroy command

lxc-stop -n <container name>
lxc-destroy -n <container name>


[root@localhost ~]# lxc-info -n ubuntu_container
Name:           ubuntu_container
State:          RUNNING
PID:            5317
CPU use:        21.24 seconds
BlkIO use:      171.33 MiB
Memory use:     91.39 MiB
KMem use:       0 bytes
Link:           vethXMQ8QW
 TX bytes:      3.12 MiB
 RX bytes:      648 bytes
 Total bytes:   3.12 MiB

[root@localhost ~]# lxc-stop -n ubuntu_container
[root@localhost ~]# lxc-info -n ubuntu_container
Name:           ubuntu_container
State:          STOPPED

[root@localhost ~]# lxc-destroy -n ubuntu_container
[root@localhost ~]# lxc-info -n ubuntu_container
ubuntu_container doesn't exist
[root@localhost ~]#

//container interface removed from bridge
[root@localhost ~]# sudo brctl show
bridge name     bridge id               STP enabled     interfaces
virbr0          8000.000000000000       no
[root@localhost ~]#


Lab-35: Docker Swarm

Docker swarm is a cluster management and orchestration tool for Docker containers. Docker engines participating in a cluster run in swarm mode. You enable swarm mode for an engine by either initializing a swarm or joining an existing swarm.

A swarm is a cluster of Docker engines, or nodes, where you deploy services. The Docker Engine CLI and API include commands to manage swarm nodes (e.g., add or remove nodes), and deploy and orchestrate services across the swarm.

When you run Docker without swarm mode, you execute container commands. When you run Docker in swarm mode, you orchestrate services. You can run swarm services and standalone containers on the same Docker instances.

Read more about Docker swarm here

Docker Swarm terminology

Node - A node is an instance of the Docker engine participating in the swarm. You can run nodes on one physical machine or distribute them across multiple machines
Manager node - A manager node manages services on the cluster
Worker node - A worker node executes tasks dispatched by the manager node

Prerequisite

To use swarm you need Docker v1.12 or higher. Install Docker on all nodes by following the instructions below:

Login as a user with sudo permission
$su - divine

$ uname -r   //check kernel version it should be 3.10 or higher
3.10.0-229.el7.x86_64

$ sudo yum update

run Docker installation script
$ curl -fsSL https://get.docker.com/ | sh

Logout and log back in
$logout
$su - divine

Enable the service
$ sudo systemctl enable docker.service
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service

Start Docker daemon
$ sudo systemctl start docker

check if Docker is running
$docker -v
Docker version 1.12.6, build 78d1802

Create Docker group
$ sudo groupadd docker

Add user to Docker group
$sudo usermod -aG docker $(whoami)

Logout and log back in
$logout
$su - divine

[divine@localhost ~]$ docker --version
Docker version 1.12.6, build 78d1802
[divine@localhost ~]$

In this lab I have three physical machines, one acting as Manager and two as Workers. Make sure there is IP connectivity among the nodes (perform a ping test):
Manager  = 192.254.211.167
Worker_1 = 192.254.211.166
Worker_2 = 192.254.211.168

Procedure

  1. Create the swarm on the machine acting as Manager
[divine@Manager ~]$ docker swarm init --advertise-addr 192.254.211.167
Swarm initialized: current node (9ra9h1uopginqno78zi8rq9ug) is now a manager.

To add a worker to this swarm, run the following command:

    docker swarm join \
    --token SWMTKN-1-4ok62ytnftqgeyvzlz82zlbuzmbx3soqhyfjsoiylsfx7o3tnh-8b5cyqbg6ukpl9oxap0ntrl7t \
    192.254.211.167:2377

The swarm process is running and listening on port 2377

[divine@Manager ~]$ netstat -pan | grep :2377
(No info could be read for "-p": geteuid()=1002 but you should be root.)
tcp 0 0 192.254.211.167:40892 192.254.211.167:2377 ESTABLISHED -
tcp 0 0 127.0.0.1:53116 127.0.0.1:2377 ESTABLISHED -
tcp6 0 0 :::2377 :::* LISTEN -
tcp6 0 0 192.254.211.167:2377 192.254.211.167:40892 ESTABLISHED -
tcp6 0 0 127.0.0.1:2377 127.0.0.1:53116 ESTABLISHED -
[divine@localhost ~]$
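
Note: if a firewall is running on the nodes, the swarm ports must be open: TCP 2377 for cluster management, TCP/UDP 7946 for node-to-node communication, and UDP 4789 for overlay network traffic. On CentOS 7 with firewalld, roughly (a sketch):

$ sudo firewall-cmd --permanent --add-port=2377/tcp
$ sudo firewall-cmd --permanent --add-port=7946/tcp
$ sudo firewall-cmd --permanent --add-port=7946/udp
$ sudo firewall-cmd --permanent --add-port=4789/udp
$ sudo firewall-cmd --reload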

[divine@Manager ~]$ docker node ls
ID                           HOSTNAME               STATUS  AVAILABILITY  MANAGER STATUS
9ra9h1uopginqno78zi8rq9ug *  Manager                 Ready   Active        Leader
[divine@Manager ~]$

2. Add Worker_1 to the swarm

//run this command to get worker join command
[divine@Manager ~]$ docker swarm join-token worker
To add a worker to this swarm, run the following command:

    docker swarm join \
    --token SWMTKN-1-4ok62ytnftqgeyvzlz82zlbuzmbx3soqhyfjsoiylsfx7o3tnh-8b5cyqbg6ukpl9oxap0ntrl7t \
    192.254.211.167:2377

//run the output of the above command on the worker node
[divine@Worker_1 ~]$ docker swarm join \
>     --token SWMTKN-1-4ok62ytnftqgeyvzlz82zlbuzmbx3soqhyfjsoiylsfx7o3tnh-8b5cyqbg6ukpl9oxap0ntrl7t \
>     192.254.211.167:2377
This node joined a swarm as a worker.

3. Add Worker_2 to the swarm using the same join command. Run it on Worker_2

[divine@Worker_2 ~]$ docker swarm join \
>     --token SWMTKN-1-4ok62ytnftqgeyvzlz82zlbuzmbx3soqhyfjsoiylsfx7o3tnh-8b5cyqbg6ukpl9oxap0ntrl7t \
>     192.254.211.167:2377
This node joined a swarm as a worker.

Note: All nodes' date/time must be in sync, otherwise the worker join will fail
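
One way to keep clocks in sync is NTP. A minimal sketch for CentOS 7 (package and pool names are stock defaults, adjust to your environment):

//one-shot time sync, run on every node
$ sudo yum install -y ntp
$ sudo ntpdate pool.ntp.org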

4. Check node status on the manager node. Run these commands on the manager node

MANAGER STATUS: <empty> – worker node; Leader – primary manager node

AVAILABILITY: Active – the node is active and the scheduler can assign tasks to it; Pause – the scheduler assigns no new tasks but existing tasks keep running; Drain – the scheduler shuts down existing tasks and assigns no new ones
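
You can change a node's availability from the manager; a quick sketch using Worker_1 from this lab:

//drain a node: its tasks are rescheduled elsewhere and it gets no new tasks
$ docker node update --availability drain Worker_1

//make the node schedulable again
$ docker node update --availability active Worker_1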

[divine@Manager ~]$ docker node ls
ID                           HOSTNAME                     STATUS  AVAILABILITY  MANAGER STATUS
0pye1jeusjpyypcesrjpwsdgg    Worker_1                      Ready   Active
9iz0pjzudzvv2ujqyh0dgzhfg    Worker_2                      Ready   Active
9ra9h1uopginqno78zi8rq9ug *  Manager                       Ready   Active        Leader
[divine@localhost ~]$

//inspect manager node
[divine@Manager ~]$ docker node inspect self
[
    {
        "ID": "9ra9h1uopginqno78zi8rq9ug",
        "Version": {
            "Index": 10
        },
        "CreatedAt": "2017-01-19T23:12:49.552590718Z",
        "UpdatedAt": "2017-01-19T23:12:49.812466748Z",
        "Spec": {
            "Role": "manager",
            "Availability": "active"
        },
        "Description": {
            "Hostname": "Manager",
            "Platform": {
                "Architecture": "x86_64",
                "OS": "linux"
            },
            "Resources": {
                "NanoCPUs": 4000000000,
                "MemoryBytes": 12412542976
            },
            "Engine": {
                "EngineVersion": "1.12.6",
                "Plugins": [
                    {
                        "Type": "Network",
                        "Name": "bridge"
                    },
                    {
                        "Type": "Network",
                        "Name": "host"
                    },
                    {
                        "Type": "Network",
                        "Name": "null"
                    },
                    {
                        "Type": "Network",
                        "Name": "overlay"
                    },
                    {
                        "Type": "Volume",
                        "Name": "local"
                    }
                ]
            }
        },
        "Status": {
            "State": "ready"
        },
        "ManagerStatus": {
            "Leader": true,
            "Reachability": "reachable",
            "Addr": "192.254.211.167:2377"
        }
    }
]
[divine@Manager ~]$

5. Inspect worker node. Run below command on manager node

[divine@Manager ~]$ docker node inspect Worker_2
[
    {
        "ID": "9iz0pjzudzvv2ujqyh0dgzhfg",
        "Version": {
            "Index": 16
        },
        "CreatedAt": "2017-01-19T23:19:56.795050481Z",
        "UpdatedAt": "2017-01-19T23:19:56.954890419Z",
        "Spec": {
            "Role": "worker",
            "Availability": "active"
        },
        "Description": {
            "Hostname": "Worker_2",
            "Platform": {
                "Architecture": "x86_64",
                "OS": "linux"
            },
            "Resources": {
                "NanoCPUs": 8000000000,
                "MemoryBytes": 12412542976
            },
            "Engine": {
                "EngineVersion": "1.13.0",
                "Plugins": [
                    {
                        "Type": "Network",
                        "Name": "bridge"
                    },
                    {
                        "Type": "Network",
                        "Name": "host"
                    },
                    {
                        "Type": "Network",
                        "Name": "macvlan"
                    },
                    {
                        "Type": "Network",
                        "Name": "null"
                    },
                    {
                        "Type": "Network",
                        "Name": "overlay"
                    },
                    {
                        "Type": "Volume",
                        "Name": "local"
                    }
                ]
            }
        },
        "Status": {
            "State": "ready"
        }
    }
]

[divine@Manager ~]$ docker node inspect Worker_1
[
    {
        "ID": "0pye1jeusjpyypcesrjpwsdgg",
        "Version": {
            "Index": 21
        },
        "CreatedAt": "2017-01-19T23:32:47.763436319Z",
        "UpdatedAt": "2017-01-19T23:32:47.916593695Z",
        "Spec": {
            "Role": "worker",
            "Availability": "active"
        },
        "Description": {
            "Hostname": "Worker_1",
            "Platform": {
                "Architecture": "x86_64",
                "OS": "linux"
            },
            "Resources": {
                "NanoCPUs": 4000000000,
                "MemoryBytes": 12412542976
            },
            "Engine": {
                "EngineVersion": "1.13.0",
                "Plugins": [
                    {
                        "Type": "Network",
                        "Name": "bridge"
                    },
                    {
                        "Type": "Network",
                        "Name": "host"
                    },
                    {
                        "Type": "Network",
                        "Name": "macvlan"
                    },
                    {
                        "Type": "Network",
                        "Name": "null"
                    },
                    {
                        "Type": "Network",
                        "Name": "overlay"
                    },
                    {
                        "Type": "Volume",
                        "Name": "local"
                    }
                ]
            }
        },
        "Status": {
            "State": "ready"
        }
    }
]
[divine@Manager ~]$

6. Start a service in the swarm. You can create a service either from an image pulled from Docker Hub or from a local image. In the case of a local image you need to manually load the image on every worker and manager node (a quick sketch below).
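
For the local-image case, a minimal sketch using docker save/load (image and file names are hypothetical):

//on the node that has the image
$ docker save -o myimage.tar myimage

//copy myimage.tar to every manager and worker node, then load it on each
$ docker load -i myimage.tar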

//no service active on swarm
[divine@Manager ~]$ docker service ls
ID  NAME  REPLICAS  IMAGE  COMMAND

//run this command on the manager node. It creates a service named httpd_server
//with 20 container replicas using the nginx image. Docker pulls the image from
//Docker Hub and creates 20 containers. As you can see, the containers are
//distributed across the 3 nodes by the scheduler
[divine@Manager ~]$ docker service create --replicas 20 --name httpd_server nginx
brscnmwefdbxopz6hk2nnxebc
[divine@Manager ~]$ docker service ls
ID            NAME          REPLICAS  IMAGE  COMMAND
brscnmwefdbx  httpd_server  0/20      nginx
[divine@Manager ~]$ docker service ls
ID            NAME          REPLICAS  IMAGE  COMMAND
brscnmwefdbx  httpd_server  0/20      nginx
[divine@Manager ~]$ docker service ls
ID            NAME          REPLICAS  IMAGE  COMMAND
brscnmwefdbx  httpd_server  0/20      nginx
[divine@Manager ~]$ docker service ps httpd_server
ID                         NAME             IMAGE  NODE                         DESIRED STATE  CURRENT STATE                     ERROR
etg7ng9e738kde0cffknfaymx  httpd_server.1   nginx  Worker_2        Running        Preparing less than a second ago
d7o4irupasdflk9dqq955rjil  httpd_server.2   nginx  Manager        Running        Preparing 26 seconds ago
a0px1ll6rla2nfjfenfro3ax1  httpd_server.3   nginx  Worker_2        Running        Preparing less than a second ago
5lx61fb15p2otfmnz0azyk4c1  httpd_server.4   nginx  Worker_2        Running        Preparing less than a second ago
buvz6ndhjb3o7mdbua9y4st3l  httpd_server.5   nginx  Manager        Running        Preparing 26 seconds ago
5zdum3ef3qo2vakppwnjt9t6n  httpd_server.6   nginx  Worker_2        Running        Preparing less than a second ago
7pt4s3fl9z41hhcqwlo2llefa  httpd_server.7   nginx  Worker_1  Running        Running less than a second ago
c5jt346vkr5rcwkk72hfc3if0  httpd_server.8   nginx  Manager        Running        Preparing 26 seconds ago
6wr6r9zz0hfy1zc4lg49i108m  httpd_server.9   nginx  Worker_1  Running        Running less than a second ago
cmzrng2t23udi4i71k0vhf0jl  httpd_server.10  nginx  Worker_2        Running        Preparing less than a second ago
1zagdr2zbwvaz5sd7tnrlucks  httpd_server.11  nginx  Worker_1  Running        Running less than a second ago
dbap94coizealad8clzi1d779  httpd_server.12  nginx  Worker_2        Running        Preparing less than a second ago
57hvlhnd942fnsr6b2wvhp0al  httpd_server.13  nginx  Worker_1  Running        Running less than a second ago
51ic2pk2eoq9hqvx2kp2u60n1  httpd_server.14  nginx  Worker_1  Running        Running less than a second ago
3myxunl1h2dk14zne2hci83av  httpd_server.15  nginx  Worker_1  Running        Running less than a second ago
b7smowk7getaxnc3xilty2vez  httpd_server.16  nginx  Worker_2        Running        Preparing less than a second ago
dlmagnn74o3mqpo61fb9cchtq  httpd_server.17  nginx  Manager        Running        Preparing 26 seconds ago
0t3j2u3wkl4ym3w66rvplgn65  httpd_server.18  nginx  Worker_1  Running        Running less than a second ago
0smn62ybwbdlrwtysfxax3yyl  httpd_server.19  nginx  Manager        Running        Preparing 26 seconds ago
6hk3ge3t5xhyemd70vyw6ymvk  httpd_server.20  nginx  Manager        Running        Preparing 26 seconds ago

[divine@Manager ~]$ docker service ls
ID            NAME          REPLICAS  IMAGE  COMMAND
brscnmwefdbx  httpd_server  16/20     nginx
[divine@Manager ~]$ docker service ls
ID            NAME          REPLICAS  IMAGE  COMMAND
brscnmwefdbx  httpd_server  20/20     nginx
[divine@Manager ~]$

7.Check containers in worker node. Run below commands on worker node

//as you can see this worker node instantiated 7 containers. 
[divine@Worker_1 ~]$ docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED              STATUS              PORTS               NAMES
0e7c1a499cac        nginx:latest        "nginx -g 'daemon ..."   About a minute ago   Up About a minute   80/tcp, 443/tcp     httpd_server.15.3myxunl1h2dk14zne2hci83av
b2d474cd78f3        nginx:latest        "nginx -g 'daemon ..."   About a minute ago   Up About a minute   80/tcp, 443/tcp     httpd_server.9.6wr6r9zz0hfy1zc4lg49i108m
b4e9323c2303        nginx:latest        "nginx -g 'daemon ..."   About a minute ago   Up About a minute   80/tcp, 443/tcp     httpd_server.7.7pt4s3fl9z41hhcqwlo2llefa
fa2b01136a2d        nginx:latest        "nginx -g 'daemon ..."   About a minute ago   Up About a minute   80/tcp, 443/tcp     httpd_server.11.1zagdr2zbwvaz5sd7tnrlucks
0a1dfde8ba0f        nginx:latest        "nginx -g 'daemon ..."   About a minute ago   Up About a minute   80/tcp, 443/tcp     httpd_server.18.0t3j2u3wkl4ym3w66rvplgn65
0f33c50d86aa        nginx:latest        "nginx -g 'daemon ..."   About a minute ago   Up About a minute   80/tcp, 443/tcp     httpd_server.14.51ic2pk2eoq9hqvx2kp2u60n1
9c66bcd8e981        nginx:latest        "nginx -g 'daemon ..."   About a minute ago   Up About a minute   80/tcp, 443/tcp     httpd_server.13.57hvlhnd942fnsr6b2wvhp0al

[divine@Worker_1 ~]$ docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
nginx               latest              a39777a1a4a6        2 days ago          182 MB
[divine@Worker_1 ~]$

8. You can scale the service up or down. Run the below commands on the manager node

//here I am reducing number of containers from 20 to 10
[divine@Manager ~]$ docker service scale httpd_server=10
httpd_server scaled to 10
[divine@Manager ~]$ docker service ls
ID            NAME          REPLICAS  IMAGE  COMMAND
brscnmwefdbx  httpd_server  10/10     nginx
//10 tasks moved from running to shutdown state
[divine@Manager ~]$ docker service ps httpd_server
ID                         NAME             IMAGE  NODE                         DESIRED STATE  CURRENT STATE                    ERROR
etg7ng9e738kde0cffknfaymx  httpd_server.1   nginx  Worker_2        Shutdown       Shutdown less than a second ago
d7o4irupasdflk9dqq955rjil  httpd_server.2   nginx  Manager        Shutdown       Shutdown 18 seconds ago
a0px1ll6rla2nfjfenfro3ax1  httpd_server.3   nginx  Worker_2        Running        Running 47 seconds ago
5lx61fb15p2otfmnz0azyk4c1  httpd_server.4   nginx  Worker_2        Running        Running 41 seconds ago
buvz6ndhjb3o7mdbua9y4st3l  httpd_server.5   nginx  Manager        Running        Running 4 minutes ago
5zdum3ef3qo2vakppwnjt9t6n  httpd_server.6   nginx  Worker_2        Running        Running 41 seconds ago
7pt4s3fl9z41hhcqwlo2llefa  httpd_server.7   nginx  Worker_1  Shutdown       Shutdown less than a second ago
c5jt346vkr5rcwkk72hfc3if0  httpd_server.8   nginx  Manager        Running        Running 4 minutes ago
6wr6r9zz0hfy1zc4lg49i108m  httpd_server.9   nginx  Worker_1  Shutdown       Shutdown less than a second ago
cmzrng2t23udi4i71k0vhf0jl  httpd_server.10  nginx  Worker_2        Shutdown       Shutdown less than a second ago
1zagdr2zbwvaz5sd7tnrlucks  httpd_server.11  nginx  Worker_1  Running        Running about a minute ago
dbap94coizealad8clzi1d779  httpd_server.12  nginx  Worker_2        Shutdown       Shutdown less than a second ago
57hvlhnd942fnsr6b2wvhp0al  httpd_server.13  nginx  Worker_1  Shutdown       Shutdown less than a second ago
51ic2pk2eoq9hqvx2kp2u60n1  httpd_server.14  nginx  Worker_1  Shutdown       Shutdown less than a second ago
3myxunl1h2dk14zne2hci83av  httpd_server.15  nginx  Worker_1  Running        Running about a minute ago
b7smowk7getaxnc3xilty2vez  httpd_server.16  nginx  Worker_2        Shutdown       Shutdown less than a second ago
dlmagnn74o3mqpo61fb9cchtq  httpd_server.17  nginx  Manager        Shutdown       Shutdown 18 seconds ago
0t3j2u3wkl4ym3w66rvplgn65  httpd_server.18  nginx  Worker_1  Running        Running about a minute ago
0smn62ybwbdlrwtysfxax3yyl  httpd_server.19  nginx  Manager        Running        Running 4 minutes ago
6hk3ge3t5xhyemd70vyw6ymvk  httpd_server.20  nginx  Manager        Running        Running 4 minutes ago

//scale up containers from 10 to 15
[divine@Manager ~]$ docker service scale httpd_server=15
httpd_server scaled to 15

[divine@Manager ~]$ docker service ls
ID            NAME          REPLICAS  IMAGE  COMMAND
brscnmwefdbx  httpd_server  15/15     nginx

//5 new containers started. Docker doesn't move containers from shutdown back
//to running state; instead it creates new containers
[divine@Manager ~]$ docker service ps httpd_server
ID                         NAME                 IMAGE  NODE                         DESIRED STATE  CURRENT STATE                    ERROR
4fs7ekdmlsd7ljo4blssyho35  httpd_server.1       nginx  Worker_2        Running        Running less than a second ago
etg7ng9e738kde0cffknfaymx   \_ httpd_server.1   nginx  Worker_2        Shutdown       Shutdown less than a second ago
4kjcflhtq8t5m39syol75pbgx  httpd_server.2       nginx  Worker_2        Running        Running less than a second ago
d7o4irupasdflk9dqq955rjil   \_ httpd_server.2   nginx  Manager        Shutdown       Shutdown 59 seconds ago
a0px1ll6rla2nfjfenfro3ax1  httpd_server.3       nginx  Worker_2        Running        Running about a minute ago
5lx61fb15p2otfmnz0azyk4c1  httpd_server.4       nginx  Worker_2        Running        Running about a minute ago
buvz6ndhjb3o7mdbua9y4st3l  httpd_server.5       nginx  Manager        Running        Running 4 minutes ago
5zdum3ef3qo2vakppwnjt9t6n  httpd_server.6       nginx  Worker_2        Running        Running about a minute ago
cezj4m9x16dtow9yet4wovi6e  httpd_server.7       nginx  Worker_1  Running        Running less than a second ago
7pt4s3fl9z41hhcqwlo2llefa   \_ httpd_server.7   nginx  Worker_1  Shutdown       Shutdown less than a second ago
c5jt346vkr5rcwkk72hfc3if0  httpd_server.8       nginx  Manager        Running        Running 4 minutes ago
2iyv3fjibrc53k4aoti5sl115  httpd_server.9       nginx  Worker_1  Running        Running less than a second ago
6wr6r9zz0hfy1zc4lg49i108m   \_ httpd_server.9   nginx  Worker_1  Shutdown       Shutdown less than a second ago
7pi63gh79o9sbm1bqy4ouw24j  httpd_server.10      nginx  Manager        Running        Running 8 seconds ago
cmzrng2t23udi4i71k0vhf0jl   \_ httpd_server.10  nginx  Worker_2        Shutdown       Shutdown less than a second ago
1zagdr2zbwvaz5sd7tnrlucks  httpd_server.11      nginx  Worker_1  Running        Running 2 minutes ago
dbap94coizealad8clzi1d779  httpd_server.12      nginx  Worker_2        Shutdown       Shutdown less than a second ago
57hvlhnd942fnsr6b2wvhp0al  httpd_server.13      nginx  Worker_1  Shutdown       Shutdown less than a second ago
51ic2pk2eoq9hqvx2kp2u60n1  httpd_server.14      nginx  Worker_1  Shutdown       Shutdown less than a second ago
3myxunl1h2dk14zne2hci83av  httpd_server.15      nginx  Worker_1  Running        Running 2 minutes ago
b7smowk7getaxnc3xilty2vez  httpd_server.16      nginx  Worker_2        Shutdown       Shutdown less than a second ago
dlmagnn74o3mqpo61fb9cchtq  httpd_server.17      nginx  Manager        Shutdown       Shutdown 58 seconds ago
0t3j2u3wkl4ym3w66rvplgn65  httpd_server.18      nginx  Worker_1  Running        Running 2 minutes ago
0smn62ybwbdlrwtysfxax3yyl  httpd_server.19      nginx  Manager        Running        Running 4 minutes ago
6hk3ge3t5xhyemd70vyw6ymvk  httpd_server.20      nginx  Manager        Running        Running 4 minutes ago

[divine@Manager ~]$ docker service inspect --pretty httpd_server
ID:             brscnmwefdbxopz6hk2nnxebc
Name:           httpd_server
Mode:           Replicated
 Replicas:      15
Placement:
UpdateConfig:
 Parallelism:   1
 On failure:    pause
ContainerSpec:
 Image:         nginx
Resources:
[divine@localhost ~]$

9. Delete a service. Run below commands on manager node

[divine@Manager ~]$ docker service ls
ID            NAME          REPLICAS  IMAGE  COMMAND
brscnmwefdbx  httpd_server  15/15     nginx
[divine@Manager ~]$ docker service rm httpd_server
httpd_server
[divine@Manager ~]$ docker service ls
ID  NAME  REPLICAS  IMAGE  COMMAND
[divine@Manager ~]$ docker service ps httpd_server
Error: No such service: httpd_server
[divine@localhost ~]$

//check worker
[divine@Worker_1 ~]$ docker ps
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES

10. Leave swarm. Remove worker node from swarm

[divine@Manager ~]$ docker node ls
ID                           HOSTNAME                     STATUS  AVAILABILITY  MANAGER STATUS
0pye1jeusjpyypcesrjpwsdgg    Worker_1                      Ready   Active
9iz0pjzudzvv2ujqyh0dgzhfg    Worker_2                      Ready   Active
9ra9h1uopginqno78zi8rq9ug *  Manager                       Ready   Active        Leader

//try this on worker node
[divine@Worker_1 ~]$ docker swarm leave
Node left the swarm.
[divine@fpm4richdev ~]$

//check manager. Worker_1 shows Down state
[divine@Manager ~]$ docker node ls
ID                           HOSTNAME                     STATUS  AVAILABILITY  MANAGER STATUS
0pye1jeusjpyypcesrjpwsdgg    Worker_1                     Down    Active
9iz0pjzudzvv2ujqyh0dgzhfg    Worker_2                     Ready   Active
9ra9h1uopginqno78zi8rq9ug *  Manager                      Ready   Active        Leader
[divine@localhost ~]$

11. Join the swarm again. The worker rejoins the swarm

//run this command on manager to get worker node join command
[divine@Manager ~]$ docker swarm join-token worker
To add a worker to this swarm, run the following command:

    docker swarm join \
    --token SWMTKN-1-4ok62ytnftqgeyvzlz82zlbuzmbx3soqhyfjsoiylsfx7o3tnh-8b5cyqbg6ukpl9oxap0ntrl7t \
    192.254.211.167:2377


//try above join command on worker node
[divine@Worker_1 ~]$ docker swarm join \
>     --token SWMTKN-1-4ok62ytnftqgeyvzlz82zlbuzmbx3soqhyfjsoiylsfx7o3tnh-8b5cyqbg6ukpl9oxap0ntrl7t \
>     192.254.211.167:2377
This node joined a swarm as a worker.
[divine@Worker_1 ~]$

//check node status on the manager. Docker adds the node as a new entry; it
//doesn't move the Down node back to Ready
[divine@Manager ~]$ docker node ls
ID                           HOSTNAME                     STATUS  AVAILABILITY  MANAGER STATUS
0pye1jeusjpyypcesrjpwsdgg    Worker_1                     Down    Active
8pzfy2447ox4c2ay8we1l35su    Worker_1                     Ready   Active
9iz0pjzudzvv2ujqyh0dgzhfg    Worker_2                     Ready   Active
9ra9h1uopginqno78zi8rq9ug *  Manager                      Ready   Active        Leader
[divine@localhost ~]$

//remove down node
[divine@Manager ~]$ docker node rm 0pye1jeusjpyypcesrjpwsdgg
0pye1jeusjpyypcesrjpwsdgg

[divine@Manager ~]$ docker node ls
ID                           HOSTNAME                     STATUS  AVAILABILITY  MANAGER STATUS
8pzfy2447ox4c2ay8we1l35su    Worker_1                     Ready   Active
9iz0pjzudzvv2ujqyh0dgzhfg    Worker_2                     Ready   Active
9ra9h1uopginqno78zi8rq9ug *  Manager                      Ready   Active        Leader
[divine@localhost ~]$


Lab-34: Docker networking

Docker networking allows containers to communicate with each other. Docker creates three types of networks out of the box: 1) bridge, 2) host and 3) none.

A bridge network is a typical Linux bridge implementation. A host network maps the host's network interfaces into the container.

//Default network types
[divine@localhost ~]$ docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
1eb9249c028b        bridge              bridge              local
3958d58a8597        host                host                local
b2dafd188630        none                null                local
[divine@localhost ~]$

//Docker bridge 
[divine@localhost ~]$ ip addr
4: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
    link/ether 02:42:85:ce:f9:8d brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:85ff:fece:f98d/64 scope link
       valid_lft forever preferred_lft forever

Docker bridge network test

By default containers use the bridge network if no network is specified at run time. A bridge network is a Linux bridge. Let's create two containers using the image created in Lab-33 and test Docker bridge networking

Instantiate two containers httpd_server_1 & httpd_server_2

[divine@localhost ~]$ docker run -it -d -P --name=httpd_server_1 ubuntu-httpd-server
a06696b1d1a98c1e4afd288e11495f5bea174f9e28aade5b3c6fb563e4d025a0

[divine@localhost ~]$ docker ps
CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS                   NAMES
a06696b1d1a9        ubuntu-httpd-server   "apachectl -D FOREGRO"   7 seconds ago       Up 4 seconds        0.0.0.0:32773->80/tcp   httpd_server_1

[divine@localhost ~]$ docker run -it -d -P --name=httpd_server_2 ubuntu-httpd-server
b3bc56610cdcd03f666b37ad33bf0d14206c0a747151541f96b250ecc7686050

[divine@localhost ~]$ docker ps
CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS                   NAMES
b3bc56610cdc        ubuntu-httpd-server   "apachectl -D FOREGRO"   5 seconds ago       Up 3 seconds        0.0.0.0:32774->80/tcp   httpd_server_2
a06696b1d1a9        ubuntu-httpd-server   "apachectl -D FOREGRO"   21 seconds ago      Up 19 seconds       0.0.0.0:32773->80/tcp   httpd_server_1
[divine@localhost ~]$

Check the host machine interfaces. You will see two new interfaces created and attached to the Docker bridge (docker0)

//Two new interface created, one for each container
[divine@localhost ~]$ ip addr
4: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
    link/ether 02:42:85:ce:f9:8d brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:85ff:fece:f98d/64 scope link
       valid_lft forever preferred_lft forever

62: veth7a81e08@if61: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP
    link/ether 7e:fe:e9:f2:cd:9b brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::7cfe:e9ff:fef2:cd9b/64 scope link
       valid_lft forever preferred_lft forever
64: vetha5bf765@if63: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP
    link/ether 8a:d8:1a:a0:03:52 brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet6 fe80::88d8:1aff:fea0:352/64 scope link
       valid_lft forever preferred_lft forever

//Both interfaces attached to docker bridge
[divine@localhost ~]$ sudo brctl show
bridge name     bridge id               STP enabled     interfaces
docker0         8000.024285cef98d       no              veth7a81e08
                                                        vetha5bf765
[divine@localhost ~]$

Inspect your bridge and check the IP addresses assigned to each container. As you can see, httpd_server_1 is assigned IP address 172.17.0.2 and httpd_server_2 is assigned 172.17.0.3

[divine@localhost ~]$ docker network inspect bridge
[
    {
        "Name": "bridge",
        "Id": "1eb9249c028b10f76476f6a2e92852e6f87a2e50a7e89926f0a096713a44a945",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16"
                }
            ]
        },
        "Internal": false,
        "Containers": {
            "a06696b1d1a98c1e4afd288e11495f5bea174f9e28aade5b3c6fb563e4d025a0": {
                "Name": "httpd_server_1",
                "EndpointID": "beffedfb0145ee33f15c69b6dfd6f2842174a1bf6e298ccb0f944160a731f9b8",
                "MacAddress": "02:42:ac:11:00:02",
                "IPv4Address": "172.17.0.2/16",
                "IPv6Address": ""
            },
            "b3bc56610cdcd03f666b37ad33bf0d14206c0a747151541f96b250ecc7686050": {
                "Name": "httpd_server_2",
                "EndpointID": "166dd093f7ec1862b5c705889c2100ebfd4cb62ec5c3105ad2cdaddcff695b53",
                "MacAddress": "02:42:ac:11:00:03",
                "IPv4Address": "172.17.0.3/16",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
[divine@localhost ~]$

Connect to httpd_server_1 and ping to httpd_server_2

//connect to container httpd_server_1
[divine@localhost ~]$ docker exec -i -t httpd_server_1 /bin/bash
root@a06696b1d1a9:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
61: eth0@if62: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 172.17.0.2/16 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:acff:fe11:2/64 scope link
       valid_lft forever preferred_lft forever

//ping to container httpd_server_2 ip address
root@a06696b1d1a9:/bin# ping 172.17.0.3
PING 172.17.0.3 (172.17.0.3): 56 data bytes
64 bytes from 172.17.0.3: icmp_seq=0 ttl=64 time=0.105 ms
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.052 ms
64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.052 ms
64 bytes from 172.17.0.3: icmp_seq=3 ttl=64 time=0.052 ms
^C--- 172.17.0.3 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.052/0.065/0.105/0.023 ms

//connect to container httpd_server_2
[divine@localhost ~]$ docker exec -i -t httpd_server_2 /bin/bash
root@b3bc56610cdc:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
63: eth0@if64: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 172.17.0.3/16 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:acff:fe11:3/64 scope link
       valid_lft forever preferred_lft forever
root@b3bc56610cdc:/#

//ping to external ip 
root@a06696b1d1a9:/# ping 167.254.210.33
PING 167.254.210.33 (167.254.210.33): 56 data bytes
64 bytes from 167.254.210.33: icmp_seq=0 ttl=254 time=0.984 ms
64 bytes from 167.254.210.33: icmp_seq=1 ttl=254 time=0.522 ms
64 bytes from 167.254.210.33: icmp_seq=2 ttl=254 time=0.548 ms
^C--- 167.254.210.33 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.522/0.685/0.984/0.212 ms

//docker bridge is the default gateway for containers
root@a06696b1d1a9:/# ip route
default via 172.17.0.1 dev eth0
172.17.0.0/16 dev eth0  proto kernel  scope link  src 172.17.0.2
root@a06696b1d1a9:/#

Create bridge network

If you don’t like the IP address scheme of the default ‘docker0’ bridge, you can create your own bridge network and assign your own subnet.

Create a bridge network with a subnet; name the bridge ‘divine_bridge’

[divine@localhost ~]$ docker network create --subnet 192.168.0.0/24 divine_bridge
ea08d7f8b489c8df56f0412ab5aceb16f59de14539fae15fc2cea3d20b09d087

[divine@localhost ~]$ docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
1eb9249c028b        bridge              bridge              local
ea08d7f8b489        divine_bridge       bridge              local
3958d58a8597        host                host                local
b2dafd188630        none                null                local

//a new bridge created on host machine
[divine@localhost ~]$ ip addr
4: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
    link/ether 02:42:85:ce:f9:8d brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:85ff:fece:f98d/64 scope link
       valid_lft forever preferred_lft forever
62: veth7a81e08@if61: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP
    link/ether 7e:fe:e9:f2:cd:9b brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::7cfe:e9ff:fef2:cd9b/64 scope link
       valid_lft forever preferred_lft forever
64: vetha5bf765@if63: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP
    link/ether 8a:d8:1a:a0:03:52 brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet6 fe80::88d8:1aff:fea0:352/64 scope link
       valid_lft forever preferred_lft forever
69: br-ea08d7f8b489: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
    link/ether 02:42:07:28:7d:d5 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.1/24 scope global br-ea08d7f8b489
       valid_lft forever preferred_lft forever

[divine@localhost ~]$ sudo brctl show
[sudo] password for divine:
bridge name     bridge id               STP enabled     interfaces
br-ea08d7f8b489         8000.024207287dd5       no
docker0         8000.024285cef98d       no              veth7a81e08
                                                        vetha5bf765
[divine@localhost ~]$

Instantiate two containers and attach them to the newly created bridge network (divine_bridge) using the --network option

[divine@localhost ~]$ docker run -it -d -P --network=divine_bridge \
--name=httpd_server_3 ubuntu-httpd-server
2a68d3bd191d004f8499086d22d2e02896d6642d35de4b24cd00c6c6e9c26cfe

[divine@localhost ~]$ docker run -it -d -P --network=divine_bridge \
--name=httpd_server_4 ubuntu-httpd-server
5c4d9557bf57dc2f3dad2500761553384ab2dcfc3733c475c3f9c75aa213b506

[divine@localhost ~]$ docker ps
CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS                   NAMES
5c4d9557bf57        ubuntu-httpd-server   "apachectl -D FOREGRO"   3 minutes ago       Up 3 minutes        0.0.0.0:32776->80/tcp   httpd_server_4
2a68d3bd191d        ubuntu-httpd-server   "apachectl -D FOREGRO"   3 minutes ago       Up 3 minutes        0.0.0.0:32775->80/tcp   httpd_server_3
b3bc56610cdc        ubuntu-httpd-server   "apachectl -D FOREGRO"   21 hours ago        Up 21 hours         0.0.0.0:32774->80/tcp   httpd_server_2
a06696b1d1a9        ubuntu-httpd-server   "apachectl -D FOREGRO"   21 hours ago        Up 21 hours         0.0.0.0:32773->80/tcp   httpd_server_1
[divine@localhost ~]$

//inspect your bridge network and check ip addresses assigned to containers
[divine@localhost ~]$ docker network inspect divine_bridge
[
    {
        "Name": "divine_bridge",
        "Id": "ea08d7f8b489c8df56f0412ab5aceb16f59de14539fae15fc2cea3d20b09d087",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": {},
            "Config": [
                {
                    "Subnet": "192.168.0.0/24"
                }
            ]
        },
        "Internal": false,
        "Containers": {
            "2a68d3bd191d004f8499086d22d2e02896d6642d35de4b24cd00c6c6e9c26cfe": {
                "Name": "httpd_server_3",
                "EndpointID": "0e55819e97d562df4aaa7d575a84550510a0b3e5ef066c558a9892f8ac90fff7",
                "MacAddress": "02:42:c0:a8:00:02",
                "IPv4Address": "192.168.0.2/24",
                "IPv6Address": ""
            },
            "5c4d9557bf57dc2f3dad2500761553384ab2dcfc3733c475c3f9c75aa213b506": {
                "Name": "httpd_server_4",
                "EndpointID": "cde3779af37a84a9f6a6b3f42a5f23b083fc37ed98ea0dc773fa8a37f3e4cce7",
                "MacAddress": "02:42:c0:a8:00:03",
                "IPv4Address": "192.168.0.3/24",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {}
    }
]

//Test httpd server
[divine@localhost ~]$ curl localhost:32776
Welcome to Apache2 Web server inside Docker
[divine@localhost ~]$


Check connectivity between containers. Make sure containers can communicate with each other thru your bridge

[divine@localhost ~]$ docker exec -i -t httpd_server_3 /bin/bash
root@2a68d3bd191d:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
70: eth0@if71: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:c0:a8:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 192.168.0.2/24 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:c0ff:fea8:2/64 scope link
       valid_lft forever preferred_lft forever
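
The capture above stops at the interface listing; a ping from httpd_server_3 to httpd_server_4's address (192.168.0.3, per the network inspect above) should succeed thru divine_bridge:

//ping container httpd_server_4 from httpd_server_3
root@2a68d3bd191d:/# ping -c 3 192.168.0.3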


Docker host network

The host network maps all host interfaces into the container. Let's instantiate a container on the host network using --network=host. You will see that the container has the same network interfaces as the host machine

//instantiate container with --network=host
[divine@localhost ~]$ docker run -it -d -P --network=host --name=httpd_server_5 ubuntu-httpd-server
4ea9600641e13c1c844edf1e8ecbc11c27bdebf88d64bc391a655d8568bdbd03
[divine@localhost ~]$ docker ps
CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS                   NAMES
4ea9600641e1        ubuntu-httpd-server   "apachectl -D FOREGRO"   42 seconds ago      Up 40 seconds                               httpd_server_5
5c4d9557bf57        ubuntu-httpd-server   "apachectl -D FOREGRO"   2 hours ago         Up 2 hours          0.0.0.0:32776->80/tcp   httpd_server_4
2a68d3bd191d        ubuntu-httpd-server   "apachectl -D FOREGRO"   2 hours ago         Up 2 hours          0.0.0.0:32775->80/tcp   httpd_server_3
b3bc56610cdc        ubuntu-httpd-server   "apachectl -D FOREGRO"   24 hours ago        Up 24 hours         0.0.0.0:32774->80/tcp   httpd_server_2
a06696b1d1a9        ubuntu-httpd-server   "apachectl -D FOREGRO"   24 hours ago        Up 24 hours         0.0.0.0:32773->80/tcp   httpd_server_1

//inspect host network
[divine@localhost ~]$ docker network inspect host
[
    {
        "Name": "host",
        "Id": "3958d58a8597bbc738cd4cba87d1baa8c491cff2e38b57bdd58f241690a45219",
        "Scope": "local",
        "Driver": "host",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": []
        },
        "Internal": false,
        "Containers": {
            "4ea9600641e13c1c844edf1e8ecbc11c27bdebf88d64bc391a655d8568bdbd03": {
                "Name": "httpd_server_5",
                "EndpointID": "3dcbf81cfafac1b6b9f53140b58ec77cde385117f23c28e7ee42add3768ed30f",
                "MacAddress": "",
                "IPv4Address": "",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {}
    }
]

//connect to container
[divine@localhost ~]$ docker exec -i -t httpd_server_5 /bin/bash

//as you can see all host machine interfaces are mapped to container
root@localhost:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: enp1s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 2c:27:d7:1c:88:b4 brd ff:ff:ff:ff:ff:ff
    inet 167.254.211.167/23 brd 167.254.211.255 scope global enp1s0
       valid_lft forever preferred_lft forever
    inet6 fe80::2e27:d7ff:fe1c:88b4/64 scope link
       valid_lft forever preferred_lft forever
3: enp0s29f7u1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:50:b6:19:41:65 brd ff:ff:ff:ff:ff:ff
    inet 10.10.10.10/24 brd 10.10.10.255 scope global enp0s29f7u1
       valid_lft forever preferred_lft forever
    inet 192.168.1.2/30 brd 192.168.1.3 scope global enp0s29f7u1
       valid_lft forever preferred_lft forever
    inet6 2101:db8:0:1::100/64 scope global
       valid_lft forever preferred_lft forever
    inet6 fe80::250:b6ff:fe19:4165/64 scope link
       valid_lft forever preferred_lft forever
4: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:85:ce:f9:8d brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:85ff:fece:f98d/64 scope link
       valid_lft forever preferred_lft forever
62: veth7a81e08@if61: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
    link/ether 7e:fe:e9:f2:cd:9b brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::7cfe:e9ff:fef2:cd9b/64 scope link
       valid_lft forever preferred_lft forever
64: vetha5bf765@if63: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
    link/ether 8a:d8:1a:a0:03:52 brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet6 fe80::88d8:1aff:fea0:352/64 scope link
       valid_lft forever preferred_lft forever
69: br-ea08d7f8b489: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:07:28:7d:d5 brd ff:ff:ff:ff:ff:ff
    inet 192.168.0.1/24 scope global br-ea08d7f8b489
       valid_lft forever preferred_lft forever
    inet6 fe80::42:7ff:fe28:7dd5/64 scope link
       valid_lft forever preferred_lft forever
71: vethf7c7b4b@if70: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br-ea08d7f8b489 state UP group default
    link/ether 7a:0d:0a:b2:7f:af brd ff:ff:ff:ff:ff:ff link-netnsid 2
    inet6 fe80::780d:aff:feb2:7faf/64 scope link
       valid_lft forever preferred_lft forever
73: veth1f04827@if72: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br-ea08d7f8b489 state UP group default
    link/ether 56:08:e4:bf:94:fd brd ff:ff:ff:ff:ff:ff link-netnsid 3
    inet6 fe80::5408:e4ff:febf:94fd/64 scope link
       valid_lft forever preferred_lft forever

Here is the difference: in a host network there is no container port mapping. The PORTS column shows no port mapping, unlike in bridge mode

//as you can see there is no port mapping for container in host network
[divine@localhost ~]$  docker ps -f name=httpd_server_5
CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS               NAMES
4ea9600641e1        ubuntu-httpd-server   "apachectl -D FOREGRO"   44 minutes ago      Up 44 minutes                           httpd_server_5
[divine@localhost ~]$

[divine@localhost ~]$ curl localhost:80
Welcome to Apache2 Web server inside Docker
[divine@localhost ~]$

Below is a test to check the process and port on the host machine when a container is created on the host network. Note: the container is running an Apache2 server bound to port 80

//As you can see, port 80 is in LISTEN mode when the container is created on the host network
[divine@localhost ~]$ netstat -pan | grep :80
(Not all processes could be identified, non-owned process info
 will not be shown, you would have to be root to see it all.)
tcp6       0      0 :::80                   :::*                    LISTEN      -

[divine@localhost ~]$ docker ps
CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS                   NAMES
4ea9600641e1        ubuntu-httpd-server   "apachectl -D FOREGRO"   50 minutes ago      Up 50 minutes                               httpd_server_5

//delete container
[divine@localhost ~]$ docker stop httpd_server_5
httpd_server_5
[divine@localhost ~]$ docker rm httpd_server_5
httpd_server_5

[divine@localhost ~]$ docker ps
CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS                   NAMES

//no process listening on port 80
[divine@localhost ~]$ netstat -pan | grep :80
(No info could be read for "-p": geteuid()=1002 but you should be root.)


//httpd server failed as expected
[divine@localhost ~]$ curl localhost:80
curl: (7) Failed connect to localhost:80; Connection refused
[divine@localhost ~]$

Let’s test the limitation of the host network. Since host and container share the same network namespace, two processes listening on the same port can’t run at the same time, one in the container and one on the host. In bridge mode this is avoided by mapping the container port to a different port on the host.
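
For contrast, a minimal bridge-mode sketch (container name and host port are hypothetical): mapping container port 80 to a free host port sidesteps the conflict entirely:

//bridge mode: container port 80 mapped to host port 8081, no clash with host port 80
$ docker run -it -d -p 8081:80 --name=httpd_server_6 ubuntu-httpd-server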

Let’s try to start the httpd process on the host machine while the container is running. This will be denied as port 80 is already taken by the container’s httpd server

//install httpd on host
[divine@localhost ~]$sudo yum install httpd

//port 80 already taken by container httpd server
[divine@localhost ~]$ netstat -pan | grep :80
(No info could be read for "-p": geteuid()=1002 but you should be root.)
tcp6       0      0 :::80                   :::*                    LISTEN      -


//start httpd server. It failed to start
[divine@localhost ~]$ service httpd start
Redirecting to /bin/systemctl start  httpd.service
==== AUTHENTICATING FOR org.freedesktop.systemd1.manage-units ===
Authentication is required to manage system services or units.
Authenticating as: root
Password:
==== AUTHENTICATION COMPLETE ===
Job for httpd.service failed because the control process exited with error code. See "systemctl status httpd.service" and "journalctl -xe" for details.
[divine@localhost ~]$

//stop container
[divine@localhost ~]$ docker stop httpd_server_5
httpd_server_5

//port no longer in use
[divine@localhost ~]$ netstat -pan | grep :80
(No info could be read for "-p": geteuid()=1002 but you should be root.)


//start httpd service. service started as port 80 is available
[divine@localhost ~]$ service httpd start
Redirecting to /bin/systemctl start  httpd.service
==== AUTHENTICATING FOR org.freedesktop.systemd1.manage-units ===
Authentication is required to manage system services or units.
Authenticating as: root
Password:
==== AUTHENTICATION COMPLETE ===

[divine@localhost ~]$ netstat -pan | grep 80
(No info could be read for "-p": geteuid()=1002 but you should be root.)
tcp6       0      0 :::80                   :::*                    LISTEN      -

None network

A Docker container instantiated with --network=none has no network interface and no port mapping; it is up to you how you want to set up networking (a manual sketch at the end of this section)

//instantiate container with --network=none
[divine@localhost ~]$ docker run -it -d -P --network=none --name=httpd_server_8 ubuntu-httpd-server
750a28adfb416139c450f75affc6237c34a7447728d8233e2294e27e3227f24e

//no port mapping
[divine@localhost ~]$ docker ps
CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS              PORTS               NAMES
750a28adfb41        ubuntu-httpd-server   "apachectl -D FOREGRO"   9 seconds ago       Up 7 seconds                            httpd_server_8
[divine@localhost ~]$ docker exec -it httpd_server_8 /bin/bash

//No IP interfaces created other than loopback
root@750a28adfb41:/# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
root@750a28adfb41:/#
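
As a sketch of "setting up networking yourself", you can hand-wire a none-network container to the host with a veth pair. All interface names and addresses below are hypothetical:

//find the container's network namespace via its PID
$ PID=$(docker inspect -f '{{.State.Pid}}' httpd_server_8)
$ sudo mkdir -p /var/run/netns
$ sudo ln -s /proc/$PID/ns/net /var/run/netns/httpd_server_8

//create a veth pair and push one end into the container
$ sudo ip link add veth_h type veth peer name veth_c
$ sudo ip link set veth_c netns httpd_server_8

//address both ends and bring them up: host side 172.18.0.1, container side 172.18.0.2
$ sudo ip addr add 172.18.0.1/24 dev veth_h
$ sudo ip link set veth_h up
$ sudo ip netns exec httpd_server_8 ip addr add 172.18.0.2/24 dev veth_c
$ sudo ip netns exec httpd_server_8 ip link set veth_c up

//host and container can now reach each other over the pair
$ ping -c 3 172.18.0.2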


Lab-33: Docker container

What is Docker

Docker is a framework to create and maintain containers. Docker's main intent is to let developers build, test and deploy applications quicker. Docker provides process-level isolation, application portability and consistency. Under the hood it uses Linux kernel features like namespaces to create containers

Docker terminology
Docker daemon - the Docker process running on the host machine
Docker client - the command line interface
Docker image - a Docker image is a snapshot of a Docker container
Docker container - a Docker container is an instance of a Docker image
Dockerfile - a Dockerfile contains instructions to build a new image

A Docker container image is created in layers. For example, to create a container image for an Apache web server application on Ubuntu, you first load the Ubuntu base image (you can get base images from Docker Hub), then add the Apache2 server to the base image, then create the web page, and so on. This completes the image creation for the container. You then instantiate containers from this image. You can save the container image and share it with others
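
Once an image like ubuntu-httpd-server is built (later in this lab) you can see its layers; a quick sketch:

//each instruction/commit adds a layer on top of the base image
$ docker history ubuntu-httpd-server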

Docker container vs VM

A VM contains a complete OS and application. A VM is heavyweight (due to the full OS) and slow to start. It is processor intensive and needs big machines (large RAM and many processors) to run, but it provides full isolation from the host machine.

A container shares the kernel with the host machine. A Docker container doesn't contain a complete OS, which makes it lightweight and fast to start. You can start hundreds of containers on a machine. A container typically starts in seconds, compared to minutes for a VM

Below is a pictorial comparison of a VM and a Docker container

docker1

You can find a spirited discussion on this topic here

Because Docker containers are executed by the Docker engine (as opposed to a hypervisor), they are not fully isolated. However, the trade off is a small footprint: unlike VMware, Docker does not create an entire virtual operating system— instead, all required components not already running on the host machine are packaged up inside the container with the application. Since the host kernel is shared amongst Docker containers, applications only ship with what they need to run—no more, no less.
This makes Docker applications easier and more lightweight to deploy and faster to start up than virtual machines.

 

Prerequisite

Install Docker. As per the Docker site there are two methods to install Docker; I am using the installation script. I am using CentOS 7.2 on my host machine

Login as a user with sudo permission
$su - divine

$ uname -r   //check kernel version it should be 3.10 or higher
3.10.0-229.el7.x86_64

$ sudo yum update

run Docker installation script
$ curl -fsSL https://get.docker.com/ | sh

Logout and log back in
$logout
$su - divine

Enable the service
$ sudo systemctl enable docker.service
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service

Start Docker daemon
$ sudo systemctl start docker

check the installed Docker version
$docker -v
Docker version 1.12.6, build 78d1802

Create Docker group
$ sudo groupadd docker

Add user to Docker group
$sudo usermod -aG docker $(whoami)

Logout and log back in
$logout
$su - divine

Procedure

In this procedure I will create a Docker container with an httpd server, using Ubuntu for the container

  1. Create a folder for httpd
    • $mkdir -p docker/httpd
    • $cd docker/httpd
  2. Pull the Ubuntu image from Docker hub. This command pulls the Ubuntu image from Docker hub, creates a container from the base image, runs the container and provides a bash prompt.
    • $docker run -i -t ubuntu bash
    • $docker search ubuntu  //you can search Docker hub for a image
  3. Install Apache2 httpd server and start it inside the container
    • $apt-get update
    • $apt-get install apache2
    • $service apache2 start
  4. Install some needed utilities
    • $apt-get install vim
    • $apt-get install curl
  5. Update Apache2 index.html file
    • $cd /var/www/html
    • $rm index.html
    • $vi index.html  //Add a line “Welcome to Apache2 Web server inside Docker container”
  6. Test Apache2 server inside container
    • $curl localhost:80  //you should get this output “Welcome to Apache2 Web server inside Docker container”
  7. Exit from bash shell
    • $exit

8. Create the image. Specify container id from the prompt in step 2 ([root@0ee9a9324a51 /]#) or get it by running command ‘docker ps -a’

//create image with name 'ubuntu-httpd-server'
$docker commit 0ee9a9324a51 ubuntu-httpd-server
sha256:7ba87e636f0ecadbdb5b45a8e72ad5268be68a2d22f839532ca60aefe486e157

//Check image
$ docker images
REPOSITORY            TAG      IMAGE ID       CREATED          SIZE
ubuntu-httpd-server   latest   7ba87e636f0e   11 minutes ago   318.6 MB
ubuntu                latest   104bec311bcd   4 weeks ago      128.9 MB

9. Now we have working image for our container. Let’s create a container and test our Apache2 httpd server

[divine@localhost ~]$ docker images
REPOSITORY            TAG      IMAGE ID       CREATED          SIZE
ubuntu-httpd-server   latest   7ba87e636f0e   11 minutes ago   318.6 MB
ubuntu                latest   104bec311bcd   4 weeks ago      128.9 MB

//Run image by exposing httpd server port (80) to 8080 on host. At the same 
time start Apache2 server
[divine@localhost ~]$ docker run -p 8080:80 -d ubuntu-httpd-server apachectl -D FOREGROUND
7afd126c01d1e11e9efa4a6dd905e6519ca3a65bb8044565fe568ffee0f3815d

//Check container
[divine@localhost ~]$ docker ps
CONTAINER ID   IMAGE                 COMMAND                  CREATED              STATUS              PORTS                  NAMES
7afd126c01d1   ubuntu-httpd-server   "apachectl -D FOREGRO"   About a minute ago   Up About a minute   0.0.0.0:8080->80/tcp   clever_hoover

//Test Apache2 httpd server
[divine@localhost ~]$ curl localhost:8080
Welcome to Apache2 Web server inside Docker container

Remove Docker container and images

//Check Docker container state
[divine@localhost ~]$ docker ps -a
CONTAINER ID   IMAGE                 COMMAND                  CREATED          STATUS          PORTS                  NAMES
1b0013c96090   ubuntu-httpd-server   "apachectl -D FOREGRO"   11 minutes ago   Up 11 minutes   0.0.0.0:8080->80/tcp   nostalgic_knuth

//Stop container
[divine@localhost ~]$ docker stop 1b0013c96090
1b0013c96090

//Remove container
[divine@localhost ~]$ docker rm 1b0013c96090
1b0013c96090

[divine@localhost ~]$ docker ps -a
CONTAINER ID   IMAGE   COMMAND   CREATED   STATUS   PORTS   NAMES
[divine@localhost ~]$

[divine@localhost ~]$ docker images
REPOSITORY            TAG      IMAGE ID       CREATED        SIZE
ubuntu-httpd-server   latest   7ba87e636f0e   16 hours ago   318.6 MB
ubuntu                latest   104bec311bcd   4 weeks ago    128.9 MB

//Remove Docker image
[divine@localhost ~]$ docker rmi ubuntu-httpd-server
Untagged: ubuntu-httpd-server:latest
Deleted: sha256:7ba87e636f0ecadbdb5b45a8e72ad5268be68a2d22f839532ca60aefe486e157
Deleted: sha256:91784775b2afecffca200e451be31ac2bd4cb1d1bcef06425f889df25fe51b18

[divine@localhost ~]$ docker images
REPOSITORY   TAG      IMAGE ID       CREATED       SIZE
ubuntu       latest   104bec311bcd   4 weeks ago   128.9 MB
[divine@localhost ~]$

Building Docker image using Dockerfile

In this exercise I will create an image using a Dockerfile. A Dockerfile is a script containing Docker instructions. The advantage of the Dockerfile method of building a Docker image is that you can share the Dockerfile with others so they can create the same image.

Create below file under docker/httpd and file name Dockerfile

FROM ubuntu
MAINTAINER Divine life email:divinelife@lifedivine.net

# Update the image with the latest packages (recommended)
RUN apt-get update -y

# Install Apache2 Web Server
RUN apt-get --assume-yes install apache2

# Copy index.html file from host to Docker
ADD index.html /var/www/html/

# Expose httpd server port to host
EXPOSE 80

ENTRYPOINT [ "apachectl" ]
CMD [ "-D", "FOREGROUND" ]

Dockerfile terminology

FROM: Base image. It can be an image provided by Docker or an image you have built
MAINTAINER: Owner of the image
RUN: Command executed inside the container at build time
ADD: Add a file from the host to the container
CMD: Default command/arguments executed when the image is instantiated
EXPOSE: Expose a container port number to the host
ENTRYPOINT: Command that executes when the container starts
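
Since ENTRYPOINT is fixed to apachectl and CMD only supplies its default arguments, anything passed after the image name at run time replaces CMD. A quick sketch (run after the image is built below):

//override CMD: apachectl -v prints the Apache version and exits
$ docker run --rm ubuntu-httpd-server -v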

Create web server index.html file under docker/httpd. Add below line in index.html file

Welcome to Apache2 Web server inside Docker

Build an image using the Dockerfile

[divine@localhost httpd]$ docker images
REPOSITORY   TAG      IMAGE ID       CREATED         SIZE
<none>       <none>   8e94f242f5a6   5 minutes ago   267.6 MB
ubuntu       latest   104bec311bcd   4 weeks ago     128.9 MB

[divine@localhost httpd]$ pwd
/home/divine/docker/httpd

[divine@localhost httpd]$ ls
Dockerfile index.html

[divine@localhost httpd]$ cat Dockerfile
FROM ubuntu
MAINTAINER Divine life email:divinelife@lifedivine.net

# Update the image with the latest packages (recommended)
RUN apt-get update -y

# Install Apache2 Web Server
RUN apt-get --assume-yes install apache2

# Copy index.html file from host to Docker
ADD index.html /var/www/html/

# Expose httpd server port to host
EXPOSE 80

ENTRYPOINT [ "apachectl" ]
CMD [ "-D", "FOREGROUND" ]

[divine@localhost httpd]$ cat index.html
Welcome to Apache2 Web server inside Docker

//Build Docker image using Dockerfile
[divine@localhost httpd]$ docker build -t ubuntu-httpd-server .
Sending build context to Docker daemon 3.072 kB
Step 1 : FROM ubuntu
 ---> 104bec311bcd
Step 2 : MAINTAINER Divine life email:divinelife@lifedivine.net
 ---> Using cache
 ---> 70b0078a89e9
Step 3 : RUN apt-get update -y
 ---> Using cache
 ---> fb70d91a3aa8
Step 4 : RUN apt-get --assume-yes install apache2
 ---> Using cache
 ---> 8e94f242f5a6
Step 5 : ADD index.html /var/www/html/
 ---> 348b5b76d1ed
Removing intermediate container 8030b8db503b
Step 6 : EXPOSE 80
 ---> Running in 2db0d68aff48
 ---> dc3213733304
Removing intermediate container 2db0d68aff48
Step 7 : ENTRYPOINT apachectl
 ---> Running in 011d64324f77
 ---> c8ef3ef6ecae
Removing intermediate container 011d64324f77
Step 8 : CMD -D FOREGROUND
 ---> Running in 9d0f4579ecb7
 ---> 1b11311232d3
Removing intermediate container 9d0f4579ecb7
Successfully built 1b11311232d3

[divine@localhost httpd]$ docker images
REPOSITORY            TAG      IMAGE ID       CREATED          SIZE
ubuntu-httpd-server   latest   1b11311232d3   34 seconds ago   267.6 MB
ubuntu                latest   104bec311bcd   4 weeks ago      128.9 MB

Let’s create a container from the newly created image and test the httpd server

[divine@localhost httpd]$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES

//Start the container. The -P flag publishes the exposed port on a dynamically chosen host port
[divine@localhost httpd]$ docker run -it -d -P ubuntu-httpd-server
d7f7719fcc4f357ea9a5f390e7f4f336ecb5608fa07f4975f7a9958c4950094b

[divine@localhost httpd]$ docker ps
CONTAINER ID   IMAGE                 COMMAND                  CREATED         STATUS         PORTS                   NAMES
d7f7719fcc4f   ubuntu-httpd-server   "apachectl -D FOREGRO"   8 seconds ago   Up 5 seconds   0.0.0.0:32768->80/tcp   focused_payne


//This command shows the port mapping for the exposed port. Here container
//port 80 is mapped to port 32768 on the host
[divine@localhost ~]$ docker port d7f7719fcc4f
80/tcp -> 0.0.0.0:32768

//Test httpd web server
[divine@localhost httpd]$ curl localhost:32768
Welcome to Apache2 Web server inside Docker
[divine@localhost httpd]$
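
With -P the host port is chosen dynamically; if you want a fixed mapping instead, the -p flag binds a specific host port to the container port. A quick sketch, using host port 8080 as an arbitrary choice:

//Map host port 8080 to container port 80 explicitly
[divine@localhost httpd]$ docker run -it -d -p 8080:80 ubuntu-httpd-server
[divine@localhost httpd]$ curl localhost:8080
Welcome to Apache2 Web server inside Docker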

Image sharing

A Docker image can be shared with others. Save the image as a tar file using the ‘docker save’ command; the receiver can then load it using the ‘docker load’ command

//Save image 
[divine@localhost httpd]$ docker save -o httpd-server.tar ubuntu-httpd-server
[divine@localhost httpd]$ ls
Dockerfile httpd-server.tar index.html
[divine@localhost httpd]$

//Copy the tar file to the destination host, then load it into the local
//Docker image store
[divine@localhost httpd]$ docker load -i httpd-server.tar
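
Since ‘docker save’ writes to stdout when no -o flag is given, and ‘docker load’ reads from stdin, the archive can also be compressed on the fly to reduce transfer size. A minimal sketch:

//Save and compress in one step, then load from the compressed archive
[divine@localhost httpd]$ docker save ubuntu-httpd-server | gzip > httpd-server.tar.gz
[divine@localhost httpd]$ gunzip -c httpd-server.tar.gz | docker load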

Inspect a Docker container

You can inspect your container using the ‘docker inspect’ command.

[divine@localhost ~]$ docker inspect d7f7719fcc4f
[
 {
 "Id": "d7f7719fcc4f357ea9a5f390e7f4f336ecb5608fa07f4975f7a9958c4950094b",
 "Created": "2017-01-17T16:18:30.758448801Z",
 "Path": "apachectl",
 "Args": [
 "-D",
 "FOREGROUND"
 ],
 "State": {
 "Status": "running",
 "Running": true,
 "Paused": false,
 "Restarting": false,
 "OOMKilled": false,
 "Dead": false,
 "Pid": 28133,
 "ExitCode": 0,
 "Error": "",
 "StartedAt": "2017-01-17T16:18:32.458155991Z",
 "FinishedAt": "0001-01-01T00:00:00Z"
 },
 "Image": "sha256:1b11311232d3b2f1e1b39d4652e3d3f8d41c730d1422fc9857f0a87a8897400e",
 "ResolvConfPath": "/var/lib/docker/containers/d7f7719fcc4f357ea9a5f390e7f4f336ecb5608fa07f4975f7a9958c4950094b/resolv.conf",
 "HostnamePath": "/var/lib/docker/containers/d7f7719fcc4f357ea9a5f390e7f4f336ecb5608fa07f4975f7a9958c4950094b/hostname",
 "HostsPath": "/var/lib/docker/containers/d7f7719fcc4f357ea9a5f390e7f4f336ecb5608fa07f4975f7a9958c4950094b/hosts",
 "LogPath": "/var/lib/docker/containers/d7f7719fcc4f357ea9a5f390e7f4f336ecb5608fa07f4975f7a9958c4950094b/d7f7719fcc4f357ea9a5f390e7f4f336ecb5608fa07f4975f7a9958c4950094b-json.log",
 "Name": "/focused_payne",
 "RestartCount": 0,
 "Driver": "devicemapper",
 "MountLabel": "",
 "ProcessLabel": "",
 "AppArmorProfile": "",
 "ExecIDs": null,
 "HostConfig": {
 "Binds": null,
 "ContainerIDFile": "",
 "LogConfig": {
 "Type": "json-file",
 "Config": {}
 },
 "NetworkMode": "default",
 "PortBindings": {},
 "RestartPolicy": {
 "Name": "no",
 "MaximumRetryCount": 0
 },
 "AutoRemove": false,
 "VolumeDriver": "",
 "VolumesFrom": null,
 "CapAdd": null,
 "CapDrop": null,
 "Dns": [],
 "DnsOptions": [],
 "DnsSearch": [],
 "ExtraHosts": null,
 "GroupAdd": null,
 "IpcMode": "",
 "Cgroup": "",
 "Links": null,
 "OomScoreAdj": 0,
 "PidMode": "",
 "Privileged": false,
 "PublishAllPorts": true,
 "ReadonlyRootfs": false,
 "SecurityOpt": null,
 "UTSMode": "",
 "UsernsMode": "",
 "ShmSize": 67108864,
 "Runtime": "runc",
 "ConsoleSize": [
 0,
 0
 ],
 "Isolation": "",
 "CpuShares": 0,
 "Memory": 0,
 "CgroupParent": "",
 "BlkioWeight": 0,
 "BlkioWeightDevice": null,
 "BlkioDeviceReadBps": null,
 "BlkioDeviceWriteBps": null,
 "BlkioDeviceReadIOps": null,
 "BlkioDeviceWriteIOps": null,
 "CpuPeriod": 0,
 "CpuQuota": 0,
 "CpusetCpus": "",
 "CpusetMems": "",
 "Devices": [],
 "DiskQuota": 0,
 "KernelMemory": 0,
 "MemoryReservation": 0,
 "MemorySwap": 0,
 "MemorySwappiness": -1,
 "OomKillDisable": false,
 "PidsLimit": 0,
 "Ulimits": null,
 "CpuCount": 0,
 "CpuPercent": 0,
 "IOMaximumIOps": 0,
 "IOMaximumBandwidth": 0
 },
 "GraphDriver": {
 "Name": "devicemapper",
 "Data": {
 "DeviceId": "84",
 "DeviceName": "docker-253:1-201327333-97b5d29cf22f37da51d47194d0b019138480e4565e743d6bc7cf141f3e2f9326",
 "DeviceSize": "10737418240"
 }
 },
 "Mounts": [],
 "Config": {
 "Hostname": "d7f7719fcc4f",
 "Domainname": "",
 "User": "",
 "AttachStdin": false,
 "AttachStdout": false,
 "AttachStderr": false,
 "ExposedPorts": {
 "80/tcp": {}
 },
 "Tty": true,
 "OpenStdin": true,
 "StdinOnce": false,
 "Env": [
 "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
 ],
 "Cmd": [
 "-D",
 "FOREGROUND"
 ],
 "Image": "ubuntu-httpd-server",
 "Volumes": null,
 "WorkingDir": "",
 "Entrypoint": [
 "apachectl"
 ],
 "OnBuild": null,
 "Labels": {}
 },
 "NetworkSettings": {
 "Bridge": "",
 "SandboxID": "5cae0653b9608d5d78cba2c87bbe863dc741a127756b0e1091e5e21969d99dce",
 "HairpinMode": false,
 "LinkLocalIPv6Address": "",
 "LinkLocalIPv6PrefixLen": 0,
 "Ports": {
 "80/tcp": [
 {
 "HostIp": "0.0.0.0",
 "HostPort": "32768"
 }
 ]
 },
 "SandboxKey": "/var/run/docker/netns/5cae0653b960",
 "SecondaryIPAddresses": null,
 "SecondaryIPv6Addresses": null,
 "EndpointID": "4f173ec6498b8c2f7833f5b25cd3c97e50f616223747108bbfcfa9ab4f96f769",
 "Gateway": "172.17.0.1",
 "GlobalIPv6Address": "",
 "GlobalIPv6PrefixLen": 0,
 "IPAddress": "172.17.0.2",
 "IPPrefixLen": 16,
 "IPv6Gateway": "",
 "MacAddress": "02:42:ac:11:00:02",
 "Networks": {
 "bridge": {
 "IPAMConfig": null,
 "Links": null,
 "Aliases": null,
 "NetworkID": "1eb9249c028b10f76476f6a2e92852e6f87a2e50a7e89926f0a096713a44a945",
 "EndpointID": "4f173ec6498b8c2f7833f5b25cd3c97e50f616223747108bbfcfa9ab4f96f769",
 "Gateway": "172.17.0.1",
 "IPAddress": "172.17.0.2",
 "IPPrefixLen": 16,
 "IPv6Gateway": "",
 "GlobalIPv6Address": "",
 "GlobalIPv6PrefixLen": 0,
 "MacAddress": "02:42:ac:11:00:02"
 }
 }
 }
 }
]
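
The full JSON output is verbose. ‘docker inspect’ also accepts a Go template via the -f (--format) flag to extract a single field; for example, to pull out just the container IP address shown under NetworkSettings above:

//Extract only the container IP address
[divine@localhost ~]$ docker inspect -f '{{ .NetworkSettings.IPAddress }}' d7f7719fcc4f
172.17.0.2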