Thursday, December 27, 2018

nfs in k8s

Ganesha

Ganesha & NFS Installation

Using Ubuntu 18.04

Ganesha

apt -y install nfs-ganesha-gluster
apt-get install nfs-ganesha-vfs  

Note that nfs-ganesha-vfs must be installed for local volume (non-GlusterFS) testing. Hooking it up with GlusterFS has not been tried yet.

cat /etc/ganesha/ganesha.conf

EXPORT{
    Export_Id = 1 ;   # Export ID unique to each export
    #Path = "volume_path";  # Path of the volume to be exported. Eg: "/test_volume"
    Pseudo = /vfs_distributed;
    Path = /vol_distributed;

    #FSAL {
    #    name = GLUSTER;
    #    hostname = "10.xx.xx.xx";  # IP of one of the nodes in the trusted pool
    #    volume = "volume_name";  # Volume name. Eg: "test_volume"
    #}

    Access_type = RW;    # Access permissions
    Squash = No_root_squash; # To enable/disable root squashing
    Disable_ACL = TRUE;  # To enable/disable ACL
   # Pseudo = "pseudo_path";  # NFSv4 pseudo path for this export. Eg: "/test_volume_pseudo"
    Protocols = "3","4" ;    # NFS protocols supported
    Transports = "UDP","TCP" ; # Transport protocols supported
    #SecType = "sys";     # Security flavors supported
    SecType = "sys,krb5,krb5i,krb5p";
    FSAL {
           Name = VFS;
    }
}

Create volumes and set permissions

mkdir /vol_distributed
chmod -R 777 /vol_distributed
mkdir /vfs_distributed
chmod -R 777 /vfs_distributed

Start rpcbind

service rpcbind start

Reload Ganesha

systemctl restart nfs-ganesha 

Detect mount point

root@ganesha:~# showmount -e localhost
Export list for localhost:
/vol_distributed (everyone)

Client Mount.

mount -t nfs -o vers=3 nfsserver:/vol_distributed ./test -vvvv

Troubleshooting

access denied by server while mounting

Install the VFS FSAL and verify the export:

apt-get install nfs-ganesha-vfs
showmount -e localhost

client idle on write

chmod -R 777 /vol_distributed
chmod -R 777 /vfs_distributed

NFS-Kernel Installation

Install nfs-kernel

    sudo apt-get install nfs-kernel-server

edit and show export points

root@nfs:~# cat /etc/exports
# /etc/exports: the access control list for filesystems which may be exported
#       to NFS clients.  See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)

# /srv/nfs  172.16.155.0/24(sync,no_subtree_check,insecure,rw)
/srv/nfs  *(sync,no_subtree_check,insecure,rw,no_root_squash)

Set directory permission

mkdir /srv/nfs
chmod 777 /srv/nfs

Allow all clients, or only a specific subnet, to connect (see the two export lines above), then re-export:

sudo exportfs -ra

Reload NFS

sudo systemctl restart nfs-kernel-server

detect mount point


root@nfs:~# showmount -e localhost
Export list for localhost:
/srv/nfs *

Client mount by using v3 or v4.

mount -t nfs -o vers=3 nfsserver:/srv/nfs /mnt  -vvvv
mount -t nfs -o vers=4 nfsserver:/srv/nfs /mnt  -vvvv

Now it supports v4.

If we want to disable v3 or v4, edit /etc/default/nfs-kernel-server and set one of:

RPCMOUNTDOPTS="--manage-gids --no-nfs-version 3"
RPCMOUNTDOPTS="--manage-gids --no-nfs-version 4"
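
To verify which versions are actually served after the change (a hedged check; on some setups RPCNFSDOPTS in the same file may also need the --no-nfs-version flag):

systemctl restart nfs-kernel-server
cat /proc/fs/nfsd/versions
rpcinfo -p localhost | grep nfs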

Docker Version Ganesha

see the reference

https://github.com/mitcdh/docker-nfs-ganesha/blob/master/Dockerfile

Dockerfile

#FROM 192.168.51.130:5000/ebotubuntu:v0.2
#FROM ubuntu:18.04
FROM jrei/systemd-ubuntu:18.04

# Environment Setting
# working directory
RUN apt-get update
RUN apt -y install nfs-ganesha-gluster
RUN apt-get install nfs-ganesha-vfs  -y
RUN apt-get install netbase  -y

COPY ganesha.conf /etc/ganesha/ganesha.conf
RUN mkdir /vol_distributed
RUN chmod -R 777 /vol_distributed
RUN mkdir /vfs_distributed
RUN chmod -R 777 /vfs_distributed
#RUN mkdir -p /run/rpcbind /export /var/run/dbus
#RUN touch /run/rpcbind/rpcbind.xdr /run/rpcbind/portmap.xdr
#RUN chmod 755 /run/rpcbind/*
#RUN chown messagebus:messagebus /var/run/dbus


EXPOSE 111 111/udp 662 2049 38465-38467

#ENTRYPOINT ["../entrypoint.sh"]
#ENTRYPOINT ["../run.sh"]
#CMD ["bash","./run.sh"]
#EXPOSE 1337
#CMD ["systemctl", "start", "nfs-ganesha.service"]
root@kubecontext:/opt/mount/buildimage/ganesha# cat ganesha.conf
(the file is identical to the ganesha.conf shown earlier in this post)
docker run -d --privileged --cap-add SYS_ADMIN -v /sys/fs/cgroup:/sys/fs/cgroup:ro dockerepo:5566/nfsganisha:test

Initialize, mount locally, and check the result.


showmount -e localhost
mount -t nfs -o vers=4 172.17.0.2:/vol_distributed ./test -vvvv

Docker Version NFS

Dockerfile

#FROM 192.168.51.130:5000/ebotubuntu:v0.2
#FROM ubuntu:18.04
FROM jrei/systemd-ubuntu:18.04

# Environment Setting
# working directory
RUN apt-get update
RUN apt-get install nfs-kernel-server -y
COPY exports /etc/exports
RUN mkdir -p /srv/nfs
RUN chmod 777 /srv/nfs
#RUN exportfs -ra //no need to export since it restarted

#EXPOSE 111 111/udp 662 2049 38465-38467
#ENTRYPOINT ["../entrypoint.sh"]
#ENTRYPOINT ["../run.sh"]
#CMD ["bash","./run.sh"]
#EXPOSE 1337
#CMD ["systemctl", "start", "nfs-ganesha.service"]
#CMD ["sleep", "inf"]

cat /etc/exports

/srv/nfs  *(sync,fsid=0,no_subtree_check,insecure,rw,no_root_squash)

Note the fsid=0 attribute added to /etc/exports; it marks the NFSv4 pseudo-root export.

docker run -d --privileged --cap-add SYS_ADMIN -v /sys/fs/cgroup:/sys/fs/cgroup:ro dockerrepo:5566/nfs:test

We need to mount /sys/fs/cgroup into the container, or systemd will fail to start.
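
A hedged way to check that systemd came up properly inside the container (the container id is whatever docker ps reports):

docker exec -it <container_id> systemctl is-system-running
docker exec -it <container_id> systemctl status nfs-kernel-server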

showmount -e localhost
mount -t nfs -o vers=4 172.17.0.2:/srv/nfs test -vvvv
# mount.nfs nfs-access:/srv/nfs test -o nolock

If the mount fails with "Either use '-o nolock' to keep locks local, or start statd.", add -o nolock to the mount command (as in the commented example above).

rpcinfo -p localhost

   program vers proto   port  service
    100000    4   tcp    111  portmapper
    100000    3   tcp    111  portmapper
    100000    2   tcp    111  portmapper
    100000    4   udp    111  portmapper
    100000    3   udp    111  portmapper
    100000    2   udp    111  portmapper
    100005    1   udp  32767  mountd
    100005    1   tcp  32767  mountd
    100005    2   udp  32767  mountd
    100005    2   tcp  32767  mountd
    100005    3   udp  32767  mountd
    100005    3   tcp  32767  mountd
    100003    3   tcp   2049  nfs
    100003    4   tcp   2049  nfs
    100227    3   tcp   2049
    100003    3   udp   2049  nfs
    100227    3   udp   2049
    100021    1   udp  46271  nlockmgr
    100021    3   udp  46271  nlockmgr
    100021    4   udp  46271  nlockmgr
    100021    1   tcp  33029  nlockmgr
    100021    3   tcp  33029  nlockmgr
    100021    4   tcp  33029  nlockmgr

This output shows which ports the services are exposed on, which is very useful when defining the k8s Service (we pin some of the ports here).
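
As a hedged sketch of how one of these ports gets pinned: the mountd port (32767 above) can be set in /etc/default/nfs-kernel-server, then the server restarted and checked with rpcinfo:

# in /etc/default/nfs-kernel-server
RPCMOUNTDOPTS="--manage-gids --port 32767"

systemctl restart nfs-kernel-server
rpcinfo -p localhost | grep mountd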

Useful Docker Commands

Remove all images

docker images -q | xargs docker rmi

Remove all containers

docker ps -aq | xargs docker rm -f

Kubernetes Deployment

apiVersion: v1
kind: Service
metadata:
  name: nfs-access
  labels:
    app: nfs-access
spec:
  ports:
  - name: http
    protocol: TCP
    #port is loadbalancer port
    port: 8500
    # for clustering, port=targetPort
    # adding clusterIP: none for setting up deep dns
  clusterIP: None
  selector:
    role: nfs-server
---
apiVersion: v1
kind: Service
metadata:
    name: ext-nfs
    labels:
      app: ext-nfs
spec:
  externalIPs:
    - 172.16.155.188
  ports:
    - name: nfs
      port: 2049
    - name: mountd
      port: 32767
    - name: rpcbind
      port: 111
    - name: nolock
      port: 34623
    - name: nolock1
      port: 37419
  selector:
    role: nfs-server
---
kind: Service
apiVersion: v1
metadata:
  name: nfs-server
spec:
  ports:
    - name: nfs
      port: 2049
    - name: mountd
      port: 32767
    - name: rpcbind
      port: 111
    - name: nolock
      port: 34623
    - name: nolock1
      port: 37419
  selector:
    role: nfs-server
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: nfs-server
spec:
  replicas: 1
  selector:
    role: nfs-server
  template:
    metadata:
      labels:
        role: nfs-server
    spec:
      #nodeSelector:
      #  external-storage: "true"
      containers:
      - name: nfs-server
        #image: gcr.io/google_containers/volume-nfs:0.8
        image: 172.16.155.136:5000/nfs:test
        ports:
          - name: nfs
            containerPort: 2049
          - name: mountd
            containerPort: 32767
          - name: rpcbind
            containerPort: 111
          - name: nolock
            containerPort: 34623
          - name: nolock1
            containerPort: 37419
        securityContext:
          privileged: true
          capabilities:
             add:
               - NET_ADMIN
        volumeMounts:
          #- mountPath: /exports
          - mountPath: /srv/nfs
            name: nfs-export-fast
          - mountPath: /sys/fs/cgroup
            name: systemd-mount
      volumes:
        - name: nfs-export-fast
          hostPath:
            path: /data/nfs
        - name: systemd-mount
          hostPath:
            path: /sys/fs/cgroup

Note: clients access the server through the nfs-access service.

Client in K8S

apt-get install nfs-common
mkdir test
mount.nfs nfs-access:/srv/nfs test -o nolock

And make sure the yaml contains the privileged security settings:

        securityContext:
          privileged: true
          capabilities:
             add:
               - NET_ADMIN

Thursday, November 8, 2018

ISTIO Side Car Injection Setting

ISTIO Injection

ISTIO Injection

Standard way

kubectl apply -f <(istioctl kube-inject -f myroute/webserver/instance/template-stateless.yaml)

Is there a way to easily run

kubectl apply -f myroute/webserver/instance/template-stateless.yaml

without the complicated command above? We also hope to support running without istio injection.

Changing the Default Injection Policy

Before deploying istio, you can modify istio-demo.yaml, which is part of the official istio deployment scripts.

# Source: istio/templates/sidecar-injector-configmap.yaml

apiVersion: v1
kind: ConfigMap
.
.
.

data:
  config: |-
    policy: disabled

Change istio-demo.yaml from policy: enabled to policy: disabled. Search for the keyword and modify it.
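
A hedged one-liner for this edit, assuming the string appears only in the sidecar-injector ConfigMap (check with grep first):

grep -n "policy: enabled" istio-demo.yaml
sed -i 's/policy: enabled/policy: disabled/' istio-demo.yaml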

Check which namespaces have istio injection enabled


kubectl get namespace -L istio-injection
NAME           STATUS    AGE       ISTIO-INJECTION
default        Active    2d
istio-system   Active    2d        disabled
jj             Active    2d       

Enabling the istio-injection

kubectl label namespace jj istio-injection=enabled

More commands

clean up

kubectl label namespace jj istio-injection-

To update an existing label

kubectl label namespace jj istio-injection=disabled --overwrite

Modify the Service Yaml file

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: helloworld
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: helloworld
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
        sidecar.istio.io/inject: "true"

Add the annotation sidecar.istio.io/inject: "true" to the yaml file.

Under this setting:

  • adding sidecar.istio.io/inject: "true", istio injection = true
  • adding sidecar.istio.io/inject: "false", istio injection = false
  • without sidecar.istio.io/inject, istio injection = false

This setting will satisfy our environment.

Conclusion

Setting 1. the default inject policy to disabled and 2. istio-injection enabled on the namespace satisfies our requirement:

  • The default is to run without istio.
  • Setting sidecar.istio.io/inject to "true" enables istio injection.
  • Setting sidecar.istio.io/inject to "false" disables istio injection.

More

If we instead use 1. a default policy of enabled and 2. istio-injection enabled on the namespace, it does NOT satisfy our requirement, since:

  • The default is to run with istio.
  • Setting sidecar.istio.io/inject to "false" disables istio injection.
  • Setting sidecar.istio.io/inject to "true" enables istio injection.

Friday, October 5, 2018

CI/CD and Kubernetes ISTIO

CICD and ISTIO

CI/CD and ISTIO

CI/CD contains different stages, such as DEV, QA, Staging, and Production. Each stage might have its own network environment. For example, a webbackend url could be an IP address 172.16.155.207 in the dev environment, a mesh service webbackend in the QA environment, or a real internet domain name webbackend.com in the production environment.

We do not want to change the POD environment to match each network environment. The POD environment should stay fixed, say webbackend, and we adjust the network in each stage by using ISTIO to route the webbackend url to fit that stage.

We call the url webbackend a short url and webbackend.com a long url, and will now discuss how each kind of url can be adapted as the stage's network environment changes.

For Short URL

short url to a kubernetes service

This is the native kubernetes case; we don't need to change anything. A sketch of such a Service is shown below.
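
For reference, a hypothetical sketch of this native case: short is simply an ordinary Service in front of pods labeled app: short in the same namespace.

kubectl apply -n jj -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: short
spec:
  selector:
    app: short
  ports:
  - name: http
    port: 8000
    targetPort: 8000
EOF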

short url to a External IP address

istioctl register short 172.16.155.207 http:8000 -n jj

where 172.16.155.207 is an IP address outside the service mesh.

Now you can access it through the url short.

#kubectl get svc

NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
short           ClusterIP   172.18.142.145   <none>        8000/TCP   1s

It is registered as a kubernetes service.

Delete the registration

istioctl deregister short 172.16.155.207  -n jj
kubectl delete svc short

short url to an outside dns domain name

apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: shot-ext
spec:
  hosts:
  - short.jj.svc.cluster.local
  location: MESH_EXTERNAL
  ports:
  - number: 80
    name: https
    protocol: HTTP
  resolution: DNS
  endpoints:
  - address: httpbin.org
    ports:
      https: 80
istioctl create -f myroute/serviceentry/route-shortremote.yaml

You can now use the url short to access the existing internet site httpbin.org.

It's not finished yet. Note: in a ServiceEntry, the hosts must be resolvable on a real network, since a ServiceEntry only does network routing and does nothing about DNS or service registry. So we need a fake url short created by a kubernetes service.

apiVersion: v1
kind: Service
metadata:
  name: short
  labels:
    app: short
spec:
  ports:
  # must use - port (-); can not use - name
  - port: 8000
    targetPort: 8000
    name: http
  selector:
    app: short
kubectl apply -f myroute/serviceentry/fakeurl-shortremote.yaml

Long Url

Create Gateway.

kind: Gateway
metadata:
  name: istio-egressgateway
spec:
  selector:
    istio: egressgateway
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "edition.cnn.com"
    - "test.nxg"
istioctl create -f myroute/egress/gateway-egress.yaml

We prepare two hosts for further examples.

Existing long url to a kubernetes service

Here "existing" means the url really exists on the Internet and we just need to route its traffic into the mesh.

root@curlserver-2ddwj:/# curl edition.cnn.com:80 -v
* Rebuilt URL to: edition.cnn.com:80/
*   Trying 151.101.193.67...
* TCP_NODELAY set
* Connected to edition.cnn.com (151.101.193.67) port 80 (#0)
> GET / HTTP/1.1
> Host: edition.cnn.com
> User-Agent: curl/7.52.1
> Accept: */*
>
< HTTP/1.1 404 Not Found
< date: Fri, 05 Oct 2018 22:59:42 GMT
< server: envoy
< content-length: 0
<
* Curl_http_done: called premature == 0
* Connection #0 to host edition.cnn.com left intact

You will get the above result when you curl the url initially. The network path exists but is blocked by ISTIO; now we can set up routing through the istio egress gateway.

apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: cnn
spec:
  hosts:
  - edition.cnn.com
  ports:
  - number: 80
    name: http-port
    protocol: HTTP
  - number: 443
    name: https
    protocol: HTTPS
  resolution: DNS
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: direct-through-egress-gateway
spec:
  hosts:
  - edition.cnn.com
  gateways:
  - istio-egressgateway
  - mesh
  http:
  - match:
    - gateways:
      - mesh
      port: 80
    route:
    - destination:
        host: istio-egressgateway.istio-system.svc.cluster.local
        port:
          number: 80
      weight: 100
  - match:
    - gateways:
      - istio-egressgateway
      port: 80
    route:
    - destination:
        host: webbackendnxg
        port:
          number: 8000
      weight: 100
istioctl create -f myroute/egress/local-edition.cnn.com.yaml

Test the result in any POD launched with ISTIO injection.

curl edition.cnn.com:80 -v
* Rebuilt URL to: edition.cnn.com:80/
*   Trying 151.101.129.67...
* TCP_NODELAY set
* Connected to edition.cnn.com (151.101.129.67) port 80 (#0)
.
.
.
webbackendnxg-v1-56596cdd44-fws75
* Curl_http_done: called premature == 0
* Connection #0 to host edition.cnn.com left intact

where webbackendnxg-v1-56596cdd44-fws75 is the response from host: webbackendnxg in our mesh.

Non-existing long url to a kubernetes service

Test a url that does not exist on the internet or in your local environment. You will get this result:

root@curlserver-2ddwj:/# curl test.nxg:80 -v
* Rebuilt URL to: test.nxg:80/
* Could not resolve host: test.nxg
* Closing connection 0
curl: (6) Could not resolve host: test.nxg

After creating the gateway (see above):

cat myroute/egress/local-test.nxg.yaml

apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: nxg-test
spec:
  hosts:
  - test.nxg
  ports:
  - number: 80
    name: http-port
    protocol: HTTP
  - number: 443
    name: https
    protocol: HTTPS
  resolution: DNS
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: direct-through-egress-gateway-test
spec:
  hosts:
  - test.nxg
  gateways:
  # these gateways define the pipeline to the next hop:
  # traffic goes to istio-egressgateway first, then on to the next destination, which could be the internet or local (webbackendnxg)
  # if either one is removed, routing will fail
  - istio-egressgateway
  - mesh
  http:
  - match:
    - gateways:
      - mesh
      port: 80
    route:
    - destination:
        host: istio-egressgateway.istio-system.svc.cluster.local
        port:
          number: 80
      weight: 100
  - match:
    - gateways:
      - istio-egressgateway
      port: 80
    route:
    - destination:
        host: webbackendnxg
        port:
          number: 8000
      weight: 100

There is a sequential pipeline in this yaml: first route traffic to istio-egressgateway, then route it to host: webbackendnxg. The pipeline is declared in the yaml as:

gateways:
  - istio-egressgateway
  - mesh

launch the egress setting.

istioctl create -f  myroute/egress/local-test.nxg.yaml -n jj

where -n jj is the namespace in my environment.

It's not finished yet.

For a ServiceEntry we need a url that really exists in your network environment. In this case test.nxg is not registered and not in DNS, so we need to create test.nxg ourselves.

It's kind of tricky here. In the url test.nxg, nxg is a kubernetes namespace and test is a service. So we need to create:

  • a namespace nxg
  • a test service yaml for nxg namespace
apiVersion: v1
kind: Service
metadata:
  name: test
  namespace: nxg
  labels:
    app: test
spec:
  ports:
  # must use - port (-); can not use - name
  - port: 8000
    targetPort: 8000
    name: http
  selector:
    app: test

Here port 8000 is irrelevant, since the ServiceEntry only checks whether the service exists; any port works, and any other namespace can access it. Note that there is no POD behind this "fake" service.

kubectl create namespace nxg
kubectl apply -f myroute/egress/fakeurl-test.nxg.yaml

Now curl test.nxg:80 -v works:

root@curlserver-2ddwj:/# curl test.nxg:80 -v
* Rebuilt URL to: test.nxg:80/
*   Trying 172.18.117.18...
* TCP_NODELAY set
* Connected to test.nxg (172.18.117.18) port 80 (#0)
> GET / HTTP/1.1
> Host: test.nxg
> User-Agent: curl/7.52.1
> Accept: */*
>
< HTTP/1.1 200 OK
< server: envoy
< date: Fri, 05 Oct 2018 23:19:21 GMT
< content-type: text/html
< x-envoy-upstream-service-time: 6
< transfer-encoding: chunked
<
webbackendnxg-v1-56596cdd44-fws75
* Curl_http_done: called premature == 0
* Connection #0 to host test.nxg left intact

where 172.18.117.18 was assigned to the fake service, and the traffic was routed to webbackendnxg.

Long url to an external IP address

We need to combine the register and egress methods. Suppose we have an IP 172.16.155.207 and want to route from test.nxg to it.

Create a short url first, just as in the previous section (note: use register here; deregister removes it). We register the service port as 80 to match the destination port used below:

istioctl register short 172.16.155.207 http:80 -n jj

apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: nxg-test
spec:
  hosts:
  - test.nxg
  ports:
  - number: 80
    name: http-port
    protocol: HTTP
  - number: 443
    name: https
    protocol: HTTPS
  resolution: DNS
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: direct-through-egress-gateway-test
spec:
  hosts:
  - test.nxg
  gateways:
  # these gateways define the pipeline to the next hop:
  # traffic goes to istio-egressgateway first, then on to the next destination, which could be the internet or local (short)
  # if either one is removed, routing will fail
  - istio-egressgateway
  - mesh
  http:
  - match:
    - gateways:
      - mesh
      port: 80
    route:
    - destination:
        host: istio-egressgateway.istio-system.svc.cluster.local
        port:
          number: 80
      weight: 100
  - match:
    - gateways:
      - istio-egressgateway
      port: 80
    route:
    - destination:
        host: short
        port:
          number: 80
      weight: 100

where short is the service defined using the register command above.

Delete the old setting if you created it earlier:

istioctl delete -f  myroute/egress/local-test.nxg.yaml -n jj

Try this in any POD launched with ISTIO injection:

curl test.nxg:80 -v
* Rebuilt URL to: test.nxg:80/
*   Trying 172.18.117.18...
* TCP_NODELAY set
* Connected to test.nxg (172.18.117.18) port 80 (#0)
* .
* .
* .
* <!DOCTYPE html>
    <html>
      <head>
        <meta name="viewport" content="width=device-width, initial-scale=1">
.
.

It works.

Long url to an outside dns domain name

Previously we showed how edition.cnn.com is directed into our local mesh. Now we want to direct it to its own internet address instead.

apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: cnn-internet
spec:
  hosts:
  - edition.cnn.com
  ports:
  - number: 80
    name: http-port
    protocol: HTTP
  - number: 443
    name: https
    protocol: HTTPS
  resolution: DNS
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: direct-through-egress-gateway-internet
spec:
  hosts:
  - edition.cnn.com
  gateways:
  - istio-egressgateway
  - mesh
  http:
  - match:
    - gateways:
      - mesh
      port: 80
    route:
    - destination:
        host: istio-egressgateway.istio-system.svc.cluster.local
        port:
          number: 80
      weight: 100
  - match:
    - gateways:
      - istio-egressgateway
      port: 80
    route:
    - destination:
        host: edition.cnn.com
        port:
          number: 80
      weight: 100

After launching the gateway (see above):

istioctl create -f myroute/egress/internet-edition.cnn.com.yaml -n jj

Remember to clean up the old local-edition.cnn.com setting if you created it earlier:

istioctl delete -f myroute/egress/local-edition.cnn.com.yaml -n jj

Access it from any POD launched with ISTIO injection:

root@curlserver-2ddwj:/# curl edition.cnn.com:80 -v
* Rebuilt URL to: edition.cnn.com:80/
*   Trying 151.101.193.67...
* TCP_NODELAY set
* Connected to edition.cnn.com (151.101.193.67) port 80 (#0)
> GET / HTTP/1.1
> .
> .
> < HTTP/1.1 301 Moved Permanently
< server: envoy
< retry-after: 0
.
.

It works.

Monday, August 13, 2018

ansible installation for 2.3.0

Ansible

Ansible

Ansible scripts are quite sensitive to the version, so we want to install exactly ansible 2.3.0 on Ubuntu 16.04.

apt-get install -y software-properties-common git python-pip python-dev libffi-dev libssl-dev
pip install 'ansible==2.3.0.0'

Note that ansible is installed into /usr/local/bin/:

/usr/local/bin/ansible --version
apt-get install sshpass
mkdir /etc/ansible

edit /etc/ansible/ansible.cfg

[defaults]


# uncomment this to disable SSH key host checking
host_key_checking = False


[persistent_connection]

# Configures the persistent connection timeout value in seconds.  This value is
# how long the persistent connection will remain idle before it is destroyed.
# If the connection doesn't receive a request before the timeout value
# expires, the connection is shutdown.  The default value is 30 seconds.
connect_timeout = 30

# Configures the persistent connection retries.  This value configures the
# number of attempts the ansible-connection will make when trying to connect
# to the local domain socket.  The default value is 30.
connect_retries = 30

# Configures the amount of time in seconds to wait between connection attempts
# to the local unix domain socket.  This value works in conjunction with the
# connect_retries value to define how long to try to connect to the local
# domain socket when setting up a persistent connection.  The default value is
# 1 second.
connect_interval = 1

Start to Deploy

/usr/local/bin/ansible-playbook -i hosts-single site.yml --skip-tags=testingonly -vvv
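
where hosts-single is the inventory file. A hypothetical minimal inventory (host name, address, and user are placeholders for your environment):

cat > hosts-single <<'EOF'
[all]
node1 ansible_host=192.168.0.10 ansible_user=ubuntu
EOF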

Sunday, August 12, 2018

ISTIO 0.8.0 Installation and Playing in Kubernetes

ISTIO Installation

ISTIO Installation

Environment

As A Standard Kubernetes

  • kube-apiserver, kube-controller-manager, kube-proxy, kube-scheduler, and kubelet are all needed.
  • CoreDNS; also set cluster_dns in kubelet, as in a standard setup.
  • Flannel.
  • apt-get install socat on each client and server node.

ISTIO doesn't provide DNS, so we have to rely on the underlying kubernetes DNS, i.e. coredns or kubedns.

Additional API Setting

--enable-admission-plugins=NamespaceLifecycle,ServiceAccount,LimitRanger,ResourceQuota,PersistentVolumeLabel,DefaultTolerationSeconds,DefaultStorageClass,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
--authorization-mode=RBAC \

Additional Proxy Setting

--proxy-mode=ipvs \

Finally, we use ISTIO 0.8.0, since it is the LTS release.

For Kubernetes 1.11

wget https://github.com/istio/istio/releases/download/0.8.0/istio-0.8.0-linux.tar.gz
tar xzf istio-0.8.0-linux.tar.gz
cd istio-0.8.0
export PATH=$PWD/bin:$PATH
root@kubecontext:~/istio-0.8.0# istioctl version
Version: 0.8.0
GitRevision: 6f9f420f0c7119ff4fa6a1966a6f6d89b1b4db84
User: root@48d5ddfd72da
Hub: docker.io/istio
GolangVersion: go1.10.1
BuildStatus: Clean
root@kubecontext:~/istio-0.8.0# kubectl apply -f install/kubernetes/istio-demo.yaml
namespace/istio-system created
configmap/istio-statsd-prom-bridge created
configmap/istio-mixer-custom-resources created
configmap/prometheus created
configmap/istio created
configmap/istio-sidecar-injector created
serviceaccount/istio-egressgateway-service-account created
serviceaccount/istio-ingressgateway-service-account created
serviceaccount/istio-mixer-post-install-account created
clusterrole.rbac.authorization.k8s.io/istio-mixer-post-install-istio-system created
clusterrolebinding.rbac.authorization.k8s.io/istio-mixer-post-install-role-binding-istio-system created
job.batch/istio-mixer-post-install created
serviceaccount/istio-mixer-service-account created
serviceaccount/istio-pilot-service-account created
serviceaccount/prometheus created
serviceaccount/istio-citadel-service-account created
serviceaccount/istio-cleanup-old-ca-service-account created
serviceaccount/istio-sidecar-injector-service-account created
customresourcedefinition.apiextensions.k8s.io/rules.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/attributemanifests.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/circonuses.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/deniers.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/fluentds.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/kubernetesenvs.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/listcheckers.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/memquotas.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/noops.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/opas.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/prometheuses.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/rbacs.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/servicecontrols.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/solarwindses.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/stackdrivers.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/statsds.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/stdios.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/apikeys.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/authorizations.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/checknothings.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/kuberneteses.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/listentries.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/logentries.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/metrics.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/quotas.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/reportnothings.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/servicecontrolreports.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/tracespans.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/serviceroles.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/servicerolebindings.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/destinationpolicies.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/egressrules.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/routerules.config.istio.io created
customresourcedefinition.apiextensions.k8s.io/virtualservices.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/destinationrules.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/serviceentries.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/gateways.networking.istio.io created
customresourcedefinition.apiextensions.k8s.io/policies.authentication.istio.io created
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
customresourcedefinition.apiextensions.k8s.io/httpapispecbindings.config.istio.io configured
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
customresourcedefinition.apiextensions.k8s.io/httpapispecs.config.istio.io configured
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
customresourcedefinition.apiextensions.k8s.io/quotaspecbindings.config.istio.io configured
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
customresourcedefinition.apiextensions.k8s.io/quotaspecs.config.istio.io configured
clusterrole.rbac.authorization.k8s.io/istio-mixer-istio-system created
clusterrole.rbac.authorization.k8s.io/istio-pilot-istio-system created
clusterrole.rbac.authorization.k8s.io/prometheus-istio-system created
clusterrolebinding.rbac.authorization.k8s.io/prometheus-istio-system created
clusterrole.rbac.authorization.k8s.io/istio-citadel-istio-system created
role.rbac.authorization.k8s.io/istio-cleanup-old-ca-istio-system created
clusterrole.rbac.authorization.k8s.io/istio-sidecar-injector-istio-system created
clusterrolebinding.rbac.authorization.k8s.io/istio-mixer-admin-role-binding-istio-system created
clusterrolebinding.rbac.authorization.k8s.io/istio-pilot-istio-system created
clusterrolebinding.rbac.authorization.k8s.io/istio-citadel-istio-system created
rolebinding.rbac.authorization.k8s.io/istio-cleanup-old-ca-istio-system created
clusterrolebinding.rbac.authorization.k8s.io/istio-sidecar-injector-admin-role-binding-istio-system created
service/istio-egressgateway created
service/grafana created
service/istio-ingressgateway created
service/istio-policy created
service/istio-telemetry created
service/istio-statsd-prom-bridge created
deployment.extensions/istio-statsd-prom-bridge created
service/istio-pilot created
service/prometheus created
service/istio-citadel created
service/servicegraph created
service/istio-sidecar-injector created
deployment.extensions/istio-egressgateway created
deployment.extensions/grafana created
deployment.extensions/istio-ingressgateway created
deployment.extensions/istio-policy created
deployment.extensions/istio-telemetry created
deployment.extensions/istio-pilot created
deployment.extensions/prometheus created
deployment.extensions/istio-citadel created
deployment.extensions/servicegraph created
deployment.extensions/istio-sidecar-injector created
deployment.extensions/istio-tracing created
job.batch/istio-cleanup-old-ca created
horizontalpodautoscaler.autoscaling/istio-egressgateway created
horizontalpodautoscaler.autoscaling/istio-ingressgateway created
service/zipkin created
service/tracing created
mutatingwebhookconfiguration.admissionregistration.k8s.io/istio-sidecar-injector created

Modify

If you are not using version 0.8.0, it will report a lot of errors.

Enable RBAC

Add to the kube-apiserver flags:

--authorization-mode=RBAC

Modify Admission Control (remove SecurityContextDeny)

kube-apiserver

For kube-inject to work, change

--admission-control=NamespaceLifecycle,ServiceAccount,LimitRanger,SecurityContextDeny,ResourceQuota \

to

--admission-control=NamespaceLifecycle,ServiceAccount,LimitRanger,ResourceQuota \

and then to

--enable-admission-plugins=NamespaceLifecycle,ServiceAccount,LimitRanger,ResourceQuota,PersistentVolumeLabel,DefaultTolerationSeconds,DefaultStorageClass,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \

IPVS Mode

kube-proxy.service

--proxy-mode=ipvs \

Install socat

apt-get install socat

On all K8S nodes and client servers.

Helm Installation

https://github.com/kubernetes/helm/releases

Download 2.9.1.

helm init --upgrade -i registry.cn-hangzhou.aliyuncs.com/google_containers/tiller:v2.9.1 --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts

Modify the sidecar injector config to allow all egress traffic, so pods can access the network directly with apt-get, pip install, or whatever you want, as if there were no firewall.

helm template install/kubernetes/helm/istio --name istio --namespace istio-system --set sidecarInjectorWebhook.enabled=false --set global.proxy.includeIPRanges="172.16.0.0/16" -x templates/sidecar-injector-configmap.yaml | kubectl apply -f -

where 172.16.0.0/16 should match your cluster/docker IP range.

If you don't want this, just don't execute the above command.

helm template install/kubernetes/helm/istio --name istio --namespace istio-system --set istio-sidecar-injector=true  -x templates/sidecar-injector-configmap.yaml | kubectl apply -f -

Here automatic sidecar injection is disabled by sidecarInjectorWebhook.enabled=false.

kubectl get po --all-namespaces
kube-system    tiller-deploy-b67849f44-bfkch              1/1       Running            0          3m
/root/helm/helm template /root/istio-0.8.0/install/kubernetes/helm/istio --name istio --namespace istio-system > ./istio-0.8.yaml
kubectl apply -f istio-0.8.yaml

Create Application

kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml) -n istio-system
//or kubectl apply -f samples/bookinfo/kube/bookinfo.yaml -n istio-system


istioctl create -f samples/bookinfo/routing/bookinfo-gateway.yaml -n istio-system
istioctl get gateway 

Note that istioctl is not tied to the kubectl context, and its default namespace is default; if your namespace is not default, add the namespace to the istioctl command.
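
For example, the gateway listing from above, scoped explicitly to a namespace:

istioctl get gateway -n istio-system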

Check Yaml

kubectl get pod productpage-v1-57f4d6b98-qwx58 -o yaml -n istio-system
kubectl get deployment  productpage-v1 -o yaml -n istio-system

You will see the injected envoy container running:

.
.
.
        image: docker.io/istio/proxyv2:0.8.0
        imagePullPolicy: IfNotPresent
        name: istio-proxy.
.

Test result

curl http://172.16.155.207:31380/productpage

Check version

curl -s http://172.16.155.207:31380/productpage|grep color

In Container

kubectl run curl-test --image=radial/busyboxplus:curl -i --tty --rm -n istio-system

Construct Network

Kubectl Only

kubectl apply -f samples/bookinfo/kube/bookinfo-gateway.yaml -n istio-system

## See it using ing, not gateway
kubectl get ing --all-namespaces

## Now you can connect through port 32000 (different from the istio-created gateway)
http://172.16.155.207:32000/productpage

None of this worked.

ISTIOCTL Only

Worked Method

https://blog.csdn.net/wenwenxiong/article/details/80068835

Do not use the kube folder; temporarily use the istio-system namespace. Do not trust curl results from another container!! Use a browser, and edit the service type from LoadBalancer to NodePort.

istioctl create -f samples/bookinfo/routing/bookinfo-gateway.yaml -n istio-system

istioctl create -f samples/bookinfo/routing/route-rule-all-v1.yaml -n istio-system

then (the rules above must be created first; why this is required still needs to be investigated):
istioctl create -f samples/bookinfo/routing/route-rule-reviews-v3.yaml -n istio-system

istioctl replace -f samples/bookinfo/routing/route-rule-reviews-v2-v3.yaml -n istio-system

Additional

# after istioctl create -f samples/bookinfo/routing/route-rule-all-v1.yaml -n istio-system
istioctl replace -f myroute/route-review-1.yaml -n istio-system
istioctl replace -f myroute/route-review-2.yaml -n istio-system

Not Working

Delete All

istioctl delete -f samples/bookinfo/routing/bookinfo-gateway.yaml -n istio-system
kubectl delete -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml) -n istio-system
kubectl delete -f istio-0.8.yaml
 

Clean All under namespace

samples/bookinfo/kube/cleanup.sh

Deploy Application in Another Namespace

We live in namespace jj, set via the kubernetes context.

kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/kube/bookinfo.yaml)

Use istioctl to create the network rules; don't forget the namespace, since istioctl is not aware of the kubernetes context.

istioctl create -f samples/bookinfo/routing/bookinfo-gateway.yaml -n jj
istioctl create -f samples/bookinfo/routing/route-rule-all-v1.yaml -n jj

route-rule-all-v1.yaml is a critical setting for a beginner; it is a must, or the page will return a failure message:

Sorry, product reviews are currently unavailable for this book.

After the setting, we can start to route traffic.

istioctl create -f samples/bookinfo/routing/route-rule-reviews-v2-v3.yaml -n jj
istioctl create -f samples/bookinfo/routing/route-rule-reviews-v3.yaml -n jj

Testing

curl -s http://172.16.155.207:31380/productpage|grep color
or 
using browser.

Analysis

We cannot directly apply route-rule-reviews-v3.yaml, since it references parameters that are not yet defined; they are defined in route-rule-all-v1.yaml. So we have to apply route-rule-all-v1.yaml first.

We can refactor the file and focus on the reviews POD traffic. Then we can apply just the following yaml without applying route-rule-all-v1.yaml (the apply command is shown right after the yaml).

---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: reviews
spec:
  host: reviews
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
  - name: v3
    labels:
      version: v3
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
    - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v2
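
To apply it (the file path is just an example name; use wherever you saved the yaml above):

istioctl create -f myroute/route-review-v2.yaml -n jj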

Header Filter

istioctl replace -f myroute/route-review-user.yaml -n jj
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
    - reviews
  http:
  # the - match rules must come before the final - route; order matters. This one matches user jason.
  - match:
    - headers:
        cookie:
          regex: "^(.*?;)?(user=jason)(;.*)?$"
    route:
    - destination:
        host: reviews
        subset: v3
  - match:
    - headers:
        cookie:
          regex: "^(.*?;)?(user=mary)(;.*)?$"
    route:
    - destination:
        host: reviews
        subset: v1
  - route:
    - destination:
        host: reviews
        subset: v2

Order matters: the - match blocks must come before the final catch-all - route.

LoadBalancer

istioctl replace -f myroute/route-review-lb.yaml -n jj
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
    - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v2
      weight: 20
    - destination:
        host: reviews
        subset: v3
      weight: 80

Inject Setting

kubectl label namespace jj istio-injection=enabled

Check Label

kubectl get namespace -L istio-injection

Delete Labeled

kubectl label namespace jj istio-injection-

The system will then automatically inject the sidecar into PODs created with a plain kubectl apply -f xxx.yaml.

But if you don't want to run the sidecar for a particular POD:

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: webbackend-v1
spec:
  #serviceName: "webbackend"
  replicas: 1
  template:
    metadata:
      labels:
        app: webbackend
        version: v1
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
        sidecar.istio.io/inject: "false"

Add sidecar.istio.io/inject: "false" to the annotations; then a plain kubectl apply -f xxx.yaml will create the POD without a sidecar.

root@kubecontext:~/istio-0.8.0# kubectl -n istio-system get configmap istio-sidecar-injector -o jsonpath='{.data.config}' | head
policy: enabled

Two kinds of settings determine whether a pod runs with injection by default:

namespaceSelector match   default policy   Pod annotation sidecar.istio.io/inject   Sidecar injected?
yes                       enabled          true                                     yes
yes                       enabled          false                                    no
yes                       enabled          (unset)                                  yes
yes                       disabled         true                                     yes
yes                       disabled         false                                    no
yes                       disabled         (unset)                                  no
no                        enabled          true                                     no
no                        enabled          false                                    no
no                        enabled          (unset)                                  no
no                        disabled         true                                     no
no                        disabled         false                                    no
no                        disabled         (unset)                                  no

If policy: enabled and the namespace has istio-injection=enabled, pods get the sidecar by default and with sidecar.istio.io/inject: "true". If policy: disabled and the namespace has istio-injection=enabled, only pods with sidecar.istio.io/inject: "true" get the sidecar; otherwise no sidecar is injected.
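
To check both knobs on a running cluster (the first two commands appear earlier in this post):

kubectl -n istio-system get configmap istio-sidecar-injector -o jsonpath='{.data.config}' | head -1
kubectl get namespace -L istio-injection
kubectl get pod <pod-name> -o yaml | grep sidecar.istio.io/inject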

Saturday, July 14, 2018

XGBOOST Installation in Ubuntu

xgboost

XGBOOST

import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import numpy as np

How To Install

Install xgboost

git clone --recursive https://github.com/dmlc/xgboost.git
cd xgboost
./build.sh

cd python-package
python setup.py install
#or pip3 install -e python-package  

Install Others

pip3 install pandas
pip3 install scipy
pip3 install numpy==1.13.3
pip3 install sklearn
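
A quick smoke test after installation (hedged; it only confirms the modules import and prints the xgboost version):

python3 -c "import xgboost, pandas, numpy, sklearn; print(xgboost.__version__)"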