helm rename + cleanup + terraform files + ansible

2020-03-03 12:36:06 +01:00
parent 9fc9b91935
commit 1eaa25acc8
26 changed files with 169 additions and 36007 deletions

.DS_Store (new binary file, not shown)


@@ -1,516 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  annotations:
    sidecar.istio.io/status: '{"version":"b5faac9e6b02231b7db3b29487392a395f1c85c746bf62dc8cb660444af6e0d9","initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-certs"],"imagePullSecrets":null}'
  creationTimestamp: null
  labels:
    expose: "true"
    security.istio.io/tlsMode: istio
    server: http
    name: "a"
  name: server-a
spec:
  containers:
  - image: beppev/server-a:latest
    name: front-end
    ports:
    - containerPort: 5000
    resources: {}
  - args:
    - proxy
    - sidecar
    - --domain
    - $(POD_NAMESPACE).svc.cluster.local
    - --configPath
    - /etc/istio/proxy
    - --binaryPath
    - /usr/local/bin/envoy
    - --serviceCluster
    - server-a.default
    - --drainDuration
    - 45s
    - --parentShutdownDuration
    - 1m0s
    - --discoveryAddress
    - istio-pilot.istio-system:15011
    - --zipkinAddress
    - zipkin.istio-system:9411
    - --proxyLogLevel=warning
    - --proxyComponentLogLevel=misc:error
    - --connectTimeout
    - 10s
    - --proxyAdminPort
    - "15000"
    - --concurrency
    - "2"
    - --controlPlaneAuthPolicy
    - MUTUAL_TLS
    - --dnsRefreshRate
    - 300s
    - --statusPort
    - "15020"
    - --applicationPorts
    - "5000"
    - --trust-domain=cluster.local
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: INSTANCE_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    - name: SERVICE_ACCOUNT
      valueFrom:
        fieldRef:
          fieldPath: spec.serviceAccountName
    - name: HOST_IP
      valueFrom:
        fieldRef:
          fieldPath: status.hostIP
    - name: ISTIO_META_POD_PORTS
      value: |-
        [
          {"containerPort":5000}
        ]
    - name: ISTIO_META_CLUSTER_ID
      value: Kubernetes
    - name: ISTIO_META_POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: ISTIO_META_CONFIG_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: SDS_ENABLED
      value: "false"
    - name: ISTIO_META_INTERCEPTION_MODE
      value: REDIRECT
    - name: ISTIO_META_INCLUDE_INBOUND_PORTS
      value: "5000"
    - name: ISTIO_METAJSON_LABELS
      value: |
        {"expose":"true","server":"http"}
    - name: ISTIO_META_WORKLOAD_NAME
      value: server-a
    - name: ISTIO_META_OWNER
      value: kubernetes://apis/v1/namespaces/default/pods/server-a
    - name: ISTIO_META_MESH_ID
      value: cluster.local
    image: docker.io/istio/proxyv2:1.4.5
    imagePullPolicy: IfNotPresent
    name: istio-proxy
    ports:
    - containerPort: 15090
      name: http-envoy-prom
      protocol: TCP
    readinessProbe:
      failureThreshold: 30
      httpGet:
        path: /healthz/ready
        port: 15020
      initialDelaySeconds: 1
      periodSeconds: 2
    resources:
      limits:
        cpu: "2"
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 128Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      privileged: false
      readOnlyRootFilesystem: true
      runAsGroup: 1337
      runAsNonRoot: true
      runAsUser: 1337
    volumeMounts:
    - mountPath: /etc/istio/proxy
      name: istio-envoy
    - mountPath: /etc/certs/
      name: istio-certs
      readOnly: true
  initContainers:
  - command:
    - istio-iptables
    - -p
    - "15001"
    - -z
    - "15006"
    - -u
    - "1337"
    - -m
    - REDIRECT
    - -i
    - '*'
    - -x
    - ""
    - -b
    - '*'
    - -d
    - "15020"
    image: docker.io/istio/proxyv2:1.4.5
    imagePullPolicy: IfNotPresent
    name: istio-init
    resources:
      limits:
        cpu: 100m
        memory: 50Mi
      requests:
        cpu: 10m
        memory: 10Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        add:
        - NET_ADMIN
        - NET_RAW
        drop:
        - ALL
      privileged: false
      readOnlyRootFilesystem: false
      runAsGroup: 0
      runAsNonRoot: false
      runAsUser: 0
  volumes:
  - emptyDir:
      medium: Memory
    name: istio-envoy
  - name: istio-certs
    secret:
      optional: true
      secretName: istio.default
status: {}
---
apiVersion: v1
kind: Pod
metadata:
  annotations:
    sidecar.istio.io/status: '{"version":"b5faac9e6b02231b7db3b29487392a395f1c85c746bf62dc8cb660444af6e0d9","initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-certs"],"imagePullSecrets":null}'
  creationTimestamp: null
  labels:
    security.istio.io/tlsMode: istio
    server: http
    name: "b"
  name: server-b
spec:
  containers:
  - image: beppev/server-b:latest
    name: front-end
    ports:
    - containerPort: 6000
    resources: {}
  - args:
    - proxy
    - sidecar
    - --domain
    - $(POD_NAMESPACE).svc.cluster.local
    - --configPath
    - /etc/istio/proxy
    - --binaryPath
    - /usr/local/bin/envoy
    - --serviceCluster
    - server-b.default
    - --drainDuration
    - 45s
    - --parentShutdownDuration
    - 1m0s
    - --discoveryAddress
    - istio-pilot.istio-system:15011
    - --zipkinAddress
    - zipkin.istio-system:9411
    - --proxyLogLevel=warning
    - --proxyComponentLogLevel=misc:error
    - --connectTimeout
    - 10s
    - --proxyAdminPort
    - "15000"
    - --concurrency
    - "2"
    - --controlPlaneAuthPolicy
    - MUTUAL_TLS
    - --dnsRefreshRate
    - 300s
    - --statusPort
    - "15020"
    - --applicationPorts
    - "6000"
    - --trust-domain=cluster.local
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: INSTANCE_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    - name: SERVICE_ACCOUNT
      valueFrom:
        fieldRef:
          fieldPath: spec.serviceAccountName
    - name: HOST_IP
      valueFrom:
        fieldRef:
          fieldPath: status.hostIP
    - name: ISTIO_META_POD_PORTS
      value: |-
        [
          {"containerPort":6000}
        ]
    - name: ISTIO_META_CLUSTER_ID
      value: Kubernetes
    - name: ISTIO_META_POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: ISTIO_META_CONFIG_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: SDS_ENABLED
      value: "false"
    - name: ISTIO_META_INTERCEPTION_MODE
      value: REDIRECT
    - name: ISTIO_META_INCLUDE_INBOUND_PORTS
      value: "6000"
    - name: ISTIO_METAJSON_LABELS
      value: |
        {"server":"http"}
    - name: ISTIO_META_WORKLOAD_NAME
      value: server-b
    - name: ISTIO_META_OWNER
      value: kubernetes://apis/v1/namespaces/default/pods/server-b
    - name: ISTIO_META_MESH_ID
      value: cluster.local
    image: docker.io/istio/proxyv2:1.4.5
    imagePullPolicy: IfNotPresent
    name: istio-proxy
    ports:
    - containerPort: 15090
      name: http-envoy-prom
      protocol: TCP
    readinessProbe:
      failureThreshold: 30
      httpGet:
        path: /healthz/ready
        port: 15020
      initialDelaySeconds: 1
      periodSeconds: 2
    resources:
      limits:
        cpu: "2"
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 128Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      privileged: false
      readOnlyRootFilesystem: true
      runAsGroup: 1337
      runAsNonRoot: true
      runAsUser: 1337
    volumeMounts:
    - mountPath: /etc/istio/proxy
      name: istio-envoy
    - mountPath: /etc/certs/
      name: istio-certs
      readOnly: true
  initContainers:
  - command:
    - istio-iptables
    - -p
    - "15001"
    - -z
    - "15006"
    - -u
    - "1337"
    - -m
    - REDIRECT
    - -i
    - '*'
    - -x
    - ""
    - -b
    - '*'
    - -d
    - "15020"
    image: docker.io/istio/proxyv2:1.4.5
    imagePullPolicy: IfNotPresent
    name: istio-init
    resources:
      limits:
        cpu: 100m
        memory: 50Mi
      requests:
        cpu: 10m
        memory: 10Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        add:
        - NET_ADMIN
        - NET_RAW
        drop:
        - ALL
      privileged: false
      readOnlyRootFilesystem: false
      runAsGroup: 0
      runAsNonRoot: false
      runAsUser: 0
  volumes:
  - emptyDir:
      medium: Memory
    name: istio-envoy
  - name: istio-certs
    secret:
      optional: true
      secretName: istio.default
status: {}
---
kind: Service
apiVersion: v1
metadata:
  name: server-a-service
spec:
  selector:
    name: "a"
  ports:
  - name: http
    protocol: TCP
    port: 5000
  - name: sb
    protocol: TCP
    port: 6000
---
kind: Service
apiVersion: v1
metadata:
  name: server-b-service
spec:
  selector:
    name: "b"
  ports:
  - name: http
    protocol: TCP
    port: 6000
---
kind: VirtualService
apiVersion: networking.istio.io/v1alpha3
metadata:
  name: link-servers
spec:
  hosts:
  - server-a-service
  http:
  - match:
    - headers:
        end-user:
          exact: jason
    route:
    - destination:
        host: server-b-service
  - route:
    - destination:
        host: server-b-service
#kind: Service
#apiVersion: v1
#metadata:
#  name: server-a-service
#spec:
#  selector:
#    server: "http"
#  ports:
#  - name: http
#    protocol: TCP
#    port: 6000
---
kind: Service
apiVersion: v1
metadata:
  name: expose-server
spec:
  type: NodePort
  selector:
    expose: "true"
  ports:
  - name: http
    protocol: TCP
    targetPort: 5000
    port: 5000
    nodePort: 30036
#---
#apiVersion: networking.istio.io/v1alpha3
#kind: VirtualService
#metadata:
#  name: virtual-service
#spec:
#  host:
#  - expose-server
#  http:
#  - match:
#    - headers:
#        end-user:
#          exact: jason
#    route:
#    - destination:
#        host: expose-server
#  - route:
#    - destination:
#        host: expose-server
#
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: ext-host-gwy
spec:
  selector:
    expose: "true"
  servers:
  - port:
      number: 5000
      name: http
      protocol: HTTP
    hosts:
    - expose-server.cluster.local
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: virtual-svc
spec:
  hosts:
  - '*'
  gateways:
  - ext-host-gwy
  http:
  - name: "help"
    route:
    - destination:
        host: expose-server.cluster.local
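
A quick smoke test of the NodePort exposure above; the node address is an assumption taken from the Ansible hosts file later in this commit, and any node IP works for a NodePort. The end-user header match in link-servers applies to in-mesh requests addressed to server-a-service, so it is exercised from inside a meshed pod rather than through the NodePort:

    # from outside the cluster, reaches the pods labeled expose: "true"
    curl http://10.0.2.20:30036/
    # from inside a meshed pod, exercising the link-servers VirtualService
    curl -H "end-user: jason" http://server-a-service:5000/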

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

terraform/.DS_Store (new binary file, not shown)


@@ -0,0 +1,21 @@
# haproxy config provided by https://docs.kublr.com/articles/onprem-multimaster/
frontend kubernetes-api
    bind 10.0.2.2:6443
    bind 127.0.0.1:6443
    mode tcp
    option tcplog
    timeout client 300000
    default_backend kubernetes-api

backend kubernetes-api
    mode tcp
    option tcplog
    option tcp-check
    timeout server 300000
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server apiserver1 10.0.2.10:6443 check
    server apiserver2 10.0.2.11:6443 check
    server apiserver3 10.0.2.12:6443 check
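
Once at least one apiserver is up, the balancer can be checked end to end from any host that reaches 10.0.2.2. kube-apiserver serves /healthz, so a reply through the frontend confirms the tcp proxying; -k is needed because the cluster CA is not in the host trust store:

    curl -k https://10.0.2.2:6443/healthz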

terraform/ansible/hosts (new file)

@@ -0,0 +1,6 @@
127.0.0.1 localhost
10.0.2.2 loadbalancer
10.0.2.10 master1
10.0.2.11 master2
10.0.2.12 master3
10.0.2.20 worker1


@@ -0,0 +1,36 @@
- hosts: MainMaster
  tasks:
  - name: disable swap
    command: swapoff -a
    ignore_errors: yes
  - name: force reset kubeadm for safety
    command: kubeadm reset -f
  - name: generate certificate key
    command: kubeadm alpha certs certificate-key
    register: cert
  - name: kubeadm init
    command: kubeadm init --control-plane-endpoint "10.0.2.2:6443" --upload-certs --certificate-key {{ cert.stdout }}
    ignore_errors: no
  - name: create .kube directory
    command: mkdir ~/.kube
    ignore_errors: yes
  - name: ensure dns server
    # shell, not command: the > redirection needs a shell to work
    shell: echo "nameserver 8.8.8.8" > /etc/resolv.conf
  - name: setup kubeconfig
    command: cp /etc/kubernetes/admin.conf ~/.kube/config
  - name: copy install weave
    copy:
      src: ~/installWeave.sh
      dest: ~/installWeave.sh
    ignore_errors: yes
  - name: make executable
    command: chmod +x installWeave.sh
  - name: weave setup
    command: sh ~/installWeave.sh
  - name: generate worker join command
    command: kubeadm token create --print-join-command
    register: joinOutput
  - name: save worker join
    # quoted so the spaces in the join command survive key=value parsing
    local_action: copy content="{{ joinOutput.stdout }}" dest=~/join.sh
  - name: save master join
    local_action: copy content="{{ joinOutput.stdout }} --control-plane --certificate-key {{ cert.stdout }}" dest=~/joinMaster.sh


@@ -0,0 +1,12 @@
- hosts: SecondaryMasters
  tasks:
  - name: reset kubeadm for safety
    command: kubeadm reset -f
  - name: copy join command
    copy:
      src: joinMaster.sh
      dest: join.sh
  - name: make executable
    command: chmod +x join.sh
  - name: run join
    command: sh ./join.sh
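
The matching worker-side play is not visible in this diff. A sketch of what it would look like, mirroring the play above but consuming the join.sh saved by the MainMaster play (hypothetical, not part of this commit):

    - hosts: Workers
      tasks:
      - name: reset kubeadm for safety
        command: kubeadm reset -f
      - name: copy join command
        copy:
          src: join.sh
          dest: join.sh
      - name: make executable
        command: chmod +x join.sh
      - name: run join
        command: sh ./join.sh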


@@ -0,0 +1,91 @@
- hosts: all
  tasks:
  - name: test connection
    ping:
  - name: add kubernetes repo
    yum_repository:
      name: kubernetes
      description: "Kubernetes yum repository"
      baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
      enabled: yes
      gpgcheck: yes
      repo_gpgcheck: yes
      gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
  - name: copy hosts file
    copy:
      src: hosts
      dest: /etc/hosts
  - name: ensure dns is in resolv.conf
    # shell, not command: the > redirection needs a shell to work
    shell: echo "nameserver 8.8.8.8" > /etc/resolv.conf
  - name: set SELinux to permissive for the current boot
    command: setenforce 0
  - name: set SELinux to permissive persistently
    replace:
      path: /etc/selinux/config
      regexp: 'SELINUX=enforcing'
      # the replace module takes "replace:", not "after:", for the substitution
      replace: 'SELINUX=permissive'
  - name: update all packages
    yum:
      name: '*'
      state: latest
  - name: install kubernetes requirements
    yum:
      name: "{{ requirements }}"
    vars:
      requirements:
      - docker
      - kubeadm
      - kubectl
      - kubelet
  - name: enable and start docker service
    service:
      name: docker
      enabled: yes
      state: started
  - name: enable and start kubelet service
    service:
      name: kubelet
      enabled: yes
      state: started
  - name: open port 6443 tcp (kubernetes api)
    firewalld:
      zone: public
      permanent: yes
      state: enabled
      port: 6443/tcp
  - name: open port 10250 tcp (kubelet)
    firewalld:
      zone: public
      permanent: yes
      state: enabled
      port: 10250/tcp
  - name: open port 443 tcp
    firewalld:
      zone: public
      permanent: yes
      state: enabled
      port: 443/tcp
- hosts: Workers
  tasks:
  - name: open nodeport range 30000-32767 tcp
    firewalld:
      zone: public
      permanent: yes
      state: enabled
      port: 30000-32767/tcp
- hosts: Masters
  tasks:
  - name: open port range 2379-2380 tcp (etcd)
    firewalld:
      zone: public
      permanent: yes
      state: enabled
      port: 2379-2380/tcp
  - name: open ports 10251-10252 tcp (scheduler and controller manager)
    firewalld:
      zone: public
      permanent: yes
      state: enabled
      port: 10251-10252/tcp
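
The three playbooks reference the groups MainMaster, SecondaryMasters, Masters, and Workers. An inventory along these lines would satisfy all of them; the group membership is inferred from terraform/ansible/hosts above, so treat it as an assumption:

    [MainMaster]
    master1

    [SecondaryMasters]
    master2
    master3

    [Masters:children]
    MainMaster
    SecondaryMasters

    [Workers]
    worker1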

terraform/terraform.tf (new empty file)


@@ -0,0 +1,3 @@
project = "flowfactor"
region = "europe-west"
name = "ff-stage-euw1-"
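
terraform.tf is added empty in this commit, so the declarations these values bind to are not in the diff; they would have to look something like this sketch (variable names match the tfvars, the types are assumed):

    variable "project" {
      type = string
    }

    variable "region" {
      type = string
    }

    variable "name" {
      type = string
    }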