  • NodePort
  • Ingress

# Creating a NodePort service
# Master node and worker node information

# kubectl get nodes -o wide
NAME     STATUS   ROLES    AGE    VERSION   INTERNAL-IP      EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
m-k8s    Ready    master   3d5h   v1.18.4   192.168.56.10    <none>        CentOS Linux 7 (Core)   3.10.0-1160.90.1.el7.x86_64   docker://18.9.9
w1-k8s   Ready    <none>   3d4h   v1.18.4   192.168.56.101   <none>        CentOS Linux 7 (Core)   3.10.0-1160.90.1.el7.x86_64   docker://18.9.9
w2-k8s   Ready    <none>   3d4h   v1.18.4   192.168.56.102   <none>        CentOS Linux 7 (Core)   3.10.0-1160.90.1.el7.x86_64   docker://18.9.9
w3-k8s   Ready    <none>   3d4h   v1.18.4   192.168.56.103   <none>        CentOS Linux 7 (Core)   3.10.0-1160.90.1.el7.x86_64   docker://18.9.9

 

# NodePort service object spec for the pods

# cat nodeport.yaml

apiVersion: v1
kind: Service               # object kind: Service

metadata:                   # metadata: the name of the service
  name: np-svc
spec:                       # spec: select pods by label
  selector:
    app: np-pods
  ports:                    # protocol and ports to use
    - name: http
      protocol: TCP
      port: 80              # service (cluster) port
      targetPort: 80        # container port inside the pod
      nodePort: 30000       # port exposed on every node (default range 30000-32767)
  type: NodePort            # service type

 

# Create the pods

# kubectl create deployment np-pods --image=sysnet4admin/echo-hname
deployment.apps/np-pods created

# Create the NodePort service for the pods
# kubectl create -f nodeport.yaml

# kubectl get pods -o wide
NAME                                             READY   STATUS    RESTARTS   AGE     IP               NODE     NOMINATED NODE   READINESS GATES
np-pods-5767d54d4b-txwm4                         1/1     Running   0          10m     172.16.103.169   w2-k8s   <none>           <none>

# kubectl get services
NAME                            TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)        AGE
np-svc                          NodePort       10.104.110.226   <none>          80:30000/TCP   7m32s
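
As a quick check (a minimal sketch, assuming the echo-hname container answers HTTP on port 80 as its name suggests), the service should now be reachable on port 30000 of any node IP:

# curl 192.168.56.101:30000
# curl 192.168.56.103:30000
(each request is expected to return the hostname of the pod that served it)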

 

 

 

# Creating an Ingress

 

# cat ingress-config.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: ingress-nginx
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path:
        backend:
          serviceName: hname-svc-default
          servicePort: 80
      - path: /ip
        backend:
          serviceName: ip-svc
          servicePort: 80
      - path: /your-directory
        backend:
          serviceName: your-svc
          servicePort: 80

 

# cat ingress-nginx.yaml
# All sources from https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yaml
# cloned from the above into sysnet4admin

apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      # wait up to five minutes for the drain of connections
      terminationGracePeriodSeconds: 300
      serviceAccountName: nginx-ingress-serviceaccount
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: nginx-ingress-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 101
            runAsUser: 101
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown

---

apiVersion: v1
kind: LimitRange
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  limits:
  - min:
      memory: 90Mi
      cpu: 100m
    type: Container

 

# cat ingress.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30100
  - name: https
    protocol: TCP
    port: 443
    targetPort: 443
    nodePort: 30101
  selector:
    app.kubernetes.io/name: ingress-nginx
  type: NodePort

 

 

# kubectl create deployment in-hname-pod --image=sysnet4admin/echo-hname
deployment.apps/in-hname-pod created

# kubectl create deployment in-ip-pod --image=sysnet4admin/echo-ip
deployment.apps/in-ip-pod created

# kubectl apply -f ingress-nginx.yaml
namespace/ingress-nginx created
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
deployment.apps/nginx-ingress-controller created
limitrange/ingress-nginx created

# kubectl apply -f ingress-config.yaml
ingress.networking.k8s.io/ingress-nginx configured

# kubectl apply -f ingress.yaml
service/nginx-ingress-controller created

# kubectl expose deployment in-hname-pod --name=hname-svc-default --port=80,443
service/hname-svc-default exposed

# kubectl expose deployment in-ip-pod --name=ip-svc --port=80,443
service/ip-svc exposed
# kubectl get pods -n ingress-nginx
NAME                                        READY   STATUS    RESTARTS   AGE
nginx-ingress-controller-5bb8fb4bb6-mnx88   1/1     Running   0          23s

# kubectl get ingress
NAME            CLASS    HOSTS   ADDRESS   PORTS   AGE
ingress-nginx   <none>   *                 80      40m

# kubectl get services -n ingress-nginx
NAME                       TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
nginx-ingress-controller   NodePort   10.101.20.235   <none>        80:30100/TCP,443:30101/TCP   20s

# kubectl get services
NAME                            TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)          AGE
hname-svc-default               ClusterIP      10.97.228.75     <none>          80/TCP,443/TCP   18s
ip-svc                          ClusterIP      10.108.49.235    <none>          80/TCP,443/TCP   9s

# kubectl get pods
NAME                                             READY   STATUS    RESTARTS   AGE
in-hname-pod-8565c86448-d8q9h                    1/1     Running   0          2m40s
in-ip-pod-76bf6989d-j7pdk                        1/1     Running   0          2m30s
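
A quick way to verify the Ingress routing (a sketch, assuming the echo-hname and echo-ip containers respond on port 80) is to call the controller's NodePort with the paths defined in ingress-config.yaml:

# curl 192.168.56.101:30100
(the default path is routed to hname-svc-default)

# curl 192.168.56.101:30100/ip
(the /ip path is routed to ip-svc)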

 

 

 

# Understanding Kubernetes

 

  • Kubernetes provides container orchestration as a service: it manages the complex steps involved and lets you predefine how the pieces relate so they are easy to use. It not only connects, runs, and terminates many containers in an organized way, but also tracks and preserves their state, making containers reliable to operate.

 

# kubectl get nodes
NAME     STATUS   ROLES    AGE    VERSION
m-k8s    Ready    master   2d4h   v1.18.4
w1-k8s   Ready    <none>   2d4h   v1.18.4
w2-k8s   Ready    <none>   2d3h   v1.18.4
w3-k8s   Ready    <none>   2d3h   v1.18.4

 

# kubectl get pods --all-namespaces
NAMESPACE              NAME                                             READY   STATUS    RESTARTS   AGE
kube-system            calico-kube-controllers-99c9b6f64-f2hgs          1/1     Running   7          2d4h
kube-system            calico-node-5j69x                                1/1     Running   12         2d4h
kube-system            calico-node-c4grb                                1/1     Running   11         2d3h
kube-system            calico-node-dmlgz                                1/1     Running   10         2d3h
kube-system            calico-node-r5w6f                                1/1     Running   7          2d4h
kube-system            coredns-66bff467f8-26xcv                         1/1     Running   7          2d4h
kube-system            coredns-66bff467f8-b5v8m                         1/1     Running   7          2d4h
kube-system            etcd-m-k8s                                       1/1     Running   7          2d4h
kube-system            kube-apiserver-m-k8s                             1/1     Running   8          2d4h
kube-system            kube-controller-manager-m-k8s                    1/1     Running   14         2d4h
kube-system            kube-proxy-b48gp                                 1/1     Running   11         2d3h
kube-system            kube-proxy-b8sl2                                 1/1     Running   11         2d4h
kube-system            kube-proxy-c4frz                                 1/1     Running   7          2d4h
kube-system            kube-proxy-jdsdn                                 1/1     Running   10         2d3h
kube-system            kube-scheduler-m-k8s                             1/1     Running   17         2d4h
kubernetes-dashboard   dashboard-metrics-scraper-68fc77645b-lxhkc       1/1     Running   5          24h
kubernetes-dashboard   kubernetes-dashboard-7f9d757bdb-k4h6l            1/1     Running   5          24h

 

# Kubernetes components

<Source: https://kubernetes.io/ko/docs/concepts/overview/components/>
<Source: https://arisu1000.tistory.com/27827>

 

# Master Node

  • kubectl: the CLI used to issue commands to the Kubernetes cluster.
  • API server: the central gateway of the Kubernetes cluster; all components communicate through it.
  • etcd: the store that holds the state of every cluster component.
  • Controller manager (c-m): manages the state of cluster objects (for example, when a worker node becomes unreachable, the node controller inside the controller manager detects it and handles recovery).
  • Scheduler (sched): decides which worker node a pod should be created on, considering node status, resources, labels, and requirements (a quick check of these components is shown after this list).
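
To see these control-plane pieces from the command line (a small sketch; kubectl get componentstatuses is deprecated in newer releases but still available on this v1.18 cluster):

# kubectl get componentstatuses
# kubectl get pods -n kube-system -o wide | grep m-k8s
(the API server, controller manager, scheduler, and etcd all run as pods on the master node, as the earlier listing shows)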

# Worker Node

  • kubelet: receives the pod spec (PodSpec), passes it to the container runtime, and monitors whether the containers in the pod are running properly (manages pod state; start with: systemctl start kubelet).
  • Container runtime (CRI, Container Runtime Interface): responsible for running the containers that make up a pod.
  • Pod: a unit of one or more containers grouped to do a single job (a bundle of containers).

# Pod state management and communication

  • kubelet: creates pods and manages and recovers their state (see the quick check after this list).
  • kube-proxy: handles pod-to-pod communication.
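
A minimal way to check both (a sketch; the kube-proxy label below is the one kubeadm applies by default):

# systemctl status kubelet         (run on the worker node itself)
# kubectl get pods -n kube-system -l k8s-app=kube-proxy -o wide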

 

# Creating and managing pods
# docker images nginx
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
nginx               latest              b690f5f0a2d5        3 months ago        187MB
nginx               stable              3a8963c304a2        10 months ago       142MB

# (create as a plain pod)
# kubectl run nginx-pod --image=nginx  

# (create via a Deployment)
# kubectl create deployment dpy-nginx --image=nginx

# kubectl get pods
NAME                                             READY   STATUS    RESTARTS   AGE
nginx-pod                                        1/1     Running   0          5m30s
dpy-nginx-c8d778df-4tsmz                         1/1     Running   0          4m59s


# kubectl scale pod nginx-pod --replicas=3
Error from server (NotFound): the server could not find the requested resource
(scaling fails because nginx-pod was created as a plain pod, so the Deployment replicas command does not apply to it)


# kubectl scale deployment dpy-nginx --replicas=3
(scaling works because dpy-nginx was created as a Deployment, so the replicas command applies)

# kubectl get pods 
NAME                                             READY   STATUS    RESTARTS   AGE
dpy-nginx-c8d778df-4tsmz                         1/1     Running   0          16m
dpy-nginx-c8d778df-6qzjj                         1/1     Running   0          77s
dpy-nginx-c8d778df-mtwsz                         1/1     Running   0          77s
nginx-pod                                        1/1     Running   0          17m
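
Scaling works here because the Deployment manages a ReplicaSet, which in turn keeps the requested number of pods running; a quick way to see that chain (a brief sketch):

# kubectl get deployment dpy-nginx
# kubectl get replicasets
(the ReplicaSet named dpy-nginx-c8d778df should report a desired/current/ready count of 3)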

 

# Creating an object from a spec (YAML)
# cat echo-hname.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: echo-hname
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: echo-hname
        image: sysnet4admin/echo-hname


# Run the YAML file
# kubectl create -f echo-hname.yaml
deployment.apps/echo-hname created


# kubectl get pods
NAME                                             READY   STATUS    RESTARTS   AGE
echo-hname-7894b67f-cndkd                        1/1     Running   0          44s
echo-hname-7894b67f-gbxk5                        1/1     Running   0          44s
echo-hname-7894b67f-tv7rs                        1/1     Running   0          44s
nginx-pod                                        1/1     Running   0          30m

 

- The image in the YAML file, sysnet4admin/echo-hname, is pulled from Docker Hub.

< spec image: https://hub.docker.com/r/sysnet4admin/echo-hname >

 

# Changing an object's spec
# Recreate with the pod count changed to 6
(after changing spec: replicas from 3 to 6 in the file below)

# cat echo-hname.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: echo-hname
  labels:
    app: nginx
spec:
  replicas: 6
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: echo-hname
        image: sysnet4admin/echo-hname


# kubectl create -f echo-hname.yaml
Error from server (AlreadyExists): error when creating "echo-hname.yaml": deployments.apps "echo-hname" already exists
# The pod count is not updated from 3 to 6 with create

# kubectl apply -f echo-hname.yaml
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
deployment.apps/echo-hname configured


# kubectl get pods
NAME                                             READY   STATUS    RESTARTS   AGE
echo-hname-7894b67f-cndkd                        1/1     Running   0          7m4s
echo-hname-7894b67f-frrrl                        1/1     Running   0          38s
echo-hname-7894b67f-gbxk5                        1/1     Running   0          7m4s
echo-hname-7894b67f-s9pv4                        1/1     Running   0          38s
echo-hname-7894b67f-trwcb                        1/1     Running   0          38s
echo-hname-7894b67f-tv7rs                        1/1     Running   0          7m4s
nginx-pod                                        1/1     Running   0          37m

 

Comparison of object creation commands

Category             Run          Create       Apply
===================  ===========  ===========  =================
Run a command        Limited      Yes          No
Run from a file      No           Yes          Yes
Can be changed       No           No           Yes
Ease of use          Very good    Very good    Good
Feature retention    Limited      Supported    Broadly supported
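
One practical consequence of this table is that create and apply work from files, and kubectl can also generate such a file for you. A hedged sketch (the --dry-run=client flag is available on this v1.18 cluster; older releases accept the plain --dry-run form):

# kubectl create deployment dpy-nginx --image=nginx --dry-run=client -o yaml > dpy-nginx.yaml
# kubectl apply -f dpy-nginx.yaml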

 

# How to access the inside of a pod
# kubectl get pods
NAME                                             READY   STATUS    RESTARTS   AGE
echo-hname-7894b67f-cndkd                        1/1     Running   0          14m
echo-hname-7894b67f-frrrl                        1/1     Running   0          7m41s
echo-hname-7894b67f-gbxk5                        1/1     Running   0          14m
echo-hname-7894b67f-s9pv4                        1/1     Running   0          7m41s
echo-hname-7894b67f-trwcb                        1/1     Running   0          7m41s
echo-hname-7894b67f-tv7rs                        1/1     Running   0          14m
nginx-pod                                        1/1     Running   0          44m

# kubectl exec -it echo-hname-7894b67f-cndkd -- /bin/bash
root@echo-hname-7894b67f-cndkd:/# exit
exit

# kubectl exec -it nginx-pod -- /bin/bash
root@nginx-pod:/# exit
exit
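
Besides an interactive shell, you can run a one-off command or read logs without entering the pod (a small sketch):

# kubectl exec nginx-pod -- nginx -v
# kubectl logs nginx-pod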

 

# Deleting pods
# nginx-pod is deleted for good, but echo-hname-xx is recreated after deletion (its Deployment restores the replica count)

# kubectl delete pods nginx-pod

# kubectl delete pods echo-hname-7894b67f-tv7rs

# kubectl get pods
NAME                                             READY   STATUS    RESTARTS   AGE
echo-hname-7894b67f-c4pxc                        1/1     Running   0          36s
echo-hname-7894b67f-cndkd                        1/1     Running   1          15h
echo-hname-7894b67f-frrrl                        1/1     Running   1          14h
echo-hname-7894b67f-gbxk5                        1/1     Running   1          15h
echo-hname-7894b67f-s9pv4                        1/1     Running   1          14h
echo-hname-7894b67f-trwcb                        1/1     Running   1          14h

# Delete echo-hname completely (delete the Deployment itself)
# kubectl delete deployments.apps echo-hname
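
Since echo-hname was created from a YAML file, the same deletion can also be expressed declaratively (a sketch using the file from earlier):

# kubectl delete -f echo-hname.yaml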

 

 


 

# Terminology

 

  • Metric: a measured value that reflects the current state of the system. In a container infrastructure, metrics are largely divided into two kinds: system metrics, such as CPU and memory usage measured on objects like pods, and service metrics, such as HTTP status codes, which indicate the state of a service.
  • Time-series database: a database optimized for storing data that accrues over time, keyed on time (a couple of example queries are shown after this list).
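
For instance, once node metrics are in the time-series database they can be queried with PromQL in the Prometheus web UI; two hedged examples using standard node-exporter metric names:

node_memory_MemAvailable_bytes
(currently available memory per node, as reported by node-exporter)

rate(node_cpu_seconds_total{mode!="idle"}[5m])
(per-core CPU usage rate over the last 5 minutes)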

 

# Prometheus components
# kubectl get services
NAME                            TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)        AGE
grafana                         LoadBalancer   10.105.83.88     192.168.56.13   80:31702/TCP   40h
jenkins                         LoadBalancer   10.110.209.109   192.168.56.11   80:31590/TCP   40h
jenkins-agent                   ClusterIP      10.103.100.52    <none>          50000/TCP      40h
kubernetes                      ClusterIP      10.96.0.1        <none>          443/TCP        45h
prometheus-kube-state-metrics   ClusterIP      10.102.2.36      <none>          8080/TCP       40h
prometheus-node-exporter        ClusterIP      None             <none>          9100/TCP       40h
prometheus-server               LoadBalancer   10.109.71.59     192.168.56.12   80:32365/TCP   40h
# Node information

Node      IP                 Host
=======   ==============     ========
Master    192.168.56.10      m-k8s
Worker#1  192.168.56.101     w1-k8s
Worker#2  192.168.56.102     w2-k8s
Worker#3  192.168.56.103     w3-k8s


# kubectl get pods -o wide
NAME                                             READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
grafana-86b96cd9c6-brs7f                         1/1     Running   4          40h   172.16.221.138   w1-k8s   <none>           <none>
prometheus-kube-state-metrics-7bc49db5c5-wv7kh   1/1     Running   2          40h   172.16.221.139   w1-k8s   <none>           <none>
prometheus-node-exporter-mdjgp                   1/1     Running   3          40h   192.168.56.10    m-k8s    <none>           <none>
prometheus-node-exporter-nbprf                   1/1     Running   3          40h   192.168.56.101   w1-k8s   <none>           <none>
prometheus-node-exporter-qjtk8                   1/1     Running   3          17h   192.168.56.103   w3-k8s   <none>           <none>
prometheus-node-exporter-zk6zq                   1/1     Running   3          40h   192.168.56.102   w2-k8s   <none>           <none>
prometheus-server-6d77896bb4-zpmqv               2/2     Running   4          13h   172.16.132.26    w3-k8s   <none>           <none>

 

  • prometheus-server: the collector that scrapes metrics from the nodes, the time-series database that stores the scraped metric data, and a web UI for querying the stored data and checking the state of scrape targets (currently running on w3-k8s).
  • prometheus-node-exporter: exposes each node's system metrics over HTTP. It reads specific files on the node where it is installed, converts them into metric data the Prometheus server can scrape, and publishes them via its HTTP endpoint, which the Prometheus server then scrapes (currently installed on m-k8s, w1-k8s, w2-k8s, and w3-k8s; see the quick check after this list).
  • prometheus-kube-state-metrics: collects various metrics about the Kubernetes cluster through the API server, converts them into metric data the Prometheus server can scrape, and exposes them. kube-state-metrics is what lets Prometheus easily obtain cluster-level information (currently running on w1-k8s).
  • alertmanager: lets you define Prometheus alert rules and, when an alert fires, delivers the configured alert message to its destination.
  • pushgateway: stores the state of one-off jobs run by batch and scheduled tasks and exposes it so Prometheus can scrape it periodically.
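
A quick check of the node-exporter endpoint mentioned above (a sketch, assuming the exporter's default /metrics path on port 9100/TCP shown in the service listing; the pod listing shows the exporters bound to the node IPs):

# curl -s 192.168.56.101:9100/metrics | head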

 

 

 

 

# Run yum update before upgrading Docker

 

# yum update -y
# yum install dnf -y
# dnf update -y

 

# docker pull error on CentOS 7

  - caused by the installed Docker version being too old

# docker pull nginx:stable
Trying to pull repository docker.io/library/nginx ...
missing signature key

 

# Reinstall Docker on CentOS 7 (missing signature key)
# sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux  docker-engine-selinux docker-engine
# sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo


# Install the latest version
# sudo yum install docker-ce


# List available Docker versions
# yum list docker-ce --showduplicates | sort -r
 yum list docker-ce --showduplicates | sort -r
 * updates: mirror.kakao.com
This system is not registered with an entitlement server. You can use subscription-manager to register.
              : manager
Loading mirror speeds from cached hostfile
Loaded plugins: fastestmirror, product-id, search-disabled-repos, subscription-
Installed Packages
 * extras: mirror.kakao.com
 * epel: mirror-nrt.yuki.net.uk
docker-ce.x86_64            3:25.0.3-1.el7                     docker-ce-stable
docker-ce.x86_64            3:25.0.2-1.el7                     docker-ce-stable
docker-ce.x86_64            3:25.0.1-1.el7                     docker-ce-stable
docker-ce.x86_64            3:25.0.0-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.9-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.8-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.7-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.6-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.5-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.4-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.3-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.2-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.1-1.el7                     docker-ce-stable
docker-ce.x86_64            3:24.0.0-1.el7                     docker-ce-stable
docker-ce.x86_64            3:23.0.6-1.el7                     docker-ce-stable
docker-ce.x86_64            3:23.0.5-1.el7                     docker-ce-stable
docker-ce.x86_64            3:23.0.4-1.el7                     docker-ce-stable
docker-ce.x86_64            3:23.0.3-1.el7                     docker-ce-stable
docker-ce.x86_64            3:23.0.2-1.el7                     docker-ce-stable
docker-ce.x86_64            3:23.0.1-1.el7                     docker-ce-stable
docker-ce.x86_64            3:23.0.0-1.el7                     docker-ce-stable
docker-ce.x86_64            3:20.10.9-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.8-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.7-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.6-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.5-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.4-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.3-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.24-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.2-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.23-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.22-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.21-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.20-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.19-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.18-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.17-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.16-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.15-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.14-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.1-3.el7                    docker-ce-stable
docker-ce.x86_64            3:20.10.13-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.12-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.11-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.10-3.el7                   docker-ce-stable
docker-ce.x86_64            3:20.10.0-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.9-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.8-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.7-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.6-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.5-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.4-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.3-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.2-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.15-3.el7                   docker-ce-stable
docker-ce.x86_64            3:19.03.14-3.el7                   docker-ce-stable
docker-ce.x86_64            3:19.03.1-3.el7                    docker-ce-stable
docker-ce.x86_64            3:19.03.13-3.el7                   docker-ce-stable
docker-ce.x86_64            3:19.03.12-3.el7                   docker-ce-stable
docker-ce.x86_64            3:19.03.12-3.el7                   @docker-ce-stable
docker-ce.x86_64            3:19.03.11-3.el7                   docker-ce-stable
docker-ce.x86_64            3:19.03.10-3.el7                   docker-ce-stable
docker-ce.x86_64            3:19.03.0-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.9-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.8-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.7-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.6-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.5-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.4-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.3-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.2-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.1-3.el7                    docker-ce-stable
docker-ce.x86_64            3:18.09.0-3.el7                    docker-ce-stable
docker-ce.x86_64            18.06.3.ce-3.el7                   docker-ce-stable
docker-ce.x86_64            18.06.2.ce-3.el7                   docker-ce-stable
docker-ce.x86_64            18.06.1.ce-3.el7                   docker-ce-stable
docker-ce.x86_64            18.06.0.ce-3.el7                   docker-ce-stable
docker-ce.x86_64            18.03.1.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            18.03.0.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.12.1.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.12.0.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.09.1.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.09.0.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.06.2.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.06.1.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.06.0.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.03.3.ce-1.el7                   docker-ce-stable
docker-ce.x86_64            17.03.2.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.03.1.ce-1.el7.centos            docker-ce-stable
docker-ce.x86_64            17.03.0.ce-1.el7.centos            docker-ce-stable


# Install a specific version
# yum install docker-ce-19.03.12-3.el7
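
After installing, enable and start the service (standard systemd commands); the version check follows below:

# sudo systemctl enable --now docker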



# Check the Docker version
# before the Docker upgrade
# docker -v
Docker version 1.13.1, build 7d71120/1.13.1

# after the Docker upgrade
# docker -v
Docker version 25.0.2, build 29cf629