hume项目k8s的改造
一、修改构建目录结构
1、在根目录下添加build-work文件夹
目录结构如下
[root@k8s-worker-01 build-work]# tree .
.
├── Dockerfile
├── hume
│ └── start.sh
└── Jenkinsfile
2、每个文件内容如下
Dockerfile
FROM ccr.ccs.tencentyun.com/xxxx/php_supervisor:kafka
USER root
ENV ZBE_PATH /biz-code
ADD hume /biz-code/hume
WORKDIR /biz-code/hume
CMD ./start.sh
Jenkinsfile
pipeline {
  agent {
    kubernetes {
      cloud 'kubernetes-test'
      slaveConnectTimeout 1200
      workspaceVolume hostPathWorkspaceVolume(hostPath: "/opt/workspace", readOnly: false)
      yaml '''
apiVersion: v1
kind: Pod
spec:
  containers:
    - args: ['$(JENKINS_SECRET)', '$(JENKINS_NAME)']
      image: 'jenkins/jnlp-slave:latest-jdk11'
      name: jnlp
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - mountPath: "/etc/localtime"
          name: "localtime"
          readOnly: false
    - command:
        - "cat"
      env:
        - name: "LANGUAGE"
          value: "en_US:en"
        - name: "LC_ALL"
          value: "en_US.UTF-8"
        - name: "LANG"
          value: "en_US.UTF-8"
      image: "ccr.ccs.tencentyun.com/xxxx/php_supervisor:kafka-k8s"
      imagePullPolicy: "IfNotPresent"
      name: "build"
      tty: true
      volumeMounts:
        - mountPath: "/etc/localtime"
          name: "localtime"
        - mountPath: "/biz-code/hume/vendor"
          name: "phpdir"
          readOnly: false
    - command:
        - "cat"
      env:
        - name: "LANGUAGE"
          value: "en_US:en"
        - name: "LC_ALL"
          value: "en_US.UTF-8"
        - name: "LANG"
          value: "en_US.UTF-8"
      image: "registry.cn-beijing.aliyuncs.com/citools/kubectl:self-1.17"
      imagePullPolicy: "IfNotPresent"
      name: "kubectl"
      tty: true
      volumeMounts:
        - mountPath: "/etc/localtime"
          name: "localtime"
          readOnly: false
    - command:
        - "cat"
      env:
        - name: "LANGUAGE"
          value: "en_US:en"
        - name: "LC_ALL"
          value: "en_US.UTF-8"
        - name: "LANG"
          value: "en_US.UTF-8"
      image: "registry.cn-beijing.aliyuncs.com/citools/docker:19.03.9-git"
      imagePullPolicy: "IfNotPresent"
      name: "docker"
      tty: true
      volumeMounts:
        - mountPath: "/etc/localtime"
          name: "localtime"
          readOnly: false
        - mountPath: "/var/run/docker.sock"
          name: "dockersock"
          readOnly: false
  restartPolicy: "Never"
  imagePullSecrets:
    - name: "qcloudregistrykey"
  nodeSelector:
    build: "true"
  securityContext: {}
  volumes:
    - hostPath:
        path: "/var/run/docker.sock"
      name: "dockersock"
    - hostPath:
        path: "/usr/share/zoneinfo/Asia/Shanghai"
      name: "localtime"
    - name: "cachedir"
      hostPath:
        path: "/opt/gopkg"
    - name: "phpdir"
      hostPath:
        path: "/opt/phppkg"
'''
    }
  }
  stages {
    stage('Pulling Code') {
      parallel {
        stage('Pulling Code by Jenkins') {
          when {
            expression {
              env.giteeBranch == null
            }
          }
          steps {
            git(changelog: true, poll: true, url: 'https://gitee.com/xxxx/hume.git', branch: "${BRANCH}", credentialsId: 'gitee-mima')
            script {
              COMMIT_ID = sh(returnStdout: true, script: "git log -n 1 --pretty=format:'%h'").trim()
              TAG = BUILD_TAG + '-' + COMMIT_ID
              println "Current branch is ${BRANCH}, Commit ID is ${COMMIT_ID}, Image TAG is ${TAG}"
            }
          }
        }
        stage('Pulling Code by trigger') {
          when {
            expression {
              env.giteeBranch != null
            }
          }
          steps {
            git(url: 'https://gitee.com/xxxx/hume.git', branch: env.giteeBranch, changelog: true, poll: true, credentialsId: 'gitee-mima')
            script {
              COMMIT_ID = sh(returnStdout: true, script: "git log -n 1 --pretty=format:'%h'").trim()
              TAG = BUILD_TAG + '-' + COMMIT_ID
              println "Current branch is ${env.giteeBranch}, Commit ID is ${COMMIT_ID}, Image TAG is ${TAG}"
            }
          }
        }
      }
    }
    stage('Building') {
      steps {
        container(name: 'build') {
          sh """
            pwd
            whoami
            cp Scripts/init.sh.dev init.sh
            chmod +x init.sh
            ./init.sh
            composer config -g --unset repos.packagist
            composer config repo.packagist composer https://mirrors.cloud.tencent.com/composer/
            composer update
            sudo chmod -R 777 ./*
            sudo rsync -avz --exclude build-work ./* build-work/hume/
          """
        }
      }
    }
    stage('Docker build for creating image') {
      environment {
        HARBOR_USER = credentials('registry-secret')
      }
      steps {
        container(name: 'docker') {
          sh """
            cd build-work
            echo ${HARBOR_USER_USR} ${HARBOR_USER_PSW} ${TAG}
            docker login -u ${HARBOR_USER_USR} -p ${HARBOR_USER_PSW} ${HARBOR_ADDRESS}
            docker build -t ${HARBOR_ADDRESS}/${REGISTRY_DIR}/${IMAGE_NAME}:${TAG} .
            docker push ${HARBOR_ADDRESS}/${REGISTRY_DIR}/${IMAGE_NAME}:${TAG}
          """
        }
      }
    }
    stage('Deploying to K8s') {
      environment {
        MY_KUBECONFIG = credentials('k8s-config')
      }
      steps {
        container(name: 'kubectl') {
          sh """
            /usr/local/bin/kubectl --kubeconfig $MY_KUBECONFIG set image deploy -l app=${IMAGE_NAME} ${IMAGE_NAME}=${HARBOR_ADDRESS}/${REGISTRY_DIR}/${IMAGE_NAME}:${TAG} -n $NAMESPACE
          """
        }
      }
    }
  }
  environment {
    COMMIT_ID = ""
    HARBOR_ADDRESS = "ccr.ccs.tencentyun.com"
    REGISTRY_DIR = "xxxx"
    IMAGE_NAME = "hume"
    NAMESPACE = "dev"
    TAG = ""
  }
  parameters {
    gitParameter(branch: '', branchFilter: 'origin/(.*)', defaultValue: 'dev', description: 'Branch for build and deploy', name: 'BRANCH', quickFilterEnabled: false, selectedValue: 'NONE', sortMode: 'NONE', tagFilter: '*', type: 'PT_BRANCH')
  }
}
hume/start.sh
mkdir /biz-code/hume/data/logs/supervisor
supervisord -c /biz-code/hume/Config/supervisor.conf
supervisorctl restart all
二、jenkins添加流水线任务,将镜像构建流程跑通
1、jenkins界面添加pipeline注意提前安装好对应插件
Git
Git Parameter
Git Pipeline for Blue Ocean
GitLab
Credentials
Credentials Binding
Blue Ocean
Blue Ocean Pipeline Editor
Blue Ocean Core JS
Pipeline SCM API for Blue Ocean
Dashboard for Blue Ocean
Build With Parameters
Dynamic Extended Choice Parameter Plug-In
Dynamic Parameter Plug-in
Extended Choice Parameter
List Git Branches Parameter
Pipeline
Pipeline: Declarative
Kubernetes
Kubernetes CLI
Kubernetes Credentials
Image Tag Parameter
Active Choices
说明:第一步和第二步的一个流程说明
首先点击jenkins构建,会选择对应的分支;如果是自动触发,会获取到提交的分支(具体看jenkinsfile流程)。然后jenkins会通过k8s插件调用k8s,根据jenkinsfile中定义的模板启动一个任务pod。pod中会先拉取代码,然后执行一系列的初始化操作(init.sh)、拉取依赖包,并拷贝所有文件到build-work/hume,以便于后面构建镜像。再然后会build镜像,推送到镜像仓库。最后使用kubectl命令更新服务器pod的镜像版本,完成发布。
三、业务运行的deploy和service
1、业务pod是以sidecar模式运行的一个业务container一个日志收集container通过共享目录的形式来收集业务存在容器里面的日志
hume-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hume
  labels:
    app: hume
spec:
  selector:
    matchLabels:
      app: hume
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 1
  # minReadySeconds: 30
  template:
    metadata:
      labels:
        app: hume
    spec:
      containers:
        - name: filebeat
          image: registry.cn-beijing.aliyuncs.com/dotbalo/filebeat:7.10.2
          resources:
            requests:
              memory: "100Mi"
              cpu: "10m"
            limits:
              cpu: "200m"
              memory: "300Mi"
          imagePullPolicy: IfNotPresent
          env:
            - name: podIp
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: podName
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: podNamespace
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: podDeployName
              value: hume
            - name: TZ
              value: "Asia/Shanghai"
          securityContext:
            runAsUser: 0
          volumeMounts:
            - name: logpath
              mountPath: /data/log/
            - name: filebeatconf
              mountPath: /usr/share/filebeat/filebeat.yml
              subPath: usr/share/filebeat/filebeat.yml
        - name: hume
          image: ccr.ccs.tencentyun.com/xxxx/hume:jenkins-hume-dev-71-0f5bbb9a
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: logpath
              mountPath: /biz-code/hume/data/logs/
          env:
            - name: TZ
              value: "Asia/Shanghai"
            - name: LANG
              value: C.UTF-8
            - name: LC_ALL
              value: C.UTF-8
          livenessProbe:
            failureThreshold: 2
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            tcpSocket:
              port: 7777
            timeoutSeconds: 2
          ports:
            - containerPort: 7777
              name: web
              protocol: TCP
          readinessProbe:
            failureThreshold: 2
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            tcpSocket:
              port: 7777
            timeoutSeconds: 2
          resources:
            limits:
              cpu: 994m
              memory: 1170Mi
            requests:
              cpu: 300m
              memory: 300Mi
      dnsPolicy: ClusterFirst
      imagePullSecrets:
        - name: qcloudregistrykey
      restartPolicy: Always
      securityContext: {}
      serviceAccountName: default
      volumes:
        - name: logpath
          emptyDir: {}
        - name: filebeatconf
          configMap:
            name: filebeatconf
            items:
              - key: filebeat.yml
                path: usr/share/filebeat/filebeat.yml
hume-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: hume
  name: hume-service
  namespace: dev
spec:
  ports:
    - name: hume
      port: 7777
      protocol: TCP
      targetPort: 7777
  selector:
    app: hume
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
四、日志收集流程
日志收集流程采用filebeat + kafka + logstash + es + kibana的形式来做的。
filebeat容器以sidecar模式与业务容器绑定,收集日志并推送到kafka;在kafka中创建topic,logstash会读取kafka中的topic,消费业务日志并推送至es,然后由kibana进行展示。
下面需要文件可在如下连接查找
https://github.com/cs81/k8s-infrastructure/tree/master/efk-7.10.2/filebeat
1、安装helm
wget https://get.helm.sh/helm-v3.1.2-linux-amd64.tar.gz
tar xf helm-v3.1.2-linux-amd64.tar.gz
mv linux-amd64/ helm
cd helm/
cp -r helm /usr/local/bin/
helm version
添加两个仓库
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo add ali-stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
2、安装kafka和zookeeper
helm pull bitnami/zookeeper
tar xf zookeeper-11.1.2.tgz
cd zookeeper/
helm install zookeeper -n logging --set auth.enabled=false --set allowAnonymousLogin=true --set persistence.enabled=false .
cd ../
helm pull bitnami/kafka
tar xf kafka-20.0.6.tgz
cd kafka/
helm install kafka -n logging --set zookeeper.enabled=false --set replicaCount=1 --set externalZookeeper.servers=zookeeper --set persistence.enabled=false .
kubectl get pod -n logging
3、安装logstash
filebeat-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeatconf
data:
  filebeat.yml: |-
    filebeat.inputs:
      - input_type: log
        paths:
          - /data/log/*/*.log
        tail_files: true
        fields:
          pod_name: '${podName}'
          pod_ip: '${podIp}'
          pod_deploy_name: '${podDeployName}'
          pod_namespace: '${podNamespace}'
    output.kafka:
      hosts: ["kafka.logging:9092"]
      topic: "filebeat-sidecar"
      codec.json:
        pretty: false
      keep_alive: 30s
logstash-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    # all input will come from filebeat, no local logs
    input {
      kafka {
        enable_auto_commit => true
        auto_commit_interval_ms => "1000"
        bootstrap_servers => "kafka:9092"
        topics => ["filebeat-sidecar"]
        type => ["filebeat-sidecar"]
        codec => multiline {
          pattern => "^\d{4}-"
          negate => true
          what => "previous"
        }
      }
    }
    output {
      stdout { codec => rubydebug }
      if [type] == "filebeat-sidecar" {
        elasticsearch {
          hosts => ["172.16.64.12:6123"]
          index => "filebeat-%{+YYYY.MM.dd}"
        }
      } else {
        elasticsearch {
          hosts => ["172.16.64.12:6123"]
          index => "other-input-%{+YYYY.MM.dd}"
        }
      }
    }
logstash-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-deployment
spec:
  selector:
    matchLabels:
      app: logstash
  replicas: 1
  template:
    metadata:
      labels:
        app: logstash
    spec:
      containers:
        - name: logstash
          image: registry.cn-beijing.aliyuncs.com/dotbalo/logstash:7.10.1
          ports:
            - containerPort: 5044
          volumeMounts:
            - name: config-volume
              mountPath: /usr/share/logstash/config
            - name: logstash-pipeline-volume
              mountPath: /usr/share/logstash/pipeline
      volumes:
        - name: config-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.yml
                path: logstash.yml
        - name: logstash-pipeline-volume
          configMap:
            name: logstash-configmap
            items:
              - key: logstash.conf
                path: logstash.conf
logstash-service.yaml
kind: Service
apiVersion: v1
metadata:
  name: logstash-service
spec:
  selector:
    app: logstash
  ports:
    - protocol: TCP
      port: 5044
      targetPort: 5044
  type: ClusterIP
然后es和kibana因为之前装过,并且是使用docker安装的,这里就不写了。上面logstash配置里面注意填写对应的es地址。整个流程基本就是这样,后续再添加监控及其他。