apiVersion: v1
description: HBase Helm chart for deploying the distributed version of warp10
name: hbase
version: 1.0.4
appVersion: 2.7.3
hbase.version: 1.4.2
home: https://hadoop.apache.org/
sources:
- https://github.com/apache/hadoop
icon: http://hadoop.apache.org/images/hadoop-logo.jpg
maintainers:
- name: Pierre Zemb
email: pierre.zemb.isen@gmail.com
## pull images

```bash
docker pull danisla/hadoop:2.7.3 && docker tag danisla/hadoop:2.7.3 registry.cn-qingdao.aliyuncs.com/wod/hadoop:2.7.3 && \
docker push registry.cn-qingdao.aliyuncs.com/wod/hadoop:2.7.3
docker pull 33098876/hbase:latest && docker tag 33098876/hbase:latest registry.cn-qingdao.aliyuncs.com/wod/hbase:latest && \
docker push registry.cn-qingdao.aliyuncs.com/wod/hbase:latest
```
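The retagged images are what the `image` and `hbaseimage.repository`/`hbaseimage.tag` settings in `values.yaml` point at, so adjust those values if you push to a different registry.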
## install
```bash
# install
# Install ZooKeeper first (see "Getting started" below), then install HBase:
helm install \
/etc/kubernetes/helm/hbase \
--name=hbase \
--namespace=default \
-f /etc/kubernetes/helm/hbase/values-overrides.yaml
# uninstall
helm delete hbase --purge
# update
helm upgrade hbase /etc/kubernetes/helm/hbase \
-f /etc/kubernetes/helm/hbase/values-overrides.yaml
```

## Getting started

The chart's default values expect a ZooKeeper service named myzk-zookeeper, which the myzk release below provides:

```bash
helm install --name myzk incubator/zookeeper --set servers=1,heap="1G"
helm del --purge hbase; helm install . --name hbase
```
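Once the pods are running, you can sanity-check the cluster from the master pod. A minimal sketch, assuming the release is named hbase so the master StatefulSet pod is `hbase-hbase-master-0`:

```bash
# Ask the master for a one-line cluster summary (regionserver count, regions, load)
kubectl exec -i hbase-hbase-master-0 -- /opt/hbase/bin/hbase shell <<'EOF'
status 'simple'
EOF

# Or port-forward the master info UI and browse http://localhost:16010
kubectl port-forward hbase-hbase-master-0 16010:16010
```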
1. You can check the status of HDFS by running this command:
kubectl exec -n {{ .Release.Namespace }} -it {{ template "hbase.name" . }}-hdfs-nn-0 -- /usr/local/hadoop/bin/hdfs dfsadmin -report
2. You can list the yarn nodes by running this command:
kubectl exec -n {{ .Release.Namespace }} -it {{ template "hbase.name" . }}-yarn-rm-0 -- /usr/local/hadoop/bin/yarn node -list
3. Create a port-forward to the yarn resource manager UI:
kubectl port-forward -n {{ .Release.Namespace }} {{ template "hbase.name" . }}-yarn-rm-0 8088:8088
Then open the ui in your browser:
open http://localhost:8088
4. You can run included hadoop tests like this:
kubectl exec -n {{ .Release.Namespace }} -it {{ template "hbase.name" . }}-yarn-nm-0 -- /usr/local/hadoop/bin/hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-{{ .Values.hadoopVersion }}-tests.jar TestDFSIO -write -nrFiles 5 -fileSize 128MB -resFile /tmp/TestDFSIOwrite.txt
5. You can list the mapreduce jobs like this:
kubectl exec -n {{ .Release.Namespace }} -it {{ template "hbase.name" . }}-yarn-rm-0 -- /usr/local/hadoop/bin/mapred job -list
6. This chart can also be used with the zeppelin chart:
helm install --namespace {{ .Release.Namespace }} --set hadoop.useConfigMap=true,hadoop.configMapName={{ template "hbase.name" . }} stable/zeppelin
7. You can scale the number of yarn nodes like this:
helm upgrade {{ .Release.Name }} --set yarn.nodeManager.replicas=4 stable/hadoop
Make sure to update the values.yaml if you want to make this permanent (see the sketch below).
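For example, to persist the scale change, set it in the override file used at install time and upgrade with it (a sketch, assuming the chart still exposes yarn.nodeManager.replicas as in step 7):
# after setting yarn.nodeManager.replicas: 4 in values-overrides.yaml:
helm upgrade {{ .Release.Name }} /etc/kubernetes/helm/hbase -f /etc/kubernetes/helm/hbase/values-overrides.yaml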
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "hadoop.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "hbase.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
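{{/*
For reference: with no nameOverride set, "hbase.name" resolves to the chart
name "hbase", so resources below render as e.g. "hbase-hbase-master".
*/}}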
apiVersion: v1
kind: ConfigMap
metadata:
name: hbase-configmap
labels:
app: {{ template "hbase.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
data:
hbase-site.xml: |
<configuration>
<property>
<name>hbase.master</name>
<value>{{ template "hbase.name" . }}-hbase-master:16010</value>
</property>
<property>
<name>zookeeper.znode.parent</name>
<value>/hbase</value>
</property>
<property>
<name>hbase.rootdir</name>
<value>hdfs://{{ template "hbase.name" . }}-hdfs-nn:9000/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>{{ .Values.hbase.zookeeper.quorum }}</value>
</property>
</configuration>
bootstrap.sh: |
#!/bin/bash
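# Default HADOOP_PREFIX if unset, then load the Hadoop environment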
: ${HADOOP_PREFIX:=/usr/local/hadoop}
. $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh
# Directory to find config artifacts
CONFIG_DIR="/tmp/hbase-config"
# Copy config files from volume mount
for f in hbase-site.xml; do
if [[ -e ${CONFIG_DIR}/$f ]]; then
cp ${CONFIG_DIR}/$f /opt/hbase/conf/$f
else
echo "ERROR: Could not find $f in $CONFIG_DIR"
exit 1
fi
done
# installing libraries if any - (resource urls added comma separated to the ACP system variable)
cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd -
if [[ "${HOSTNAME}" =~ "hbase-master" ]]; then
/opt/hbase/bin/hbase-daemon.sh --config /opt/hbase/conf start master
fi
if [[ "${HOSTNAME}" =~ "hbase-rs" ]]; then
/opt/hbase/bin/hbase-daemon.sh --config /opt/hbase/conf start regionserver
fi
tail -f /opt/hbase/logs/*
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "hbase.name" . }}-hbase-master
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hbase-master
spec:
selector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hbase-master
minAvailable: 1 # FIXME: HA?
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: {{ template "hbase.name" . }}-hbase-master
annotations:
checksum/config: {{ include (print $.Template.BasePath "/hadoop-configmap.yaml") . | sha256sum }}
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hbase-master
spec:
serviceName: {{ template "hbase.name" . }}-hbase-master
replicas: {{ .Values.hbase.master.replicas }}
template:
metadata:
labels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hbase-master
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: {{ template "hadoop.name" . }}
terminationGracePeriodSeconds: 0
containers:
- name: hbase-master
image: {{ .Values.hbaseimage.repository }}:{{ .Values.hbaseimage.tag }}
imagePullPolicy: {{ .Values.hbaseimage.pullPolicy }}
env:
- name: HBASE_SERVER_TYPE
value: hbase-master
- name: HDFS_PATH
value: {{ template "hbase.name" . }}-hdfs-nn:9000
- name: ZOOKEEPER_SERVICE_LIST
value: {{ .Values.hbase.env.zookeeperServiceList }}
- name: ZOOKEEPER_PORT
value: "{{ .Values.hbase.env.zookeeperPort }}"
- name: ZNODE_PARENT
value: hbase
ports:
- name: port1
containerPort: 16000
protocol: TCP
- name: port2
containerPort: 16010
protocol: TCP
command:
- "/bin/bash"
- "/opt/hbase/bin/start-k8s-hbase.sh"
resources:
{{ toYaml .Values.hbase.master.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
name: {{ template "hbase.name" . }}-hbase-master
labels:
app: {{ template "hbase.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hbase-master
spec:
ports:
- port: 8080
name: restapi
- port: 9090
name: thriftapi
- port: 16000
name: master
- port: 16010
name: masterinfo
clusterIP: "None"
selector:
app: {{ template "hbase.name" . }}
release: {{ .Release.Name }}
component: hbase-master
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "hbase.name" . }}-hbase-rs
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hbase-rs
spec:
selector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hbase-rs
minAvailable: 1 # FIXME: HA?
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: {{ template "hbase.name" . }}-hbase-rs
annotations:
checksum/config: {{ include (print $.Template.BasePath "/hadoop-configmap.yaml") . | sha256sum }}
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hbase-rs
spec:
serviceName: {{ template "hbase.name" . }}-hbase-rs
replicas: {{ .Values.hbase.region.replicas }}
template:
metadata:
labels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hbase-rs
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name | quote }}
component: hdfs-nn
terminationGracePeriodSeconds: 0
containers:
- name: hbase-rs
image: "{{ .Values.hbaseimage.repository }}:{{ .Values.hbaseimage.tag }}"
imagePullPolicy: {{ .Values.hbaseimage.pullPolicy }}
env:
- name: HBASE_SERVER_TYPE
value: hbase-regionserver
- name: HDFS_PATH
value: {{ template "hbase.name" . }}-hdfs-nn:9000
- name: ZOOKEEPER_SERVICE_LIST
value: {{ .Values.hbase.env.zookeeperServiceList }}
- name: ZOOKEEPER_PORT
value: "{{ .Values.hbase.env.zookeeperPort }}"
- name: ZNODE_PARENT
value: hbase
ports:
- name: port11
containerPort: 16020
protocol: TCP
- name: port12
containerPort: 16030
protocol: TCP
command:
- "/bin/bash"
- "/opt/hbase/bin/start-k8s-hbase.sh"
resources:
{{ toYaml .Values.hbase.region.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
name: {{ template "hbase.name" . }}-hbase-rs
labels:
app: {{ template "hbase.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hbase-rs
spec:
clusterIP: "None"
ports:
- port: 16020
name: rs
- port: 16030
name: rsinfo
selector:
app: {{ template "hbase.name" . }}
release: {{ .Release.Name }}
component: hbase-rs
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "hbase.name" . }}-hdfs-dn
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hdfs-dn
spec:
selector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hdfs-dn
minAvailable: {{ .Values.hdfs.dataNode.pdbMinAvailable }}
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: {{ template "hbase.name" . }}-hdfs-dn
annotations:
checksum/config: {{ include (print $.Template.BasePath "/hadoop-configmap.yaml") . | sha256sum }}
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hdfs-dn
spec:
serviceName: {{ template "hbase.name" . }}-hdfs-dn
replicas: {{ .Values.hdfs.dataNode.replicas }}
template:
metadata:
labels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hdfs-dn
spec:
affinity:
podAntiAffinity:
{{- if eq .Values.antiAffinity "hard" }}
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name | quote }}
component: hdfs-dn
{{- else if eq .Values.antiAffinity "soft" }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 5
podAffinityTerm:
topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name | quote }}
component: hdfs-dn
{{- end }}
terminationGracePeriodSeconds: 0
containers:
- name: hdfs-dn
image: {{ .Values.image }}
imagePullPolicy: {{ .Values.imagePullPolicy }}
command:
- "/bin/bash"
- "/tmp/hadoop-config/bootstrap.sh"
- "-d"
resources:
{{ toYaml .Values.hdfs.dataNode.resources | indent 10 }}
readinessProbe:
httpGet:
path: /
port: 50075
initialDelaySeconds: 5
timeoutSeconds: 2
livenessProbe:
httpGet:
path: /
port: 50075
initialDelaySeconds: 10
timeoutSeconds: 2
volumeMounts:
- name: hadoop-config
mountPath: /tmp/hadoop-config
- name: dfs
mountPath: /root/hdfs/datanode
volumes:
- name: hadoop-config
configMap:
name: hadoop-configmap
{{- if .Values.persistence.dataNode.enabled }}
volumeClaimTemplates:
- metadata:
name: dfs
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hdfs-dn
spec:
accessModes:
- {{ .Values.persistence.dataNode.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.dataNode.size | quote }}
{{- if .Values.persistence.dataNode.storageClass }}
{{- if (eq "-" .Values.persistence.dataNode.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.dataNode.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
name: {{ template "hbase.name" . }}-hdfs-dn
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hdfs-dn
spec:
clusterIP: "None"
ports:
- name: dfs
port: 9000
protocol: TCP
- name: webhdfs
port: 50075
selector:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hdfs-dn
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "hbase.name" . }}-hdfs-nn
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hdfs-nn
spec:
selector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hdfs-nn
minAvailable: {{ .Values.hdfs.nameNode.pdbMinAvailable }}
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: {{ template "hbase.name" . }}-hdfs-nn
annotations:
checksum/config: {{ include (print $.Template.BasePath "/hadoop-configmap.yaml") . | sha256sum }}
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hdfs-nn
spec:
serviceName: {{ template "hbase.name" . }}-hdfs-nn
replicas: 1
template:
metadata:
labels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hdfs-nn
spec:
affinity:
podAntiAffinity:
{{- if eq .Values.antiAffinity "hard" }}
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name | quote }}
component: hdfs-nn
{{- else if eq .Values.antiAffinity "soft" }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 5
podAffinityTerm:
topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name | quote }}
component: hdfs-nn
{{- end }}
terminationGracePeriodSeconds: 0
containers:
- name: hdfs-nn
image: {{ .Values.image }}
imagePullPolicy: {{ .Values.imagePullPolicy }}
command:
- "/bin/bash"
- "/tmp/hadoop-config/bootstrap.sh"
- "-d"
resources:
{{ toYaml .Values.hdfs.nameNode.resources | indent 10 }}
readinessProbe:
httpGet:
path: /
port: 50070
initialDelaySeconds: 5
timeoutSeconds: 2
livenessProbe:
httpGet:
path: /
port: 50070
initialDelaySeconds: 10
timeoutSeconds: 2
volumeMounts:
- name: hadoop-config
mountPath: /tmp/hadoop-config
- name: dfs
mountPath: /root/hdfs/namenode
volumes:
- name: hadoop-config
configMap:
name: hadoop-configmap
{{- if .Values.persistence.nameNode.enabled }}
volumeClaimTemplates:
- metadata:
name: dfs
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hdfs-nn
spec:
accessModes:
- {{ .Values.persistence.nameNode.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.nameNode.size | quote }}
{{- if .Values.persistence.nameNode.storageClass }}
{{- if (eq "-" .Values.persistence.nameNode.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.nameNode.storageClass }}"
{{- end }}
{{- end }}
{{- end }}
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
name: {{ template "hbase.name" . }}-hdfs-nn
labels:
app: {{ template "hadoop.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
component: hdfs-nn
spec:
clusterIP: "None"
ports:
- name: dfs
port: 9000
protocol: TCP
- name: webhdfs
port: 50070
selector:
app: {{ template "hadoop.name" . }}
release: {{ .Release.Name }}
component: hdfs-nn
#!/bin/bash
# Calculates cluster resources given a percentage based on what is currently allocatable.
# Related issue to programmatic resource query: https://github.com/kubernetes/kubernetes/issues/27404
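# Example (assumed invocation): pass the emitted --set flags straight to helm, e.g.
#   helm install stable/hadoop --name hadoop $(./calc_resources.sh 50)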
TARGET_PCT=$1
[[ -z "${TARGET_PCT}" ]] && echo "USAGE: $0 <target percent>" && exit 1
NODES=$(kubectl get nodes -o jsonpath='{.items..metadata.name}')
NUM_NODES=$(echo "${NODES}" | tr ' ' '\n' | wc -l | xargs echo -n)
TOTAL_CPU=$(kubectl get nodes -o jsonpath='{.items[0].status.allocatable.cpu}')
# Convert CPU to nanocores
TOTAL_CPU=$(bc <<< "${TOTAL_CPU} * 1000000000")
# Start kube proxy to get to node stats summary api
kubectl proxy >/dev/null 2>&1 &
export kproxy=%1
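# "%1" is a bash jobspec for the backgrounded kubectl proxy; the builtin kill in finish() resolves it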
# Cleanup kproxy on exit
function finish {
kill $kproxy
}
trap finish EXIT
# Wait for proxy
(while [[ $count -lt 5 && -z "$(curl -s localhost:8001/api/v1)" ]]; do ((count=count+1)) ; sleep 2; done && [[ $count -lt 5 ]])
[[ $? -ne 0 ]] && echo "ERROR: could not start kube proxy to fetch node stats summary" && exit 1
declare -a NODE_STATS
declare -a AVAIL_CPU
declare -a AVAIL_MEM
i=0
for NODE in ${NODES}; do
NODE_STATS[$i]=$(curl -sf localhost:8001/api/v1/proxy/nodes/${NODE}:10255/stats/summary)
[[ $? -ne 0 ]] && echo "ERROR: Could not get stats summary for node: ${NODE}" && exit 1
# Get available memory
AVAIL_MEM[$i]=$(jq '.node.memory.availableBytes' <<< "${NODE_STATS[$i]}")
AVAIL_MEM[$i]=$(bc -l <<< "scale=0; ${AVAIL_MEM[$i]}/1024/1024")
# Derive available CPU
USED_CPU=$(jq '.node.cpu.usageNanoCores' <<< "${NODE_STATS[$i]}")
AVAIL_CPU[$i]=$(bc -l <<< "scale=2; (${TOTAL_CPU} - ${USED_CPU})/1000000")
((i=i+1))
done
# Optimize per the min resources on any node.
CORES=$(echo "${AVAIL_CPU[*]}" | tr ' ' '\n' | sort -n | head -1)
MEMORY=$(echo "${AVAIL_MEM[*]}" | tr ' ' '\n' | sort -n | head -1)
# Subtract resources used by the chart. Note these are default values.
HADOOP_SHARE_CPU=400
CORES=$(bc -l <<< "scale=0; (${CORES} - ${HADOOP_SHARE_CPU})")
HADOOP_SHARE_MEM=1024
MEMORY=$(bc -l <<< "scale=0; (${MEMORY} - ${HADOOP_SHARE_MEM})")
CPU_PER_NODE=$(bc -l <<< "scale=2; (${CORES} * ${TARGET_PCT}/100)")
MEM_PER_NODE=$(bc -l <<< "scale=2; (${MEMORY} * ${TARGET_PCT}/100)")
# Round cpu to lower mCPU
CPU_PER_NODE=$(bc -l <<< "scale=0; ${CPU_PER_NODE} - (${CPU_PER_NODE} % 10)")
# Round mem to lower Mi
MEM_PER_NODE=$(bc -l <<< "scale=0; ${MEM_PER_NODE} - (${MEM_PER_NODE} % 100)")
[[ "${CPU_PER_NODE/%.*/}" -lt 100 ]] && echo "WARN: Insufficient available CPU for scheduling" >&2
[[ "${MEM_PER_NODE/%.*/}" -lt 2048 ]] && MEM_PER_NODE=2048.0 && echo "WARN: Insufficient available Memory for scheduling" >&2
CPU_LIMIT=${CPU_PER_NODE/%.*/m}
MEM_LIMIT=${MEM_PER_NODE/%.*/Mi}
echo -n "--set yarn.nodeManager.replicas=${NUM_NODES},yarn.nodeManager.resources.requests.cpu=${CPU_LIMIT},yarn.nodeManager.resources.requests.memory=${MEM_LIMIT},yarn.nodeManager.resources.limits.cpu=${CPU_LIMIT},yarn.nodeManager.resources.limits.memory=${MEM_LIMIT}"
# The base hadoop image to use for all components.
# See this repo for image build details: https://github.com/Comcast/kube-yarn/tree/master/image
image: reg.local:5000/wod/hadoop:2.7.3-test
imagePullPolicy: IfNotPresent
hbaseimage:
repository: reg.local:5000/wod/hbase
tag: latest
pullPolicy: Always
hbaseVersion: 1.4.9
# The version of the hadoop libraries being used in the image.
hadoopVersion: 2.7.3
# Select antiAffinity as either hard or soft; the default is hard
antiAffinity: "soft"
hdfs:
nameNode:
pdbMinAvailable: 1
resources:
requests:
memory: "2048Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
dataNode:
replicas: 3
pdbMinAvailable: 3
resources:
requests:
memory: "2048Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
persistence:
nameNode:
enabled: true
storageClass: "rook-ceph-block"
accessMode: ReadWriteOnce
size: 50Gi
dataNode:
enabled: true
storageClass: "rook-ceph-block"
accessMode: ReadWriteOnce
size: 20Gi
hbase:
env:
zookeeperServiceList: myzk-zookeeper
zookeeperPort: "2181"
master:
replicas: 1
resources:
requests:
memory: "2048Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
region:
replicas: 3
resources:
requests:
memory: "2048Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
zookeeper:
quorum: "myzk-zookeeper"
# The base hadoop image to use for all components.
# See this repo for image build details: https://github.com/Comcast/kube-yarn/tree/master/image
image: danisla/hadoop:2.7.3
imagePullPolicy: IfNotPresent
hbaseimage:
repository: 33098876/hbase
tag: latest
pullPolicy: Always
hbaseVersion: 1.4.9
# The version of the hadoop libraries being used in the image.
hadoopVersion: 2.7.3
# Select antiAffinity as either hard or soft; the default is hard
antiAffinity: "soft"
hdfs:
nameNode:
pdbMinAvailable: 1
resources:
requests:
memory: "2048Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
dataNode:
replicas: 3
pdbMinAvailable: 3
resources:
requests:
memory: "2048Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
persistence:
nameNode:
enabled: false
storageClass: "-"
accessMode: ReadWriteOnce
size: 50Gi
dataNode:
enabled: false
storageClass: "-"
accessMode: ReadWriteOnce
size: 200Gi
hbase:
env:
zookeeperServiceList: myzk-zookeeper
zookeeperPort: "2181"
master:
replicas: 1
resources:
requests:
memory: "2048Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
region:
replicas: 3
resources:
requests:
memory: "2048Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
zookeeper:
quorum: "myzk-zookeeper"