diff --git a/Chart.yaml b/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c130d590edf721ef1002761db6935f13ede10733 --- /dev/null +++ b/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +appVersion: 1.1.1 +description: Apache Storm is a free and open source distributed realtime computation + system. +home: http://storm.apache.org/ +icon: http://storm.apache.org/images/logo.png +keywords: +- storm +- zookeeper +maintainers: +- email: jorwalk@gmail.com + name: jorwalk +- email: stackedsax@users.noreply.github.com + name: stackedsax +name: storm +sources: +- https://github.com/apache/storm +version: 1.0.2 diff --git a/OWNERS b/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..e6d4f64ea2e4f3511024f04eda0d115a184bbc0f --- /dev/null +++ b/OWNERS @@ -0,0 +1,6 @@ +approvers: +- jorwalk +- stackedsax +reviewers: +- jorwalk +- stackedsax diff --git a/README.md b/README.md index 458081b32efc3e248f21e67d2cd3f00014062d7e..5209bb0e3e5ec56e0634cbeaa15f598d35ff5927 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,75 @@ -# storm +## Storm +[Apache Storm](http://storm.apache.org/) is a free and open source distributed realtime computation system. Storm makes it easy to reliably process unbounded streams of data, doing for realtime processing what Hadoop did for batch processing. Storm is simple, can be used with any programming language, and is a lot of fun to use! -storm \ No newline at end of file +### Prerequisites + +This example assumes you have a Kubernetes cluster installed and +running, and that you have installed the ```kubectl``` command line +tool somewhere in your path. Please see the [getting +started](https://kubernetes.io/docs/tutorials/kubernetes-basics/) for installation +instructions for your platform. 
+ +### Installing the Chart + +To install the chart with the release name `my-storm`: + +```bash +$ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator +$ helm install --name my-storm incubator/storm +``` + +## Configuration + +The following table lists the configurable parameters of the Storm chart and their default values. + +### Nimbus +| Parameter | Description | Default | +| --------------------------------- | --------------------------- | ------------------- | +| `nimbus.replicaCount` | Number of replicas | 1 | +| `nimbus.image.repository` | Container image name | storm | +| `nimbus.image.tag` | Container image version | 1.1.1 | +| `nimbus.image.pullPolicy` | The default pull policy | IfNotPresent | +| `nimbus.service.name` | Service name | nimbus | +| `nimbus.service.type` | Service Type | ClusterIP | +| `nimbus.service.port` | Service Port | 6627 | +| `nimbus.resources.limits.cpu` | Compute resources | 100m | + +### Supervisor +| Parameter | Description | Default | +| --------------------------------- | --------------------------- | ------------------- | +| `supervisor.replicaCount` | Number of replicas | 3 | +| `supervisor.image.repository` | Container image name | storm | +| `supervisor.image.tag` | Container image version | 1.1.1 | +| `supervisor.image.pullPolicy` | The default pull policy | IfNotPresent | +| `supervisor.service.name` | Service Name | supervisor | +| `supervisor.service.port` | Service Port | 6700 | +| `supervisor.resources.limits.cpu` | Compute Resouces | 200m | + +### User Interface +| Parameter | Description | Default | +| --------------------------------- | --------------------------- | ------------------- | +| `ui.enabled` | Enable the UI | true | +| `ui.replicaCount` | Number of replicas | 1 | +| `ui.image.repository` | Container image name | storm | +| `ui.image.tag` | UI image version | 1.1.1 | +| `ui.image.pullPolicy` | The default pull policy | IfNotPresent | +| `ui.service.type` | UI Service Type 
| ClusterIP | +| `ui.service.name` | UI service name | ui | +| `ui.service.port` | UI service port | 8080 | +| `ui.resources.limits.cpu` | Compute resources | 100m | + +### Zookeeper +| Parameter | Description | Default | +| --------------------------------- | --------------------------- | ------------------- | +| `zookeeper.enabled` | Enable Zookeeper | true | +| `zookeeper.service.name` | Service name | zookeeper | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml incubator/storm +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/charts/zookeeper/.helmignore b/charts/zookeeper/.helmignore new file mode 100644 index 0000000000000000000000000000000000000000..f0c13194444163d1cba5c67d9e79231a62bc8f44 --- /dev/null +++ b/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/zookeeper/Chart.yaml b/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cf5e55bfb163ac8c2f4031c7913dd5225f05aad3 --- /dev/null +++ b/charts/zookeeper/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +appVersion: 3.4.10 +description: Centralized service for maintaining configuration information, naming, + providing distributed synchronization, and providing group services. 
+home: https://zookeeper.apache.org/ +icon: https://zookeeper.apache.org/images/zookeeper_small.gif +maintainers: +- email: lachlan.evenson@microsoft.com + name: lachie83 +- email: owensk@google.com + name: kow3ns +name: zookeeper +sources: +- https://github.com/apache/zookeeper +- https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper +version: 1.3.1 diff --git a/charts/zookeeper/OWNERS b/charts/zookeeper/OWNERS new file mode 100644 index 0000000000000000000000000000000000000000..dd9facde2a100e678a0c75874cee1ec4bab0deed --- /dev/null +++ b/charts/zookeeper/OWNERS @@ -0,0 +1,6 @@ +approvers: +- lachie83 +- kow3ns +reviewers: +- lachie83 +- kow3ns diff --git a/charts/zookeeper/README.md b/charts/zookeeper/README.md new file mode 100644 index 0000000000000000000000000000000000000000..22bbac49dc6f1420458abff411c676139c2f0b9f --- /dev/null +++ b/charts/zookeeper/README.md @@ -0,0 +1,140 @@ +# incubator/zookeeper + +This helm chart provides an implementation of the ZooKeeper [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) found in Kubernetes Contrib [Zookeeper StatefulSet](https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper). + +## Prerequisites +* Kubernetes 1.6+ +* PersistentVolume support on the underlying infrastructure +* A dynamic provisioner for the PersistentVolumes +* A familiarity with [Apache ZooKeeper 3.4.x](https://zookeeper.apache.org/doc/current/) + +## Chart Components +This chart will do the following: + +* Create a fixed size ZooKeeper ensemble using a [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/). +* Create a [PodDisruptionBudget](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-disruption-budget/) so kubectl drain will respect the Quorum size of the ensemble. +* Create a [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/) to control the domain of the ZooKeeper ensemble. 
+* Create a Service configured to connect to the available ZooKeeper instance on the configured client port. +* Optionally apply a [Pod Anti-Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) to spread the ZooKeeper ensemble across nodes. +* Optionally start JMX Exporter and Zookeeper Exporter containers inside Zookeeper pods. +* Optionally create a job which creates Zookeeper chroots (e.g. `/kafka1`). + +## Installing the Chart +You can install the chart with the release name `zookeeper` as below. + +```console +$ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator +$ helm install --name zookeeper incubator/zookeeper +``` + +If you do not specify a name, helm will select a name for you. + +### Installed Components +You can use `kubectl get` to view all of the installed components. + +```console{%raw} +$ kubectl get all -l app=zookeeper +NAME: zookeeper +LAST DEPLOYED: Wed Apr 11 17:09:48 2018 +NAMESPACE: default +STATUS: DEPLOYED + +RESOURCES: +==> v1beta1/PodDisruptionBudget +NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE +zookeeper N/A 1 1 2m + +==> v1/Service +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +zookeeper-headless ClusterIP None 2181/TCP,3888/TCP,2888/TCP 2m +zookeeper ClusterIP 10.98.179.165 2181/TCP 2m + +==> v1beta1/StatefulSet +NAME DESIRED CURRENT AGE +zookeeper 3 3 2m +``` + +1. `statefulsets/zookeeper` is the StatefulSet created by the chart. +1. `po/zookeeper-<0|1|2>` are the Pods created by the StatefulSet. Each Pod has a single container running a ZooKeeper server. +1. `svc/zookeeper-headless` is the Headless Service used to control the network domain of the ZooKeeper ensemble. +1. `svc/zookeeper` is a Service that can be used by clients to connect to an available ZooKeeper server. + +## Configuration +You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
+ +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install --name my-release -f values.yaml incubator/zookeeper +``` + +## Default Values + +- You can find all user-configurable settings, their defaults and commentary about them in [values.yaml](values.yaml). + +## Deep Dive + +## Image Details +The image used for this chart is based on Ubuntu 16.04 LTS. This image is larger than Alpine or BusyBox, but it provides glibc, rather than ulibc or mucl, and a JVM release that is built against it. You can easily convert this chart to run against a smaller image with a JVM that is built against that image's libc. However, as far as we know, no Hadoop vendor supports, or has verified, ZooKeeper running on such a JVM. + +## JVM Details +The Java Virtual Machine used for this chart is the OpenJDK JVM 8u111 JRE (headless). + +## ZooKeeper Details +The ZooKeeper version is the latest stable version (3.4.10). The distribution is installed into /opt/zookeeper-3.4.10. This directory is symbolically linked to /opt/zookeeper. Symlinks are created to simulate a rpm installation into /usr. + +## Failover +You can test failover by killing the leader. 
Insert a key: +```console +$ kubectl exec zookeeper-0 -- /opt/zookeeper/bin/zkCli.sh create /foo bar; +$ kubectl exec zookeeper-2 -- /opt/zookeeper/bin/zkCli.sh get /foo; +``` + +Watch existing members: +```console +$ kubectl run --attach bbox --image=busybox --restart=Never -- sh -c 'while true; do for i in 0 1 2; do echo zk-${i} $(echo stats | nc -${i}.:2181 | grep Mode); sleep 1; done; done'; + +zk-2 Mode: follower +zk-0 Mode: follower +zk-1 Mode: leader +zk-2 Mode: follower +``` + +Delete Pods and wait for the StatefulSet controller to bring them back up: +```console +$ kubectl delete po -l app=zookeeper +$ kubectl get po --watch-only +NAME READY STATUS RESTARTS AGE +zookeeper-0 0/1 Running 0 35s +zookeeper-0 1/1 Running 0 50s +zookeeper-1 0/1 Pending 0 0s +zookeeper-1 0/1 Pending 0 0s +zookeeper-1 0/1 ContainerCreating 0 0s +zookeeper-1 0/1 Running 0 19s +zookeeper-1 1/1 Running 0 40s +zookeeper-2 0/1 Pending 0 0s +zookeeper-2 0/1 Pending 0 0s +zookeeper-2 0/1 ContainerCreating 0 0s +zookeeper-2 0/1 Running 0 19s +zookeeper-2 1/1 Running 0 41s +``` + +Check the previously inserted key: +```console +$ kubectl exec zookeeper-1 -- /opt/zookeeper/bin/zkCli.sh get /foo +ionid = 0x354887858e80035, negotiated timeout = 30000 + +WATCHER:: + +WatchedEvent state:SyncConnected type:None path:null +bar +``` + +## Scaling +ZooKeeper can not be safely scaled in versions prior to 3.5.x. This chart currently uses 3.4.x. There are manual procedures for scaling a 3.4.x ensemble, but as noted in the [ZooKeeper 3.5.2 documentation](https://zookeeper.apache.org/doc/r3.5.2-alpha/zookeeperReconfig.html) these procedures require a rolling restart, are known to be error prone, and often result in a data loss. + +While ZooKeeper 3.5.x does allow for dynamic ensemble reconfiguration (including scaling membership), the current status of the release is still alpha, and 3.5.x is therefore not recommended for production use. 
+ +## Limitations +* StatefulSet and PodDisruptionBudget are beta resources. +* Only supports storage options that have backends for persistent volume claims. diff --git a/charts/zookeeper/templates/NOTES.txt b/charts/zookeeper/templates/NOTES.txt new file mode 100644 index 0000000000000000000000000000000000000000..6c5da85622fafb90ed80fbe8d1c961b92dad8d1d --- /dev/null +++ b/charts/zookeeper/templates/NOTES.txt @@ -0,0 +1,7 @@ +Thank you for installing ZooKeeper on your Kubernetes cluster. More information +about ZooKeeper can be found at https://zookeeper.apache.org/doc/current/ + +Your connection string should look like: + {{ template "zookeeper.fullname" . }}-0.{{ template "zookeeper.fullname" . }}-headless:{{ .Values.service.ports.client.port }},{{ template "zookeeper.fullname" . }}-1.{{ template "zookeeper.fullname" . }}-headless:{{ .Values.service.ports.client.port }},... + +You can also use the client service {{ template "zookeeper.fullname" . }}:{{ .Values.service.ports.client.port }} to connect to an available ZooKeeper server. diff --git a/charts/zookeeper/templates/_helpers.tpl b/charts/zookeeper/templates/_helpers.tpl new file mode 100644 index 0000000000000000000000000000000000000000..ae36115b1f66f5a1887cac5fe044f1f390c70ab3 --- /dev/null +++ b/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "zookeeper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "zookeeper.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "zookeeper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/zookeeper/templates/config-jmx-exporter.yaml b/charts/zookeeper/templates/config-jmx-exporter.yaml new file mode 100644 index 0000000000000000000000000000000000000000..79905e57b384f3725dea23c36fb34319db4d854a --- /dev/null +++ b/charts/zookeeper/templates/config-jmx-exporter.yaml @@ -0,0 +1,19 @@ +{{- if .Values.exporters.jmx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-jmx-exporter + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + config.yml: |- + hostPort: 127.0.0.1:{{ .Values.env.JMXPORT }} + lowercaseOutputName: {{ .Values.exporters.jmx.config.lowercaseOutputName }} + rules: +{{ .Values.exporters.jmx.config.rules | toYaml | indent 6 }} + ssl: false + startDelaySeconds: {{ .Values.exporters.jmx.config.startDelaySeconds }} +{{- end }} diff --git a/charts/zookeeper/templates/job-chroots.yaml b/charts/zookeeper/templates/job-chroots.yaml new file mode 100644 index 0000000000000000000000000000000000000000..29ec3bd063eac16390ccef54df67771fe701933b --- /dev/null +++ b/charts/zookeeper/templates/job-chroots.yaml @@ -0,0 +1,65 @@ +{{- if .Values.jobs.chroots.enabled }} +{{- $root := . 
}} +{{- $job := .Values.jobs.chroots }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "zookeeper.fullname" . }}-chroots + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: jobs + job: chroots +spec: + activeDeadlineSeconds: {{ $job.activeDeadlineSeconds }} + backoffLimit: {{ $job.backoffLimit }} + completions: {{ $job.completions }} + parallelism: {{ $job.parallelism }} + template: + metadata: + labels: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} + component: jobs + job: chroots + spec: + restartPolicy: {{ $job.restartPolicy }} +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" +{{- end }} + containers: + - name: main + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - -o + - pipefail + - -euc + {{- $port := .Values.service.ports.client.port }} + - > + sleep 15; + export SERVER={{ template "zookeeper.fullname" $root }}:{{ $port }}; + {{- range $job.config.create }} + echo '==> {{ . }}'; + echo '====> Create chroot if does not exist.'; + zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid' + || zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} create {{ . }} ""; + echo '====> Confirm chroot exists.'; + zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid'; + echo '====> Chroot exists.'; + {{- end }} + env: + {{- range $key, $value := $job.env }} + - name: {{ $key | upper | replace "." 
"_" }} + value: {{ $value | quote }} + {{- end }} + resources: +{{ toYaml $job.resources | indent 12 }} +{{- end -}} diff --git a/charts/zookeeper/templates/poddisruptionbudget.yaml b/charts/zookeeper/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000000000000000000000000000000000000..15ee0080232bedb4df87f41198e8c6b4e8f7e6e9 --- /dev/null +++ b/charts/zookeeper/templates/poddisruptionbudget.yaml @@ -0,0 +1,17 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "zookeeper.fullname" . }} + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: server +spec: + selector: + matchLabels: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} + component: server +{{ toYaml .Values.podDisruptionBudget | indent 2 }} diff --git a/charts/zookeeper/templates/service-headless.yaml b/charts/zookeeper/templates/service-headless.yaml new file mode 100644 index 0000000000000000000000000000000000000000..57a7d9f4124a8500826ab7da59da09e383860649 --- /dev/null +++ b/charts/zookeeper/templates/service-headless.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }}-headless + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.headless.annotations }} + annotations: +{{ .Values.headless.annotations | toYaml | trimSuffix "\n" | indent 4 }} +{{- end }} +spec: + clusterIP: None + ports: +{{- range $key, $port := .Values.ports }} + - name: {{ $key }} + port: {{ $port.containerPort }} + targetPort: {{ $key }} + protocol: {{ $port.protocol }} +{{- end }} + selector: + app: {{ template "zookeeper.name" . 
}} + release: {{ .Release.Name }} diff --git a/charts/zookeeper/templates/service.yaml b/charts/zookeeper/templates/service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5f10861e8af9c5de6b05fe6c5974c2f8938b28ca --- /dev/null +++ b/charts/zookeeper/templates/service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }} + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: +{{- with .Values.service.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + {{- range $key, $value := .Values.service.ports }} + - name: {{ $key }} +{{ toYaml $value | indent 6 }} + {{- end }} + selector: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} diff --git a/charts/zookeeper/templates/statefulset.yaml b/charts/zookeeper/templates/statefulset.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e70f7bcbf912d8307667bab34ee747bc62d5a8eb --- /dev/null +++ b/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,204 @@ +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: {{ template "zookeeper.fullname" . }} + labels: + app: {{ template "zookeeper.name" . }} + chart: {{ template "zookeeper.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: server +spec: + serviceName: {{ template "zookeeper.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "zookeeper.name" . }} + release: {{ .Release.Name }} + component: server + updateStrategy: +{{ toYaml .Values.updateStrategy | indent 4 }} + template: + metadata: + labels: + app: {{ template "zookeeper.name" . 
}} + release: {{ .Release.Name }} + component: server + {{- if .Values.podLabels }} + ## Custom pod labels + {{- range $key, $value := .Values.podLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + annotations: + {{- if .Values.podAnnotations }} + ## Custom pod annotations + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + spec: + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} +{{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" +{{- end }} + securityContext: +{{ toYaml .Values.securityContext | indent 8 }} +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" +{{- end }} + containers: + + - name: zookeeper + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.command }} + command: {{ range . }} + - {{ . | quote }} + {{- end }} + {{- end }} + ports: +{{- range $key, $port := .Values.ports }} + - name: {{ $key }} +{{ toYaml $port | indent 14 }} +{{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 12 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 12 }} + env: + - name: ZK_REPLICAS + value: {{ .Values.replicaCount | quote }} + {{- range $key, $value := .Values.env }} + - name: {{ $key | upper | replace "." 
"_" }} + value: {{ $value | quote }} + {{- end }} + {{- range $secret := .Values.secrets }} + {{- range $key := $secret.keys }} + - name: {{ (print $secret.name "_" $key) | upper }} + valueFrom: + secretKeyRef: + name: {{ $secret.name }} + key: {{ $key }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} + volumeMounts: + - name: data + mountPath: /var/lib/zookeeper + {{- range $secret := .Values.secrets }} + {{- if $secret.mountPath }} + {{- range $key := $secret.keys }} + - name: {{ $.Release.Name }}-{{ $secret.name }} + mountPath: {{ $secret.mountPath }}/{{ $key }} + subPath: {{ $key }} + readOnly: true + {{- end }} + {{- end }} + {{- end }} + + +{{- if .Values.exporters.jmx.enabled }} + - name: jmx-exporter + image: "{{ .Values.exporters.jmx.image.repository }}:{{ .Values.exporters.jmx.image.tag }}" + imagePullPolicy: {{ .Values.exporters.jmx.image.pullPolicy }} + ports: + {{- range $key, $port := .Values.exporters.jmx.ports }} + - name: {{ $key }} +{{ toYaml $port | indent 14 }} + {{- end }} + livenessProbe: +{{ toYaml .Values.exporters.jmx.livenessProbe | indent 12 }} + readinessProbe: +{{ toYaml .Values.exporters.jmx.readinessProbe | indent 12 }} + env: + - name: SERVICE_PORT + value: {{ .Values.exporters.jmx.ports.jmxxp.containerPort | quote }} + {{- with .Values.exporters.jmx.env }} + {{- range $key, $value := . }} + - name: {{ $key | upper | replace "." 
"_" }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + resources: +{{ toYaml .Values.exporters.jmx.resources | indent 12 }} + volumeMounts: + - name: config-jmx-exporter + mountPath: /opt/jmx_exporter/config.yml + subPath: config.yml +{{- end }} + +{{- if .Values.exporters.zookeeper.enabled }} + - name: zookeeper-exporter + image: "{{ .Values.exporters.zookeeper.image.repository }}:{{ .Values.exporters.zookeeper.image.tag }}" + imagePullPolicy: {{ .Values.exporters.zookeeper.image.pullPolicy }} + args: + - -bind-addr=:{{ .Values.exporters.zookeeper.ports.zookeeperxp.containerPort }} + - -metrics-path={{ .Values.exporters.zookeeper.path }} + - -zookeeper=localhost:{{ .Values.ports.client.containerPort }} + - -log-level={{ .Values.exporters.zookeeper.config.logLevel }} + - -reset-on-scrape={{ .Values.exporters.zookeeper.config.resetOnScrape }} + ports: + {{- range $key, $port := .Values.exporters.zookeeper.ports }} + - name: {{ $key }} +{{ toYaml $port | indent 14 }} + {{- end }} + livenessProbe: +{{ toYaml .Values.exporters.zookeeper.livenessProbe | indent 12 }} + readinessProbe: +{{ toYaml .Values.exporters.zookeeper.readinessProbe | indent 12 }} + env: + {{- range $key, $value := .Values.exporters.zookeeper.env }} + - name: {{ $key | upper | replace "." "_" }} + value: {{ $value | quote }} + {{- end }} + resources: +{{ toYaml .Values.exporters.zookeeper.resources | indent 12 }} +{{- end }} + + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} + volumes: + {{- range .Values.secrets }} + - name: {{ $.Release.Name }}-{{ .name }} + secret: + secretName: {{ .name }} + {{- end }} + {{- if .Values.exporters.jmx.enabled }} + - name: config-jmx-exporter + configMap: + name: {{ .Release.Name }}-jmx-exporter + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/charts/zookeeper/values.yaml b/charts/zookeeper/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..949bb28b724b7597a64eabc4631315ad36e7f4e4 --- /dev/null +++ b/charts/zookeeper/values.yaml @@ -0,0 +1,323 @@ +## As weighted quorums are not supported, it is imperative that an odd number of replicas +## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7. +## +## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set +replicaCount: 3 # Desired quantity of ZooKeeper pods. This should always be (1,3,5, or 7) + +podDisruptionBudget: + maxUnavailable: 1 # Limits how many Zokeeper pods may be unavailable due to voluntary disruptions. + +terminationGracePeriodSeconds: 1800 # Duration in seconds a Zokeeper pod needs to terminate gracefully. + +## OnDelete requires you to manually delete each pod when making updates. +## This approach is at the moment safer than RollingUpdate because replication +## may be incomplete when replication source pod is killed. 
+## +## ref: http://blog.kubernetes.io/2017/09/kubernetes-statefulsets-daemonsets.html +updateStrategy: + type: OnDelete # Pods will only be created when you manually delete old pods. + +## refs: +## - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper +## - https://github.com/kubernetes/contrib/blob/master/statefulsets/zookeeper/Makefile#L1 +image: + repository: registry.cn-qingdao.aliyuncs.com/wod/k8szk # Container image repository for zookeeper container. + tag: v3 # Container image tag for zookeeper container. + pullPolicy: IfNotPresent # Image pull criteria for zookeeper container. + +service: + type: ClusterIP # Exposes zookeeper on a cluster-internal IP. + annotations: {} # Arbitrary non-identifying metadata for zookeeper service. + ## AWS example for use with LoadBalancer service type. + # external-dns.alpha.kubernetes.io/hostname: zookeeper.cluster.local + # service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + # service.beta.kubernetes.io/aws-load-balancer-internal: "true" + ports: + client: + port: 2181 # Service port number for client port. + targetPort: client # Service target port for client port. + protocol: TCP # Service port protocol for client port. + +## Headless service. +## +headless: + annotations: {} + +ports: + client: + containerPort: 2181 # Port number for zookeeper container client port. + protocol: TCP # Protocol for zookeeper container client port. + election: + containerPort: 3888 # Port number for zookeeper container election port. + protocol: TCP # Protocol for zookeeper container election port. + server: + containerPort: 2888 # Port number for zookeeper container server port. + protocol: TCP # Protocol for zookeeper container server port. + +resources: {} # Optionally specify how much CPU and memory (RAM) each zookeeper container needs. + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +priorityClassName: "" + +nodeSelector: {} # Node label-values required to run zookeeper pods. + +tolerations: [] # Node taint overrides for zookeeper pods. + +affinity: {} # Criteria by which pod label-values influence scheduling for zookeeper pods. + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - topologyKey: "kubernetes.io/hostname" + # labelSelector: + # matchLabels: + # release: zookeeper + +podAnnotations: {} # Arbitrary non-identifying metadata for zookeeper pods. + # prometheus.io/scrape: "true" + # prometheus.io/path: "/metrics" + # prometheus.io/port: "9141" + +podLabels: {} # Key/value pairs that are attached to zookeeper pods. + # team: "developers" + # service: "zookeeper" + +livenessProbe: + exec: + command: + - zkOk.sh + initialDelaySeconds: 20 + # periodSeconds: 30 + # timeoutSeconds: 30 + # failureThreshold: 6 + # successThreshold: 1 + +readinessProbe: + exec: + command: + - zkOk.sh + initialDelaySeconds: 20 + # periodSeconds: 30 + # timeoutSeconds: 30 + # failureThreshold: 6 + # successThreshold: 1 + +securityContext: + fsGroup: 1000 + runAsUser: 1000 + +## Useful, if you want to use an alternate image. +command: + - /bin/bash + - -xec + - zkGenConfig.sh && exec zkServer.sh start-foreground + +## Useful if using any custom authorizer. +## Pass any secrets to the kafka pods. Each secret will be passed as an +## environment variable by default. The secret can also be mounted to a +## specific path (in addition to environment variable) if required. 
Environment +## variable names are generated as: `_` (All upper case) +# secrets: +# - name: myKafkaSecret +# keys: +# - username +# - password +# # mountPath: /opt/kafka/secret +# - name: myZkSecret +# keys: +# - user +# - pass +# mountPath: /opt/zookeeper/secret + +persistence: + enabled: true + ## zookeeper data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce + size: 5Gi + +## Exporters query apps for metrics and make those metrics available for +## Prometheus to scrape. +exporters: + + jmx: + enabled: false + image: + repository: registry.cn-qingdao.aliyuncs.com/wod/jmx-prometheus-exporter + tag: 0.3.0 + pullPolicy: IfNotPresent + config: + lowercaseOutputName: false + ## ref: https://github.com/prometheus/jmx_exporter/blob/master/example_configs/zookeeper.yaml + rules: + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$2" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$3" + labels: + replicaId: "$2" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$4" + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$4_$5" + labels: + replicaId: "$2" + memberType: "$3" + startDelaySeconds: 30 + env: {} + resources: {} + path: /metrics + ports: + jmxxp: + containerPort: 9404 + protocol: TCP + livenessProbe: + httpGet: + path: /metrics + port: jmxxp + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 60 + failureThreshold: 8 + successThreshold: 1 + readinessProbe: + httpGet: + path: /metrics + port: jmxxp + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 60 + failureThreshold: 8 + successThreshold: 1 + + 
zookeeper: + ## refs: + ## - https://github.com/carlpett/zookeeper_exporter + ## - https://hub.docker.com/r/josdotso/zookeeper-exporter/ + ## - https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/#zookeeper-metrics + enabled: false + image: + repository: registry.cn-qingdao.aliyuncs.com/wod/zookeeper-exporter + tag: v1.1.2 + pullPolicy: IfNotPresent + config: + logLevel: info + resetOnScrape: "true" + env: {} + resources: {} + path: /metrics + ports: + zookeeperxp: + containerPort: 9141 + protocol: TCP + livenessProbe: + httpGet: + path: /metrics + port: zookeeperxp + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 60 + failureThreshold: 8 + successThreshold: 1 + readinessProbe: + httpGet: + path: /metrics + port: zookeeperxp + initialDelaySeconds: 30 + periodSeconds: 15 + timeoutSeconds: 60 + failureThreshold: 8 + successThreshold: 1 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper +env: + + ## Options related to JMX exporter. + ## ref: https://github.com/apache/zookeeper/blob/master/bin/zkServer.sh#L36 + JMXAUTH: "false" + JMXDISABLE: "false" + JMXPORT: 1099 + JMXSSL: "false" + + ## The port on which the server will accept client requests. + ZK_CLIENT_PORT: 2181 + + ## The port on which the ensemble performs leader election. + ZK_ELECTION_PORT: 3888 + + ## The JVM heap size. + ZK_HEAP_SIZE: 2G + + ## The number of Ticks that an ensemble member is allowed to perform leader + ## election. + ZK_INIT_LIMIT: 5 + + ## The Log Level that for the ZooKeeper processes logger. + ## Choices are `TRACE,DEBUG,INFO,WARN,ERROR,FATAL`. + ZK_LOG_LEVEL: INFO + + ## The maximum number of concurrent client connections that + ## a server in the ensemble will accept. 
+ ZK_MAX_CLIENT_CNXNS: 60 + + ## The maximum session timeout that the ensemble will allow a client to request. + ## Upstream default is `20 * ZK_TICK_TIME` + ZK_MAX_SESSION_TIMEOUT: 40000 + + ## The minimum session timeout that the ensemble will allow a client to request. + ## Upstream default is `2 * ZK_TICK_TIME`. + ZK_MIN_SESSION_TIMEOUT: 4000 + + ## The delay, in hours, between ZooKeeper log and snapshot cleanups. + ZK_PURGE_INTERVAL: 0 + + ## The port on which the leader will send events to followers. + ZK_SERVER_PORT: 2888 + + ## The number of snapshots that the ZooKeeper process will retain if + ## `ZK_PURGE_INTERVAL` is set to a value greater than `0`. + ZK_SNAP_RETAIN_COUNT: 3 + + ## The number of Tick by which a follower may lag behind the ensembles leader. + ZK_SYNC_LIMIT: 10 + + ## The number of wall clock ms that corresponds to a Tick for the ensembles + ## internal time. + ZK_TICK_TIME: 2000 + +jobs: + ## ref: http://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkSessions + chroots: + enabled: false + activeDeadlineSeconds: 300 + backoffLimit: 5 + completions: 1 + config: + create: [] + # - /kafka + # - /ureplicator + env: [] + parallelism: 1 + resources: {} + restartPolicy: Never diff --git a/cluster.xml b/cluster.xml new file mode 100644 index 0000000000000000000000000000000000000000..224a25ed58528c347193c0027e61806911061ebd --- /dev/null +++ b/cluster.xml @@ -0,0 +1,103 @@ + + + + + + %d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n + + + + + ${pattern} + + + + + ${pattern} + + + + + ${pattern} + + + + + + + + + ${pattern} + + + + + + + + + ${pattern} + + + + + + + + + ${patternMetrics} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/requirements.lock b/requirements.lock new file mode 100644 index 0000000000000000000000000000000000000000..f00bce8904dc2822cd297cf9de58daa54a810100 --- /dev/null +++ b/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: zookeeper + repository: 
https://kubernetes-charts-incubator.storage.googleapis.com/ + version: 1.3.1 +digest: sha256:ae6ba70dbd6645a7a9dcea6363c9870bba66d72f385796a523adee41974f6f4d +generated: "2019-06-11T14:40:14.989855-07:00" diff --git a/requirements.yaml b/requirements.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6aee0cb642ae630b8a3cdc18a95bd76d974e1629 --- /dev/null +++ b/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: +- name: zookeeper + version: ~1.3.1 + repository: https://kubernetes-charts-incubator.storage.googleapis.com/ + condition: zookeeper.enabled diff --git a/templates/NOTES.txt b/templates/NOTES.txt new file mode 100644 index 0000000000000000000000000000000000000000..d037f9c200c0bc4cd73c23aae07e8a435136ebf3 --- /dev/null +++ b/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the Storm UI URL by running these commands: +{{- if contains "NodePort" .Values.ui.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "storm.ui.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.ui.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "storm.ui.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "storm.ui.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.ui.service.externalPort }} +{{- else if contains "ClusterIP" .Values.ui.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "storm.ui.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl port-forward $POD_NAME 8080:{{ .Values.ui.service.port }} -n {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/templates/_helpers.tpl b/templates/_helpers.tpl new file mode 100644 index 0000000000000000000000000000000000000000..d5938cf7df73da0d6289af1c565e96291f211485 --- /dev/null +++ b/templates/_helpers.tpl @@ -0,0 +1,100 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "storm.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "storm.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "storm.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "storm.nimbus.name" -}} +{{- printf "%s-%s" (include "storm.name" .) .Values.nimbus.service.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified nimbus name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}}
+{{- define "storm.nimbus.fullname" -}}
+{{- $name := default .Chart.Name .Values.nimbus.service.name -}}
+{{- printf "%s-%s" (include "storm.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "storm.supervisor.name" -}}
+{{- printf "%s-%s" (include "storm.name" .) .Values.supervisor.service.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified supervisor name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "storm.supervisor.fullname" -}}
+{{- $name := default .Chart.Name .Values.supervisor.service.name -}}
+{{- printf "%s-%s" (include "storm.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "storm.ui.name" -}}
+{{- printf "%s-%s" (include "storm.name" .) .Values.ui.service.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified ui name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "storm.ui.fullname" -}}
+{{- $name := default .Chart.Name .Values.ui.service.name -}}
+{{- printf "%s-%s" (include "storm.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a fully qualified zookeeper name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "storm.zookeeper.fullname" -}}
+{{- $name := .Values.zookeeper.service.name -}}
+{{- printf "%s-%s" (include "storm.fullname" .) $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "storm.logging.name" -}}
+{{- printf "%s-logging" (include "storm.fullname" .) | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Override the zookeeper service name for the zookeeper chart so that both charts reference the same zookeeper service name. 
+*/}}
+{{- define "zookeeper.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name .Values.stormName $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/templates/configmap.yaml b/templates/configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..22a3db9827e4f968207cfe2ff5f6e3ed63666359
--- /dev/null
+++ b/templates/configmap.yaml
@@ -0,0 +1,71 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "storm.nimbus.fullname" . }}
+  labels:
+    chart: {{ template "storm.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+data:
+  storm.yaml: |-
+    ########### These MUST be filled in for a storm configuration
+    storm.zookeeper.servers:
+      - {{ template "storm.zookeeper.fullname" . }}
+    nimbus.seeds:
+      - {{ template "storm.nimbus.fullname" . }}
+    storm.local.hostname: {{ template "storm.nimbus.fullname" . }}
+    storm.log4j2.conf.dir: "/log4j2"
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "storm.supervisor.fullname" . }}
+  labels:
+    chart: {{ template "storm.chart" . }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+data:
+  storm.yaml: |-
+    ########### These MUST be filled in for a storm configuration
+    storm.zookeeper.servers:
+      - {{ template "storm.zookeeper.fullname" . }}
+    nimbus.seeds:
+      - {{ template "storm.nimbus.fullname" . }}
+    storm.local.hostname: {{ template "storm.supervisor.fullname" . }}
+    storm.log4j2.conf.dir: "/log4j2"
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "storm.logging.name" . }}
+  labels:
+    chart: {{ template "storm.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + {{- $files := .Files }} + {{- range tuple "cluster.xml" "worker.xml" }} + {{ . }}: |- +{{ $files.Get . | indent 4 }} + {{- end }} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "storm.ui.fullname" . }} + labels: + chart: {{ template "storm.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + storm.yaml: |- + ########### These MUST be filled in for a storm configuration + storm.zookeeper.servers: + - {{ template "storm.zookeeper.fullname" . }} + nimbus.seeds: + - {{ template "storm.nimbus.fullname" . }} + storm.local.hostname: {{ template "storm.ui.fullname" . }} + storm.log4j2.conf.dir: "/log4j2" diff --git a/templates/nimbus-deployment.yaml b/templates/nimbus-deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..01003f1f8213c133a59b2f5f7e4f2b5edd4cf725 --- /dev/null +++ b/templates/nimbus-deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "storm.nimbus.fullname" . }} + labels: + app: {{ template "storm.nimbus.name" . }} + chart: {{ template "storm.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.nimbus.replicaCount }} + selector: + matchLabels: + app: {{ template "storm.nimbus.name" . }} + release: {{ .Release.Name }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + app: {{ template "storm.nimbus.name" . }} + release: {{ .Release.Name }} + spec: + initContainers: + - name: init-{{ template "storm.zookeeper.fullname" . }} + image: busybox + command: ["sh", "-c", "until nslookup {{ template "storm.zookeeper.fullname" . }}; do echo waiting for {{ template "storm.zookeeper.fullname" . 
}}; sleep 2; done;"] + containers: + - name: {{ .Values.nimbus.service.name }} + image: "{{ .Values.nimbus.image.repository }}:{{ .Values.nimbus.image.tag }}" + imagePullPolicy: {{ .Values.nimbus.image.pullPolicy }} + command: ["storm", "nimbus"] + ports: + - containerPort: {{ .Values.nimbus.service.port }} + resources: +{{ toYaml .Values.nimbus.resources | indent 10 }} + volumeMounts: + - mountPath: "/conf" + name: storm-configmap + - mountPath: "/log4j2" + name: storm-logging-config + volumes: + - name: storm-configmap + configMap: + name: {{ template "storm.nimbus.fullname" . }} + - name: storm-logging-config + configMap: + name: {{ template "storm.logging.name" . }} diff --git a/templates/nimbus-service.yaml b/templates/nimbus-service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d3bfa2b5d87f53c2a44c845631b8362e8d844eae --- /dev/null +++ b/templates/nimbus-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "storm.nimbus.fullname" . }} + labels: + chart: {{ template "storm.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.nimbus.service.type }} + ports: + - port: {{ .Values.nimbus.service.port }} + name: {{ .Values.nimbus.service.name }} + selector: + app: {{ template "storm.nimbus.name" . }} + release: {{ .Release.Name }} diff --git a/templates/supervisor-deployment.yaml b/templates/supervisor-deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bfcab4b096617cc39e77fc900f870fe90877e89b --- /dev/null +++ b/templates/supervisor-deployment.yaml @@ -0,0 +1,49 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "storm.supervisor.fullname" . }} + labels: + app: {{ template "storm.supervisor.name" . }} + chart: {{ template "storm.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.supervisor.replicaCount }} + selector: + matchLabels: + app: {{ template "storm.supervisor.name" . }} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "storm.supervisor.name" . }} + release: {{ .Release.Name }} + spec: + initContainers: + - name: init-{{ template "storm.zookeeper.fullname" . }} + image: busybox + command: ["sh", "-c", "until nslookup {{ template "storm.zookeeper.fullname" . }}; do echo waiting for {{ template "storm.zookeeper.fullname" . }}; sleep 2; done;"] + - name: init-{{ template "storm.nimbus.fullname" . }} + image: busybox + command: ["sh", "-c", "until nslookup {{ template "storm.nimbus.fullname" . }}; do echo waiting for {{ template "storm.nimbus.fullname" . }}; sleep 2; done;"] + containers: + - name: {{ .Values.supervisor.service.name }} + image: "{{ .Values.supervisor.image.repository }}:{{ .Values.supervisor.image.tag }}" + imagePullPolicy: {{ .Values.supervisor.image.pullPolicy }} + command: ["storm", "supervisor"] + ports: + - containerPort: {{ .Values.supervisor.service.port }} + resources: +{{ toYaml .Values.supervisor.resources | indent 10 }} + volumeMounts: + - mountPath: "/conf" + name: storm-configmap + - mountPath: "/log4j2" + name: storm-logging-config + volumes: + - name: storm-configmap + configMap: + name: {{ template "storm.supervisor.fullname" . }} + - name: storm-logging-config + configMap: + name: {{ template "storm.logging.name" . }} diff --git a/templates/supervisor-service.yaml b/templates/supervisor-service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ab6741fe447362bfd6c44b25eb6a1f625d5e067 --- /dev/null +++ b/templates/supervisor-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "storm.supervisor.fullname" . }} + labels: + chart: {{ template "storm.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + ports: + - port: {{ .Values.supervisor.service.port }} + name: {{ .Values.supervisor.service.name }} + selector: + app: {{ template "storm.supervisor.name" . }} + release: {{ .Release.Name }} diff --git a/templates/ui-deployment.yaml b/templates/ui-deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aeaf01dae0528eeeaf51c3541b293175f28c2c35 --- /dev/null +++ b/templates/ui-deployment.yaml @@ -0,0 +1,56 @@ +{{- if .Values.ui.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "storm.ui.fullname" . }} + labels: + app: {{ template "storm.ui.name" . }} + chart: {{ template "storm.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + replicas: {{ .Values.ui.replicaCount }} + selector: + matchLabels: + app: {{ template "storm.ui.name" . }} + release: {{ .Release.Name }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + app: {{ template "storm.ui.name" . }} + release: {{ .Release.Name }} + spec: + initContainers: + - name: init-{{ template "storm.zookeeper.fullname" . }} + image: busybox + command: ["sh", "-c", "until nslookup {{ template "storm.zookeeper.fullname" . }}; do echo waiting for {{ template "storm.zookeeper.fullname" . }}; sleep 2; done;"] + - name: init-{{ template "storm.nimbus.fullname" . }} + image: busybox + command: ["sh", "-c", "until nslookup {{ template "storm.nimbus.fullname" . }}; do echo waiting for {{ template "storm.nimbus.fullname" . }}; sleep 2; done;"] + - name: init-{{ template "storm.supervisor.fullname" . }} + image: busybox + command: ["sh", "-c", "until nslookup {{ template "storm.supervisor.fullname" . }}; do echo waiting for {{ template "storm.supervisor.fullname" . 
}}; sleep 2; done;"] + containers: + - name: {{ .Values.ui.service.name }} + image: "{{ .Values.ui.image.repository }}:{{ .Values.ui.image.tag }}" + imagePullPolicy: {{ .Values.ui.image.pullPolicy }} + command: ["storm", "ui"] + ports: + - containerPort: {{ .Values.ui.service.port }} + resources: +{{ toYaml .Values.ui.resources | indent 10 }} + volumeMounts: + - mountPath: "/conf" + name: storm-configmap + - mountPath: "/log4j2" + name: storm-logging-config + volumes: + - name: storm-configmap + configMap: + name: {{ template "storm.ui.fullname" . }} + - name: storm-logging-config + configMap: + name: {{ template "storm.logging.name" . }} +{{- end -}} diff --git a/templates/ui-service.yaml b/templates/ui-service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..25fc6f68c59589ecb1fe21ad96e9a6a71fe82b8e --- /dev/null +++ b/templates/ui-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.ui.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "storm.ui.fullname" . }} + labels: + chart: {{ template "storm.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: {{ .Values.ui.service.type }} + ports: + - protocol: TCP + port: {{ .Values.ui.service.port }} + name: {{ .Values.ui.service.name }} + + selector: + app: {{ template "storm.ui.name" . }} + release: {{ .Release.Name }} +{{- end -}} diff --git a/values-overrides.yaml b/values-overrides.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9d78bede5c8b4bb90ad49a202ee29f0528aa780 --- /dev/null +++ b/values-overrides.yaml @@ -0,0 +1,64 @@ +# Default values for storm. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+nameOverride: "" +fullnameOverride: "" +name: storm +enabled: true +nimbus: + replicaCount: 1 + image: + repository: registry.cn-qingdao.aliyuncs.com/wod/storm + tag: 1.1.1 + pullPolicy: IfNotPresent + service: + name: nimbus + type: ClusterIP + port: 6627 + resources: + limits: + cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + +supervisor: + replicaCount: 3 + image: + repository: registry.cn-qingdao.aliyuncs.com/wod/storm + tag: 1.1.1 + pullPolicy: IfNotPresent + service: + name: supervisor + port: 6700 + resources: + limits: + cpu: 200m + nodeSelector: {} + tolerations: [] + affinity: {} + +ui: + enabled: true + replicaCount: 1 + image: + repository: registry.cn-qingdao.aliyuncs.com/wod/storm + tag: 1.1.1 + pullPolicy: IfNotPresent + service: + type: ClusterIP + name: ui + port: 8080 + resources: + limits: + cpu: 100m + ingress: + enabled: false + annotations: {} + tls: [] + +zookeeper: + enabled: true + service: + name: zookeeper + stormName: storm diff --git a/values.yaml b/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..84a354e0fd0a6f8149b6d63aef03c6005ee7f5fe --- /dev/null +++ b/values.yaml @@ -0,0 +1,64 @@ +# Default values for storm. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+nameOverride: "" +fullnameOverride: "" +name: storm +enabled: true +nimbus: + replicaCount: 1 + image: + repository: storm + tag: 1.1.1 + pullPolicy: IfNotPresent + service: + name: nimbus + type: ClusterIP + port: 6627 + resources: + limits: + cpu: 100m + nodeSelector: {} + tolerations: [] + affinity: {} + +supervisor: + replicaCount: 3 + image: + repository: storm + tag: 1.1.1 + pullPolicy: IfNotPresent + service: + name: supervisor + port: 6700 + resources: + limits: + cpu: 200m + nodeSelector: {} + tolerations: [] + affinity: {} + +ui: + enabled: true + replicaCount: 1 + image: + repository: storm + tag: 1.1.1 + pullPolicy: IfNotPresent + service: + type: ClusterIP + name: ui + port: 8080 + resources: + limits: + cpu: 100m + ingress: + enabled: false + annotations: {} + tls: [] + +zookeeper: + enabled: true + service: + name: zookeeper + stormName: storm diff --git a/worker.xml b/worker.xml new file mode 100644 index 0000000000000000000000000000000000000000..f0156ab3fd8e98ca9205829cd3e5f6a67aa70b78 --- /dev/null +++ b/worker.xml @@ -0,0 +1,80 @@ + + + + + + %d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} %t [%p] %msg%n + %msg%n + %d %-8r %m%n + + + + + ${pattern} + + + + + + + + + ${pattern} + + + + + ${pattern} + + + + + ${patternMetrics} + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file