I am using Rancher to manage the cluster, and I am deploying the Helm chart with the default configuration (the only exception is the storage class).
Here is the printed worker pod config:
apiVersion: v1
kind: Pod
metadata:
annotations:
checksum/config: 2512344ff793e810b8df9dece4fc6783792343354d69122160139068929b5643
cni.projectcalico.org/containerID: 4e0dca80c13851ff97995d99ce3976bf87e098159e8d8c9c39861697d8dfd6fa
cni.projectcalico.org/podIP: 192.168.34.23/32
cni.projectcalico.org/podIPs: 192.168.34.23/32
creationTimestamp: "2025-02-24T09:08:37Z"
generateName: manti2-manticoresearch-worker-
labels:
app.kubernetes.io/component: worker
app.kubernetes.io/instance: manti2
app.kubernetes.io/name: manticoresearch
apps.kubernetes.io/pod-index: "0"
controller-revision-hash: manti2-manticoresearch-worker-f7b6c4b9f
name: manti2-manticoresearch-worker
statefulset.kubernetes.io/pod-name: manti2-manticoresearch-worker-0
name: manti2-manticoresearch-worker-0
namespace: manticore
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: StatefulSet
name: manti2-manticoresearch-worker
uid: 1f34b224-5d8a-41f2-abc0-63b527e05dbd
resourceVersion: "1124964"
uid: 14968ee1-4518-408b-9133-87198054489d
spec:
containers:
- env:
- name: POD_START_VIA_PROBE
value: "true"
- name: AUTO_ADD_TABLES_IN_CLUSTER
value: "true"
- name: IS_BALANCER_ENABLED
value: "true"
- name: CONFIGMAP_PATH
value: /mnt/manticore.conf
- name: MANTICORE_PORT
value: "9306"
- name: MANTICORE_BINARY_PORT
value: "9312"
- name: CLUSTER_NAME
value: manticore
- name: REPLICATION_MODE
value: multi-master
- name: LOG_LEVEL
value: INFO
- name: INSTANCE_LABEL
value: manti2
- name: WORKER_SERVICE
value: manti2-manticoresearch-worker-replication-svc
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: EXTRA
value: "1"
image: manticoresearch/helm-worker:7.0.0.1
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- ./shutdown.sh
And here is the full pod spec as shown when opened via `kubectl edit` (buffer `/tmp/kubectl-edit-1003015580.yaml`, 203 lines):
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: Pod
metadata:
annotations:
checksum/config: 2512344ff793e810b8df9dece4fc6783792343354d69122160139068929b5643
cni.projectcalico.org/containerID: 4e0dca80c13851ff97995d99ce3976bf87e098159e8d8c9c39861697d8dfd6fa
cni.projectcalico.org/podIP: 192.168.34.23/32
cni.projectcalico.org/podIPs: 192.168.34.23/32
creationTimestamp: "2025-02-24T09:08:37Z"
generateName: manti2-manticoresearch-worker-
labels:
app.kubernetes.io/component: worker
app.kubernetes.io/instance: manti2
app.kubernetes.io/name: manticoresearch
apps.kubernetes.io/pod-index: "0"
controller-revision-hash: manti2-manticoresearch-worker-f7b6c4b9f
name: manti2-manticoresearch-worker
statefulset.kubernetes.io/pod-name: manti2-manticoresearch-worker-0
name: manti2-manticoresearch-worker-0
namespace: manticore
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: StatefulSet
name: manti2-manticoresearch-worker
uid: 1f34b224-5d8a-41f2-abc0-63b527e05dbd
resourceVersion: "1124964"
uid: 14968ee1-4518-408b-9133-87198054489d
spec:
containers:
- env:
- name: POD_START_VIA_PROBE
value: "true"
- name: AUTO_ADD_TABLES_IN_CLUSTER
value: "true"
- name: IS_BALANCER_ENABLED
value: "true"
- name: CONFIGMAP_PATH
value: /mnt/manticore.conf
- name: MANTICORE_PORT
value: "9306"
- name: MANTICORE_BINARY_PORT
value: "9312"
- name: CLUSTER_NAME
value: manticore
- name: REPLICATION_MODE
value: multi-master
- name: LOG_LEVEL
value: INFO
- name: INSTANCE_LABEL
value: manti2
- name: WORKER_SERVICE
value: manti2-manticoresearch-worker-replication-svc
- name: NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: EXTRA
value: "1"
image: manticoresearch/helm-worker:7.0.0.1
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- ./shutdown.sh
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 3
successThreshold: 1
tcpSocket:
port: 9306
timeoutSeconds: 1
name: worker
resources: {}
securityContext: {}
startupProbe:
exec:
command:
- /bin/sh
- -c
- |
if /usr/bin/mysql -e "show status;" | grep cluster_node_state | grep -q synced ; then exit 0; else exit 1; fi
failureThreshold: 30
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/lib/manticore/
name: data
- mountPath: /mnt/manticore.conf
name: config-volume
subPath: manticore.conf
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-ppjx9
readOnly: true
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostname: manti2-manticoresearch-worker-0
nodeName: dxkube2
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: manticore-sa
serviceAccountName: manticore-sa
subdomain: manti2-manticoresearch-worker-replication-svc
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: data
persistentVolumeClaim:
claimName: data-manti2-manticoresearch-worker-0
- configMap:
defaultMode: 420
name: manti2-manticoresearch-worker-config
name: config-volume
- name: kube-api-access-ppjx9
projected:
defaultMode: 420
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-02-24T09:08:37Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-02-24T09:08:37Z"
message: 'containers with unready status: [worker]'
reason: ContainersNotReady
status: "False"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-02-24T09:08:37Z"
message: 'containers with unready status: [worker]'
reason: ContainersNotReady
status: "False"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-02-24T09:08:37Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: containerd://4981f511120f4ed78a09b937c8bfe1fdc22fe7f24aedf583d6c2be11c815680a
image: docker.io/manticoresearch/helm-worker:7.0.0.1
imageID: docker.io/manticoresearch/helm-worker@sha256:99f894ddbf311e01b39a0faf9562ba04381c629d89f10e02fb8676512ec9631c
lastState:
terminated:
containerID: containerd://ed8702d5d8ac18756a68d4bbaccff0e9ab4c1e001f4935964d45723f4e6d0c0d
exitCode: 137
finishedAt: "2025-02-24T09:19:39Z"
reason: Error
startedAt: "2025-02-24T09:14:09Z"
name: worker
ready: false
restartCount: 2
started: false
state:
running:
startedAt: "2025-02-24T09:19:39Z"
hostIP: 10.113.24.20
phase: Running
podIP: 192.168.34.23
podIPs:
- ip: 192.168.34.23
qosClass: BestEffort
startTime: "2025-02-24T09:08:37Z"
Shouldn't I also have the MySQL port (9306) listed in the container's ports? I notice the pod spec has no `ports:` section at all, even though `MANTICORE_PORT` is set to "9306" and the liveness probe targets that port. Also, the container keeps restarting (restartCount: 2, last exit code 137) — it looks like the startup probe (`cluster_node_state ... synced`) never succeeds, so the pod is killed before becoming ready.