Here is my msc-jobmanager-deployment.yaml:

spec:
  replicas: 1
  selector:
    matchLabels:
      app: flink
      component: jobmanager
  template:
    metadata:
      labels:
        app: flink
        component: jobmanager
    spec:
      serviceAccountName: msc-s3-shared-content
      containers:
        - name: jobmanager
          # built from flink:1.11.3-scala_2.12-java11, with DeliveryStreams-0.0.1_3.1.0.jar copied to ./bin/flink
          image: test:latest
          args: ["jobmanager"]
          # I am planning to run the job this way ... please let me know if this is the right way
          # (see the sketch after this manifest)
          command: ['./bin/flink', 'run', './bin/DeliveryStreams-0.0.1_3.1.0.jar', 'DeduplicationJob']
          ports:
            - containerPort: 6123
              name: rpc
            - containerPort: 6124
              name: blob-server
            - containerPort: 8081
              name: webui
          env:
            - name: JOB_MANAGER_RPC_ADDRESS
              value: flink-jobmanager
            - name: KAFKA_BROKERS
              value: kafka:29092
          livenessProbe:
            tcpSocket:
              port: 6123
            initialDelaySeconds: 30
            periodSeconds: 60
          volumeMounts:
            - name: flink-config-volume
              mountPath: /opt/flink/conf
          securityContext:
            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
      volumes:
        - name: flink-config-volume
          configMap:
            name: flink-config
            items:
              - key: flink-conf.yaml
                path: flink-conf.yaml
              - key: log4j-console.properties
                path: log4j-console.properties
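From the Flink 1.11 docs on standalone Kubernetes deployments, an alternative I found is to let the official image's entrypoint start the job together with the JobManager ("application mode" via the standalone-job argument), instead of overriding command: with ./bin/flink run. A minimal sketch of only the container part; the /opt/flink/usrlib path and the fully qualified class name are placeholders, not my actual setup:

      containers:
        - name: jobmanager
          image: test:latest   # same image, but with the jar copied to /opt/flink/usrlib/ (placeholder path)
          # the official flink image entrypoint understands "standalone-job":
          # it starts the JobManager and launches the given job class at boot
          args: ["standalone-job", "--job-classname", "com.example.DeduplicationJob"]   # class name is a placeholder
          # no command: override, so the image's own entrypoint keeps the cluster process running

As far as I can tell, overriding command: with ./bin/flink run replaces the JobManager process itself, so there is no running cluster for flink run to submit the jar to.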
a) I do not want to run my job from the UI; I want the job to be started from the jobmanager image at boot time. When I try to run it this way, it throws an error.
b) How do I make sure my JobManager and TaskManager are configured for HA, so that if the JobManager goes down I do not lose data?
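For b), what I gathered from the Flink 1.11 docs is that standalone HA needs ZooKeeper plus a durable storageDir, set in flink-conf.yaml (which I already mount from the flink-config ConfigMap). A sketch of what I am planning to add; the ZooKeeper address and the S3 bucket are placeholders for my environment:

  high-availability: zookeeper
  high-availability.zookeeper.quorum: zookeeper:2181        # placeholder quorum address
  high-availability.storageDir: s3://my-bucket/flink/ha     # placeholder bucket; must be durable shared storage
  high-availability.cluster-id: /msc-delivery-streams       # placeholder cluster id

  # checkpoints so job state survives a JobManager restart
  state.backend: filesystem
  state.checkpoints.dir: s3://my-bucket/flink/checkpoints   # placeholder bucket

My understanding is that with this in place, Kubernetes can restart the JobManager pod (replicas: 1) and it recovers the running jobs from the HA storageDir and the latest checkpoint.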
Thanks,
Kumar