下面列出了 io.fabric8.kubernetes.api.model.apps.Deployment 的实例代码(示例中同时用到 io.fabric8.kubernetes.api.model.EnvVarBuilder 等相关类),或者点击链接到 GitHub 查看源代码,也可以在右侧发表评论。
@Test
public void shouldAssignSecurityContextSharedByPods() throws Exception {
    // given: two pod specs carrying an identical security context (runAsUser = 42)
    PodSpec firstSpec =
        new PodSpecBuilder()
            .withSecurityContext(new PodSecurityContextBuilder().withRunAsUser(42L).build())
            .build();
    firstSpec.setAdditionalProperty("add1", 1L);
    PodSpec secondSpec =
        new PodSpecBuilder()
            .withSecurityContext(new PodSecurityContextBuilder().withRunAsUser(42L).build())
            .build();
    secondSpec.setAdditionalProperty("add2", 2L);
    PodData firstData = new PodData(firstSpec, new ObjectMetaBuilder().build());
    PodData secondData = new PodData(secondSpec, new ObjectMetaBuilder().build());
    // when: the pods are merged into a single deployment
    Deployment merged = podMerger.merge(Arrays.asList(firstData, secondData));
    // then: the shared security context survives the merge
    PodSecurityContext securityContext =
        merged.getSpec().getTemplate().getSpec().getSecurityContext();
    assertEquals(securityContext.getRunAsUser(), (Long) 42L);
}
@Test
public void testTracing() {
    // given: a KafkaConnect resource with Jaeger tracing enabled
    KafkaConnect resource = new KafkaConnectBuilder(this.resource)
        .editSpec()
            .withNewJaegerTracing()
            .endJaegerTracing()
        .endSpec()
        .build();
    KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS);
    // Collections.emptyMap() is type-safe; the raw EMPTY_MAP constant causes
    // unchecked-assignment warnings
    Deployment dep = kc.generateDeployment(Collections.emptyMap(), true, null, null);
    Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);
    // the STRIMZI_TRACING env var must select the jaeger implementation
    String tracing = cont.getEnv().stream()
        .filter(env -> KafkaConnectCluster.ENV_VAR_STRIMZI_TRACING.equals(env.getName()))
        .map(EnvVar::getValue).findFirst().orElse("");
    assertThat(tracing, is("jaeger"));
    // the generated Connect configuration must wire in the OpenTracing interceptors
    // for both consumers and producers (env var looked up once, asserted twice)
    String config = cont.getEnv().stream()
        .filter(env -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_CONFIGURATION.equals(env.getName()))
        .map(EnvVar::getValue).findFirst().orElse("");
    assertThat(config.contains("consumer.interceptor.classes=io.opentracing.contrib.kafka.TracingConsumerInterceptor"), is(true));
    assertThat(config.contains("producer.interceptor.classes=io.opentracing.contrib.kafka.TracingProducerInterceptor"), is(true));
}
@Test
public void shouldHavePort() {
    // Load the generated manifest from the test classpath and locate the Deployment.
    KubernetesList list = Serialization.unmarshalAsList(
        Issue421Test.class.getClassLoader().getResourceAsStream("META-INF/dekorate/kubernetes.yml"));
    assertNotNull(list);
    Deployment deployment = findFirst(list, Deployment.class).orElseThrow(IllegalStateException::new);
    assertNotNull(deployment);
    Container container = deployment.getSpec().getTemplate().getSpec().getContainers().get(0);
    assertNotNull(container);
    List<ContainerPort> ports = container.getPorts();
    assertNotNull(ports);
    // exactly one port is expected: HTTP on 8080
    assertEquals(1, ports.size());
    ContainerPort port = ports.get(0);
    assertEquals("HTTP", port.getName());
    assertEquals(8080, port.getContainerPort());
}
@Test
@DisplayName("Should pause resource")
void testRolloutPause() throws InterruptedException {
    // Given: the mock API serves the deployment for GETs and accepts one PATCH
    String path = "/apis/apps/v1/namespaces/ns1/deployments/deploy1";
    server.expect().get().withPath(path)
        .andReturn(HttpURLConnection.HTTP_OK, getDeploymentBuilder().build()).times(3);
    server.expect().patch().withPath(path)
        .andReturn(HttpURLConnection.HTTP_OK, getDeploymentBuilder().build()).once();
    KubernetesClient client = server.getClient();
    // When: the rollout is paused
    Deployment result = client.apps().deployments().inNamespace("ns1").withName("deploy1")
        .rolling().pause();
    // Then: a PATCH setting spec.paused=true was sent to the API server
    RecordedRequest lastRequest = server.getLastRequest();
    assertNotNull(result);
    assertEquals("PATCH", lastRequest.getMethod());
    assertEquals("{\"spec\":{\"paused\":true}}", lastRequest.getBody().readUtf8());
}
@Test
public void testGenerateDeploymentWithTls() {
// given: a KafkaBridge trusting three certificates — two stored in "my-secret",
// one in "my-another-secret"
KafkaBridge resource = new KafkaBridgeBuilder(this.resource)
.editSpec()
.editOrNewTls()
.addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-secret").withCertificate("cert.crt").build())
.addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-secret").withCertificate("new-cert.crt").build())
.addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-another-secret").withCertificate("another-cert.crt").build())
.endTls()
.endSpec()
.build();
KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS);
Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null);
// each referenced secret becomes exactly one volume, deduplicated by secret name
// (volume 0 is presumably the logging/metrics volume — TODO confirm against KafkaBridgeCluster)
assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(1).getName(), is("my-secret"));
assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("my-another-secret"));
List<Container> containers = dep.getSpec().getTemplate().getSpec().getContainers();
// each secret volume is mounted under the TLS certs base path suffixed by the secret name
assertThat(containers.get(0).getVolumeMounts().get(1).getMountPath(), is(KafkaBridgeCluster.TLS_CERTS_BASE_VOLUME_MOUNT + "my-secret"));
assertThat(containers.get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaBridgeCluster.TLS_CERTS_BASE_VOLUME_MOUNT + "my-another-secret"));
// the trusted-certs env var lists every certificate as <secret>/<file>, semicolon-separated,
// preserving declaration order, and the TLS flag env var is "true"
assertThat(AbstractModel.containerEnvVars(containers.get(0)).get(KafkaBridgeCluster.ENV_VAR_KAFKA_BRIDGE_TRUSTED_CERTS), is("my-secret/cert.crt;my-secret/new-cert.crt;my-another-secret/another-cert.crt"));
assertThat(AbstractModel.containerEnvVars(containers.get(0)).get(KafkaBridgeCluster.ENV_VAR_KAFKA_BRIDGE_TLS), is("true"));
}
@Test
public void assertGeneratedResources() throws IOException {
// The production-mode build must emit both JSON and YAML manifests.
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.yml"));
List<HasMetadata> kubernetesList = DeserializationUtil
.deserializeAsList(kubernetesDir.resolve("kubernetes.yml"));
// First resource in kubernetes.yml is expected to be the Deployment, with the
// kubernetes-specific name and version labels.
assertThat(kubernetesList.get(0)).isInstanceOfSatisfying(Deployment.class, d -> {
assertThat(d.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("foo");
assertThat(m.getLabels()).contains(entry("app.kubernetes.io/name", "foo"),
entry("app.kubernetes.io/version", "1.0-kube"));
});
});
// Every resource in openshift.yml carries the openshift-specific name/version labels.
List<HasMetadata> openshiftList = DeserializationUtil
.deserializeAsList(kubernetesDir.resolve("openshift.yml"));
assertThat(openshiftList).allSatisfy(h -> {
assertThat(h.getMetadata().getName()).isIn("ofoo", "s2ifoo", "s2i-java");
assertThat(h.getMetadata().getLabels()).contains(entry("app.kubernetes.io/name", "ofoo"),
entry("app.kubernetes.io/version", "1.0-openshift"));
});
}
/**
 * Creates an environment holding the Kubernetes objects parsed from a recipe.
 *
 * <p>Besides storing the raw objects, it builds a {@code podData} index that unifies
 * bare {@code Pod}s and {@code Deployment}s under a common {@code PodData} view.
 * NOTE(review): a Deployment and a Pod sharing the same name would collide in
 * {@code podData} (deployments are inserted last and would win) — confirm callers
 * guarantee unique names.
 */
protected KubernetesEnvironment(
InternalRecipe internalRecipe,
Map<String, InternalMachineConfig> machines,
List<Warning> warnings,
Map<String, Pod> pods,
Map<String, Deployment> deployments,
Map<String, Service> services,
Map<String, Ingress> ingresses,
Map<String, PersistentVolumeClaim> persistentVolumeClaims,
Map<String, Secret> secrets,
Map<String, ConfigMap> configMaps) {
super(internalRecipe, machines, warnings);
setType(TYPE);
// Keep direct references to each object category (no defensive copies are made).
this.pods = pods;
this.deployments = deployments;
this.services = services;
this.ingresses = ingresses;
this.persistentVolumeClaims = persistentVolumeClaims;
this.secrets = secrets;
this.configMaps = configMaps;
this.podData = new HashMap<>();
this.injectablePods = new HashMap<>();
// Index both pods and deployments as PodData, keyed by their map name.
pods.forEach((name, pod) -> podData.put(name, new PodData(pod)));
deployments.forEach((name, deployment) -> podData.put(name, new PodData(deployment)));
}
@Test
public void testImagePullSecretsBoth() {
    // A secret supplied via the operator (method argument) and one via the Pod template
    LocalObjectReference operatorSecret = new LocalObjectReference("some-pull-secret");
    LocalObjectReference templateSecret = new LocalObjectReference("some-other-pull-secret");
    KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
        .editSpec()
            .withNewTemplate()
                .withNewPod()
                    .withImagePullSecrets(templateSecret)
                .endPod()
            .endTemplate()
        .endSpec()
        .build();
    KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);
    Deployment dep = kc.generateDeployment(emptyMap(), true, null, singletonList(operatorSecret));
    // The template-level secret wins; the operator-supplied one is not added
    List<LocalObjectReference> pullSecrets = dep.getSpec().getTemplate().getSpec().getImagePullSecrets();
    assertThat(pullSecrets.size(), is(1));
    assertThat(pullSecrets.contains(operatorSecret), is(false));
    assertThat(pullSecrets.contains(templateSecret), is(true));
}
@Test
public void testJvmOptions() {
    // given: JVM options covering heap sizing, -server, and two -XX flags
    Map<String, String> xx = new HashMap<>(2);
    xx.put("UseG1GC", "true");
    xx.put("MaxGCPauseMillis", "20");
    KafkaMirrorMaker resource = new KafkaMirrorMakerBuilder(this.resource)
        .editSpec()
            .withNewJvmOptions()
                .withNewXms("512m")
                .withNewXmx("1024m")
                .withNewServer(true)
                .withXx(xx)
            .endJvmOptions()
        .endSpec()
        .build();
    KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS);
    // Collections.emptyMap() is type-safe, unlike the raw EMPTY_MAP constant
    Deployment dep = mmc.generateDeployment(Collections.emptyMap(), true, null, null);
    Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);
    // look each env var up once instead of re-streaming per assertion
    String perfOpts = cont.getEnv().stream()
        .filter(env -> "KAFKA_JVM_PERFORMANCE_OPTS".equals(env.getName()))
        .map(EnvVar::getValue).findFirst().orElse("");
    assertThat(perfOpts.contains("-server"), is(true));
    assertThat(perfOpts.contains("-XX:+UseG1GC"), is(true));
    assertThat(perfOpts.contains("-XX:MaxGCPauseMillis=20"), is(true));
    String heapOpts = cont.getEnv().stream()
        .filter(env -> "KAFKA_HEAP_OPTS".equals(env.getName()))
        .map(EnvVar::getValue).findFirst().orElse("");
    assertThat(heapOpts.contains("-Xmx1024m"), is(true));
    assertThat(heapOpts.contains("-Xms512m"), is(true));
}
@Test
public void testImagePullSecretsBoth() {
    // A secret supplied via the operator (method argument) and one via the Pod template
    LocalObjectReference operatorSecret = new LocalObjectReference("some-pull-secret");
    LocalObjectReference templateSecret = new LocalObjectReference("some-other-pull-secret");
    KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource)
        .editSpec()
            .withNewTemplate()
                .withNewPod()
                    .withImagePullSecrets(templateSecret)
                .endPod()
            .endTemplate()
        .endSpec()
        .build();
    KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS);
    Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, singletonList(operatorSecret));
    // The template-level secret wins; the operator-supplied one is not added
    List<LocalObjectReference> pullSecrets = dep.getSpec().getTemplate().getSpec().getImagePullSecrets();
    assertThat(pullSecrets.size(), is(1));
    assertThat(pullSecrets.contains(operatorSecret), is(false));
    assertThat(pullSecrets.contains(templateSecret), is(true));
}
@Test(expectedExceptions = ValidationException.class)
public void shouldFailIfSecurityContextDiffersInPods() throws Exception {
    // given: two pods whose security contexts differ (runAsUser 42 vs 43)
    PodSpec podSpec1 =
        new PodSpecBuilder()
            .withSecurityContext(new PodSecurityContextBuilder().withRunAsUser(42L).build())
            .build();
    podSpec1.setAdditionalProperty("add1", 1L);
    PodData podData1 = new PodData(podSpec1, new ObjectMetaBuilder().build());
    PodSpec podSpec2 =
        new PodSpecBuilder()
            .withSecurityContext(new PodSecurityContextBuilder().withRunAsUser(43L).build())
            .build();
    podSpec2.setAdditionalProperty("add2", 2L);
    PodData podData2 = new PodData(podSpec2, new ObjectMetaBuilder().build());
    // when: merging pods with conflicting security contexts must throw
    // (the unused 'Deployment merged' local was removed)
    podMerger.merge(Arrays.asList(podData1, podData2));
    // then: ValidationException is expected via the @Test annotation
}
/**
 * Updates the image of each container named in {@code containerToImageMap} on the
 * current Deployment and sends the result as a patch.
 *
 * @param containerToImageMap container name to new image reference
 * @return the patched Deployment returned by the API server
 * @throws KubernetesClientException if the deployment does not exist or has no containers
 */
@Override
public Deployment updateImage(Map<String, String> containerToImageMap) {
    Deployment deployment = get();
    if (deployment == null) {
        // message fixed: this operation targets a Deployment, not a replica set
        throw new KubernetesClientException("Existing deployment doesn't exist");
    }
    List<Container> containers = deployment.getSpec().getTemplate().getSpec().getContainers();
    if (containers.isEmpty()) {
        throw new KubernetesClientException("Pod has no containers!");
    }
    for (Container container : containers) {
        if (containerToImageMap.containsKey(container.getName())) {
            container.setImage(containerToImageMap.get(container.getName()));
        }
    }
    deployment.getSpec().getTemplate().getSpec().setContainers(containers);
    // get() is invoked again to fetch an unmodified copy as the patch base,
    // since 'deployment' has been mutated in place above
    return sendPatchedObject(get(), deployment);
}
/**
 * Collects a {@code PodData} view for every Pod and Deployment among the given
 * component objects: all Pods first, then all Deployments, preserving input order
 * within each group.
 */
private List<PodData> getPodDatas(List<HasMetadata> componentsObjects) {
    List<PodData> podsData = new ArrayList<>();
    for (HasMetadata object : componentsObjects) {
        if (object instanceof Pod) {
            podsData.add(new PodData((Pod) object));
        }
    }
    for (HasMetadata object : componentsObjects) {
        if (object instanceof Deployment) {
            podsData.add(new PodData((Deployment) object));
        }
    }
    return podsData;
}
@Test
public void testResources() {
    // given: explicit CPU/memory requests and limits on the MirrorMaker2 spec
    Map<String, Quantity> requests = new HashMap<>(2);
    requests.put("cpu", new Quantity("250m"));
    requests.put("memory", new Quantity("512Mi"));
    Map<String, Quantity> limits = new HashMap<>(2);
    limits.put("cpu", new Quantity("500m"));
    limits.put("memory", new Quantity("1024Mi"));
    KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource)
        .editSpec()
            .withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build())
        .endSpec()
        .build();
    KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS);
    // Collections.emptyMap() is type-safe, unlike the raw EMPTY_MAP constant
    Deployment dep = kmm2.generateDeployment(Collections.emptyMap(), true, null, null);
    // then: the generated container carries exactly the configured requirements
    Container cont = getContainer(dep);
    assertThat(cont.getResources().getLimits(), is(limits));
    assertThat(cont.getResources().getRequests(), is(requests));
}
@Test
public void testResources() {
    // given: explicit CPU/memory requests and limits on the KafkaConnect spec
    Map<String, Quantity> requests = new HashMap<>(2);
    requests.put("cpu", new Quantity("250m"));
    requests.put("memory", new Quantity("512Mi"));
    Map<String, Quantity> limits = new HashMap<>(2);
    limits.put("cpu", new Quantity("500m"));
    limits.put("memory", new Quantity("1024Mi"));
    KafkaConnect resource = new KafkaConnectBuilder(this.resource)
        .editSpec()
            .withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build())
        .endSpec()
        .build();
    KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS);
    // Collections.emptyMap() is type-safe, unlike the raw EMPTY_MAP constant
    Deployment dep = kc.generateDeployment(Collections.emptyMap(), true, null, null);
    // then: the generated container carries exactly the configured requirements
    Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);
    assertThat(cont.getResources().getLimits(), is(limits));
    assertThat(cont.getResources().getRequests(), is(requests));
}
/**
 * Build bal file with deployment having readiness disabled.
 *
 * @throws IOException Error when loading the generated yaml.
 * @throws InterruptedException Error when compiling the ballerina file.
 * @throws KubernetesPluginException Error when deleting the generated artifacts folder.
 */
@Test
public void disabledTest() throws IOException, InterruptedException, KubernetesPluginException,
DockerTestException {
// Compile must succeed (exit code 0) before any artifacts can be validated.
Assert.assertEquals(KubernetesTestUtils.compileBallerinaFile(BAL_DIRECTORY, "disabled.bal"), 0);
// Check if docker image exists and correct
validateDockerfile();
validateDockerImage();
// Validate deployment yaml
File deploymentYAML = KUBERNETES_TARGET_PATH.resolve("disabled_deployment.yaml").toFile();
Assert.assertTrue(deploymentYAML.exists());
Deployment deployment = KubernetesTestUtils.loadYaml(deploymentYAML);
Assert.assertNotNull(deployment.getSpec());
Assert.assertNotNull(deployment.getSpec().getTemplate());
Assert.assertNotNull(deployment.getSpec().getTemplate().getSpec());
Assert.assertTrue(deployment.getSpec().getTemplate().getSpec().getContainers().size() > 0);
// Readiness was disabled in the .bal source, so no readiness probe may be generated.
Assert.assertNull(deployment.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe());
// Cleanup of generated artifacts and the docker image.
// NOTE(review): cleanup is skipped if an assertion above fails — consider @AfterMethod.
KubernetesUtils.deleteDirectory(KUBERNETES_TARGET_PATH);
KubernetesUtils.deleteDirectory(DOCKER_TARGET_PATH);
KubernetesTestUtils.deleteDockerImage(DOCKER_IMAGE);
}
/**
 * Returns whether the given Kubernetes/OpenShift resource is considered ready,
 * dispatching to the type-specific readiness check.
 *
 * @param item the resource to inspect; may be {@code null}
 * @return {@code true} if the resource reports ready
 * @throws IllegalArgumentException if the item type is unsupported or {@code null}
 */
public static boolean isReady(HasMetadata item) {
    if (item instanceof Deployment) {
        return isDeploymentReady((Deployment) item);
    } else if (item instanceof ReplicaSet) {
        return isReplicaSetReady((ReplicaSet) item);
    } else if (item instanceof Pod) {
        return isPodReady((Pod) item);
    } else if (item instanceof DeploymentConfig) {
        return isDeploymentConfigReady((DeploymentConfig) item);
    } else if (item instanceof ReplicationController) {
        return isReplicationControllerReady((ReplicationController) item);
    } else if (item instanceof Endpoints) {
        return isEndpointsReady((Endpoints) item);
    } else if (item instanceof Node) {
        return isNodeReady((Node) item);
    } else if (item instanceof StatefulSet) {
        return isStatefulSetReady((StatefulSet) item);
    } else {
        // message fixed: Endpoints is handled above but was missing from the list
        throw new IllegalArgumentException("Item needs to be one of [Node, Deployment, ReplicaSet, StatefulSet, Pod, DeploymentConfig, ReplicationController, Endpoints], but was: [" + (item != null ? item.getKind() : "Unknown (null)") + "]");
    }
}
@Test
public void testNoExternalConfigurationEnvs() {
    // An external-configuration env entry whose valueFrom is left empty
    ExternalConfigurationEnv env = new ExternalConfigurationEnvBuilder()
        .withName("MY_ENV_VAR")
        .withNewValueFrom()
        .endValueFrom()
        .build();
    KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource)
        .editSpec()
            .withNewExternalConfiguration()
                .withEnv(env)
            .endExternalConfiguration()
        .endSpec()
        .build();
    KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS);
    // Check Deployment: an env entry without a concrete source must not be propagated
    Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null);
    List<EnvVar> envs = getContainer(dep).getEnv();
    boolean present = envs.stream().anyMatch(var -> var.getName().equals("MY_ENV_VAR"));
    assertThat(present, is(false));
}
@Test
@DisplayName("Should restart rollout")
void testRolloutRestart() throws InterruptedException {
    // Given: the mock API serves the deployment for GETs and accepts one PATCH
    String path = "/apis/apps/v1/namespaces/ns1/deployments/deploy1";
    server.expect().get().withPath(path)
        .andReturn(HttpURLConnection.HTTP_OK, getDeploymentBuilder().build()).times(3);
    server.expect().patch().withPath(path)
        .andReturn(HttpURLConnection.HTTP_OK, getDeploymentBuilder().build()).once();
    KubernetesClient client = server.getClient();
    // When: a rollout restart is requested
    Deployment result = client.apps().deployments().inNamespace("ns1").withName("deploy1")
        .rolling().restart();
    // Then: a PATCH carrying the restartedAt annotation was sent
    RecordedRequest lastRequest = server.getLastRequest();
    assertNotNull(result);
    assertEquals("PATCH", lastRequest.getMethod());
    assertTrue(lastRequest.getBody().readUtf8().contains("kubectl.kubernetes.io/restartedAt"));
}
@Test
public void shouldAssignServiceAccountNameSharedByPods() throws Exception {
    // given: two pods declaring the same service account name
    PodSpec firstSpec = new PodSpecBuilder().withServiceAccountName("sa").build();
    firstSpec.setAdditionalProperty("add1", 1L);
    PodSpec secondSpec = new PodSpecBuilder().withServiceAccountName("sa").build();
    secondSpec.setAdditionalProperty("add2", 2L);
    PodData firstData = new PodData(firstSpec, new ObjectMetaBuilder().build());
    PodData secondData = new PodData(secondSpec, new ObjectMetaBuilder().build());
    // when: the pods are merged into a single deployment
    Deployment merged = podMerger.merge(Arrays.asList(firstData, secondData));
    // then: the merged template keeps the shared service account name
    String serviceAccount = merged.getSpec().getTemplate().getSpec().getServiceAccountName();
    assertEquals(serviceAccount, "sa");
}
@BeforeClass
public void compileSample() throws IOException, InterruptedException {
// Compile the sample and require success (exit code 0) before loading artifacts.
Assert.assertEquals(KubernetesTestUtils.compileBallerinaFile(SOURCE_DIR_PATH, "hello_world_k8s_namespace.bal"),
0);
File yamlFile = KUBERNETES_TARGET_PATH.resolve("hello_world_k8s_namespace.yaml").toFile();
Assert.assertTrue(yamlFile.exists());
List<HasMetadata> k8sItems = KubernetesTestUtils.loadYaml(yamlFile);
// Capture the generated resources into test fixture fields for later test methods;
// any other kinds in the YAML are intentionally ignored.
for (HasMetadata data : k8sItems) {
switch (data.getKind()) {
case "Deployment":
deployment = (Deployment) data;
break;
case "Service":
service = (Service) data;
break;
case "Ingress":
ingress = (Ingress) data;
break;
default:
break;
}
}
}
@Test
public void testImagePullSecrets() {
    // Both pull secrets are configured on the Pod template
    LocalObjectReference firstSecret = new LocalObjectReference("some-pull-secret");
    LocalObjectReference secondSecret = new LocalObjectReference("some-other-pull-secret");
    KafkaMirrorMaker resource = new KafkaMirrorMakerBuilder(this.resource)
        .editSpec()
            .withNewTemplate()
                .withNewPod()
                    .withImagePullSecrets(firstSecret, secondSecret)
                .endPod()
            .endTemplate()
        .endSpec()
        .build();
    KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS);
    Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null);
    // Both secrets must appear on the generated pod spec
    List<LocalObjectReference> pullSecrets = dep.getSpec().getTemplate().getSpec().getImagePullSecrets();
    assertThat(pullSecrets.size(), is(2));
    assertThat(pullSecrets.contains(firstSecret), is(true));
    assertThat(pullSecrets.contains(secondSecret), is(true));
}
@Test
public void testGracePeriod() {
    // given: an EntityOperator whose pod template sets terminationGracePeriodSeconds=123
    Kafka resource = new KafkaBuilder(ResourceUtils.createKafkaCluster(namespace, cluster, replicas, image, healthDelay, healthTimeout))
        .editSpec()
            .withNewEntityOperator()
                .withTopicOperator(entityTopicOperatorSpec)
                .withUserOperator(entityUserOperatorSpec)
                .withNewTemplate()
                    .withNewPod()
                        .withTerminationGracePeriodSeconds(123)
                    .endPod()
                .endTemplate()
            .endEntityOperator()
        .endSpec()
        .build();
    EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS);
    // Collections.emptyMap() is type-safe, unlike the raw EMPTY_MAP constant
    Deployment dep = eo.generateDeployment(true, Collections.emptyMap(), null, null);
    assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123)));
    // the third container (index 2) gets a preStop lifecycle hook running the stunnel script
    assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(2).getLifecycle(), is(notNullValue()));
    assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(2).getLifecycle().getPreStop().getExec().getCommand().contains("/opt/stunnel/entity_operator_stunnel_pre_stop.sh"), is(true));
}
@Test
public void testGenerateDeploymentWithTlsSameSecret() {
// given: a target cluster whose TLS trusted cert and TLS client auth cert/key
// all live in the SAME secret ("my-secret") — the volume must not be duplicated
KafkaMirrorMaker2ClusterSpec targetClusterWithTlsAuth = new KafkaMirrorMaker2ClusterSpecBuilder(this.targetCluster)
.editOrNewTls()
.addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-secret").withCertificate("cert.crt").build())
.endTls()
.withAuthentication(
new KafkaClientAuthenticationTlsBuilder()
.withNewCertificateAndKey()
.withSecretName("my-secret")
.withCertificate("user.crt")
.withKey("user.key")
.endCertificateAndKey()
.build())
.build();
KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource)
.editSpec()
.withClusters(targetClusterWithTlsAuth)
.endSpec()
.build();
KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS);
Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null);
// 3 = 1 volume from logging/metrics + 2 from above cert mounted for connect and for connectors
assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(3));
assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(1).getName(), is("my-secret"));
}
/**
 * This test uses the same secret to hold the certs for TLS and the credentials for plain client authentication. It checks that
 * the volumes and volume mounts that reference the secret are correctly created and that each volume name is only created once - volumes
 * with duplicate names will cause Kubernetes to reject the deployment.
 */
@Test
public void testGenerateDeploymentWithPlainAuthAndTLSSameSecret() {
// given: TLS trusted cert and SASL/PLAIN password both stored in "my-secret"
KafkaConnect resource = new KafkaConnectBuilder(this.resource)
.editSpec()
.editOrNewTls()
.addToTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-secret").withCertificate("cert.crt").build())
.endTls()
.withNewKafkaClientAuthenticationPlain()
.withUsername("user1")
.withNewPasswordSecret()
.withSecretName("my-secret")
.withPassword("user1.password")
.endPasswordSecret()
.endKafkaClientAuthenticationPlain()
.endSpec()
.build();
KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS);
Deployment dep = kc.generateDeployment(emptyMap(), true, null, null);
// only TWO volumes: logging/metrics plus a single deduplicated "my-secret" volume
assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(2));
assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(0).getName(), is("kafka-metrics-and-logging"));
assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(1).getName(), is("my-secret"));
List<Container> containers = dep.getSpec().getTemplate().getSpec().getContainers();
// the single secret volume is mounted TWICE: once for TLS certs, once for the password
assertThat(containers.get(0).getVolumeMounts().size(), is(3));
assertThat(containers.get(0).getVolumeMounts().get(0).getName(), is("kafka-metrics-and-logging"));
assertThat(containers.get(0).getVolumeMounts().get(0).getMountPath(), is("/opt/kafka/custom-config/"));
assertThat(containers.get(0).getVolumeMounts().get(1).getName(), is("my-secret"));
assertThat(containers.get(0).getVolumeMounts().get(1).getMountPath(), is(KafkaConnectCluster.TLS_CERTS_BASE_VOLUME_MOUNT + "my-secret"));
assertThat(containers.get(0).getVolumeMounts().get(2).getName(), is("my-secret"));
assertThat(containers.get(0).getVolumeMounts().get(2).getMountPath(), is(KafkaConnectCluster.PASSWORD_VOLUME_MOUNT + "my-secret"));
// SASL/PLAIN and TLS env vars reflect the configured username, password file and mechanism
assertThat(AbstractModel.containerEnvVars(containers.get(0)), hasEntry(KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_PASSWORD_FILE, "my-secret/user1.password"));
assertThat(AbstractModel.containerEnvVars(containers.get(0)), hasEntry(KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_USERNAME, "user1"));
assertThat(AbstractModel.containerEnvVars(containers.get(0)), hasEntry(KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM, "plain"));
assertThat(AbstractModel.containerEnvVars(containers.get(0)), hasEntry(KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_TLS, "true"));
}
@Test
public void shouldHaveCustomNameAndVersionInKubernetesYml() {
    // Load the generated manifest and locate the Deployment.
    KubernetesList list = Serialization.unmarshalAsList(
        Issue442MultiPlatformTest.class.getClassLoader().getResourceAsStream("META-INF/dekorate/kubernetes.yml"));
    assertNotNull(list);
    Deployment deployment = findFirst(list, Deployment.class).orElseThrow(IllegalStateException::new);
    assertNotNull(deployment);
    // The custom name from the configuration must be applied.
    assertEquals("k-name", deployment.getMetadata().getName());
    Map<String, String> labels = deployment.getMetadata().getLabels();
    assertNotNull(labels);
    // part-of is absent; the custom version label is present.
    assertFalse(labels.containsKey(Labels.PART_OF));
    assertEquals("1.0-kube", labels.get(Labels.VERSION));
}
@Test
public void testGenerateDeploymentWithProducerOAuthWithClientSecret() {
// given: producer-side OAuth client-credentials auth, with the client secret
// referenced from a Kubernetes Secret
KafkaMirrorMaker resource = new KafkaMirrorMakerBuilder(this.resource)
.editSpec()
.editProducer()
.withAuthentication(
new KafkaClientAuthenticationOAuthBuilder()
.withClientId("my-client-id")
.withTokenEndpointUri("http://my-oauth-server")
.withNewClientSecret()
.withSecretName("my-secret-secret")
.withKey("my-secret-key")
.endClientSecret()
.build())
.endProducer()
.endSpec()
.build();
KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS);
Deployment dep = kc.generateDeployment(emptyMap(), true, null, null);
Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);
// SASL mechanism for the producer must be "oauth"
// NOTE: orElse(null).getX() would NPE if the env var is absent — acceptable in a test
assertThat(cont.getEnv().stream().filter(var -> KafkaMirrorMakerCluster.ENV_VAR_KAFKA_MIRRORMAKER_SASL_MECHANISM_PRODUCER.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth"));
// the client secret is injected via a secretKeyRef pointing at the configured secret/key
assertThat(cont.getEnv().stream().filter(var -> KafkaMirrorMakerCluster.ENV_VAR_KAFKA_MIRRORMAKER_OAUTH_CLIENT_SECRET_PRODUCER.equals(var.getName())).findFirst().orElse(null).getValueFrom().getSecretKeyRef().getName(), is("my-secret-secret"));
assertThat(cont.getEnv().stream().filter(var -> KafkaMirrorMakerCluster.ENV_VAR_KAFKA_MIRRORMAKER_OAUTH_CLIENT_SECRET_PRODUCER.equals(var.getName())).findFirst().orElse(null).getValueFrom().getSecretKeyRef().getKey(), is("my-secret-key"));
// the OAuth config env var carries clientId and token endpoint as key="value" pairs
assertThat(cont.getEnv().stream().filter(var -> KafkaMirrorMakerCluster.ENV_VAR_KAFKA_MIRRORMAKER_OAUTH_CONFIG_PRODUCER.equals(var.getName())).findFirst().orElse(null).getValue().trim(),
is(String.format("%s=\"%s\" %s=\"%s\"", ClientConfig.OAUTH_CLIENT_ID, "my-client-id", ClientConfig.OAUTH_TOKEN_ENDPOINT_URI, "http://my-oauth-server")));
}
@Test
public void shouldHaveRegistry() {
    // Load the generated manifest and locate the Deployment.
    KubernetesList list = Serialization.unmarshalAsList(
        Issue420Test.class.getClassLoader().getResourceAsStream("META-INF/dekorate/kubernetes.yml"));
    assertNotNull(list);
    Deployment deployment = findFirst(list, Deployment.class).orElseThrow(IllegalStateException::new);
    assertNotNull(deployment);
    Container container = deployment.getSpec().getTemplate().getSpec().getContainers().get(0);
    assertNotNull(container);
    // The configured registry must prefix the container image reference.
    String image = container.getImage();
    assertTrue(image.startsWith("localhost:3000"));
}
@Test
public void assertGeneratedResources() throws IOException {
// The production-mode build must emit both JSON and YAML manifests.
final Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir)
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("kubernetes.yml"));
List<HasMetadata> kubernetesList = DeserializationUtil
.deserializeAsList(kubernetesDir.resolve("kubernetes.yml"));
// First resource is expected to be the Deployment; verify name, health probes,
// and the forced image pull policy.
assertThat(kubernetesList.get(0)).isInstanceOfSatisfying(Deployment.class, d -> {
assertThat(d.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("with-health-and-jib");
});
assertThat(d.getSpec()).satisfies(deploymentSpec -> {
assertThat(deploymentSpec.getTemplate()).satisfies(t -> {
assertThat(t.getSpec()).satisfies(podSpec -> {
assertThat(podSpec.getContainers()).hasOnlyOneElementSatisfying(container -> {
// readiness/liveness probes wired to the MicroProfile/SmallRye health endpoints
assertThat(container.getReadinessProbe()).isNotNull().satisfies(p -> {
assertProbePath(p, "/health/ready");
});
assertThat(container.getLivenessProbe()).isNotNull().satisfies(p -> {
assertProbePath(p, "/health/live");
});
// since no registry was set and a container-image extension exists, we force-set 'IfNotPresent'
assertThat(container.getImagePullPolicy()).isEqualTo("IfNotPresent");
});
});
});
});
});
}
@Test
public void testTopicOperatorContainerSecurityContext() {
// given: a restrictive container security context (non-root, no privilege
// escalation, all capabilities dropped)
SecurityContext securityContext = new SecurityContextBuilder()
.withPrivileged(false)
.withNewReadOnlyRootFilesystem(false)
.withAllowPrivilegeEscalation(false)
.withRunAsNonRoot(true)
.withNewCapabilities()
.addNewDrop("ALL")
.endCapabilities()
.build();
// configure it on the topic-operator container template of the entity operator
Kafka resource = new KafkaBuilder(ResourceUtils.createKafkaCluster(namespace, cluster, replicas, image, healthDelay, healthTimeout))
.editSpec()
.editOrNewEntityOperator()
.withTopicOperator(entityTopicOperatorSpec)
.withUserOperator(entityUserOperatorSpec)
.editOrNewTemplate()
.editOrNewTopicOperatorContainer()
.withSecurityContext(securityContext)
.endTopicOperatorContainer()
.endTemplate()
.endEntityOperator()
.endSpec()
.build();
EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS);
Deployment deployment = eo.generateDeployment(false, null, null, null);
// then: the generated topic-operator container carries exactly that security context
assertThat(deployment.getSpec().getTemplate().getSpec().getContainers(),
hasItem(allOf(
hasProperty("name", equalTo(EntityTopicOperator.TOPIC_OPERATOR_CONTAINER_NAME)),
hasProperty("securityContext", equalTo(securityContext))
)));
}