The following are code examples of io.fabric8.kubernetes.api.model.Quantity, extracted from open-source projects; follow the link to view the full source on GitHub, or leave a comment on the right.
@Test
public void shouldCombineAllResources() {
// Parent container with lower limits/requests.
Container first = new Container();
ResourceRequirements firstResources = new ResourceRequirementsBuilder()
.addToLimits("cpu", new Quantity("1"))
.addToLimits("memory", new Quantity("1Gi"))
.addToRequests("cpu", new Quantity("100m"))
.addToRequests("memory", new Quantity("156Mi"))
.build();
first.setResources(firstResources);
// Second container with higher values for every resource key.
Container second = new Container();
ResourceRequirements secondResources = new ResourceRequirementsBuilder()
.addToLimits("cpu", new Quantity("2"))
.addToLimits("memory", new Quantity("2Gi"))
.addToRequests("cpu", new Quantity("200m"))
.addToRequests("memory", new Quantity("256Mi"))
.build();
second.setResources(secondResources);
// The combined container must carry the second container's quantities.
Container combined = combine(first, second);
ResourceRequirements resources = combined.getResources();
assertQuantity("2", resources.getLimits().get("cpu"));
assertQuantity("2Gi", resources.getLimits().get("memory"));
assertQuantity("200m", resources.getRequests().get("cpu"));
assertQuantity("256Mi", resources.getRequests().get("memory"));
}
@Test
public void testBuild() {
// Stub the mock API server: serve a "compute-quota" ResourceQuota once for this path.
server.expect().withPath("/api/v1/namespaces/myspace/resourcequotas/compute-quota").andReturn(200, new ResourceQuotaBuilder()
.withNewMetadata().withName("compute-quota").withNamespace("myspace").endMetadata()
.withNewSpec().addToHard("pods", new Quantity("2"))
.addToHard("requests.cpu", new Quantity("1"))
.addToHard("limits.cpu", new Quantity("2")).endSpec().build()).once();
KubernetesClient client = server.getClient();
// Deployment fixture is loaded from the classpath and served back once.
Deployment deployment = client.apps().deployments().load(getClass().getResourceAsStream("/test-resourcequota-deployment.yml")).get();
server.expect().withPath("/apis/apps/v1/namespaces/myspace/deployments/deployment").andReturn(200, deployment).once();
// Both GETs must return non-null objects from the stubbed endpoints.
ResourceQuota resourcequota = client.resourceQuotas().inNamespace("myspace").withName("compute-quota").get();
assertNotNull(resourcequota);
deployment = client.apps().deployments().inNamespace("myspace").withName("deployment").get();
assertNotNull(deployment);
}
@Test
public void testSetMemoryLimitAndRequest() throws Exception {
// Report a successful pod phase so createDirs completes its run-and-cleanup cycle.
when(podStatus.getPhase()).thenReturn(POD_PHASE_SUCCEEDED);
pvcSubPathHelper.createDirs(
identity, WORKSPACE_ID, PVC_NAME, emptyMap(), WORKSPACE_ID + PROJECTS_PATH);
// Capture the pod the helper created and inspect its first container.
verify(osDeployments).create(podCaptor.capture());
ResourceRequirements actual =
podCaptor.getValue().getSpec().getContainers().get(0).getResources();
// The configured job memory limit is used for both the limit and the request.
ResourceRequirements expected =
new ResourceRequirementsBuilder()
.addToLimits(of("memory", new Quantity(jobMemoryLimit)))
.addToRequests(of("memory", new Quantity(jobMemoryLimit)))
.build();
assertEquals(actual, expected);
// The helper waits on the pod, checks its phase, deletes it, and provisions security context.
verify(osDeployments).wait(anyString(), anyInt(), any());
verify(podStatus).getPhase();
verify(osDeployments).delete(anyString());
verify(securityContextProvisioner).provision(any());
}
/**
 * Creates the PersistentVolumeClaim for the source workspace: storage size/unit,
 * access mode, storage class and optional label selector all come from the config.
 */
public PersistentVolumeClaim createSourceWorkspacePvc(TektonConfig config) {
// Plain HashMap instead of double-brace initialization: the anonymous
// subclass created an extra class and captured the enclosing instance.
Map<String, Quantity> requests = new HashMap<>();
requests.put("storage", new QuantityBuilder()
.withAmount(String.valueOf(config.getSourceWorkspaceClaim().getSize()))
.withFormat(config.getSourceWorkspaceClaim().getUnit())
.build());
// Selector is only attached when match labels are configured.
LabelSelector selector = null;
if (config.getSourceWorkspaceClaim().getMatchLabels().length != 0) {
selector = new LabelSelectorBuilder()
.withMatchLabels(Arrays.stream(config.getSourceWorkspaceClaim().getMatchLabels())
.collect(Collectors.toMap(l -> l.getKey(), l -> l.getValue())))
.build();
}
return new PersistentVolumeClaimBuilder()
.withNewMetadata()
.withName(sourceWorkspaceClaimName(config))
.endMetadata()
.withNewSpec()
.withAccessModes(config.getSourceWorkspaceClaim().getAccessMode().name())
.withStorageClassName(config.getSourceWorkspaceClaim().getStorageClass())
.withNewResources().withRequests(requests).endResources()
.withSelector(selector)
.endSpec()
.build();
}
/**
 * Creates the PersistentVolumeClaim for the Maven (m2) workspace: storage size/unit,
 * access mode, storage class and optional label selector all come from the config.
 */
public PersistentVolumeClaim createM2WorkspacePvc(TektonConfig config) {
// Plain HashMap instead of double-brace initialization: the anonymous
// subclass created an extra class and captured the enclosing instance.
Map<String, Quantity> requests = new HashMap<>();
requests.put("storage", new QuantityBuilder()
.withAmount(String.valueOf(config.getM2WorkspaceClaim().getSize()))
.withFormat(config.getM2WorkspaceClaim().getUnit())
.build());
// Selector is only attached when match labels are configured.
LabelSelector selector = null;
if (config.getM2WorkspaceClaim().getMatchLabels().length != 0) {
selector = new LabelSelectorBuilder()
.withMatchLabels(Arrays.stream(config.getM2WorkspaceClaim().getMatchLabels())
.collect(Collectors.toMap(l -> l.getKey(), l -> l.getValue())))
.build();
}
return new PersistentVolumeClaimBuilder()
.withNewMetadata()
.withName(m2WorkspaceClaimName(config))
.endMetadata()
.withNewSpec()
.withAccessModes(config.getM2WorkspaceClaim().getAccessMode().name())
.withStorageClassName(config.getM2WorkspaceClaim().getStorageClass())
.withNewResources().withRequests(requests).endResources()
.withSelector(selector)
.endSpec()
.build();
}
// Builds a PodData backed entirely by Mockito mocks: one container named
// "container_<machineName>" whose resources expose the given RAM limit and
// request as "memory" quantities, plus machine-name annotations on the metadata.
private static PodData createPodData(String machineName, long ramLimit, long ramRequest) {
final String containerName = "container_" + machineName;
final Container containerMock = mock(Container.class);
final ResourceRequirements resourcesMock = mock(ResourceRequirements.class);
final Quantity limitQuantityMock = mock(Quantity.class);
final Quantity requestQuantityMock = mock(Quantity.class);
final PodSpec specMock = mock(PodSpec.class);
final ObjectMeta metadataMock = mock(ObjectMeta.class);
// Quantities report the raw long values as their amount strings.
when(limitQuantityMock.getAmount()).thenReturn(String.valueOf(ramLimit));
when(requestQuantityMock.getAmount()).thenReturn(String.valueOf(ramRequest));
when(resourcesMock.getLimits()).thenReturn(ImmutableMap.of("memory", limitQuantityMock));
when(resourcesMock.getRequests()).thenReturn(ImmutableMap.of("memory", requestQuantityMock));
when(containerMock.getName()).thenReturn(containerName);
when(containerMock.getResources()).thenReturn(resourcesMock);
// Annotations link the container name back to the machine name.
when(metadataMock.getAnnotations())
.thenReturn(Names.createMachineNameAnnotations(containerName, machineName));
when(specMock.getContainers()).thenReturn(ImmutableList.of(containerMock));
return new PodData(specMock, metadataMock);
}
@Test
@TestCaseName("{method}(directConnection={0})")
@Parameters({ "true", "false" })
public void testBuildFromYaml(boolean directConnection) throws Exception {
// Parameterized over the agent's direct-connection mode.
cloud.setDirectConnection(directConnection);
PodTemplate template = new PodTemplate();
template.setYaml(loadYamlFile("pod-busybox.yaml"));
setupStubs();
Pod pod = new PodTemplateBuilder(template).withSlave(slave).build();
validatePod(pod, directConnection);
assertThat(pod.getMetadata().getLabels(), hasEntry("jenkins", "slave"));
Map<String, Container> containers = toContainerMap(pod);
assertEquals(2, containers.size());
// The busybox container must keep the extended-resource request/limit from the YAML.
Container container0 = containers.get("busybox");
assertNotNull(container0.getResources());
assertNotNull(container0.getResources().getRequests());
assertNotNull(container0.getResources().getLimits());
assertThat(container0.getResources().getRequests(), hasEntry("example.com/dongle", new Quantity("42")));
assertThat(container0.getResources().getLimits(), hasEntry("example.com/dongle", new Quantity("42")));
}
@Test
public void testProvisionVolumesIntoKubernetesEnvironment() throws Exception {
// given: two distinct PVCs in the environment
k8sEnv.getPersistentVolumeClaims().put("pvc1", newPVC("pvc1"));
k8sEnv.getPersistentVolumeClaims().put("pvc2", newPVC("pvc2"));
// when
commonPVCStrategy.provision(k8sEnv, IDENTITY);
// then: provisioning steps run in this exact order (InOrder verification)
provisionOrder.verify(volumeConverter).convertCheVolumes(k8sEnv, WORKSPACE_ID);
provisionOrder.verify(subpathPrefixes).prefixVolumeMountsSubpaths(k8sEnv, WORKSPACE_ID);
provisionOrder.verify(podsVolumes).replacePVCVolumesWithCommon(k8sEnv.getPodsData(), PVC_NAME);
// Both original PVCs are replaced by the single common PVC.
assertEquals(k8sEnv.getPersistentVolumeClaims().size(), 1);
PersistentVolumeClaim commonPVC = k8sEnv.getPersistentVolumeClaims().get(PVC_NAME);
assertNotNull(commonPVC);
assertEquals(commonPVC.getMetadata().getName(), PVC_NAME);
assertEquals(commonPVC.getSpec().getAccessModes(), Collections.singletonList(PVC_ACCESS_MODE));
assertEquals(
commonPVC.getSpec().getResources().getRequests().get("storage"),
new Quantity(PVC_QUANTITY));
}
/**
 * Get the resource limits for the deployment request. A Pod can define its
 * maximum needed resources by setting the limits and Kubernetes can provide
 * more resources if any are free.
 * <p>
 * Falls back to the server properties if not present in the deployment request.
 *
 * @param kubernetesDeployerProperties the kubernetes deployment properties map
 * @return the resource limits to use
 */
Map<String, Quantity> deduceResourceLimits(Map<String, String> kubernetesDeployerProperties) {
String memory = PropertyParserUtils.getDeploymentPropertyValue(kubernetesDeployerProperties,
this.propertyPrefix + ".limits.memory");
String cpu = PropertyParserUtils.getDeploymentPropertyValue(kubernetesDeployerProperties,
this.propertyPrefix + ".limits.cpu");
// Fall back to the globally configured server defaults when a value is absent.
if (StringUtils.isEmpty(memory)) {
memory = properties.getLimits().getMemory();
}
if (StringUtils.isEmpty(cpu)) {
cpu = properties.getLimits().getCpu();
}
Map<String, Quantity> limits = new HashMap<>();
limits.put("memory", new Quantity(memory));
limits.put("cpu", new Quantity(cpu));
logger.debug("Using limits - cpu: " + cpu + " mem: " + memory);
return limits;
}
/**
 * Builds a new {@link PersistentVolumeClaim} with the given name, access mode,
 * requested storage quantity, and (optional) storage class name.
 */
public static PersistentVolumeClaim newPVC(
String name, String accessMode, String quantity, String storageClassName) {
SpecNested<PersistentVolumeClaimBuilder> spec =
new PersistentVolumeClaimBuilder()
.withNewMetadata()
.withName(name)
.endMetadata()
.withNewSpec()
.withAccessModes(accessMode);
// The storage class is only set when one was explicitly provided.
if (!isNullOrEmpty(storageClassName)) {
spec.withStorageClassName(storageClassName);
}
ImmutableMap<String, Quantity> storageRequest =
ImmutableMap.of(STORAGE_PARAM, new Quantity(quantity));
return spec
.withNewResources()
.withRequests(storageRequest)
.endResources()
.endSpec()
.build();
}
@Test
public void testOverridesContainerRamLimitAndRequestFromMachineAttribute() throws Exception {
// Pre-set resources on the container; the provisioner must override them
// with the values coming from the machine attributes.
ResourceRequirements resourceRequirements =
new ResourceRequirementsBuilder()
.addToLimits(of("memory", new Quantity("3221225472"), "cpu", new Quantity("0.678")))
.addToRequests(of("memory", new Quantity("1231231423"), "cpu", new Quantity("0.333")))
.build();
container.setResources(resourceRequirements);
resourceProvisioner.provision(k8sEnv, identity);
// All four quantities are replaced by the machine-attribute constants.
assertEquals(container.getResources().getLimits().get("memory").getAmount(), RAM_LIMIT_VALUE);
assertEquals(container.getResources().getLimits().get("cpu").getAmount(), CPU_LIMIT_VALUE);
assertEquals(
container.getResources().getRequests().get("memory").getAmount(), RAM_REQUEST_VALUE);
assertEquals(container.getResources().getRequests().get("cpu").getAmount(), CPU_REQUEST_VALUE);
}
/**
 * Get resource requirements from memory and cpu. Every value is applied to
 * both requests and limits.
 *
 * @param mem Memory in mb.
 * @param cpu cpu.
 * @param externalResources external resources
 * @return KubernetesResource requirements.
 */
public static ResourceRequirements getResourceRequirements(int mem, double cpu, Map<String, Long> externalResources) {
final Quantity cpuAmount = new Quantity(String.valueOf(cpu));
final Quantity memAmount = new Quantity(mem + Constants.RESOURCE_UNIT_MB);
final ResourceRequirementsBuilder builder = new ResourceRequirementsBuilder()
.addToRequests(Constants.RESOURCE_NAME_MEMORY, memAmount)
.addToRequests(Constants.RESOURCE_NAME_CPU, cpuAmount)
.addToLimits(Constants.RESOURCE_NAME_MEMORY, memAmount)
.addToLimits(Constants.RESOURCE_NAME_CPU, cpuAmount);
// External resources (e.g. GPUs) are mirrored into requests and limits as well.
externalResources.forEach((name, amount) -> {
final Quantity quantity = new Quantity(String.valueOf(amount));
builder.addToRequests(name, quantity).addToLimits(name, quantity);
LOG.info("Request external resource {} with config key {}.", quantity.getAmount(), name);
});
return builder.build();
}
@Test
void testBuild() {
// Build the expected PVC: 500Gi of "my-local-storage", ReadWriteOnce.
PersistentVolumeClaim persistentVolumeClaim = new PersistentVolumeClaimBuilder()
.withNewMetadata().withName("test-pv-claim").withNamespace("test").endMetadata()
.withNewSpec()
.withStorageClassName("my-local-storage")
.withAccessModes("ReadWriteOnce")
.withNewResources()
.addToRequests("storage", new Quantity("500Gi"))
.endResources()
.endSpec()
.build();
// Serve it once from the mock server, then fetch it through the client.
server.expect().withPath("/api/v1/namespaces/test/persistentvolumeclaims/test-pv-claim").andReturn(200, persistentVolumeClaim).once();
KubernetesClient client = server.getClient();
persistentVolumeClaim = client.persistentVolumeClaims().inNamespace("test").withName("test-pv-claim").get();
assertNotNull(persistentVolumeClaim);
}
@Test
public void testResources() {
// Requests and limits configured on the KafkaBridge custom resource.
Map<String, Quantity> requests = new HashMap<>(2);
requests.put("cpu", new Quantity("250m"));
requests.put("memory", new Quantity("512Mi"));
Map<String, Quantity> limits = new HashMap<>(2);
limits.put("cpu", new Quantity("500m"));
limits.put("memory", new Quantity("1024Mi"));
KafkaBridge resource = new KafkaBridgeBuilder(this.resource)
.editSpec()
.withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build())
.endSpec()
.build();
KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS);
// Collections.emptyMap() instead of the raw EMPTY_MAP constant: avoids an
// unchecked assignment and matches the sibling tests in this suite.
Deployment dep = kbc.generateDeployment(Collections.emptyMap(), true, null, null);
// The generated bridge container must carry the configured requirements verbatim.
Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);
assertThat(cont.getResources().getLimits(), is(limits));
assertThat(cont.getResources().getRequests(), is(requests));
}
@Test
public void testResources() {
// Requests and limits configured on the KafkaMirrorMaker custom resource.
Map<String, Quantity> requests = new HashMap<>(2);
requests.put("cpu", new Quantity("250m"));
requests.put("memory", new Quantity("512Mi"));
Map<String, Quantity> limits = new HashMap<>(2);
limits.put("cpu", new Quantity("500m"));
limits.put("memory", new Quantity("1024Mi"));
KafkaMirrorMaker resource = new KafkaMirrorMakerBuilder(this.resource)
.editSpec()
.withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build())
.endSpec()
.build();
KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS);
Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null);
// The generated mirror-maker container must carry the configured requirements verbatim.
Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);
assertThat(cont.getResources().getLimits(), is(limits));
assertThat(cont.getResources().getRequests(), is(requests));
}
@Test
public void testResources() {
// Requests and limits configured on the KafkaMirrorMaker2 custom resource.
Map<String, Quantity> requests = new HashMap<>(2);
requests.put("cpu", new Quantity("250m"));
requests.put("memory", new Quantity("512Mi"));
Map<String, Quantity> limits = new HashMap<>(2);
limits.put("cpu", new Quantity("500m"));
limits.put("memory", new Quantity("1024Mi"));
KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource)
.editSpec()
.withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build())
.endSpec()
.build();
KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS);
// Collections.emptyMap() instead of the raw EMPTY_MAP constant: avoids an
// unchecked assignment and matches the sibling tests in this suite.
Deployment dep = kmm2.generateDeployment(Collections.emptyMap(), true, null, null);
// The generated MM2 container must carry the configured requirements verbatim.
Container cont = getContainer(dep);
assertThat(cont.getResources().getLimits(), is(limits));
assertThat(cont.getResources().getRequests(), is(requests));
}
@Test
public void testResources() {
// Requests and limits configured on the KafkaConnectS2I custom resource.
Map<String, Quantity> requests = new HashMap<>(2);
requests.put("cpu", new Quantity("250m"));
requests.put("memory", new Quantity("512Mi"));
Map<String, Quantity> limits = new HashMap<>(2);
limits.put("cpu", new Quantity("500m"));
limits.put("memory", new Quantity("1024Mi"));
KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource)
.editSpec()
.withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build())
.endSpec()
.build();
KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS);
// Collections.emptyMap() instead of the raw EMPTY_MAP constant: avoids an
// unchecked assignment and matches the sibling tests in this suite.
DeploymentConfig dep = kc.generateDeploymentConfig(Collections.emptyMap(), true, null, null);
// The generated connect container must carry the configured requirements verbatim.
Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0);
assertThat(cont.getResources().getLimits(), is(limits));
assertThat(cont.getResources().getRequests(), is(requests));
}
/**
 * Asserts that the named container of the given pod carries exactly the expected
 * memory/cpu limits and requests.
 */
protected void assertResources(String namespace, String podName, String containerName, String memoryLimit, String cpuLimit, String memoryRequest, String cpuRequest) {
Pod po = kubeClient().getPod(podName);
assertThat("Not found an expected pod " + podName + " in namespace " + namespace + " but found " +
kubeClient().listPods().stream().map(p -> p.getMetadata().getName()).collect(Collectors.toList()), po, is(notNullValue()));
// Typed Optional<Container> instead of the raw Optional, which forced an
// unchecked cast below.
Optional<Container> optional = po.getSpec().getContainers().stream().filter(c -> c.getName().equals(containerName)).findFirst();
assertThat("Not found an expected container " + containerName, optional.isPresent(), is(true));
Container container = optional.get();
Map<String, Quantity> limits = container.getResources().getLimits();
assertThat(limits.get("memory").getAmount(), is(memoryLimit));
assertThat(limits.get("cpu").getAmount(), is(cpuLimit));
Map<String, Quantity> requests = container.getResources().getRequests();
assertThat(requests.get("memory").getAmount(), is(memoryRequest));
assertThat(requests.get("cpu").getAmount(), is(cpuRequest));
}
@Test
void testKafkaBridgeStatus() {
String bridgeUrl = KafkaBridgeResources.url(CLUSTER_NAME, NAMESPACE, 8080);
KafkaBridgeResource.kafkaBridge(CLUSTER_NAME, KafkaResources.plainBootstrapAddress(CLUSTER_NAME), 1).done();
KafkaBridgeUtils.waitForKafkaBridgeReady(CLUSTER_NAME);
assertKafkaBridgeStatus(1, bridgeUrl);
// An unschedulable cpu request (100000000m) must drive the bridge NotReady...
KafkaBridgeResource.replaceBridgeResource(CLUSTER_NAME, kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder()
.addToRequests("cpu", new Quantity("100000000m"))
.build()));
KafkaBridgeUtils.waitForKafkaBridgeNotReady(CLUSTER_NAME);
// ...and restoring a sane request (10m) must bring it back Ready.
KafkaBridgeResource.replaceBridgeResource(CLUSTER_NAME, kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder()
.addToRequests("cpu", new Quantity("10m"))
.build()));
KafkaBridgeUtils.waitForKafkaBridgeReady(CLUSTER_NAME);
assertKafkaBridgeStatus(3, bridgeUrl);
}
@Test
@OpenShiftOnly
@Tag(CONNECT_S2I)
@Tag(CONNECT_COMPONENTS)
void testKafkaConnectS2IStatus() {
String connectS2IDeploymentConfigName = KafkaConnectS2IResources.deploymentName(CONNECTS2I_CLUSTER_NAME);
String connectS2IUrl = KafkaConnectS2IResources.url(CONNECTS2I_CLUSTER_NAME, NAMESPACE, 8083);
KafkaConnectS2IResource.kafkaConnectS2I(CONNECTS2I_CLUSTER_NAME, CLUSTER_NAME, 1)
.editMetadata()
.addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
.endMetadata().done();
assertKafkaConnectS2IStatus(1, connectS2IUrl, connectS2IDeploymentConfigName);
// An unschedulable cpu request (100000000m) must drive the cluster NotReady...
KafkaConnectS2IResource.replaceConnectS2IResource(CONNECTS2I_CLUSTER_NAME, kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder()
.addToRequests("cpu", new Quantity("100000000m"))
.build()));
KafkaConnectS2IUtils.waitForConnectS2INotReady(CONNECTS2I_CLUSTER_NAME);
// ...and restoring a sane request (100m) must bring it back Ready.
KafkaConnectS2IResource.replaceConnectS2IResource(CONNECTS2I_CLUSTER_NAME, kb -> kb.getSpec().setResources(new ResourceRequirementsBuilder()
.addToRequests("cpu", new Quantity("100m"))
.build()));
KafkaConnectS2IUtils.waitForConnectS2IReady(CONNECTS2I_CLUSTER_NAME);
assertKafkaConnectS2IStatus(3, connectS2IUrl, connectS2IDeploymentConfigName);
}
// Builds the "truck park" container for a road: environment variables from the
// config map, identical cpu/memory quantities used for both requests and limits,
// and an HTTP liveness probe with config-overridable thresholds.
private Container container(String roadName, List<String> args, Map<String, String> config, String truckParkName) {
List<EnvVar> env = ImmutableList
.<EnvVar> builder()
.add(envFromFieldPath("KUBERNETES_NAMESPACE", "metadata.namespace"))
.add(env("POD_NAME", truckParkName))
.add(env("ENVIRONMENT", environment))
.add(env("JVM_ARGS", config.get(JVM_ARGS)))
.add(env("CLOUDWATCH_REGION", config.get(CLOUDWATCH_REGION)))
.add(env("CLOUDWATCH_GROUP", config.get(CLOUDWATCH_GROUP)))
.add(env("CLOUDWATCH_STREAM", "${KUBERNETES_NAMESPACE}-truck-park-" + roadName))
.build();
// Same map is passed as both limits and requests below (guaranteed QoS style).
Map<String, Quantity> limits = ImmutableMap
.<String, Quantity> builder()
.put(CPU, new Quantity(config.get(CPU)))
.put(MEMORY, new Quantity(config.get(MEMORY)))
.build();
return new ContainerBuilder()
.withName(truckParkName)
.withImage(config.get(DOCKER_IMAGE))
.withArgs(args)
.withEnv(env)
.withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(limits).build())
// Probe hits "/" on the named "http" port; each threshold falls back to a default.
.withLivenessProbe(new ProbeBuilder()
.withHttpGet(new HTTPGetActionBuilder().withPath("/").withPort(new IntOrString("http")).build())
.withInitialDelaySeconds(getConfigOrDefault(config, "livenessInitialDelay", 30))
.withPeriodSeconds(getConfigOrDefault(config, "livenessPeriod", 5))
.withSuccessThreshold(getConfigOrDefault(config, "livenessSuccessThreshold", 1))
.withTimeoutSeconds(getConfigOrDefault(config, "livenessTimeout", 5))
.withFailureThreshold(getConfigOrDefault(config, "livenessFailureThreshold", 3))
.build())
.build();
}
// Builds a resources map from the given memory/cpu strings after environment
// substitution; blank values are simply omitted from the result.
private Map<String, Quantity> getResourcesMap(String memory, String cpu) {
ImmutableMap.Builder<String, Quantity> resources = ImmutableMap.builder();
String resolvedMemory = substituteEnv(memory);
String resolvedCpu = substituteEnv(cpu);
if (StringUtils.isNotBlank(resolvedMemory)) {
resources.put("memory", new Quantity(resolvedMemory));
}
if (StringUtils.isNotBlank(resolvedCpu)) {
resources.put("cpu", new Quantity(resolvedCpu));
}
return resources.build();
}
// Builds the "requests" map (cpu + memory); falls back to the cpu limit when no
// explicit cpu request is set.
// NOTE(review): ImmutableMap.Builder#put rejects null values — this assumes that
// memory and at least one of cpuRequest/cpuLimit are non-null; confirm callers.
public Map<String, Quantity> asResourceRequestQuantityMap() {
final Quantity cpu = cpuRequest == null ? cpuLimit : cpuRequest;
return ImmutableMap.<String, Quantity>builder()
.put("cpu", cpu)
.put("memory", memory)
.build();
}
// Builds the "limits" map: memory always, cpu only when a cpu limit is configured.
public Map<String, Quantity> asResourceLimitQuantityMap() {
final ImmutableMap.Builder<String, Quantity> limits = ImmutableMap.builder();
if (cpuLimit != null) {
limits.put("cpu", cpuLimit);
}
limits.put("memory", memory);
return limits.build();
}
// Jackson creator for RabbitMQ storage settings.
// NOTE(review): the JSON property is named "limit" while the field is "storage" —
// confirm this mismatch is intentional and matches the external schema.
@JsonCreator
public RabbitMQStorageResources(
@JsonProperty("storageClassName") final String storageClassName,
@JsonProperty("limit") final Quantity storage
) {
this.storageClassName = storageClassName;
this.storage = storage;
}
/**
 * Get requests or limit objects from string hashmaps.
 *
 * @param quantity hashmap of strings; may be null or empty
 * @return hashmap of string to quantity (empty when the input is null or empty)
 */
public static Map<String, Quantity> getQuantityFromString(Map<String, String> quantity) {
Map<String, Quantity> converted = new HashMap<>();
if (quantity == null) {
return converted;
}
// An empty input map simply falls through and yields an empty result.
quantity.forEach((name, amount) -> converted.put(name, new Quantity(amount)));
return converted;
}
/**
 * Convert hard limits map.
 * Each raw value (e.g. "100Mi", "1.5Gi", "2") is split into a numeric amount and
 * a unit suffix for the {@link QuantityBuilder}.
 * @param hard Hard limit map from model.
 * @return Converted map.
 */
private Map<String, Quantity> getHard(Map<String, String> hard) {
return hard.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, hardEntry -> {
String raw = hardEntry.getValue().trim();
// Scan past an optional sign, then digits and the decimal point, to find
// where the amount ends. The previous replaceAll("\\D+", "") approach
// corrupted decimal values such as "1.5Gi" (amount "15", format "1.5Gi").
int end = 0;
if (end < raw.length() && (raw.charAt(end) == '+' || raw.charAt(end) == '-')) {
end++;
}
while (end < raw.length() && (Character.isDigit(raw.charAt(end)) || raw.charAt(end) == '.')) {
end++;
}
String amount = raw.substring(0, end);
String format = raw.substring(end);
return new QuantityBuilder()
.withAmount(amount)
.withFormat(format)
.build();
}));
}
// Serializes the given volume claim model as a PersistentVolumeClaim YAML file.
// Any I/O failure is wrapped into a KubernetesPluginException with the claim name.
private void generate(PersistentVolumeClaimModel volumeClaimModel) throws KubernetesPluginException {
// Storage request built from the model's explicit amount/format pair.
Quantity quantity = new QuantityBuilder()
.withAmount(volumeClaimModel.getVolumeClaimSizeAmount())
.withFormat(volumeClaimModel.getVolumeClaimSizeFormat())
.build();
Map<String, Quantity> requests = new HashMap<>();
requests.put("storage", quantity);
PersistentVolumeClaim claim = new PersistentVolumeClaimBuilder()
.withNewMetadata()
.withName(volumeClaimModel.getName())
.withNamespace(dataHolder.getNamespace())
.withAnnotations(volumeClaimModel.getAnnotations())
.endMetadata()
.withNewSpec()
.withAccessModes(volumeClaimModel.getAccessMode())
.withNewResources()
.withRequests(requests)
.endResources()
.endSpec()
.build();
try {
// YAML is dumped without runtime state and written next to the other artifacts.
String claimContent = SerializationUtils.dumpWithoutRuntimeStateAsYaml(claim);
KubernetesUtils.writeToFile(claimContent,
VOLUME_CLAIM_FILE_POSTFIX + YAML);
} catch (IOException e) {
String errorMessage = "error while generating yaml file for volume claim: " + volumeClaimModel.getName();
throw new KubernetesPluginException(errorMessage, e);
}
}
@Test
public void testMainContainerResourceRequirements() {
final ResourceRequirements resourceRequirements = this.resultMainContainer.getResources();
// Requests: cpu from the task manager cpu config, memory from the total process memory.
final Map<String, Quantity> requests = resourceRequirements.getRequests();
assertEquals(Double.toString(TASK_MANAGER_CPU), requests.get("cpu").getAmount());
assertEquals(String.valueOf(TOTAL_PROCESS_MEMORY), requests.get("memory").getAmount());
// Limits mirror the requests exactly.
final Map<String, Quantity> limits = resourceRequirements.getLimits();
assertEquals(Double.toString(TASK_MANAGER_CPU), limits.get("cpu").getAmount());
assertEquals(String.valueOf(TOTAL_PROCESS_MEMORY), limits.get("memory").getAmount());
}
// Merges one resource map (limits or requests, selected by resourceTypeMapper)
// from the template and parent containers, skipping null requirements/maps.
// Template entries are streamed first, so on duplicate keys the template's value
// is kept: in the merge function (v1, v2) -> v1, v2 is the parent's value and loses.
private static Map<String, Quantity> combineResources(Container parent, Container template,
Function<ResourceRequirements,
Map<String, Quantity>> resourceTypeMapper) {
return Stream.of(template.getResources(), parent.getResources()) //
.filter(Objects::nonNull) //
.map(resourceTypeMapper) //
.filter(Objects::nonNull) //
.map(Map::entrySet) //
.flatMap(Collection::stream) //
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> v1) // v2 (parent) loses
);
}