Sample configuration for AWS with an ALB Ingress controller

Download this sample ALB Ingress configuration provided by ITRS for installations with High Availability (HA) disabled.

# Example Obcerv configuration for AWS with ALB ingress controller.
#
# Intended for demo/micro installations with HA disabled.
#
# The resource requests total ~20 cores and ~40GiB memory,
# and include Linkerd resources.
#
# Disk requirements:
# - Timescale:
#   - 500 GiB data disk
#   - 30 GiB WAL disk
# - Kafka: 100 GiB
# - Loki: 10 GiB
# - Zookeeper: 1 GiB
# - etcd: 1 GiB
# - Downsampled Metrics:
#   - Raw: 5 GiB
#   - Bucketed: 5 GiB
#
# The AWS Load Balancer Controller is required in order to support external ingestion.  This example assumes version
# 2.3.0 or later is installed. See https://kubernetes-sigs.github.io/aws-load-balancer-controller/.
#
# The AWS Load Balancer Controller requires annotations for each ingress configured below.
# Be sure to change the certificate ARN and group names.  The group name can be any unique value (for example
# use the same value you set for externalHostname) but it must be the same for the `apps` and `iam` ingresses.
#
# The `alb.ingress.kubernetes.io/target-type` annotation controls how traffic is routed to pods.  The simplest
# option ("ip") is used below.  If this is not supported in your cluster, the default setting of "instance" must be
# used instead and all services backed by each ingress must be changed to NodePort instead of the default ClusterIP.
#

# Storage class used for every persistent volume below (gp3 is the AWS EBS default-tier SSD).
defaultStorageClass: "gp3"
apps:
  # Hostname the application ingress is served from; the header notes suggest reusing
  # this value as the ALB group name.
  externalHostname: "obcerv.mydomain.internal"
  ingress:
    annotations:
      # NOTE(review): `kubernetes.io/ingress.class` is deprecated upstream in favor of
      # `spec.ingressClassName`; confirm which mechanism this chart exposes before changing.
      kubernetes.io/ingress.class: alb
      # Replace with the ARN of your ACM certificate (see header notes).
      alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:...
      # Must match the group.name on the `iam` ingress so both share one ALB.
      alb.ingress.kubernetes.io/group.name: obcerv.mydomain.internal
      # Listen on plain HTTP 80 and HTTPS 443; 80 exists only to be redirected below.
      alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
      alb.ingress.kubernetes.io/scheme: internet-facing
      # Redirect HTTP traffic to HTTPS on 443.
      alb.ingress.kubernetes.io/ssl-redirect: "443"
      # "ip" routes straight to pod IPs; see the header notes for the
      # "instance"/NodePort alternative if "ip" is unsupported in your cluster.
      alb.ingress.kubernetes.io/target-type: ip
ingestion:
  # Hostname external data sources send to; uses its own ALB (no group.name set here).
  externalHostname: "obcerv-ingestion.mydomain.internal"
  # Single replica — consistent with the demo/micro, HA-disabled sizing of this example.
  replicas: 1
  ingress:
    annotations:
      kubernetes.io/ingress.class: alb
      # Ingestion is gRPC, so the ALB must speak GRPC to the backend targets.
      alb.ingress.kubernetes.io/backend-protocol-version: GRPC
      # Replace with the ARN of your ACM certificate (see header notes).
      alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:...
      # HTTPS only — no port-80 listener for the ingestion endpoint.
      alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]'
      alb.ingress.kubernetes.io/scheme: internet-facing
      alb.ingress.kubernetes.io/target-type: ip
  resources:
    # Requests equal limits, so the pod gets a fixed CPU/memory allocation.
    requests:
      memory: "512Mi"
      cpu: "500m"
    limits:
      memory: "512Mi"
      cpu: "500m"
iam:
  ingress:
    annotations:
      kubernetes.io/ingress.class: alb
      # Replace with the ARN of your ACM certificate (see header notes).
      alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:...
      # Must match the group.name on the `apps` ingress so both share one ALB.
      alb.ingress.kubernetes.io/group.name: obcerv.mydomain.internal
      # NOTE(review): unlike `apps`, no scheme or listen-ports annotations are set here —
      # presumably those come from the shared ingress group; verify against the
      # AWS Load Balancer Controller docs before relying on the redirect.
      alb.ingress.kubernetes.io/ssl-redirect: "443"
      alb.ingress.kubernetes.io/target-type: ip
# Single-node Zookeeper — matches the HA-disabled intent of this example.
zookeeper:
  replicas: 1
  resources:
    requests:
      memory: "256Mi"
      cpu: "200m"
    limits:
      memory: "512Mi"
      cpu: "200m"
# Single Kafka broker with the 100 GiB disk called out in the header sizing notes.
kafka:
  replicas: 1
  diskSize: "100Gi"
  consumer:
    # Consumer fetch tuning: wait up to 250 ms or until 512 KiB accumulates per fetch.
    fetchMaxWaitMs: 250
    fetchMinBytes: 524288
  resources:
    requests:
      memory: "3Gi"
      cpu: "1"
    # CPU limit is 2x the request, allowing bursts above the guaranteed core.
    limits:
      memory: "3Gi"
      cpu: "2"
timescale:
  # Disk sizes match the header notes: 500 GiB data + 30 GiB WAL.
  dataDiskSize: "500Gi"
  walDiskSize: "30Gi"
  resources:
    requests:
      memory: "14Gi"
      cpu: "2"
    limits:
      memory: "14Gi"
      cpu: "4"
  # Chunks older than 3 hours become eligible for compression.
  compressAfter: 3h
  # Per-hypertable chunk interval and retention window.
  # Short-interval tables (raw metrics) use small chunks; downsampled/slow-changing
  # tables use larger chunks and longer retention.
  retention:
    entity_attributes:
      chunkSize: 2d
      retention: 1y
    metrics:
      chunkSize: 8h
      retention: 30d
    metrics_5m:
      chunkSize: 1d
      retention: 90d
    metrics_1h:
      chunkSize: 5d
      retention: 180d
    metrics_1d:
      chunkSize: 20d
      retention: 1y
    statuses:
      chunkSize: 7d
      retention: 1y
    signal_details:
      chunkSize: 7d
      retention: 30d
# Log storage — 10 GiB as per the header sizing notes.
loki:
  diskSize: "10Gi"
sinkd:
  # Separate replica counts and resource specs for the regular and raw sink paths;
  # both are sized identically here.
  replicas: 1
  rawReplicas: 1
  resources:
    requests:
      memory: "1Gi"
      cpu: "250m"
    limits:
      memory: "1Gi"
      cpu: "400m"
  rawResources:
    requests:
      memory: "1Gi"
      cpu: "250m"
    limits:
      memory: "1Gi"
      cpu: "400m"
platformd:
  replicas: 1
  resources:
    requests:
      memory: "1536Mi"
      cpu: "1"
    limits:
      memory: "2Gi"
      cpu: "1500m"
dpd:
  replicas: 1
  # JVM heap capped at 2G with a 1G young generation — leaves headroom below the
  # 3500Mi container memory limit for off-heap/metaspace usage.
  jvmOpts: "-Xmx2G -XX:NewSize=1G"
  metricsMultiplexer:
    maxFilterResultCacheSize: 200000
    maxConcurrentOps: 100
    localParallelism: 6
  # Self-monitoring alert thresholds on metrics partition consumer lag.
  selfMonitoringThresholds:
    metrics_partition_lag_warn: 100000
    metrics_partition_lag_critical: 500000
  resources:
    requests:
      memory: "3Gi"
      cpu: "2"
    limits:
      memory: "3500Mi"
      cpu: "3"
metricForecastd:
  resources:
    requests:
      memory: "512Mi"
      cpu: "250m"
    limits:
      memory: "768Mi"
      cpu: "500m"
downsampledMetricsStream:
  # Separate replica counts/resources for the raw and bucketed downsampling stages
  # (the 5 GiB disks for each are listed in the header notes).
  replicas: 1
  bucketedReplicas: 1
  # Heap sized as a percentage of the container memory limit rather than a fixed -Xmx.
  jvmOpts: "-XX:InitialRAMPercentage=50 -XX:MaxRAMPercentage=50"
  resources:
    requests:
      memory: "1Gi"
      cpu: "750m"
    limits:
      memory: "1536Mi"
      cpu: "1"
  bucketedResources:
    requests:
      memory: "1536Mi"
      cpu: "1"
    limits:
      memory: "1536Mi"
      cpu: "1500m"
entityStream:
  # Two processing stages with independent resource envelopes.
  intermediate:
    resources:
      requests:
        memory: "768Mi"
        cpu: "300m"
      limits:
        memory: "1Gi"
        cpu: "500m"
  final:
    resources:
      requests:
        memory: "512Mi"
        cpu: "300m"
      limits:
        memory: "1536Mi"
        cpu: "500m"
signalsStream:
  resources:
    requests:
      memory: "512Mi"
      cpu: "150m"
    limits:
      memory: "768Mi"
      cpu: "300m"
# Single-node etcd (1 GiB disk per the header notes) — HA disabled.
etcd:
  replicas: 1
collection:
  metrics:
    resources:
      requests:
        memory: "768Mi"
        cpu: "200m"
      limits:
        memory: "1Gi"
        cpu: "250m"
Obcerv — User Guide, Technical Reference

Was this topic helpful?