Commit 689dfc0d authored by Pedro Eduardo Trujillo's avatar Pedro Eduardo Trujillo
Browse files

Initial commit

parents
Loading
Loading
Loading
Loading

.dockerignore

0 → 100644
+2 −0
Original line number Diff line number Diff line
# Exclude everything from the Docker build context except scripts/ (allow-list style).
*
!/scripts

.gitignore

0 → 100644
+5 −0
Original line number Diff line number Diff line
# Ignore all dotfiles, then re-include the specific ones the project tracks.
.*
!.gitignore
!.gitlab-ci.yml
!.dockerignore
!.env

.gitlab-ci.yml

0 → 100644
+511 −0
Original line number Diff line number Diff line
# Default image for every job that does not override `image:`.
image: redmic/docker-gitlab

# Pipeline phases: build images, scan them, then deploy each platform tier.
stages:
  - package
  - test-package
  - deploy

variables:
  # Tag used for all stock Confluent service images.
  # NOTE(review): `latest` is a moving target — pinning a concrete version
  # would make deployments reproducible; confirm before changing.
  CONFLUENT_VERSION: latest

# Build the connector-supplier image for feature branches, tagging it with the
# immutable commit SHA plus a moving <branch>-latest tag.
docker-build-dev:
  stage: package
  image: redmic/docker-gitlab
  variables:
    DOCKER_DRIVER: overlay2
    IMAGE_NAME: ${CI_REGISTRY_IMAGE}/connector-supplier
  services:
    - docker:dind
  only:
    - branches
  except:
    - master
  script:
    # --password-stdin keeps the job token out of the process list and job log.
    - echo "${CI_JOB_TOKEN}" | docker login -u gitlab-ci-token --password-stdin ${CI_REGISTRY}
    - docker build -t ${IMAGE_NAME}:${CI_COMMIT_SHA} -t ${IMAGE_NAME}:${CI_COMMIT_REF_NAME}-latest .
    # Pushing the bare repository name uploads every tag created above.
    - docker push ${IMAGE_NAME}

# Same build for master; the moving tag is :latest instead of a branch tag.
docker-build-pro:
  stage: package
  image: redmic/docker-gitlab
  variables:
    DOCKER_DRIVER: overlay2
    IMAGE_NAME: ${CI_REGISTRY_IMAGE}/connector-supplier
  services:
    - docker:dind
  only:
    - master
  script:
    # --password-stdin keeps the job token out of the process list and job log.
    - echo "${CI_JOB_TOKEN}" | docker login -u gitlab-ci-token --password-stdin ${CI_REGISTRY}
    - docker build -t ${IMAGE_NAME}:${CI_COMMIT_SHA} -t ${IMAGE_NAME}:latest .
    - docker push ${IMAGE_NAME}

# Clair-based vulnerability scan of the image built for this commit.
# Non-blocking: findings are reported as an artifact but never fail the pipeline.
container_scanning:
  stage: test-package
  image: docker:stable
  variables:
    DOCKER_DRIVER: overlay2
    IMAGE_NAME: ${CI_REGISTRY_IMAGE}/connector-supplier
  allow_failure: true
  services:
    - docker:stable-dind
  only:
    - branches
  except:
    - master
  script:
    # Start Clair's vulnerability DB and the scan API on the dind host.
    - docker run -d --name db arminc/clair-db:latest
    - docker run -p 6060:6060 --link db:postgres -d --name clair arminc/clair-local-scan:v2.0.1
    - apk add -U wget ca-certificates
    # --password-stdin keeps the job token out of the process list and job log.
    - echo "${CI_JOB_TOKEN}" | docker login -u gitlab-ci-token --password-stdin ${CI_REGISTRY}
    - docker pull ${IMAGE_NAME}:${CI_COMMIT_SHA}
    - wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
    - mv clair-scanner_linux_amd64 clair-scanner
    - chmod +x clair-scanner
    # Empty whitelist: report every finding.
    - touch clair-whitelist.yml
    # `|| true`: the JSON report artifact is wanted even when vulnerabilities exist.
    - ./clair-scanner -c http://docker:6060 --ip $(hostname -i) -r gl-sast-container-report.json -l clair.log -w clair-whitelist.yml ${IMAGE_NAME}:${CI_COMMIT_SHA} || true
  artifacts:
    paths:
      - gl-sast-container-report.json

# Manual deploy of the three-node Zookeeper ensemble to the dev host.
deploy-zookeeper-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.tmpl.yml:docker-compose.dev.yml
  services:
    - docker:dind
  script:
    - create-nets.sh kafka-net
    - cd zookeeper
    # Roll out each ensemble member in turn; deploy.sh reads SERVICE from the environment.
    - for node in zk-1 zk-2 zk-3; do export SERVICE=${node}; deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE}; done
  environment:
    name: dev/zookeeper
  only:
    - branches
  except:
    - master
  when: manual

# Manual deploy of the three-node Zookeeper ensemble to the production host.
deploy-zookeeper-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.tmpl.yml:docker-compose.prod.yml
  services:
    - docker:dind
  script:
    - create-nets.sh kafka-net
    - cd zookeeper
    # Roll out each ensemble member in turn; deploy.sh reads SERVICE from the environment.
    - for node in zk-1 zk-2 zk-3; do export SERVICE=${node}; deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE}; done
  environment:
    name: pro/zookeeper
  only:
    - master
  when: manual

# Manual deploy of the three Kafka brokers to the dev host.
deploy-kafka-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.tmpl.yml:docker-compose.dev.yml
    PUBLIC_HOSTNAME: ${DEV_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd kafka
    # Roll out each broker in turn; deploy.sh reads SERVICE from the environment.
    - for broker in kf-1 kf-2 kf-3; do export SERVICE=${broker}; deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME}; done
  environment:
    name: dev/kafka
  only:
    - branches
  except:
    - master
  when: manual

# Manual deploy of the three Kafka brokers to the production host.
deploy-kafka-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.tmpl.yml:docker-compose.prod.yml
  services:
    - docker:dind
  script:
    - cd kafka
    # Roll out each broker in turn; deploy.sh reads SERVICE from the environment.
    - for broker in kf-1 kf-2 kf-3; do export SERVICE=${broker}; deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE}; done
  environment:
    name: pro/kafka
  only:
    - master
  when: manual

# Manual deploy of the Kafka Connect worker to the dev host.
deploy-connect-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    # Remote host targeted by deploy.sh.
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    SERVICE: connect
    IMAGE_TAG: ${CONFLUENT_VERSION}
    # NOTE(review): relies on same-block expansion of ${SERVICE} — confirm the
    # GitLab version in use expands variable references inside `variables:`.
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.dev.yml
  services:
    - docker:dind
  script:
    - cd workers
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE}
  environment:
    name: dev/${SERVICE}
  only:
    - branches
  except:
    - master
  when: manual

# Same deploy for production, driven from master with the prod compose overlay.
deploy-connect-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    SERVICE: connect
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.prod.yml
  services:
    - docker:dind
  script:
    - cd workers
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE}
  environment:
    name: pro/${SERVICE}
  only:
    - master
  when: manual

# Manual deploy of the project's own connector-supplier image (built in the
# package stage) to the dev host; it registers connectors against Connect.
deploy-connector-supplier-dev:
  stage: deploy
  image: redmic/docker-gitlab
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    SERVICE: connector-supplier
    IMAGE_NAME: ${DOCKER_ENV_CI_REGISTRY_IMAGE}
    # Deploy exactly the image built from this commit.
    IMAGE_TAG: ${CI_COMMIT_SHA}
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.dev.yml
    # Connect REST endpoint the supplier registers connectors against.
    CONNECT_ADDRS: connect:8083
  services:
    - docker:dind
  script:
    - cd workers
    # NOTE(review): POSTGRES_USER/POSTGRES_PASS are passed as command-line
    # arguments — presumably echoed into the job log by deploy.sh; verify they
    # are masked in CI/CD variable settings.
    - deploy.sh IMAGE_NAME=${IMAGE_NAME} IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} CONNECT_ADDRS=${CONNECT_ADDRS} POSTGRES_USER=${POSTGRES_USER} POSTGRES_PASS=${POSTGRES_PASS}
  environment:
    name: dev/${SERVICE}
  only:
    - branches
  except:
    - master
  when: manual

# Same deploy for production, driven from master with the prod compose overlay.
deploy-connector-supplier-pro:
  stage: deploy
  image: redmic/docker-gitlab
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    SERVICE: connector-supplier
    IMAGE_NAME: ${DOCKER_ENV_CI_REGISTRY_IMAGE}
    IMAGE_TAG: ${CI_COMMIT_SHA}
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.prod.yml
    CONNECT_ADDRS: connect:8083
  services:
    - docker:dind
  script:
    - cd workers
    - deploy.sh IMAGE_NAME=${IMAGE_NAME} IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} CONNECT_ADDRS=${CONNECT_ADDRS} POSTGRES_USER=${POSTGRES_USER} POSTGRES_PASS=${POSTGRES_PASS}
  environment:
    name: pro/${SERVICE}
  only:
    - master
  when: manual

# Manual deploy of the Confluent Schema Registry to the dev host.
deploy-schema-registry-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    SERVICE: schema-registry
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.dev.yml
  services:
    - docker:dind
  script:
    - cd workers
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE}
  environment:
    name: dev/${SERVICE}
  only:
    - branches
  except:
    - master
  when: manual

# Same deploy for production, driven from master with the prod compose overlay.
deploy-schema-registry-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    SERVICE: schema-registry
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.prod.yml
  services:
    - docker:dind
  script:
    - cd workers
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE}
  environment:
    name: pro/${SERVICE}
  only:
    - master
  when: manual

# Manual deploy of the Kafka REST proxy to the dev host.
deploy-kafka-rest-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    SERVICE: kafka-rest
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.dev.yml
    # Public hostname handed to deploy.sh for the externally reachable endpoint.
    PUBLIC_HOSTNAME: ${DEV_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd workers
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME}
  environment:
    name: dev/${SERVICE}
  only:
    - branches
  except:
    - master
  when: manual

# Same deploy for production, driven from master with the prod compose overlay.
deploy-kafka-rest-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    SERVICE: kafka-rest
    IMAGE_TAG: ${CONFLUENT_VERSION}
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.prod.yml
    PUBLIC_HOSTNAME: ${PRO_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd workers
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME}
  environment:
    name: pro/${SERVICE}
  only:
    - master
  when: manual

# Manual deploy of the Kafka Manager UI to the dev host.
deploy-kafka-manager-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    IMAGE_TAG: latest
    SERVICE: kafka-manager
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.dev.yml
    PUBLIC_HOSTNAME: ${DEV_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd uis
    # Pass PUBLIC_HOSTNAME explicitly, matching the pro job and the other UI
    # dev jobs — the environment URL below presumably depends on it being
    # templated into the compose stack; previously it was defined but never
    # forwarded to deploy.sh.
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME}
  environment:
    name: dev/${SERVICE}
    url: https://kafkamanager.${PUBLIC_HOSTNAME}
  only:
    - branches
  except:
    - master
  when: manual

# Manual deploy of the Kafka Manager UI to the production host.
deploy-kafka-manager-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    IMAGE_TAG: latest
    SERVICE: kafka-manager
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.prod.yml
    PUBLIC_HOSTNAME: ${PRO_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd uis
    # UI_AUTH is only passed in production — presumably access credentials for
    # the public UI; verify against deploy.sh.
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME} UI_AUTH=${UI_AUTH}
  environment:
    name: pro/${SERVICE}
    url: https://kafkamanager.${PUBLIC_HOSTNAME}
  only:
    - master
  when: manual

# Manual deploy of the Kafka Topics UI to the dev host.
deploy-kafka-topics-ui-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    IMAGE_TAG: latest
    SERVICE: kafka-topics-ui
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.dev.yml
    PUBLIC_HOSTNAME: ${DEV_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd uis
    # Pass PUBLIC_HOSTNAME explicitly, matching the pro job and the
    # kafka-connect-ui dev job — the environment URL below presumably depends
    # on it being templated into the compose stack; previously it was defined
    # but never forwarded to deploy.sh.
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME}
  environment:
    name: dev/${SERVICE}
    url: https://kafkatopicsui.${PUBLIC_HOSTNAME}
  only:
    - branches
  except:
    - master
  when: manual

# Manual deploy of the Kafka Topics UI to the production host.
deploy-kafka-topics-ui-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    IMAGE_TAG: latest
    SERVICE: kafka-topics-ui
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.prod.yml
    PUBLIC_HOSTNAME: ${PRO_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd uis
    # UI_AUTH is only passed in production — presumably access credentials for
    # the public UI; verify against deploy.sh.
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME} UI_AUTH=${UI_AUTH}
  environment:
    name: pro/${SERVICE}
    url: https://kafkatopicsui.${PUBLIC_HOSTNAME}
  only:
    - master
  when: manual

# Manual deploy of the Kafka Connect UI to the dev host.
deploy-kafka-connect-ui-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    IMAGE_TAG: latest
    SERVICE: kafka-connect-ui
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.dev.yml
    PUBLIC_HOSTNAME: ${DEV_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd uis
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME}
  environment:
    name: dev/${SERVICE}
    url: https://kafkaconnectui.${PUBLIC_HOSTNAME}
  only:
    - branches
  except:
    - master
  when: manual

# Same deploy for production; additionally forwards UI_AUTH — presumably access
# credentials for the public UI; verify against deploy.sh.
deploy-kafka-connect-ui-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    IMAGE_TAG: latest
    SERVICE: kafka-connect-ui
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.prod.yml
    PUBLIC_HOSTNAME: ${PRO_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd uis
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME} UI_AUTH=${UI_AUTH}
  environment:
    name: pro/${SERVICE}
    url: https://kafkaconnectui.${PUBLIC_HOSTNAME}
  only:
    - master
  when: manual

# Manual deploy of the Schema Registry UI to the dev host.
deploy-schema-registry-ui-dev:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${DEV_SSH_REMOTE}
    IMAGE_TAG: latest
    SERVICE: schema-registry-ui
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.dev.yml
    PUBLIC_HOSTNAME: ${DEV_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd uis
    # Pass PUBLIC_HOSTNAME explicitly, matching the pro job and the
    # kafka-connect-ui dev job — the environment URL below presumably depends
    # on it being templated into the compose stack; previously it was defined
    # but never forwarded to deploy.sh.
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME}
  environment:
    name: dev/${SERVICE}
    url: https://schemaregistryui.${PUBLIC_HOSTNAME}
  only:
    - branches
  except:
    - master
  when: manual

# Manual deploy of the Schema Registry UI to the production host.
deploy-schema-registry-ui-pro:
  stage: deploy
  variables:
    DOCKER_DRIVER: overlay2
    SSH_REMOTE: ${PRO_SSH_REMOTE}
    IMAGE_TAG: latest
    SERVICE: schema-registry-ui
    COMPOSE_FILE: docker-compose.${SERVICE}.tmpl.yml:docker-compose.${SERVICE}.prod.yml
    PUBLIC_HOSTNAME: ${PRO_PUBLIC_HOSTNAME}
  services:
    - docker:dind
  script:
    - cd uis
    # UI_AUTH is only passed in production — presumably access credentials for
    # the public UI; verify against deploy.sh.
    - deploy.sh IMAGE_TAG=${IMAGE_TAG} COMPOSE_FILE=${COMPOSE_FILE} PUBLIC_HOSTNAME=${PUBLIC_HOSTNAME} UI_AUTH=${UI_AUTH}
  environment:
    name: pro/${SERVICE}
    url: https://schemaregistryui.${PUBLIC_HOSTNAME}
  only:
    - master
  when: manual

Dockerfile

0 → 100644
+7 −0
Original line number Diff line number Diff line
# Small image whose only job is to run /docker-entrypoint.sh against a
# Kafka Connect worker (the script comes from scripts/ below).
FROM ellerbrock/alpine-bash-curl-ssl

# Connect endpoint(s) handed to the entrypoint; overridable at run time.
ENV CONNECT_ADDRS="connect:8083"

# Entrypoint script and connector definitions.
COPY scripts/ /

# A shell wrapper is required to expand ${CONNECT_ADDRS}; `exec` replaces the
# shell so the entrypoint runs as PID 1 and receives container stop signals
# directly (without it, some shells keep sh as PID 1 and swallow SIGTERM).
ENTRYPOINT ["sh", "-c", "exec /docker-entrypoint.sh ${CONNECT_ADDRS}"]

README.md

0 → 100644
+98 −0
Original line number Diff line number Diff line
# Confluent platform

Este proyecto contiene la configuración y despliegue de la plataforma Confluent, para usar **Apache Kafka**.

## Descripción

Se compone de una serie de ficheros *docker-compose*, organizados por niveles:

* zookeeper: Lanza servicios de **Apache Zookeeper** para coordinar a los otros servicios (brokers y workers).
* kafka: Lanza servicios de **Apache Kafka**, que actúan como brokers para comunicar unos servicios con otros.
* workers: Lanza servicios que explotan la red de Kafka y sirven de apoyo a otros servicios.
* uis: Lanza servicios que permiten gestionar visualmente la red de Kafka y sus servicios.

Es importante, en un despliegue desde cero, seguir el orden por niveles expuesto en el listado anterior. Dentro de cada nivel, se puede seguir el orden que se prefiera, no debería haber ningún problema.
Los despliegues, tanto en desarrollo como en producción, se han de ejecutar manualmente (aunque se preparan las tareas automáticamente para ello) desde este repositorio en GitLab.

Hay un caso especial dentro del nivel de *workers*, el servicio **connector-supplier**. Se trata de una imagen propia, encargada de registrar *conectores* en el servicio *connect* de Confluent.
Para definir nuevos conectores, han de añadirse al directorio *scripts* de este repositorio.

Se pueden configurar muchos parámetros de la plataforma a base de variables de entorno. Cada nivel cuenta con un fichero *.env* que define valores por defecto para dichas variables, pero se pueden asignar desde fuera (por ejemplo, desde *.gitlab-ci.yml*).

## Comandos útiles

A continuación, una serie de comandos que pueden resultar de interés como ejemplo.

### Gestión de conectores

```
// primero, entrar a contenedor con acceso a servicio de connect

// crear conector hacia S3
curl -s -X POST -H "Content-Type: application/json" --data \
'{"name": "s3-sink", "config": {"connector.class": "io.confluent.connect.s3.S3SinkConnector", "tasks.max": "1", "topics": "s3_topic", "s3.region": "eu-west-1", "s3.bucket.name": "mediastorage.redmicdev", "s3.part.size": "5242880", "flush.size": "3", "storage.class": "io.confluent.connect.s3.storage.S3Storage", "format.class": "io.confluent.connect.s3.format.avro.AvroFormat", "schema.generator.class": "io.confluent.connect.storage.hive.schema.DefaultSchemaGenerator", "partitioner.class": "io.confluent.connect.storage.partitioner.DefaultPartitioner", "schema.compatibility": "FULL", "name": "s3-sink"}}' \
http://$CONNECT_HOST:8083/connectors

// consultar estado de conector
curl -s -X GET http://$CONNECT_HOST:8083/connectors/s3-sink/status

// eliminar conector
curl -X DELETE $CONNECT_HOST:8083/connectors/s3-sink
```

### Pruebas con esquemas Avro y registro

```
// primero, entrar a contenedor con acceso a servicio de schema-registry

// publicar nuevo esquema 'prueba' al registro
curl -X POST -H "Content-Type: application/vnd.schemaregistry.v1+json" \
--data '{"schema": "{\"type\":\"record\",\"name\":\"MessageWrapper\",\"namespace\":\"es.redmic.brokerlib.dto\",\"fields\":[{\"name\":\"content\",\"type\":{\"type\":\"record\",\"name\":\"Object\",\"namespace\":\"java.lang\",\"fields\":[]}},{\"name\":\"userId\",\"type\":\"string\"},{\"name\":\"actionId\",\"type\":[\"string\",\"null\"]}]}"}' \
http://schema-registry:8081/subjects/prueba/versions

// obtener esquemas presentes en el registro
curl -X GET http://schema-registry:8081/subjects
```

Se puede producir y consumir mensajes con las utilidades de Confluent:

```
// primero, entrar a contenedor de schema-registry

// producir mensajes en formato Avro desde consola
kafka-avro-console-producer --broker-list kf-1:9092 \
--property schema.registry.url=http://schema-registry:8081 \
--topic s3_topic --property value.schema=\
'{"type":"record","name":"myrecord","fields":[{"name":"f1","type":"string"}]}'

// valores de ejemplo para poner en consola, tras ejecutar el comando anterior
{"f1": "value1"}
{"f1": "value2"}
{"f1": "value3"}

// consumir mensajes en formato Avro desde consola - ojo, falla magic byte!
kafka-avro-console-consumer --bootstrap-server kf-1:9092 \
--property schema.registry.url=http://schema-registry:8081 \
--topic realtime.tracking.vessels
```

O usar la utilidad *kafkacat*:

```
// primero, entrar a contenedor con acceso a servicio de schema-registry ...
kafkacat -P -b kf-1:9092,kf-2:9092,kf-3:9092 -t prueba -K %

// ... o usar esta imagen Docker unida a la red donde está schema-registry
docker run --network kafka-net -it ryane/kafkacat kafkacat -P -b kf-1:9092,kf-2:9092,kf-3:9092 -t prueba -K %

// valores de ejemplo para poner en consola, tras ejecutar uno de los comandos anteriores
clave%valor1
clave%valor2
otraclave%otrovalor

// se puede consumir desde contenedor con acceso a servicio de schema-registry ...
kafkacat -C -b kf-1:9092,kf-2:9092,kf-3:9092 -t prueba -K %

// ... o desde esta imagen Docker unida a la red donde está schema-registry
docker run --network kafka-net -it ryane/kafkacat kafkacat -C -b kf-1:9092,kf-2:9092,kf-3:9092 -t prueba -K %
```