From f38b9c6f74c7be0437c9b5b3bd484efb2fc35d49 Mon Sep 17 00:00:00 2001 From: Daniel Wagner-Hall Date: Fri, 14 Nov 2025 17:31:10 +0000 Subject: [PATCH] Add event-driven architecture track This is mostly moving over content from https://github.com/CodeYourFuture/immersive-go-course/tree/main/projects/kafka-cron which was mostly written by Laura Nolan for CYF+. I have made the following notable changes: * Mixed the primer and project together into prep sections, and broken them down into much smaller chunks. * Added learning objectives to each chunk, particularly for guiding the reading material but also for everything. * Added a section on tracing (Fixes https://github.com/CodeYourFuture/immersive-go-course/issues/107), partially by taking what exists from the RAFT-Otel project, and partially writing some new content. * Adding backlog exercises (which are all basically "Do the project from prep" and "Give a demo"). Co-authored-by: Laura Nolan Co-authored-by: Radha Kumari Co-authored-by: Sally McGrath --- .../distributed-tracing/introduction/index.md | 26 ++++ .../using-honeycomb/index.md | 42 +++++++ .../event-driven/kafka-in-a-nutshell/index.md | 27 +++++ .../module/event-driven/kafka-paper/index.md | 23 ++++ .../event-driven/project/alerting/index.md | 51 ++++++++ .../module/event-driven/project/cron/index.md | 43 +++++++ .../project/distributed-tracing/index.md | 31 +++++ .../project/distributing-with-kafka/index.md | 50 ++++++++ .../project/handling-errors/index.md | 41 +++++++ .../event-driven/project/intro/index.md | 27 +++++ .../event-driven/project/monitoring/index.md | 112 ++++++++++++++++++ .../project/multiple-queues/index.md | 31 +++++ .../project/running-commands/index.md | 30 +++++ .../en/module/event-driven/queues/index.md | 30 +++++ .../event-driven-architecture/_index.md | 13 ++ .../sprints/1/_index.md | 8 ++ .../sprints/1/backlog/index.md | 8 ++ .../sprints/1/prep/index.md | 21 ++++ .../sprints/1/success/index.md | 8 ++ .../sprints/2/_index.md | 8 ++ .../sprints/2/backlog/index.md | 8 ++ .../sprints/2/prep/index.md | 12 ++ .../sprints/2/success/index.md | 8 ++ .../sprints/3/_index.md | 8 ++ .../sprints/3/backlog/index.md | 8 ++ .../sprints/3/prep/index.md | 12 ++ .../sprints/3/success/index.md | 8 ++ .../sprints/4/_index.md | 8 ++ .../sprints/4/backlog/index.md | 8 ++ .../sprints/4/prep/index.md | 12 ++ .../sprints/4/success/index.md | 8 ++ .../sprints/5/_index.md | 8 ++ .../sprints/5/backlog/index.md | 8 ++ .../sprints/5/prep/index.md | 15 +++ .../sprints/5/success/index.md | 8 ++ 35 files changed, 769 insertions(+) create mode 100644 common-content/en/module/distributed-tracing/introduction/index.md create mode 100644 common-content/en/module/distributed-tracing/using-honeycomb/index.md create mode 100644 common-content/en/module/event-driven/kafka-in-a-nutshell/index.md create mode 100644 common-content/en/module/event-driven/kafka-paper/index.md create mode 100644 common-content/en/module/event-driven/project/alerting/index.md create mode 100644 common-content/en/module/event-driven/project/cron/index.md create mode 100644 common-content/en/module/event-driven/project/distributed-tracing/index.md create mode 100644 common-content/en/module/event-driven/project/distributing-with-kafka/index.md create mode 100644 common-content/en/module/event-driven/project/handling-errors/index.md create mode 100644 common-content/en/module/event-driven/project/intro/index.md create mode 100644 common-content/en/module/event-driven/project/monitoring/index.md create mode 100644 
common-content/en/module/event-driven/project/multiple-queues/index.md create mode 100644 common-content/en/module/event-driven/project/running-commands/index.md create mode 100644 common-content/en/module/event-driven/queues/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/_index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/1/_index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/1/backlog/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/1/prep/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/1/success/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/2/_index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/2/backlog/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/2/prep/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/2/success/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/3/_index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/3/backlog/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/3/prep/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/3/success/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/4/_index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/4/backlog/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/4/prep/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/4/success/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/5/_index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/5/backlog/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/5/prep/index.md create mode 100644 org-cyf-tracks/content/event-driven-architecture/sprints/5/success/index.md diff --git a/common-content/en/module/distributed-tracing/introduction/index.md b/common-content/en/module/distributed-tracing/introduction/index.md new file mode 100644 index 000000000..5e81256d2 --- /dev/null +++ b/common-content/en/module/distributed-tracing/introduction/index.md @@ -0,0 +1,26 @@ ++++ +title = "Distributed Tracing" +time = 120 +objectives = [ + "Contrast distributed tracing and metrics.", + "Explain how distributed tracing helps understand a request flow through several systems.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +In the past we have added metrics to our programs and collected and aggregated those metrics using the Prometheus monitoring tool. Metrics are a widely-used methodology for understanding the behaviour of our systems at a statistical level: what percentage of requests are being completed successfully, what is the 90th percentile latency, what is our current cache hit rate or queue length. These kinds of queries are very useful for telling us whether our systems seem to be healthy overall or not, and, in some cases, may provide useful insights into problems or inefficiencies. 
+ +However, one thing that metrics are not normally very good for is understanding how user experience for a system may vary between different types of requests, why particular requests are outliers in terms of latency, and how a single user request flows through backend services - many complex web services may involve dozens of backend services or datastores. It may be possible to answer some of these questions using logs analysis. However, there is a better solution, designed just for this problem: distributed tracing. + +Distributed tracing has two key concepts: traces and spans. A trace represents a whole request or transaction. Traces are uniquely identified by trace IDs. Traces are made up of a set of spans, each tagged with the trace ID of the trace it belongs to. Each span is a unit of work: a remote procedure call or web request to a specific service, a method execution, or perhaps the time that a message spends in a queue. Spans can have child spans. There are specific tools that are designed to collect and store distributed traces, and to perform useful queries against them. + +One of the key aspects of distributed tracing is that when services call other services the trace ID is propagated to those calls (in HTTP-based systems this is done using a special HTTP [traceparent header](https://uptrace.dev/opentelemetry/opentelemetry-traceparent.html)) so that the overall trace may be assembled. This is necessary because each service in a complex chain of calls independently posts its spans to the distributed trace collector. The collector uses the trace ID to assemble the spans together, like a jigsaw puzzle, so that we can see a holistic view of an +entire operation. + +[OpenTelemetry](https://opentelemetry.io/) (also known as OTel) is the main industry standard for distributed tracing. It governs the format of traces and spans, and how traces and spans are collected. It is worth spending some time exploring the [OTel documentation](https://opentelemetry.io/docs/), particularly the Concepts section. The [awesome-opentelemetry repo](https://github.com/magsther/awesome-opentelemetry) is another very comprehensive set of +resources. + +Distributed tracing is a useful technique for understanding how complex systems are operating. diff --git a/common-content/en/module/distributed-tracing/using-honeycomb/index.md b/common-content/en/module/distributed-tracing/using-honeycomb/index.md new file mode 100644 index 000000000..2e190d82b --- /dev/null +++ b/common-content/en/module/distributed-tracing/using-honeycomb/index.md @@ -0,0 +1,42 @@ ++++ +title = "Using Honeycomb" +time = 180 +objectives = [ + "Publish trace spans to Honeycomb.", + "View an assembled trace in Honeycomb.", + "Identify outliers in Honeycomb.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +[Honeycomb](https://www.honeycomb.io/) is a {{}}Software as a Service is software that someone else runs and we can rely on. {{}} distributed tracing provider. + +Honeycomb provide API endpoints where we can upload trace spans. Honeycomb assembles spans which belong to the same traces. We can then view, query, and inspect those entire traces, seeing how our request flowed through a system. + +We will experiment with Honeycomb locally with a single program running on one computer, to practice uploading and interpreting spans. + +Sign up to Honeycomb for free. + +{{}} +Write a small standalone command line application which: +1. Picks a random number of iterations between 2 and 10 (we'll call it `n`). 
+2. `n` times, creates a span, sleeps for a random amount of time between 10ms and 5s, then uploads the span. +3. Between each span, sleeps for a random amount of time between 100ms and 5s. + +Each time you run your program, it should use a unique trace ID, but within one program execution, all spans should have the same trace ID. + +There are standard libraries for creating and sending OTel spans, such as [in Go](https://docs.honeycomb.io/send-data/go/opentelemetry-sdk/) and [in Java](https://docs.honeycomb.io/send-data/java/opentelemetry-agent/). +{{}} + +{{}} +Run your program 10 times, making sure it uploads its spans to Honeycomb with your API key. + +Explore the Honeycomb UI. Try to work out: +1. What was the biggest `n` generated by one of your program runs? +2. Which was the fastest run? What was `n` for that run? +3. What was the longest individual sleep performed in your program during a span? +4. What was the longest individual sleep _between_ spans in your program? +{{}} diff --git a/common-content/en/module/event-driven/kafka-in-a-nutshell/index.md b/common-content/en/module/event-driven/kafka-in-a-nutshell/index.md new file mode 100644 index 000000000..0c0284933 --- /dev/null +++ b/common-content/en/module/event-driven/kafka-in-a-nutshell/index.md @@ -0,0 +1,27 @@ ++++ +title = "Kafka in a Nutshell" +time = 120 +objectives = [ + "List the components of the Kafka architecture.", + "Explain the purpose of a producer, consumer, and broker.", + "Define a record.", + "Define a topic.", + "Define a partition.", + "Explain the relationship (and differences) between topics and partitions.", + "Explain how Kafka knows when a consumer has successfully handled a record.", + "Contrast at-most-once and at-least-once delivery.", + "Explain why exactly-once delivery is very hard to achieve.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +Kafka is a commonly-used open-source distributed queue. + +{{}} +Read [Apache Kafka in a Nutshell](https://medium.com/swlh/apache-kafka-in-a-nutshell-5782b01d9ffb). + +Make sure you have achieved all of the learning objectives for this prep. +{{}} diff --git a/common-content/en/module/event-driven/kafka-paper/index.md b/common-content/en/module/event-driven/kafka-paper/index.md new file mode 100644 index 000000000..7a22e7986 --- /dev/null +++ b/common-content/en/module/event-driven/kafka-paper/index.md @@ -0,0 +1,23 @@ ++++ +title = "Kafka Paper" +time = 120 +objectives = [ + "Describe how Kafka stores data internally.", + "Calculate how many partitions are needed to serve a given number of consumers on a topic.", + "Contrast push-based and pull-based queueing systems.", + "Describe what delivery ordering constraints are and aren't guaranteed by Kafka.", + "Explain limitations of Kafka compared to systems with acknowledgements or two-phase commits.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +Kafka is a commonly-used open-source distributed queue. + +{{}} +Read about the core Kafka concepts in the [Kafka: a Distributed Messaging System for Log Processing paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2017/09/Kafka.pdf). + +Make sure you have achieved all of the learning objectives for this prep.
+{{}} diff --git a/common-content/en/module/event-driven/project/alerting/index.md b/common-content/en/module/event-driven/project/alerting/index.md new file mode 100644 index 000000000..5f157cfe8 --- /dev/null +++ b/common-content/en/module/event-driven/project/alerting/index.md @@ -0,0 +1,51 @@ ++++ +title = "Alerting" +time = 180 +objectives = [ + "Identify and graph metrics to indicate specific problems.", + "Create an alert triggered by a specific problem.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +Write an [AlertManager configuration](https://prometheus.io/docs/alerting/latest/alertmanager/) and set up at least one alert. + +For instance: + +- We could alert on the age of jobs when they are dequeued - if this rises too high (more than a few seconds) then users' jobs aren't being executed in a timely fashion. We should use a percentile for this calculation. + - Note that this may need us to add extra code/data to our system, to be able to produce these metrics. +- We could also alert on failure to queue jobs, and failure to read from the queue. +- We expect to see fetch requests against all of our topics. If we don't, it may mean that our consumers are not running, or are otherwise broken. We could set up alerts on the `kafka_server_BrokerTopicMetrics_Count{name="TotalFetchRequestsPerSec"}` metric to check this. + +For critical alerts in a production environment we would usually use PagerDuty or a similar tool, but for our purposes the easiest way to configure an alert is to use email. +This article describes how to send [Alertmanager email using GMail](https://www.robustperception.io/sending-email-with-the-alertmanager-via-gmail/) as an email server. + +> [!WARNING] +> +> If you do this, be careful not to check your `GMAIL_AUTH_TOKEN` into GitHub - we should never check ANY token into source control. Instead, we can check in a template file and use a tool such as [heredoc](https://tldp.org/LDP/abs/html/here-docs.html) to substitute the value of an environment variable (our token) into the final generated Alertmanager configuration (and include this step in a build script/Makefile). +> +> It is also advisable to use a throwaway GMail account for this purpose, for additional security - just in case. + +We can also build a Grafana dashboard to display our Prometheus metrics. The [Grafana Fundamentals](https://grafana.com/tutorials/grafana-fundamentals/) tutorial will walk you through how to do this (although we will need to use our own application and not their sample application). + +{{}} +Simulate several potential problems that could happen when running this system. Make sure you can identify the problems on your Grafana dashboard. + +Examples of problems to simulate and identify: +* Kafka crashes and doesn't start again +* One kind of job always fails +* There are too many jobs and we can't get to them all in a timely manner. +* One kind of job fails whenever it runs in a particular topic (but succeeds in the other topics) +* One kind of job takes a really long time to run and means other jobs don't start in a timely manner. +* No consumers are pulling jobs out of a particular topic. +* A producer is producing jobs which the consumers can't understand (e.g. they have missing JSON fields). + +Prepare a demo where you can show how your Grafana dashboard can help you diagnose and debug these problems. +{{}} + +{{}} +Create at least one alert to notify you by email of a problem with your cron system.
+{{}} diff --git a/common-content/en/module/event-driven/project/cron/index.md b/common-content/en/module/event-driven/project/cron/index.md new file mode 100644 index 000000000..ae2b57589 --- /dev/null +++ b/common-content/en/module/event-driven/project/cron/index.md @@ -0,0 +1,43 @@ ++++ +title = "Cron" +time = 300 +objectives = [ + "Describe the purpose of cron.", + "Write a crontab to run a job every minute, or at fixed times.", + "Write a program to parse files containing crontabs and schedule jobs.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +We are going to implement a distributed version of the `cron` job scheduler (read about [cron](https://en.wikipedia.org/wiki/Cron) if you are not familiar with it). Cron jobs are defined by two attributes: the command to be executed, and the schedule that the job should run on (a definition of the times at which the job should execute). The schedule is defined according +to the `crontab` format. + +Most languages have parsers of the crontab format - you do not need to write one yourself (though it can be an interesting challenge!). Some examples: +* For Go, the most widely used is [robfig/cron](https://github.com/robfig/cron). +* For Java, [Quartz](https://www.quartz-scheduler.org/documentation/quartz-2.4.x/) has a Cron parser/scheduler; see [this quick start guide](https://betterstack.com/community/questions/how-to-run-cron-jobs-in-java/) for how to use it.\ + Note that Quartz parsing requires a leading seconds specifier, which is non-standard. You can convert a regular cron expression to a Quartz-compatible one by adding the prefix `"0 "`. + +The `cron` tool common to Unix operating systems runs jobs on a schedule. Cron only works on a single host. We want to create a version of cron that can schedule jobs across multiple workers, running on different hosts. + +### Writing a cron scheduler without Kafka + +The first step won't involve Kafka at all, or running custom jobs. These will come later. + +{{}} +Write code which will parse a file containing a list of crontabs, one per line, and print "Running job [line number]" for each line on the schedule. + +e.g. if passed the file: +``` +* * * * * +15 * * * * +``` + +Your program should print "Running job 0" every minute, and "Running job 1" once an hour at quarter past the hour. +{{}} + +{{}} +Create a Docker image for your cron scheduler program. Make sure you can run the image. +{{}} diff --git a/common-content/en/module/event-driven/project/distributed-tracing/index.md b/common-content/en/module/event-driven/project/distributed-tracing/index.md new file mode 100644 index 000000000..529ee1471 --- /dev/null +++ b/common-content/en/module/event-driven/project/distributed-tracing/index.md @@ -0,0 +1,31 @@ ++++ +title = "Distributed Tracing in Kafka" +time = 300 +objectives = [ + "Instrument a producer and consumer with OTel.", + "Interpret a trace in Honeycomb across producer and consumer.", + "Identify outliers in Honeycomb.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +We know that metrics can help give us aggregate information about all actions, and distributed tracing can help us better understand the flow of particular requests through systems. + +A single cron job invocation is like a user request. It originates in one system (the producer), then flows through Kafka, and may run on one consumer (if it succeeds the first time), or more than one consumer (if it fails and needs to be retried).
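+
+In the exercise below, the producer and the consumers need to share trace context so that their spans join one trace. Here is a minimal sketch of one way to do that, assuming the OpenTelemetry Go SDK and a hypothetical `TraceContext` field added to the job JSON (exporter setup, e.g. sending spans to Honeycomb, is omitted, and your field names may differ):
+
+```go
+package cron
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// Job is a hypothetical message format: the extra TraceContext field carries
+// the W3C trace headers between the producer and the consumers.
+type Job struct {
+	Command      string            `json:"command"`
+	TraceContext map[string]string `json:"trace_context"`
+}
+
+// Producer side: start a span for this invocation and inject its context into the job.
+func enqueue(ctx context.Context, job *Job) {
+	ctx, span := otel.Tracer("cron-producer").Start(ctx, "enqueue-job")
+	defer span.End()
+	carrier := propagation.MapCarrier{}
+	propagation.TraceContext{}.Inject(ctx, carrier)
+	job.TraceContext = carrier
+	// ... marshal the job to JSON and produce it to Kafka here ...
+}
+
+// Consumer side: extract the context from the job so this span joins the same trace.
+func runJob(job *Job) {
+	ctx := propagation.TraceContext{}.Extract(context.Background(), propagation.MapCarrier(job.TraceContext))
+	_, span := otel.Tracer("cron-consumer").Start(ctx, "run-job")
+	defer span.End()
+	// ... execute the job's command here ...
+}
+```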
+ +We can use distributed tracing to trace individual cron job invocations. + +{{}} +Add span publishing to your producer and consumers. + +For all of the spans to be assembled into the same trace, all of the services will need to use the same trace ID. You may need to modify your job data format to enable this. + +Run your system, publishing to Honeycomb, and inspect the traces in Honeycomb. Identify: +1. How long jobs spend waiting in Kafka between the producer and consumer. What was the longest time a job waited there? What was the shortest time? +2. What was the largest number of retries any job took? +3. How many jobs failed all of their retries? +4. Which jobs fail the most or the least? +{{}} diff --git a/common-content/en/module/event-driven/project/distributing-with-kafka/index.md b/common-content/en/module/event-driven/project/distributing-with-kafka/index.md new file mode 100644 index 000000000..27b84859f --- /dev/null +++ b/common-content/en/module/event-driven/project/distributing-with-kafka/index.md @@ -0,0 +1,50 @@ ++++ +title = "Distributing with Kafka" +time = 90 +objectives = [ + "Run a Kafka queue using `docker-compose`.", + "Produce messages into a Kafka queue from a custom written producer.", + "Consume messages from a Kafka queue in a custom written consumer.", + "Consume different messages from a Kafka queue from multiple consumers on the same topic.", + "Run a Kafka pipeline with producers and consumers in `docker-compose`.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +Having built a local cron scheduler, we can now expand this to create a functional distributed cron system. We will build two separate programs: + +- A Kafka producer that reads configuration files for jobs and queues tasks for execution +- A Kafka consumer that dequeues jobs from a queue and runs them + +In _this_ step, we will just make dummy producers and consumers that send messages on the correct schedule and log that they were received. In the next step we will make them actually run command lines. + +Kafka itself is a queue that lets you communicate single messages in a structured and asynchronous way between producers and consumers. Therefore, all the scheduling logic for managing recurring jobs must be part of your producer (although it is recommended to reuse a suitable library to assist with parsing crontabs and scheduling). Every time a job is due to be run, your producer creates a new message and writes it to Kafka, for a consumer to dequeue and run. + +We'll need to be able to run Kafka. The easiest way is to use `docker-compose`. The [conduktor/kafka-stack-docker-compose](https://github.com/conduktor/kafka-stack-docker-compose) project provides several starter configurations for running Kafka. The `zk-single-kafka-single.yml` configuration will work for development purposes. + +There are existing Kafka clients for many languages, such as: +* A [Golang Kafka client](https://docs.confluent.io/kafka-clients/go/current/overview.html#go-example-code). +* A [Java Kafka client](https://docs.confluent.io/kafka-clients/java/current/overview.html). + +We may want to run other Docker containers later, so it is worth making our own copy of that docker-compose configuration that we can add to.
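+
+To get a feel for the client API before we specify the two programs, here is a minimal sketch of producing a JSON-encoded job message using the Go client linked above (the broker address, topic name, and message fields are illustrative placeholders, not a required format):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"time"
+
+	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
+)
+
+// jobMessage is an illustrative message format - design your own fields.
+type jobMessage struct {
+	Command       string    `json:"command"`
+	ScheduledTime time.Time `json:"scheduled_time"`
+}
+
+func main() {
+	producer, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
+	if err != nil {
+		log.Fatalf("creating producer: %v", err)
+	}
+	defer producer.Close()
+
+	payload, err := json.Marshal(jobMessage{Command: "echo hello", ScheduledTime: time.Now()})
+	if err != nil {
+		log.Fatalf("encoding job: %v", err)
+	}
+
+	topic := "jobs"
+	err = producer.Produce(&kafka.Message{
+		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+		Key:            []byte("a-uuid-would-go-here"), // the key determines the partition
+		Value:          payload,
+	}, nil)
+	if err != nil {
+		log.Fatalf("producing message: %v", err)
+	}
+
+	// Block until the message has actually been delivered (or 15s passes).
+	producer.Flush(15 * 1000)
+}
+```
+
+The Java client follows the same pattern: build a producer from a config map, send a keyed record to a topic, and flush before exiting. The consumer side is a mirror image - subscribe to the topic and poll for messages in a loop.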
+ +Our producer program needs to be able to do the following: + +- Read and parse a file with cron job definitions (we'll set up our own for this project; don't reuse the system cron config file because we will want to modify the format later) - you should already have written this code. +- Write a message to Kafka specifying the command to run, the intended start time of the job, and any other information that we think is necessary. It probably makes sense to encode this information as JSON. +- We will also need to [create a Kafka topic](https://kafka.apache.org/documentation/#quickstart_createtopic). In a production environment we would probably use separate tooling to manage topics (perhaps Terraform), but for this project, we can create our Kafka topic using code like these examples in [Go](https://github.com/confluentinc/examples/blob/7.3.0-post/clients/cloud/go/producer.go#L39) or [Java](https://github.com/confluentinc/examples/blob/6d4c49b20662cb4c8b4a668622cb2e9442b59a20/clients/cloud/java/src/main/java/io/confluent/examples/clients/cloud/ProducerExample.java#L39). + +Our consumer program needs to be able to do the following things: + +- Read job information from a Kafka queue (decoding JSON) +- Execute the commands to run the jobs (assume each is a simple one-line command that you can `exec`). For now we will just log the job number (like we were doing in our local version), but in a future step, we will make it run commands. +- Because the producer is writing jobs to the queue when they are ready to run, our consumer does not need to do any scheduling or to parse the crontab format + +We want to run two consumers - therefore, when we create our topic, we should create two partitions of the topic. We will also need to specify a key for each Kafka message that we produce - Kafka assigns messages to partitions based on a hash of the message key. We can generate UUIDs to use as keys. + +We can build Docker containers for our producer and consumer and add these to our docker-compose configuration. We should create a Makefile or script to make this repeatable. + +Test our implementation and observe both of our consumers running jobs scheduled by our producer. What happens if we only create one partition in our topic? What happens if we create three? diff --git a/common-content/en/module/event-driven/project/handling-errors/index.md b/common-content/en/module/event-driven/project/handling-errors/index.md new file mode 100644 index 000000000..a857e9e7b --- /dev/null +++ b/common-content/en/module/event-driven/project/handling-errors/index.md @@ -0,0 +1,41 @@ ++++ +title = "Handling Errors" +time = 300 +objectives = [ + "Explain which command line jobs are and aren't desirable to automatically retry.", + "Explain the risks of enqueueing retries on the same topic as first attempts.", + "Avoid overloading queues by adding delays between retries.", + "Define and describe a Dead Letter Queue.", + "Retry failed jobs on a separate Kafka topic.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +What happens if there is a problem running a job? For some kinds of jobs, maybe the right thing is to retry it. For some, it isn't. It probably depends on what the job was doing. + +{{}} +Think about what jobs should probably be retried and what jobs shouldn't. + +What are the common characteristics of each?
+{{}} + +This should be a configurable property of our cron jobs: update our program to add a maximum number of attempts to the job configurations and message format. + +However: we don't want to risk retry jobs displacing first-time runs of other jobs. This is why some queue-based systems use separate queues for retries. + +{{}} +Read about [using separate queues for retries](https://www.uber.com/en-IE/blog/reliable-reprocessing/). +{{}} + +We can create a second set of topics for jobs that fail the first time and need to be retried (we need one retry topic for each cluster). If a job fails, the consumer should write the job to the corresponding retry topic for the cluster (and decrement the remaining allowed attempts in the job definition). + +{{}} +Run some instances of your consumer program that read from your retry queues (we can make this a command-line option in your consumer). + +Define a job that fails and observe your retry consumers retrying and eventually discarding it. + +Define a job that randomly fails some percent of the time, and observe your retry consumers retrying and eventually completing it. +{{}} diff --git a/common-content/en/module/event-driven/project/intro/index.md b/common-content/en/module/event-driven/project/intro/index.md new file mode 100644 index 000000000..b253e84a7 --- /dev/null +++ b/common-content/en/module/event-driven/project/intro/index.md @@ -0,0 +1,27 @@ ++++ +title = "Project: Kafka Cron Scheduler" +time = 10 +objectives = [ + "Explain the objectives of the module project.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +Throughout this module, you will be building a project. + +The purpose of the project is to allow a user to specify command lines that should be run on some schedule, and then have those command lines be run on that schedule, on different computers. The more computers we add to the pool of runners, the more command lines we can run at a time. + +To simplify deployment, we will use docker compose to simulate having multiple runner computers. + +Because you will be learning lots of new things in this project, we will split this project up into steps. We will: +1. Build a local cron scheduler that parses the file format and runs simplified tasks at the required intervals. +2. Dockerise this local cron scheduler so we can run it in Docker. +3. Insert Kafka into the process. Have our cron scheduler produce a message into a Kafka queue, and a consumer pull it out. +4. Make our producer produce command lines to run, and our consumer run them. +5. Introduce multiple queues. +6. Handle errors. +7. Add monitoring. +8. Add alerting. diff --git a/common-content/en/module/event-driven/project/monitoring/index.md b/common-content/en/module/event-driven/project/monitoring/index.md new file mode 100644 index 000000000..48edd2182 --- /dev/null +++ b/common-content/en/module/event-driven/project/monitoring/index.md @@ -0,0 +1,112 @@ ++++ +title = "Monitoring" +time = 420 +objectives = [ + "Define meaningful user-facing metrics.", + "Define useful metrics to help diagnose a root-cause of a user-facing issue.", + "Instrument a producer and consumer to provide Prometheus metrics.", + "Collect Prometheus metrics from a Kafka queue.", + "Run Prometheus and Grafana.", + "Scrape Prometheus metrics from services into Prometheus.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +In software operations, we want to know what our software is doing and how it is performing. 
+ +One very useful technique is to have our program export metrics. Metrics are values that our program makes available (the industry standard is to export and scrape them over HTTP). + +Specialised programs, such as Prometheus, can then fetch metrics regularly from all the running instances of our program, store the history of these metrics, and do useful arithmetic on them (like computing rates, averages, and maximums). We can use this data to do troubleshooting and to alert if things go wrong. + +{{}} +Read the [Overview of Prometheus](https://prometheus.io/docs/introduction/overview/) if you are not familiar with Prometheus. +{{}} + +There are guides to instrumenting applications for Prometheus which describe how to add metrics. For instance [in Go](https://prometheus.io/docs/guides/go-application/) and [in Java](https://www.baeldung.com/java-prometheus-client). + +First, consider: + +- What kinds of things may go wrong with our system? (it is useful to look at the errors your code is handling, as inspiration) +- What would users' expectations be of this system? +- What metrics can we add that will tell us when the system is not working as intended? +- What metrics can we add that might help us to troubleshoot the system and understand how it is operating? Read back through the earlier project steps to try and identify the properties of the system that we might want to know about. + +Asking these questions should guide us in designing the metrics that our consumers and producer should export. +Think about what kinds of problems can happen both in the infrastructure - Kafka, your consumers and producers - and in the submitted jobs. + +{{}} +Add metrics to your programs. Verify that they work as expected using `curl` or your web browser. +{{}} + +#### Running the Prometheus JMX Exporter to get Kafka metrics + +Kafka doesn't export Prometheus metrics natively. However, we can use the official +[Prometheus JMX exporter](https://github.com/prometheus/jmx_exporter) to expose its metrics. + +> **Note:** Kafka is a Java program. We don't need to know much about Java programs in order to run them, but it's useful to know that Java programs run in a host process called a Java Virtual Machine (JVM). The JVM also allows for injecting extra code called Java agents, which can modify how a program is run. + +The Prometheus JMX exporter can run as a Java agent (alongside a Java program such as Kafka) or else as a standalone HTTP server, which collects metrics from a JVM running elsewhere and re-exports them as Prometheus metrics. If you're using [conduktor/kafka-stack-docker-compose](https://github.com/conduktor/kafka-stack-docker-compose) as suggested then your image contains the `jmx_prometheus_javaagent` already. + +You need to create a config file, named something like `kafka-jmx-config.yaml`. A config file that will collect all metrics is: + +``` +rules: +- pattern: ".*" +``` + +Now, update the Kafka service in your `docker-compose.yml`. Add a volume - for example: + +``` + volumes: + - ./kafka-jmx-config.yaml:/kafka-jmx-config.yaml +``` + +Finally, you need to add a new line to the `environment` section for your Kafka server in your `docker-compose.yml`: + +``` +KAFKA_OPTS: -javaagent:/usr/share/java/cp-base-new/jmx_prometheus_javaagent-0.14.0.jar=8999:/kafka-jmx-config.yaml +``` + +The version of the `jmx_prometheus_javaagent` jar might change in a later version of the `cp-kafka` image, so if you have any issues running the software, this would be the first thing to check.
You can't just map a newer version of the agent as a volume as this is likely to cause runtime errors due to multiple versions of the agent on the Java classpath. + +Now you should be able to see JVM and Kafka metrics on [http://localhost:8999](http://localhost:8999). Check this using `curl` or your web browser. + +#### Running Prometheus, Alertmanager, and Grafana + +Next, we can add Prometheus, AlertManager, and Grafana, a common monitoring stack, to our `docker-compose` configuration. Here is an example configuration that we can adapt: https://dzlab.github.io/monitoring/2021/12/30/monitoring-stack-docker/. AlertManager is used for notifying operators of unexpected conditions, and Grafana is useful for building dashboards that allow us to troubleshoot and understand our system's operation. + +> [!NOTE] +> +> If your computer is struggling to run such a complex `docker-compose` system in a performant fashion, you can cut down the number of Kafka topics and consumers that you are running to a minimum (just one producer and consumer/retry consumer pair are fine - don't run sets of these for multiple clusters if your computer is under too much load). + +We'll need to set up a Prometheus configuration to scrape our producers and consumers. Prometheus [configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) is quite complex but we can adapt this [example configuration](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml). + +For example, to scrape your Kafka metrics, you can add this to the Prometheus configuration: + +``` +scrape_configs: + - job_name: "kafka" + static_configs: + - targets: ["kafka1:8999"] +``` + +Once you have adapted the sample Prometheus configuration to scrape metrics from your running producer and consumer(s) and from the JMX exporter that is exporting the Kafka metrics, you should check that Prometheus is correctly scraping all those metrics. If you haven't changed the default port, you can access Prometheus's status page at [http://localhost:9090/](http://localhost:9090/). + +You can now try out some queries in the Prometheus UI. + +For example, let's say that our consumers are exporting a metric `job_runtime` that describes how long it takes to run jobs. And let's say the metric is labelled with the name of the queue the consumer is reading from. + +Because this metric is describing a population of observed latencies, the best metric type to use is a [histogram](https://prometheus.io/docs/practices/histograms/). A histogram named `job_runtime` is exported as a set of `job_runtime_bucket` series, which carry the `le` label that `histogram_quantile` needs. + +We can query this as follows: + +``` +histogram_quantile(0.9, sum by (queue, le)(rate(job_runtime_bucket[10m]))) +``` + +This will give you the 90th percentile job runtime (i.e. the runtime where 90% of jobs complete this fast or faster) over the past 10 minutes (the `rate` function does this for histogram queries - it's a little counterintuitive). + +For some more PromQL examples, see the [Prometheus Query Examples page](https://prometheus.io/docs/prometheus/latest/querying/examples/).
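+
+As a sketch of how a consumer might export the `job_runtime` histogram discussed above, here is a minimal Go example using the Prometheus client library (the metric name, the `queue` label value, and the port are illustrative - match them to your own design and scrape configuration):
+
+```go
+package main
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+// jobRuntime records how long each job took, labelled by the queue it was read from.
+var jobRuntime = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Name:    "job_runtime",
+		Help:    "Time taken to run a job, in seconds.",
+		Buckets: prometheus.DefBuckets,
+	},
+	[]string{"queue"},
+)
+
+func main() {
+	prometheus.MustRegister(jobRuntime)
+
+	// Expose the metrics for Prometheus to scrape.
+	http.Handle("/metrics", promhttp.Handler())
+	go http.ListenAndServe(":2112", nil)
+
+	// Wherever the consumer runs a job, time it and record the duration.
+	start := time.Now()
+	// ... run the job here ...
+	jobRuntime.WithLabelValues("cluster-a").Observe(time.Since(start).Seconds())
+
+	select {} // keep the process alive so the metrics endpoint stays up
+}
+```
+
+With this in place, the scrape config shown earlier just needs a target pointing at the consumer's port, and the `histogram_quantile` query above will work against the exported `job_runtime_bucket` series.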
diff --git a/common-content/en/module/event-driven/project/multiple-queues/index.md b/common-content/en/module/event-driven/project/multiple-queues/index.md new file mode 100644 index 000000000..b39f58248 --- /dev/null +++ b/common-content/en/module/event-driven/project/multiple-queues/index.md @@ -0,0 +1,31 @@ ++++ +title = "Multiple queues" +time = 180 +objectives = [ + "Run multiple Kafka topics.", + "Assign jobs to separate Kafka topics.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +A new requirement: our distributed cron system needs to be able to schedule jobs to run in multiple clusters (e.g. one in Europe, one in America). Imagine that we want to support users who have data stored in specific locations and want to make sure their cron jobs run near their data. + +Just like how we are simulating multiple computers with docker-compose, we don't really need to set up real clusters for this - just write our program as though we had multiple sets of consumer workers. + +You *don't* need to set up multiple Kafka clusters for this - this extension is just about having multiple sets of consumer jobs, which we notionally call clusters. + +- Define a set of clusters in our program (two is fine, `cluster-a` and `cluster-b`) +- Each cluster should have its own Kafka topic +- Update the job format in the crontab file so that jobs must specify which cluster to run in (Note: This will diverge your crontab file format from the standard one - this is fine) +- Run separate consumers that are configured to read from each cluster-specific topic + +Test that our new program and Kafka configuration work as expected. + +{{}} +Imagine that in real life you had a deployed system that didn't need clusters specified, and then you wanted to add the ability to choose clusters. + +How would you do this sort of migration in a running production environment, where you could not drop existing jobs? +{{}} diff --git a/common-content/en/module/event-driven/project/running-commands/index.md b/common-content/en/module/event-driven/project/running-commands/index.md new file mode 100644 index 000000000..1a0bf0476 --- /dev/null +++ b/common-content/en/module/event-driven/project/running-commands/index.md @@ -0,0 +1,30 @@ ++++ +title = "Running commands" +time = 60 +objectives = [ + "Pass command lines from a config file, via a producer, to a consumer.", + "Execute command lines in a consumer." +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +Now that we have a producer passing messages and a consumer receiving them, we can make the messages more useful. + +Modify the producer so that it expects a command line to be present in the supplied crontab file after the schedule. + +Modify the JSON message it produces to include the command line to run. + +Modify the consumer to read the JSON message, run the command, and log its output. + +> [!WARNING] +> +> To keep the scope of this project tractable, we are ignoring two things. +> +> The first is security: simply run commands as the user that our consumer runs as. +> +> The second is that we are assuming the jobs to be run consist of commands already available on the consumers. +> +> You may address these concerns later in an optional extension of the project if you have time.
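+
+As a sketch of the consumer-side change, assuming a hypothetical `Command` field in the job JSON and a Unix-like shell available in the consumer's container (adapt the names to your own message format):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"os/exec"
+)
+
+// job is an illustrative message format - only the Command field matters here.
+type job struct {
+	Command string `json:"command"`
+}
+
+// runJob decodes a consumed message, runs the command via the shell,
+// and logs whatever the command wrote to stdout/stderr.
+func runJob(message []byte) {
+	var j job
+	if err := json.Unmarshal(message, &j); err != nil {
+		log.Printf("could not decode job: %v", err)
+		return
+	}
+	output, err := exec.Command("/bin/sh", "-c", j.Command).CombinedOutput()
+	if err != nil {
+		log.Printf("job %q failed: %v", j.Command, err)
+	}
+	log.Printf("job %q output:\n%s", j.Command, output)
+}
+
+func main() {
+	// Stand-in for a message pulled from Kafka.
+	runJob([]byte(`{"command": "echo hello from a consumer"}`))
+}
+```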
diff --git a/common-content/en/module/event-driven/queues/index.md b/common-content/en/module/event-driven/queues/index.md new file mode 100644 index 000000000..1c30e45b8 --- /dev/null +++ b/common-content/en/module/event-driven/queues/index.md @@ -0,0 +1,30 @@ ++++ +title = "Queues" +time = 60 +objectives = [ + "List advantages of using queues.", + "List examples of systems where queues are helpful.", + "Explain how a queue helps to avoid system overload.", + "Explain how a queue can help reduce service provisioning and costs.", + "Identify drawbacks of queue-based systems.", +] +[build] + render = "never" + list = "local" + publishResources = false ++++ + +Queues are a frequently-seen component of large software systems that involve potentially heavyweight or long-running requests. A queue can act as a form of buffer, smoothing out spikes of load so that the system can deal with work when it has the resources to do so. + +{{}} +Read about the [Queue-Based Load-Leveling Pattern](https://learn.microsoft.com/en-us/azure/architecture/patterns/queue-based-load-leveling). + +Make sure you have achieved all of the learning objectives for this prep. +{{}} + +{{< + multiple-choice + question="How can results of tasks be communicated back to users in a queue-based system?" + answers="Writing to stdout/stderr | Notifications such as by email | Displaying results in the user's browser" + feedback="No - a user doesn't see the stdout/stderr of the process running the work. stdout/stderr can be useful for the queue operators to debug things, but generally aren't useful to end-users who submit tasks. | Yes - queues can send notifications about successes/failures/progress. | Ish. We can build systems that monitor the queue and display results. But in general, when we submit work to a queue, we don't have a server we can ask to show us progress." + correct="1">}} \ No newline at end of file diff --git a/org-cyf-tracks/content/event-driven-architecture/_index.md b/org-cyf-tracks/content/event-driven-architecture/_index.md new file mode 100644 index 000000000..2bfd6a308 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/_index.md @@ -0,0 +1,13 @@ ++++ +title = "Event-driven Architecture" +description = "Build, monitor, and observe systems using event-driven architectures." +layout = "module" +track_kinds = ["jobs-after-sdc"] ++++ + +This track introduces event-driven architectures using Kafka, and monitoring them using Prometheus. + +It assumes you have already: +* Completed the [Software Development Course](https://sdc.codeyourfuture.io/) +* Gained enough familiarity with Docker that you can create a Docker image for an application you have developed, and run it locally using `docker run`. +* Used `docker compose` to run multiple coordinated Docker containers locally.
diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/1/_index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/1/_index.md new file mode 100644 index 000000000..6ba0e6e40 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/1/_index.md @@ -0,0 +1,8 @@ ++++ +title = "Sprint 1" +description = "Kafka, and queue-based systems" +theme = "Kafka, and queue-based systems" +layout = "sprint" +menu_level = ["module"] +weight = 1 ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/1/backlog/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/1/backlog/index.md new file mode 100644 index 000000000..4c5772447 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/1/backlog/index.md @@ -0,0 +1,8 @@ ++++ +title = "Backlog" +layout = "backlog" +menu_level = ["sprint"] +weight = 2 +backlog = "Module-Event-Driven" +backlog_filter = "📅 Sprint 1" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/1/prep/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/1/prep/index.md new file mode 100644 index 000000000..d78332a6b --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/1/prep/index.md @@ -0,0 +1,21 @@ ++++ +title = "Prep" +layout = "prep" +menu_level = ["sprint"] +weight = 1 +[[blocks]] +name = "Queues" +src = "module/event-driven/queues" +[[blocks]] +name = "Kafka In A Nutshell" +src = "module/event-driven/kafka-in-a-nutshell" +[[blocks]] +name = "Kafka Paper" +src = "module/event-driven/kafka-paper" +[[blocks]] +name = "Project: Kafka Cron Scheduler" +src = "module/event-driven/project/intro" +[[blocks]] +name = "Cron" +src = "module/event-driven/project/cron" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/1/success/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/1/success/index.md new file mode 100644 index 000000000..6246758ee --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/1/success/index.md @@ -0,0 +1,8 @@ ++++ +title = "Success" +layout = "success" +menu_level = ["sprint"] +weight = 4 +objectives = [[ +]] ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/2/_index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/2/_index.md new file mode 100644 index 000000000..6cf815914 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/2/_index.md @@ -0,0 +1,8 @@ ++++ +title = "Sprint 2" +description = "Distributing jobs with Kafka" +theme = "Distributing jobs with Kafka" +layout = "sprint" +menu_level = ["module"] +weight = 2 ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/2/backlog/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/2/backlog/index.md new file mode 100644 index 000000000..a8fa33a5e --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/2/backlog/index.md @@ -0,0 +1,8 @@ ++++ +title = "Backlog" +layout = "backlog" +menu_level = ["sprint"] +weight = 2 +backlog = "Module-Event-Driven" +backlog_filter = "📅 Sprint 2" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/2/prep/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/2/prep/index.md new file mode 100644 index 000000000..2db87212f --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/2/prep/index.md @@ -0,0 +1,12 @@ ++++ +title = "Prep" +layout = "prep" +menu_level = ["sprint"] +weight = 1 +[[blocks]] +name 
= "Distributing with Kafka" +src = "module/event-driven/project/distributing-with-kafka" +[[blocks]] +name = "Running commands" +src = "module/event-driven/project/running-commands" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/2/success/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/2/success/index.md new file mode 100644 index 000000000..6246758ee --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/2/success/index.md @@ -0,0 +1,8 @@ ++++ +title = "Success" +layout = "success" +menu_level = ["sprint"] +weight = 4 +objectives = [[ +]] ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/3/_index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/3/_index.md new file mode 100644 index 000000000..03bef0d82 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/3/_index.md @@ -0,0 +1,8 @@ ++++ +title = "Sprint 3" +description = "Topics, Errors, Retries" +theme = "Topics, Errors, Retries" +layout = "sprint" +menu_level = ["module"] +weight = 3 ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/3/backlog/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/3/backlog/index.md new file mode 100644 index 000000000..92bd3fd46 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/3/backlog/index.md @@ -0,0 +1,8 @@ ++++ +title = "Backlog" +layout = "backlog" +menu_level = ["sprint"] +weight = 2 +backlog = "Module-Event-Driven" +backlog_filter = "📅 Sprint 3" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/3/prep/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/3/prep/index.md new file mode 100644 index 000000000..bf9abcdc1 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/3/prep/index.md @@ -0,0 +1,12 @@ ++++ +title = "Prep" +layout = "prep" +menu_level = ["sprint"] +weight = 1 +[[blocks]] +name = "Multiple queues" +src = "module/event-driven/project/multiple-queues" +[[blocks]] +name = "Handling errors" +src = "module/event-driven/project/handling-errors" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/3/success/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/3/success/index.md new file mode 100644 index 000000000..6246758ee --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/3/success/index.md @@ -0,0 +1,8 @@ ++++ +title = "Success" +layout = "success" +menu_level = ["sprint"] +weight = 4 +objectives = [[ +]] ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/4/_index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/4/_index.md new file mode 100644 index 000000000..d6b881362 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/4/_index.md @@ -0,0 +1,8 @@ ++++ +title = "Sprint 4" +description = "Monitoring and Alerting" +theme = "Monitoring and Alerting" +layout = "sprint" +menu_level = ["module"] +weight = 4 ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/4/backlog/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/4/backlog/index.md new file mode 100644 index 000000000..2a41b1608 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/4/backlog/index.md @@ -0,0 +1,8 @@ ++++ +title = "Backlog" +layout = "backlog" +menu_level = ["sprint"] +weight = 2 +backlog = "Module-Event-Driven" +backlog_filter = "📅 Sprint 4" ++++ diff --git 
a/org-cyf-tracks/content/event-driven-architecture/sprints/4/prep/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/4/prep/index.md new file mode 100644 index 000000000..68e342b35 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/4/prep/index.md @@ -0,0 +1,12 @@ ++++ +title = "Prep" +layout = "prep" +menu_level = ["sprint"] +weight = 1 +[[blocks]] +name = "Monitoring" +src = "module/event-driven/project/monitoring" +[[blocks]] +name = "Alerting" +src = "module/event-driven/project/alerting" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/4/success/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/4/success/index.md new file mode 100644 index 000000000..6246758ee --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/4/success/index.md @@ -0,0 +1,8 @@ ++++ +title = "Success" +layout = "success" +menu_level = ["sprint"] +weight = 4 +objectives = [[ +]] ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/5/_index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/5/_index.md new file mode 100644 index 000000000..31d9e3a75 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/5/_index.md @@ -0,0 +1,8 @@ ++++ +title = "Sprint 5" +description = "Distributed Tracing" +theme = "Distributed Tracing" +layout = "sprint" +menu_level = ["module"] +weight = 5 ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/5/backlog/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/5/backlog/index.md new file mode 100644 index 000000000..45cffe5e8 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/5/backlog/index.md @@ -0,0 +1,8 @@ ++++ +title = "Backlog" +layout = "backlog" +menu_level = ["sprint"] +weight = 2 +backlog = "Module-Event-Driven" +backlog_filter = "📅 Sprint 5" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/5/prep/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/5/prep/index.md new file mode 100644 index 000000000..d5cd9ac28 --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/5/prep/index.md @@ -0,0 +1,15 @@ ++++ +title = "Prep" +layout = "prep" +menu_level = ["sprint"] +weight = 1 +[[blocks]] +name = "Distributed Tracing" +src = "module/distributed-tracing/introduction" +[[blocks]] +name = "Using Honeycomb" +src = "module/distributed-tracing/using-honeycomb" +[[blocks]] +name = "Distributed Tracing in Kafka" +src = "module/event-driven/project/distributed-tracing" ++++ diff --git a/org-cyf-tracks/content/event-driven-architecture/sprints/5/success/index.md b/org-cyf-tracks/content/event-driven-architecture/sprints/5/success/index.md new file mode 100644 index 000000000..6246758ee --- /dev/null +++ b/org-cyf-tracks/content/event-driven-architecture/sprints/5/success/index.md @@ -0,0 +1,8 @@ ++++ +title = "Success" +layout = "success" +menu_level = ["sprint"] +weight = 4 +objectives = [[ +]] ++++