Commit 751a2ef: add ch6 spring version of sample

IISI-1204003 committed May 10, 2022
1 parent 40d5869 commit 751a2ef
Showing 23 changed files with 1,293 additions and 0 deletions.
33 changes: 33 additions & 0 deletions chapter-05/patient-monitoring-app-spring/.gitignore
@@ -0,0 +1,33 @@
HELP.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/

### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache

### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr

### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/

### VS Code ###
.vscode/
10 changes: 10 additions & 0 deletions chapter-05/patient-monitoring-app-spring/README.md
@@ -0,0 +1,10 @@
* [Mixing high level DSL and low level Processor API (Kafka Streams binder reference)](https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/3.1.6/reference/html/spring-cloud-stream-binder-kafka.html#_mixing_high_level_dsl_and_low_level_processor_api)
* [Multiple input bindings (Kafka Streams binder reference)](https://cloud.spring.io/spring-cloud-stream-binder-kafka/spring-cloud-stream-binder-kafka.html#_multiple_input_bindings)

## Spring Cloud Stream - functional and reactive

* [Spring Cloud Function](https://spring.io/projects/spring-cloud-function)
* [Cloud Events and Spring - part 1 (Oleg Zhurakousky)](https://spring.io/blog/2020/12/10/cloud-events-and-spring-part-1)
* [Cloud Events and Spring - part 2 (Oleg Zhurakousky)](https://spring.io/blog/2020/12/23/cloud-events-and-spring-part-2)
* [Spring Cloud Stream - demystified and simplified](https://spring.io/blog/2019/10/14/spring-cloud-stream-demystified-and-simplified)
* [Spring Cloud Stream - functional and reactive (Oleg Zhurakousky)](https://spring.io/blog/2019/10/17/spring-cloud-stream-functional-and-reactive)
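The links above describe Spring Cloud Stream's functional programming model, in which a stream processor is exposed as a plain `java.util.function.Function` bean and the binder wires it to Kafka topics. A minimal sketch of that model with the Kafka Streams binder (the bean name `process` and the topic bindings are illustrative assumptions, not part of this project):

```java
import java.util.function.Function;
import org.apache.kafka.streams.kstream.KStream;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;

@SpringBootApplication
public class FunctionalStyleSketch {

  public static void main(String[] args) {
    SpringApplication.run(FunctionalStyleSketch.class, args);
  }

  // with spring-cloud-stream-binder-kafka-streams on the classpath, this bean
  // is bound to the topics configured under
  // spring.cloud.stream.bindings.process-in-0 / process-out-0
  @Bean
  public Function<KStream<String, String>, KStream<String, String>> process() {
    return stream -> stream.mapValues(String::toUpperCase);
  }
}
```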
109 changes: 109 additions & 0 deletions chapter-05/patient-monitoring-app-spring/pom.xml
@@ -0,0 +1,109 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.5.13</version>
    <relativePath/> <!-- lookup parent from repository -->
  </parent>
  <groupId>com.magicalpipelines</groupId>
  <artifactId>patient-monitoring-app-spring</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <name>patient-monitoring-app-spring</name>
  <description>Demo project for Spring Boot</description>
  <properties>
    <java.version>8</java.version>
    <spring-cloud.version>2020.0.5</spring-cloud.version>
    <testcontainers-kafka.version>1.16.2</testcontainers-kafka.version>
  </properties>
  <dependencies>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-streams</artifactId>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream</artifactId>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
    </dependency>
    <dependency>
      <groupId>com.fasterxml.jackson.datatype</groupId>
      <artifactId>jackson-datatype-jsr310</artifactId>
    </dependency>
    <dependency>
      <groupId>com.google.code.gson</groupId>
      <artifactId>gson</artifactId>
      <version>2.9.0</version>
    </dependency>
    <dependency>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-webflux</artifactId>
    </dependency>
    <dependency>
      <groupId>org.projectlombok</groupId>
      <artifactId>lombok</artifactId>
      <optional>true</optional>
    </dependency>
    <dependency>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream</artifactId>
      <scope>test</scope>
      <classifier>test-binder</classifier>
      <type>test-jar</type>
    </dependency>
    <!-- spring-kafka-test includes EmbeddedKafka -->
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.testcontainers</groupId>
      <artifactId>kafka</artifactId>
      <version>${testcontainers-kafka.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-test-support</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.testcontainers</groupId>
      <artifactId>junit-jupiter</artifactId>
      <version>${testcontainers-kafka.version}</version>
      <scope>test</scope>
    </dependency>
  </dependencies>
  <dependencyManagement>
    <dependencies>
      <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-dependencies</artifactId>
        <version>${spring-cloud.version}</version>
        <type>pom</type>
        <scope>import</scope>
      </dependency>
    </dependencies>
  </dependencyManagement>

  <build>
    <plugins>
      <plugin>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-maven-plugin</artifactId>
      </plugin>
    </plugins>
  </build>

</project>
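The test-scoped dependencies above support broker-based tests two ways: spring-kafka-test provides an in-JVM embedded broker, and Testcontainers runs a real broker in Docker. A minimal Testcontainers sketch (the test class name and image tag are illustrative assumptions):

```java
import org.junit.jupiter.api.Test;
import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.utility.DockerImageName;

class KafkaContainerSketchTest {

  @Test
  void startsThrowawayBroker() {
    // start a disposable Kafka broker in Docker; the image tag is an assumption
    try (KafkaContainer kafka =
        new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1"))) {
      kafka.start();
      // hand this to StreamsConfig.BOOTSTRAP_SERVERS_CONFIG in the code under test
      String bootstrapServers = kafka.getBootstrapServers();
      System.out.println("broker running at " + bootstrapServers);
    }
  }
}
```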
@@ -0,0 +1,54 @@
package com.magicalpipelines;

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;
import org.apache.kafka.streams.state.HostInfo;

class App {
  public static void main(String[] args) {
    Topology topology = PatientMonitoringTopology.build();

    // the host, port, and state directory are supplied as JVM system
    // properties (-Dhost, -Dport, -DstateDir)
    String host = System.getProperty("host");
    Integer port = Integer.parseInt(System.getProperty("port"));
    String stateDir = System.getProperty("stateDir");
    String endpoint = String.format("%s:%s", host, port);

    // set the required properties for running Kafka Streams
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "dev-consumer");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:29092");
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, endpoint);
    props.put(StreamsConfig.STATE_DIR_CONFIG, stateDir);
    // an example of setting the timestamp extractor using a streams config.
    // note that we override this in our topology implementation; it is
    // included here for demonstration purposes only
    props.put(
        StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class);

    // build the topology
    System.out.println("Starting Patient Monitoring Application");
    KafkaStreams streams = new KafkaStreams(topology, props);
    // close Kafka Streams when the JVM shuts down (e.g. SIGTERM)
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

    // clean up local state since many of the tutorials write to the same location.
    // run this sparingly in production, since it forces the state stores to be
    // rebuilt on startup
    streams.cleanUp();

    // start streaming
    streams.start();

    // start the REST service
    HostInfo hostInfo = new HostInfo(host, port);
    // RestService service = new RestService(hostInfo, streams);
    // service.start();
  }
}
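Note that the `host`, `port`, and `stateDir` lookups above have no fallback, so launching without `-Dhost -Dport -DstateDir` fails with a `NumberFormatException` (or puts a null state directory into the streams config). A defensive sketch; the default values below are assumptions, not part of the original code:

```java
class AppConfigSketch {
  public static void main(String[] args) {
    // System.getProperty has a two-argument overload that falls back to a
    // default when the property is unset; the defaults here are assumptions
    String host = System.getProperty("host", "localhost");
    int port = Integer.parseInt(System.getProperty("port", "7000"));
    String stateDir = System.getProperty("stateDir", "/tmp/kafka-streams");
    System.out.printf("host=%s port=%d stateDir=%s%n", host, port, stateDir);
  }
}
```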
@@ -0,0 +1,32 @@
package com.magicalpipelines;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.StateRestoreListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class MyRestoreListener implements StateRestoreListener {

  private static final Logger log = LoggerFactory.getLogger(MyRestoreListener.class);

  @Override
  public void onRestoreStart(
      TopicPartition topicPartition, String storeName, long startingOffset, long endingOffset) {
    log.info("The following state store is being restored: {}", storeName);
  }

  @Override
  public void onRestoreEnd(TopicPartition topicPartition, String storeName, long totalRestored) {
    log.info("Restore complete for the following state store: {}", storeName);
  }

  @Override
  public void onBatchRestored(
      TopicPartition topicPartition, String storeName, long batchEndOffset, long numRestored) {
    // note: logging every restored batch can be very noisy; it is kept here
    // for demonstration purposes
    log.info(
        "A batch of {} records has been restored in the following state store: {}",
        numRestored,
        storeName);
  }
}
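The `App` class shown above never registers this listener; a restore listener is attached to the `KafkaStreams` instance before `start()` is called. A minimal sketch of that wiring (the helper class is illustrative):

```java
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.Topology;

class RestoreListenerWiringSketch {
  static KafkaStreams create(Topology topology, Properties props) {
    KafkaStreams streams = new KafkaStreams(topology, props);
    // must be called before streams.start(); Kafka Streams then invokes the
    // listener's callbacks as changelog-backed state stores are restored
    streams.setGlobalStateRestoreListener(new MyRestoreListener());
    return streams;
  }
}
```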
@@ -0,0 +1,149 @@
package com.magicalpipelines;

import com.magicalpipelines.model.BodyTemp;
import com.magicalpipelines.model.CombinedVitals;
import com.magicalpipelines.model.Pulse;
import com.magicalpipelines.serialization.json.JsonSerdes;
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Printed;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.StreamJoined;
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.ValueJoiner;
import org.apache.kafka.streams.kstream.Windowed;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class PatientMonitoringTopology {
  private static final Logger log = LoggerFactory.getLogger(PatientMonitoringTopology.class);

  public static Topology build() {
    // the builder is used to construct the topology
    StreamsBuilder builder = new StreamsBuilder();

    // the following topology steps are numbered. these numbers correlate with
    // the topology design shown in the book (Chapter 5: Windows and Time)

    // 1.1
    Consumed<String, Pulse> pulseConsumerOptions =
        Consumed.with(Serdes.String(), JsonSerdes.Pulse())
            .withTimestampExtractor(new VitalTimestampExtractor());

    KStream<String, Pulse> pulseEvents =
        // register the pulse-events stream
        builder.stream("pulse-events", pulseConsumerOptions);

    // 1.2
    Consumed<String, BodyTemp> bodyTempConsumerOptions =
        Consumed.with(Serdes.String(), JsonSerdes.BodyTemp())
            .withTimestampExtractor(new VitalTimestampExtractor());

    KStream<String, BodyTemp> tempEvents =
        // register the body-temp-events stream
        builder.stream("body-temp-events", bodyTempConsumerOptions);

    // turn raw pulse events into a heart rate (beats per minute) using a
    // fixed-size, non-overlapping (tumbling) window, with a 5-second grace
    // period for late-arriving records
    TimeWindows tumblingWindow =
        TimeWindows.of(Duration.ofSeconds(60)).grace(Duration.ofSeconds(5));

    /*
     * Examples of other windows (not needed for the tutorial) are commented
     * out below
     *
     * TimeWindows hoppingWindow =
     *     TimeWindows.of(Duration.ofSeconds(5)).advanceBy(Duration.ofSeconds(4));
     *
     * SessionWindows sessionWindow = SessionWindows.with(Duration.ofSeconds(5));
     *
     * JoinWindows joinWindow = JoinWindows.of(Duration.ofSeconds(5));
     *
     * SlidingWindows slidingWindow =
     *     SlidingWindows.withTimeDifferenceAndGrace(Duration.ofSeconds(5), Duration.ofSeconds(0));
     */

    KTable<Windowed<String>, Long> pulseCounts =
        pulseEvents
            // 2
            .groupByKey()
            // 3.1 - windowed aggregation
            .windowedBy(tumblingWindow)
            // 3.2 - windowed aggregation
            .count(Materialized.as("pulse-counts"))
            // 4 - emit only the final count for each window once it closes
            .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded().shutDownWhenFull()));

    // 5.1
    // filter for any pulse that exceeds our threshold
    KStream<String, Long> highPulse =
        pulseCounts
            .toStream()
            // this peek operator is not included in the book, but was added
            // to this code example so you can view some additional information
            // when running the application locally :)
            .peek(
                (key, value) -> {
                  String id = key.key();
                  Long start = key.window().start();
                  Long end = key.window().end();
                  log.info(
                      "Patient {} had a heart rate of {} between {} and {}", id, value, start, end);
                })
            // 5.1
            .filter((key, value) -> value >= 100)
            // 6 - rekey from the windowed key back to the plain patient ID
            .map((windowedKey, value) -> KeyValue.pair(windowedKey.key(), value));

    // 5.2
    // filter for any temperature reading that exceeds our threshold
    KStream<String, BodyTemp> highTemp =
        tempEvents.filter(
            (key, value) ->
                value != null && value.getTemperature() != null && value.getTemperature() > 100.4);

    // looking for step 6? it's chained right after 5.1

    // 7 - note that this KStream-KStream join requires both input streams to
    // be co-partitioned: keyed the same way (by patient ID) and with the same
    // number of partitions
    StreamJoined<String, Long, BodyTemp> joinParams =
        StreamJoined.with(Serdes.String(), Serdes.Long(), JsonSerdes.BodyTemp());

    JoinWindows joinWindows =
        JoinWindows
            // join records whose timestamps are at most 1 minute apart
            .of(Duration.ofSeconds(60))
            // tolerate late-arriving data for up to 10 seconds
            .grace(Duration.ofSeconds(10));

    ValueJoiner<Long, BodyTemp, CombinedVitals> valueJoiner =
        (pulseRate, bodyTemp) -> new CombinedVitals(pulseRate.intValue(), bodyTemp);

    KStream<String, CombinedVitals> vitalsJoined =
        highPulse.join(highTemp, valueJoiner, joinWindows, joinParams);

    // 8
    vitalsJoined.to("alerts", Produced.with(Serdes.String(), JsonSerdes.CombinedVitals()));

    // debug only
    pulseCounts
        .toStream()
        .print(Printed.<Windowed<String>, Long>toSysOut().withLabel("pulse-counts"));
    highPulse.print(Printed.<String, Long>toSysOut().withLabel("high-pulse"));
    highTemp.print(Printed.<String, BodyTemp>toSysOut().withLabel("high-temp"));
    vitalsJoined.print(Printed.<String, CombinedVitals>toSysOut().withLabel("vitals-joined"));

    return builder.build();
  }
}
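The commented-out `RestService` lines in `App` hint at serving interactive queries against the `pulse-counts` store materialized above. A minimal sketch of such a query (the helper method, patient ID, and time range are illustrative assumptions):

```java
import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.apache.kafka.streams.state.WindowStoreIterator;

class PulseCountQuerySketch {

  // fetch the windowed pulse counts for one patient over the last hour;
  // "pulse-counts" is the store name materialized in the topology above
  static void printRecentCounts(KafkaStreams streams, String patientId) {
    ReadOnlyWindowStore<String, Long> store =
        streams.store(
            StoreQueryParameters.fromNameAndType(
                "pulse-counts", QueryableStoreTypes.windowStore()));
    Instant now = Instant.now();
    try (WindowStoreIterator<Long> iterator =
        store.fetch(patientId, now.minus(Duration.ofHours(1)), now)) {
      // each entry is keyed by the window start timestamp (epoch millis)
      iterator.forEachRemaining(
          kv -> System.out.printf("window start=%d, bpm=%d%n", kv.key, kv.value));
    }
  }
}
```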