Configure a standalone Spark Stream job on top of Kafka

These notes describe starting up standalone instances of Spark, Kafka, and Zookeeper and running a Spark Streaming job that processes messages from Kafka.

Also see my Kafka Cheat Sheet

Setup Zookeeper

Official Docs: https://zookeeper.apache.org/doc/r3.4.8/zookeeperStarted.html

Download Zookeeper

mkdir ~/zookeeper
cd ~/zookeeper
wget http://mirrors.sonic.net/apache/zookeeper/stable/zookeeper-3.4.8.tar.gz
tar zxvf zookeeper-3.4.8.tar.gz

Create the configuration file

cd ~/zookeeper/zookeeper-3.4.8/
cat >conf/zoo.cfg <<EOL
tickTime=2000
dataDir=data
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
EOL

Start Zookeeper

cd ~/zookeeper/zookeeper-3.4.8/
bin/zkServer.sh start
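
Optionally check that it is running:

bin/zkServer.sh status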

Connect to Zookeeper and create a znode

~/zookeeper/zookeeper-3.4.8/bin/zkCli.sh -server 127.0.0.1:2181
create /zk_test my_data
ls /
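
At the same zkCli prompt you can read the data back and then exit:

get /zk_test
quit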

Setup Kafka

Official Docs: http://kafka.apache.org/documentation.html

Download Kafka

mkdir ~/kafka
cd ~/kafka
wget http://apache.mirrors.ionfish.org/kafka/0.10.0.0/kafka_2.11-0.10.0.0.tgz
tar zxvf kafka_2.11-0.10.0.0.tgz

Start Kafka

cd ~/kafka/kafka_2.11-0.10.0.0
bin/kafka-server-start.sh config/server.properties
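
The broker runs in the foreground; if you prefer to keep it in the background, the start script also accepts a -daemon flag:

bin/kafka-server-start.sh -daemon config/server.properties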

Create a topic named "test" with a single partition and only one replica:

bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
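
You can confirm the topic was created by listing the topics registered in Zookeeper:

bin/kafka-topics.sh --list --zookeeper localhost:2181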

Send a file to Kafka

bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test < testfile.txt
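
Running the producer without the input redirect lets you type messages interactively, one per line:

bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test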

Dump messages to the console

bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

Setup Spark

Download Spark

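For example, grab the prebuilt 1.6.2 package for Hadoop 2.6 from the Apache archive (adjust the mirror or URL as needed):

wget https://archive.apache.org/dist/spark/spark-1.6.2/spark-1.6.2-bin-hadoop2.6.tgz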
tar zxvf spark-1.6.2-bin-hadoop2.6.tgz
cd spark-1.6.2-bin-hadoop2.6/

Start the master and slave

Note: I couldn't get this to work from within a tmux session.

sbin/start-master.sh
sbin/start-slaves.sh
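
If no worker registers with the master, you can also try starting a single worker directly against the master URL:

sbin/start-slave.sh spark://localhost:7077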

Open the console

open http://localhost:8080

Connect to Spark shell

bin/spark-shell --master spark://localhost:7077

Execute a job

val file = sc.textFile("README.md")
file.count()
file.take(3)
file.filter(line => line.contains("Spark")).count()

Write a Java class and submit it

Create the pom.xml

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <name>Spark Job</name>

  <groupId>com.example.spark</groupId>
  <artifactId>streaming-job</artifactId>
  <version>1.0.0-SNAPSHOT</version>
  <packaging>jar</packaging>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.5.1</version>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <configuration>
          <archive>
            <manifest>
              <addClasspath>true</addClasspath>
              <classpathPrefix>lib/</classpathPrefix>
              <mainClass>com.example.spark.StreamingJob</mainClass>
            </manifest>
          </archive>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-dependency-plugin</artifactId>
        <executions>
          <execution>
            <id>copy</id>
            <phase>install</phase>
            <goals>
              <goal>copy-dependencies</goal>
            </goals>
            <configuration>
              <outputDirectory>${project.build.directory}/lib</outputDirectory>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
  <dependencies>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-core_2.10</artifactId>
      <version>1.6.2</version>
    </dependency>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming_2.10</artifactId>
      <version>1.6.2</version>
    </dependency>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-streaming-kafka_2.10</artifactId>
      <version>1.6.2</version>
    </dependency>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>
  </dependencies>
</project>

Create the Java class

Relative to your pom.xml, create the package directory:

mkdir -p src/main/java/com/example/spark

Then add the following code to src/main/java/com/example/spark/StreamingJob.java:

Note: This is a slightly modified version of this file: https://github.com/apache/spark/blob/master/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java

package com.example.spark;

import java.util.Arrays;
import java.util.Map;
import java.util.HashMap;
import java.util.regex.Pattern;

import scala.Tuple2;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaPairReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;

public final class StreamingJob {
    private static final Pattern SPACE = Pattern.compile(" ");

    private StreamingJob() {
    }

    private static void setLogLevels() {
        boolean log4jInitialized = Logger.getRootLogger().getAllAppenders().hasMoreElements();
        if (!log4jInitialized) {
            // We first log something to initialize Spark's default logging, then we override the
            // logging level.
            Logger.getLogger(StreamingJob.class).info("Setting log level to [WARN] for streaming example." +
                    " To override add a custom log4j.properties to the classpath.");
            Logger.getRootLogger().setLevel(Level.WARN);
        }
    }

    public static void main(String[] args) throws Exception {
        if (args.length < 4) {
            System.err.println("Usage: StreamingJob <zkQuorum> <group> <topics> <numThreads>");
            System.exit(1);
        }

        setLogLevels();

        SparkConf sparkConf = new SparkConf().setAppName("StreamingJob");
        // Create the context with 2 seconds batch size
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

        int numThreads = Integer.parseInt(args[3]);
        Map<String, Integer> topicMap = new HashMap<>();
        String[] topics = args[2].split(",");
        for (String topic : topics) {
            topicMap.put(topic, numThreads);
        }

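        // Create a receiver-based Kafka input stream: args[0] is the Zookeeper quorum,
        // args[1] the consumer group id, and topicMap the topics with their thread counts.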
        JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, args[0], args[1], topicMap);

        JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
            private static final long serialVersionUID = 1L;

            @Override
            public String call(Tuple2<String, String> tuple2) {
                return tuple2._2();
            }
        });

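        // Split each line into words on single spaces.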
        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Iterable<String> call(String x) {
                return Arrays.asList(SPACE.split(x));
            }
        });

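        // Pair each word with a count of 1, then sum the counts per word within the batch.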
        JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Tuple2<String, Integer> call(String s) {
                return new Tuple2<>(s, 1);
            }
        }).reduceByKey(new Function2<Integer, Integer, Integer>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Integer call(Integer i1, Integer i2) {
                return i1 + i2;
            }
        });

        wordCounts.print();
        jssc.start();
        jssc.awaitTermination();
    }
}

Compile and submit the job

From the directory containing the pom.xml, build the job:

mvn clean install

From your Spark directory, run the program. The arguments after the jar are the Zookeeper quorum, the consumer group, the topic list, and the number of receiver threads, matching the usage string in main().

cd spark-1.6.2-bin-hadoop2.6/
./bin/spark-submit --class com.example.spark.StreamingJob --master local[2] <your pom.xml directory location>/target/streaming-job-1.0.0-SNAPSHOT.jar localhost my-consumer-group test 1

Submit data to the stream

~/kafka/kafka_2.11-0.10.0.0/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test < some_text_file.txt

The running job prints the word counts for each 2-second batch to the spark-submit console.

Additional reading