diff --git a/.gitignore b/.gitignore
index 4581ef2..67daa40 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,7 @@
 *.exe
 *.out
 *.app
+
+# Documentation
+doc/doxygen
+doc/_build
diff --git a/doc/Doxyfile b/doc/Doxyfile
new file mode 100644
index 0000000..1a9877b
--- /dev/null
+++ b/doc/Doxyfile
@@ -0,0 +1,2303 @@
+# Doxyfile 1.8.6
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = "Ray Core"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         = 0.1
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          =
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = doxygen
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES                =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = ../src/ray/client ../src/plasma/service ../src/plasma/client ../src/numbuf/cpp/src/numbuf
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS          =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET  =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = YES
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_SCHEMA             =
+
+# The XML_DTD tag can be used to specify a XML DTD, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_DTD                =
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED             =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have an unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT               = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that doxygen if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = YES
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP            = YES
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..34cb6ae
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,192 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  applehelp  to make an Apple Help Book"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+	@echo "  coverage   to run coverage check of the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/RayCore.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/RayCore.qhc"
+
+applehelp:
+	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+	@echo
+	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+	@echo "N.B. You won't be able to view it unless you put it in" \
+	      "~/Library/Documentation/Help or install it in your application" \
+	      "bundle."
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/RayCore"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/RayCore"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage:
+	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+	@echo "Testing of coverage in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/coverage/python.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/doc/build.sh b/doc/build.sh
new file mode 100755
index 0000000..d085401
--- /dev/null
+++ b/doc/build.sh
@@ -0,0 +1,2 @@
+doxygen Doxyfile
+make html
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..78cfea9
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,308 @@
+# -*- coding: utf-8 -*-
+#
+# Ray Core documentation build configuration file, created by
+# sphinx-quickstart on Sun Jul  3 17:03:22 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.todo',
+    'sphinx.ext.pngmath',
+    'sphinx.ext.viewcode',
+    'breathe'
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Ray Core'
+copyright = u'2016, The Ray team'
+author = u'The Ray team'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.1'
+# The full version, including alpha/beta/rc tags.
+release = '0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'RayCoredoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  (master_doc, 'RayCore.tex', u'Ray Core Documentation',
+   u'The Ray team', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'raycore', u'Ray Core Documentation',
+     [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  (master_doc, 'RayCore', u'Ray Core Documentation',
+   author, 'RayCore', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'https://docs.python.org/': None}
+
+breathe_projects = {
+  "ray":"doxygen/xml/",
+}
+
+# run doxygen on read-the-docs
+
+import subprocess, os
+
+read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
+
+if read_the_docs_build:
+  subprocess.call('doxygen Doxyfile', shell=True)
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..49a9c03
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,46 @@
+.. Ray Core documentation master file, created by
+   sphinx-quickstart on Sun Jul  3 17:03:22 2016.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Ray Core documentation
+======================
+
+Welcome to the documentation of Ray Core! The documentation consists of two
+parts:
+
+* the system user documentation which describes the system API and how to use the
+  system
+
+* the developer documentation that describes the internals of the system and the
+  developer API
+
+These are separate from the Ray user documentation, which can be found at
+https://github.com/amplab/ray/blob/master/README.md.
+
+**Ray Core user documentation:**
+
+.. toctree::
+   :maxdepth: 2
+   :glob:
+
+   quickstart.rst
+   system.rst
+   plasma.rst
+   numbuf.rst
+   services.rst
+
+**Ray Core developer documentation:**
+
+.. toctree::
+   :maxdepth: 2
+   :glob:
+
+   internal/*
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/doc/internal/numbuf.rst b/doc/internal/numbuf.rst
new file mode 100644
index 0000000..a2a868a
--- /dev/null
+++ b/doc/internal/numbuf.rst
@@ -0,0 +1,18 @@
+Developer Documentation for Numbuf
+==================================
+
+Numbuf is a library for the fast serialization of primitive Python objects
+(lists, tuples, dictionaries, NumPy arrays) to the
+`Apache Arrow <https://arrow.apache.org/>`_ format.
+
+.. doxygenclass:: numbuf::DictBuilder
+   :project: ray
+   :members:
+
+.. doxygenclass:: numbuf::SequenceBuilder
+   :project: ray
+   :members:
+
+.. doxygenclass:: numbuf::TensorBuilder
+   :project: ray
+   :members:
diff --git a/doc/internal/plasma.rst b/doc/internal/plasma.rst
new file mode 100644
index 0000000..308613c
--- /dev/null
+++ b/doc/internal/plasma.rst
@@ -0,0 +1,22 @@
+Developer documentation for Plasma
+==================================
+
+The IPC interface
+-----------------
+
+.. literalinclude:: ../../src/plasma/service/plasma.mojom
+
+Internal classes
+----------------
+
+.. doxygenclass:: plasma::service::PlasmaEntry
+   :project: ray
+   :members:
+
+.. doxygenclass:: plasma::service::PlasmaImpl
+   :project: ray
+   :members:
+
+.. doxygenclass:: plasma::service::PlasmaServerApp
+   :project: ray
+   :members:
diff --git a/doc/internal/ray.rst b/doc/internal/ray.rst
new file mode 100644
index 0000000..49e7fea
--- /dev/null
+++ b/doc/internal/ray.rst
@@ -0,0 +1,22 @@
+Developer documentation for Ray
+===============================
+
+Client connections to the Shell
+-------------------------------
+
+Most of the complexity of this code comes from the fact that we need to
+be able to connect to the Ray shell from an outside process (e.g. a Python process)
+that was started independently of the Ray shell. This is not supported in
+Mojo, which uses fork to start child processes.
+
+.. doxygenclass:: ray::FileDescriptorSender
+   :project: ray
+   :members:
+
+.. doxygenclass:: ray::FileDescriptorReceiver
+   :project: ray
+   :members:
+
+.. doxygenclass:: shell::ServiceConnectionApp
+   :project: ray
+   :members:
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 0000000..35e3577
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,263 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  xml        to make Docutils-native XML files
+	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	echo.  coverage   to run coverage check of the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+
+REM Check if sphinx-build is available and fallback to Python version if any
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 goto sphinx_python
+goto sphinx_ok
+
+:sphinx_python
+
+set SPHINXBUILD=python -m sphinx.__init__
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+:sphinx_ok
+
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\RayCore.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\RayCore.qhc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdf" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf
+	cd %~dp0
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdfja" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf-ja
+	cd %~dp0
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "texinfo" (
+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+	goto end
+)
+
+if "%1" == "gettext" (
+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+if "%1" == "coverage" (
+	%SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Testing of coverage in the sources finished, look at the ^
+results in %BUILDDIR%/coverage/python.txt.
+	goto end
+)
+
+if "%1" == "xml" (
+	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The XML files are in %BUILDDIR%/xml.
+	goto end
+)
+
+if "%1" == "pseudoxml" (
+	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+	goto end
+)
+
+:end
diff --git a/doc/numbuf.rst b/doc/numbuf.rst
new file mode 100644
index 0000000..23a7766
--- /dev/null
+++ b/doc/numbuf.rst
@@ -0,0 +1,6 @@
+Numbuf: Fast serialization of numerical data
+============================================
+
+Numbuf is a library for the fast serialization of primitive Python objects
+(lists, tuples, dictionaries, NumPy arrays) to the
+`Apache Arrow <https://arrow.apache.org/>`_ format.
diff --git a/doc/plasma.rst b/doc/plasma.rst
new file mode 100644
index 0000000..858de59
--- /dev/null
+++ b/doc/plasma.rst
@@ -0,0 +1,97 @@
+Plasma: Storing objects in memory
+=================================
+
+Plasma is a shared region of memory that allows multiple processes running on
+the same machine to access shared data objects.
+
+It can be used both as a Ray service and a library in your own programs.
+
+An object is created in two distinct phases:
+
+1. Allocate memory and write data into allocated memory.
+   If the size of the data is not known in advance, the buffer can be resized.
+   Note that during this phase the buffer is writable, but only by its
+   creator. No one else can access the buffer during this phase.
+
+2. Seal the buffer. Once the creator finishes writing data into buffer
+   it seals the buffer. From this moment on the buffer becomes
+   immutable and other processes can read it.
+
+To create an object, the user specifies a unique identifier for the object and
+an optional name. Plasma keeps track of the process id that created the object,
+the creation time stamp, how long creation of the object took and the size of
+the object. During creation, the user can also specify metadata that will be
+associated with the object.
+
+Other processes can request an object by its unique identifier (later also by
+name). If the object has not been created or sealed yet, the process requesting
+the object will block until the object has been sealed.
+
+The Buffer interface
+--------------------
+
+A buffer is the region of memory associated to a data object, as determined by a
+start address and a size in bytes. There are two kinds of buffers, read-only
+buffers and read-write buffers.
+
+.. doxygenclass:: plasma::Buffer
+   :project: ray
+   :members:
+
+MutableBuffers have a richer interface, they allow writing to and resizing
+the object. When the object creator has finished modifying the object, it
+calls the Seal method to make the object immutable, which allows other
+processes to read the object.
+
+.. doxygenclass:: plasma::MutableBuffer
+   :project: ray
+   :members:
+
+The Plasma client interface
+---------------------------
+
+The developer interacts with Plasma through the Plasma API. Each process
+needs to instantiate a ClientContext, which will give the process access to
+objects and their metadata and allow them to write objects.
+
+.. doxygenclass:: plasma::ClientContext
+   :project: ray
+   :members:
+
+Plasma metadata
+---------------
+
+There are two parts to the object metadata: One internally maintained by Plasma
+and one provided by the user. The first part is represented by the ObjectInfo class.
+
+.. doxygenclass:: plasma::ObjectInfo
+   :project: ray
+   :members:
+
+Each object has a small dictionary that can hold metadata provided by users.
+Users can provide arbitrary information here. It is most likely going to be
+used to store information like ``format`` (``binary``, ``arrow``, ``protobuf``,
+``json``) and ``schema``, which could hold a schema for the data.
+
+An example application
+----------------------
+
+We are going to have more examples here. Currently, the best way of
+understanding the API is by looking at ``libplasma``, the Python C extension
+for Plasma. It can be found in https://github.com/amplab/ray-core/blob/master/src/plasma/client/plasma.cc.
+
+Note that this is not the Python API that users will interact with.
+It can be used like this:
+
+::
+
+  import libplasma
+
+  plasma = libplasma.connect("/home/pcmoritz/shell")
+
+  A = libplasma.build_object(plasma, 1, 1000, "object-1")
+  libplasma.seal_object(A)
+  B = libplasma.build_object(plasma, 2, 2000, "object-2")
+  libplasma.seal_object(B)
+
+  libplasma.list_objects(plasma)
diff --git a/doc/quickstart.rst b/doc/quickstart.rst
new file mode 100644
index 0000000..45a0c08
--- /dev/null
+++ b/doc/quickstart.rst
@@ -0,0 +1,46 @@
+Quick Start Guide
+=================
+
+To build Ray Core, execute the following commands:
+
+First, install the requirements:
+
+::
+
+  sudo apt-get update
+  sudo apt-get install git subversion build-essential
+  sudo apt-get install python-dev g++-multilib libcap-dev
+
+Then, install depot_tools:
+
+::
+
+  git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
+  export PATH=`pwd`/depot_tools:"$PATH"
+
+Check out and build the project:
+
+::
+
+  git clone https://github.com/amplab/ray-core
+  cd ray-core
+  gclient sync
+  cd src
+  gn gen out/Debug
+  ninja -C out/Debug -j 16
+
+To make sure everything works, you can try out the Mojo hello world example:
+
+::
+
+  cd ray-core/src/out/Debug
+  ./mojo_shell mojo:hello_mojo_client
+
+Now, the Ray shell can be started with
+
+::
+
+  cd ray-core/src/out/Debug
+  ./mojo_shell --enable-multiprocess
+               --external-connection-address=/home/ubuntu/shell
+               ray_node_app.mojo
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..cd6467e
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1 @@
+breathe
diff --git a/doc/services.rst b/doc/services.rst
new file mode 100644
index 0000000..0d07ef3
--- /dev/null
+++ b/doc/services.rst
@@ -0,0 +1,11 @@
+Connecting to Ray services
+==========================
+
+All the Ray services expose an API via IPC that can be called by any other
+services or applications. To learn more about services, please have a look
+at https://www.chromium.org/developers/design-documents/mojo and
+https://github.com/amplab/ray-core/tree/master/src/docs.
+
+.. doxygenclass:: shell::ClientContext
+   :project: ray
+   :members:
diff --git a/doc/setup.sh b/doc/setup.sh
new file mode 100644
index 0000000..5103c34
--- /dev/null
+++ b/doc/setup.sh
@@ -0,0 +1,2 @@
+sudo apt-get install doxygen
+sudo pip install Sphinx
diff --git a/doc/system.rst b/doc/system.rst
new file mode 100644
index 0000000..acb6aa4
--- /dev/null
+++ b/doc/system.rst
@@ -0,0 +1,18 @@
+The Ray shell
+==========================
+
+The Ray shell is responsible for managing all the services that are running
+on a given node, like the local scheduler, the Plasma store and the Python
+workers. There is one shell per node.
+
+You can start the shell using
+
+::
+
+  ./mojo_shell --enable-multiprocess
+               --external-connection-address=/home/ubuntu/shell
+               ray_node_app.mojo
+
+This starts ray_node_app.mojo, which starts the object store and listens on
+the socket ``/home/ubuntu/shell`` to establish connections to Python and C++
+clients.
diff --git a/src/BUILD.gn b/src/BUILD.gn
index b7da81a..f1c2277 100644
--- a/src/BUILD.gn
+++ b/src/BUILD.gn
@@ -16,6 +16,8 @@ group("default") {
     "//examples",
     "//mojo",
     "//mojom",
+    "//ray",
+    "//plasma",
     "//services",
     "//shell",
   ]
diff --git a/src/numbuf/CMakeLists.txt b/src/numbuf/CMakeLists.txt
new file mode 100644
index 0000000..bda3778
--- /dev/null
+++ b/src/numbuf/CMakeLists.txt
@@ -0,0 +1,40 @@
+cmake_minimum_required(VERSION 2.8)
+
+project(numbuf)
+
+list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules)
+
+find_package(PythonInterp REQUIRED)
+find_package(PythonLibs REQUIRED)
+find_package(NumPy REQUIRED)
+
+include_directories("${PYTHON_INCLUDE_DIRS}")
+include_directories("${NUMPY_INCLUDE_DIR}")
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+
+if (UNIX AND NOT APPLE)
+   link_libraries(rt)
+endif()
+
+set(ARROW_DIR "${CMAKE_SOURCE_DIR}/../arrow/" CACHE STRING
+  "Path of the arrow source directory")
+
+set(ARROW_STATIC_LIB "${CMAKE_SOURCE_DIR}/../arrow/cpp/build/debug/libarrow.so" CACHE STRING
+  "Path to libarrow.a (needs to be changed if arrow is built in debug mode)")
+
+include_directories("${ARROW_DIR}/cpp/src/")
+include_directories("cpp/src/")
+include_directories("python/src/")
+
+add_definitions(-fPIC)
+
+add_library(numbuf SHARED
+  cpp/src/numbuf/tensor.cc
+  cpp/src/numbuf/dict.cc
+  cpp/src/numbuf/sequence.cc
+  python/src/pynumbuf/numbuf.cc
+  python/src/pynumbuf/adapters/numpy.cc
+  python/src/pynumbuf/adapters/python.cc)
+
+target_link_libraries(numbuf ${ARROW_STATIC_LIB} ${PYTHON_LIBRARIES})
diff --git a/src/numbuf/cmake/Modules/FindNumPy.cmake b/src/numbuf/cmake/Modules/FindNumPy.cmake
new file mode 100644
index 0000000..6e1f3c4
--- /dev/null
+++ b/src/numbuf/cmake/Modules/FindNumPy.cmake
@@ -0,0 +1,54 @@
+# - Find the NumPy libraries
+# This module finds if NumPy is installed, and sets the following variables
+# indicating where it is.
+#
+#
+#  NUMPY_FOUND               - was NumPy found
+#  NUMPY_VERSION             - the version of NumPy found as a string
+#  NUMPY_VERSION_MAJOR       - the major version number of NumPy
+#  NUMPY_VERSION_MINOR       - the minor version number of NumPy
+#  NUMPY_VERSION_PATCH       - the patch version number of NumPy
+#  NUMPY_VERSION_DECIMAL     - e.g. version 1.6.1 is 10601
+#  NUMPY_INCLUDE_DIR         - path to the NumPy include files
+
+unset(NUMPY_VERSION)
+unset(NUMPY_INCLUDE_DIR)
+
+if(PYTHONINTERP_FOUND)
+  execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
+    "import numpy as n; print(n.__version__); print(n.get_include());"
+    RESULT_VARIABLE __result
+    OUTPUT_VARIABLE __output
+    OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+  if(__result MATCHES 0)
+    string(REGEX REPLACE ";" "\\\\;" __values ${__output})
+    string(REGEX REPLACE "\r?\n" ";"    __values ${__values})
+    list(GET __values 0 NUMPY_VERSION)
+    list(GET __values 1 NUMPY_INCLUDE_DIR)
+
+    string(REGEX MATCH "^([0-9])+\\.([0-9])+\\.([0-9])+" __ver_check "${NUMPY_VERSION}")
+    if(NOT "${__ver_check}" STREQUAL "")
+      set(NUMPY_VERSION_MAJOR ${CMAKE_MATCH_1})
+      set(NUMPY_VERSION_MINOR ${CMAKE_MATCH_2})
+      set(NUMPY_VERSION_PATCH ${CMAKE_MATCH_3})
+      math(EXPR NUMPY_VERSION_DECIMAL
+        "(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}")
+      string(REGEX REPLACE "\\\\" "/"  NUMPY_INCLUDE_DIR ${NUMPY_INCLUDE_DIR})
+    else()
+     unset(NUMPY_VERSION)
+     unset(NUMPY_INCLUDE_DIR)
+     message(STATUS "Requested NumPy version and include path, but got instead:\n${__output}\n")
+    endif()
+  endif()
+else()
+  message(STATUS "To find NumPy Python interpretator is required to be found.")
+endif()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(NumPy REQUIRED_VARS NUMPY_INCLUDE_DIR NUMPY_VERSION
+                                        VERSION_VAR   NUMPY_VERSION)
+
+if(NUMPY_FOUND)
+  message(STATUS "NumPy ver. ${NUMPY_VERSION} found (include: ${NUMPY_INCLUDE_DIR})")
+endif()
diff --git a/src/numbuf/cpp/src/numbuf/dict.cc b/src/numbuf/cpp/src/numbuf/dict.cc
new file mode 100644
index 0000000..a0c53d0
--- /dev/null
+++ b/src/numbuf/cpp/src/numbuf/dict.cc
@@ -0,0 +1,22 @@
+#include "dict.h"
+
+using namespace arrow;
+
+namespace numbuf {
+
+std::shared_ptr<arrow::StructArray> DictBuilder::Finish(
+    std::shared_ptr<Array> list_data,
+    std::shared_ptr<Array> dict_data) {
+  // lists and dicts can't be keys of dicts in Python, that is why for
+  // the keys we do not need to collect sublists
+  auto keys = keys_.Finish(nullptr, nullptr);
+  auto vals = vals_.Finish(list_data, dict_data);
+  auto keys_field = std::make_shared<Field>("keys", keys->type());
+  auto vals_field = std::make_shared<Field>("vals", vals->type());
+  auto type = std::make_shared<StructType>(std::vector<FieldPtr>({keys_field, vals_field}));
+  std::vector<ArrayPtr> field_arrays({keys, vals});
+  DCHECK(keys->length() == vals->length());
+  return std::make_shared<StructArray>(type, keys->length(), field_arrays);
+}
+
+}
diff --git a/src/numbuf/cpp/src/numbuf/dict.h b/src/numbuf/cpp/src/numbuf/dict.h
new file mode 100644
index 0000000..0b8eff5
--- /dev/null
+++ b/src/numbuf/cpp/src/numbuf/dict.h
@@ -0,0 +1,47 @@
+#ifndef NUMBUF_DICT_H
+#define NUMBUF_DICT_H
+
+#include <arrow/api.h>
+
+#include "sequence.h"
+
+namespace numbuf {
+
+/*! Constructing dictionaries of key/value pairs. Sequences of
+    keys and values are built separately using a pair of
+    SequenceBuilders. The resulting Arrow representation
+    can be obtained via the Finish method.
+*/
+class DictBuilder {
+public:
+  DictBuilder(arrow::MemoryPool* pool = nullptr)
+    : keys_(pool), vals_(pool) {}
+
+  //! Builder for the keys of the dictionary
+  SequenceBuilder& keys() { return keys_; }
+  //! Builder for the values of the dictionary
+  SequenceBuilder& vals() { return vals_; }
+
+  /*! Construct an Arrow StructArray representing the dictionary.
+      Contains a field "keys" for the keys and "vals" for the values.
+
+      \param list_data
+        List containing the data from nested lists in the value
+        list of the dictionary
+
+      \param dict_data
+        List containing the data from nested dictionaries in the
+        value list of the dictionary
+  */
+  std::shared_ptr<arrow::StructArray> Finish(
+    std::shared_ptr<arrow::Array> list_data,
+    std::shared_ptr<arrow::Array> dict_data);
+
+private:
+  SequenceBuilder keys_;
+  SequenceBuilder vals_;
+};
+
+}
+
+#endif
diff --git a/src/numbuf/cpp/src/numbuf/sequence.cc b/src/numbuf/cpp/src/numbuf/sequence.cc
new file mode 100644
index 0000000..9689725
--- /dev/null
+++ b/src/numbuf/cpp/src/numbuf/sequence.cc
@@ -0,0 +1,108 @@
+#include "sequence.h"
+
+using namespace arrow;
+
+namespace numbuf {
+
+SequenceBuilder::SequenceBuilder(MemoryPool* pool)
+    : pool_(pool), types_(pool), offsets_(pool),
+      nones_(pool, std::make_shared<NullType>()),
+      bools_(pool, std::make_shared<BooleanType>()),
+      ints_(pool), strings_(pool, std::make_shared<StringType>()),
+      floats_(pool), doubles_(pool),
+      tensors_(std::make_shared<DoubleType>(), pool),
+      list_offsets_({0}), dict_offsets_({0}) {}
+
+#define NUMBUF_LIST_UPDATE(OFFSET, TAG)          \
+  	RETURN_NOT_OK(offsets_.Append(OFFSET));      \
+    RETURN_NOT_OK(types_.Append(TAG));           \
+    RETURN_NOT_OK(nones_.AppendToBitmap(true));
+
+Status SequenceBuilder::Append() {
+  RETURN_NOT_OK(offsets_.Append(0));
+  RETURN_NOT_OK(types_.Append(0));
+  return nones_.AppendToBitmap(false);
+}
+
+Status SequenceBuilder::Append(bool data) {
+  NUMBUF_LIST_UPDATE(bools_.length(), BOOL_TAG);
+  return bools_.Append(data);
+}
+
+Status SequenceBuilder::Append(int64_t data) {
+  NUMBUF_LIST_UPDATE(ints_.length(), INT_TAG);
+  return ints_.Append(data);
+}
+
+Status SequenceBuilder::Append(const char* data) {
+  NUMBUF_LIST_UPDATE(strings_.length(), STRING_TAG);
+  return strings_.Append(std::string(data, strlen(data)));
+}
+
+Status SequenceBuilder::Append(float data) {
+  NUMBUF_LIST_UPDATE(floats_.length(), FLOAT_TAG);
+  return floats_.Append(data);
+}
+
+Status SequenceBuilder::Append(double data) {
+  NUMBUF_LIST_UPDATE(doubles_.length(), DOUBLE_TAG);
+  return doubles_.Append(data);
+}
+
+Status SequenceBuilder::Append(const std::vector<int64_t>& dims, double* data) {
+  NUMBUF_LIST_UPDATE(tensors_.length(), TENSOR_TAG);
+  return tensors_.Append(dims, data);
+}
+
+Status SequenceBuilder::AppendList(int32_t size) {
+  NUMBUF_LIST_UPDATE(list_offsets_.size() - 1, LIST_TAG);
+  list_offsets_.push_back(list_offsets_.back() + size);
+  return Status::OK();
+}
+
+Status SequenceBuilder::AppendDict(int32_t size) {
+  NUMBUF_LIST_UPDATE(dict_offsets_.size() - 1, DICT_TAG);
+  dict_offsets_.push_back(dict_offsets_.back() + size);
+  return Status::OK();
+}
+
+#define NUMBUF_LIST_ADD(VARNAME, TAG)     \
+  types[TAG] = VARNAME.type();            \
+  children[TAG] = VARNAME.Finish();       \
+
+std::shared_ptr<DenseUnionArray> SequenceBuilder::Finish(
+  std::shared_ptr<Array> list_data,
+  std::shared_ptr<Array> dict_data) {
+
+  std::vector<TypePtr> types(NUM_TAGS);
+  std::vector<ArrayPtr> children(NUM_TAGS);
+
+  NUMBUF_LIST_ADD(bools_, BOOL_TAG);
+  NUMBUF_LIST_ADD(ints_, INT_TAG);
+  NUMBUF_LIST_ADD(strings_, STRING_TAG);
+  NUMBUF_LIST_ADD(floats_, FLOAT_TAG);
+  NUMBUF_LIST_ADD(doubles_, DOUBLE_TAG);
+  NUMBUF_LIST_ADD(tensors_, TENSOR_TAG);
+
+  // Finish construction of the lists contained in this list
+  list_data = list_data ? list_data : std::make_shared<NullArray>(0);
+  DCHECK(list_data->length() == list_offsets_.back());
+  ListBuilder list_builder(pool_, list_data);
+  ARROW_CHECK_OK(list_builder.Append(list_offsets_.data(), list_offsets_.size()));
+  NUMBUF_LIST_ADD(list_builder, LIST_TAG);
+
+  // Finish construction of the dictionaries contained in this list
+  dict_data = dict_data ? dict_data : std::make_shared<NullArray>(0);
+  DCHECK(dict_data->length() == dict_offsets_.back());
+  ListBuilder dict_builder(pool_, dict_data);
+  ARROW_CHECK_OK(dict_builder.Append(dict_offsets_.data(), dict_offsets_.size()));
+  NUMBUF_LIST_ADD(dict_builder, DICT_TAG);
+
+  TypePtr type = TypePtr(new DenseUnionType(types));
+
+  return std::make_shared<DenseUnionArray>(type, types_.length(),
+           children, types_.data(), offsets_.data(),
+           nones_.null_count(), nones_.null_bitmap());
+}
+
+}
diff --git a/src/numbuf/cpp/src/numbuf/sequence.h b/src/numbuf/cpp/src/numbuf/sequence.h
new file mode 100644
index 0000000..1b7bbb5
--- /dev/null
+++ b/src/numbuf/cpp/src/numbuf/sequence.h
@@ -0,0 +1,99 @@
+#ifndef NUMBUF_LIST_H
+#define NUMBUF_LIST_H
+
+#include <arrow/api.h>
+#include <arrow/types/union.h>
+#include "tensor.h"
+
+namespace numbuf {
+
+const int8_t BOOL_TAG = 0;
+const int8_t INT_TAG = 1;
+const int8_t STRING_TAG = 2;
+const int8_t FLOAT_TAG = 3;
+const int8_t DOUBLE_TAG = 4;
+const int8_t TENSOR_TAG = 5;
+const int8_t LIST_TAG = 6;
+const int8_t DICT_TAG = 7;
+
+const int8_t NUM_TAGS = 8;
+
+class SequenceBuilder {
+ public:
+  SequenceBuilder(arrow::MemoryPool* pool = nullptr);
+
+  //! Appending a none to the list
+  arrow::Status Append();
+
+  //! Appending a boolean to the list
+  arrow::Status Append(bool data);
+
+  //! Appending an int64_t to the list
+  arrow::Status Append(int64_t data);
+
+  //! Appending a null-delimited string to the list
+  arrow::Status Append(const char* data);
+
+  //! Appending a C++ string to the list
+  arrow::Status Append(const std::string& data);
+
+  //! Appending a float to the list
+  arrow::Status Append(float data);
+
+  //! Appending a double to the list
+  arrow::Status Append(double data);
+
+  /*! Appending a tensor to the list
+
+      \param dims
+        A vector of dimensions
+
+      \param data
+        A pointer to the start of the data block. The length of the data block
+        will be the product of the dimensions
+  */
+  arrow::Status Append(const std::vector<int64_t>& dims, double* data);
+
+  /*! Add a sublist to the list. The data contained in the list will be
+     specified in the "Finish" method.
+
+     To construct l = [[11, 22], 33, [44, 55]] you would for example run
+     list = ListBuilder();
+     list.AppendList(2);
+     list.Append(33);
+     list.AppendList(2);
+     list.Finish([11, 22, 44, 55]);
+     list.Finish();
+
+     \param size
+       The size of the sublist
+  */
+  arrow::Status AppendList(int32_t size);
+
+  arrow::Status AppendDict(int32_t size);
+
+  //! Finish building the list and return the result
+  std::shared_ptr<arrow::DenseUnionArray> Finish(
+    std::shared_ptr<arrow::Array> list_data,
+    std::shared_ptr<arrow::Array> dict_data);
+
+ private:
+  arrow::MemoryPool* pool_;
+
+  arrow::Int8Builder types_;
+  arrow::Int32Builder offsets_;
+
+  arrow::NullArrayBuilder nones_;
+  arrow::BooleanBuilder bools_;
+  arrow::Int64Builder ints_;
+  arrow::StringBuilder strings_;
+  arrow::FloatBuilder floats_;
+  arrow::DoubleBuilder doubles_;
+  DoubleTensorBuilder tensors_;
+  std::vector<int32_t> list_offsets_;
+  std::vector<int32_t> dict_offsets_;
+};
+
+} // namespace numbuf
+
+#endif // NUMBUF_LIST_H
diff --git a/src/numbuf/cpp/src/numbuf/tensor.cc b/src/numbuf/cpp/src/numbuf/tensor.cc
new file mode 100644
index 0000000..2a8d6b4
--- /dev/null
+++ b/src/numbuf/cpp/src/numbuf/tensor.cc
@@ -0,0 +1,42 @@
+#include "tensor.h"
+
+using namespace arrow;
+
+namespace numbuf {
+
+template<typename T>
+TensorBuilder<T>::TensorBuilder(const TypePtr& dtype, MemoryPool* pool)
+    : dtype_(dtype) {
+  dim_data_ = std::make_shared<Int64Builder>(pool);
+  dims_ = std::make_shared<ListBuilder>(pool, dim_data_);
+  value_data_ = std::make_shared<PrimitiveBuilder<T>>(pool, dtype);
+  values_ = std::make_shared<ListBuilder>(pool, value_data_);
+  auto dims_field = std::make_shared<Field>("dims", dims_->type());
+  auto values_field = std::make_shared<Field>("data", values_->type());
+  auto type = std::make_shared<StructType>(std::vector<FieldPtr>({dims_field, values_field}));
+  tensors_ = std::make_shared<StructBuilder>(pool, type, std::vector<std::shared_ptr<ArrayBuilder>>({dims_, values_}));
+};
+
+template<typename T>
+Status TensorBuilder<T>::Append(const std::vector<int64_t>& dims, const elem_type* data) {
+  RETURN_NOT_OK(tensors_->Append());
+  RETURN_NOT_OK(dims_->Append());
+  RETURN_NOT_OK(values_->Append());
+  int32_t size = 1;
+  for (auto dim : dims) {
+    size *= dim;
+    RETURN_NOT_OK(dim_data_->Append(dim));
+  }
+  std::cout << "appended with argument " << data << std::endl;
+  RETURN_NOT_OK(value_data_->Append(data, size));
+  return Status::OK(); // tensors_->Append();
+}
+
+template<typename T>
+std::shared_ptr<Array> TensorBuilder<T>::Finish() {
+  return tensors_->Finish();
+}
+
+template class TensorBuilder<DoubleType>;
+
+}
diff --git a/src/numbuf/cpp/src/numbuf/tensor.h b/src/numbuf/cpp/src/numbuf/tensor.h
new file mode 100644
index 0000000..e206b99
--- /dev/null
+++ b/src/numbuf/cpp/src/numbuf/tensor.h
@@ -0,0 +1,59 @@
+#ifndef NUMBUF_TENSOR_H
+#define NUMBUF_TENSOR_H
+
+#include <memory>
+#include <arrow/type.h>
+#include <arrow/api.h>
+
+namespace numbuf {
+
+/*! This is a class for building a dataframe where each row corresponds to
+    a Tensor (= multidimensional array) of numerical data. There are two
+    columns, "dims" which contains an array of dimensions for each Tensor
+    and "data" which contains data buffer of the Tensor as a flattened array.
+*/
+template<typename T>
+class TensorBuilder {
+public:
+  typedef typename T::c_type elem_type;
+
+  TensorBuilder(const arrow::TypePtr& dtype, arrow::MemoryPool* pool = nullptr);
+
+  /*! Append a new tensor.
+
+      \param dims
+        The dimensions of the Tensor
+
+      \param data
+        Pointer to the beginning of the data buffer of the Tensor. The
+        total length of the buffer is sizeof(elem_type) * product of dims[i] over i
+  */
+  arrow::Status Append(const std::vector<int64_t>& dims, const elem_type* data);
+
+  //! Convert the tensors to an Arrow StructArray
+  std::shared_ptr<arrow::Array> Finish();
+
+  //! Number of tensors in the column
+  int32_t length() {
+    return tensors_->length();
+  }
+
+  const arrow::TypePtr& type() {
+    return tensors_->type();
+  }
+
+private:
+	arrow::TypePtr dtype_;
+  std::shared_ptr<arrow::Int64Builder> dim_data_;
+  std::shared_ptr<arrow::ListBuilder> dims_;
+  std::shared_ptr<arrow::PrimitiveBuilder<T>> value_data_;
+  std::shared_ptr<arrow::ListBuilder> values_;
+  std::shared_ptr<arrow::StructBuilder> tensors_;
+};
+
+
+typedef TensorBuilder<arrow::DoubleType> DoubleTensorBuilder;
+
+}
+
+#endif // NUMBUF_TENSOR_H
diff --git a/src/numbuf/python/src/pynumbuf/adapters/numpy.cc b/src/numbuf/python/src/pynumbuf/adapters/numpy.cc
new file mode 100644
index 0000000..230b03a
--- /dev/null
+++ b/src/numbuf/python/src/pynumbuf/adapters/numpy.cc
@@ -0,0 +1,98 @@
+#include "numpy.h"
+
+#include <numbuf/tensor.h>
+// #include <numbuf/types.h>
+
+using namespace arrow;
+
+namespace numbuf {
+
+const auto BOOL_TYPE = std::make_shared<arrow::BooleanType>();
+
+const auto INT8_TYPE = std::make_shared<arrow::Int8Type>();
+const auto INT16_TYPE = std::make_shared<arrow::Int16Type>();
+const auto INT32_TYPE = std::make_shared<arrow::Int32Type>();
+const auto INT64_TYPE = std::make_shared<arrow::Int64Type>();
+
+const auto UINT8_TYPE = std::make_shared<arrow::UInt8Type>();
+const auto UINT16_TYPE = std::make_shared<arrow::UInt16Type>();
+const auto UINT32_TYPE = std::make_shared<arrow::UInt32Type>();
+const auto UINT64_TYPE = std::make_shared<arrow::UInt64Type>();
+
+const auto FLOAT_TYPE = std::make_shared<arrow::FloatType>();
+const auto DOUBLE_TYPE = std::make_shared<arrow::DoubleType>();
+
+#define NUMPY_TYPE_TO_ARROW_CASE(TYPE)     \
+  case NPY_##TYPE:                         \
+    return TYPE##_TYPE;
+
+TypePtr numpy_type_to_arrow(int numpy_type) {
+  switch (numpy_type) {
+    NUMPY_TYPE_TO_ARROW_CASE(INT8)
+    NUMPY_TYPE_TO_ARROW_CASE(INT16)
+    NUMPY_TYPE_TO_ARROW_CASE(INT32)
+    NUMPY_TYPE_TO_ARROW_CASE(INT64)
+    NUMPY_TYPE_TO_ARROW_CASE(UINT8)
+    NUMPY_TYPE_TO_ARROW_CASE(UINT16)
+    NUMPY_TYPE_TO_ARROW_CASE(UINT32)
+    NUMPY_TYPE_TO_ARROW_CASE(UINT64)
+    NUMPY_TYPE_TO_ARROW_CASE(FLOAT)
+    NUMPY_TYPE_TO_ARROW_CASE(DOUBLE)
+    default:
+      assert(false && "unsupported numpy type"); return nullptr;  // must not fall off a value-returning function (UB when NDEBUG disables assert)
+  }
+}
+
+#define ARROW_TYPE_TO_NUMPY_CASE(TYPE) \
+  case Type::TYPE:                     \
+    return NPY_##TYPE;
+
+int arrow_type_to_numpy(TypePtr type) {
+  switch (type->type) {
+    ARROW_TYPE_TO_NUMPY_CASE(INT8)
+    ARROW_TYPE_TO_NUMPY_CASE(INT16)
+    ARROW_TYPE_TO_NUMPY_CASE(INT32)
+    ARROW_TYPE_TO_NUMPY_CASE(INT64)
+    ARROW_TYPE_TO_NUMPY_CASE(UINT8)
+    ARROW_TYPE_TO_NUMPY_CASE(UINT16)
+    ARROW_TYPE_TO_NUMPY_CASE(UINT32)
+    ARROW_TYPE_TO_NUMPY_CASE(UINT64)
+    ARROW_TYPE_TO_NUMPY_CASE(FLOAT)
+    ARROW_TYPE_TO_NUMPY_CASE(DOUBLE)
+    default:
+      assert(false && "unsupported arrow type"); return -1;  // must not fall off a value-returning function (UB when NDEBUG disables assert)
+  }
+}
+
+Status DeserializeArray(std::shared_ptr<Array> array, int32_t offset, PyObject** out) {
+  DCHECK(array);
+  auto tensor = std::dynamic_pointer_cast<StructArray>(array);
+  DCHECK(tensor);
+  auto dims = std::dynamic_pointer_cast<ListArray>(tensor->field(0));
+  auto content = std::dynamic_pointer_cast<ListArray>(tensor->field(1));
+  auto values = std::dynamic_pointer_cast<DoubleArray>(content->values());
+  double* data = const_cast<double*>(values->raw_data()) + content->offset(offset);
+  npy_intp num_dims = dims->value_length(offset);
+  std::vector<npy_intp> dim(num_dims);
+  int j = 0; // TODO(pcm): make this the loop variable
+  for (int i = dims->offset(offset); i < dims->offset(offset+1); ++i) {
+    dim[j] = std::dynamic_pointer_cast<Int64Array>(dims->values())->Value(i);
+    j += 1;
+  }
+  *out = PyArray_SimpleNewFromData(num_dims, dim.data(), arrow_type_to_numpy(values->type()), reinterpret_cast<void*>(data));
+  return Status::OK();
+}
+
+Status SerializeArray(PyArrayObject* array, SequenceBuilder& builder) {
+  size_t ndim = PyArray_NDIM(array);
+  int dtype = PyArray_TYPE(array);
+  std::vector<int64_t> dims(ndim);
+  for (int i = 0; i < ndim; ++i) {
+    dims[i] = PyArray_DIM(array, i);
+  }
+  auto type = numpy_type_to_arrow(dtype);
+  auto data = reinterpret_cast<double*>(PyArray_DATA(array));
+  return builder.Append(dims, data);
+}
+
+}
diff --git a/src/numbuf/python/src/pynumbuf/adapters/numpy.h b/src/numbuf/python/src/pynumbuf/adapters/numpy.h
new file mode 100644
index 0000000..8b8b385
--- /dev/null
+++ b/src/numbuf/python/src/pynumbuf/adapters/numpy.h
@@ -0,0 +1,22 @@
+#ifndef PYNUMBUF_NUMPY_H
+#define PYNUMBUF_NUMPY_H
+
+#include <arrow/api.h>
+#include <Python.h>
+
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#define NO_IMPORT_ARRAY
+#define PY_ARRAY_UNIQUE_SYMBOL NUMBUF_ARRAY_API
+#include <numpy/arrayobject.h>
+
+#include <numbuf/tensor.h>
+#include <numbuf/sequence.h>
+
+namespace numbuf {
+
+arrow::Status SerializeArray(PyArrayObject* array, SequenceBuilder& builder);
+arrow::Status DeserializeArray(std::shared_ptr<arrow::Array> array, int32_t offset, PyObject** out);
+
+}
+
+#endif
diff --git a/src/numbuf/python/src/pynumbuf/adapters/python.cc b/src/numbuf/python/src/pynumbuf/adapters/python.cc
new file mode 100644
index 0000000..8e4680d
--- /dev/null
+++ b/src/numbuf/python/src/pynumbuf/adapters/python.cc
@@ -0,0 +1,138 @@
+#include "python.h"
+
+using namespace arrow;
+
+namespace numbuf {
+
+PyObject* get_value(ArrayPtr arr, int32_t index, int32_t type) {
+  PyObject* result;
+  switch (type) {
+    case BOOL_TAG:
+      return PyBool_FromLong(std::static_pointer_cast<BooleanArray>(arr)->Value(index));
+    case INT_TAG:
+      return PyInt_FromLong(std::static_pointer_cast<Int64Array>(arr)->Value(index));
+    case STRING_TAG: {
+      int32_t nchars;
+      const uint8_t* str = std::static_pointer_cast<StringArray>(arr)->GetValue(index, &nchars);
+      return PyString_FromStringAndSize(reinterpret_cast<const char*>(str), nchars);
+    }
+    case FLOAT_TAG:
+      return PyFloat_FromDouble(std::static_pointer_cast<FloatArray>(arr)->Value(index));
+    case DOUBLE_TAG:
+      return PyFloat_FromDouble(std::static_pointer_cast<DoubleArray>(arr)->Value(index));
+    case LIST_TAG: {
+      auto list = std::static_pointer_cast<ListArray>(arr);
+      ARROW_CHECK_OK(DeserializeList(list->values(), list->value_offset(index), list->value_offset(index+1), &result));
+      return result;
+    }
+    case DICT_TAG: {
+      auto list = std::static_pointer_cast<ListArray>(arr);
+      ARROW_CHECK_OK(DeserializeDict(list->values(), list->value_offset(index), list->value_offset(index+1), &result));
+      return result;
+    }
+    case TENSOR_TAG:
+      ARROW_CHECK_OK(DeserializeArray(arr, index, &result));
+      return result;
+    default:
+      DCHECK(false) << "union tag not recognized " << type;
+  }
+  return NULL;
+}
+
+Status append(PyObject* elem, SequenceBuilder& builder, std::vector<PyObject*>& sublists, std::vector<PyObject*>& subdicts) {
+  // The bool case must precede the int case (PyInt_Check passes for bools)
+  if (PyBool_Check(elem)) {
+    RETURN_NOT_OK(builder.Append(elem == Py_True));
+  } else if (PyFloat_Check(elem)) {
+    RETURN_NOT_OK(builder.Append(PyFloat_AS_DOUBLE(elem)));
+  } else if (PyInt_Check(elem)) {
+    RETURN_NOT_OK(builder.Append(PyInt_AS_LONG(elem)));
+  } else if (PyString_Check(elem)) {
+    RETURN_NOT_OK(builder.Append(PyString_AS_STRING(elem)));
+  } else if (PyList_Check(elem)) {
+    RETURN_NOT_OK(builder.AppendList(PyList_Size(elem)));  // check status like the other Append calls
+    sublists.push_back(elem);
+  } else if (PyDict_Check(elem)) {
+    RETURN_NOT_OK(builder.AppendDict(PyDict_Size(elem)));  // check status like the other Append calls
+    subdicts.push_back(elem);
+  } else if (PyArray_Check(elem)) {
+    RETURN_NOT_OK(SerializeArray((PyArrayObject*) elem, builder));
+  } else if (elem == Py_None) {
+    RETURN_NOT_OK(builder.Append());
+  } else {
+    DCHECK(false) << "data type of " << PyString_AS_STRING(PyObject_Repr(elem))
+                  << " not recognized";
+  }
+  return Status::OK();
+}
+
+std::shared_ptr<Array> SerializeDict(std::vector<PyObject*> dicts) {
+  DictBuilder result;
+  std::vector<PyObject*> sublists, subdicts, dummy;
+  for (const auto& dict : dicts) {
+    PyObject *key, *value;
+    Py_ssize_t pos = 0;
+    while (PyDict_Next(dict, &pos, &key, &value)) {
+      ARROW_CHECK_OK(append(key, result.keys(), dummy, dummy));
+      ARROW_CHECK_OK(append(value, result.vals(), sublists, subdicts));
+    }
+  }
+  auto val_list = sublists.size() > 0 ? SerializeList(sublists) : nullptr;
+  auto val_dict = subdicts.size() > 0 ? SerializeDict(subdicts) : nullptr;
+  return result.Finish(val_list, val_dict);
+}
+
+std::shared_ptr<Array> SerializeList(std::vector<PyObject*> lists) {
+  SequenceBuilder builder(nullptr);
+  std::vector<PyObject*> sublists, subdicts;
+  for (const auto& list : lists) {
+    for (size_t i = 0, size = PyList_Size(list); i < size; ++i) {
+      PyObject* elem = PyList_GetItem(list, i);
+      ARROW_CHECK_OK(append(elem, builder, sublists, subdicts));
+    }
+  }
+  auto list = sublists.size() > 0 ? SerializeList(sublists) : nullptr;
+  auto dict = subdicts.size() > 0 ? SerializeDict(subdicts) : nullptr;
+  return builder.Finish(list, dict);
+}
+
+Status DeserializeList(std::shared_ptr<Array> array, int32_t start_idx, int32_t stop_idx, PyObject** out) {
+  auto data = std::dynamic_pointer_cast<DenseUnionArray>(array);
+  // TODO(pcm): error handling
+  int32_t size = array->length();
+  PyObject* result = PyList_New(stop_idx - start_idx);
+  auto types = std::make_shared<Int8Array>(size, data->types());
+  auto offsets = std::make_shared<Int32Array>(size, data->offset_buf());
+  for (size_t i = start_idx; i < stop_idx; ++i) {
+    if (data->IsNull(i)) {
+      Py_INCREF(Py_None);
+      PyList_SetItem(result, i-start_idx, Py_None);
+    } else {
+      int32_t offset = offsets->Value(i);
+      int8_t type = types->Value(i);
+      ArrayPtr arr = data->child(type);
+      PyList_SetItem(result, i-start_idx, get_value(arr, offset, type));
+    }
+  }
+  *out = result;
+  return Status::OK();
+}
+
+Status DeserializeDict(std::shared_ptr<Array> array, int32_t start_idx, int32_t stop_idx, PyObject** out) {
+  auto data = std::dynamic_pointer_cast<StructArray>(array);
+  // TODO(pcm): error handling, get rid of the temporary copy of the list;
+  PyObject *keys, *vals;
+  PyObject* result = PyDict_New();
+  ARROW_RETURN_NOT_OK(DeserializeList(data->field(0), start_idx, stop_idx, &keys));
+  ARROW_RETURN_NOT_OK(DeserializeList(data->field(1), start_idx, stop_idx, &vals));
+  for (size_t i = start_idx; i < stop_idx; ++i) {
+    PyDict_SetItem(result, PyList_GetItem(keys, i - start_idx), PyList_GetItem(vals, i - start_idx));
+  }
+  Py_XDECREF(keys); // release the temporary list created by DeserializeList above
+  Py_XDECREF(vals); // (PyList_GetItem returns borrowed refs; PyDict_SetItem took its own)
+  *out = result;
+  return Status::OK();
+}
+
+
+}
diff --git a/src/numbuf/python/src/pynumbuf/adapters/python.h b/src/numbuf/python/src/pynumbuf/adapters/python.h
new file mode 100644
index 0000000..92d883e
--- /dev/null
+++ b/src/numbuf/python/src/pynumbuf/adapters/python.h
@@ -0,0 +1,20 @@
+#ifndef PYNUMBUF_PYTHON_H
+#define PYNUMBUF_PYTHON_H
+
+#include <Python.h>
+
+#include <arrow/api.h>
+#include <numbuf/dict.h>
+#include <numbuf/sequence.h>
+
+#include "numpy.h"
+
+namespace numbuf {
+
+std::shared_ptr<arrow::Array> SerializeList(std::vector<PyObject*> list);
+arrow::Status DeserializeList(std::shared_ptr<arrow::Array> array, int32_t start_idx, int32_t stop_idx, PyObject** out);
+arrow::Status DeserializeDict(std::shared_ptr<arrow::Array> array, int32_t start_idx, int32_t stop_idx, PyObject** out);
+
+}
+
+#endif
diff --git a/src/numbuf/python/src/pynumbuf/numbuf.cc b/src/numbuf/python/src/pynumbuf/numbuf.cc
new file mode 100644
index 0000000..3eeba37
--- /dev/null
+++ b/src/numbuf/python/src/pynumbuf/numbuf.cc
@@ -0,0 +1,69 @@
+#include <Python.h>
+#include <arrow/api.h>
+#include <arrow/ipc/memory.h>
+#include <arrow/ipc/adapter.h>
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#define PY_ARRAY_UNIQUE_SYMBOL NUMBUF_ARRAY_API
+#include <numpy/arrayobject.h>
+
+#include <iostream>
+
+#include "adapters/python.h"
+
+using namespace arrow;
+using namespace numbuf;
+
+extern "C" {
+
+int PyObjectToArrow(PyObject* object, std::shared_ptr<Array> **result) {
+  if (PyCapsule_IsValid(object, "arrow")) {
+    *result = static_cast<std::shared_ptr<Array>*>(PyCapsule_GetPointer(object, "arrow"));
+    return 1;
+  } else {
+    PyErr_SetString(PyExc_TypeError, "must be an 'arrow' capsule");
+    return 0;
+  }
+}
+
+static void ArrowCapsule_Destructor(PyObject* capsule) {
+  delete static_cast<std::shared_ptr<Array>*>(PyCapsule_GetPointer(capsule, "arrow"));
+}
+
+PyObject* serialize_list(PyObject* self, PyObject* args) {
+  PyObject* value;
+  if (!PyArg_ParseTuple(args, "O", &value)) {
+    return NULL;
+  }
+  if (!PyList_Check(value)) {
+    PyErr_SetString(PyExc_TypeError, "serialize_list expects a list");
+    return NULL;  // previously leaked the heap-allocated result and returned NULL without setting an exception
+  }
+  auto result = new std::shared_ptr<Array>(SerializeList(std::vector<PyObject*>({value})));
+  std::cout << "validation: " << (*result)->Validate().ToString() << std::endl;
+  std::cout << "schema: " << (*result)->type()->ToString() << std::endl;
+  return PyCapsule_New(static_cast<void*>(result), "arrow", &ArrowCapsule_Destructor);
+}
+
+PyObject* deserialize_list(PyObject* self, PyObject* args) {
+  std::shared_ptr<Array>* data;
+  if (!PyArg_ParseTuple(args, "O&", &PyObjectToArrow, &data)) {
+    return NULL;
+  }
+  PyObject* result;
+  ARROW_CHECK_OK(DeserializeList(*data, 0, (*data)->length(), &result));
+  return result;
+}
+
+static PyMethodDef NumbufMethods[] = {
+ { "serialize_list", serialize_list, METH_VARARGS, "serialize a Python list" },
+ { "deserialize_list", deserialize_list, METH_VARARGS, "deserialize a Python list" },
+ { NULL, NULL, 0, NULL }
+};
+
+PyMODINIT_FUNC initlibnumbuf(void) {
+  PyObject* m;
+  m = Py_InitModule3("libnumbuf", NumbufMethods, "Python C Extension for Numbuf");
+  import_array();
+}
+
+}
diff --git a/src/numbuf/python/test/runtest.py b/src/numbuf/python/test/runtest.py
new file mode 100644
index 0000000..2d20e48
--- /dev/null
+++ b/src/numbuf/python/test/runtest.py
@@ -0,0 +1,28 @@
+import unittest
+import libnumbuf
+
+class SerializationTests(unittest.TestCase):
+
+  def roundTripTest(self, data):
+    serialized = libnumbuf.serialize_list(data)
+    result = libnumbuf.deserialize_list(serialized)
+    self.assertEqual(data, result)
+
+  def testSimple(self):
+    self.roundTripTest([1, 2, 3])
+    self.roundTripTest([1.0, 2.0, 3.0])
+    self.roundTripTest(['hello', 'world'])
+    self.roundTripTest([1, 'hello', 1.0])
+    self.roundTripTest([{'hello': 1.0, 'world': 42}])
+    self.roundTripTest([True, False])
+
+  def testNone(self):
+    self.roundTripTest([1, 2, None, 3])
+
+  def testNested(self):
+    self.roundTripTest([{"hello": {"world": 1}}])
+    self.roundTripTest([{"hello": [1, 2, 3]}])
+    self.roundTripTest([{"hello": [1, [2, 3]]}])
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/src/plasma/BUILD.gn b/src/plasma/BUILD.gn
new file mode 100644
index 0000000..f05f4f4
--- /dev/null
+++ b/src/plasma/BUILD.gn
@@ -0,0 +1,6 @@
+group("plasma") {
+  deps = [
+    "//plasma/client",
+    "//plasma/service"
+  ]
+}
diff --git a/src/plasma/client/BUILD.gn b/src/plasma/client/BUILD.gn
new file mode 100644
index 0000000..ba60a62
--- /dev/null
+++ b/src/plasma/client/BUILD.gn
@@ -0,0 +1,27 @@
+group("client") {
+  deps = [
+    ":plasma"
+  ]
+}
+
+shared_library("plasma") {
+  output_name = "plasma"
+
+  include_dirs = [ "/usr/include/python2.7" ] # TODO(pcm): make this general
+
+  deps = [
+    "//ray/client",
+    "//plasma/service:plasma"
+  ]
+
+  sources = [
+    "plasma.cc",
+  ]
+
+  if (!is_win) {
+    # This is required so functions in Python C extensions can be found
+    configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+  }
+
+  ldflags = [ "-lpython2.7" ]
+}
diff --git a/src/plasma/client/plasma.cc b/src/plasma/client/plasma.cc
new file mode 100644
index 0000000..01fe837
--- /dev/null
+++ b/src/plasma/client/plasma.cc
@@ -0,0 +1,142 @@
+#include <string>
+#include <Python.h>
+#include "base/command_line.h"
+#include "shell/init.h"
+#include "plasma/service/api.h"
+
+extern "C" {
+
+using plasma::ClientContext;
+using plasma::MutableBuffer;
+using plasma::ObjectID;
+
+static int GetClientContext(PyObject* object, ClientContext** context) {
+  if (PyCapsule_IsValid(object, "plasma")) {
+    *context = static_cast<ClientContext*>(PyCapsule_GetPointer(object, "plasma"));
+    return 1;
+  } else {
+    PyErr_SetString(PyExc_TypeError, "must be a 'plasma' capsule");
+    return 0;
+  }
+}
+
+static void PlasmaCapsule_Destructor(PyObject* capsule) {
+  delete static_cast<ClientContext*>(PyCapsule_GetPointer(capsule, "plasma"));
+}
+
+static int GetMutableBuffer(PyObject* object, MutableBuffer** buffer) {
+  if (PyCapsule_IsValid(object, "mut_buff")) {
+    *buffer = static_cast<MutableBuffer*>(PyCapsule_GetPointer(object, "mut_buff"));
+    return 1;
+  } else {
+    PyErr_SetString(PyExc_TypeError, "must be a 'mut_buff' capsule");
+    return 0;
+  }
+}
+
+static void MutableBufferCapsule_Destructor(PyObject* capsule) {
+  delete static_cast<MutableBuffer*>(PyCapsule_GetPointer(capsule, "mut_buff"));
+}
+
+static PyObject* connect(PyObject* self, PyObject* args) {
+  const char* address;
+  if (!PyArg_ParseTuple(args, "s", &address)) {
+    return NULL;
+  }
+  auto context = new ClientContext(std::string(address));
+  return PyCapsule_New(context, "plasma", PlasmaCapsule_Destructor);
+}
+
+static PyObject* build_object(PyObject* self, PyObject* args) {
+  ClientContext* context;
+  ObjectID object_id;
+  Py_ssize_t size;
+  const char* name;
+  if (!PyArg_ParseTuple(args, "O&nns", &GetClientContext,
+                        &context, &object_id, &size, &name)) {
+    return NULL;
+  }
+  auto mutable_buffer = new MutableBuffer();
+  context->BuildObject(object_id, size, *mutable_buffer, std::string(name));
+  return PyCapsule_New(mutable_buffer, "mut_buff", MutableBufferCapsule_Destructor);
+}
+
+static PyObject* get_mutable_buffer(PyObject* self, PyObject* args) {
+  MutableBuffer* buffer;
+  if (!PyArg_ParseTuple(args, "O&", &GetMutableBuffer, &buffer)) {
+    return NULL;
+  }
+  return PyBuffer_FromReadWriteMemory(reinterpret_cast<void*>(buffer->mutable_data()), buffer->size());
+}
+
+static PyObject* seal_object(PyObject* self, PyObject* args) {
+  MutableBuffer* buffer;
+  if (!PyArg_ParseTuple(args, "O&", &GetMutableBuffer, &buffer)) {
+    return NULL;
+  }
+  buffer->Seal();
+  Py_RETURN_NONE;
+}
+
+// TODO: implement blocking and nonblocking version of this
+static PyObject* get_object(PyObject* self, PyObject* args) {
+  ClientContext* context;
+  ObjectID object_id;
+  if (!PyArg_ParseTuple(args, "O&n", &GetClientContext, &context, &object_id)) {
+    return NULL;
+  }
+  plasma::Buffer buffer;
+  context->GetObject(object_id, buffer);
+  const void* data = reinterpret_cast<const void*>(buffer.data());
+  // We need the const cast because the Python API does not implement const for this method
+  // TODO(pcm): Maybe the new Python buffer API does?
+  return PyBuffer_FromMemory(const_cast<void*>(data), buffer.size());
+}
+
+static PyObject* list_objects(PyObject* self, PyObject* args) {
+  ClientContext* context;
+  if (!PyArg_ParseTuple(args, "O&", &GetClientContext, &context)) {
+    return NULL;
+  }
+  std::vector<plasma::ObjectInfo> infos;
+  context->ListObjects(&infos);
+  PyObject* names = PyList_New(infos.size());
+  PyObject* sizes = PyList_New(infos.size());
+  PyObject* create_times = PyList_New(infos.size());
+  PyObject* construct_deltas = PyList_New(infos.size());
+  PyObject* creator_ids = PyList_New(infos.size());
+  for (size_t i = 0; i < infos.size(); ++i) {
+    PyList_SetItem(names, i, PyString_FromString(infos[i].name.c_str()));
+    PyList_SetItem(sizes, i, PyInt_FromLong(infos[i].size));
+    PyList_SetItem(create_times, i, PyInt_FromLong(infos[i].create_time));
+    PyList_SetItem(construct_deltas, i, PyInt_FromLong(infos[i].construct_delta));
+    PyList_SetItem(creator_ids, i, PyInt_FromLong(infos[i].creator_id));
+  }
+  PyObject* result = PyTuple_New(5);
+  PyTuple_SetItem(result, 0, names);
+  PyTuple_SetItem(result, 1, sizes);
+  PyTuple_SetItem(result, 2, create_times);
+  PyTuple_SetItem(result, 3, construct_deltas);
+  PyTuple_SetItem(result, 4, creator_ids);
+  return result;
+}
+
+static PyMethodDef RayClientMethods[] = {
+  { "connect", connect, METH_VARARGS, "connect to the shell" },
+  { "build_object", build_object, METH_VARARGS, "build a new object" },
+  { "get_mutable_buffer", get_mutable_buffer, METH_VARARGS, "get mutable buffer" },
+  { "seal_object", seal_object, METH_VARARGS, "seal an object" },
+  { "get_object", get_object, METH_VARARGS, "get an object from plasma" },
+  { "list_objects", list_objects, METH_VARARGS, "list objects in plasma" },
+  { NULL, NULL, 0, NULL }
+};
+
+PyMODINIT_FUNC initlibplasma(void) {
+  int argc = 1;
+  const char* argv[] = { "libplasma", NULL };
+  base::CommandLine::Init(argc, argv);
+  shell::InitializeLogging();
+  Py_InitModule3("libplasma", RayClientMethods, "plasma python client library");
+}
+
+}
diff --git a/src/plasma/client/test.py b/src/plasma/client/test.py
new file mode 100644
index 0000000..9012b80
--- /dev/null
+++ b/src/plasma/client/test.py
@@ -0,0 +1,17 @@
+import libplasma
+import os
+import pandas as pd  # needed: pd.DataFrame is used below
+
+plasma = libplasma.connect("/home/pcmoritz/shell")
+
+A = libplasma.build_object(plasma, 1, 1000, "object-1")
+libplasma.seal_object(A)
+B = libplasma.build_object(plasma, 2, 2000, "object-2")
+libplasma.seal_object(B)
+
+libplasma.list_objects(plasma)
+
+names, sizes, create_times, construct_deltas, creator_ids = libplasma.list_objects(plasma)
+
+info = pd.DataFrame({"name": names, "size": sizes, "create_time": create_times,
+  "construct_deltas": construct_deltas, "creator_ids": creator_ids})
diff --git a/src/plasma/service/BUILD.gn b/src/plasma/service/BUILD.gn
new file mode 100644
index 0000000..75fb284
--- /dev/null
+++ b/src/plasma/service/BUILD.gn
@@ -0,0 +1,44 @@
+import("//mojo/public/mojo_application.gni")
+import("//mojo/public/tools/bindings/mojom.gni")
+
+group("service") {
+  deps = [
+    ":bindings",
+    ":server",
+    ":plasma"
+  ]
+}
+
+mojo_native_application("server") {
+  output_name = "plasma"
+
+  deps = [
+    ":bindings",
+    "//mojo/common",
+    "//mojo/public/cpp/application:standalone",
+    "//mojo/public/cpp/bindings",
+    "//mojo/public/cpp/utility",
+  ]
+
+  sources = [
+    "server.cc",
+  ]
+}
+
+static_library("plasma") {
+  sources = [
+    "library.cc",
+    "buffer.cc",
+    "plasma_interface.cc",
+  ]
+
+  deps = [
+    ":bindings_sync"
+  ]
+}
+
+mojom("bindings") {
+  sources = [
+    "plasma.mojom",
+  ]
+}
diff --git a/src/plasma/service/api.h b/src/plasma/service/api.h
new file mode 100644
index 0000000..7d689be
--- /dev/null
+++ b/src/plasma/service/api.h
@@ -0,0 +1,106 @@
+#ifndef PLASMA_API_H_
+#define PLASMA_API_H_
+#include <map>
+#include <string>
+#include "buffer.h"
+
+namespace plasma {
+
+typedef int64_t ClientID;
+
+class PlasmaInterface;
+
+class ObjectInfo {
+public:
+  //! Name of the object as provided by the user during object construction
+  std::string name;
+  //! Size of the object in bytes
+  int64_t size;
+  //! Time when object construction started, in microseconds since the Unix epoch
+  int64_t create_time;
+  //! Time in microseconds between object creation and sealing
+  int64_t construct_delta;
+  //! Process ID of the process that created the object
+  int64_t creator_id;
+  //! Cluster wide unique address for the process that created the object
+  std::string creator_address;
+};
+
+/*! A client context is the primary interface through which clients interact
+    with Plasma.
+*/
+class ClientContext {
+ public:
+  /*! Create a new client context.
+
+      \param address
+        Address of the Ray shell socket we are connecting to
+  */
+  ClientContext(const std::string& address);
+
+  ~ClientContext();
+
+  /*! Build a new object. Building an object involves multiple steps.
+      Once the creator process finishes constructing the object, it
+      seals the object. Only after that can it be shared with other
+      processes.
+
+      \param object_id
+        The object ID of the newly created object. Provided by the
+        client, which must ensure it is globally unique.
+
+      \param size
+        The number of bytes that are allocated for the object
+        initially. Can be reallocated through the MutableBuffer
+        interface.
+
+      \param buffer
+        The function will pass the allocated buffer to the client
+        using this argument.
+
+      \param name
+        An optional name for the object through which it can be
+        accessed without knowing its object ID.
+
+      \param metadata
+        An optional dictionary of metadata for the object. The keys of
+        the dictionary are strings and the values are arbitrary binary data
+        represented by Buffer objects.
+  */
+  Status BuildObject(ObjectID object_id, int64_t size,
+                     MutableBuffer& buffer, const std::string& name = "",
+                     const std::map<std::string, Buffer>& metadata = std::map<std::string, Buffer>());
+
+  /*! Get buffer associated to an object ID. If the object has not
+      been sealed yet, this function will block the current thread.
+
+      \param object_id
+        The object ID of the object that shall be retrieved.
+
+      \param buffer
+        The argument is used to pass the read-only buffer to the client.
+  */
+  Status GetObject(ObjectID object_id, Buffer& buffer);
+
+  /*! Put object information of objects in the store into the
+      vector objects.
+  */
+  Status ListObjects(std::vector<ObjectInfo>* objects);
+
+  /*! Retrieve metadata for a given object.
+
+      \param key
+        The key of the metadata information to be retrieved.
+
+      \return
+        A view on the metadata associated to that key.
+  */
+  Status GetMetadata(ObjectID object_id, const std::string& key, Buffer& data);
+
+ private:
+  std::shared_ptr<PlasmaInterface> interface_;
+};
+
+}
+
+#endif
diff --git a/src/plasma/service/buffer.cc b/src/plasma/service/buffer.cc
new file mode 100644
index 0000000..49d7a9e
--- /dev/null
+++ b/src/plasma/service/buffer.cc
@@ -0,0 +1,32 @@
+#include "api.h"
+#include "buffer.h"
+#include "plasma_interface.h"
+
+namespace plasma {
+
+MutableBuffer::MutableBuffer()
+    : Buffer(nullptr, 0), mutable_data_(nullptr), sealed_(false) {}
+
+MutableBuffer::~MutableBuffer() {
+  CHECK(sealed_) << "MutableBuffer must be sealed before it goes out of scope";
+}
+
+uint8_t* MutableBuffer::mutable_data() {
+  DCHECK(mutable_data_);
+  DCHECK(!sealed_);
+  return mutable_data_;
+}
+
+Status MutableBuffer::Resize(int64_t new_size) {
+  DCHECK(interface_);
+  CHECK(false);
+}
+
+Status MutableBuffer::Seal() {
+  DCHECK(interface_);
+  DCHECK(!sealed_);
+  interface_->get()->SealObject(object_id_);
+  sealed_ = true;
+}
+
+} // namespace plasma
diff --git a/src/plasma/service/buffer.h b/src/plasma/service/buffer.h
new file mode 100644
index 0000000..3e2506d
--- /dev/null
+++ b/src/plasma/service/buffer.h
@@ -0,0 +1,87 @@
+#ifndef PLASMA_BUFFER_H_
+#define PLASMA_BUFFER_H_
+
+#include <vector>
+#include <memory>
+#include "base/logging.h"
+
+namespace plasma {
+
+typedef int64_t ObjectID;
+
+typedef void Status;
+
+class PlasmaInterface;
+
+/*! Read-only view on data
+*/
+class Buffer {
+ public:
+  // we declare ClientContext friend so it can initialize our private fields
+  friend class ClientContext;
+
+  Buffer() : data_(nullptr), size_(0) {}
+
+  Buffer(const uint8_t* data, int64_t size) : data_(data), size_(size) {}
+  /*! Return the start address of the buffer.
+  */
+  const uint8_t* data() { return data_; }
+  /*! Return an address corresponding to an "offset" in this buffer
+  */
+  const uint8_t* data(uint64_t offset) { return data_ + offset; }
+  /*! Return the size of the object in bytes
+  */
+  int64_t size() { return size_; }
+  ~Buffer() {}
+
+ private:
+  const uint8_t* data_;
+  int64_t size_;
+};
+
+/*! Mutable view on data
+*/
+class MutableBuffer : public Buffer {
+public:
+  // we declare ClientContext friend so it can initialize our private fields
+  friend class ClientContext;
+
+  /*! After the default constructor has been called, the class is not
+      functional and all methods will raise errors. Only after it has been
+      initialized by ClientContext::BuildObject can this class be used.
+  */
+  MutableBuffer();
+
+  ~MutableBuffer();
+
+  /*! Return the start address of the buffer (mutable).
+  */
+  uint8_t* mutable_data();
+  /*! Return an address corresponding to an "offset" in this buffer (mutable).
+  */
+  uint8_t* mutable_data(uint64_t offset);
+  /*! Resize the buffer.
+
+      \param new_size
+        New size of the buffer (in bytes).
+  */
+  Status Resize(int64_t new_size);
+  /*! Make the data contained in this buffer immutable. After the buffer
+      has been sealed, it is illegal to modify data from the buffer or to
+      resize the buffer.
+  */
+  Status Seal();
+  /*! Has this MutableBuffer been sealed?
+  */
+  bool sealed() { return sealed_; }
+
+private:
+  uint8_t* mutable_data_;
+  bool sealed_;
+  plasma::ObjectID object_id_;
+  std::shared_ptr<PlasmaInterface> interface_;
+};
+
+} // namespace plasma
+
+#endif
diff --git a/src/plasma/service/library.cc b/src/plasma/service/library.cc
new file mode 100644
index 0000000..9dfd49b
--- /dev/null
+++ b/src/plasma/service/library.cc
@@ -0,0 +1,52 @@
+#include "api.h"
+
+#include "base/process/process.h"
+#include "plasma_interface.h"
+
+namespace plasma {
+
+ClientContext::ClientContext(const std::string& address) {
+  interface_ = std::make_shared<PlasmaInterface>(address);
+}
+
+ClientContext::~ClientContext() {}
+
+Status ClientContext::BuildObject(ObjectID object_id, int64_t size, MutableBuffer& buffer,
+                                  const std::string& name, const std::map<std::string, Buffer>& metadata) {
+  mojo::ScopedSharedBufferHandle handle; // TODO(pcm): Check if we need to hold onto this
+  int64_t creator_id = base::Process::Current().Pid();
+  interface_->get()->CreateObject(object_id, size, name, creator_id, &handle); // TODO(pcm): pass metadata once the service supports it
+  void* pointer = nullptr;
+  CHECK_EQ(MOJO_RESULT_OK, mojo::MapBuffer(handle.get(), 0, size, &pointer, MOJO_MAP_BUFFER_FLAG_NONE));
+  buffer.object_id_ = object_id;
+  buffer.mutable_data_ = static_cast<uint8_t*>(pointer);
+  buffer.data_ = static_cast<const uint8_t*>(pointer);
+  buffer.size_ = size;
+  buffer.interface_ = interface_;
+}
+
+Status ClientContext::GetObject(ObjectID object_id, Buffer& buffer) {
+  mojo::ScopedSharedBufferHandle handle;
+  uint64_t size;
+  interface_->get()->GetObject(object_id, true, &handle, &size);
+  void* pointer = nullptr;
+  CHECK_EQ(MOJO_RESULT_OK, mojo::MapBuffer(handle.get(), 0, size, &pointer, MOJO_MAP_BUFFER_FLAG_NONE));
+  buffer.data_ = static_cast<const uint8_t*>(pointer);
+  buffer.size_ = size;
+}
+
+Status ClientContext::ListObjects(std::vector<ObjectInfo>* objects) {
+  mojo::Array<service::ObjectInfoPtr> infos;
+  interface_->get()->ListObjects(&infos);
+  for (size_t i = 0; i < infos.size(); ++i) {
+    ObjectInfo info;
+    info.name = infos[i]->name.get();
+    info.size = infos[i]->size;
+    info.create_time = infos[i]->create_time;
+    info.construct_delta = infos[i]->construct_delta;
+    info.creator_id = infos[i]->creator_id;
+    objects->push_back(info);
+  }
+}
+
+}
diff --git a/src/plasma/service/plasma.mojom b/src/plasma/service/plasma.mojom
new file mode 100644
index 0000000..dc00a73
--- /dev/null
+++ b/src/plasma/service/plasma.mojom
@@ -0,0 +1,22 @@
+module plasma.service;
+
+struct ObjectInfo {
+  string name;
+  uint64 size;
+  int64 create_time;
+  int64 construct_delta;
+  int64 creator_id;
+};
+
+[ServiceName="plasma::service::Plasma"]
+interface Plasma {
+  CreateObject(int64 object_id, uint64 size, string name, int64 creator_id)
+    => (handle<shared_buffer> buffer);
+  ResizeObject(int64 object_id, uint64 new_size)
+    => (handle<shared_buffer> buffer);
+  SealObject(int64 object_id);
+  GetObject(int64 object_id, bool block)
+    => (handle<shared_buffer> buffer, uint64 size);
+  ListObjects()
+    => (array<ObjectInfo> info);
+};
diff --git a/src/plasma/service/plasma_interface.cc b/src/plasma/service/plasma_interface.cc
new file mode 100644
index 0000000..54b2600
--- /dev/null
+++ b/src/plasma/service/plasma_interface.cc
@@ -0,0 +1,16 @@
+#include "plasma_interface.h"
+
+namespace plasma {
+
+// Connect to the Plasma service ("mojo:plasma") through the Ray shell
+// reachable at |address|, and cache a synchronous interface pointer.
+PlasmaInterface::PlasmaInterface(const std::string& address) {
+    context_.ConnectToShell(std::string("mojo:plasma"), std::string(address));
+    interface_ = context_.GetInterface();
+}
+
+PlasmaInterface::~PlasmaInterface() {}
+
+// Return the synchronous Mojo interface; calls on it block the caller
+// until the service replies.
+mojo::SynchronousInterfacePtr<plasma::service::Plasma>& PlasmaInterface::get() {
+  return interface_;
+}
+
+}
diff --git a/src/plasma/service/plasma_interface.h b/src/plasma/service/plasma_interface.h
new file mode 100644
index 0000000..d832f65
--- /dev/null
+++ b/src/plasma/service/plasma_interface.h
@@ -0,0 +1,27 @@
+#ifndef PLASMA_INTERFACE_H_
+#define PLASMA_INTERFACE_H_
+
+#include <string>
+#include "mojo/public/cpp/bindings/synchronous_interface_ptr.h"
+#include "plasma/service/plasma.mojom-sync.h"
+#include "ray/client/client_context.h"
+
+namespace plasma {
+
+/*! This class holds all the fields necessary to interact with the Plasma
+    service. They are collected here so that the public API headers do not
+    depend on Mojo (this is the "pointer to implementation" pattern).
+*/
+class PlasmaInterface {
+ public:
+  /*! Connect to the Plasma service.
+
+      \param address
+        Address of the Ray shell socket used to bootstrap the connection
+  */
+  // explicit: a std::string must never implicitly convert to a live
+  // service connection.
+  explicit PlasmaInterface(const std::string& address);
+  ~PlasmaInterface();
+  //! Access the synchronous Mojo interface to the Plasma service.
+  mojo::SynchronousInterfacePtr<plasma::service::Plasma>& get();
+ private:
+  //! Synchronous connection to the Plasma service.
+  mojo::SynchronousInterfacePtr<plasma::service::Plasma> interface_;
+  //! Context owning the connection thread and the service handle.
+  shell::ClientContext<plasma::service::Plasma> context_;
+};
+
+}
+
+#endif
diff --git a/src/plasma/service/server.cc b/src/plasma/service/server.cc
new file mode 100644
index 0000000..e5417a1
--- /dev/null
+++ b/src/plasma/service/server.cc
@@ -0,0 +1,186 @@
+#include <unordered_map>
+#include "base/time/time.h"
+#include "plasma/service/plasma.mojom.h"
+#include "mojo/common/binding_set.h"
+#include "mojo/public/c/system/main.h"
+#include "mojo/public/cpp/application/application_impl_base.h"
+#include "mojo/public/cpp/application/run_application.h"
+#include "mojo/public/cpp/application/service_provider_impl.h"
+#include "mojo/public/cpp/system/buffer.h"
+#include "mojo/public/cpp/system/macros.h"
+
+using plasma::service::Plasma;
+
+namespace plasma {
+
+namespace service {
+
+/*! An entry in the hash table of objects stored in the local object store.
+*/
+class PlasmaEntry {
+public:
+  //! Handle to the shared memory buffer where the object is stored.
+  //! This is the store's own (owning) copy; clients receive duplicates.
+  mojo::ScopedSharedBufferHandle handle;
+  //! ObjectInfo (see plasma.mojom)
+  ObjectInfoPtr object_info;
+};
+
+/*! Implementation of the Plasma service interface. This implementation is
+    single threaded, which means we do not have to lock the datastructures.
+*/
+class PlasmaImpl : public Plasma {
+ public:
+  /*! Creates a new object.
+
+      \param object_id
+        Unique identifier of the object to be built
+
+      \param size
+        Initial number of bytes to be allocated for the object
+
+      \param name
+        User defined name of the object
+
+      \param creator_id
+        Identifier of the client that creates the object
+
+      \return
+        Shared memory handle to the read-write memory of the object
+  */
+  void CreateObject(int64 object_id, uint64 size, const mojo::String& name, int64 creator_id,
+                   const CreateObjectCallback& callback) override {
+    mojo::ScopedSharedBufferHandle handle;
+    CHECK_EQ(MOJO_RESULT_OK, mojo::CreateSharedBuffer(nullptr, size, &handle));
+    DCHECK(handle.is_valid());
+    // Duplicate the handle: the store keeps one copy, the client gets the
+    // other.
+    mojo::ScopedSharedBufferHandle handle_copy;
+    mojo::DuplicateBuffer(handle.get(), nullptr, &handle_copy);
+    DCHECK(handle_copy.is_valid());
+    // Set object info
+    auto object_info = ObjectInfo::New();
+    object_info->name = std::string(name.get());
+    object_info->size = size;
+    object_info->create_time = base::TimeTicks::Now().ToInternalValue(); // TODO(pcm): Check this
+    // construct_delta stays -1 until the object is sealed.
+    object_info->construct_delta = -1;
+    object_info->creator_id = creator_id;
+    open_objects_.emplace(object_id, PlasmaEntry {handle.Pass(), object_info.Pass()});
+    callback.Run(handle_copy.Pass());
+  }
+
+  /*! Resize an existing object. Not implemented yet; calling this aborts
+      the service.
+  */
+  void ResizeObject(int64 object_id, uint64 new_size,
+                    const ResizeObjectCallback& callback) override {
+    mojo::ScopedSharedBufferHandle handle;
+    CHECK_EQ(MOJO_RESULT_OK, mojo::CreateSharedBuffer(nullptr, new_size, &handle));
+    // TODO(pcm): Copy the old contents, store the new handle and run
+    // |callback|.
+    CHECK(false);
+  }
+
+  /*! Pass a sealed object to a client that has been waiting.
+  */
+  void pass_sealed_object(int64 object_id, const GetObjectCallback& callback) {
+    // Use find() rather than operator[] so a bogus id cannot silently
+    // default-insert an empty entry; callers guarantee the object is sealed.
+    auto it = sealed_objects_.find(object_id);
+    DCHECK(it != sealed_objects_.end());
+    mojo::ScopedSharedBufferHandle handle;
+    mojo::DuplicateBuffer(it->second.handle.get(), nullptr, &handle);
+    DCHECK(handle.is_valid());
+    callback.Run(handle.Pass(), it->second.object_info->size);
+  }
+
+  /*! Seal an object, making it immutable.
+
+      \param object_id
+        Unique identifier of the object to be sealed
+  */
+  void SealObject(int64 object_id) override {
+    auto it = open_objects_.find(object_id);
+    DCHECK(it != open_objects_.end());
+    // TODO(pcm): Check this
+    it->second.object_info->construct_delta =
+      base::TimeTicks::Now().ToInternalValue() -
+        it->second.object_info->create_time;
+    sealed_objects_[object_id] = std::move(it->second);
+    open_objects_.erase(it);
+    // Answer every client that was blocked waiting for this object.
+    // Iterate by const reference to avoid copying each callback.
+    for (const auto& elem : objects_notify_[object_id]) {
+      pass_sealed_object(object_id, elem);
+    }
+    // Drop the (now empty) notification list instead of keeping an empty
+    // vector in the map forever.
+    objects_notify_.erase(object_id);
+  }
+
+  /*! Get an object from the object store.
+
+      \param object_id
+        Unique identifier of the object that shall be returned
+
+      \param block
+        If true, this call will block until the object becomes available.
+        NOTE(review): the non-blocking path is not implemented yet -- a
+        request for a missing object is always queued regardless of |block|.
+
+      \return
+        Handle to the object and size of the object in bytes
+  */
+  void GetObject(int64 object_id, bool block,
+                 const GetObjectCallback& callback) override {
+    auto entry = sealed_objects_.find(object_id);
+    if (entry == sealed_objects_.end()) {
+      // Not sealed yet: park the callback; SealObject will run it.
+      objects_notify_[object_id].push_back(callback);
+    } else {
+      pass_sealed_object(object_id, callback);
+    }
+  }
+
+  /*! List objects from the object store.
+
+      \return
+        A list of ObjectInfo structs describing all objects in the store,
+        both sealed and still under construction.
+  */
+  void ListObjects(const ListObjectsCallback& callback) override {
+    auto object_info = mojo::Array<ObjectInfoPtr>::New(0);
+    for (const auto& entry : sealed_objects_) {
+      object_info.push_back(entry.second.object_info->Clone());
+    }
+    for (const auto& entry : open_objects_) {
+      object_info.push_back(entry.second.object_info->Clone());
+    }
+    callback.Run(object_info.Pass());
+  }
+
+ private:
+  //! Hash table of objects that have already been sealed
+  std::unordered_map<int64_t, PlasmaEntry> sealed_objects_;
+  //! Hash table of objects that are under construction
+  std::unordered_map<int64_t, PlasmaEntry> open_objects_;
+  //! Requests for objects that have not been sealed yet. For each object,
+  //! we store a list of callbacks that will be used to pass the object
+  //! to the client, one for each client waiting for the object.
+  std::unordered_map<int64_t, std::vector<GetObjectCallback>> objects_notify_;
+};
+
+/*! Implementation of the Plasma server. This follows the "SingletonServer"
+    pattern in examples/echo/echo_server.cc (see documentation there).
+    This means that the object store is shared between all the clients
+    running on a given node.
+*/
+class PlasmaServerApp : public mojo::ApplicationImplBase {
+ public:
+  PlasmaServerApp() {}
+  ~PlasmaServerApp() override {}
+
+  /*! Accept a new connection from a client. Every client is bound to the
+      single shared |plasma_impl_| instance.
+  */
+  bool OnAcceptConnection(
+      mojo::ServiceProviderImpl* service_provider_impl) override {
+    service_provider_impl->AddService<Plasma>(
+        [this](const mojo::ConnectionContext& connection_context,
+           mojo::InterfaceRequest<Plasma> plasma_request) {
+          bindings_.AddBinding(&plasma_impl_, plasma_request.Pass());
+        });
+    return true;
+  }
+
+ private:
+  //! The single object store shared by all connected clients.
+  PlasmaImpl plasma_impl_;
+
+  //! One binding per connected client, all pointing at |plasma_impl_|.
+  mojo::BindingSet<Plasma> bindings_;
+};
+
+} // namespace service
+
+}  // namespace plasma
+
+// Mojo entry point for the Plasma service application; runs the server's
+// message loop until shutdown.
+MojoResult MojoMain(MojoHandle application_request) {
+  plasma::service::PlasmaServerApp plasma_server_app;
+  return mojo::RunApplication(application_request, &plasma_server_app);
+}
diff --git a/src/ray/BUILD.gn b/src/ray/BUILD.gn
new file mode 100644
index 0000000..c226c42
--- /dev/null
+++ b/src/ray/BUILD.gn
@@ -0,0 +1,6 @@
+# Umbrella target that builds all Ray components.
+group("ray") {
+  deps = [
+    "//ray/app",
+    "//ray/client",
+  ]
+}
diff --git a/src/ray/app/BUILD.gn b/src/ray/app/BUILD.gn
new file mode 100644
index 0000000..b061e80
--- /dev/null
+++ b/src/ray/app/BUILD.gn
@@ -0,0 +1,24 @@
+import("//mojo/public/mojo_application.gni")
+import("//mojo/public/tools/bindings/mojom.gni")
+
+# The per-node Ray application (see ray_node_app.cc).
+mojo_native_application("app") {
+  output_name = "ray_node_app"
+
+  deps = [
+    ":bindings",
+    "//base",
+    "//mojo/public/cpp/application:standalone",
+    "//mojo/public/cpp/bindings",
+    "//mojo/public/cpp/utility",
+  ]
+
+  sources = [
+    "ray_node_app.cc",
+  ]
+}
+
+# Generated bindings for the client interface the node app connects to.
+mojom("bindings") {
+  sources = [
+    "../client/client.mojom"
+  ]
+}
diff --git a/src/ray/app/ray_node_app.cc b/src/ray/app/ray_node_app.cc
new file mode 100644
index 0000000..d708576
--- /dev/null
+++ b/src/ray/app/ray_node_app.cc
@@ -0,0 +1,39 @@
+#include <sstream>
+#include "base/logging.h"
+#include "mojo/public/c/system/main.h"
+#include "mojo/public/cpp/application/application_impl_base.h"
+#include "mojo/public/cpp/application/connect.h"
+#include "mojo/public/cpp/application/run_application.h"
+#include "mojo/public/cpp/utility/run_loop.h"
+
+#include "ray/client/client.mojom.h"
+
+namespace ray {
+
+/*! This is the application that runs on each Ray node and
+    establishes connections to clients on that node. For now, we
+    allow an arbitrary number of Python processes to be connected
+    from the outside.
+*/
+class RayNodeApp : public mojo::ApplicationImplBase {
+ public:
+  //! Number of worker clients started when the node app initializes.
+  //! Named constant instead of a magic number in the loop below.
+  static const size_t kNumInitialWorkers = 2;
+
+  /*! Start the initial set of workers. Worker |i| is addressed as
+      "mojo:worker{i}".
+  */
+  void OnInitialize() override {
+    for (size_t i = 0; i < kNumInitialWorkers; ++i) {
+      size_t index = clients_.size();
+      clients_.emplace_back();
+      std::stringstream stream;
+      stream << "mojo:worker{" << index << "}";
+      // LOG(INFO) terminates the message itself; the original appended
+      // std::endl, which produced a spurious blank line in the log.
+      LOG(INFO) << "Starting " << stream.str();
+      ConnectToService(shell(), stream.str(), mojo::GetProxy(&clients_[index]));
+    }
+  }
+ private:
+  //! Proxies to the connected worker clients.
+  std::vector<ClientPtr> clients_;
+};
+
+}
+
+// Mojo entry point for the per-node Ray application.
+MojoResult MojoMain(MojoHandle application_request) {
+  ray::RayNodeApp ray_node_app;
+  return mojo::RunApplication(application_request, &ray_node_app);
+}
diff --git a/src/ray/client/BUILD.gn b/src/ray/client/BUILD.gn
new file mode 100644
index 0000000..0bffa6d
--- /dev/null
+++ b/src/ray/client/BUILD.gn
@@ -0,0 +1,22 @@
+# Client-side library used by external processes (e.g. Python workers) to
+# bootstrap a Mojo connection to the Ray shell.
+static_library("client") {
+  output_name = "client"
+
+  deps = [
+    "//shell:common_lib",
+    "//shell:child_controller_bindings",
+    "//shell:native_application_support",
+    "//base",
+    "//base/allocator",
+    "//build/config/sanitizers:deps",
+    "//mojo/edk/base_edk",
+    "//mojo/edk/system",
+    "//mojo/environment:chromium",
+    "//mojo/message_pump",
+    "//mojo/application",
+  ]
+
+  sources = [
+    "client_context.cc",
+    "exchange_file_descriptor.cc",
+  ]
+}
diff --git a/src/ray/client/client.mojom b/src/ray/client/client.mojom
new file mode 100644
index 0000000..7427025
--- /dev/null
+++ b/src/ray/client/client.mojom
@@ -0,0 +1,6 @@
+module ray;
+
+// Interface implemented by worker clients; the node app holds one proxy
+// per connected worker. Currently empty.
+[ServiceName="ray::Client"]
+interface Client {
+  // TODO(pcm): Add a method to shut down the client here
+};
diff --git a/src/ray/client/client_context.cc b/src/ray/client/client_context.cc
new file mode 100644
index 0000000..d3da502
--- /dev/null
+++ b/src/ray/client/client_context.cc
@@ -0,0 +1,124 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "client_context.h"
+
+namespace shell {
+
+// The event is manual-reset (first argument) and starts unsignaled (second
+// argument), so Block() waits until Unblock() signals it.
+Blocker::Blocker() : event_(true, false) {}
+
+Blocker::~Blocker() {}
+
+AppContext::AppContext()
+    : io_thread_("io_thread"), controller_thread_("controller_thread") {}
+
+AppContext::~AppContext() {}
+
+// Bring up the Mojo embedder: the I/O thread, the controller thread, and
+// slave-side IPC support bootstrapped from |platform_handle|.
+void AppContext::Init(ScopedPlatformHandle platform_handle) {
+  // Initialize Mojo before starting any threads.
+  mojo::embedder::Init(mojo::embedder::CreateSimplePlatformSupport());
+
+  // Create and start our I/O thread.
+  base::Thread::Options io_thread_options(base::MessageLoop::TYPE_IO, 0);
+  CHECK(io_thread_.StartWithOptions(io_thread_options));
+  io_runner_ = MakeRefCounted<base_edk::PlatformTaskRunnerImpl>(
+      io_thread_.task_runner());
+  CHECK(io_runner_);
+  io_watcher_ = MakeUnique<base_edk::PlatformHandleWatcherImpl>(
+      static_cast<base::MessageLoopForIO*>(io_thread_.message_loop()));
+
+  // Create and start our controller thread.
+  base::Thread::Options controller_thread_options;
+  controller_thread_options.message_loop_type =
+      base::MessageLoop::TYPE_CUSTOM;
+  controller_thread_options.message_pump_factory =
+      base::Bind(&mojo::common::MessagePumpMojo::Create);
+  CHECK(controller_thread_.StartWithOptions(controller_thread_options));
+  controller_runner_ = MakeRefCounted<base_edk::PlatformTaskRunnerImpl>(
+      controller_thread_.task_runner());
+  CHECK(controller_runner_);
+
+  // Register this process as a SLAVE of the shell's master process; |this|
+  // receives SlaveProcessDelegate callbacks on the controller thread.
+  mojo::embedder::InitIPCSupport(
+      mojo::embedder::ProcessType::SLAVE, controller_runner_.Clone(), this,
+      io_runner_.Clone(), io_watcher_.get(), platform_handle.Pass());
+}
+
+// Blocks the calling thread until IPC shutdown completes; the unblock
+// happens in OnShutdownComplete() below.
+void AppContext::Shutdown() {
+  Blocker blocker;
+  shutdown_unblocker_ = blocker.GetUnblocker();
+  controller_runner_->PostTask([this]() { ShutdownOnControllerThread(); });
+  blocker.Block();
+}
+
+void AppContext::OnShutdownComplete() {
+  shutdown_unblocker_.Unblock(base::Closure());
+}
+
+void AppContext::OnMasterDisconnect() {
+  // We've lost the connection to the master process. This is not recoverable.
+  LOG(ERROR) << "Disconnected from master";
+  _exit(1);
+}
+
+// Constructed on the controller thread (see Init below); captures the task
+// runner of that thread for the Mojo channel.
+ChildControllerImpl::ChildControllerImpl(AppContext* app_context,
+        const Blocker::Unblocker& unblocker,
+        AppInitializer app_initializer)
+    : app_context_(app_context),
+      unblocker_(unblocker),
+      mojo_task_runner_(MakeRefCounted<base_edk::PlatformTaskRunnerImpl>(
+          base::ThreadTaskRunnerHandle::Get())),
+      app_initializer_(app_initializer),
+      channel_info_(nullptr),
+      binding_(this) {
+  // Losing the shell connection is fatal; see OnConnectionError().
+  binding_.set_connection_error_handler([this]() { OnConnectionError(); });
+}
+
+ChildControllerImpl::~ChildControllerImpl() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // TODO(vtl): Pass in the result from |MainMain()|.
+  // Guard against destruction before StartApp() was ever called: running a
+  // null base::Callback crashes, so only report completion if the callback
+  // was actually set.
+  if (!on_app_complete_.is_null())
+    on_app_complete_.Run(MOJO_RESULT_UNIMPLEMENTED);
+}
+
+// Runs on the controller thread: creates the controller, connects it to the
+// master over |child_connection_id|, and hands ownership to |app_context|.
+void ChildControllerImpl::Init(AppContext* app_context,
+        const std::string& child_connection_id,
+        const Blocker::Unblocker& unblocker,
+        AppInitializer app_initializer) {
+  DCHECK(app_context);
+  DCHECK(!app_context->controller());
+
+  scoped_ptr<ChildControllerImpl> impl(
+      new ChildControllerImpl(app_context, unblocker, app_initializer));
+  // TODO(vtl): With C++14 lambda captures, we'll be able to avoid this
+  // silliness.
+  auto raw_impl = impl.get();
+  mojo::ScopedMessagePipeHandle host_message_pipe(
+      mojo::embedder::ConnectToMaster(child_connection_id, [raw_impl]() {
+        raw_impl->DidConnectToMaster();
+      }, impl->mojo_task_runner_.Clone(), &impl->channel_info_));
+  DCHECK(impl->channel_info_);
+  impl->Bind(host_message_pipe.Pass());
+
+  app_context->set_controller(impl.Pass());
+}
+
+// |ChildController| method: the shell asks us to start the app. This
+// unblocks the main thread (blocked in Blocker::Block()) and runs the app
+// initializer there.
+void ChildControllerImpl::StartApp(const mojo::String& app_path,
+        mojo::InterfaceRequest<mojo::Application> application_request,
+        const StartAppCallback& on_app_complete) {
+  DVLOG(2) << "ChildControllerImpl::StartApp(" << app_path << ", ...)";
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  on_app_complete_ = on_app_complete;
+  unblocker_.Unblock(base::Bind(&ChildControllerImpl::StartAppOnMainThread,
+                                base::FilePath::FromUTF8Unsafe(app_path),
+                                base::Passed(&application_request),
+                                app_initializer_));
+}
+
+// |ChildController| method: terminate this process immediately.
+void ChildControllerImpl::ExitNow(int32_t exit_code) {
+  DVLOG(2) << "ChildControllerImpl::ExitNow(" << exit_code << ")";
+  _exit(exit_code);
+}
+
+} // namespace shell
diff --git a/src/ray/client/client_context.h b/src/ray/client/client_context.h
new file mode 100644
index 0000000..e87a57d
--- /dev/null
+++ b/src/ray/client/client_context.h
@@ -0,0 +1,342 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/posix/global_descriptors.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "mojo/edk/base_edk/platform_handle_watcher_impl.h"
+#include "mojo/edk/base_edk/platform_task_runner_impl.h"
+#include "mojo/edk/embedder/embedder.h"
+#include "mojo/edk/embedder/multiprocess_embedder.h"
+#include "mojo/edk/embedder/simple_platform_support.h"
+#include "mojo/edk/embedder/slave_process_delegate.h"
+#include "mojo/edk/platform/platform_handle.h"
+#include "mojo/edk/platform/platform_handle_watcher.h"
+#include "mojo/edk/platform/platform_pipe.h"
+#include "mojo/edk/platform/scoped_platform_handle.h"
+#include "mojo/edk/platform/task_runner.h"
+#include "mojo/edk/util/make_unique.h"
+#include "mojo/edk/util/ref_ptr.h"
+#include "mojo/message_pump/message_pump_mojo.h"
+#include "mojo/public/cpp/bindings/binding.h"
+#include "mojo/public/cpp/system/message_pipe.h"
+#include "shell/child_controller.mojom.h"
+#include "shell/child_switches.h"
+#include "shell/init.h"
+#include "shell/native_application_support.h"
+
+#include "exchange_file_descriptor.h"
+#include "examples/echo/echo.mojom-sync.h"
+#include "mojo/public/cpp/application/application_impl_base.h"
+#include "mojo/public/cpp/application/connect.h"
+#include "mojo/public/cpp/bindings/synchronous_interface_ptr.h"
+
+using mojo::platform::PlatformHandle;
+using mojo::platform::PlatformHandleWatcher;
+using mojo::platform::ScopedPlatformHandle;
+using mojo::platform::TaskRunner;
+using mojo::util::MakeRefCounted;
+using mojo::util::MakeUnique;
+using mojo::util::RefPtr;
+
+namespace shell {
+
+/*! The ServiceConnectionApp runs in a separate thread in the client and
+    maintains a connection to the shell. It allows the client to connect
+    synchronously to services, one service per ServiceConnectionApp.
+    It allows the client to get InterfaceHandles to these services. These
+    handles can be transfered to any client thread.
+*/
+template<typename Service>
+class ServiceConnectionApp : public mojo::ApplicationImplBase {
+ public:
+  /*! Construct a new ServiceConnectionApp that will connect to a service.
+
+      \param service_name
+        The name of the service we want to connect to
+
+      \param notify_caller
+        Condition that will be triggered to notify
+        the calling thread that the connection to the service is established
+
+      \param service_handle
+        A pointer to an InterfaceHandle that is
+        owned by the calling thread
+  */
+  ServiceConnectionApp(const std::string& service_name,
+                       std::condition_variable* notify_caller,
+                       mojo::InterfaceHandle<Service>* service_handle)
+      : service_name_(service_name), notify_caller_(notify_caller),
+        service_handle_(service_handle) {}
+
+  void OnInitialize() override {
+    mojo::SynchronousInterfacePtr<Service> service;
+    mojo::ConnectToService(shell(), service_name_,
+                           mojo::GetSynchronousProxy(&service));
+    // pass handle to calling thread
+    *service_handle_ = service.PassInterfaceHandle();
+    // NOTE(review): notify_all() is called without holding the caller's
+    // mutex and the caller waits without a predicate (see
+    // ClientContext::ConnectToShell); if this runs before the caller blocks,
+    // the wakeup is lost and the caller waits forever. TODO: add a guarded
+    // "started" flag.
+    notify_caller_->notify_all();
+    notify_caller_ = NULL;
+  }
+ private:
+  std::string service_name_;
+  std::condition_variable* notify_caller_;
+  mojo::InterfaceHandle<Service>* service_handle_;
+};
+
+// Blocker ---------------------------------------------------------------------
+
+// Blocks a thread until another thread unblocks it, at which point it unblocks
+// and runs a closure provided by that thread.
+class Blocker {
+ public:
+  class Unblocker {
+   public:
+    explicit Unblocker(Blocker* blocker = nullptr) : blocker_(blocker) {}
+    ~Unblocker() {}
+
+    // Signals the blocked thread and hands it |run_after| to execute once it
+    // wakes up. May only be called once per Blocker.
+    void Unblock(base::Closure run_after) {
+      DCHECK(blocker_);
+      DCHECK(blocker_->run_after_.is_null());
+      blocker_->run_after_ = run_after;
+      blocker_->event_.Signal();
+      blocker_ = nullptr;
+    }
+
+   private:
+    Blocker* blocker_;
+
+    // Copy and assign allowed.
+  };
+
+  Blocker();
+  ~Blocker();
+
+  // Waits for Unblock(), then runs the closure (if any) that was provided.
+  void Block() {
+    DCHECK(run_after_.is_null());
+    event_.Wait();
+    if (!run_after_.is_null())
+      run_after_.Run();
+  }
+
+  Unblocker GetUnblocker() { return Unblocker(this); }
+
+ private:
+  base::WaitableEvent event_;
+  base::Closure run_after_;
+
+  DISALLOW_COPY_AND_ASSIGN(Blocker);
+};
+
+// AppContext ------------------------------------------------------------------
+
+class ChildControllerImpl;
+
+// Should be created and initialized on the main thread. Owns the I/O and
+// controller threads and the slave-side Mojo IPC state.
+class AppContext : public mojo::embedder::SlaveProcessDelegate {
+ public:
+  AppContext();
+  ~AppContext() override;
+
+  // Initializes Mojo and starts the I/O and controller threads; |platform_handle|
+  // bootstraps the IPC connection to the master.
+  void Init(ScopedPlatformHandle platform_handle);
+
+  // Blocks until IPC support has been torn down on the controller thread.
+  void Shutdown();
+
+  const RefPtr<TaskRunner>& controller_runner() const {
+    return controller_runner_;
+  }
+
+  ChildControllerImpl* controller() const { return controller_.get(); }
+
+  void set_controller(scoped_ptr<ChildControllerImpl> controller) {
+    controller_ = controller.Pass();
+  }
+
+ private:
+  void ShutdownOnControllerThread() {
+    // First, destroy the controller.
+    controller_.reset();
+
+    // Next shutdown IPC. We'll unblock the main thread in OnShutdownComplete().
+    mojo::embedder::ShutdownIPCSupport();
+  }
+
+  // SlaveProcessDelegate implementation.
+  void OnShutdownComplete() override;
+
+  void OnMasterDisconnect() override;
+
+  base::Thread io_thread_;
+  RefPtr<TaskRunner> io_runner_;
+  std::unique_ptr<PlatformHandleWatcher> io_watcher_;
+
+  base::Thread controller_thread_;
+  RefPtr<TaskRunner> controller_runner_;
+
+  // Accessed only on the controller thread.
+  scoped_ptr<ChildControllerImpl> controller_;
+
+  // Used to unblock the main thread on shutdown.
+  Blocker::Unblocker shutdown_unblocker_;
+
+  DISALLOW_COPY_AND_ASSIGN(AppContext);
+};
+
+// ChildControllerImpl ---------------------------------------------------------
+
+// Callback invoked on the main thread to bind the application once the shell
+// has asked us to start it.
+typedef std::function<void(mojo::InterfaceRequest<mojo::Application>)>
+  AppInitializer;
+
+class ChildControllerImpl : public ChildController {
+ public:
+  ~ChildControllerImpl() override;
+
+  // To be executed on the controller thread. Creates the |ChildController|,
+  // etc.
+  static void Init(AppContext* app_context,
+                   const std::string& child_connection_id,
+                   const Blocker::Unblocker& unblocker,
+                   AppInitializer app_initializer);
+
+  void Bind(mojo::ScopedMessagePipeHandle handle) {
+    binding_.Bind(handle.Pass());
+  }
+
+  // |ChildController| methods:
+  void StartApp(const mojo::String& app_path,
+                mojo::InterfaceRequest<mojo::Application> application_request,
+                const StartAppCallback& on_app_complete) override;
+
+  void ExitNow(int32_t exit_code) override;
+
+ private:
+  ChildControllerImpl(AppContext* app_context,
+                      const Blocker::Unblocker& unblocker,
+                      AppInitializer app_initializer);
+
+  void OnConnectionError() {
+    // A connection error means the connection to the shell is lost. This is not
+    // recoverable.
+    LOG(ERROR) << "Connection error to the shell";
+    _exit(1);
+  }
+
+  // Callback for |mojo::embedder::ConnectToMaster()|.
+  void DidConnectToMaster() {
+    DVLOG(2) << "ChildControllerImpl::DidCreateChannel()";
+    DCHECK(thread_checker_.CalledOnValidThread());
+  }
+
+  // Runs |app_initializer| on the main thread (unblocked from StartApp).
+  static void StartAppOnMainThread(
+      const base::FilePath& app_path,
+      mojo::InterfaceRequest<mojo::Application> application_request,
+      AppInitializer app_initializer) {
+    // TODO(vtl): This is copied from in_process_native_runner.cc.
+    DVLOG(2) << "Loading/running Mojo app from " << app_path.value()
+             << " out of process";
+
+    app_initializer(application_request.Pass());
+  }
+
+  base::ThreadChecker thread_checker_;
+  AppContext* const app_context_;
+  Blocker::Unblocker unblocker_;
+  RefPtr<TaskRunner> mojo_task_runner_;
+  StartAppCallback on_app_complete_;
+  AppInitializer app_initializer_;
+
+  mojo::embedder::ChannelInfo* channel_info_;
+  mojo::Binding<ChildController> binding_;
+
+  DISALLOW_COPY_AND_ASSIGN(ChildControllerImpl);
+};
+
+/*! The ClientContext is used to connect a client to a Ray service.
+    The "Service" template parameter is the service class generated by
+    mojom for this service (for example mojo::examples::Echo for the
+    Mojo "echo" example).
+*/
+template<typename Service>
+class ClientContext {
+ public:
+  ClientContext()
+    : at_exit_(mojo::util::MakeUnique<base::AtExitManager>()) {}
+  ~ClientContext() {
+    // NOTE(review): |connection_thread_| is never joined; if it is still
+    // running (it blocks in loop.Run()), destroying a joinable std::thread
+    // calls std::terminate. TODO: add a shutdown path that stops the loop
+    // and joins the thread.
+    at_exit_.reset();
+  }
+  /*! Connect this client context to the Ray shell.
+
+      \param service_name
+        The name of the service you want to connect to
+
+      \param address
+        The address to the shell socket
+  */
+  void ConnectToShell(const std::string& service_name,
+                      const std::string& address) {
+    std::condition_variable app_started; // signal when app was started
+    std::mutex app_started_mutex; // lock for above condition
+    std::thread thread([service_name, address, &app_started, this]() {
+      std::string child_connection_id;
+      ray::FileDescriptorReceiver receiver(address);
+      int fd = receiver.Receive(child_connection_id); // file descriptor to bootstrap the IPC from
+
+      mojo::platform::ScopedPlatformHandle platform_handle((mojo::platform::PlatformHandle(fd)));
+      shell::AppContext app_context;
+      app_context.Init(platform_handle.Pass());
+
+      shell::Blocker blocker;
+      // TODO(vtl): With C++14 lambda captures, this can be made nicer.
+      const shell::Blocker::Unblocker unblocker = blocker.GetUnblocker();
+      // Runs on this connection thread once the shell calls StartApp; hosts
+      // the ServiceConnectionApp's message loop for the process lifetime.
+      auto app_initializer = [service_name, &app_started, this](mojo::InterfaceRequest<mojo::Application> request) {
+        shell::ServiceConnectionApp<Service> connector_app(service_name, &app_started, &service_handle_);
+        base::MessageLoop loop((mojo::common::MessagePumpMojo::Create()));
+        connector_app.Bind(request.Pass());
+        loop.Run();
+      };
+      app_context.controller_runner()->PostTask(
+        [app_initializer, &app_context, &child_connection_id, &unblocker]() {
+          shell::ChildControllerImpl::Init(&app_context, child_connection_id,
+                                           unblocker, app_initializer);
+        });
+      // This will block, then run whatever the controller wants.
+      blocker.Block();
+
+      app_context.Shutdown();
+    });
+    std::unique_lock<std::mutex> lock(app_started_mutex);
+    // NOTE(review): waiting without a predicate -- if the connection thread
+    // notifies before this wait starts, the wakeup is lost and this blocks
+    // forever; a spurious wakeup also returns too early. TODO: wait on a
+    // flag set by ServiceConnectionApp under |app_started_mutex|.
+    app_started.wait(lock);
+    connection_thread_ = std::move(thread);
+  }
+  /*! Get the Mojo Interface pointer for this connection.
+  */
+  mojo::SynchronousInterfacePtr<Service> GetInterface() {
+    return mojo::SynchronousInterfacePtr<Service>::Create(service_handle_.Pass());
+  }
+ private:
+  std::unique_ptr<base::AtExitManager> at_exit_;
+  std::thread connection_thread_;
+  mojo::InterfaceHandle<Service> service_handle_;
+};
+
+}  // namespace shell
diff --git a/src/ray/client/exchange_file_descriptor.cc b/src/ray/client/exchange_file_descriptor.cc
new file mode 100644
index 0000000..f1b9a9b
--- /dev/null
+++ b/src/ray/client/exchange_file_descriptor.cc
@@ -0,0 +1,127 @@
+#include "exchange_file_descriptor.h"
+
+#include <sys/socket.h>
+#include <stdlib.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <unistd.h>
+#include "base/logging.h"
+
+namespace ray {
+
+//! Maximum number of payload bytes that can accompany a file descriptor.
+const size_t MAX_PAYLOAD_SIZE = 1024;
+
+// Create a listening Unix domain socket at |address|; any stale socket file
+// at that path is removed first.
+FileDescriptorSender::FileDescriptorSender(const std::string& address) {
+  socket_ = socket(PF_UNIX, SOCK_STREAM, 0);
+  CHECK(socket_ != -1) << "error creating socket";
+  struct sockaddr_un addr;
+  memset(&addr, 0, sizeof(struct sockaddr_un));
+  addr.sun_family = AF_LOCAL;
+  strncpy(addr.sun_path, address.c_str(), sizeof(addr.sun_path));
+  addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
+  unlink(addr.sun_path);
+  size_t len = strlen(addr.sun_path) + sizeof(addr.sun_family);
+  CHECK(bind(socket_, (struct sockaddr *)&addr, len) != -1) << "error binding socket";
+  CHECK(listen(socket_, 5) != -1) << "error listening on socket";
+}
+
+FileDescriptorSender::~FileDescriptorSender() {
+  close(socket_);
+}
+
+// Prepare |msg| for sendmsg/recvmsg with SCM_RIGHTS ancillary data.
+// NOTE(review): |buf| doubles as both the one-byte data buffer (iov) and
+// the control buffer (msg_control) -- sendmsg with SCM_RIGHTS must carry at
+// least one byte of real data, which appears to be the intent here, but
+// sharing the same buffer for both is worth confirming.
+static void init_msg(struct msghdr *msg, struct iovec *iov, char *buf, size_t buf_len) {
+  iov->iov_base = buf;
+  iov->iov_len = 1;
+
+  msg->msg_iov = iov;
+  msg->msg_iovlen = 1;
+  msg->msg_control = buf;
+  msg->msg_controllen = buf_len;
+  msg->msg_name = NULL;
+  msg->msg_namelen = 0;
+}
+
+// Send |file_descriptor| plus |payload| to the next client that connects.
+// Returns true only if both the descriptor and the payload were sent.
+bool FileDescriptorSender::Send(int file_descriptor, const std::string& payload) {
+  struct sockaddr_in addr;
+  socklen_t len = sizeof(addr);
+  // Wait for the receiver to connect; bail out if accept fails instead of
+  // writing to an invalid descriptor.
+  int s = accept(socket_, (struct sockaddr *)&addr, &len);
+  if (s == -1) {
+    return false;
+  }
+
+  struct msghdr msg;
+  struct iovec iov;
+  char buf[CMSG_SPACE(sizeof(int))];
+
+  init_msg(&msg, &iov, buf, sizeof(buf));
+
+  // Attach the file descriptor as SCM_RIGHTS ancillary data.
+  struct cmsghdr *header = CMSG_FIRSTHDR(&msg);
+  header->cmsg_level = SOL_SOCKET;
+  header->cmsg_type = SCM_RIGHTS;
+  header->cmsg_len = CMSG_LEN(sizeof(int));
+  *(int *)CMSG_DATA(header) = file_descriptor;
+
+  DCHECK(payload.size() < MAX_PAYLOAD_SIZE);
+
+  // Send file descriptor and payload. The original tested the payload send
+  // against == -1, which reported success only when that send FAILED; both
+  // sends must succeed.
+  bool success = sendmsg(s, &msg, 0) != -1 &&
+    send(s, payload.data(), payload.size(), 0) != -1;
+  // Close the accepted connection so its descriptor is not leaked.
+  close(s);
+  return success;
+}
+
+// Connect to the sender's Unix domain socket at |address|.
+FileDescriptorReceiver::FileDescriptorReceiver(const std::string& address) {
+  socket_ = socket(PF_UNIX, SOCK_STREAM, 0);
+  CHECK(socket_ != -1) << "error creating socket";
+  struct sockaddr_un addr;
+  // Zero the whole struct first (the sender side does the same); otherwise
+  // the padding bytes passed to connect() below are uninitialized.
+  memset(&addr, 0, sizeof(struct sockaddr_un));
+  addr.sun_family = AF_LOCAL;
+  strncpy(addr.sun_path, address.c_str(), sizeof(addr.sun_path));
+  addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
+  int r = connect(socket_, (struct sockaddr *)&addr, sizeof(addr));
+  CHECK(r != -1) << "error connecting to socket";
+}
+
+FileDescriptorReceiver::~FileDescriptorReceiver() {
+  close(socket_);
+}
+
+// Receive one file descriptor plus its payload from the sender.
+// Appends the payload to |payload| and returns the descriptor, or -1 on
+// failure (with errno set).
+int FileDescriptorReceiver::Receive(std::string& payload) {
+  struct msghdr msg;
+  struct iovec iov;
+  char buf[CMSG_SPACE(sizeof(int))];
+  init_msg(&msg, &iov, buf, sizeof(buf));
+
+  if (recvmsg(socket_, &msg, 0) == -1)
+    return -1;
+
+  // Scan the control messages for SCM_RIGHTS. Keep the first descriptor;
+  // close any extras so nothing leaks.
+  int found_fd = -1;
+  bool oh_noes = false;
+  for (struct cmsghdr *header = CMSG_FIRSTHDR(&msg); header != NULL; header = CMSG_NXTHDR(&msg, header))
+    if (header->cmsg_level == SOL_SOCKET && header->cmsg_type == SCM_RIGHTS) {
+      int count = (header->cmsg_len - (CMSG_DATA(header) - (unsigned char *)header)) / sizeof(int);
+      for (int i = 0; i < count; ++i) {
+        int fd = ((int *)CMSG_DATA(header))[i];
+        if (found_fd == -1) {
+          found_fd = fd;
+        } else {
+          close(fd);
+          oh_noes = true;
+        }
+      }
+    }
+
+  // The sender sent us more than one file descriptor. We've closed
+  // them all to prevent fd leaks but notify the caller that we got
+  // a bad message.
+  if (oh_noes) {
+    close(found_fd);
+    errno = EBADMSG;
+    return -1;
+  }
+
+  // Receive the payload that accompanies the descriptor.
+  char reply[MAX_PAYLOAD_SIZE];
+  ssize_t len = recv(socket_, reply, MAX_PAYLOAD_SIZE, 0);
+  if (len < 0) {
+    // The original leaked |found_fd| on this path; close it before failing.
+    if (found_fd != -1)
+      close(found_fd);
+    return -1;
+  }
+
+  payload += std::string(reply, len);
+  return found_fd;
+}
+
+} // namespace ray
diff --git a/src/ray/client/exchange_file_descriptor.h b/src/ray/client/exchange_file_descriptor.h
new file mode 100644
index 0000000..ea8f3f8
--- /dev/null
+++ b/src/ray/client/exchange_file_descriptor.h
@@ -0,0 +1,64 @@
+#ifndef RAYCLIENT_EXCHANGE_FILE_DESCRIPTOR_H
+#define RAYCLIENT_EXCHANGE_FILE_DESCRIPTOR_H
+
+#include <string>
+
+namespace ray {
+
+/*! Send a file descriptor of a process to another process. This is needed
+    because Mojo bootstraps the IPC communication between processes via a
+    file handle (this makes sure no artifacts like actual files remain on the
+    computer once the IPC has finished).
+*/
+class FileDescriptorSender {
+public:
+  /*! Initialize the FileDescriptorSender.
+
+      \param address
+        Address of the socket that is used to send the file descriptor
+  */
+  explicit FileDescriptorSender(const std::string& address);
+  ~FileDescriptorSender();
+
+  /*! Send the file descriptor over the socket.
+
+      \param file_descriptor
+        The file descriptor that will be sent
+
+      \param payload
+        Additional payload that can be sent (< MAX_PAYLOAD_SIZE)
+
+      \return
+        Bool that indicates if the sending was successful
+  */
+  bool Send(int file_descriptor, const std::string& payload);
+private:
+  int socket_;  // Unix domain socket used to transfer the file descriptor
+};
+
+/*! Receive a file descriptor from another process. This is needed
+    because Mojo bootstraps the IPC communication between processes via a
+    file handle (to make sure no artifacts like actual files remain on the
+    computer once the IPC has finished).
+*/
+class FileDescriptorReceiver {
+public:
+  explicit FileDescriptorReceiver(const std::string& address);  // connects to the socket at |address|
+  ~FileDescriptorReceiver();  // closes the socket
+
+  /*! Receive file descriptor from the socket.
+
+      \param payload
+        The payload carried by this receive will be appended to this string
+
+      \return
+        The file descriptor that was sent or -1 if not successful.
+  */
+  int Receive(std::string& payload);
+private:
+  int socket_;  // connected Unix domain socket; closed in the destructor
+};
+
+} // namespace ray
+
+#endif
diff --git a/src/shell/BUILD.gn b/src/shell/BUILD.gn
index 26bafc9..c93c2fc 100644
--- a/src/shell/BUILD.gn
+++ b/src/shell/BUILD.gn
@@ -175,6 +175,7 @@ source_set("parent_lib") {
     "background_application_loader.h",
     "child_process_host.cc",
     "child_process_host.h",
+    "../ray/client/exchange_file_descriptor.cc",
     "command_line_util.cc",
     "command_line_util.h",
     "context.cc",
diff --git a/src/shell/application_manager/application_manager.cc b/src/shell/application_manager/application_manager.cc
index 2ee6c59..57aabfa 100644
--- a/src/shell/application_manager/application_manager.cc
+++ b/src/shell/application_manager/application_manager.cc
@@ -400,7 +400,14 @@ void ApplicationManager::RunNativeApplication(
 
   DCHECK(application_request.is_pending());
 
-  if (!path_exists) {
+  // TODO(pcm): We should eventually get rid of all modifications of //shell
+  // and use the mechanisms provided there (like ApplicationManager) to
+  // register custom handlers for ray client connections. Also, the line below
+  // is repeated in out_of_process_native_runner.cc. Let's do these things the
+  // next time we upgrade mojo.
+  bool connect_to_running_process =
+    path.BaseName().AsUTF8Unsafe().compare(0, 7, "worker{") == 0;  // prefix "worker{" is 7 chars
+  if (!connect_to_running_process && !path_exists) {
     LOG(ERROR) << "Library not started because library path '" << path.value()
                << "' does not exist.";
     return;
diff --git a/src/shell/child_process_host.cc b/src/shell/child_process_host.cc
index 7ce65f6..27b2202 100644
--- a/src/shell/child_process_host.cc
+++ b/src/shell/child_process_host.cc
@@ -27,6 +27,7 @@
 #include "shell/child_switches.h"
 #include "shell/context.h"
 #include "shell/task_runners.h"
+#include "ray/client/exchange_file_descriptor.h"
 
 using mojo::platform::PlatformPipe;
 using mojo::util::MakeRefCounted;
@@ -51,7 +52,8 @@ ChildProcessHost::~ChildProcessHost() {
   DCHECK(!child_process_.IsValid());
 }
 
-void ChildProcessHost::Start(const NativeApplicationOptions& options) {
+void ChildProcessHost::Start(const NativeApplicationOptions& options,
+    bool connect_to_running_process) {
   DCHECK(!child_process_.IsValid());
 
   scoped_ptr<LaunchData> launch_data(new LaunchData());
@@ -81,11 +83,19 @@ void ChildProcessHost::Start(const NativeApplicationOptions& options) {
   controller_.Bind(mojo::InterfaceHandle<ChildController>(handle.Pass(), 0u));
   controller_.set_connection_error_handler([this]() { OnConnectionError(); });
 
-  CHECK(base::PostTaskAndReplyWithResult(
-      context_->task_runners()->blocking_pool(), FROM_HERE,
-      base::Bind(&ChildProcessHost::DoLaunch, base::Unretained(this),
-                 base::Passed(&launch_data)),
-      base::Bind(&ChildProcessHost::DidStart, base::Unretained(this))));
+  if (connect_to_running_process) {
+    std::string address = base::CommandLine::ForCurrentProcess()
+        ->GetSwitchValueASCII("external-connection-address");  // keep in sync with switches::kExternalConnectionAddress
+    ray::FileDescriptorSender sender(address);
+    sender.Send(launch_data->platform_pipe.handle1.Pass().get().fd,
+                launch_data->child_connection_id);  // NOTE(review): Send()'s bool result is ignored — consider CHECKing it
+  } else {
+    CHECK(base::PostTaskAndReplyWithResult(
+        context_->task_runners()->blocking_pool(), FROM_HERE,
+        base::Bind(&ChildProcessHost::DoLaunch, base::Unretained(this),
+                   base::Passed(&launch_data)),
+        base::Bind(&ChildProcessHost::DidStart, base::Unretained(this))));
+  }
 }
 
 int ChildProcessHost::Join() {
diff --git a/src/shell/child_process_host.h b/src/shell/child_process_host.h
index 0017379..eec3fb6 100644
--- a/src/shell/child_process_host.h
+++ b/src/shell/child_process_host.h
@@ -40,7 +40,8 @@ class ChildProcessHost {
   // TODO(vtl): Consider using weak pointers and removing this requirement.
   // TODO(vtl): This should probably take a callback instead.
   // TODO(vtl): Consider merging this with |StartApp()|.
-  void Start(const NativeApplicationOptions& options);
+  void Start(const NativeApplicationOptions& options,
+             bool connect_to_running_process = false);
 
   // Waits for the child process to terminate, and returns its exit code.
   // Note: If |Start()| has been called, this must not be called until the
diff --git a/src/shell/out_of_process_native_runner.cc b/src/shell/out_of_process_native_runner.cc
index 02c5f8a..2cc53ad 100644
--- a/src/shell/out_of_process_native_runner.cc
+++ b/src/shell/out_of_process_native_runner.cc
@@ -51,7 +51,7 @@ OutOfProcessNativeRunner::OutOfProcessNativeRunner(
     : context_(context), options_(options) {}
 
 OutOfProcessNativeRunner::~OutOfProcessNativeRunner() {
-  if (child_process_host_) {
+  if (child_process_host_ && !connect_to_running_process_) {  // an attached process is not our child to join
     // TODO(vtl): Race condition: If |ChildProcessHost::DidStart()| hasn't been
     // called yet, we shouldn't call |Join()| here. (Until |DidStart()|, we may
     // not have a child process to wait on.) Probably we should fix |Join()|.
@@ -71,9 +71,12 @@ void OutOfProcessNativeRunner::Start(
   child_process_host_.reset(new ChildProcessHost(context_));
 
   NativeApplicationOptions options = options_;
-  if (Require32Bit(app_path))
+  if (false && Require32Bit(app_path))  // XXX(review): 32-bit check deliberately disabled? confirm intent and add a TODO
     options.require_32_bit = true;
-  child_process_host_->Start(options);
+  // TODO(pcm): The line below is repeated in application_manager.cc
+  connect_to_running_process_ =
+    app_path.BaseName().AsUTF8Unsafe().compare(0, 7, "worker{") == 0;  // prefix "worker{" is 7 chars
+  child_process_host_->Start(options, connect_to_running_process_);
 
   // TODO(vtl): |app_path.AsUTF8Unsafe()| is unsafe.
   child_process_host_->StartApp(
@@ -85,7 +88,7 @@ void OutOfProcessNativeRunner::Start(
 void OutOfProcessNativeRunner::AppCompleted(int32_t result) {
   DVLOG(2) << "OutOfProcessNativeRunner::AppCompleted(" << result << ")";
 
-  if (child_process_host_) {
+  if (child_process_host_ && !connect_to_running_process_) {  // nothing to join when attached to a running process
     child_process_host_->Join();
     child_process_host_.reset();
   }
diff --git a/src/shell/out_of_process_native_runner.h b/src/shell/out_of_process_native_runner.h
index 6808be2..2eb5aaf 100644
--- a/src/shell/out_of_process_native_runner.h
+++ b/src/shell/out_of_process_native_runner.h
@@ -42,6 +42,8 @@ class OutOfProcessNativeRunner : public NativeRunner {
 
   scoped_ptr<ChildProcessHost> child_process_host_;
 
+  bool connect_to_running_process_ = false;  // set in Start(); read by the destructor and AppCompleted()
+
   DISALLOW_COPY_AND_ASSIGN(OutOfProcessNativeRunner);
 };
 
diff --git a/src/shell/switches.cc b/src/shell/switches.cc
index 1998f3c..36c4bf1 100644
--- a/src/shell/switches.cc
+++ b/src/shell/switches.cc
@@ -70,9 +70,14 @@ const char kTraceStartupOutputName[] = "trace-startup-output-name";
 // the first maps 'a' to 'b' and the second 'c' to 'd'.
 const char kURLMappings[] = "url-mappings";
 
+// Specifies the socket address used to connect external processes to the
+// shell. Must be a valid socket address. If you change this value, you must
+// also update the hard-coded copy of the string in child_process_host.cc!
+const char kExternalConnectionAddress[] = "external-connection-address";
+
 // Switches valid for the main process (i.e., that the user may pass in).
 const char* const kSwitchArray[] = {
-    kArgsFor, kContentHandlers, kCPUProfile, kDisableCache,
+    kArgsFor, kContentHandlers, kCPUProfile, kDisableCache, kExternalConnectionAddress,
     kDontDeleteOnDownload, kEnableMultiprocess, kForceInProcess,
     kForceOfflineByDefault, kHelp, kMapOrigin, kOrigin, kTraceStartup,
     kTraceStartupDuration, kTraceStartupOutputName, kURLMappings,
diff --git a/src/shell/switches.h b/src/shell/switches.h
index 4d00f50..f4e0eb1 100644
--- a/src/shell/switches.h
+++ b/src/shell/switches.h
@@ -28,6 +28,7 @@ extern const char kTraceStartup[];
 extern const char kTraceStartupDuration[];
 extern const char kTraceStartupOutputName[];
 extern const char kURLMappings[];
+extern const char kExternalConnectionAddress[];
 
 extern const std::set<std::string> GetAllSwitches();