http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/js/bootstrap/tab.js
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/js/bootstrap/tab.js 
b/website/docs/0.4.0-incubating/js/bootstrap/tab.js
new file mode 100755
index 0000000..7d533e8
--- /dev/null
+++ b/website/docs/0.4.0-incubating/js/bootstrap/tab.js
@@ -0,0 +1,155 @@
+/* ========================================================================
+ * Bootstrap: tab.js v3.3.6
+ * http://getbootstrap.com/javascript/#tabs
+ * ========================================================================
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // TAB CLASS DEFINITION
+  // ====================
+
+  var Tab = function (element) {
+    // jscs:disable requireDollarBeforejQueryAssignment
+    this.element = $(element)
+    // jscs:enable requireDollarBeforejQueryAssignment
+  }
+
+  Tab.VERSION = '3.3.6'
+
+  Tab.TRANSITION_DURATION = 150
+
+  Tab.prototype.show = function () {
+    var $this    = this.element
+    var $ul      = $this.closest('ul:not(.dropdown-menu)')
+    var selector = $this.data('target')
+
+    if (!selector) {
+      selector = $this.attr('href')
+      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
+    }
+
+    if ($this.parent('li').hasClass('active')) return
+
+    var $previous = $ul.find('.active:last a')
+    var hideEvent = $.Event('hide.bs.tab', {
+      relatedTarget: $this[0]
+    })
+    var showEvent = $.Event('show.bs.tab', {
+      relatedTarget: $previous[0]
+    })
+
+    $previous.trigger(hideEvent)
+    $this.trigger(showEvent)
+
+    if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) return
+
+    var $target = $(selector)
+
+    this.activate($this.closest('li'), $ul)
+    this.activate($target, $target.parent(), function () {
+      $previous.trigger({
+        type: 'hidden.bs.tab',
+        relatedTarget: $this[0]
+      })
+      $this.trigger({
+        type: 'shown.bs.tab',
+        relatedTarget: $previous[0]
+      })
+    })
+  }
+
+  Tab.prototype.activate = function (element, container, callback) {
+    var $active    = container.find('> .active')
+    var transition = callback
+      && $.support.transition
+      && ($active.length && $active.hasClass('fade') || !!container.find('> .fade').length)
+
+    function next() {
+      $active
+        .removeClass('active')
+        .find('> .dropdown-menu > .active')
+          .removeClass('active')
+        .end()
+        .find('[data-toggle="tab"]')
+          .attr('aria-expanded', false)
+
+      element
+        .addClass('active')
+        .find('[data-toggle="tab"]')
+          .attr('aria-expanded', true)
+
+      if (transition) {
+        element[0].offsetWidth // reflow for transition
+        element.addClass('in')
+      } else {
+        element.removeClass('fade')
+      }
+
+      if (element.parent('.dropdown-menu').length) {
+        element
+          .closest('li.dropdown')
+            .addClass('active')
+          .end()
+          .find('[data-toggle="tab"]')
+            .attr('aria-expanded', true)
+      }
+
+      callback && callback()
+    }
+
+    $active.length && transition ?
+      $active
+        .one('bsTransitionEnd', next)
+        .emulateTransitionEnd(Tab.TRANSITION_DURATION) :
+      next()
+
+    $active.removeClass('in')
+  }
+
+
+  // TAB PLUGIN DEFINITION
+  // =====================
+
+  function Plugin(option) {
+    return this.each(function () {
+      var $this = $(this)
+      var data  = $this.data('bs.tab')
+
+      if (!data) $this.data('bs.tab', (data = new Tab(this)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  var old = $.fn.tab
+
+  $.fn.tab             = Plugin
+  $.fn.tab.Constructor = Tab
+
+
+  // TAB NO CONFLICT
+  // ===============
+
+  $.fn.tab.noConflict = function () {
+    $.fn.tab = old
+    return this
+  }
+
+
+  // TAB DATA-API
+  // ============
+
+  var clickHandler = function (e) {
+    e.preventDefault()
+    Plugin.call($(this), 'show')
+  }
+
+  $(document)
+    .on('click.bs.tab.data-api', '[data-toggle="tab"]', clickHandler)
+    .on('click.bs.tab.data-api', '[data-toggle="pill"]', clickHandler)
+
+}(jQuery);

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/js/bootstrap/tooltip.js
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/js/bootstrap/tooltip.js 
b/website/docs/0.4.0-incubating/js/bootstrap/tooltip.js
new file mode 100755
index 0000000..7094b34
--- /dev/null
+++ b/website/docs/0.4.0-incubating/js/bootstrap/tooltip.js
@@ -0,0 +1,514 @@
+/* ========================================================================
+ * Bootstrap: tooltip.js v3.3.6
+ * http://getbootstrap.com/javascript/#tooltip
+ * Inspired by the original jQuery.tipsy by Jason Frame
+ * ========================================================================
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // TOOLTIP PUBLIC CLASS DEFINITION
+  // ===============================
+
+  var Tooltip = function (element, options) {
+    this.type       = null
+    this.options    = null
+    this.enabled    = null
+    this.timeout    = null
+    this.hoverState = null
+    this.$element   = null
+    this.inState    = null
+
+    this.init('tooltip', element, options)
+  }
+
+  Tooltip.VERSION  = '3.3.6'
+
+  Tooltip.TRANSITION_DURATION = 150
+
+  Tooltip.DEFAULTS = {
+    animation: true,
+    placement: 'top',
+    selector: false,
+    template: '<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',
+    trigger: 'hover focus',
+    title: '',
+    delay: 0,
+    html: false,
+    container: false,
+    viewport: {
+      selector: 'body',
+      padding: 0
+    }
+  }
+
+  Tooltip.prototype.init = function (type, element, options) {
+    this.enabled   = true
+    this.type      = type
+    this.$element  = $(element)
+    this.options   = this.getOptions(options)
+    this.$viewport = this.options.viewport && $($.isFunction(this.options.viewport) ? this.options.viewport.call(this, this.$element) : (this.options.viewport.selector || this.options.viewport))
+    this.inState   = { click: false, hover: false, focus: false }
+
+    if (this.$element[0] instanceof document.constructor && !this.options.selector) {
+      throw new Error('`selector` option must be specified when initializing ' + this.type + ' on the window.document object!')
+    }
+
+    var triggers = this.options.trigger.split(' ')
+
+    for (var i = triggers.length; i--;) {
+      var trigger = triggers[i]
+
+      if (trigger == 'click') {
+        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
+      } else if (trigger != 'manual') {
+        var eventIn  = trigger == 'hover' ? 'mouseenter' : 'focusin'
+        var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'
+
+        this.$element.on(eventIn  + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
+        this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
+      }
+    }
+
+    this.options.selector ?
+      (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
+      this.fixTitle()
+  }
+
+  Tooltip.prototype.getDefaults = function () {
+    return Tooltip.DEFAULTS
+  }
+
+  Tooltip.prototype.getOptions = function (options) {
+    options = $.extend({}, this.getDefaults(), this.$element.data(), options)
+
+    if (options.delay && typeof options.delay == 'number') {
+      options.delay = {
+        show: options.delay,
+        hide: options.delay
+      }
+    }
+
+    return options
+  }
+
+  Tooltip.prototype.getDelegateOptions = function () {
+    var options  = {}
+    var defaults = this.getDefaults()
+
+    this._options && $.each(this._options, function (key, value) {
+      if (defaults[key] != value) options[key] = value
+    })
+
+    return options
+  }
+
+  Tooltip.prototype.enter = function (obj) {
+    var self = obj instanceof this.constructor ?
+      obj : $(obj.currentTarget).data('bs.' + this.type)
+
+    if (!self) {
+      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
+      $(obj.currentTarget).data('bs.' + this.type, self)
+    }
+
+    if (obj instanceof $.Event) {
+      self.inState[obj.type == 'focusin' ? 'focus' : 'hover'] = true
+    }
+
+    if (self.tip().hasClass('in') || self.hoverState == 'in') {
+      self.hoverState = 'in'
+      return
+    }
+
+    clearTimeout(self.timeout)
+
+    self.hoverState = 'in'
+
+    if (!self.options.delay || !self.options.delay.show) return self.show()
+
+    self.timeout = setTimeout(function () {
+      if (self.hoverState == 'in') self.show()
+    }, self.options.delay.show)
+  }
+
+  Tooltip.prototype.isInStateTrue = function () {
+    for (var key in this.inState) {
+      if (this.inState[key]) return true
+    }
+
+    return false
+  }
+
+  Tooltip.prototype.leave = function (obj) {
+    var self = obj instanceof this.constructor ?
+      obj : $(obj.currentTarget).data('bs.' + this.type)
+
+    if (!self) {
+      self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
+      $(obj.currentTarget).data('bs.' + this.type, self)
+    }
+
+    if (obj instanceof $.Event) {
+      self.inState[obj.type == 'focusout' ? 'focus' : 'hover'] = false
+    }
+
+    if (self.isInStateTrue()) return
+
+    clearTimeout(self.timeout)
+
+    self.hoverState = 'out'
+
+    if (!self.options.delay || !self.options.delay.hide) return self.hide()
+
+    self.timeout = setTimeout(function () {
+      if (self.hoverState == 'out') self.hide()
+    }, self.options.delay.hide)
+  }
+
+  Tooltip.prototype.show = function () {
+    var e = $.Event('show.bs.' + this.type)
+
+    if (this.hasContent() && this.enabled) {
+      this.$element.trigger(e)
+
+      var inDom = $.contains(this.$element[0].ownerDocument.documentElement, this.$element[0])
+      if (e.isDefaultPrevented() || !inDom) return
+      var that = this
+
+      var $tip = this.tip()
+
+      var tipId = this.getUID(this.type)
+
+      this.setContent()
+      $tip.attr('id', tipId)
+      this.$element.attr('aria-describedby', tipId)
+
+      if (this.options.animation) $tip.addClass('fade')
+
+      var placement = typeof this.options.placement == 'function' ?
+        this.options.placement.call(this, $tip[0], this.$element[0]) :
+        this.options.placement
+
+      var autoToken = /\s?auto?\s?/i
+      var autoPlace = autoToken.test(placement)
+      if (autoPlace) placement = placement.replace(autoToken, '') || 'top'
+
+      $tip
+        .detach()
+        .css({ top: 0, left: 0, display: 'block' })
+        .addClass(placement)
+        .data('bs.' + this.type, this)
+
+      this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)
+      this.$element.trigger('inserted.bs.' + this.type)
+
+      var pos          = this.getPosition()
+      var actualWidth  = $tip[0].offsetWidth
+      var actualHeight = $tip[0].offsetHeight
+
+      if (autoPlace) {
+        var orgPlacement = placement
+        var viewportDim = this.getPosition(this.$viewport)
+
+        placement = placement == 'bottom' && pos.bottom + actualHeight > viewportDim.bottom ? 'top'    :
+                    placement == 'top'    && pos.top    - actualHeight < viewportDim.top    ? 'bottom' :
+                    placement == 'right'  && pos.right  + actualWidth  > viewportDim.width  ? 'left'   :
+                    placement == 'left'   && pos.left   - actualWidth  < viewportDim.left   ? 'right'  :
+                    placement
+
+        $tip
+          .removeClass(orgPlacement)
+          .addClass(placement)
+      }
+
+      var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)
+
+      this.applyPlacement(calculatedOffset, placement)
+
+      var complete = function () {
+        var prevHoverState = that.hoverState
+        that.$element.trigger('shown.bs.' + that.type)
+        that.hoverState = null
+
+        if (prevHoverState == 'out') that.leave(that)
+      }
+
+      $.support.transition && this.$tip.hasClass('fade') ?
+        $tip
+          .one('bsTransitionEnd', complete)
+          .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :
+        complete()
+    }
+  }
+
+  Tooltip.prototype.applyPlacement = function (offset, placement) {
+    var $tip   = this.tip()
+    var width  = $tip[0].offsetWidth
+    var height = $tip[0].offsetHeight
+
+    // manually read margins because getBoundingClientRect includes difference
+    var marginTop = parseInt($tip.css('margin-top'), 10)
+    var marginLeft = parseInt($tip.css('margin-left'), 10)
+
+    // we must check for NaN for ie 8/9
+    if (isNaN(marginTop))  marginTop  = 0
+    if (isNaN(marginLeft)) marginLeft = 0
+
+    offset.top  += marginTop
+    offset.left += marginLeft
+
+    // $.fn.offset doesn't round pixel values
+    // so we use setOffset directly with our own function B-0
+    $.offset.setOffset($tip[0], $.extend({
+      using: function (props) {
+        $tip.css({
+          top: Math.round(props.top),
+          left: Math.round(props.left)
+        })
+      }
+    }, offset), 0)
+
+    $tip.addClass('in')
+
+    // check to see if placing tip in new offset caused the tip to resize itself
+    var actualWidth  = $tip[0].offsetWidth
+    var actualHeight = $tip[0].offsetHeight
+
+    if (placement == 'top' && actualHeight != height) {
+      offset.top = offset.top + height - actualHeight
+    }
+
+    var delta = this.getViewportAdjustedDelta(placement, offset, actualWidth, actualHeight)
+
+    if (delta.left) offset.left += delta.left
+    else offset.top += delta.top
+
+    var isVertical          = /top|bottom/.test(placement)
+    var arrowDelta          = isVertical ? delta.left * 2 - width + actualWidth : delta.top * 2 - height + actualHeight
+    var arrowOffsetPosition = isVertical ? 'offsetWidth' : 'offsetHeight'
+
+    $tip.offset(offset)
+    this.replaceArrow(arrowDelta, $tip[0][arrowOffsetPosition], isVertical)
+  }
+
+  Tooltip.prototype.replaceArrow = function (delta, dimension, isVertical) {
+    this.arrow()
+      .css(isVertical ? 'left' : 'top', 50 * (1 - delta / dimension) + '%')
+      .css(isVertical ? 'top' : 'left', '')
+  }
+
+  Tooltip.prototype.setContent = function () {
+    var $tip  = this.tip()
+    var title = this.getTitle()
+
+    $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)
+    $tip.removeClass('fade in top bottom left right')
+  }
+
+  Tooltip.prototype.hide = function (callback) {
+    var that = this
+    var $tip = $(this.$tip)
+    var e    = $.Event('hide.bs.' + this.type)
+
+    function complete() {
+      if (that.hoverState != 'in') $tip.detach()
+      that.$element
+        .removeAttr('aria-describedby')
+        .trigger('hidden.bs.' + that.type)
+      callback && callback()
+    }
+
+    this.$element.trigger(e)
+
+    if (e.isDefaultPrevented()) return
+
+    $tip.removeClass('in')
+
+    $.support.transition && $tip.hasClass('fade') ?
+      $tip
+        .one('bsTransitionEnd', complete)
+        .emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :
+      complete()
+
+    this.hoverState = null
+
+    return this
+  }
+
+  Tooltip.prototype.fixTitle = function () {
+    var $e = this.$element
+    if ($e.attr('title') || typeof $e.attr('data-original-title') != 'string') {
+      $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
+    }
+  }
+
+  Tooltip.prototype.hasContent = function () {
+    return this.getTitle()
+  }
+
+  Tooltip.prototype.getPosition = function ($element) {
+    $element   = $element || this.$element
+
+    var el     = $element[0]
+    var isBody = el.tagName == 'BODY'
+
+    var elRect    = el.getBoundingClientRect()
+    if (elRect.width == null) {
+      // width and height are missing in IE8, so compute them manually; see https://github.com/twbs/bootstrap/issues/14093
+      elRect = $.extend({}, elRect, { width: elRect.right - elRect.left, height: elRect.bottom - elRect.top })
+    }
+    var elOffset  = isBody ? { top: 0, left: 0 } : $element.offset()
+    var scroll    = { scroll: isBody ? document.documentElement.scrollTop || document.body.scrollTop : $element.scrollTop() }
+    var outerDims = isBody ? { width: $(window).width(), height: $(window).height() } : null
+
+    return $.extend({}, elRect, scroll, outerDims, elOffset)
+  }
+
+  Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {
+    return placement == 'bottom' ? { top: pos.top + pos.height,   left: pos.left + pos.width / 2 - actualWidth / 2 } :
+           placement == 'top'    ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2 } :
+           placement == 'left'   ? { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :
+        /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width }
+
+  }
+
+  Tooltip.prototype.getViewportAdjustedDelta = function (placement, pos, actualWidth, actualHeight) {
+    var delta = { top: 0, left: 0 }
+    if (!this.$viewport) return delta
+
+    var viewportPadding = this.options.viewport && this.options.viewport.padding || 0
+    var viewportDimensions = this.getPosition(this.$viewport)
+
+    if (/right|left/.test(placement)) {
+      var topEdgeOffset    = pos.top - viewportPadding - viewportDimensions.scroll
+      var bottomEdgeOffset = pos.top + viewportPadding - viewportDimensions.scroll + actualHeight
+      if (topEdgeOffset < viewportDimensions.top) { // top overflow
+        delta.top = viewportDimensions.top - topEdgeOffset
+      } else if (bottomEdgeOffset > viewportDimensions.top + viewportDimensions.height) { // bottom overflow
+        delta.top = viewportDimensions.top + viewportDimensions.height - bottomEdgeOffset
+      }
+    } else {
+      var leftEdgeOffset  = pos.left - viewportPadding
+      var rightEdgeOffset = pos.left + viewportPadding + actualWidth
+      if (leftEdgeOffset < viewportDimensions.left) { // left overflow
+        delta.left = viewportDimensions.left - leftEdgeOffset
+      } else if (rightEdgeOffset > viewportDimensions.right) { // right overflow
+        delta.left = viewportDimensions.left + viewportDimensions.width - rightEdgeOffset
+      }
+    }
+
+    return delta
+  }
+
+  Tooltip.prototype.getTitle = function () {
+    var title
+    var $e = this.$element
+    var o  = this.options
+
+    title = $e.attr('data-original-title')
+      || (typeof o.title == 'function' ? o.title.call($e[0]) :  o.title)
+
+    return title
+  }
+
+  Tooltip.prototype.getUID = function (prefix) {
+    do prefix += ~~(Math.random() * 1000000)
+    while (document.getElementById(prefix))
+    return prefix
+  }
+
+  Tooltip.prototype.tip = function () {
+    if (!this.$tip) {
+      this.$tip = $(this.options.template)
+      if (this.$tip.length != 1) {
+        throw new Error(this.type + ' `template` option must consist of exactly 1 top-level element!')
+      }
+    }
+    return this.$tip
+  }
+
+  Tooltip.prototype.arrow = function () {
+    return (this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow'))
+  }
+
+  Tooltip.prototype.enable = function () {
+    this.enabled = true
+  }
+
+  Tooltip.prototype.disable = function () {
+    this.enabled = false
+  }
+
+  Tooltip.prototype.toggleEnabled = function () {
+    this.enabled = !this.enabled
+  }
+
+  Tooltip.prototype.toggle = function (e) {
+    var self = this
+    if (e) {
+      self = $(e.currentTarget).data('bs.' + this.type)
+      if (!self) {
+        self = new this.constructor(e.currentTarget, this.getDelegateOptions())
+        $(e.currentTarget).data('bs.' + this.type, self)
+      }
+    }
+
+    if (e) {
+      self.inState.click = !self.inState.click
+      if (self.isInStateTrue()) self.enter(self)
+      else self.leave(self)
+    } else {
+      self.tip().hasClass('in') ? self.leave(self) : self.enter(self)
+    }
+  }
+
+  Tooltip.prototype.destroy = function () {
+    var that = this
+    clearTimeout(this.timeout)
+    this.hide(function () {
+      that.$element.off('.' + that.type).removeData('bs.' + that.type)
+      if (that.$tip) {
+        that.$tip.detach()
+      }
+      that.$tip = null
+      that.$arrow = null
+      that.$viewport = null
+    })
+  }
+
+
+  // TOOLTIP PLUGIN DEFINITION
+  // =========================
+
+  function Plugin(option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.tooltip')
+      var options = typeof option == 'object' && option
+
+      if (!data && /destroy|hide/.test(option)) return
+      if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  var old = $.fn.tooltip
+
+  $.fn.tooltip             = Plugin
+  $.fn.tooltip.Constructor = Tooltip
+
+
+  // TOOLTIP NO CONFLICT
+  // ===================
+
+  $.fn.tooltip.noConflict = function () {
+    $.fn.tooltip = old
+    return this
+  }
+
+}(jQuery);

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/js/bootstrap/transition.js
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/js/bootstrap/transition.js 
b/website/docs/0.4.0-incubating/js/bootstrap/transition.js
new file mode 100755
index 0000000..fae36ed
--- /dev/null
+++ b/website/docs/0.4.0-incubating/js/bootstrap/transition.js
@@ -0,0 +1,59 @@
+/* ========================================================================
+ * Bootstrap: transition.js v3.3.6
+ * http://getbootstrap.com/javascript/#transitions
+ * ========================================================================
+ * Copyright 2011-2015 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
+  // ============================================================
+
+  function transitionEnd() {
+    var el = document.createElement('bootstrap')
+
+    var transEndEventNames = {
+      WebkitTransition : 'webkitTransitionEnd',
+      MozTransition    : 'transitionend',
+      OTransition      : 'oTransitionEnd otransitionend',
+      transition       : 'transitionend'
+    }
+
+    for (var name in transEndEventNames) {
+      if (el.style[name] !== undefined) {
+        return { end: transEndEventNames[name] }
+      }
+    }
+
+    return false // explicit for ie8 (  ._.)
+  }
+
+  // http://blog.alexmaccaw.com/css-transitions
+  $.fn.emulateTransitionEnd = function (duration) {
+    var called = false
+    var $el = this
+    $(this).one('bsTransitionEnd', function () { called = true })
+    var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
+    setTimeout(callback, duration)
+    return this
+  }
+
+  $(function () {
+    $.support.transition = transitionEnd()
+
+    if (!$.support.transition) return
+
+    $.event.special.bsTransitionEnd = {
+      bindType: $.support.transition.end,
+      delegateType: $.support.transition.end,
+      handle: function (e) {
+        if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments)
+      }
+    }
+  })
+
+}(jQuery);

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/performance/main.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/performance/main.rst 
b/website/docs/0.4.0-incubating/performance/main.rst
new file mode 100644
index 0000000..6dd70c7
--- /dev/null
+++ b/website/docs/0.4.0-incubating/performance/main.rst
@@ -0,0 +1,8 @@
+---
+layout: default
+---
+
+Performance
+===========
+
+(performance results and benchmark)

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/start/building.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/start/building.rst 
b/website/docs/0.4.0-incubating/start/building.rst
new file mode 100644
index 0000000..5a821f2
--- /dev/null
+++ b/website/docs/0.4.0-incubating/start/building.rst
@@ -0,0 +1,92 @@
+---
+title: Build DistributedLog from Source
+top-nav-group: setup
+top-nav-pos: 1
+top-nav-title: Build DistributedLog from Source
+layout: default
+---
+
+.. contents:: This page covers how to build DistributedLog {{ site.distributedlog_version }} from source.
+
+Build DistributedLog
+====================
+
+In order to build DistributedLog, you need the source code. Either
+`download the source of a release`_ or `clone the git repository`_.
+
+.. _download the source of a release: {{ site.baseurl }}/download
+.. _clone the git repository: {{ site.github_url }}
+
+In addition, you need **Maven 3** and a **JDK** (Java Development Kit).
+DistributedLog requires **at least Java 7** to build. We recommend using Java 8.
+
+To clone from git, enter:
+
+.. code-block:: bash
+
+    git clone {{ site.github_url }}
+
+
+The simplest way of building DistributedLog is by running:
+
+.. code-block:: bash
+
+    mvn clean package -DskipTests
+
+
+This instructs Maven_ (`mvn`) to first remove all existing builds (`clean`) and
+then create a new DistributedLog package (`package`). The `-DskipTests` option
+prevents Maven from executing the tests.
+
+.. _Maven: http://maven.apache.org
+
+Build
+~~~~~
+
+- Build all the components without running tests
+
+.. code-block:: bash
+
+    mvn clean package -DskipTests
+
+- Build all the components and run all the tests
+
+.. code-block:: bash
+
+    mvn clean package
+
+
+- Build a single component: DistributedLog uses the Maven shade plugin, and
+  shading only runs during packaging, so pre-install the dependencies before
+  building a single component.
+
+.. code-block:: bash
+
+    mvn clean install -DskipTests
+    mvn -pl :<module-name> package [-DskipTests]  # example: mvn -pl :distributedlog-core package
+
+
+- Test a single class: likewise, because shading only runs during packaging,
+  pre-install the dependencies before testing a single component.
+
+.. code-block:: bash
+
+    mvn clean install -DskipTests
+    mvn -pl :<module-name> clean test -Dtest=<test-class-name>
+
+
+Scala Versions
+~~~~~~~~~~~~~~
+
+DistributedLog has dependencies such as `Twitter Util`_ and Finagle_ that are
+written in Scala_. Users of the Scala API and libraries may have to match the
+Scala version of DistributedLog with the Scala version of their projects
+(because Scala is not strictly backwards compatible).
+
+.. _Twitter Util: https://twitter.github.io/util/
+.. _Finagle: https://twitter.github.io/finagle/
+.. _Scala: http://scala-lang.org
+
+**By default, DistributedLog is built with Scala 2.11**. To build
+DistributedLog with Scala *2.10*, you can change the default Scala
+*binary version* with the following script:
+
+.. code-block:: bash
+
+    # Switch Scala binary version between 2.10 and 2.11
+    tools/change-scala-version.sh 2.10
+    # Build with Scala version 2.10
+    mvn clean install -DskipTests
+
+
+DistributedLog is developed against Scala *2.11* and additionally tested
+against Scala *2.10*. These two versions are known to be compatible. Earlier
+versions (like Scala *2.9*) are *not* compatible.
+
+Newer versions may be compatible, depending on breaking changes in the
+language features used by DistributedLog's dependencies, and the availability
+of DistributedLog's dependencies in those Scala versions.

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/start/download.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/start/download.rst 
b/website/docs/0.4.0-incubating/start/download.rst
new file mode 100644
index 0000000..308f36a
--- /dev/null
+++ b/website/docs/0.4.0-incubating/start/download.rst
@@ -0,0 +1,88 @@
+---
+title: Download Releases
+top-nav-group: setup
+top-nav-pos: 2
+top-nav-title: Download Releases
+layout: default
+---
+
+.. contents:: This page covers how to download DistributedLog releases.
+
+Releases
+========
+
+`0.4.0-incubating` is the latest release.
+
+You can verify your download by checking its MD5 and SHA-1 checksums.
+
+0.4.0-incubating
+~~~~~~~~~~~~~~~~
+
+This is the first Apache release. Download here_.
+
+- `Release Notes`_
+- `Announce Blog Post`_
+
+.. _here: https://dist.apache.org/repos/dist/release/incubator/distributedlog/0.4.0-incubating
+.. _Release Notes: https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12320620&version=12337980
+.. _Announce Blog Post: /releases/2017/04/23/the-first-release.html
+
+
+The releases made before DistributedLog entered the Apache Incubator are listed below:
+
+0.3.51-RC1
+~~~~~~~~~~
+
+This is the second release candidate for 0.3.51.
+
+- Source download: 0.3.51-RC1.zip_
+- Binary downloads: 
+    - Service: distributedlog-service-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip_
+    - Benchmark: distributedlog-benchmark-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip_
+    - Tutorials: distributedlog-tutorials-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip_
+    - All: distributedlog-all-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip_
+
+.. _0.3.51-RC1.zip: https://github.com/twitter/distributedlog/archive/0.3.51-RC1.zip
+.. _distributedlog-all-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip: https://github.com/twitter/distributedlog/releases/download/0.3.51-RC1/distributedlog-all-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip
+.. _distributedlog-service-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip: https://github.com/twitter/distributedlog/releases/download/0.3.51-RC1/distributedlog-service-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip
+.. _distributedlog-benchmark-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip: https://github.com/twitter/distributedlog/releases/download/0.3.51-RC1/distributedlog-benchmark-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip
+.. _distributedlog-tutorials-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip: https://github.com/twitter/distributedlog/releases/download/0.3.51-RC1/distributedlog-tutorials-3ff9e33fa577f50eebb8ee971ddb265c971c3717.zip
+
+0.3.51-RC0
+~~~~~~~~~~
+
+This is the first release candidate for 0.3.51_.
+
+- Source download: 0.3.51-RC0.zip_
+- Binary downloads: 
+    - Service: distributedlog-service-63d214d3a739cb58a71a8b51127f165d15f00584.zip_
+    - Benchmark: distributedlog-benchmark-63d214d3a739cb58a71a8b51127f165d15f00584.zip_
+    - Tutorials: distributedlog-tutorials-63d214d3a739cb58a71a8b51127f165d15f00584.zip_
+    - All: distributedlog-all-63d214d3a739cb58a71a8b51127f165d15f00584.zip_
+
+.. _0.3.51: https://github.com/twitter/distributedlog/releases/tag/0.3.51-RC0
+.. _0.3.51-RC0.zip: https://github.com/twitter/distributedlog/archive/0.3.51-RC0.zip
+.. _distributedlog-all-63d214d3a739cb58a71a8b51127f165d15f00584.zip: https://github.com/twitter/distributedlog/releases/download/0.3.51-RC0/distributedlog-all-63d214d3a739cb58a71a8b51127f165d15f00584.zip
+.. _distributedlog-service-63d214d3a739cb58a71a8b51127f165d15f00584.zip: https://github.com/twitter/distributedlog/releases/download/0.3.51-RC0/distributedlog-service-63d214d3a739cb58a71a8b51127f165d15f00584.zip
+.. _distributedlog-benchmark-63d214d3a739cb58a71a8b51127f165d15f00584.zip: https://github.com/twitter/distributedlog/releases/download/0.3.51-RC0/distributedlog-benchmark-63d214d3a739cb58a71a8b51127f165d15f00584.zip
+.. _distributedlog-tutorials-63d214d3a739cb58a71a8b51127f165d15f00584.zip: https://github.com/twitter/distributedlog/releases/download/0.3.51-RC0/distributedlog-tutorials-63d214d3a739cb58a71a8b51127f165d15f00584.zip
+
+Maven Dependencies
+==================
+
+You can add the following dependencies to your `pom.xml` to include Apache
+DistributedLog in your project.
+
+.. code-block:: xml
+
+  <!-- use core library to access DL storage -->
+  <dependency>
+    <groupId>com.twitter</groupId>
+    <artifactId>distributedlog-core_2.11</artifactId>
+    <version>{{ site.DL_VERSION_STABLE }}</version>
+  </dependency>
+  <!-- use thin proxy client to access DL via write proxy -->
+  <dependency>
+    <groupId>com.twitter</groupId>
+    <artifactId>distributedlog-client_2.11</artifactId>
+    <version>{{ site.DL_VERSION_STABLE }}</version>
+  </dependency>

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/start/quickstart.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/start/quickstart.rst 
b/website/docs/0.4.0-incubating/start/quickstart.rst
new file mode 100644
index 0000000..010d323
--- /dev/null
+++ b/website/docs/0.4.0-incubating/start/quickstart.rst
@@ -0,0 +1,127 @@
+---
+title: Setup & Run Example
+top-nav-group: quickstart
+top-nav-pos: 1
+top-nav-title: Setup & Run Example
+layout: default
+---
+
+.. contents:: Get a DistributedLog cluster up and running locally and run the example program in a few simple steps.
+
+Quick Start
+===========
+
+This tutorial assumes you are starting fresh and have no existing BookKeeper
+or ZooKeeper data. If you already have an existing BookKeeper or ZooKeeper
+cluster, you can check out the deploy_ section for more details on how to
+deploy a production cluster.
+
+.. _deploy: ../deployment/cluster
+
+Step 1: Download the binary
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Download_ the stable version of `DistributedLog` and unzip it.
+
+.. _Download: ./download
+
+::
+
+    // Download the binary `distributedlog-all-${gitsha}.zip`
+    > unzip distributedlog-all-${gitsha}.zip
+
+
+Step 2: Start ZooKeeper & BookKeeper
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DistributedLog uses `ZooKeeper` as the metadata store and `BookKeeper` as the
+log segment store, so you first need to start a ZooKeeper server and a few
+bookies if you don't already have them. You can use the `dlog` script in the
+`distributedlog-service` package to get a standalone BookKeeper sandbox. It
+starts a ZooKeeper server and `N` bookies (`N` is 3 by default).
+
+::
+
+    // Start the local sandbox instance at port `7000`
+    > ./distributedlog-service/bin/dlog local 7000
+    DistributedLog Sandbox is running now. You could access distributedlog://127.0.0.1:7000
+
+
+Step 3: Create a DistributedLog namespace
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before using distributedlog, you need to create a distributedlog namespace to
+store your own list of streams. The zkServer for the local sandbox is
+`127.0.0.1:7000` and the bookkeeper's ledgers path is `/ledgers`. You can
+create a namespace pointing to the corresponding bookkeeper cluster.
+
+::
+
+    > ./distributedlog-service/bin/dlog admin bind -l /ledgers -s 127.0.0.1:7000 -c distributedlog://127.0.0.1:7000/messaging/my_namespace
+    No bookkeeper is bound to distributedlog://127.0.0.1:7000/messaging/my_namespace
+    Created binding on distributedlog://127.0.0.1:7000/messaging/my_namespace.
+
+
+If you don't want to create a separate namespace, you can use the default
+namespace `distributedlog://127.0.0.1:7000/messaging/distributedlog`.
+
+
+Step 4: Create some log streams
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Let's create 5 log streams, prefixed with `messaging-stream-`.
+
+::
+
+    > ./distributedlog-service/bin/dlog tool create -u distributedlog://127.0.0.1:7000/messaging/my_namespace -r messaging-stream- -e 1-5
+
+
+We can now see the streams if we run the `list` command from the tool.
+
+::
+    
+    > ./distributedlog-service/bin/dlog tool list -u distributedlog://127.0.0.1:7000/messaging/my_namespace
+    Streams under distributedlog://127.0.0.1:7000/messaging/my_namespace :
+    --------------------------------
+    messaging-stream-1
+    messaging-stream-3
+    messaging-stream-2
+    messaging-stream-4
+    messaging-stream-5
+    --------------------------------
+
+
+Step 5: Start a write proxy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now, let's start a write proxy server that serves writes to the distributedlog
+namespace `distributedlog://127.0.0.1/messaging/my_namespace`. The server
+listens on port 8000 to accept fan-in write requests.
+
+::
+    
+    > ./distributedlog-service/bin/dlog-daemon.sh start writeproxy -p 8000 --shard-id 1 -sp 8001 -u distributedlog://127.0.0.1:7000/messaging/my_namespace -mx -c `pwd`/distributedlog-service/conf/distributedlog_proxy.conf
+
+From 0.3.51-RC1 onwards, use the command below to start the write proxy:
+
+::
+
+   > WP_SHARD_ID=1 WP_SERVICE_PORT=8000 WP_STATS_PORT=8001 WP_NAMESPACE='distributedlog://127.0.0.1:7000/messaging/my_namespace' ./distributedlog-service/bin/dlog-daemon.sh start writeproxy
+
+Step 6: Tail reading records
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The distributedlog tutorial has a multi-stream reader that dumps received
+records to standard output.
+
+::
+    
+    > ./distributedlog-tutorials/distributedlog-basic/bin/runner run org.apache.distributedlog.basic.MultiReader distributedlog://127.0.0.1:7000/messaging/my_namespace messaging-stream-1,messaging-stream-2,messaging-stream-3,messaging-stream-4,messaging-stream-5
+
+
+Step 7: Write some records
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The distributedlog tutorial also has a multi-stream writer that takes input
+from the console and writes it out as records to the distributedlog write
+proxy. Each line is sent as a separate record.
+
+Run the writer and type a few lines into the console to send to the server.
+
+::
+    
+    > ./distributedlog-tutorials/distributedlog-basic/bin/runner run org.apache.distributedlog.basic.ConsoleProxyMultiWriter 'inet!127.0.0.1:8000' messaging-stream-1,messaging-stream-2,messaging-stream-3,messaging-stream-4,messaging-stream-5
+
+If you have each of the above commands running in a different terminal, you
+should now be able to type messages into the writer terminal and see them
+appear in the reader terminal.

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/styles/site.scss
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/styles/site.scss 
b/website/docs/0.4.0-incubating/styles/site.scss
new file mode 100644
index 0000000..cb6f8f8
--- /dev/null
+++ b/website/docs/0.4.0-incubating/styles/site.scss
@@ -0,0 +1,4 @@
+---
+---
+
+@import "bootstrap";

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/tutorials/analytics-mapreduce.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/tutorials/analytics-mapreduce.rst 
b/website/docs/0.4.0-incubating/tutorials/analytics-mapreduce.rst
new file mode 100644
index 0000000..6a18d4a
--- /dev/null
+++ b/website/docs/0.4.0-incubating/tutorials/analytics-mapreduce.rst
@@ -0,0 +1,214 @@
+---
+title: Tutorial - DistributedLog meets MapReduce
+layout: default
+---
+
+.. contents:: Tutorial - DistributedLog meets MapReduce
+
+DistributedLog meets MapReduce
+==============================
+
+A distributedlog log stream consists of log segments, and each log segment is
+distributed across multiple bookie nodes. This data distribution allows
+distributedlog to integrate easily with analytics processing systems like
+*MapReduce* and *Spark*. This tutorial shows how you could use *MapReduce* to
+process log stream data in batch and how *MapReduce* can leverage the data
+locality of log segments.
+
+InputFormat
+~~~~~~~~~~~
+
+**InputFormat** is one of the fundamental classes in the Hadoop MapReduce
+framework, used for accessing data from different sources. The class is
+responsible for defining two main things:
+
+- Data Splits
+- Record Reader
+
+*Data Split* is a fundamental concept in the Hadoop MapReduce framework: it
+defines both the size of an individual Map task and its potential execution
+server. The *Record Reader* is responsible for actually reading records from
+the *data split* and submitting them (as key/value pairs) to the mapper.
+
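+For reference, this is roughly the contract that the Hadoop MapReduce API
+(`org.apache.hadoop.mapreduce`) expects an **InputFormat** to fulfill; the rest
+of this tutorial maps these two responsibilities onto distributedlog's log
+segments:
+
+::
+
+    public abstract class InputFormat<K, V> {
+
+        // Split the input of the job into logical chunks, one per map task.
+        public abstract List<InputSplit> getSplits(JobContext context)
+                throws IOException, InterruptedException;
+
+        // Create the reader that turns a single split into key/value pairs.
+        public abstract RecordReader<K, V> createRecordReader(
+                InputSplit split, TaskAttemptContext context)
+                throws IOException, InterruptedException;
+    }
+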
+When using distributedlog log streams as the sources for a MapReduce job, the
+*log segments* are the *data splits*, while the *log segment reader* for a log
+segment is the *record reader* for a *data split*.
+
+Log Segment vs Data Split
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Any split implementation extends the Hadoop base abstract class **InputSplit**,
+which defines a split length and locations. A distributedlog log segment has a
+*record count*, which can be used to define the length of the split, and its
+metadata contains the storage nodes that store its log records, which can be
+used to define the locations of the split. So we could create a
+**LogSegmentSplit** wrapping a *LogSegment* (LogSegmentMetadata and
+LedgerMetadata).
+
+::
+
+    public class LogSegmentSplit extends InputSplit {
+
+        private LogSegmentMetadata logSegmentMetadata;
+        private LedgerMetadata ledgerMetadata;
+
+        public LogSegmentSplit() {}
+
+        public LogSegmentSplit(LogSegmentMetadata logSegmentMetadata,
+                               LedgerMetadata ledgerMetadata) {
+            this.logSegmentMetadata = logSegmentMetadata;
+            this.ledgerMetadata = ledgerMetadata;
+        }
+
+    }
+
+
+The length of the log segment split is the
+*number of records in the log segment*.
+
+::
+
+    @Override
+    public long getLength()
+            throws IOException, InterruptedException {
+        return logSegmentMetadata.getRecordCount();
+    }
+
+
+The locations of the log segment split are the bookies' addresses in the
+ensembles of the log segment.
+
+::
+
+    @Override
+    public String[] getLocations()
+            throws IOException, InterruptedException {
+        Set<String> locations = Sets.newHashSet();
+        for (ArrayList<BookieSocketAddress> ensemble : ledgerMetadata.getEnsembles().values()) {
+            for (BookieSocketAddress host : ensemble) {
+                locations.add(host.getHostName());
+            }
+        }
+        return locations.toArray(new String[locations.size()]);
+    }
+
+
+At this point, we will have a basic **LogSegmentSplit** wrapping
+*LogSegmentMetadata* and *LedgerMetadata*. Then we could retrieve the list of
+log segments of a log stream and construct the corresponding *data splits* in
+the distributedlog InputFormat.
+
+::
+
+    public class DistributedLogInputFormat
+            extends InputFormat<DLSN, LogRecordWithDLSN> implements Configurable {
+
+        @Override
+        public List<InputSplit> getSplits(JobContext jobContext)
+                throws IOException, InterruptedException {
+            List<LogSegmentMetadata> segments = dlm.getLogSegments();
+            List<InputSplit> inputSplits = Lists.newArrayListWithCapacity(segments.size());
+            BookKeeper bk = namespace.getReaderBKC().get();
+            LedgerManager lm = BookKeeperAccessor.getLedgerManager(bk);
+            final AtomicInteger rcHolder = new AtomicInteger(0);
+            final AtomicReference<LedgerMetadata> metadataHolder = new AtomicReference<LedgerMetadata>(null);
+            for (LogSegmentMetadata segment : segments) {
+                final CountDownLatch latch = new CountDownLatch(1);
+                lm.readLedgerMetadata(segment.getLedgerId(),
+                        new BookkeeperInternalCallbacks.GenericCallback<LedgerMetadata>() {
+                    @Override
+                    public void operationComplete(int rc, LedgerMetadata ledgerMetadata) {
+                        metadataHolder.set(ledgerMetadata);
+                        rcHolder.set(rc);
+                        latch.countDown();
+                    }
+                });
+                latch.await();
+                if (BKException.Code.OK != rcHolder.get()) {
+                    throw new IOException("Failed to get log segment metadata for " + segment + " : "
+                            + BKException.getMessage(rcHolder.get()));
+                }
+                inputSplits.add(new LogSegmentSplit(segment, metadataHolder.get()));
+            }
+            return inputSplits;
+        }
+
+    }
+
+
+Log Segment Record Reader
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+At this point, we know how to break the log streams into *data splits*. Next,
+we need to be able to create a **RecordReader** for an individual *data split*.
+Since each *data split* is effectively a *log segment* in distributedlog, it is
+straightforward to implement it using distributedlog's log segment reader. For
+simplicity, this example uses the raw BookKeeper API to access entries, which
+means it doesn't leverage features like **ReadAhead** provided in
+distributedlog. It could be changed to use the log segment reader for better
+performance.
+
+From the *data split*, we know which log segment to read and its corresponding
+bookkeeper ledger, so we can open the ledger handle when initializing the
+record reader.
+
+::
+
+    LogSegmentReader(String streamName,
+                     DistributedLogConfiguration conf,
+                     BookKeeper bk,
+                     LogSegmentSplit split)
+            throws IOException {
+        this.streamName = streamName;
+        this.bk = bk;
+        this.metadata = split.getMetadata();
+        try {
+            this.lh = bk.openLedgerNoRecovery(
+                    split.getLedgerId(),
+                    BookKeeper.DigestType.CRC32,
+                    conf.getBKDigestPW().getBytes(UTF_8));
+        } catch (BKException e) {
+            throw new IOException(e);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new IOException(e);
+        }
+    }
+
+
+Reading records from the *data split* is effectively reading records from the
+distributedlog log segment.
+
+::
+
+    try {
+        Enumeration<LedgerEntry> entries =
+                lh.readEntries(entryId, entryId);
+        if (entries.hasMoreElements()) {
+            LedgerEntry entry = entries.nextElement();
+            Entry.newBuilder()
+                    .setLogSegmentInfo(metadata.getLogSegmentSequenceNumber(),
+                            metadata.getStartSequenceId())
+                    .setEntryId(entry.getEntryId())
+                    .setEnvelopeEntry(
+                            LogSegmentMetadata.supportsEnvelopedEntries(metadata.getVersion()))
+                    .deserializeRecordSet(true)
+                    .setInputStream(entry.getEntryInputStream())
+                    .buildReader();
+        }
+        return nextKeyValue();
+    } catch (BKException e) {
+        throw new IOException(e);
+    }
+
+
+We could calculate the progress by comparing the position with the record
+count of this log segment.
+
+::
+
+    @Override
+    public float getProgress()
+            throws IOException, InterruptedException {
+        if (metadata.getRecordCount() > 0) {
+            return ((float) (readPos + 1)) / metadata.getRecordCount();
+        }
+        return 1;
+    }
+
+
+Once we have the *LogSegmentSplit* and the *LogSegmentReader* over a split, we
+can hook them up to implement distributedlog's InputFormat, as sketched below.
+Please check out the code_ for more details.
+
+.. _code: https://github.com/apache/incubator-distributedlog/tree/master/distributedlog-tutorials/distributedlog-mapreduce
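+
+As a rough sketch of that final hook-up (assuming the input format keeps the
+stream name, configuration and namespace it was built with as fields, and that
+*LogSegmentReader* extends Hadoop's *RecordReader*), creating the record reader
+for a split is little more than handing the split to a log segment reader:
+
+::
+
+    @Override
+    public RecordReader<DLSN, LogRecordWithDLSN> createRecordReader(
+            InputSplit split, TaskAttemptContext context)
+            throws IOException, InterruptedException {
+        // Each data split is a log segment, so the record reader is simply
+        // a log segment reader over that split.
+        return new LogSegmentReader(
+                streamName,
+                conf,
+                namespace.getReaderBKC().get(),
+                (LogSegmentSplit) split);
+    }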

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/tutorials/basic-1.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/tutorials/basic-1.rst 
b/website/docs/0.4.0-incubating/tutorials/basic-1.rst
new file mode 100644
index 0000000..07826a4
--- /dev/null
+++ b/website/docs/0.4.0-incubating/tutorials/basic-1.rst
@@ -0,0 +1,250 @@
+---
+title: API - Write Records (via core library)
+top-nav-group: quickstart
+top-nav-pos: 2
+top-nav-title: API - Write Records (via core library)
+layout: default
+---
+
+.. contents:: Basic Tutorial - Using Core Library to write records
+
+Basic Tutorial - Write Records using Core Library
+=================================================
+
+This tutorial shows how to write records using core library.
+
+.. sectnum::
+
+Open a writer
+~~~~~~~~~~~~~
+
+Before anything else, you have to open a writer to write records.
+These are the steps to follow to `open a writer`.
+
+Create distributedlog URI
+-------------------------
+
+::
+
+    String dlUriStr = ...;
+    URI uri = URI.create(dlUriStr);
+
+Create distributedlog configuration
+-----------------------------------
+
+::
+
+    DistributedLogConfiguration conf = new DistributedLogConfiguration();
+
+
+Enable immediate flush
+----------------------
+
+::
+
+    conf.setImmediateFlushEnabled(true);
+    conf.setOutputBufferSize(0);
+    conf.setPeriodicFlushFrequencyMilliSeconds(0);
+
+
+Enable immediate locking
+------------------------
+
+With this setting, if there is already a writer writing to the stream, opening
+another writer will fail because the previous writer already holds the lock.
+
+::
+
+    conf.setLockTimeout(DistributedLogConstants.LOCK_IMMEDIATE);
+
+
+Build the distributedlog namespace
+----------------------------------
+
+::
+
+    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
+            .conf(conf)
+            .uri(uri)
+            .regionId(DistributedLogConstants.LOCAL_REGION_ID)
+            .clientId("console-writer")
+            .build(); 
+
+
+Open the writer
+---------------
+
+::
+
+    DistributedLogManager dlm = namespace.openLog("basic-stream-1");
+    AsyncLogWriter writer = FutureUtils.result(dlm.openAsyncLogWriter());
+
+
+Write Records
+~~~~~~~~~~~~~
+
+Once you have a `writer` instance, you can start writing `records` into the
+stream.
+
+Construct a log record
+----------------------
+
+Here let's use `System.currentTimeMillis()` as the `TransactionID`.
+
+::
+
+    byte[] data = ...;
+    LogRecord record = new LogRecord(System.currentTimeMillis(), data); 
+
+
+Write the log record
+--------------------
+
+::
+
+    Future<DLSN> writeFuture = writer.write(record);
+
+
+Register the write callback
+---------------------------
+
+Register a future listener on write completion. The listener will be notified
+once the write completes.
+
+::
+
+    writeFuture.addEventListener(new FutureEventListener<DLSN>() {
+        @Override
+        public void onFailure(Throwable cause) {
+            // executed when write failed.
+        }
+
+        @Override
+        public void onSuccess(DLSN value) {
+            // executed when write completed.
+        }
+    });
+
+
+Close the writer
+~~~~~~~~~~~~~~~~
+
+Close the writer after usage
+----------------------------
+
+::
+
+    FutureUtils.result(writer.asyncClose());
+
+
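+To see how the snippets above fit together, here is a condensed sketch of the
+whole flow (imports are omitted as in the snippets above; the URI, stream name
+and payload are just the example values used in this tutorial):
+
+::
+
+    DistributedLogConfiguration conf = new DistributedLogConfiguration();
+    conf.setImmediateFlushEnabled(true);
+    conf.setOutputBufferSize(0);
+    conf.setPeriodicFlushFrequencyMilliSeconds(0);
+    conf.setLockTimeout(DistributedLogConstants.LOCK_IMMEDIATE);
+
+    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
+            .conf(conf)
+            .uri(URI.create("distributedlog://127.0.0.1:7000/messaging/distributedlog"))
+            .regionId(DistributedLogConstants.LOCAL_REGION_ID)
+            .clientId("console-writer")
+            .build();
+
+    DistributedLogManager dlm = namespace.openLog("basic-stream-1");
+    AsyncLogWriter writer = FutureUtils.result(dlm.openAsyncLogWriter());
+
+    // use the current time as the TransactionID, as above
+    byte[] data = "hello dlog".getBytes(StandardCharsets.UTF_8);
+    LogRecord record = new LogRecord(System.currentTimeMillis(), data);
+    DLSN dlsn = FutureUtils.result(writer.write(record));
+
+    FutureUtils.result(writer.asyncClose());
+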
+Run the tutorial
+~~~~~~~~~~~~~~~~
+
+Run the example in the following steps:
+
+Start the local bookkeeper cluster
+----------------------------------
+
+You can use the following command to start the distributedlog stack locally.
+After distributedlog is started, you can access it using the
+distributedlog URI *distributedlog://127.0.0.1:7000/messaging/distributedlog*.
+
+::
+
+        // dlog local ${zk-port}
+        ./distributedlog-core/bin/dlog local 7000
+
+
+Create the stream
+-----------------
+
+::
+
+        // Create Stream `basic-stream-1`
+        // dlog tool create -u ${distributedlog-uri} -r ${stream-prefix} -e ${stream-regex}
+        ./distributedlog-core/bin/dlog tool create -u distributedlog://127.0.0.1:7000/messaging/distributedlog -r basic-stream- -e 1
+
+
+Tail the stream
+---------------
+
+Tail the stream using `TailReader` to wait for new records.
+
+::
+
+        // Tailing Stream `basic-stream-1`
+        // runner run org.apache.distributedlog.basic.TailReader ${distributedlog-uri} ${stream}
+        ./distributedlog-tutorials/distributedlog-basic/bin/runner run org.apache.distributedlog.basic.TailReader distributedlog://127.0.0.1:7000/messaging/distributedlog basic-stream-1
+
+
+Write records
+-------------
+
+Run the example to write records to the stream in a console.
+
+::
+
+        // Write Records into Stream `basic-stream-1`
+        // runner run org.apache.distributedlog.basic.ConsoleWriter ${distributedlog-uri} ${stream}
+        ./distributedlog-tutorials/distributedlog-basic/bin/runner run org.apache.distributedlog.basic.ConsoleWriter distributedlog://127.0.0.1:7000/messaging/distributedlog basic-stream-1
+
+
+Check the results
+-----------------
+
+Example output from `ConsoleWriter` and `TailReader`.
+
+::
+
+        // Output of `ConsoleWriter`
+        Opening log stream basic-stream-1
+        [dlog] > test!
+        [dlog] >
+
+
+        // Output of `TailReader`
+        Opening log stream basic-stream-1
+        Log stream basic-stream-1 is empty.
+        Wait for records starting from DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        """
+        test!
+        """
+
+Attempt a second writer 
+-----------------------
+
+Open another terminal and run `ConsoleWriter` again. It will fail with
+`OwnershipAcquireFailedException` because the previous `ConsoleWriter` is
+still holding the lock on stream `basic-stream-1`.
+
+::
+
+        Opening log stream basic-stream-1
+        Exception in thread "main" 
org.apache.distributedlog.exceptions.OwnershipAcquireFailedException: LockPath 
- /messaging/distributedlog/basic-stream-1/<default>/lock: Lock acquisition 
failed, the current owner is console-writer
+            at 
org.apache.distributedlog.lock.ZKSessionLock$8.apply(ZKSessionLock.java:570)
+            at 
org.apache.distributedlog.lock.ZKSessionLock$8.apply(ZKSessionLock.java:567)
+            at 
com.twitter.util.Future$$anonfun$map$1$$anonfun$apply$8.apply(Future.scala:1041)
+            at com.twitter.util.Try$.apply(Try.scala:13)
+            at com.twitter.util.Future$.apply(Future.scala:132)
+            at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:1041)
+            at com.twitter.util.Future$$anonfun$map$1.apply(Future.scala:1040)
+            at 
com.twitter.util.Promise$Transformer.liftedTree1$1(Promise.scala:112)
+            at com.twitter.util.Promise$Transformer.k(Promise.scala:112)
+            at com.twitter.util.Promise$Transformer.apply(Promise.scala:122)
+            at com.twitter.util.Promise$Transformer.apply(Promise.scala:103)
+            at com.twitter.util.Promise$$anon$1.run(Promise.scala:357)
+            at 
com.twitter.concurrent.LocalScheduler$Activation.run(Scheduler.scala:178)
+            at 
com.twitter.concurrent.LocalScheduler$Activation.submit(Scheduler.scala:136)
+            at 
com.twitter.concurrent.LocalScheduler.submit(Scheduler.scala:207)
+            at com.twitter.concurrent.Scheduler$.submit(Scheduler.scala:92)
+            at com.twitter.util.Promise.runq(Promise.scala:350)
+            at com.twitter.util.Promise.updateIfEmpty(Promise.scala:716)
+            at com.twitter.util.Promise.update(Promise.scala:694)
+            at com.twitter.util.Promise.setValue(Promise.scala:670)
+            at 
org.apache.distributedlog.lock.ZKSessionLock$9.safeRun(ZKSessionLock.java:622)
+            at 
org.apache.bookkeeper.util.SafeRunnable.run(SafeRunnable.java:31)
+            at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
+            at java.util.concurrent.FutureTask.run(FutureTask.java:262)
+            at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:178)
+            at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:292)
+            at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
+            at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
+            at java.lang.Thread.run(Thread.java:745) 
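+
+If your application issues writes through the asynchronous API instead of the console tool,
+the same ownership failure can surface as the failure cause of a write future. Below is a
+minimal sketch, assuming a `writeFuture` obtained from an asynchronous write issued earlier
+in this tutorial (the variable name is illustrative):
+
+::
+
+        import org.apache.distributedlog.DLSN;
+        import org.apache.distributedlog.exceptions.OwnershipAcquireFailedException;
+        import com.twitter.util.FutureEventListener;
+
+        // `writeFuture` is assumed to be the Future<DLSN> returned by an earlier write.
+        writeFuture.addEventListener(new FutureEventListener<DLSN>() {
+            @Override
+            public void onFailure(Throwable cause) {
+                if (cause instanceof OwnershipAcquireFailedException) {
+                    // Another writer currently owns the lock on this stream;
+                    // back off or fail over rather than retrying immediately.
+                }
+            }
+
+            @Override
+            public void onSuccess(DLSN dlsn) {
+                // The record was written at `dlsn`.
+            }
+        });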

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/tutorials/basic-2.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/tutorials/basic-2.rst 
b/website/docs/0.4.0-incubating/tutorials/basic-2.rst
new file mode 100644
index 0000000..ef4c7b6
--- /dev/null
+++ b/website/docs/0.4.0-incubating/tutorials/basic-2.rst
@@ -0,0 +1,221 @@
+---
+title: API - Write Records (via write proxy)
+top-nav-group: quickstart
+top-nav-pos: 3
+top-nav-title: API - Write Records (via write proxy)
+layout: default
+---
+
+.. contents:: Basic Tutorial - Using Proxy Client to write records
+
+Basic Tutorial - Write Records using Write Proxy Client
+=======================================================
+
+This tutorial shows how to write records using the write proxy client.
+
+.. sectnum::
+
+Open a write proxy client
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Create write proxy client builder
+---------------------------------
+
+::
+
+        DistributedLogClientBuilder builder = DistributedLogClientBuilder.newBuilder()
+                .clientId(ClientId.apply("console-proxy-writer"))
+                .name("console-proxy-writer");
+
+
+Enable thrift mux
+-----------------
+
+::
+
+        builder = builder.thriftmux(true);
+
+
+Point the client to write proxy using finagle name
+--------------------------------------------------
+
+::
+
+        String finagleNameStr = "inet!127.0.0.1:8000";
+        builder = builder.finagleNameStr(finagleNameStr);
+
+
+Build the write proxy client
+----------------------------
+
+::
+
+        DistributedLogClient client = builder.build();
+
+
+Write Records
+~~~~~~~~~~~~~
+
+Write records to a stream 
+-------------------------
+
+The application does not have to provide a `TransactionID` when writing;
+the `TransactionID` of a record is assigned by the write proxy.
+
+::
+
+        String streamName = "basic-stream-2";
+        byte[] data = ...;
+        Future<DLSN> writeFuture = client.write(streamName, 
ByteBuffer.wrap(data));
+
+
+Register the write callback
+---------------------------
+
+Register a future listener on write completion. The listener will be notified once the write is completed.
+
+::
+
+        writeFuture.addEventListener(new FutureEventListener<DLSN>() {
+            @Override
+            public void onFailure(Throwable cause) {
+                // executed when write failed.
+            }
+
+            @Override
+            public void onSuccess(DLSN value) {
+                // executed when write completed.
+            }
+        });
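+
+Alternatively, if a blocking style fits the application better, the returned `Future<DLSN>`
+can be awaited directly. A minimal sketch, assuming `com.twitter.util.Await` is on the
+classpath and that blocking the calling thread is acceptable:
+
+::
+
+        import com.twitter.util.Await;
+
+        // Block until the write proxy acknowledges the write;
+        // throws an exception if the write fails.
+        DLSN dlsn = Await.result(writeFuture);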
+
+
+Close the write proxy client
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Close the write proxy client after usage.
+
+::
+
+        client.close();
+
+
+Run the tutorial
+~~~~~~~~~~~~~~~~
+
+Run the example in the following steps:
+
+Start the local bookkeeper cluster
+----------------------------------
+
+You can use the following command to start the distributedlog stack locally.
+After the distributedlog cluster is started, you can access it using the
+distributedlog URI *distributedlog://127.0.0.1:7000/messaging/distributedlog*.
+
+::
+
+        // dlog local ${zk-port}
+        ./distributedlog-core/bin/dlog local 7000
+
+
+Start the write proxy
+---------------------
+
+Start the write proxy, listening on port 8000.
+
+::
+
+        // DistributedLogServerApp -p ${service-port} --shard-id ${shard-id} 
-sp ${stats-port} -u {distributedlog-uri} -mx -c ${conf-file}
+        ./distributedlog-service/bin/dlog 
org.apache.distributedlog.service.DistributedLogServerApp -p 8000 --shard-id 1 
-sp 8001 -u distributedlog://127.0.0.1:7000/messaging/distributedlog -mx -c 
${distributedlog-repo}/distributedlog-service/conf/distributedlog_proxy.conf
+
+
+Create the stream
+-----------------
+
+Create the stream under the distributedlog uri.
+
+::
+
+        // Create Stream `basic-stream-2`
+        // dlog tool create -u ${distributedlog-uri} -r ${stream-prefix} -e 
${stream-regex}
+        ./distributedlog-core/bin/dlog tool create -u 
distributedlog://127.0.0.1:7000/messaging/distributedlog -r basic-stream- -e 2
+
+
+Tail the stream
+---------------
+
+Tail the stream using `TailReader` to wait for new records.
+
+::
+
+        // Tailing Stream `basic-stream-2`
+        // runner run org.apache.distributedlog.basic.TailReader 
${distributedlog-uri} ${stream}
+        ./distributedlog-tutorials/distributedlog-basic/bin/runner run 
org.apache.distributedlog.basic.TailReader 
distributedlog://127.0.0.1:7000/messaging/distributedlog basic-stream-2
+
+
+Write records
+-------------
+
+Run the example to write records to the stream from a console.
+
+::
+
+        // Write Records into Stream `basic-stream-2`
+        // runner run org.apache.distributedlog.basic.ConsoleProxyWriter 
${distributedlog-uri} ${stream}
+        ./distributedlog-tutorials/distributedlog-basic/bin/runner run 
org.apache.distributedlog.basic.ConsoleProxyWriter 'inet!127.0.0.1:8000' 
basic-stream-2
+
+
+Check the results
+-----------------
+
+Example output from `ConsoleProxyWriter` and `TailReader`.
+
+::
+
+        // Output of `ConsoleProxyWriter`
+        May 08, 2016 10:27:41 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[inet] = 
com.twitter.finagle.InetResolver(com.twitter.finagle.InetResolver@756d7bba)
+        May 08, 2016 10:27:41 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[fixedinet] = 
com.twitter.finagle.FixedInetResolver(com.twitter.finagle.FixedInetResolver@1d2e91f5)
+        May 08, 2016 10:27:41 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[neg] = 
com.twitter.finagle.NegResolver$(com.twitter.finagle.NegResolver$@5c707aca)
+        May 08, 2016 10:27:41 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[nil] = 
com.twitter.finagle.NilResolver$(com.twitter.finagle.NilResolver$@5c8d932f)
+        May 08, 2016 10:27:41 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[fail] = 
com.twitter.finagle.FailResolver$(com.twitter.finagle.FailResolver$@52ba2221)
+        May 08, 2016 10:27:41 AM com.twitter.finagle.Init$$anonfun$1 
apply$mcV$sp
+        [dlog] > test-proxy-writer
+        [dlog] >
+
+
+        // Output of `TailReader`
+        Opening log stream basic-stream-2
+        Log stream basic-stream-2 is empty.
+        Wait for records starting from DLSN{logSegmentSequenceNo=1, entryId=0, 
slotId=0}
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        """
+        test-proxy-writer
+        """
+
+
+Attempt a second writer
+-----------------------
+
+Open another terminal and run `ConsoleProxyWriter` again. The write should succeed, since the
+write proxy is able to accept fan-in writes. Please check out the `Considerations` section to
+see the difference between **Write Ordering** and **Read Ordering**.
+
+::
+
+         May 08, 2016 10:31:54 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+         INFO: Resolver[inet] = 
com.twitter.finagle.InetResolver(com.twitter.finagle.InetResolver@756d7bba)
+         May 08, 2016 10:31:54 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+         INFO: Resolver[fixedinet] = 
com.twitter.finagle.FixedInetResolver(com.twitter.finagle.FixedInetResolver@1d2e91f5)
+         May 08, 2016 10:31:54 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+         INFO: Resolver[neg] = 
com.twitter.finagle.NegResolver$(com.twitter.finagle.NegResolver$@5c707aca)
+         May 08, 2016 10:31:54 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+         INFO: Resolver[nil] = 
com.twitter.finagle.NilResolver$(com.twitter.finagle.NilResolver$@5c8d932f)
+         May 08, 2016 10:31:54 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+         INFO: Resolver[fail] = 
com.twitter.finagle.FailResolver$(com.twitter.finagle.FailResolver$@52ba2221)
+         May 08, 2016 10:31:54 AM com.twitter.finagle.Init$$anonfun$1 
apply$mcV$sp
+         [dlog] > test-write-proxy-message-2
+         [dlog] >

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/tutorials/basic-3.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/tutorials/basic-3.rst 
b/website/docs/0.4.0-incubating/tutorials/basic-3.rst
new file mode 100644
index 0000000..55de7b0
--- /dev/null
+++ b/website/docs/0.4.0-incubating/tutorials/basic-3.rst
@@ -0,0 +1,280 @@
+---
+title: API - Write Records to Multiple Streams
+layout: default
+---
+
+.. contents:: Basic Tutorial - Write Records to Multiple Streams
+
+Write Records to Multiple Streams
+=================================
+
+This tutorial shows how to write records using the write proxy multi stream writer.
+The `DistributedLogMultiStreamWriter` is a wrapper over `DistributedLogClient` that writes
+records to a set of streams in a `round-robin` fashion and ensures low write latency even
+when ownership of a single stream fails over.
+
+.. sectnum::
+
+Open a write proxy client
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before anything else, you have to open a write proxy client to write records.
+Follow these steps to open a write proxy client.
+
+Create write proxy client builder
+---------------------------------
+
+::
+
+        DistributedLogClientBuilder builder = 
DistributedLogClientBuilder.newBuilder()
+                .clientId(ClientId.apply("console-proxy-writer"))
+                .name("console-proxy-writer");
+
+
+Enable thrift mux
+-----------------
+
+::
+
+        builder = builder.thriftmux(true);
+
+
+Point the client to write proxy using finagle name
+--------------------------------------------------
+
+::
+
+        String finagleNameStr = "inet!127.0.0.1:8000";
+        builder = builder.finagleNameStr(finagleNameStr);
+
+
+Build the write proxy client
+----------------------------
+
+::
+
+        DistributedLogClient client = builder.build();
+
+
+Create a `MultiStreamWriter`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Create multi stream writer builder
+----------------------------------
+
+::
+
+        DistributedLogMultiStreamWriterBuilder builder = 
DistributedLogMultiStreamWriter.newBuilder();
+
+
+Build the writer to write a set of streams
+------------------------------------------
+
+::
+
+        List<String> streams = ...;
+        builder = builder.streams(streams);
+
+
+Point the multi stream writer to use write proxy client
+-------------------------------------------------------
+
+::
+
+        builder = builder.client(client);
+
+
+Configure the flush policy for the multi stream writer
+------------------------------------------------------
+
+::
+
+        // transmit immediately after a record is written.
+        builder = builder.bufferSize(0);
+        builder = builder.flushIntervalMs(0);
+
+
+Configure the request timeouts and retry policy for the multi stream writer
+---------------------------------------------------------------------------
+
+::
+
+        // Configure the speculative timeouts - if writing to a stream cannot
+        // complete within the speculative timeout, the writer will try writing
+        // to another stream.
+        builder = builder.firstSpeculativeTimeoutMs(10000);
+        builder = builder.maxSpeculativeTimeoutMs(20000);
+        // Configure the request timeout.
+        builder = builder.requestTimeoutMs(50000);
+
+
+Build the multi writer
+----------------------
+
+::
+
+        DistributedLogMultiStreamWriter writer = builder.build();
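+
+Putting the builder calls from the preceding steps together, the same writer can also be
+constructed as a single chained expression. A sketch using the same settings and the
+`streams` and `client` variables built above:
+
+::
+
+        DistributedLogMultiStreamWriter writer = DistributedLogMultiStreamWriter.newBuilder()
+                .streams(streams)                   // the set of streams to write to
+                .client(client)                     // the write proxy client built earlier
+                .bufferSize(0)                      // transmit immediately after each record
+                .flushIntervalMs(0)
+                .firstSpeculativeTimeoutMs(10000)   // speculative timeouts
+                .maxSpeculativeTimeoutMs(20000)
+                .requestTimeoutMs(50000)            // overall request timeout
+                .build();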
+
+
+Write Records
+~~~~~~~~~~~~~
+
+Write records to multiple streams
+---------------------------------
+
+::
+
+        byte[] data = ...;
+        Future<DLSN> writeFuture = writer.write(ByteBuffer.wrap(data));
+
+
+Register the write callback
+---------------------------
+
+Register a future listener on write completion.
+
+::
+
+        writeFuture.addEventListener(new FutureEventListener<DLSN>() {
+            @Override
+            public void onFailure(Throwable cause) {
+                // executed when write failed.
+            }
+
+            @Override
+            public void onSuccess(DLSN value) {
+                // executed when write completed.
+            }
+        });
+
+
+Run the tutorial
+~~~~~~~~~~~~~~~~
+
+Run the example in the following steps:
+
+Start the local bookkeeper cluster
+----------------------------------
+
+You can use the following command to start the distributedlog stack locally.
+After the distributedlog cluster is started, you can access it using the
+distributedlog URI *distributedlog://127.0.0.1:7000/messaging/distributedlog*.
+
+::
+
+        // dlog local ${zk-port}
+        ./distributedlog-core/bin/dlog local 7000
+
+
+Start the write proxy
+---------------------
+
+Start the write proxy, listening on port 8000.
+
+::
+
+        // DistributedLogServerApp -p ${service-port} --shard-id ${shard-id} 
-sp ${stats-port} -u {distributedlog-uri} -mx -c ${conf-file}
+        ./distributedlog-service/bin/dlog 
org.apache.distributedlog.service.DistributedLogServerApp -p 8000 --shard-id 1 
-sp 8001 -u distributedlog://127.0.0.1:7000/messaging/distributedlog -mx -c 
${distributedlog-repo}/distributedlog-service/conf/distributedlog_proxy.conf
+
+
+Create multiple streams
+-----------------------
+
+Create multiple streams under the distributedlog uri.
+
+::
+
+        // Create Stream `basic-stream-{3-7}`
+        // dlog tool create -u ${distributedlog-uri} -r ${stream-prefix} -e 
${stream-regex}
+        ./distributedlog-core/bin/dlog tool create -u 
distributedlog://127.0.0.1:7000/messaging/distributedlog -r basic-stream- -e 3-7
+
+
+Tail the streams
+----------------
+
+Tail the streams using `MultiReader` to wait for new records.
+
+::
+
+        // Tailing Stream `basic-stream-{3-7}`
+        // runner run org.apache.distributedlog.basic.MultiReader 
${distributedlog-uri} ${stream}[,${stream}]
+        ./distributedlog-tutorials/distributedlog-basic/bin/runner run 
org.apache.distributedlog.basic.MultiReader 
distributedlog://127.0.0.1:7000/messaging/distributedlog 
basic-stream-3,basic-stream-4,basic-stream-5,basic-stream-6,basic-stream-7
+
+
+Write the records
+-----------------
+
+Run the example to write records to multiple streams from a console.
+
+::
+
+        // Write Records into Stream `basic-stream-{3-7}`
+        // runner run org.apache.distributedlog.basic.ConsoleProxyMultiWriter 
${distributedlog-uri} ${stream}[,${stream}]
+        ./distributedlog-tutorials/distributedlog-basic/bin/runner run 
org.apache.distributedlog.basic.ConsoleProxyMultiWriter 'inet!127.0.0.1:8000' 
basic-stream-3,basic-stream-4,basic-stream-5,basic-stream-6,basic-stream-7
+
+Check the results
+-----------------
+
+Example output from `ConsoleProxyMultiWriter` and `MultiReader`.
+
+::
+
+        // Output of `ConsoleProxyMultiWriter`
+        May 08, 2016 11:09:21 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[inet] = 
com.twitter.finagle.InetResolver(com.twitter.finagle.InetResolver@fbb628c)
+        May 08, 2016 11:09:21 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[fixedinet] = 
com.twitter.finagle.FixedInetResolver(com.twitter.finagle.FixedInetResolver@5a25adb1)
+        May 08, 2016 11:09:21 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[neg] = 
com.twitter.finagle.NegResolver$(com.twitter.finagle.NegResolver$@5fae6db3)
+        May 08, 2016 11:09:21 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[nil] = 
com.twitter.finagle.NilResolver$(com.twitter.finagle.NilResolver$@34a433d8)
+        May 08, 2016 11:09:21 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[fail] = 
com.twitter.finagle.FailResolver$(com.twitter.finagle.FailResolver$@847c4e8)
+        May 08, 2016 11:09:22 AM com.twitter.finagle.Init$$anonfun$1 
apply$mcV$sp
+        [dlog] > message-1
+        [dlog] > message-2
+        [dlog] > message-3
+        [dlog] > message-4
+        [dlog] > message-5
+        [dlog] >
+
+
+        // Output of `MultiReader`
+        Opening log stream basic-stream-3
+        Opening log stream basic-stream-4
+        Opening log stream basic-stream-5
+        Opening log stream basic-stream-6
+        Opening log stream basic-stream-7
+        Log stream basic-stream-4 is empty.
+        Wait for records from basic-stream-4 starting from 
DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        Open reader to read records from stream basic-stream-4
+        Log stream basic-stream-5 is empty.
+        Wait for records from basic-stream-5 starting from 
DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        Open reader to read records from stream basic-stream-5
+        Log stream basic-stream-6 is empty.
+        Wait for records from basic-stream-6 starting from 
DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        Open reader to read records from stream basic-stream-6
+        Log stream basic-stream-3 is empty.
+        Wait for records from basic-stream-3 starting from 
DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        Open reader to read records from stream basic-stream-3
+        Log stream basic-stream-7 is empty.
+        Wait for records from basic-stream-7 starting from 
DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        Open reader to read records from stream basic-stream-7
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0} from 
stream basic-stream-4
+        """
+        message-1
+        """
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0} from 
stream basic-stream-6
+        """
+        message-2
+        """
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0} from 
stream basic-stream-3
+        """
+        message-3
+        """
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0} from 
stream basic-stream-7
+        """
+        message-4
+        """
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0} from 
stream basic-stream-5
+        """
+        message-5
+        """

http://git-wip-us.apache.org/repos/asf/incubator-distributedlog/blob/3469fc87/website/docs/0.4.0-incubating/tutorials/basic-4.rst
----------------------------------------------------------------------
diff --git a/website/docs/0.4.0-incubating/tutorials/basic-4.rst 
b/website/docs/0.4.0-incubating/tutorials/basic-4.rst
new file mode 100644
index 0000000..bcbacd1
--- /dev/null
+++ b/website/docs/0.4.0-incubating/tutorials/basic-4.rst
@@ -0,0 +1,241 @@
+---
+title: API - Atomic Write Multiple Records
+layout: default
+---
+
+.. contents:: Basic Tutorial - Write Multiple Records Atomically using the Write Proxy Client
+
+Write Multiple Records Atomically using the Write Proxy Client
+===============================================================
+
+This tutorial shows how to write multiple records atomically using the write proxy client.
+
+.. sectnum::
+
+Open a write proxy client
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Create write proxy client builder
+---------------------------------
+
+::
+
+        DistributedLogClientBuilder builder = DistributedLogClientBuilder.newBuilder()
+                .clientId(ClientId.apply("atomic-writer"))
+                .name("atomic-writer");
+
+
+Enable thrift mux
+-----------------
+
+::
+
+        builder = builder.thriftmux(true);
+
+
+Point the client to write proxy using finagle name
+--------------------------------------------------
+
+::
+
+        String finagleNameStr = "inet!127.0.0.1:8000";
+        builder = builder.finagleNameStr(finagleNameStr);
+
+
+Build the write proxy client
+----------------------------
+
+::
+
+        DistributedLogClient client = builder.build();
+
+
+Write Records
+~~~~~~~~~~~~~
+
+Create a RecordSet
+------------------
+
+Create a `RecordSet` for multiple records. The `RecordSet` has an initial `16KB` buffer
+and its compression codec is `NONE`.
+
+::
+
+        LogRecordSet.Writer recordSetWriter = LogRecordSet.newWriter(16 * 
1024, Type.NONE);
+
+
+Write multiple records
+----------------------
+
+Write multiple records into the `RecordSet`.
+
+::
+
+        for (String msg : messages) {
+            ByteBuffer msgBuf = ByteBuffer.wrap(msg.getBytes(UTF_8));
+            Promise<DLSN> writeFuture = new Promise<DLSN>();
+            recordSetWriter.writeRecord(msgBuf, writeFuture);
+        }
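+
+The per-record promise passed to `writeRecord` gives the application a handle on each
+individual record. If the application wants to observe where each record lands, the
+promises can be collected; a minimal sketch, under the assumption that the promises are
+satisfied once the record set transmit is completed via the callback registered later in
+this tutorial (the `recordFutures` list is illustrative):
+
+::
+
+        // Collect the per-record promises so each record's DLSN can be observed
+        // after the record set transmit completes.
+        List<Future<DLSN>> recordFutures = new ArrayList<Future<DLSN>>();
+        for (String msg : messages) {
+            ByteBuffer msgBuf = ByteBuffer.wrap(msg.getBytes(UTF_8));
+            Promise<DLSN> writeFuture = new Promise<DLSN>();
+            recordSetWriter.writeRecord(msgBuf, writeFuture);
+            recordFutures.add(writeFuture);
+        }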
+
+
+Write the RecordSet
+-------------------
+
+Write the `RecordSet` to a stream.
+
+::
+
+        String streamName = "basic-stream-8";
+        Future<DLSN> writeFuture = client.writeRecordSet(streamName, 
recordSetWriter);
+
+
+Register the write callback
+---------------------------
+
+Register a future listener on write completion. The listener will be notified once the write is completed.
+
+::
+
+        writeFuture.addEventListener(new FutureEventListener<DLSN>() {
+            @Override
+            public void onFailure(Throwable cause) {
+                // executed when write failed.
+                recordSetWriter.abortTransmit(cause);
+            }
+
+            @Override
+            public void onSuccess(DLSN dlsn) {
+                // executed when write completed.
+                recordSetWriter.completeTransmit(
+                        dlsn.getLogSegmentSequenceNo(),
+                        dlsn.getEntryId(),
+                        dlsn.getSlotId());
+            }
+        });
+
+
+Close the write proxy client
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Close the write proxy client after usage.
+
+::
+
+        client.close();
+
+
+Run the tutorial
+~~~~~~~~~~~~~~~~
+
+Run the example in the following steps:
+
+Start the local bookkeeper cluster
+----------------------------------
+
+You can use the following command to start the distributedlog stack locally.
+After the distributedlog cluster is started, you can access it using the
+distributedlog URI *distributedlog://127.0.0.1:7000/messaging/distributedlog*.
+
+::
+
+        // dlog local ${zk-port}
+        ./distributedlog-core/bin/dlog local 7000
+
+
+Start the write proxy
+---------------------
+
+Start the write proxy, listening on port 8000.
+
+::
+
+        // DistributedLogServerApp -p ${service-port} --shard-id ${shard-id} 
-sp ${stats-port} -u {distributedlog-uri} -mx -c ${conf-file}
+        ./distributedlog-service/bin/dlog 
org.apache.distributedlog.service.DistributedLogServerApp -p 8000 --shard-id 1 
-sp 8001 -u distributedlog://127.0.0.1:7000/messaging/distributedlog -mx -c 
${distributedlog-repo}/distributedlog-service/conf/distributedlog_proxy.conf
+
+
+Create the stream
+-----------------
+
+Create the stream under the distributedlog uri.
+
+::
+
+        // Create Stream `basic-stream-8`
+        // dlog tool create -u ${distributedlog-uri} -r ${stream-prefix} -e 
${stream-regex}
+        ./distributedlog-core/bin/dlog tool create -u 
distributedlog://127.0.0.1:7000/messaging/distributedlog -r basic-stream- -e 8
+
+
+Tail the stream
+---------------
+
+Tail the stream using `TailReader` to wait for new records.
+
+::
+
+        // Tailing Stream `basic-stream-8`
+        // runner run org.apache.distributedlog.basic.TailReader 
${distributedlog-uri} ${stream}
+        ./distributedlog-tutorials/distributedlog-basic/bin/runner run 
org.apache.distributedlog.basic.TailReader 
distributedlog://127.0.0.1:7000/messaging/distributedlog basic-stream-8
+
+
+Write records
+-------------
+
+Run the example to write multiple records to the stream.
+
+::
+
+        // Write Records into Stream `basic-stream-8`
+        // runner run org.apache.distributedlog.basic.AtomicWriter 
${distributedlog-uri} ${stream} ${message}[, ${message}]
+        ./distributedlog-tutorials/distributedlog-basic/bin/runner run 
org.apache.distributedlog.basic.AtomicWriter 'inet!127.0.0.1:8000' 
basic-stream-8 "message-1" "message-2" "message-3" "message-4" "message-5"
+
+
+Check the results
+-----------------
+
+Example output from `AtomicWriter` and `TailReader`.
+
+::
+
+        // Output of `AtomicWriter`
+        May 08, 2016 11:48:19 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[inet] = 
com.twitter.finagle.InetResolver(com.twitter.finagle.InetResolver@6c3e459e)
+        May 08, 2016 11:48:19 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[fixedinet] = 
com.twitter.finagle.FixedInetResolver(com.twitter.finagle.FixedInetResolver@4d5698f)
+        May 08, 2016 11:48:19 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[neg] = 
com.twitter.finagle.NegResolver$(com.twitter.finagle.NegResolver$@57052dc3)
+        May 08, 2016 11:48:19 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[nil] = 
com.twitter.finagle.NilResolver$(com.twitter.finagle.NilResolver$@14ff89d7)
+        May 08, 2016 11:48:19 AM 
com.twitter.finagle.BaseResolver$$anonfun$resolvers$1 apply
+        INFO: Resolver[fail] = 
com.twitter.finagle.FailResolver$(com.twitter.finagle.FailResolver$@14b28d06)
+        May 08, 2016 11:48:19 AM com.twitter.finagle.Init$$anonfun$1 
apply$mcV$sp
+        Write 'message-1' as record DLSN{logSegmentSequenceNo=1, entryId=0, 
slotId=0}
+        Write 'message-2' as record DLSN{logSegmentSequenceNo=1, entryId=0, 
slotId=1}
+        Write 'message-3' as record DLSN{logSegmentSequenceNo=1, entryId=0, 
slotId=2}
+        Write 'message-4' as record DLSN{logSegmentSequenceNo=1, entryId=0, 
slotId=3}
+        Write 'message-5' as record DLSN{logSegmentSequenceNo=1, entryId=0, 
slotId=4}
+
+
+        // Output of `TailReader`
+        Opening log stream basic-stream-8
+        Log stream basic-stream-8 is empty.
+        Wait for records starting from DLSN{logSegmentSequenceNo=1, entryId=0, 
slotId=0}
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=0}
+        """
+        message-1
+        """
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=1}
+        """
+        message-2
+        """
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=2}
+        """
+        message-3
+        """
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=3}
+        """
+        message-4
+        """
+        Received record DLSN{logSegmentSequenceNo=1, entryId=0, slotId=4}
+        """
+        message-5
+        """
