eagle-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From h..@apache.org
Subject [53/84] eagle git commit: Merge site source code from https://github.com/geteagle/eaglemonitoring.github.io
Date Mon, 03 Apr 2017 11:55:01 GMT
http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/js/jquery.singlePageNav.js
----------------------------------------------------------------------
diff --git a/eagle-site/js/jquery.singlePageNav.js b/eagle-site/js/jquery.singlePageNav.js
new file mode 100755
index 0000000..f779ad0
--- /dev/null
+++ b/eagle-site/js/jquery.singlePageNav.js
@@ -0,0 +1,183 @@
+/**
+ * Single Page Nav Plugin
+ * Copyright (c) 2013 Chris Wojcik <hello@chriswojcik.net>
+ * Dual licensed under MIT and GPL.
+ * @author Chris Wojcik
+ * @version 1.1.0
+ */
+
+// Utility: Object.create polyfill for pre-ES5 browsers (e.g. IE8).
+// Returns a new object whose prototype is `obj`; used below so each
+// matched element gets its own copy of the SinglePageNav object.
+if (typeof Object.create !== 'function') {
+    Object.create = function(obj) {
+        function F() {}
+        F.prototype = obj;
+        return new F();
+    };
+}
+
+(function($, window, document, undefined) {
+    "use strict";
+    
+    // Plugin core. One copy of this object is created per matched
+    // container element (via Object.create in $.fn.singlePageNav below).
+    var SinglePageNav = {
+        
+        // Merge options, collect the container's anchor links, bind the
+        // click handler, highlight the section currently in view, and
+        // start the scroll-watching timer.
+        init: function(options, container) {
+            
+            this.options = $.extend({}, $.fn.singlePageNav.defaults, options);
+            
+            this.container = container;            
+            this.$container = $(container);
+            this.$links = this.$container.find('a');
+
+            // Optionally restrict which anchors the plugin manages.
+            if (this.options.filter !== '') {
+                this.$links = this.$links.filter(this.options.filter);
+            }
+
+            this.$window = $(window);
+            // 'html, body' covers scrollTop handling differences across browsers.
+            this.$htmlbody = $('html, body');
+            
+            this.$links.on('click.singlePageNav', $.proxy(this.handleClick, this));
+
+            this.didScroll = false;
+            this.checkPosition();
+            this.setTimer();
+        },
+
+        // Click handler: smooth-scroll to the clicked link's target
+        // section instead of jumping, then optionally update the URL hash.
+        handleClick: function(e) {
+            var self  = this,
+                link  = e.currentTarget,
+                $elem = $(link.hash);  
+
+            e.preventDefault();             
+
+            if ($elem.length) { // Make sure the target elem exists
+
+                
+                // Prevent active link from cycling during the scroll
+                self.clearTimer();
+
+                // Before scrolling starts
+                if (typeof self.options.beforeStart === 'function') {
+                    self.options.beforeStart();
+                }
+
+                self.setActiveLink(link.hash);
+                
+                self.scrollTo($elem, function() { 
+                 
+                    // Setting the hash after the animation avoids the
+                    // browser's default instant jump to the anchor.
+                    if (self.options.updateHash) {
+                        document.location.hash = link.hash;
+                    }
+
+                    self.setTimer();
+
+                    // After scrolling ends
+                    if (typeof self.options.onComplete === 'function') {
+                        self.options.onComplete();
+                    }
+                });                            
+            }     
+        },
+        
+        // Animate the page scroll to $elem's position (minus the
+        // configured offset), invoking `callback` once when done.
+        scrollTo: function($elem, callback) {
+            var self = this;
+            var target = self.getCoords($elem).top;
+            var called = false;
+
+            // `called` guards the callback: animating 'html, body' fires
+            // `complete` once per element, but the callback must run once.
+            self.$htmlbody.stop().animate(
+                {scrollTop: target}, 
+                { 
+                    duration: self.options.speed,
+                    easing: self.options.easing, 
+                    complete: function() {
+                        if (typeof callback === 'function' && !called) {
+                            callback();
+                        }
+                        called = true;
+                    }
+                }
+            );
+        },
+        
+        // Watch scrolling cheaply: the scroll handler only sets a flag,
+        // and a 250ms interval does the (heavier) position check. This
+        // throttles checkPosition() during rapid scroll events.
+        setTimer: function() {
+            var self = this;
+            
+            self.$window.on('scroll.singlePageNav', function() {
+                self.didScroll = true;
+            });
+            
+            self.timer = setInterval(function() {
+                if (self.didScroll) {
+                    self.didScroll = false;
+                    self.checkPosition();
+                }
+            }, 250);
+        },        
+        
+        // Stop watching scroll position (used while an animated scroll
+        // is in progress so the active link doesn't flicker).
+        clearTimer: function() {
+            clearInterval(this.timer);
+            this.$window.off('scroll.singlePageNav');
+            this.didScroll = false;
+        },
+        
+        // Check the scroll position and set the active section
+        checkPosition: function() {
+            var scrollPos = this.$window.scrollTop();
+            var currentSection = this.getCurrentSection(scrollPos);
+            this.setActiveLink(currentSection);
+        },        
+        
+        // Document-relative top coordinate of $elem, adjusted by the
+        // configured pixel offset (e.g. for a fixed header).
+        getCoords: function($elem) {
+            return {
+                top: Math.round($elem.offset().top) - this.options.offset
+            };
+        },
+        
+        // Move the `currentClass` to the link whose href matches `href`.
+        setActiveLink: function(href) {
+            var $activeLink = this.$container.find("a[href='" + href + "']");
+                            
+            if (!$activeLink.hasClass(this.options.currentClass)) {
+                this.$links.removeClass(this.options.currentClass);
+                $activeLink.addClass(this.options.currentClass);
+            }
+        },        
+        
+        // Return the hash of the last section whose top (minus the
+        // threshold) is above the current scroll position.
+        getCurrentSection: function(scrollPos) {
+            var i, hash, coords, section;
+            
+            for (i = 0; i < this.$links.length; i++) {
+                hash = this.$links[i].hash;
+                
+                if ($(hash).length) {
+                    coords = this.getCoords($(hash));
+                    
+                    if (scrollPos >= coords.top - this.options.threshold) {
+                        section = hash;
+                    }
+                }
+            }
+            
+            // The current section or the first link
+            return section || this.$links[0].hash;
+        }
+    };
+    
+    // jQuery plugin entry point: attach an independent SinglePageNav
+    // instance to each element in the matched set.
+    $.fn.singlePageNav = function(options) {
+        return this.each(function() {
+            var singlePageNav = Object.create(SinglePageNav);
+            singlePageNav.init(options, this);
+        });
+    };
+    
+    // Default options; overridable per-call or via data-crsl-style usage.
+    $.fn.singlePageNav.defaults = {
+        offset: 0,
+        threshold: 120,
+        speed: 400,
+        currentClass: 'current',
+        easing: 'swing',
+        updateHash: false,
+        filter: '',
+        onComplete: false,
+        beforeStart: false
+    };
+    
+})(jQuery, window, document);
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/js/modernizr.min.js
----------------------------------------------------------------------
diff --git a/eagle-site/js/modernizr.min.js b/eagle-site/js/modernizr.min.js
new file mode 100755
index 0000000..f54037e
--- /dev/null
+++ b/eagle-site/js/modernizr.min.js
@@ -0,0 +1 @@
+window.Modernizr=function(e,t,n){function r(e,t){var n=e.charAt(0).toUpperCase()+e.substr(1),r=(e+" "+C.join(n+" ")+n).split(" ");return o(r,t)}function o(e,t){for(var r in e)if(v[e[r]]!==n)return"pfx"==t?e[r]:!0;return!1}function i(e,t){return!!~(""+e).indexOf(t)}function a(e,t){return typeof e===t}function s(e,t){return c(x.join(e+";")+(t||""))}function c(e){v.cssText=e}var l,u,f,d="2.0.6",p={},m=!0,h=t.documentElement,g=(t.head||t.getElementsByTagName("head")[0],"modernizr"),y=t.createElement(g),v=y.style,b=":)",x=(Object.prototype.toString," -webkit- -moz- -o- -ms- -khtml- ".split(" ")),C="Webkit Moz O ms Khtml".split(" "),E={},S=[],T=function(e,n,r,o){var i,a,s,c=t.createElement("div");if(parseInt(r,10))for(;r--;)s=t.createElement("div"),s.id=o?o[r]:g+(r+1),c.appendChild(s);return i=["&shy;","<style>",e,"</style>"].join(""),c.id=g,c.innerHTML+=i,h.appendChild(c),a=n(c,e),c.parentNode.removeChild(c),!!a},w={}.hasOwnProperty;f=a(w,n)||a(w.call,n)?function(e,t){return t in e&&a(e.
 constructor.prototype[t],n)}:function(e,t){return w.call(e,t)};!function(e,n){var r=e.join(""),o=n.length;T(r,function(e,n){for(var r=t.styleSheets[t.styleSheets.length-1],i=r.cssRules&&r.cssRules[0]?r.cssRules[0].cssText:r.cssText||"",a=e.childNodes,s={};o--;)s[a[o].id]=a[o];p.csstransforms3d=9===s.csstransforms3d.offsetLeft,p.generatedcontent=s.generatedcontent.offsetHeight>=1,p.fontface=/src/i.test(i)&&0===i.indexOf(n.split(" ")[0])},o,n)}(['@font-face {font-family:"font";src:url("https://")}',["@media (",x.join("transform-3d),("),g,")","{#csstransforms3d{left:9px;position:absolute}}"].join(""),['#generatedcontent:after{content:"',b,'";visibility:hidden}'].join("")],["fontface","csstransforms3d","generatedcontent"]);E.flexbox=function(){function e(e,t,n,r){e.style.cssText=x.join(t+":"+n+";")+(r||"")}function n(e,t,n,r){t+=":",e.style.cssText=(t+x.join(n+";"+t)).slice(0,-t.length)+(r||"")}var r=t.createElement("div"),o=t.createElement("div");n(r,"display","box","width:42px;padding
 :0;"),e(o,"box-flex","1","width:10px;"),r.appendChild(o),h.appendChild(r);var i=42===o.offsetWidth;return r.removeChild(o),h.removeChild(r),i},E.rgba=function(){return c("background-color:rgba(150,255,150,.5)"),i(v.backgroundColor,"rgba")},E.hsla=function(){return c("background-color:hsla(120,40%,100%,.5)"),i(v.backgroundColor,"rgba")||i(v.backgroundColor,"hsla")},E.multiplebgs=function(){return c("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(v.background)},E.backgroundsize=function(){return r("backgroundSize")},E.borderimage=function(){return r("borderImage")},E.borderradius=function(){return r("borderRadius")},E.boxshadow=function(){return r("boxShadow")},E.textshadow=function(){return""===t.createElement("div").style.textShadow},E.opacity=function(){return s("opacity:.55"),/^0.55$/.test(v.opacity)},E.cssanimations=function(){return r("animationName")},E.csscolumns=function(){return r("columnCount")},E.cssgradients=function(){var e="background
 -image:",t="gradient(linear,left top,right bottom,from(#9f9),to(white));",n="linear-gradient(left top,#9f9, white);";return c((e+x.join(t+e)+x.join(n+e)).slice(0,-e.length)),i(v.backgroundImage,"gradient")},E.cssreflections=function(){return r("boxReflect")},E.csstransforms=function(){return!!o(["transformProperty","WebkitTransform","MozTransform","OTransform","msTransform"])},E.csstransforms3d=function(){var e=!!o(["perspectiveProperty","WebkitPerspective","MozPerspective","OPerspective","msPerspective"]);return e&&"webkitPerspective"in h.style&&(e=p.csstransforms3d),e},E.csstransitions=function(){return r("transitionProperty")},E.fontface=function(){return p.fontface},E.generatedcontent=function(){return p.generatedcontent};for(var j in E)f(E,j)&&(u=j.toLowerCase(),p[u]=E[j](),S.push((p[u]?"":"no-")+u));return c(""),y=l=null,e.attachEvent&&function(){var e=t.createElement("div");return e.innerHTML="<elem></elem>",1!==e.childNodes.length}()&&function(e,t){function r(e){for(var t=-1
 ;++t<c;)e.createElement(s[t])}e.iepp=e.iepp||{};var o,i=e.iepp,a=i.html5elements||"abbr|article|aside|audio|canvas|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",s=a.split("|"),c=s.length,l=new RegExp("(^|\\s)("+a+")","gi"),u=new RegExp("<(/*)("+a+")","gi"),f=/^\s*[\{\}]\s*$/,d=new RegExp("(^|[^\\n]*?\\s)("+a+")([^\\n]*)({[\\n\\w\\W]*?})","gi"),p=t.createDocumentFragment(),m=t.documentElement,h=m.firstChild,g=t.createElement("body"),y=t.createElement("style"),v=/print|all/;i.getCSS=function(e,t){if(e+""===n)return"";for(var r,o=-1,a=e.length,s=[];++o<a;)r=e[o],r.disabled||(t=r.media||t,v.test(t)&&s.push(i.getCSS(r.imports,t),r.cssText),t="all");return s.join("")},i.parseCSS=function(e){for(var t,n=[];null!=(t=d.exec(e));)n.push(((f.exec(t[1])?"\n":t[1])+t[2]+t[3]).replace(l,"$1.iepp_$2")+t[4]);return n.join("\n")},i.writeHTML=function(){var e=-1;for(o=o||t.body;++e<c;)for(var n=t.getElementsByTagName(s[e]),r=n.lengt
 h,i=-1;++i<r;)n[i].className.indexOf("iepp_")<0&&(n[i].className+=" iepp_"+s[e]);p.appendChild(o),m.appendChild(g),g.className=o.className,g.id=o.id,g.innerHTML=o.innerHTML.replace(u,"<$1font")},i._beforePrint=function(){y.styleSheet.cssText=i.parseCSS(i.getCSS(t.styleSheets,"all")),i.writeHTML()},i.restoreHTML=function(){g.innerHTML="",m.removeChild(g),m.appendChild(o)},i._afterPrint=function(){i.restoreHTML(),y.styleSheet.cssText=""},r(t),r(p),i.disablePP||(h.insertBefore(y,h.firstChild),y.media="print",y.className="iepp-printshim",e.attachEvent("onbeforeprint",i._beforePrint),e.attachEvent("onafterprint",i._afterPrint))}(e,t),p._version=d,p._prefixes=x,p._domPrefixes=C,p.testProp=function(e){return o([e])},p.testAllProps=r,p.testStyles=T,h.className=h.className.replace(/\bno-js\b/,"")+(m?" js "+S.join(" "):""),p}(this,this.document),function(e,t,n){function r(e){return!e||"loaded"==e||"complete"==e}function o(){for(var e=1,t=-1;y.length-++t&&(!y[t].s||(e=y[t].r)););e&&s()}functio
 n i(e){var n,i=t.createElement("script");i.src=e.s,i.onreadystatechange=i.onload=function(){!n&&r(i.readyState)&&(n=1,o(),i.onload=i.onreadystatechange=null)},m(function(){n||(n=1,o())},d.errorTimeout),e.e?i.onload():h.parentNode.insertBefore(i,h)}function a(e){var n,r=t.createElement("link");if(r.href=e.s,r.rel="stylesheet",r.type="text/css",e.e||!S&&!b)r.onload=function(){n||(n=1,m(function(){o()},0))},e.e&&r.onload();else{var i=function(e){m(function(){if(!n)try{e.sheet.cssRules.length?(n=1,o()):i(e)}catch(t){1e3==t.code||"security"==t.message||"denied"==t.message?(n=1,m(function(){o()},0)):i(e)}},0)};i(r)}m(function(){n||(n=1,o())},d.errorTimeout),!e.e&&h.parentNode.insertBefore(r,h)}function s(){var e=y.shift();v=1,e?e.t?m(function(){"c"==e.t?a(e):i(e)},0):(e(),o()):v=0}function c(e,n,i,a,c,l){function u(){!p&&r(f.readyState)&&(g.r=p=1,!v&&o(),f.onload=f.onreadystatechange=null,m(function(){C.removeChild(f)},0))}var f=t.createElement(e),p=0,g={t:i,s:n,e:l};f.src=f.data=n,!x&&(f
 .style.display="none"),f.width=f.height="0","object"!=e&&(f.type=i),f.onload=f.onreadystatechange=u,"img"==e?f.onerror=u:"script"==e&&(f.onerror=function(){g.e=g.r=1,s()}),y.splice(a,0,g),C.insertBefore(f,x?null:h),m(function(){p||(C.removeChild(f),g.r=g.e=p=1,o())},d.errorTimeout)}function l(e,t,n){var r="c"==t?j:w;return v=0,t=t||"j",N(e)?c(r,e,t,this.i++,p,n):(y.splice(this.i++,0,e),1==y.length&&s()),this}function u(){var e=d;return e.loader={load:l,i:0},e}var f,d,p=t.documentElement,m=e.setTimeout,h=t.getElementsByTagName("script")[0],g={}.toString,y=[],v=0,b="MozAppearance"in p.style,x=b&&!!t.createRange().compareNode,C=x?p:h.parentNode,E=e.opera&&"[object Opera]"==g.call(e.opera),S="webkitAppearance"in p.style,T=S&&"async"in t.createElement("script"),w=b?"object":E||T?"img":"script",j=S?"img":w,k=Array.isArray||function(e){return"[object Array]"==g.call(e)},P=function(e){return Object(e)===e},N=function(e){return"string"==typeof e},M=function(e){return"[object Function]"==g.ca
 ll(e)},L=[],O={};d=function(e){function t(e){var t,n,r=e.split("!"),o=L.length,i=r.pop(),a=r.length,s={url:i,origUrl:i,prefixes:r};for(n=0;a>n;n++)t=O[r[n]],t&&(s=t(s));for(n=0;o>n;n++)s=L[n](s);return s}function r(e,r,o,i,a){var s=t(e),c=s.autoCallback;if(!s.bypass){if(r&&(r=M(r)?r:r[e]||r[i]||r[e.split("/").pop().split("?")[0]]),s.instead)return s.instead(e,r,o,i,a);o.load(s.url,s.forceCSS||!s.forceJS&&/css$/.test(s.url)?"c":n,s.noexec),(M(r)||M(c))&&o.load(function(){u(),r&&r(s.origUrl,a,i),c&&c(s.origUrl,a,i)})}}function o(e,t){function n(e){if(N(e))r(e,c,t,0,i);else if(P(e))for(o in e)e.hasOwnProperty(o)&&r(e[o],c,t,o,i)}var o,i=!!e.test,a=i?e.yep:e.nope,s=e.load||e.both,c=e.callback;n(a),n(s),e.complete&&t.load(e.complete)}var i,a,s=this.yepnope.loader;if(N(e))r(e,0,s,0);else if(k(e))for(i=0;i<e.length;i++)a=e[i],N(a)?r(a,0,s,0):k(a)?d(a):P(a)&&o(a,s);else P(e)&&o(e,s)},d.addPrefix=function(e,t){O[e]=t},d.addFilter=function(e){L.push(e)},d.errorTimeout=1e4,null==t.readyState&&
 t.addEventListener&&(t.readyState="loading",t.addEventListener("DOMContentLoaded",f=function(){t.removeEventListener("DOMContentLoaded",f,0),t.readyState="complete"},0)),e.yepnope=u()}(this,this.document),Modernizr.load=function(){yepnope.apply(window,[].slice.call(arguments,0))};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/js/responsiveCarousel.min.js
----------------------------------------------------------------------
diff --git a/eagle-site/js/responsiveCarousel.min.js b/eagle-site/js/responsiveCarousel.min.js
new file mode 100755
index 0000000..e42631b
--- /dev/null
+++ b/eagle-site/js/responsiveCarousel.min.js
@@ -0,0 +1,7 @@
+/*! responsiveCarousel.JS - v1.2.0
+ * http://basilio.github.com/responsiveCarousel
+ *
+ * Copyright (c) 2013 Basilio Cáceres <basilio.caceres@gmail.com>;
+ * Licensed under the MIT license */
+
+(function(e){"use strict";e.fn.carousel=function(t){var n,r;n={infinite:true,visible:1,speed:"fast",overflow:false,autoRotate:false,navigation:e(this).data("navigation"),itemMinWidth:0,itemEqualHeight:false,itemMargin:0,itemClassActive:"crsl-active",imageWideClass:"wide-image",carousel:true};return e(this).each(function(){r=e(this);if(e.isEmptyObject(t)===false)e.extend(n,t);if(e.isEmptyObject(e(r).data("crsl"))===false)e.extend(n,e(r).data("crsl"));n.isTouch="ontouchstart"in document.documentElement||navigator.userAgent.match(/Android|BlackBerry|iPhone|iPad|iPod|Opera Mini|IEMobile/i)?true:false;r.init=function(){n.total=e(r).find(".crsl-item").length;n.itemWidth=e(r).outerWidth();n.visibleDefault=n.visible;n.swipeDistance=null;n.swipeMinDistance=100;n.startCoords={};n.endCoords={};e(r).css({width:"100%"});e(r).find(".crsl-item").css({position:"relative","float":"left",overflow:"hidden",height:"auto"});e(r).find("."+n.imageWideClass).each(function(){e(this).css({display:"block",wid
 th:"100%",height:"auto"})});e(r).find(".crsl-item iframe").attr({width:"100%"});if(n.carousel)e(r).find(".crsl-item:first-child").addClass(n.itemClassActive);if(n.carousel&&n.infinite&&n.visible<n.total)e(r).find(".crsl-item:first-child").before(e(".crsl-item:last-child",r));if(n.overflow===false){e(r).css({overflow:"hidden"})}else{e("html, body").css({"overflow-x":"hidden"})}e(r).trigger("initCarousel",[n,r]);r.testPreload();r.config();r.initRotate();r.triggerNavs()};r.testPreload=function(){if(e(r).find("img").length>0){var t=e(r).find("img").length,i=1;e(r).find("img").each(function(){r.preloadImage(this,i,t);i++})}else{e(r).trigger("loadedCarousel",[n,r])}};r.preloadImage=function(t,i,s){var o=new Image,u={};u.src=e(t).attr("src")!==undefined?t.src:"";u.alt=e(t).attr("alt")!==undefined?t.alt:"";e(o).attr(u);e(o).on("load",function(){if(i===1)e(r).trigger("loadingImagesCarousel",[n,r]);if(i===s)e(r).trigger("loadedImagesCarousel",[n,r])})};r.config=function(){n.itemWidth=Math.flo
 or((e(r).outerWidth()-n.itemMargin*(n.visibleDefault-1))/n.visibleDefault);if(n.itemWidth<=n.itemMinWidth){n.visible=Math.floor((e(r).outerWidth()-n.itemMargin*(n.visible-1))/n.itemMinWidth)===1?Math.floor(e(r).outerWidth()/n.itemMinWidth):Math.floor((e(r).outerWidth()-n.itemMargin)/n.itemMinWidth);n.visible=n.visible<1?1:n.visible;n.itemWidth=n.visible===1?Math.floor(e(r).outerWidth()):Math.floor((e(r).outerWidth()-n.itemMargin*(n.visible-1))/n.visible)}else{n.visible=n.visibleDefault}if(n.carousel){r.wrapWidth=Math.floor((n.itemWidth+n.itemMargin)*n.total);r.wrapMargin=r.wrapMarginDefault=n.infinite&&n.visible<n.total?parseInt((n.itemWidth+n.itemMargin)*-1,10):0;if(n.infinite&&n.visible<n.total&&e(r).find(".crsl-item."+n.itemClassActive).index()===0){e(r).find(".crsl-item:first-child").before(e(".crsl-item:last-child",r));r.wrapMargin=r.wrapMarginDefault=parseInt((n.itemWidth+n.itemMargin)*-1,10)}e(r).find(".crsl-wrap").css({width:r.wrapWidth+"px",marginLeft:r.wrapMargin})}else{r.
 wrapWidth=e(r).outerWidth();e(r).find(".crsl-wrap").css({width:r.wrapWidth+n.itemMargin+"px"});e("#"+n.navigation).hide()}e(r).find(".crsl-item").css({width:n.itemWidth+"px",marginRight:n.itemMargin+"px"});r.equalHeights();if(n.carousel){if(n.visible>=n.total){n.autoRotate=false;e("#"+n.navigation).hide()}else{e("#"+n.navigation).show()}}};r.equalHeights=function(){if(n.itemEqualHeight!==false){var t=0;e(r).find(".crsl-item").each(function(){e(this).css({height:"auto"});if(e(this).outerHeight()>t){t=e(this).outerHeight()}});e(r).find(".crsl-item").css({height:t+"px"})}return true};r.initRotate=function(){if(n.autoRotate!==false){r.rotateTime=window.setInterval(function(){r.rotate()},n.autoRotate)}};r.triggerNavs=function(){e("#"+n.navigation).delegate(".previous, .next","click",function(t){t.preventDefault();r.prepareExecute();if(e(this).hasClass("previous")&&r.testPrevious(r.itemActive)){r.previous()}else if(e(this).hasClass("next")&&r.testNext()){r.next()}else{return}})};r.prepare
 Execute=function(){if(n.autoRotate){clearInterval(r.rotateTime)}r.preventAnimateEvent();r.itemActive=e(r).find(".crsl-item."+n.itemClassActive);return true};r.preventAnimateEvent=function(){if(e(r).find(".crsl-wrap:animated").length>0){return false}};r.rotate=function(){r.preventAnimateEvent();r.itemActive=e(r).find(".crsl-item."+n.itemClassActive);r.next();return true};r.testPrevious=function(t){return e(".crsl-wrap",r).find(".crsl-item").index(t)>0};r.testNext=function(){return!n.infinite&&r.wrapWidth>=(n.itemWidth+n.itemMargin)*(n.visible+1)-r.wrapMargin||n.infinite};r.previous=function(){r.wrapMargin=n.infinite?r.wrapMarginDefault+e(r.itemActive).outerWidth(true):r.wrapMargin+e(r.itemActive).outerWidth(true);var t=e(r.itemActive).index();var i=e(r.itemActive).prev(".crsl-item");var s="previous";e(r).trigger("beginCarousel",[n,r,s]);e(r).find(".crsl-wrap").animate({marginLeft:r.wrapMargin+"px"},n.speed,function(){e(r.itemActive).removeClass(n.itemClassActive);e(i).addClass(n.item
 ClassActive);if(n.infinite){e(this).css({marginLeft:r.wrapMarginDefault}).find(".crsl-item:first-child").before(e(".crsl-item:last-child",r))}else{if(r.testPrevious(i)===false)e("#"+n.navigation).find(".previous").addClass("previous-inactive");if(r.testNext())e("#"+n.navigation).find(".next").removeClass("next-inactive")}e(this).trigger("endCarousel",[n,r,s])})};r.next=function(){r.wrapMargin=n.infinite?r.wrapMarginDefault-e(r.itemActive).outerWidth(true):r.wrapMargin-e(r.itemActive).outerWidth(true);var t=e(r.itemActive).index();var i=e(r.itemActive).next(".crsl-item");var s="next";e(r).trigger("beginCarousel",[n,r,s]);e(r).find(".crsl-wrap").animate({marginLeft:r.wrapMargin+"px"},n.speed,function(){e(r.itemActive).removeClass(n.itemClassActive);e(i).addClass(n.itemClassActive);if(n.infinite){e(this).css({marginLeft:r.wrapMarginDefault}).find(".crsl-item:last-child").after(e(".crsl-item:first-child",r))}else{if(r.testPrevious(i))e("#"+n.navigation).find(".previous").removeClass("pr
 evious-inactive");if(r.testNext()===false)e("#"+n.navigation).find(".next").addClass("next-inactive")}e(this).trigger("endCarousel",[n,r,s])})};var i=false,s;e(window).on("mouseleave",function(t){if(t.target)s=t.target;else if(t.srcElement)s=t.srcElement;if(e(r).attr("id")&&e(s).parents(".crsl-items").attr("id")===e(r).attr("id")||e(s).parents(".crsl-items").data("navigation")===e(r).data("navigation")){i=true}else{i=false}return false});e(window).on("keydown",function(e){if(i===true){if(e.keyCode===37){r.prepareExecute();r.previous()}else if(e.keyCode===39){r.prepareExecute();r.next()}}return});if(n.isTouch){e(r).on("touchstart",function(t){e(r).addClass("touching");n.startCoords=t.originalEvent.targetTouches[0];n.endCoords=t.originalEvent.targetTouches[0];e(".touching").on("touchmove",function(e){n.endCoords=e.originalEvent.targetTouches[0];if(Math.abs(parseInt(n.endCoords.pageX-n.startCoords.pageX,10))>Math.abs(parseInt(n.endCoords.pageY-n.startCoords.pageY,10))){e.preventDefault
 ();e.stopPropagation()}})}).on("touchend",function(t){t.preventDefault();t.stopPropagation();n.swipeDistance=n.endCoords.pageX-n.startCoords.pageX;if(n.swipeDistance>=n.swipeMinDistance){r.previous()}else if(n.swipeDistance<=-n.swipeMinDistance){r.next()}e(".touching").off("touchmove").removeClass("touching")})}e(r).on("loadedCarousel loadedImagesCarousel",function(){r.equalHeights()});e(window).on("carouselResizeEnd",function(){if(n.itemWidth!==e(r).outerWidth())r.config()});e(window).ready(function(){e(r).trigger("prepareCarousel",[n,r]);r.init();e(window).on("resize",function(){if(this.carouselResizeTo)clearTimeout(this.carouselResizeTo);this.carouselResizeTo=setTimeout(function(){e(this).trigger("carouselResizeEnd")},10)})});e(window).load(function(){r.testPreload();r.config()})})}})(jQuery)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/js/svg.js
----------------------------------------------------------------------
diff --git a/eagle-site/js/svg.js b/eagle-site/js/svg.js
new file mode 100755
index 0000000..939f301
--- /dev/null
+++ b/eagle-site/js/svg.js
@@ -0,0 +1,24 @@
+!function(a){function b(b,d){function e(){if(w){$canvas=a('<canvas class="pg-canvas"></canvas>'),v.prepend($canvas),p=$canvas[0],q=p.getContext("2d"),f();for(var b=Math.round(p.width*p.height/d.density),c=0;b>c;c++){var e=new l;e.setStackPos(c),x.push(e)}a(window).on("resize",function(){h()}),a(document).on("mousemove",function(a){y=a.pageX,z=a.pageY}),B&&!A&&window.addEventListener("deviceorientation",function(){D=Math.min(Math.max(-event.beta,-30),30),C=Math.min(Math.max(-event.gamma,-30),30)},!0),g(),o("onInit")}}function f(){p.width=v.width(),p.height=v.height(),q.fillStyle=d.dotColor,q.strokeStyle=d.lineColor,q.lineWidth=d.lineWidth}function g(){if(w){s=a(window).width(),t=a(window).height(),q.clearRect(0,0,p.width,p.height);for(var b=0;b<x.length;b++)x[b].updatePosition();for(var b=0;b<x.length;b++)x[b].draw();E||(r=requestAnimationFrame(g))}}function h(){for(f(),i=x.length-1;i>=0;i--)(x[i].position.x>v.width()||x[i].position.y>v.height())&&x.splice(i,1);var a=Math.round(p.wid
 th*p.height/d.density);if(a>x.length)for(;a>x.length;){var b=new l;x.push(b)}else a<x.length&&x.splice(a);for(i=x.length-1;i>=0;i--)x[i].setStackPos(i)}function j(){E=!0}function k(){E=!1,g()}function l(){switch(this.stackPos,this.active=!0,this.layer=Math.ceil(3*Math.random()),this.parallaxOffsetX=0,this.parallaxOffsetY=0,this.position={x:Math.ceil(Math.random()*p.width),y:Math.ceil(Math.random()*p.height)},this.speed={},d.directionX){case"left":this.speed.x=+(-d.maxSpeedX+Math.random()*d.maxSpeedX-d.minSpeedX).toFixed(2);break;case"right":this.speed.x=+(Math.random()*d.maxSpeedX+d.minSpeedX).toFixed(2);break;default:this.speed.x=+(-d.maxSpeedX/2+Math.random()*d.maxSpeedX).toFixed(2),this.speed.x+=this.speed.x>0?d.minSpeedX:-d.minSpeedX}switch(d.directionY){case"up":this.speed.y=+(-d.maxSpeedY+Math.random()*d.maxSpeedY-d.minSpeedY).toFixed(2);break;case"down":this.speed.y=+(Math.random()*d.maxSpeedY+d.minSpeedY).toFixed(2);break;default:this.speed.y=+(-d.maxSpeedY/2+Math.random()*d
 .maxSpeedY).toFixed(2),this.speed.x+=this.speed.y>0?d.minSpeedY:-d.minSpeedY}}function m(a,b){return b?void(d[a]=b):d[a]}function n(){v.find(".pg-canvas").remove(),o("onDestroy"),v.removeData("plugin_"+c)}function o(a){void 0!==d[a]&&d[a].call(u)}var p,q,r,s,t,u=b,v=a(b),w=!!document.createElement("canvas").getContext,x=[],y=0,z=0,A=!navigator.userAgent.match(/(iPhone|iPod|iPad|Android|BlackBerry|BB10|mobi|tablet|opera mini|nexus 7)/i),B=!!window.DeviceOrientationEvent,C=0,D=0,E=!1;return d=a.extend({},a.fn[c].defaults,d),l.prototype.draw=function(){q.beginPath(),q.arc(this.position.x+this.parallaxOffsetX,this.position.y+this.parallaxOffsetY,d.particleRadius/2,0,2*Math.PI,!0),q.closePath(),q.fill(),q.beginPath();for(var a=x.length-1;a>this.stackPos;a--){var b=x[a],c=this.position.x-b.position.x,e=this.position.y-b.position.y,f=Math.sqrt(c*c+e*e).toFixed(2);f<d.proximity&&(q.moveTo(this.position.x+this.parallaxOffsetX,this.position.y+this.parallaxOffsetY),d.curvedLines?q.quadraticCur
 veTo(Math.max(b.position.x,b.position.x),Math.min(b.position.y,b.position.y),b.position.x+b.parallaxOffsetX,b.position.y+b.parallaxOffsetY):q.lineTo(b.position.x+b.parallaxOffsetX,b.position.y+b.parallaxOffsetY))}q.stroke(),q.closePath()},l.prototype.updatePosition=function(){if(d.parallax){if(B&&!A){var a=(s-0)/60;pointerX=(C- -30)*a+0;var b=(t-0)/60;pointerY=(D- -30)*b+0}else pointerX=y,pointerY=z;this.parallaxTargX=(pointerX-s/2)/(d.parallaxMultiplier*this.layer),this.parallaxOffsetX+=(this.parallaxTargX-this.parallaxOffsetX)/10,this.parallaxTargY=(pointerY-t/2)/(d.parallaxMultiplier*this.layer),this.parallaxOffsetY+=(this.parallaxTargY-this.parallaxOffsetY)/10}switch(d.directionX){case"left":this.position.x+this.speed.x+this.parallaxOffsetX<0&&(this.position.x=v.width()-this.parallaxOffsetX);break;case"right":this.position.x+this.speed.x+this.parallaxOffsetX>v.width()&&(this.position.x=0-this.parallaxOffsetX);break;default:(this.position.x+this.speed.x+this.parallaxOffsetX>v.wid
 th()||this.position.x+this.speed.x+this.parallaxOffsetX<0)&&(this.speed.x=-this.speed.x)}switch(d.directionY){case"up":this.position.y+this.speed.y+this.parallaxOffsetY<0&&(this.position.y=v.height()-this.parallaxOffsetY);break;case"down":this.position.y+this.speed.y+this.parallaxOffsetY>v.height()&&(this.position.y=0-this.parallaxOffsetY);break;default:(this.position.y+this.speed.y+this.parallaxOffsetY>v.height()||this.position.y+this.speed.y+this.parallaxOffsetY<0)&&(this.speed.y=-this.speed.y)}this.position.x+=this.speed.x,this.position.y+=this.speed.y},l.prototype.setStackPos=function(a){this.stackPos=a},e(),{option:m,destroy:n,start:k,pause:j}}var c="particleground";a.fn[c]=function(d){if("string"==typeof arguments[0]){var e,f=arguments[0],g=Array.prototype.slice.call(arguments,1);return this.each(function(){a.data(this,"plugin_"+c)&&"function"==typeof a.data(this,"plugin_"+c)[f]&&(e=a.data(this,"plugin_"+c)[f].apply(this,g))}),void 0!==e?e:this}return"object"!=typeof d&&d?void
  0:this.each(function(){a.data(this,"plugin_"+c)||a.data(this,"plugin_"+c,new b(this,d))})},a.fn[c].defaults={minSpeedX:.1,maxSpeedX:.7,minSpeedY:.1,maxSpeedY:.7,directionX:"center",directionY:"center",density:1e4,dotColor:"#666666",lineColor:"#666666",particleRadius:7,lineWidth:1,curvedLines:!1,proximity:100,parallax:!0,parallaxMultiplier:5,onInit:function(){},onDestroy:function(){}}}(jQuery),
+
+function(){for(var a=0,b=["ms","moz","webkit","o"],c=0;c<b.length&&!window.requestAnimationFrame;++c)window.requestAnimationFrame=window[b[c]+"RequestAnimationFrame"],window.cancelAnimationFrame=window[b[c]+"CancelAnimationFrame"]||window[b[c]+"CancelRequestAnimationFrame"];window.requestAnimationFrame||(window.requestAnimationFrame=function(b){var c=(new Date).getTime(),d=Math.max(0,16-(c-a)),e=window.setTimeout(function(){b(c+d)},d);return a=c+d,e}),window.cancelAnimationFrame||(window.cancelAnimationFrame=function(a){clearTimeout(a)})}();
+
+// On DOM ready, start the particleground background animation on any
+// element with class "particles" (plugin defined in the minified code above).
+$(function(){
+            
+    $('.particles').particleground({
+        minSpeedX: 0.1,
+        maxSpeedX: 0.7,
+        minSpeedY: 0.1,
+        maxSpeedY: 0.7,
+        directionX: 'center', // 'center', 'left' or 'right'. 'center' = dots bounce off edges
+        directionY: 'center', // 'center', 'up' or 'down'. 'center' = dots bounce off edges
+        density: 10000, // How many particles will be generated: one particle every n pixels
+        dotColor: '#eee',
+        lineColor: '#eee',
+        particleRadius: 7, // Dot size
+        lineWidth: 1,
+        curvedLines: true,
+        proximity: 100, // How close two dots need to be before they join
+        parallax: false
+    });
+
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/mapr-integration.md
----------------------------------------------------------------------
diff --git a/eagle-site/mapr-integration.md b/eagle-site/mapr-integration.md
new file mode 100644
index 0000000..94f4312
--- /dev/null
+++ b/eagle-site/mapr-integration.md
@@ -0,0 +1,131 @@
+---
+layout: doc
+title:  "MapR Integration"
+permalink: /docs/mapr-integration.html
+---
+
+*Since Apache Eagle 0.4.0-incubating. Apache Eagle will be called Eagle in the following.*
+
+### Prerequisites
+
+To get maprFSAuditLog monitoring started, we need to:
+
+* Enable audit logs on MapR from MapR's terminal
+* Create a logstash conf file to send audit logs to Kafka[^KAFKA]
+* Initialize metadata for maprFSAuditLog and enable the application
+
+Here are the steps to follow:   
+
+#### Step1: Enable audit logs for FileSystem Operations and Table Operations in MapR
+First we need to enable data auditing at all three levels: cluster level, volume level and directory, file or table level. 
+##### Cluster level: 
+
+~~~      
+       $ maprcli audit data -cluster <cluster name> -enabled true 
+                           [ -maxsize <GB, default value is 32. When the size of audit logs exceeds this number, an alarm will be sent to the dashboard in the MapR Control Service > ]
+                           [ -retention <number of Days> ]
+~~~
+Example:
+
+~~~
+        $ maprcli audit data -cluster mapr.cluster.com -enabled true -maxsize 30 -retention 30
+~~~
+
+
+
+##### Volume level:
+
+~~~      
+       $ maprcli volume audit -cluster <cluster name> -enabled true 
+                            -name <volume name>
+                            [ -coalesce <interval in minutes, the interval of time during which READ, WRITE, or GETATTR operations on one file from one client IP address are logged only once, if auditing is enabled> ]
+~~~
+
+Example:
+
+~~~
+        $ maprcli volume audit -cluster mapr.cluster.com -name mapr.tmp -enabled true
+~~~
+
+To verify that auditing is enabled for a particular volume, use this command:
+
+~~~
+        $ maprcli volume info -name <volume name> -json | grep -i 'audited\|coalesce'
+~~~
+and you should see something like this:
+
+~~~
+                        "audited":1,
+                        "coalesceInterval":60
+~~~
+If "audited" is '1' then auditing is enabled for this volume.
+
+
+
+##### Directory, file, or MapR-DB table level:
+
+~~~
+        $ hadoop mfs -setaudit on <directory|file|table>
+~~~
+
+To check whether Auditing is Enabled for a Directory, File, or MapR-DB Table, use ``$ hadoop mfs -ls``
+Example:
+Before enabling the audit log on file ``/tmp/dir``, run ``$ hadoop mfs -ls /tmp/dir``; you should see something like this:
+
+~~~
+drwxr-xr-x Z U U   - root root          0 2016-03-02 15:02  268435456 /tmp/dir
+               p 2050.32.131328  mapr2.da.dg:5660 mapr1.da.dg:5660
+~~~
+
+The second ``U`` means auditing on this file is not enabled. 
+Enable auditing with this command: 
+
+~~~
+$ hadoop mfs -setaudit on /tmp/dir
+~~~
+
+Then check the auditing bit with : 
+
+~~~
+$ hadoop mfs -ls /tmp/dir
+~~~
+
+you should see something like this:
+
+~~~
+drwxr-xr-x Z U A   - root root          0 2016-03-02 15:02  268435456 /tmp/dir
+               p 2050.32.131328  mapr2.da.dg:5660 mapr1.da.dg:5660
+~~~
+
+We can see the previous ``U`` has been changed to ``A`` which indicates auditing on this file is enabled.
+  
+``Important``:
+When auditing is enabled on a directory, the directories/files already located in it won't inherit auditing, but any file/dir created in it after auditing was enabled will.
+
+
+
+#### Step2: Stream log data into Kafka by using Logstash
+As MapR does not have a name node (it uses the CLDB service instead), we have to use logstash to stream log data into Kafka.
+- First find out the nodes that have CLDB service
+- Then find out the location of audit log files, eg: ``/mapr/mapr.cluster.com/var/mapr/local/mapr1.da.dg/audit/``, file names should be in this format: ``FSAudit.log-2016-05-04-001.json`` 
+- Create a logstash conf file and run it, following this doc: [Logstash-kafka](https://github.com/apache/eagle/blob/master/eagle-assembly/src/main/docs/logstash-kafka-conf.md)
+
+
+#### Step3: Set up maprFSAuditLog application in Eagle Service
+After Eagle Service gets started, create the maprFSAuditLog application using:  ``$ ./maprFSAuditLog-init.sh``. By default it will create maprFSAuditLog in site "sandbox"; you may need to change it to your own site.
+After these steps you are good to go.
+
+Have fun!!! :)
+
+### Reference Links
+1. [Enable Auditing in MapR](http://doc.mapr.com/display/MapR/Enabling+Auditing)
+2. [MapR audit logs](http://doc.mapr.com/display/MapR/Audit+Logs+for+Filesystem+Operations+and+Table+Operations)
+
+
+
+---
+
+#### *Footnotes*
+
+[^KAFKA]:*All mentions of "kafka" on this page represent Apache Kafka.*
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/metadata-api.md
----------------------------------------------------------------------
diff --git a/eagle-site/metadata-api.md b/eagle-site/metadata-api.md
new file mode 100644
index 0000000..79bb189
--- /dev/null
+++ b/eagle-site/metadata-api.md
@@ -0,0 +1,188 @@
+---
+layout: doc
+title:  "Policy API"
+permalink: /docs/metadata-api.html
+---
+
+Apache Eagle Provide RESTful APIs for create/update/query/delete policy for alert
+
+* Policy Definition API  
+* Stream Definition API  
+
+------  
+
+### Policy Definition API  
+
+------  
+
+#### **Create/Update Policy Example**      
+
+URL               |||    http://host:port/eagle-service/rest/entities?serviceName=AlertDefinitionService   
+METHOD            |||    POST
+HEADERS           |||    "Content-Type:application/json"   
+                  |||    "Authorization:Basic encodedusrpwd"  (encodedusrpwd is base64 encoded string for "user:password")  
+DATA              |||    [{  
+                  |||    &nbsp;&nbsp;"tags": {  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "site": "sandbox",  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "dataSource": "hdfsAuditLog",  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "policyId": "testPolicy",  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "alertExecutorId": "hdfsAuditLogAlertExecutor",  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "policyType": "siddhiCEPEngine"  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp;},  
+                  |||    &nbsp;&nbsp;"desc": "test alert policy",  
+                  |||    &nbsp;&nbsp;"policyDef": "{\"type\":\"siddhiCEPEngine\",\"expression\":\"from hdfsAuditLogEventStream[src =='/tmp/private'] select * insert into outputStream;\"}",  
+                  |||    &nbsp;&nbsp;"notificationDef": "[{
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "sender":"noreply-eagle@company.com",
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "recipients":"user@company.com",
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "subject":"test alert policy",
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "flavor":"email",
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "id":"email_1"
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp;}]",  
+                  |||    &nbsp;&nbsp;"enabled": true  
+                  |||    }]  
+
+**Field Specification**  
+
+Tags             |||    All Tags form the key for alert policy  
+                 |||    1) site: Which site is the policy for? e.g. sandbox  
+                 |||    2) dataSource: From which dataSource the policy consume from; e.g. hdfsAuditLog  
+                 |||    3) policyId  
+                 |||    4) alertExecutorId: Within which executor will the policy be executed e.g. hdfsAuditLog  
+                 |||    5) policyType: Which engine should the policy be executed with e.g. siddhiCEPEngine  
+policyDef        |||    Definition for the policy, tell  
+                 |||    1) which engine the policy should be executed with  
+                 |||    2) The policy expression to be evaluated  
+notificationDef  |||    Currently we only support email notification for alert, below are fields of alert definition  
+                 |||    1) sender: Email Sender  
+                 |||    2) recipients: Email Recipient  
+                 |||    3) subject: Email Subject  
+                 |||    4) flavor: way of notification, currently only supports "email"  
+                 |||    5) id: notification id  
+enabled          |||    If the alert is enabled, true/false  
+desc             |||    Description of the policy  
+  
+**Response Body**  
+{  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"meta": {  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;     "elapsedms": 11,  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;     "totalResults": 1  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;},  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"success": true,  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"obj": [  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;     "YEktKX_____62aP_6x97yoSv3B0ANd9Hby--xyCZKe1hk6BkS9hcZXeJk1Je-7-Mrq0lGQ"  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;],  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"type": "java.lang.String"  
+}  
+
+------ 
+
+#### **Get Policy Example**  
+
+URL               |||    http://host:port/eagle-service/rest/list?query=AlertDefinitionService[@dataSource="hdfsAuditLog" AND @site="sandbox"]{*}&pageSize=100  
+METHOD            |||    GET
+HEADERS           |||    "Content-Type:application/json"   
+                  |||    "Authorization:Basic encodedusrpwd"  (encodedusrpwd is base64 encoded string for "user:password")  
+
+**Response Body**   
+{  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;prefix: "alertdef",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;tags: {  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;site: "sandbox",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;dataSource: "hdfsAuditLog",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;policyId: "testPolicy",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;alertExecutorId: "hdfsAuditLogAlertExecutor",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;policyType: "siddhiCEPEngine"  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;},  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;encodedRowkey: "YEktKX_____62aP_6x97yoSv3B0ANd9Hby--xyCZKe1hk6BkS9hcZXeJk1Je-7-Mrq0lGQ",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;desc: "nope alert for test",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;policyDef: "{"type":"siddhiCEPEngine","expression":"from hdfsAuditLogEventStream[src=='/tmp/private'] select * into outputStream;"}",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;notificationDef: "[{"sender":"noreplay-eagle@company.com","recipients":"user@company.com","subject":"testPolicy","flavor":"email","id":"email_1"}]",  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;enabled: true  
+}  
+
+------  
+
+#### **Delete Policy Example**    
+
+Delete policy by encodedRowkey
+
+URL               |||    http://host:port/eagle-service/rest/entities/delete?serviceName=AlertDefinitionService&byId=true  
+METHOD            |||    POST  
+HEADERS           |||    "Content-Type:application/json"  
+                  |||    "Authorization:Basic encodedusrpwd"  (encodedusrpwd is base64 encoded string for "user:password")  
+DATA              |||    [  
+                  |||       "YEktKX_____62aP_6x97yoSv3B0ANd9Hby--xyCZKe1hk6BkS9hcZXeJk1Je-7-Mrq0lGQ"  
+                  |||    ]  
+
+**Delete Request Response Body**  
+
+The following is the response body of a successful delete request  
+{  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"meta": {  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;     "elapsedms": 5,  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;     "totalResults": 1  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;},  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"success": true,  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"obj": [  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;     "YEktKX_____62aP_6x97yoSv3B0ANd9Hby--xyCZKe1hk6BkS9hcZXeJk1Je-7-Mrq0lGQ"  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;],  
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"type": "java.lang.String"  
+}  
+
+-----
+
+### Stream Definition API  
+
+In the policy definition, if the policyType is "siddhiCEPEngine" we need to specify which stream the query runs against, e.g. "from hdfsAuditLogEventStream"   
+
+So we need further define the stream schema along with the policy
+
+The response body of stream schema api is similar to policy api, we don't duplicate it in stream definition api  
+
+------  
+
+#### **Create/Update Stream Schema Example**   
+
+URL               |||    http://host:port/eagle-service/rest/entities?serviceName=AlertStreamSchemaService   
+METHOD            |||    POST
+HEADERS           |||    "Content-Type:application/json"   
+                  |||    "Authorization:Basic encodedusrpwd"  (encodedusrpwd is base64 encoded string for "user:password")  
+DATA              |||    [{  
+                  |||    &nbsp;&nbsp;"tags": {  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "dataSource": "hiveQueryLog",  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "attrName": "user",  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; "streamName": "hiveAccessLogStream"  
+                  |||    &nbsp;&nbsp;&nbsp;&nbsp; },  
+                  |||    &nbsp;&nbsp;"attrType": "string",  
+                  |||    &nbsp;&nbsp;"attrDescription": "process user"  
+                  |||    }]                  
+
+**Field Specification**  
+
+Tags             |||    All Tags form the key for alert policy  
+                 |||    1) dataSource: From which dataSource the policy consume from, e.g. "hdfsAuditLog"  
+                 |||    2) attrName: Attribute's name, e.g. "user"  
+                 |||    3) streamName: Stream's name, e.g.  "hiveAccessLogStream"  
+attrType         |||    Attribute's type, e.g. string, boolean, int, long  
+attrDescription  |||    Description for the attribute
+  
+------  
+
+#### **Get Stream Schema Example**  
+
+URL               |||    http://host:port/eagle-service/rest/list?query=AlertStreamSchemaService[@dataSource="hdfsAuditLog" AND @streamName="hiveAccessLogStream"]{*}&pageSize=100  
+METHOD            |||    GET
+HEADERS           |||    "Content-Type:application/json"   
+                  |||    "Authorization:Basic encodedusrpwd"  (encodedusrpwd is base64 encoded string for "user:password")  
+
+------  
+   
+#### **Delete Stream Schema Example**    
+
+Delete stream schema by encodedRowkey
+
+URL               |||    http://host:port/eagle-service/rest/entities/delete?serviceName=AlertStreamSchemaService&byId=true  
+METHOD            |||    POST  
+HEADERS           |||    "Content-Type:application/json"  
+                  |||    "Authorization:Basic encodedusrpwd"  (encodedusrpwd is base64 encoded string for "user:password")  
+DATA              |||    [ "YEktKX_____62aP_6x97yoSv3B0ANd9Hby--xyCZKe1hk6BkS9hcZXeJk1Je-7-Mrq0lGQ" ]    

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/policy-capabilities.md
----------------------------------------------------------------------
diff --git a/eagle-site/policy-capabilities.md b/eagle-site/policy-capabilities.md
new file mode 100644
index 0000000..88667c8
--- /dev/null
+++ b/eagle-site/policy-capabilities.md
@@ -0,0 +1,18 @@
+---
+layout: doc
+title:  "Policy Engine Capabilities"
+permalink: /docs/tutorial/policy-capabilities.html
+---
+
+### CEP as first class policy engine
+
+Apache Eagle (called Eagle in the following) platform supports CEP engine as first class policy engine, i.e. Eagle platform runs CEP engine on top of Apache Storm and make rules be hot deployed.  
+Specifically Eagle platform uses WSO2 Siddhi CEP library, source code is [here](https://github.com/wso2/siddhi). 
+
+### Policy capabilities
+
+With embedded CEP engine, Eagle platform provides comprehensive alerting rules as follows. Also please go to Siddhi site for [language reference](https://docs.wso2.com/display/CEP300/Introduction+to+Siddhi+Query+Language).
+
+* **Filter Rule**
+
+* **Window based Rule**

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/quick-start-0.3.0.md
----------------------------------------------------------------------
diff --git a/eagle-site/quick-start-0.3.0.md b/eagle-site/quick-start-0.3.0.md
new file mode 100644
index 0000000..2c2dd74
--- /dev/null
+++ b/eagle-site/quick-start-0.3.0.md
@@ -0,0 +1,57 @@
+---
+layout: doc
+title:  "Quick Start" 
+permalink: /docs/quick-start-0.3.0.html
+---
+
+Guide To Install Apache Eagle 0.3.0-incubating to Hortonworks sandbox.  
+*Apache Eagle will be called Eagle in the following.*
+
+* Prerequisite
+* Download + Patch + Build
+* Setup Hadoop[^HADOOP] Environment.
+* Install Eagle.
+* Demo
+<br/>
+
+### **Prerequisite**
+Eagle requires a streaming environment to run various applications. For more details, please check [Setup Environment](/docs/deployment-env.html)
+<br/>
+
+### **Download + Patch + Build**
+* Download Eagle 0.3.0 source released From Apache [[Tar]](https://dist.apache.org/repos/dist/release/eagle/apache-eagle-0.3.0-incubating/apache-eagle-0.3.0-incubating-src.tar.gz) , [[MD5]](https://dist.apache.org/repos/dist/release/eagle/apache-eagle-0.3.0-incubating/apache-eagle-0.3.0-incubating-src.tar.gz.md5) 
+* Build manually with [Apache Maven](https://maven.apache.org/):
+
+	  $ tar -zxvf apache-eagle-0.3.0-incubating-src.tar.gz
+	  $ cd incubator-eagle-release-0.3.0-rc3  
+	  $ curl -O https://patch-diff.githubusercontent.com/raw/apache/eagle/pull/180.patch
+	  $ git apply 180.patch
+	  $ mvn clean package -DskipTests
+
+	After building successfully, you will get tarball under `eagle-assembly/target/` named as `eagle-0.3.0-incubating-bin.tar.gz`
+<br/>
+
+### **Install Eagle**
+    
+     $ scp -P 2222  eagle-assembly/target/eagle-0.3.0-incubating-bin.tar.gz root@127.0.0.1:/root/
+     $ ssh root@127.0.0.1 -p 2222 (password is hadoop)
+     $ tar -zxvf eagle-0.3.0-incubating-bin.tar.gz
+     $ mv eagle-0.3.0-incubating eagle
+     $ mv eagle /usr/hdp/current/
+     $ cd /usr/hdp/current/eagle
+     $ examples/eagle-sandbox-starter.sh
+
+<br/>
+
+### **Demos**
+* Login to Eagle UI [http://localhost:9099/eagle-service/](http://localhost:9099/eagle-service/) using username and password as "admin" and "secret"
+* [HDFS & Hive](/docs/hdfs-hive-monitoring.html)
+<br/>
+
+
+
+---
+
+#### *Footnotes*
+
+[^HADOOP]:*All mentions of "hadoop" on this page represent Apache Hadoop.*

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/quick-start.md
----------------------------------------------------------------------
diff --git a/eagle-site/quick-start.md b/eagle-site/quick-start.md
new file mode 100644
index 0000000..aa09259
--- /dev/null
+++ b/eagle-site/quick-start.md
@@ -0,0 +1,69 @@
+---
+layout: doc
+title:  "Quick Start" 
+permalink: /docs/quick-start.html
+---
+
+Guide To Install **Apache Eagle 0.4.0-incubating** On Hortonworks sandbox. 
+
+[For older versions: _Apache Eagle 0.3.0-incubating Quick Start_](/docs/quick-start-0.3.0.html)
+
+* Setup Environment
+* Download + Patch + Build
+* Setup Hadoop[^HADOOP] Environment.
+* Install Eagle.
+* Sample Application: Hive[^HIVE] query activity monitoring in sandbox
+<br/>
+
+### **Setup Environment**
+Eagle requires a streaming environment to run various applications. For more details, please check [Setup Environment](/docs/deployment-env.html)
+<br/>
+
+### **Download + Patch + Build**
+* Download latest Eagle source released From Apache [[Tar]](https://dist.apache.org/repos/dist/release/eagle/apache-eagle-0.4.0-incubating/apache-eagle-0.4.0-incubating-src.tar.gz), [[MD5]](https://dist.apache.org/repos/dist/release/eagle/apache-eagle-0.4.0-incubating/apache-eagle-0.4.0-incubating-src.tar.gz.md5).
+* Build manually with [Apache Maven](https://maven.apache.org/):
+
+	  $ tar -zxvf apache-eagle-0.4.0-incubating-src.tar.gz
+	  $ cd apache-eagle-0.4.0-incubating-src 
+	  $ curl -O https://patch-diff.githubusercontent.com/raw/apache/eagle/pull/268.patch
+	  $ git apply 268.patch
+	  $ mvn clean package -DskipTests
+
+	After building successfully, you will get a tarball under `eagle-assembly/target/` named `apache-eagle-0.4.0-incubating-bin.tar.gz`
+<br/>
+
+### **Install Eagle**
+    
+     $ scp -P 2222 eagle-assembly/target/apache-eagle-0.4.0-incubating-bin.tar.gz root@127.0.0.1:/root/
+     $ ssh root@127.0.0.1 -p 2222 (password is hadoop)
+     $ tar -zxvf apache-eagle-0.4.0-incubating-bin.tar.gz
+     $ mv apache-eagle-0.4.0-incubating eagle
+     $ mv eagle /usr/hdp/current/
+     $ cd /usr/hdp/current/eagle
+     $ examples/eagle-sandbox-starter.sh
+
+<br/>
+
+### **Sample Application: Hive query activity monitoring in sandbox**
+After executing `examples/eagle-sandbox-starter.sh`, you have a sample application (topology) running on the Apache Storm (check with [storm ui](http://sandbox.hortonworks.com:8744/index.html)), and a sample policy of Hive activity monitoring defined.
+
+Next you can trigger an alert by running a Hive query.
+
+~~~
+$ su hive
+$ hive
+$ set hive.execution.engine=mr;
+$ use xademo;
+$ select a.phone_number from customer_details a, call_detail_records b where a.phone_number=b.phone_number;
+~~~
+<br/>
+
+
+
+---
+
+#### *Footnotes*
+
+[^HADOOP]:*Apache Hadoop.*
+[^HIVE]:*All mentions of "hive" on this page represent Apache Hive.*
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/security.md
----------------------------------------------------------------------
diff --git a/eagle-site/security.md b/eagle-site/security.md
new file mode 100644
index 0000000..fface4b
--- /dev/null
+++ b/eagle-site/security.md
@@ -0,0 +1,13 @@
+---
+layout: doc
+title:  "Apache Eagle Security" 
+permalink: /docs/security.html
+---
+
+The Apache Software Foundation takes a very active stance in eliminating security problems in its software products. Apache Eagle is also responsive to such issues around its features. 
+
+If you have any concern regarding Eagle's security or you believe a vulnerability has been discovered, don't hesitate to contact the Apache Security Team by sending an email to [security@apache.org](mailto:security@apache.org). In the message, you can indicate the project name is Eagle, provide a description of the issue, and you are recommended to give the way of reproducing it. The security team and eagle community will get back to you after assessing the findings.
+
+> **PLEASE PAY ATTENTION** to report any security problem to the security email address before disclosing it publicly.
+
+The ASF Security Team maintains a page with the description of how vulnerabilities are handled, check their [Web Page](http://www.apache.org/security) for more information.

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/serviceconfiguration.md
----------------------------------------------------------------------
diff --git a/eagle-site/serviceconfiguration.md b/eagle-site/serviceconfiguration.md
new file mode 100644
index 0000000..3055ce1
--- /dev/null
+++ b/eagle-site/serviceconfiguration.md
@@ -0,0 +1,66 @@
+---
+layout: doc
+title:  "Apache Eagle Service Configuration"
+permalink: /docs/serviceconfiguration.html
+---
+
+Apache Eagle (called Eagle in the following) Service provides some config files for specifying metadata storage, security access to Eagle Service. This page will give detailed
+description of Eagle Service configuration.
+
+Eagle currently supports customizing the following configurations:
+
+* Metadata store config
+* Security access config
+
+### Metadata store config 
+* for hbase
+
+~~~
+eagle {
+	service{
+		storage-type="hbase"
+		hbase-zookeeper-quorum="sandbox.hortonworks.com"
+		hbase-zookeeper-property-clientPort=2181
+		zookeeper-znode-parent="/hbase-unsecure",
+		springActiveProfile="sandbox"
+		audit-enabled=true
+	}
+      }
+~~~
+
+* for mysql
+
+~~~
+eagle {
+	service {
+		storage-type="jdbc"
+		storage-adapter="mysql"
+		storage-username="eagle"
+		storage-password=eagle
+		storage-database=eagle
+		storage-connection-url="jdbc:mysql://localhost:3306/eagle"
+		storage-connection-props="encoding=UTF-8"
+		storage-driver-class="com.mysql.jdbc.Driver"
+		storage-connection-max=8
+	}
+}
+~~~
+
+* for derby
+
+~~~
+eagle {
+	service {
+		storage-type="jdbc"
+		storage-adapter="derby"
+		storage-username="eagle"
+		storage-password=eagle
+		storage-database=eagle
+		storage-connection-url="jdbc:derby:/tmp/eagle-db-dev;create=true"
+		storage-connection-props="encoding=UTF-8"
+		storage-driver-class="org.apache.derby.jdbc.EmbeddedDriver"
+		storage-connection-max=8
+	}
+}
+~~~
+<br />

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/setup-env.md
----------------------------------------------------------------------
diff --git a/eagle-site/setup-env.md b/eagle-site/setup-env.md
new file mode 100644
index 0000000..de60fa5
--- /dev/null
+++ b/eagle-site/setup-env.md
@@ -0,0 +1,59 @@
+---
+layout: doc
+title:  "Deploy Environment"
+permalink: /docs/deployment-env.html
+---
+
+### Setup Environment
+
+Apache Eagle (called Eagle in the following) as an analytics solution for identifying security and performance issues instantly, relies on streaming platform `Storm`[^STORM] + `Kafka`[^KAFKA] to meet the realtime criteria, and persistence storage to store metadata and some metrics. As for the persistence storage, it supports three types of database: `HBase`[^HBASE], `Derby`[^DERBY], and `Mysql`
+
+To run monitoring applications, Eagle requires the following dependencies.
+
+* For streaming platform dependencies
+
+	* Storm: 0.9.3 or later
+	* Kafka: 0.8.x or later
+	* Java: 1.7.x
+	* NPM (On MAC OS try "brew install node") 	
+
+* For database dependencies (Choose one of them)
+
+	* HBase: 0.98 or later
+		* Hadoop[^HADOOP]: 2.6.x is required
+	* Mysql
+		* Installation is required
+	* Derby
+		* No installation 
+		
+### Setup Cluster in Sandbox
+To make things easier you can try Eagle with an **all-in-one** sandbox VM, like [HDP sandbox](http://hortonworks.com/downloads/#sandbox)(HDP 2.2.4 is recommended). Next we will go with Hortonworks Sandbox 2.2.4 to set up a minimal cluster with Storm and Kafka. 
+
+1. Launch Ambari[^AMBARI]
+   * Enable Ambari in sandbox http://127.0.0.1:8000 (Click on Enable Button)
+   * Login to Ambari UI http://127.0.0.1:8080/ with user:admin and password:admin
+
+2. Start Storm and Kafka via Ambari. Showing Storm as an example below.
+![Restart Services](/images/docs/start-storm.png "Services")
+
+3. (Optional) Start HBase via Ambari with root as HBase superuser
+![add superuser](/images/docs/hbase-superuser.png)
+![add superuser](/images/docs/hbase-superuser2.png)
+
+4. Add Eagle service port. If the NAT network is used in a virtual machine, add port 9099 to "Port Forwarding"
+  ![Port Forwarding](/images/docs/eagle-service.png)
+
+
+
+
+---
+
+#### *Footnotes*
+
+[^STORM]:*All mentions of "storm" on this page represent Apache Storm.*
+[^KAFKA]:*All mentions of "kafka" on this page represent Apache Kafka.*
+[^HBASE]:*All mentions of "hbase" on this page represent Apache HBase.*
+[^DERBY]:*All mentions of "derby" on this page represent Apache Derby.*
+[^HADOOP]:*Apache Hadoop.*
+[^AMBARI]:*All mentions of "ambari" on this page represent Apache Ambari.*
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/standalone-install.md
----------------------------------------------------------------------
diff --git a/eagle-site/standalone-install.md b/eagle-site/standalone-install.md
new file mode 100644
index 0000000..d24a6ed
--- /dev/null
+++ b/eagle-site/standalone-install.md
@@ -0,0 +1,20 @@
+---
+layout: doc
+title:  "Overview" 
+permalink: /docs/standalone-install.html
+---
+
+## Hardware Requirements
+
+TBF
+
+## Software Requirements
+TBF
+
+## Installation Procedure
+
+TBF
+
+Below are some of the features we are working on:
+
+TBF
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/terminology.md
----------------------------------------------------------------------
diff --git a/eagle-site/terminology.md b/eagle-site/terminology.md
new file mode 100644
index 0000000..ca8925d
--- /dev/null
+++ b/eagle-site/terminology.md
@@ -0,0 +1,34 @@
+---
+layout: doc
+title:  "Terminology" 
+permalink: /docs/terminology.html
+---
+
+Here are some terms we are using in Apache Eagle (called Eagle in the following), please check them for your reference.
+They are basic knowledge of Eagle which also will help to well understand Eagle.
+
+* **Site**: a site can be considered as a physical data center. Big data platform e.g. Hadoop[^HADOOP] may be deployed to multiple data centers in an enterprise. 
+
+* **Application**: an application is composed of data integration, policies and insights for one data source.
+
+* **Policy**: a policy defines the rule to alert. Policy can be simply a filter expression or a complex window based aggregation rules etc. 
+
+* **Data source**: a data source is a monitoring target data. Eagle supports many data sources HDFS audit logs, Hive[^HIVE] query, MapReduce job etc.
+
+* **Stream**: a stream is the streaming data from a data source. Each data source has its own stream.
+
+* **Data activity monitoring**: Data activity monitoring is to monitor how user exploits data in Hadoop system etc. 
+
+* **User profile**: a user profile is the historical activity model generated using machine learning algorithm which could be used for showing insights.
+
+* **Data classification**: data classification provides the ability to classify different data sources with different levels of sensitivity.
+
+
+
+---
+
+#### *Footnotes*
+
+[^HADOOP]:*All mentions of "hadoop" on this page represent Apache Hadoop.*
+[^HIVE]:*Apache Hive.*
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/tutorial-classfication-0.3.0.md
----------------------------------------------------------------------
diff --git a/eagle-site/tutorial-classfication-0.3.0.md b/eagle-site/tutorial-classfication-0.3.0.md
new file mode 100644
index 0000000..384f14b
--- /dev/null
+++ b/eagle-site/tutorial-classfication-0.3.0.md
@@ -0,0 +1,58 @@
+---
+layout: doc
+title:  "Data Classification Tutorial" 
+permalink: /docs/tutorial/classification-0.3.0.html
+---
+
+Apache Eagle data classification feature provides the ability to classify data with different levels of sensitivity.
+Currently this feature is available ONLY for applications monitoring HDFS, Apache Hive and Apache HBase. For example, HdfsAuditLog, HiveQueryLog and HBaseSecurityLog. 
+
+The main content of this page are 
+
+* Connection Configuration
+* Data Classification
+ 
+ 
+### Connection Configuration
+
+To monitor a remote cluster, we first make sure the connection to the cluster is configured. For more details, please refer to [Site Management](/docs/tutorial/site-0.3.0.html)
+
+### Data Classification
+
+After the configuration is done, there are two parts: the first part is about how to add/remove sensitivity to files/directories; the second part shows how to monitor this sensitive data. In the following, we take HdfsAuditLog as an example.
+
+#### **Part 1: Sensitivity Edit**
+
+  * add the sensitive mark to files/directories.
+
+    * **Basic**: Label sensitivity files directly (**recommended**)
+
+       ![HDFS classification](/images/docs/hdfs-mark1.png)
+       ![HDFS classification](/images/docs/hdfs-mark2.png)
+       ![HDFS classification](/images/docs/hdfs-mark3.png)
+    * **Advanced**: Import json file/content
+
+        ![HDFS classification](/images/docs/hdfs-import1.png)
+        ![HDFS classification](/images/docs/hdfs-import2.png)
+        ![HDFS classification](/images/docs/hdfs-import3.png)
+
+
+ * remove sensitive mark on files/directories
+
+   * **Basic**: remove label directly
+
+        ![HDFS classification](/images/docs/hdfs-delete1.png)
+        ![HDFS classification](/images/docs/hdfs-delete2.png)
+
+   * **Advanced**: delete in batch
+
+        ![HDFS classification](/images/docs/hdfs-remove.png)
+
+#### **Part 2: Sensitivity Usage in Policy Definition**
+
+You can mark a particular folder/file as "PRIVATE". Once you have this information you can create policies using this label.
+
+> For example: the following policy monitors all the operations to resources with sensitivity type "PRIVATE".
+
+![sensitivity type policy](/images/docs/sensitivity-policy.png)
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/tutorial-classfication.md
----------------------------------------------------------------------
diff --git a/eagle-site/tutorial-classfication.md b/eagle-site/tutorial-classfication.md
new file mode 100644
index 0000000..d5a164a
--- /dev/null
+++ b/eagle-site/tutorial-classfication.md
@@ -0,0 +1,133 @@
+---
+layout: doc
+title:  "Data Classification Tutorial" 
+permalink: /docs/tutorial/classification.html
+---
+
+Apache Eagle data classification feature provides the ability to classify data with different levels of sensitivity.
+Currently this feature is available ONLY for applications monitoring HDFS, Hive[^HIVE] and HBase[^HBASE]. For example, HdfsAuditLog, HiveQueryLog and HBaseSecurityLog.
+
+The main contents of this page are
+
+* Cluster Connection
+* Data Classification
+ 
+### Cluster Connection
+
+Here we give example configurations for HDFS, HBASE, and Hive. Suppose the cluster to monitor is the Hortonworks sandbox. This configuration is located at the admin management page.
+
+* HDFS
+
+    ![hdfs setup](/images/docs/hdfs-setup.png) 
+    
+    * Base case
+
+        You may configure the default path for Apache Hadoop clients to connect remote hdfs namenode.
+
+            classification.fs.defaultFS=hdfs://sandbox.hortonworks.com:8020
+
+    * HA case
+
+        Basically, you point your fs.defaultFS at your nameservice and let the client know how it's configured (the backing namenodes) and how to fail over between them under the HA mode
+
+            classification.fs.defaultFS=hdfs://nameservice1
+            classification.dfs.nameservices=nameservice1
+            classification.dfs.ha.namenodes.nameservice1=namenode1,namenode2
+            classification.dfs.namenode.rpc-address.nameservice1.namenode1=hadoopnamenode01:8020
+            classification.dfs.namenode.rpc-address.nameservice1.namenode2=hadoopnamenode02:8020
+            classification.dfs.client.failover.proxy.provider.nameservice1=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+            
+
+    * Kerberos-secured cluster
+
+        For Kerberos-secured cluster, you need to get a keytab file and the principal from your admin, and configure "eagle.keytab.file" and "eagle.kerberos.principal" to authenticate its access.
+
+            classification.eagle.keytab.file=/EAGLE-HOME/.keytab/eagle.keytab
+            classification.eagle.kerberos.principal=eagle@SOMEWHERE.COM
+
+        If there is an exception about "invalid server principal name", you may need to check the DNS resolver, or the data transfer related properties, such as "dfs.encrypt.data.transfer", "dfs.encrypt.data.transfer.algorithm", "dfs.trustedchannel.resolver.class", "dfs.datatransfer.client.encrypt".
+        
+      
+
+* Hive
+    * Basic
+
+            classification.accessType=metastoredb_jdbc
+            classification.password=hive
+            classification.user=hive
+            classification.jdbcDriverClassName=com.mysql.jdbc.Driver
+            classification.jdbcUrl=jdbc:mysql://sandbox.hortonworks.com/hive?createDatabaseIfNotExist=true
+
+
+* HBase
+
+    * Basic case
+
+        You need to set the "hbase.zookeeper.quorum" property and the "hbase.zookeeper.property.clientPort" property.
+
+            classification.hbase.zookeeper.property.clientPort=2181
+            classification.hbase.zookeeper.quorum=localhost
+
+    * Kerberos-secured cluster
+
+        According to your environment, you can add or remove some of the following properties. Here is the reference.
+
+            classification.hbase.zookeeper.property.clientPort=2181
+            classification.hbase.zookeeper.quorum=localhost
+            classification.hbase.security.authentication=kerberos
+            classification.hbase.master.kerberos.principal=hadoop/_HOST@EXAMPLE.COM
+            classification.zookeeper.znode.parent=/hbase
+            classification.eagle.keytab.file=/EAGLE-HOME/.keytab/eagle.keytab
+            classification.eagle.kerberos.principal=eagle@EXAMPLE.COM
+
+Any questions on the Kerberos configuration in Eagle, please first check [FAQ](/docs/FAQ.html)
+
+### Data Classification
+
+After the configuration is updated, we can go back to the classification page on Eagle UI. Here we take HdfsAuditLog as an example to explain how to classify data and how to monitor sensitive data in Eagle.
+
+#### **Part 1: Sensitivity Edit**
+
+  * add the sensitive mark to files/directories.
+
+    * **Basic**: Label sensitivity files directly (**recommended**)
+
+       ![HDFS classification](/images/docs/hdfs-mark1.png)
+       ![HDFS classification](/images/docs/hdfs-mark2.png)
+       ![HDFS classification](/images/docs/hdfs-mark3.png)
+       
+    * **Advanced**: Import json file/content
+
+        ![HDFS classification](/images/docs/hdfs-import1.png)
+        ![HDFS classification](/images/docs/hdfs-import2.png)
+        ![HDFS classification](/images/docs/hdfs-import3.png)
+
+ * remove sensitive mark on files/directories
+
+   * **Basic**: remove label directly
+
+        ![HDFS classification](/images/docs/hdfs-delete1.png)
+        ![HDFS classification](/images/docs/hdfs-delete2.png)
+
+   * **Advanced**: delete in batch
+
+        ![HDFS classification](/images/docs/hdfs-remove.png)
+
+#### **Part 2: Monitor sensitive data**
+
+You can mark a particular folder/file as "PRIVATE". Once you have this information you can create policies using this label.
+
+> For example: the following policy monitors all the operations to resources with sensitivity type "PRIVATE".
+
+![sensitivity type policy](/images/docs/sensitivity-policy.png)
+
+
+
+---
+
+#### *Footnotes*
+
+[^HADOOP]:*All mentions of "hadoop" on this page represent Apache Hadoop.*
+[^HBASE]:*All mentions of "hbase" on this page represent Apache HBase.*
+[^HIVE]:*All mentions of "hive" on this page represent Apache Hive.*
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/tutorial-ldap.md
----------------------------------------------------------------------
diff --git a/eagle-site/tutorial-ldap.md b/eagle-site/tutorial-ldap.md
new file mode 100644
index 0000000..469f0b6
--- /dev/null
+++ b/eagle-site/tutorial-ldap.md
@@ -0,0 +1,38 @@
+---
+layout: doc
+title:  "Apache Eagle LDAP Tutorial"
+permalink: /docs/tutorial/ldap.html
+---
+
+To enable Apache Eagle (called Eagle in the following) LDAP authentication on the web, two steps are needed.
+
+Step 1: edit configuration under conf/ldap.properties.
+
+    ldap.server=ldap://localhost:10389
+    ldap.username=uid=admin,ou=system
+    ldap.password=secret
+    ldap.user.searchBase=ou=Users,o=mojo
+    ldap.user.searchPattern=(uid={0})
+    ldap.user.groupSearchBase=ou=groups,o=mojo
+    acl.adminRole=
+    acl.defaultRole=ROLE_USER
+
+acl.adminRole and acl.defaultRole are two customized properties for Eagle. Eagle manages admin users with groups. If you set acl.adminRole as ROLE_{EAGLE-ADMIN-GROUP-NAME}, members in this group have the admin privilege. acl.defaultRole is ROLE_USER.
+
+Step 2: edit conf/eagle-service.conf, and add springActiveProfile="default"
+
+    eagle{
+        service{
+            storage-type="hbase"
+            hbase-zookeeper-quorum="localhost"
+            hbase-zookeeper-property-clientPort=2181
+            zookeeper-znode-parent="/hbase",
+            springActiveProfile="default"
+        }
+    }
+
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/tutorial-notificationplugin.md
----------------------------------------------------------------------
diff --git a/eagle-site/tutorial-notificationplugin.md b/eagle-site/tutorial-notificationplugin.md
new file mode 100644
index 0000000..c9dd313
--- /dev/null
+++ b/eagle-site/tutorial-notificationplugin.md
@@ -0,0 +1,84 @@
+---
+layout: doc
+title:  "Notification Plugin"
+permalink: /docs/tutorial/notificationplugin.html
+---
+
+*Since Apache Eagle 0.4.0-incubating. Apache Eagle will be called Eagle in the following.*
+
+### Eagle Notification Plugins
+
+[Eagle Notification Plugin](https://cwiki.apache.org/confluence/display/EAG/Alert+notification+plugin) provides an interface for users to consume Eagle alerts. When defining a policy, a user can add an arbitrary number of notification plugin instances. By default, Eagle supports three types of notification: EagleStore, Kafka[^KAFKA] and Email.
+
+* EagleStore: Alerts will be persisted into the underlying database via eagle. 
+	* no configuration is needed. 
+* Kafka: Alerts will flow into Kafka. Configurations are required:
+	* **kafka_broker**: REQUIRED. The list of host:port pairs of the Kafka brokers to connect to, e.g. "hostname1:port1,hostname2:port2"
+	* **topic**: kafka topic 
+* email: Alert email will be sent out. Configurations are required:
+	* **sender**: email sender address
+	* **recipients**: email recipients, multiple email address with comma separated
+	* **subject**: email subject
+	
+![notificationPlugin](/images/notificationPlugin.png)
+### Customized Notification Plugin
+
+To integrate a customized notification plugin, we must implement an interface 
+
+	public interface NotificationPlugin {
+    /**
+     * for initialization
+     * @throws Exception
+     */
+    void init(Config config, List<AlertDefinitionAPIEntity> initAlertDefs) throws  Exception;
+
+    /**
+     * Update Plugin if any change in Policy Definition
+     * @param policy to be impacted
+     * @param  notificationConfCollection
+     * @throws Exception
+     */
+    void update(String policy, List<Map<String,String>> notificationConfCollection , boolean isPolicyDelete) throws  Exception;
+
+    /**
+     * Post a notification for the given alertEntity
+     * @param alertEntity
+     * @throws Exception
+     */
+
+    void onAlert(AlertAPIEntity alertEntity) throws  Exception;
+
+    /**
+     * Returns Status of Notification Post
+     * @return
+     */
+    List<NotificationStatus> getStatusList();
+	}
+Examples: AlertKafkaPlugin, AlertEmailPlugin, and AlertEagleStorePlugin.
+
+The second and crucial step is to register the configurations of the customized plugin. In other words, we need to persist the configuration template into the database in order to expose the configurations to users in the front end.
+
+Examples:
+
+    {
+       "prefix": "alertNotifications",
+       "tags": {
+         "notificationType": "kafka"
+       },
+       "className": "org.apache.eagle.notification.plugin.AlertKafkaPlugin",
+       "description": "send alert to kafka bus",
+       "enabled":true,
+       "fields": "[{\"name\":\"kafka_broker\",\"value\":\"sandbox.hortonworks.com:6667\"},{\"name\":\"topic\"}]"
+    }
+
+**Note**: `fields` is the configuration for notification type `kafka`
+
+How can we do that? [Here](https://github.com/apache/eagle/blob/master/eagle-assembly/src/main/bin/eagle-topology-init.sh) are Eagle other notification plugin configurations. Just append yours to it, and run this script when Eagle service is up. 
+
+
+
+---
+
+#### *Footnotes*
+
+[^KAFKA]:*All mentions of "kafka" on this page represent Apache Kafka.*

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/tutorial-policy.md
----------------------------------------------------------------------
diff --git a/eagle-site/tutorial-policy.md b/eagle-site/tutorial-policy.md
new file mode 100644
index 0000000..cf99013
--- /dev/null
+++ b/eagle-site/tutorial-policy.md
@@ -0,0 +1,67 @@
+---
+layout: doc
+title:  "Policy Tutorial" 
+permalink: /docs/tutorial/policy.html
+---
+
+Apache Eagle (called Eagle in the following) currently supports to customize policies for data sources for each site:
+
+* HDFS Audit Log
+* Hive[^HIVE] Query Log
+
+> NOTICE: policies are classified by sites. Please select the site first when there are multiple ones.
+
+### How to define HDFS Policy?
+In this example we will go through the steps for creating the following HDFS policy.
+
+> Example Policy: Create a policy to alert when a user is trying to delete a file with sensitive data
+
+* **Step 1**: Select Source as HDFS and Stream as HDFS Audit Log
+
+	![HDFS Policies](/images/docs/hdfs-policy1.png)
+
+* **Step 2**: Eagle supports a variety of properties for match criteria where users can set different values. Eagle also supports window functions to extend policies with time functions.
+
+	  command = delete 
+	  (Eagle currently supports the following commands open, delete, copy, append, copy from local, get, move, mkdir, create, list, change permissions)
+		
+	  source = /tmp/private 
+	  (Eagle supports wildcarding for property values for example /tmp/*)
+
+	![HDFS Policies](/images/docs/hdfs-policy2.png)
+
+* **Step 3**: Name your policy and select de-duplication options if you need to avoid getting duplicate alerts within a particular time window. You have an option to configure email notifications for the alerts.
+
+	![HDFS Policies](/images/docs/hdfs-policy3.png)
+
+
+### How to define HIVE Policy?
+In this example we will go through the steps for creating the following Hive policy.
+
+> Example Policy: Create a policy to alert when a user is trying to select PHONE_NUMBER from a hive table with sensitive data
+
+* **Step 1**:  Select Source as Hive and Stream as Hive Query Log
+
+	![Hive Policies](/images/docs/hive-policy1.png)
+
+* **Step 2**: Eagle supports a variety of properties for match criteria where users can set different values. Eagle also supports window functions to extend policies with time functions.
+
+	  command = Select 
+	  (Eagle currently supports the following commands DDL statements Create, Drop, Alter, Truncate, Show)
+		
+	  sensitivity type = PHONE_NUMBER
+      (Eagle supports classifying data in Hive with different sensitivity types. Users can use these sensitivity types to create policies)
+
+	![Hive Policies](/images/docs/hive-policy2.png)
+
+* **Step 3**: Name your policy and select de-duplication options if you need to avoid getting duplicate alerts within a particular time window. You have an option to configure email notifications for the alerts.
+
+	![Hive Policies](/images/docs/hive-policy3.png)
+
+
+---
+
+#### *Footnotes*
+
+[^HIVE]:*All mentions of "hive" on this page represent Apache Hive.*
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/tutorial-site-0.3.0.md
----------------------------------------------------------------------
diff --git a/eagle-site/tutorial-site-0.3.0.md b/eagle-site/tutorial-site-0.3.0.md
new file mode 100644
index 0000000..44dfc09
--- /dev/null
+++ b/eagle-site/tutorial-site-0.3.0.md
@@ -0,0 +1,104 @@
+---
+layout: doc
+title:  "Site Management"
+permalink: /docs/tutorial/site-0.3.0.html
+---
+
+Apache Eagle (called Eagle in the following) identifies different Hadoop[^HADOOP] environments as different sites, such as sandbox, datacenter1, datacenter2. In each site, a user can add different data sources as the monitoring targets. For each data source, a connection configuration is required.
+
+#### Step 1: Add Site
+
+The following is an example which creates a new site "Demo", and add two data sources as its monitoring targets.
+![setup a site](/images/docs/new-site.png)
+
+#### Step 2: Add Configuration
+
+After creating a new site, we need to edit the configuration to connect the cluster. 
+Here we give configuration examples for HDFS, HBASE, and Hive. 
+
+* HDFS
+
+    ![hdfs setup](/images/docs/hdfs-setup-0.3.0.png) 
+    
+    * Base case
+
+        You may configure the default path for Hadoop clients to connect remote hdfs namenode.
+
+            {"fs.defaultFS":"hdfs://sandbox.hortonworks.com:8020"}
+
+    * HA case
+
+        Basically, you point your fs.defaultFS at your nameservice and let the client know how it's configured (the backing namenodes) and how to fail over between them under the HA mode
+
+            {"fs.defaultFS":"hdfs://nameservice1",
+             "dfs.nameservices": "nameservice1",
+             "dfs.ha.namenodes.nameservice1":"namenode1,namenode2",
+             "dfs.namenode.rpc-address.nameservice1.namenode1": "hadoopnamenode01:8020",
+             "dfs.namenode.rpc-address.nameservice1.namenode2": "hadoopnamenode02:8020",
+             "dfs.client.failover.proxy.provider.nameservice1": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+            }
+
+    * Kerberos-secured cluster
+
+        For Kerberos-secured cluster, you need to get a keytab file and the principal from your admin, and configure "eagle.keytab.file" and "eagle.kerberos.principal" to authenticate its access.
+
+            { "eagle.keytab.file":"/EAGLE-HOME/.keytab/eagle.keytab",
+              "eagle.kerberos.principal":"eagle@SOMEWHERE.COM"
+            }
+
+        If there is an exception about "invalid server principal name", you may need to check the DNS resolver, or the data transfer related properties, such as "dfs.encrypt.data.transfer", "dfs.encrypt.data.transfer.algorithm", "dfs.trustedchannel.resolver.class", "dfs.datatransfer.client.encrypt".
+        
+      
+
+* Hive[^HIVE]
+    * Basic
+
+            {
+              "accessType": "metastoredb_jdbc",
+              "password": "hive",
+              "user": "hive",
+              "jdbcDriverClassName": "com.mysql.jdbc.Driver",
+              "jdbcUrl": "jdbc:mysql://sandbox.hortonworks.com/hive?createDatabaseIfNotExist=true"
+            }
+
+
+* HBase[^HBASE]
+
+    * Basic case
+
+        You need to set the "hbase.zookeeper.quorum" property and the "hbase.zookeeper.property.clientPort" property.
+
+            {
+                "hbase.zookeeper.property.clientPort":"2181",
+                "hbase.zookeeper.quorum":"localhost"
+            }
+
+    * Kerberos-secured cluster
+
+        According to your environment, you can add or remove some of the following properties. Here is the reference.
+
+            {
+                "hbase.zookeeper.property.clientPort":"2181",
+                "hbase.zookeeper.quorum":"localhost",
+                "hbase.security.authentication":"kerberos",
+                "hbase.master.kerberos.principal":"hadoop/_HOST@EXAMPLE.COM",
+                "zookeeper.znode.parent":"/hbase",
+                "eagle.keytab.file":"/EAGLE-HOME/.keytab/eagle.keytab",
+                "eagle.kerberos.principal":"eagle@EXAMPLE.COM"
+            }
+
+
+#### Step 3: Checking the connection
+After the configuration is ready, you can go to [classification page](/docs/tutorial/classification-0.3.0.html) and browse the data. If the configuration is correct, data will be ready in a few seconds.
+
+Any questions on the Kerberos configuration in Eagle, please first check [FAQ](/docs/FAQ.html)
+
+
+---
+
+#### *Footnotes*
+
+[^HADOOP]:*All mentions of "hadoop" on this page represent Apache Hadoop.*
+[^HBASE]:*Apache HBase.*
+[^HIVE]:*Apache Hive.*
+

http://git-wip-us.apache.org/repos/asf/eagle/blob/0ecb7c1c/eagle-site/tutorial-site.md
----------------------------------------------------------------------
diff --git a/eagle-site/tutorial-site.md b/eagle-site/tutorial-site.md
new file mode 100644
index 0000000..a1c275e
--- /dev/null
+++ b/eagle-site/tutorial-site.md
@@ -0,0 +1,40 @@
+---
+layout: doc
+title:  "Site Management"
+permalink: /docs/tutorial/site.html
+---
+
+Guide to site management for versions since **Apache Eagle 0.4.0-incubating**.
+
+[For Apache Eagle 0.3.0-incubating, see _here_.](/docs/tutorial/site-0.3.0.html)
+
+The main content of this page has two parts
+
+* How to add a new site 
+* How to add a new application
+
+#### How to add a new site 
+
+Eagle names the cluster to monitor as a site. Users (as an admin user) can manage their sites on Eagle UI. The following example is to add a new site 'test'.
+
+![setup a site](/images/docs/new-site.png)
+
+#### How to add a new application
+
+Once a site is created, users can choose the data sources they want to monitor by adding different applications. For example, the HdfsAuditLog application monitors the hdfs audit log. Currently, Eagle supports 
+
+* [HDFS Data Activity Monitoring](/docs/hdfs-data-activity-monitoring.html)
+* [HIVE Query Activity Monitoring](/docs/hive-query-activity-monitoring.html)
+* [HBASE Data Activity Monitoring](/docs/hbase-data-activity-monitoring.html)
+* [MapR FS Data Activity Monitoring](/docs/mapr-integration.html)
+* [Hadoop JMX Metrics Monitoring](/docs/jmx-metric-monitoring.html)
+
+Same as adding a site, application management is also located at the admin management page. To add an application, two steps are needed
+
+#### step 1: go to site tab, and select the target site
+
+![new application step1](/images/docs/new-application1.png)
+
+#### step 2: click the applications you want on the right side and save the page
+
+![new application step1](/images/docs/new-application2.png)


Mime
View raw message