hbase-commits mailing list archives

From mi...@apache.org
Subject [10/19] hbase git commit: HBASE-11533 Asciidoc Proof of Concept
Date Wed, 07 Jan 2015 04:02:41 GMT
http://git-wip-us.apache.org/repos/asf/hbase/blob/92aa9dc8/src/main/asciidoc/hbase.css
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/hbase.css b/src/main/asciidoc/hbase.css
new file mode 100644
index 0000000..68d418b
--- /dev/null
+++ b/src/main/asciidoc/hbase.css
@@ -0,0 +1,400 @@
+/* Asciidoctor default stylesheet | MIT License | http://asciidoctor.org */
+/* Remove the comments around the @import statement below when using this as a custom stylesheet */
+/*@import "https://fonts.googleapis.com/css?family=Open+Sans:300,300italic,400,400italic,600,600italic%7CNoto+Serif:400,400italic,700,700italic%7CDroid+Sans+Mono:400";*/
+article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}
+audio,canvas,video{display:inline-block}
+audio:not([controls]){display:none;height:0}
+[hidden],template{display:none}
+script{display:none!important}
+html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}
+body{margin:0}
+a{background:transparent}
+a:focus{outline:thin dotted}
+a:active,a:hover{outline:0}
+h1{font-size:2em;margin:.67em 0}
+abbr[title]{border-bottom:1px dotted}
+b,strong{font-weight:bold}
+dfn{font-style:italic}
+hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}
+mark{background:#ff0;color:#000}
+code,kbd,pre,samp{font-family:monospace;font-size:1em}
+pre{white-space:pre-wrap}
+q{quotes:"\201C" "\201D" "\2018" "\2019"}
+small{font-size:80%}
+sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}
+sup{top:-.5em}
+sub{bottom:-.25em}
+img{border:0}
+svg:not(:root){overflow:hidden}
+figure{margin:0}
+fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}
+legend{border:0;padding:0}
+button,input,select,textarea{font-family:inherit;font-size:100%;margin:0}
+button,input{line-height:normal}
+button,select{text-transform:none}
+button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer}
+button[disabled],html input[disabled]{cursor:default}
+input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0}
+input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}
+input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}
+button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}
+textarea{overflow:auto;vertical-align:top}
+table{border-collapse:collapse;border-spacing:0}
+*,*:before,*:after{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}
+html,body{font-size:100%}
+body{background:#fff;color:rgba(0,0,0,.8);padding:0;margin:0;font-family:"Noto Serif","DejaVu Serif",serif;font-weight:400;font-style:normal;line-height:1;position:relative;cursor:auto}
+a:hover{cursor:pointer}
+img,object,embed{max-width:100%;height:auto}
+object,embed{height:100%}
+img{-ms-interpolation-mode:bicubic}
+#map_canvas img,#map_canvas embed,#map_canvas object,.map_canvas img,.map_canvas embed,.map_canvas object{max-width:none!important}
+.left{float:left!important}
+.right{float:right!important}
+.text-left{text-align:left!important}
+.text-right{text-align:right!important}
+.text-center{text-align:center!important}
+.text-justify{text-align:justify!important}
+.hide{display:none}
+.antialiased,body{-webkit-font-smoothing:antialiased}
+img{display:inline-block;vertical-align:middle}
+textarea{height:auto;min-height:50px}
+select{width:100%}
+p.lead,.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{font-size:1.21875em;line-height:1.6}
+.subheader,.admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{line-height:1.45;color:#7a2518;font-weight:400;margin-top:0;margin-bottom:.25em}
+div,dl,dt,dd,ul,ol,li,h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6,pre,form,p,blockquote,th,td{margin:0;padding:0;direction:ltr}
+a{color:#2156a5;text-decoration:underline;line-height:inherit}
+a:hover,a:focus{color:#1d4b8f}
+a img{border:none}
+p{font-family:inherit;font-weight:400;font-size:1em;line-height:1.6;margin-bottom:1.25em;text-rendering:optimizeLegibility}
+p aside{font-size:.875em;line-height:1.35;font-style:italic}
+h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{font-family:"Open Sans","DejaVu Sans",sans-serif;font-weight:300;font-style:normal;color:#990000;text-rendering:optimizeLegibility;margin-top:1em;margin-bottom:.5em;line-height:1.0125em}
+h1 small,h2 small,h3 small,#toctitle small,.sidebarblock>.content>.title small,h4 small,h5 small,h6 small{font-size:60%;color:#e99b8f;line-height:0}
+h1{font-size:2.125em}
+h2{font-size:1.6875em}
+h3,#toctitle,.sidebarblock>.content>.title{font-size:1.375em}
+h4,h5{font-size:1.125em}
+h6{font-size:1em}
+hr{border:solid #ddddd8;border-width:1px 0 0;clear:both;margin:1.25em 0 1.1875em;height:0}
+em,i{font-style:italic;line-height:inherit}
+strong,b{font-weight:bold;line-height:inherit}
+small{font-size:60%;line-height:inherit}
+code{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;color:rgba(0,0,0,.9)}
+ul,ol,dl{font-size:1em;line-height:1.6;margin-bottom:1.25em;list-style-position:outside;font-family:inherit}
+ul,ol,ul.no-bullet,ol.no-bullet{margin-left:1.5em}
+ul li ul,ul li ol{margin-left:1.25em;margin-bottom:0;font-size:1em}
+ul.square li ul,ul.circle li ul,ul.disc li ul{list-style:inherit}
+ul.square{list-style-type:square}
+ul.circle{list-style-type:circle}
+ul.disc{list-style-type:disc}
+ul.no-bullet{list-style:none}
+ol li ul,ol li ol{margin-left:1.25em;margin-bottom:0}
+dl dt{margin-bottom:.3125em;font-weight:bold}
+dl dd{margin-bottom:1.25em}
+abbr,acronym{text-transform:uppercase;font-size:90%;color:rgba(0,0,0,.8);border-bottom:1px dotted #ddd;cursor:help}
+abbr{text-transform:none}
+blockquote{margin:0 0 1.25em;padding:.5625em 1.25em 0 1.1875em;border-left:1px solid #ddd}
+blockquote cite{display:block;font-size:.9375em;color:rgba(0,0,0,.6)}
+blockquote cite:before{content:"\2014 \0020"}
+blockquote cite a,blockquote cite a:visited{color:rgba(0,0,0,.6)}
+blockquote,blockquote p{line-height:1.6;color:rgba(0,0,0,.85)}
+@media only screen and (min-width:768px){h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2}
+h1{font-size:2.75em}
+h2{font-size:2.3125em}
+h3,#toctitle,.sidebarblock>.content>.title{font-size:1.6875em}
+h4{font-size:1.4375em}}table{background:#fff;margin-bottom:1.25em;border:solid 1px #dedede}
+table thead,table tfoot{background:#f7f8f7;font-weight:bold}
+table thead tr th,table thead tr td,table tfoot tr th,table tfoot tr td{padding:.5em .625em .625em;font-size:inherit;color:rgba(0,0,0,.8);text-align:left}
+table tr th,table tr td{padding:.5625em .625em;font-size:inherit;color:rgba(0,0,0,.8)}
+table tr.even,table tr.alt,table tr:nth-of-type(even){background:#f8f8f7}
+table thead tr th,table tfoot tr th,table tbody tr td,table tr td,table tfoot tr td{display:table-cell;line-height:1.6}
+h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2;word-spacing:-.05em}
+h1 strong,h2 strong,h3 strong,#toctitle strong,.sidebarblock>.content>.title strong,h4 strong,h5 strong,h6 strong{font-weight:400}
+.clearfix:before,.clearfix:after,.float-group:before,.float-group:after{content:" ";display:table}
+.clearfix:after,.float-group:after{clear:both}
+*:not(pre)>code{font-size:.9375em;font-style:normal!important;letter-spacing:0;padding:.1em .5ex;word-spacing:-.15em;background-color:#f7f7f8;-webkit-border-radius:4px;border-radius:4px;line-height:1.45;text-rendering:optimizeSpeed}
+pre,pre>code{line-height:1.45;color:rgba(0,0,0,.9);font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;text-rendering:optimizeSpeed}
+.keyseq{color:rgba(51,51,51,.8)}
+kbd{display:inline-block;color:rgba(0,0,0,.8);font-size:.75em;line-height:1.4;background-color:#f7f7f7;border:1px solid #ccc;-webkit-border-radius:3px;border-radius:3px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em white inset;box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em #fff inset;margin:-.15em .15em 0 .15em;padding:.2em .6em .2em .5em;vertical-align:middle;white-space:nowrap}
+.keyseq kbd:first-child{margin-left:0}
+.keyseq kbd:last-child{margin-right:0}
+.menuseq,.menu{color:rgba(0,0,0,.8)}
+b.button:before,b.button:after{position:relative;top:-1px;font-weight:400}
+b.button:before{content:"[";padding:0 3px 0 2px}
+b.button:after{content:"]";padding:0 2px 0 3px}
+p a>code:hover{color:rgba(0,0,0,.9)}
+#header,#content,#footnotes,#footer{width:100%;margin-left:auto;margin-right:auto;margin-top:0;margin-bottom:0;max-width:62.5em;*zoom:1;position:relative;padding-left:.9375em;padding-right:.9375em}
+#header:before,#header:after,#content:before,#content:after,#footnotes:before,#footnotes:after,#footer:before,#footer:after{content:" ";display:table}
+#header:after,#content:after,#footnotes:after,#footer:after{clear:both}
+#content{margin-top:1.25em}
+#content:before{content:none}
+#header>h1:first-child{color:rgba(0,0,0,.85);margin-top:2.25rem;margin-bottom:0}
+#header>h1:first-child+#toc{margin-top:8px;border-top:1px solid #ddddd8}
+#header>h1:only-child,body.toc2 #header>h1:nth-last-child(2){border-bottom:1px solid #ddddd8;padding-bottom:8px}
+#header .details{border-bottom:1px solid #ddddd8;line-height:1.45;padding-top:.25em;padding-bottom:.25em;padding-left:.25em;color:rgba(0,0,0,.6);display:-ms-flexbox;display:-webkit-flex;display:flex;-ms-flex-flow:row wrap;-webkit-flex-flow:row wrap;flex-flow:row wrap}
+#header .details span:first-child{margin-left:-.125em}
+#header .details span.email a{color:rgba(0,0,0,.85)}
+#header .details br{display:none}
+#header .details br+span:before{content:"\00a0\2013\00a0"}
+#header .details br+span.author:before{content:"\00a0\22c5\00a0";color:rgba(0,0,0,.85)}
+#header .details br+span#revremark:before{content:"\00a0|\00a0"}
+#header #revnumber{text-transform:capitalize}
+#header #revnumber:after{content:"\00a0"}
+#content>h1:first-child:not([class]){color:rgba(0,0,0,.85);border-bottom:1px solid #ddddd8;padding-bottom:8px;margin-top:0;padding-top:1rem;margin-bottom:1.25rem}
+#toc{border-bottom:1px solid #efefed;padding-bottom:.5em}
+#toc>ul{margin-left:.125em}
+#toc ul.sectlevel0>li>a{font-style:italic}
+#toc ul.sectlevel0 ul.sectlevel1{margin:.5em 0}
+#toc ul{font-family:"Open Sans","DejaVu Sans",sans-serif;list-style-type:none}
+#toc a{text-decoration:none}
+#toc a:active{text-decoration:underline}
+#toctitle{color:#7a2518;font-size:1.2em}
+@media only screen and (min-width:768px){#toctitle{font-size:1.375em}
+body.toc2{padding-left:15em;padding-right:0}
+#toc.toc2{margin-top:0!important;background-color:#f8f8f7;position:fixed;width:15em;left:0;top:0;border-right:1px solid #efefed;border-top-width:0!important;border-bottom-width:0!important;z-index:1000;padding:1.25em 1em;height:100%;overflow:auto}
+#toc.toc2 #toctitle{margin-top:0;font-size:1.2em}
+#toc.toc2>ul{font-size:.9em;margin-bottom:0}
+#toc.toc2 ul ul{margin-left:0;padding-left:1em}
+#toc.toc2 ul.sectlevel0 ul.sectlevel1{padding-left:0;margin-top:.5em;margin-bottom:.5em}
+body.toc2.toc-right{padding-left:0;padding-right:15em}
+body.toc2.toc-right #toc.toc2{border-right-width:0;border-left:1px solid #efefed;left:auto;right:0}}@media only screen and (min-width:1280px){body.toc2{padding-left:20em;padding-right:0}
+#toc.toc2{width:20em}
+#toc.toc2 #toctitle{font-size:1.375em}
+#toc.toc2>ul{font-size:.95em}
+#toc.toc2 ul ul{padding-left:1.25em}
+body.toc2.toc-right{padding-left:0;padding-right:20em}}#content #toc{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px}
+#content #toc>:first-child{margin-top:0}
+#content #toc>:last-child{margin-bottom:0}
+#footer{max-width:100%;background-color:rgba(0,0,0,.8);padding:1.25em}
+#footer-text,#footer_nav{color:rgba(255,255,255,.8);line-height:1.44}
+#footer a{color: #990000}
+.sect1{padding-bottom:.625em}
+@media only screen and (min-width:768px){.sect1{padding-bottom:1.25em}}.sect1+.sect1{border-top:1px solid #efefed}
+#content h1>a.anchor,h2>a.anchor,h3>a.anchor,#toctitle>a.anchor,.sidebarblock>.content>.title>a.anchor,h4>a.anchor,h5>a.anchor,h6>a.anchor{position:absolute;z-index:1001;width:1.5ex;margin-left:-1.5ex;display:block;text-decoration:none!important;visibility:hidden;text-align:center;font-weight:400}
+#content h1>a.anchor:before,h2>a.anchor:before,h3>a.anchor:before,#toctitle>a.anchor:before,.sidebarblock>.content>.title>a.anchor:before,h4>a.anchor:before,h5>a.anchor:before,h6>a.anchor:before{content:"\00A7";font-size:.85em;display:block;padding-top:.1em}
+#content h1:hover>a.anchor,#content h1>a.anchor:hover,h2:hover>a.anchor,h2>a.anchor:hover,h3:hover>a.anchor,#toctitle:hover>a.anchor,.sidebarblock>.content>.title:hover>a.anchor,h3>a.anchor:hover,#toctitle>a.anchor:hover,.sidebarblock>.content>.title>a.anchor:hover,h4:hover>a.anchor,h4>a.anchor:hover,h5:hover>a.anchor,h5>a.anchor:hover,h6:hover>a.anchor,h6>a.anchor:hover{visibility:visible}
+#content h1>a.link,h2>a.link,h3>a.link,#toctitle>a.link,.sidebarblock>.content>.title>a.link,h4>a.link,h5>a.link,h6>a.link{color:#990000;text-decoration:none}
+#content h1>a.link:hover,h2>a.link:hover,h3>a.link:hover,#toctitle>a.link:hover,.sidebarblock>.content>.title>a.link:hover,h4>a.link:hover,h5>a.link:hover,h6>a.link:hover{color:#a53221}
+.audioblock,.imageblock,.literalblock,.listingblock,.stemblock,.videoblock{margin-bottom:1.25em}
+.admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{text-rendering:optimizeLegibility;text-align:left;font-family:"Noto Serif","DejaVu Serif",serif;font-size:1rem;font-style:italic}
+table.tableblock>caption.title{white-space:nowrap;overflow:visible;max-width:0}
+.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{color:rgba(0,0,0,.85)}
+table.tableblock #preamble>.sectionbody>.paragraph:first-of-type p{font-size:inherit}
+.admonitionblock>table{border-collapse:separate;border:0;background:none;width:100%}
+.admonitionblock>table td.icon{text-align:center;width:80px}
+.admonitionblock>table td.icon img{max-width:none}
+.admonitionblock>table td.icon .title{font-weight:bold;font-family:"Open Sans","DejaVu Sans",sans-serif;text-transform:uppercase}
+.admonitionblock>table td.content{padding-left:1.125em;padding-right:1.25em;border-left:1px solid #ddddd8;color:rgba(0,0,0,.6)}
+.admonitionblock>table td.content>:last-child>:last-child{margin-bottom:0}
+.exampleblock>.content{border-style:solid;border-width:1px;border-color:#e6e6e6;margin-bottom:1.25em;padding:1.25em;background:#fff;-webkit-border-radius:4px;border-radius:4px}
+.exampleblock>.content>:first-child{margin-top:0}
+.exampleblock>.content>:last-child{margin-bottom:0}
+.sidebarblock{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px}
+.sidebarblock>:first-child{margin-top:0}
+.sidebarblock>:last-child{margin-bottom:0}
+.sidebarblock>.content>.title{color:#7a2518;margin-top:0;text-align:center}
+.exampleblock>.content>:last-child>:last-child,.exampleblock>.content .olist>ol>li:last-child>:last-child,.exampleblock>.content .ulist>ul>li:last-child>:last-child,.exampleblock>.content .qlist>ol>li:last-child>:last-child,.sidebarblock>.content>:last-child>:last-child,.sidebarblock>.content .olist>ol>li:last-child>:last-child,.sidebarblock>.content .ulist>ul>li:last-child>:last-child,.sidebarblock>.content .qlist>ol>li:last-child>:last-child{margin-bottom:0}
+.literalblock pre,.listingblock pre:not(.highlight),.listingblock pre[class="highlight"],.listingblock pre[class^="highlight "],.listingblock pre.CodeRay,.listingblock pre.prettyprint{background:#f7f7f8}
+.sidebarblock .literalblock pre,.sidebarblock .listingblock pre:not(.highlight),.sidebarblock .listingblock pre[class="highlight"],.sidebarblock .listingblock pre[class^="highlight "],.sidebarblock .listingblock pre.CodeRay,.sidebarblock .listingblock pre.prettyprint{background:#f2f1f1}
+.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{-webkit-border-radius:4px;border-radius:4px;word-wrap:break-word;padding:1em;font-size:.8125em}
+.literalblock pre.nowrap,.literalblock pre[class].nowrap,.listingblock pre.nowrap,.listingblock pre[class].nowrap{overflow-x:auto;white-space:pre;word-wrap:normal}
+@media only screen and (min-width:768px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:.90625em}}@media only screen and (min-width:1280px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:1em}}.literalblock.output pre{color:#f7f7f8;background-color:rgba(0,0,0,.9)}
+.listingblock pre.highlightjs{padding:0}
+.listingblock pre.highlightjs>code{padding:1em;-webkit-border-radius:4px;border-radius:4px}
+.listingblock pre.prettyprint{border-width:0}
+.listingblock>.content{position:relative}
+.listingblock code[data-lang]:before{display:none;content:attr(data-lang);position:absolute;font-size:.75em;top:.425rem;right:.5rem;line-height:1;text-transform:uppercase;color:#999}
+.listingblock:hover code[data-lang]:before{display:block}
+.listingblock.terminal pre .command:before{content:attr(data-prompt);padding-right:.5em;color:#999}
+.listingblock.terminal pre .command:not([data-prompt]):before{content:"$"}
+table.pyhltable{border-collapse:separate;border:0;margin-bottom:0;background:none}
+table.pyhltable td{vertical-align:top;padding-top:0;padding-bottom:0}
+table.pyhltable td.code{padding-left:.75em;padding-right:0}
+pre.pygments .lineno,table.pyhltable td:not(.code){color:#999;padding-left:0;padding-right:.5em;border-right:1px solid #ddddd8}
+pre.pygments .lineno{display:inline-block;margin-right:.25em}
+table.pyhltable .linenodiv{background:none!important;padding-right:0!important}
+.quoteblock{margin:0 1em 1.25em 1.5em;display:table}
+.quoteblock>.title{margin-left:-1.5em;margin-bottom:.75em}
+.quoteblock blockquote,.quoteblock blockquote p{color:rgba(0,0,0,.85);font-size:1.15rem;line-height:1.75;word-spacing:.1em;letter-spacing:0;font-style:italic;text-align:justify}
+.quoteblock blockquote{margin:0;padding:0;border:0}
+.quoteblock blockquote:before{content:"\201c";float:left;font-size:2.75em;font-weight:bold;line-height:.6em;margin-left:-.6em;color:#7a2518;text-shadow:0 1px 2px rgba(0,0,0,.1)}
+.quoteblock blockquote>.paragraph:last-child p{margin-bottom:0}
+.quoteblock .attribution{margin-top:.5em;margin-right:.5ex;text-align:right}
+.quoteblock .quoteblock{margin-left:0;margin-right:0;padding:.5em 0;border-left:3px solid rgba(0,0,0,.6)}
+.quoteblock .quoteblock blockquote{padding:0 0 0 .75em}
+.quoteblock .quoteblock blockquote:before{display:none}
+.verseblock{margin:0 1em 1.25em 1em}
+.verseblock pre{font-family:"Open Sans","DejaVu Sans",sans;font-size:1.15rem;color:rgba(0,0,0,.85);font-weight:300;text-rendering:optimizeLegibility}
+.verseblock pre strong{font-weight:400}
+.verseblock .attribution{margin-top:1.25rem;margin-left:.5ex}
+.quoteblock .attribution,.verseblock .attribution{font-size:.9375em;line-height:1.45;font-style:italic}
+.quoteblock .attribution br,.verseblock .attribution br{display:none}
+.quoteblock .attribution cite,.verseblock .attribution cite{display:block;letter-spacing:-.05em;color:rgba(0,0,0,.6)}
+.quoteblock.abstract{margin:0 0 1.25em 0;display:block}
+.quoteblock.abstract blockquote,.quoteblock.abstract blockquote p{text-align:left;word-spacing:0}
+.quoteblock.abstract blockquote:before,.quoteblock.abstract blockquote p:first-of-type:before{display:none}
+table.tableblock{max-width:100%;border-collapse:separate}
+table.tableblock td>.paragraph:last-child p>p:last-child,table.tableblock th>p:last-child,table.tableblock td>p:last-child{margin-bottom:0}
+table.spread{width:100%}
+table.tableblock,th.tableblock,td.tableblock{border:0 solid #dedede}
+table.grid-all th.tableblock,table.grid-all td.tableblock{border-width:0 1px 1px 0}
+table.grid-all tfoot>tr>th.tableblock,table.grid-all tfoot>tr>td.tableblock{border-width:1px 1px 0 0}
+table.grid-cols th.tableblock,table.grid-cols td.tableblock{border-width:0 1px 0 0}
+table.grid-all *>tr>.tableblock:last-child,table.grid-cols *>tr>.tableblock:last-child{border-right-width:0}
+table.grid-rows th.tableblock,table.grid-rows td.tableblock{border-width:0 0 1px 0}
+table.grid-all tbody>tr:last-child>th.tableblock,table.grid-all tbody>tr:last-child>td.tableblock,table.grid-all thead:last-child>tr>th.tableblock,table.grid-rows tbody>tr:last-child>th.tableblock,table.grid-rows tbody>tr:last-child>td.tableblock,table.grid-rows thead:last-child>tr>th.tableblock{border-bottom-width:0}
+table.grid-rows tfoot>tr>th.tableblock,table.grid-rows tfoot>tr>td.tableblock{border-width:1px 0 0 0}
+table.frame-all{border-width:1px}
+table.frame-sides{border-width:0 1px}
+table.frame-topbot{border-width:1px 0}
+th.halign-left,td.halign-left{text-align:left}
+th.halign-right,td.halign-right{text-align:right}
+th.halign-center,td.halign-center{text-align:center}
+th.valign-top,td.valign-top{vertical-align:top}
+th.valign-bottom,td.valign-bottom{vertical-align:bottom}
+th.valign-middle,td.valign-middle{vertical-align:middle}
+table thead th,table tfoot th{font-weight:bold}
+tbody tr th{display:table-cell;line-height:1.6;background:#f7f8f7}
+tbody tr th,tbody tr th p,tfoot tr th,tfoot tr th p{color:rgba(0,0,0,.8);font-weight:bold}
+p.tableblock>code:only-child{background:none;padding:0}
+p.tableblock{font-size:1em}
+td>div.verse{white-space:pre}
+ol{margin-left:1.75em}
+ul li ol{margin-left:1.5em}
+dl dd{margin-left:1.125em}
+dl dd:last-child,dl dd:last-child>:last-child{margin-bottom:0}
+ol>li p,ul>li p,ul dd,ol dd,.olist .olist,.ulist .ulist,.ulist .olist,.olist .ulist{margin-bottom:.625em}
+ul.unstyled,ol.unnumbered,ul.checklist,ul.none{list-style-type:none}
+ul.unstyled,ol.unnumbered,ul.checklist{margin-left:.625em}
+ul.checklist li>p:first-child>.fa-square-o:first-child,ul.checklist li>p:first-child>.fa-check-square-o:first-child{width:1em;font-size:.85em}
+ul.checklist li>p:first-child>input[type="checkbox"]:first-child{width:1em;position:relative;top:1px}
+ul.inline{margin:0 auto .625em auto;margin-left:-1.375em;margin-right:0;padding:0;list-style:none;overflow:hidden}
+ul.inline>li{list-style:none;float:left;margin-left:1.375em;display:block}
+ul.inline>li>*{display:block}
+.unstyled dl dt{font-weight:400;font-style:normal}
+ol.arabic{list-style-type:decimal}
+ol.decimal{list-style-type:decimal-leading-zero}
+ol.loweralpha{list-style-type:lower-alpha}
+ol.upperalpha{list-style-type:upper-alpha}
+ol.lowerroman{list-style-type:lower-roman}
+ol.upperroman{list-style-type:upper-roman}
+ol.lowergreek{list-style-type:lower-greek}
+.hdlist>table,.colist>table{border:0;background:none}
+.hdlist>table>tbody>tr,.colist>table>tbody>tr{background:none}
+td.hdlist1{padding-right:.75em;font-weight:bold}
+td.hdlist1,td.hdlist2{vertical-align:top}
+.literalblock+.colist,.listingblock+.colist{margin-top:-.5em}
+.colist>table tr>td:first-of-type{padding:0 .75em;line-height:1}
+.colist>table tr>td:last-of-type{padding:.25em 0}
+.thumb,.th{line-height:0;display:inline-block;border:solid 4px #fff;-webkit-box-shadow:0 0 0 1px #ddd;box-shadow:0 0 0 1px #ddd}
+.imageblock.left,.imageblock[style*="float: left"]{margin:.25em .625em 1.25em 0}
+.imageblock.right,.imageblock[style*="float: right"]{margin:.25em 0 1.25em .625em}
+.imageblock>.title{margin-bottom:0}
+.imageblock.thumb,.imageblock.th{border-width:6px}
+.imageblock.thumb>.title,.imageblock.th>.title{padding:0 .125em}
+.image.left,.image.right{margin-top:.25em;margin-bottom:.25em;display:inline-block;line-height:0}
+.image.left{margin-right:.625em}
+.image.right{margin-left:.625em}
+a.image{text-decoration:none}
+span.footnote,span.footnoteref{vertical-align:super;font-size:.875em}
+span.footnote a,span.footnoteref a{text-decoration:none}
+span.footnote a:active,span.footnoteref a:active{text-decoration:underline}
+#footnotes{padding-top:.75em;padding-bottom:.75em;margin-bottom:.625em}
+#footnotes hr{width:20%;min-width:6.25em;margin:-.25em 0 .75em 0;border-width:1px 0 0 0}
+#footnotes .footnote{padding:0 .375em;line-height:1.3;font-size:.875em;margin-left:1.2em;text-indent:-1.2em;margin-bottom:.2em}
+#footnotes .footnote a:first-of-type{font-weight:bold;text-decoration:none}
+#footnotes .footnote:last-of-type{margin-bottom:0}
+#content #footnotes{margin-top:-.625em;margin-bottom:0;padding:.75em 0}
+.gist .file-data>table{border:0;background:#fff;width:100%;margin-bottom:0}
+.gist .file-data>table td.line-data{width:99%}
+div.unbreakable{page-break-inside:avoid}
+.big{font-size:larger}
+.small{font-size:smaller}
+.underline{text-decoration:underline}
+.overline{text-decoration:overline}
+.line-through{text-decoration:line-through}
+.aqua{color:#00bfbf}
+.aqua-background{background-color:#00fafa}
+.black{color:#000}
+.black-background{background-color:#000}
+.blue{color:#0000bf}
+.blue-background{background-color:#0000fa}
+.fuchsia{color:#bf00bf}
+.fuchsia-background{background-color:#fa00fa}
+.gray{color:#606060}
+.gray-background{background-color:#7d7d7d}
+.green{color:#006000}
+.green-background{background-color:#007d00}
+.lime{color:#00bf00}
+.lime-background{background-color:#00fa00}
+.maroon{color:#600000}
+.maroon-background{background-color:#7d0000}
+.navy{color:#000060}
+.navy-background{background-color:#00007d}
+.olive{color:#606000}
+.olive-background{background-color:#7d7d00}
+.purple{color:#600060}
+.purple-background{background-color:#7d007d}
+.red{color:#bf0000}
+.red-background{background-color:#fa0000}
+.silver{color:#909090}
+.silver-background{background-color:#bcbcbc}
+.teal{color:#006060}
+.teal-background{background-color:#007d7d}
+.white{color:#bfbfbf}
+.white-background{background-color:#fafafa}
+.yellow{color:#bfbf00}
+.yellow-background{background-color:#fafa00}
+span.icon>.fa{cursor:default}
+.admonitionblock td.icon [class^="fa icon-"]{font-size:2.5em;text-shadow:1px 1px 2px rgba(0,0,0,.5);cursor:default}
+.admonitionblock td.icon .icon-note:before{content:"\f05a";color:#19407c}
+.admonitionblock td.icon .icon-tip:before{content:"\f0eb";text-shadow:1px 1px 2px rgba(155,155,0,.8);color:#111}
+.admonitionblock td.icon .icon-warning:before{content:"\f071";color:#bf6900}
+.admonitionblock td.icon .icon-caution:before{content:"\f06d";color:#bf3400}
+.admonitionblock td.icon .icon-important:before{content:"\f06a";color:#bf0000}
+.conum[data-value]{display:inline-block;color:#fff!important;background-color:rgba(0,0,0,.8);-webkit-border-radius:100px;border-radius:100px;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold}
+.conum[data-value] *{color:#fff!important}
+.conum[data-value]+b{display:none}
+.conum[data-value]:after{content:attr(data-value)}
+pre .conum[data-value]{position:relative;top:-.125em}
+b.conum *{color:inherit!important}
+.conum:not([data-value]):empty{display:none}
+h1,h2{letter-spacing:-.01em}
+dt,th.tableblock,td.content{text-rendering:optimizeLegibility}
+p,td.content{letter-spacing:-.01em}
+p strong,td.content strong{letter-spacing:-.005em}
+p,blockquote,dt,td.content{font-size:1.0625rem}
+p{margin-bottom:1.25rem}
+.sidebarblock p,.sidebarblock dt,.sidebarblock td.content,p.tableblock{font-size:1em}
+.exampleblock>.content{background-color:#fffef7;border-color:#e0e0dc;-webkit-box-shadow:0 1px 4px #e0e0dc;box-shadow:0 1px 4px #e0e0dc}
+.print-only{display:none!important}
+@media print{@page{margin:1.25cm .75cm}
+*{-webkit-box-shadow:none!important;box-shadow:none!important;text-shadow:none!important}
+a{color:inherit!important;text-decoration:underline!important}
+a.bare,a[href^="#"],a[href^="mailto:"]{text-decoration:none!important}
+a[href^="http:"]:not(.bare):after,a[href^="https:"]:not(.bare):after{content:"(" attr(href) ")";display:inline-block;font-size:.875em;padding-left:.25em}
+abbr[title]:after{content:" (" attr(title) ")"}
+pre,blockquote,tr,img{page-break-inside:avoid}
+thead{display:table-header-group}
+img{max-width:100%!important}
+p,blockquote,dt,td.content{font-size:1em;orphans:3;widows:3}
+h2,h3,#toctitle,.sidebarblock>.content>.title{page-break-after:avoid}
+#toc,.sidebarblock,.exampleblock>.content{background:none!important}
+#toc{border-bottom:1px solid #ddddd8!important;padding-bottom:0!important}
+.sect1{padding-bottom:0!important}
+.sect1+.sect1{border:0!important}
+#header>h1:first-child{margin-top:1.25rem}
+body.book #header{text-align:center}
+body.book #header>h1:first-child{border:0!important;margin:2.5em 0 1em 0}
+body.book #header .details{border:0!important;display:block;padding:0!important}
+body.book #header .details span:first-child{margin-left:0!important}
+body.book #header .details br{display:block}
+body.book #header .details br+span:before{content:none!important}
+body.book #toc{border:0!important;text-align:left!important;padding:0!important;margin:0!important}
+body.book #toc,body.book #preamble,body.book h1.sect0,body.book .sect1>h2{page-break-before:always}
+.listingblock code[data-lang]:before{display:block}
+#footer{background:none!important;padding:0 .9375em}
+#footer-text{color:rgba(0,0,0,.6)!important;font-size:.9em}
+.hide-on-print{display:none!important}
+.print-only{display:block!important}
+.hide-for-print{display:none!important}
+.show-for-print{display:inherit!important}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/92aa9dc8/src/main/asciidoc/hbase_apis.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/hbase_apis.adoc b/src/main/asciidoc/hbase_apis.adoc
new file mode 100644
index 0000000..0e20ac5
--- /dev/null
+++ b/src/main/asciidoc/hbase_apis.adoc
@@ -0,0 +1,138 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[[hbase_apis]]
+= Apache HBase APIs
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+:docinfo1:
+
+This chapter provides information about performing operations using HBase native APIs.
+This information is not exhaustive, and provides a quick reference in addition to the link:http://hbase.apache.org/apidocs/index.html[User API Reference].
+The examples here are not comprehensive or complete, and should be used for purposes of illustration only.
+
+Apache HBase also works with multiple external APIs.
+See <<external_apis,external apis>> for more information.
+
+== Examples
+
+.Create a Table Using Java
+====
+This example has been tested on HBase 0.96.1.1.
+
+[source,java]
+----
+
+package com.example.hbase.admin;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.conf.Configuration;
+
+import static com.example.hbase.Constants.*;
+
+public class CreateSchema {
+
+  public static void createOrOverwrite(HBaseAdmin admin, HTableDescriptor table) throws IOException {
+    if (admin.tableExists(table.getName())) {
+      admin.disableTable(table.getName());
+      admin.deleteTable(table.getName());
+    }
+    admin.createTable(table);
+  }
+
+  public static void createSchemaTables(Configuration config) {
+    try {
+      final HBaseAdmin admin = new HBaseAdmin(config);
+      HTableDescriptor table = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
+      table.addFamily(new HColumnDescriptor(CF_DEFAULT).setCompressionType(Algorithm.SNAPPY));
+
+      System.out.print("Creating table. ");
+      createOrOverwrite(admin, table);
+      System.out.println(" Done.");
+
+      admin.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      System.exit(-1);
+    }
+  }
+
+
+}
+----
+====
+
+.Add, Modify, and Delete a Table
+====
+This example has been tested on HBase 0.96.1.1.
+
+[source,java]
+----
+
+public static void upgradeFrom0(Configuration config) {
+
+    try {
+      final HBaseAdmin admin = new HBaseAdmin(config);
+      TableName tableName = TableName.valueOf(TABLE_ASSETMETA);
+      HTableDescriptor table_assetmeta = new HTableDescriptor(tableName);
+      table_assetmeta.addFamily(new HColumnDescriptor(CF_DEFAULT).setCompressionType(Algorithm.SNAPPY));
+
+      // Create a new table.
+
+      System.out.print("Creating table_assetmeta. ");
+      admin.createTable(table_assetmeta);
+      System.out.println(" Done.");
+
+      // Update existing table
+      HColumnDescriptor newColumn = new HColumnDescriptor("NEWCF");
+      newColumn.setCompactionCompressionType(Algorithm.GZ);
+      newColumn.setMaxVersions(HConstants.ALL_VERSIONS);
+      admin.addColumn(tableName, newColumn);
+
+      // Disable an existing table
+      admin.disableTable(tableName);
+
+      // Delete an existing column family
+      admin.deleteColumn(tableName, CF_DEFAULT);
+
+      // Delete a table (Need to be disabled first)
+      admin.deleteTable(tableName);
+
+
+      admin.close();
+    } catch (Exception e) {
+      e.printStackTrace();
+      System.exit(-1);
+    }
+  }
+----
+====

http://git-wip-us.apache.org/repos/asf/hbase/blob/92aa9dc8/src/main/asciidoc/hbase_history.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/hbase_history.adoc b/src/main/asciidoc/hbase_history.adoc
new file mode 100644
index 0000000..4011fb3
--- /dev/null
+++ b/src/main/asciidoc/hbase_history.adoc
@@ -0,0 +1,38 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[appendix]
+[[hbase.history]]
+== HBase History
+
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+:docinfo1:
+
+* 2006:  link:http://research.google.com/archive/bigtable.html[BigTable] paper published by Google.
+* 2006 (end of year):  HBase development starts.
+* 2008:  HBase becomes a Hadoop sub-project.
+* 2010:  HBase becomes an Apache top-level project.
+
+:numbered:

http://git-wip-us.apache.org/repos/asf/hbase/blob/92aa9dc8/src/main/asciidoc/hbck_in_depth.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/hbck_in_depth.adoc b/src/main/asciidoc/hbck_in_depth.adoc
new file mode 100644
index 0000000..c5de5fa
--- /dev/null
+++ b/src/main/asciidoc/hbck_in_depth.adoc
@@ -0,0 +1,213 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[appendix]
+[[hbck.in.depth]]
+== hbck In Depth
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+:docinfo1:
+
+HBaseFsck (hbck) is a tool for checking for region consistency and table integrity problems and repairing a corrupted HBase.
+It works in two basic modes -- a read-only inconsistency identifying mode and a multi-phase read-write repair mode. 
+
+=== Running hbck to identify inconsistencies
+
+To check to see if your HBase cluster has corruptions, run hbck against your HBase cluster:
+
+[source,bourne]
+----
+
+$ ./bin/hbase hbck
+----
+
+At the end of the command's output it prints OK or tells you the number of INCONSISTENCIES present.
+You may also want to run hbck a few times because some inconsistencies can be transient (e.g.
+the cluster is starting up or a region is splitting). Operationally you may want to run hbck regularly and set up an alert (e.g.
+via Nagios) if it repeatedly reports inconsistencies. A run of hbck will report a list of inconsistencies along with a brief description of the regions and tables affected.
+Using the [code]+-details+ option will report more details, including a representative listing of all the splits present in all the tables.
+
+[source,bourne]
+----
+
+$ ./bin/hbase hbck -details
+----
+
+If you just want to know if some tables are corrupted, you can limit hbck to identify inconsistencies in only specific tables.
+For example, the following command would only attempt to check tables TableFoo and TableBar.
+The benefit is that hbck will run in less time.
+
+[source,bourne]
+----
+
+$ ./bin/hbase hbck TableFoo TableBar
+----
+
+=== Inconsistencies
+
+If after several runs, inconsistencies continue to be reported, you may have encountered a corruption.
+These should be rare, but in the event they occur newer versions of HBase include the hbck tool enabled with automatic repair options. 
+
+There are two invariants that when violated create inconsistencies in HBase: 
+
+* HBase's region consistency invariant is satisfied if every region is assigned and deployed on exactly one region server, and all places where this state is kept are in accordance.
+* HBase's table integrity invariant is satisfied if for each table, every possible row key resolves to exactly one region.
+
+Repairs generally work in three phases -- a read-only information gathering phase that identifies inconsistencies, a table integrity repair phase that restores the table integrity invariant, and then finally a region consistency repair phase that restores the region consistency invariant.
+Starting from version 0.90.0, hbck could detect region consistency problems and report on a subset of possible table integrity problems.
+It also included the ability to automatically fix the most common inconsistencies: region assignment and deployment consistency problems.
+This repair could be done by using the [code]+-fix+ command line option.
+These repairs close regions if they are open on the wrong server or on multiple region servers, and also assign regions to region servers if they are not open.
+
+Starting from HBase versions 0.90.7, 0.92.2 and 0.94.0, several new command line options were introduced to aid in repairing a corrupted HBase.
+This hbck sometimes goes by the nickname ``uberhbck''. Each particular version of uberhbck is compatible with HBase clusters of the same major version (the 0.90.7 uberhbck can repair a 0.90.4 cluster). However, versions <=0.90.6 and versions <=0.92.1 may require restarting the master or failing over to a backup master.
+
+=== Localized repairs
+
+When repairing a corrupted HBase, it is best to repair the lowest risk inconsistencies first.
+These are generally region consistency repairs -- localized single-region repairs that only modify in-memory data, modify ephemeral zookeeper data, or patch holes in the META table.
+Region consistency requires that the region's data in HDFS (.regioninfo files), the region's row in the hbase:meta table, and the region's deployment/assignments on region servers and the master are all in accordance.
+Options for repairing region consistency include: 
+
+* [code]+-fixAssignments+ (equivalent to the 0.90 [code]+-fix+ option) repairs unassigned, incorrectly assigned or multiply assigned regions.
+* [code]+-fixMeta+ which removes meta rows when corresponding regions are not present in HDFS and adds new meta rows if the regions are present in HDFS but not in META.
+
+To fix deployment and assignment problems you can run this command:
+
+[source,bourne]
+----
+
+$ ./bin/hbase hbck -fixAssignments
+----
+
+To fix deployment and assignment problems as well as repairing incorrect meta rows you can run this command:
+
+[source,bourne]
+----
+
+$ ./bin/hbase hbck -fixAssignments -fixMeta
+----
+
+There are a few classes of table integrity problems that are low-risk repairs.
+The first two are degenerate (startkey == endkey) regions and backwards regions (startkey > endkey). These are automatically handled by sidelining the data to a temporary directory (/hbck/xxxx). The third low-risk class is HDFS region holes.
+This can be repaired by using:
+
+* [code]+-fixHdfsHoles+ option for fabricating new empty regions on the file system.
+  If holes are detected you can use -fixHdfsHoles and should include -fixMeta and -fixAssignments to make the new region consistent.
+
+[source,bourne]
+----
+
+$ ./bin/hbase hbck -fixAssignments -fixMeta -fixHdfsHoles
+----
+
+Since this is a common operation, we've added the [code]+-repairHoles+ flag that is equivalent to the previous command:
+
+[source,bourne]
+----
+
+$ ./bin/hbase hbck -repairHoles
+----
+
+If inconsistencies still remain after these steps, you most likely have table integrity problems related to orphaned or overlapping regions.
+
+=== Region Overlap Repairs
+
+Table integrity problems can require repairs that deal with overlaps.
+This is a riskier operation because it requires modifications to the file system, requires some decision making, and may require some manual steps.
+For these repairs it is best to analyze the output of a [code]+hbck -details+ run so that you isolate repair attempts to only the problems the checks identify.
+Because this is riskier, there are safeguards that should be used to limit the scope of the repairs.
+WARNING: These repair options are relatively new and have only been tested on online but idle HBase instances (no reads/writes). Use at your own risk in an active production environment! The options for repairing table integrity violations include:
+
+* [code]+-fixHdfsOrphans+ option for ``adopting'' a region directory that is missing a region metadata file (the .regioninfo file).
+* [code]+-fixHdfsOverlaps+ option for fixing overlapping regions
+
+When repairing overlapping regions, a region's data can be modified on the file system in two ways: 1) by merging regions into a larger region or 2) by sidelining regions, moving their data to a ``sideline'' directory from which the data could be restored later.
+Merging a large number of regions is technically correct but could result in an extremely large region that requires a series of costly compactions and splitting operations.
+In these cases, it is probably better to sideline the regions that overlap with the most other regions (likely the largest ranges) so that merges can happen on a more reasonable scale.
+Since these sidelined regions are already laid out in HBase's native directory and HFile format, they can be restored by using HBase's bulk load mechanism.
+The default safeguard thresholds are conservative.
+These options let you override the default thresholds and to enable the large region sidelining feature.
+
+* [code]+-maxMerge <n>+ maximum number of overlapping regions to merge
+* [code]+-sidelineBigOverlaps+ if more than maxMerge regions are overlapping, attempt to sideline the regions overlapping with the most other regions.
+* [code]+-maxOverlapsToSideline <n>+ if sidelining large overlapping regions, sideline at most n regions.
+
+Since often you just want to get the tables repaired, you can use this option to turn on all repair options:
+
+* [code]+-repair+ includes all the region consistency options and only the hole repairing table integrity options.
+
+Finally, there are safeguards to limit repairs to only specific tables.
+For example, the following command would only attempt to check and repair tables TableFoo and TableBar.
+
+----
+
+$ ./bin/hbase hbck -repair TableFoo TableBar
+----
+
+==== Special cases: Meta is not properly assigned
+
+There are a few special cases that hbck can handle as well.
+Sometimes the meta table's only region is inconsistently assigned or deployed.
+In this case there is a special [code]+-fixMetaOnly+ option that can try to fix meta assignments.
+
+----
+
+$ ./bin/hbase hbck -fixMetaOnly -fixAssignments
+----
+
+==== Special cases: HBase version file is missing
+
+HBase's data on the file system requires a version file in order to start.
+If this file is missing, you can use the [code]+-fixVersionFile+ option to fabricate a new HBase version file.
+This assumes that the version of hbck you are running is the appropriate version for the HBase cluster.
+
+==== Special case: Root and META are corrupt.
+
+The most drastic corruption scenario is the case where the ROOT or META is corrupted and HBase will not start.
+In this case you can use the OfflineMetaRepair tool to create new ROOT and META regions and tables.
+This tool assumes that HBase is offline.
+It then marches through the existing HBase home directory and loads as much information as possible from the region metadata files (.regioninfo files) on the file system.
+If the region metadata has proper table integrity, it sidelines the original root and meta table directories, and builds new ones with pointers to the region directories and their data.
+
+----
+
+$ ./bin/hbase org.apache.hadoop.hbase.util.hbck.OfflineMetaRepair
+----
+
+NOTE: This tool is not as clever as uberhbck but can be used to bootstrap repairs that uberhbck can complete.
+If the tool succeeds you should be able to start hbase and run online repairs if necessary.
+
+==== Special cases: Offline split parent
+
+Once a region is split, the offline parent will be cleaned up automatically.
+Sometimes, daughter regions are split again before their parents are cleaned up.
+HBase can clean up parents in the right order.
+However, sometimes there are lingering offline split parents: they are in META and in HDFS, but not deployed, and HBase cannot clean them up.
+In this case, you can use the [code]+-fixSplitParents+ option to reset them in META to be online and not split.
+hbck can then merge them with other regions if the option for fixing overlapping regions is used.
+
+This option should not normally be used, and it is not in [code]+-fixAll+. 
+
+:numbered:

http://git-wip-us.apache.org/repos/asf/hbase/blob/92aa9dc8/src/main/asciidoc/mapreduce.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/mapreduce.adoc b/src/main/asciidoc/mapreduce.adoc
new file mode 100644
index 0000000..fd9e7b6
--- /dev/null
+++ b/src/main/asciidoc/mapreduce.adoc
@@ -0,0 +1,589 @@
+////
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+////
+
+[[mapreduce]]
+= HBase and MapReduce
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+:docinfo1:
+
+Apache MapReduce is a software framework used to analyze large amounts of data, and is the framework used most often with link:http://hadoop.apache.org/[Apache Hadoop].
+MapReduce itself is out of the scope of this document.
+A good place to get started with MapReduce is link:http://hadoop.apache.org/docs/r1.2.1/mapred_tutorial.html.
+MapReduce version 2 (MR2) is now part of link:http://hadoop.apache.org/docs/r2.3.0/hadoop-yarn/hadoop-yarn-site/[YARN].
+
+This chapter discusses specific configuration steps you need to take to use MapReduce on data within HBase.
+In addition, it discusses other interactions and issues between HBase and MapReduce jobs. 
+
+.mapred and mapreduce
+[NOTE]
+====
+There are two mapreduce packages in HBase as in MapReduce itself: [path]_org.apache.hadoop.hbase.mapred_ and [path]_org.apache.hadoop.hbase.mapreduce_.
+The former uses the old-style API and the latter the new style.
+The latter has more facilities, though you can usually find an equivalent in the older package.
+Pick the package that goes with your MapReduce deployment.
+When in doubt or starting over, pick [path]_org.apache.hadoop.hbase.mapreduce_.
+In the notes below, we refer to o.a.h.h.mapreduce, but replace it with o.a.h.h.mapred if that is what you are using.
+A minimal mapper sketch using the new-style package follows this note.
+====  
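+
+To illustrate the new-style package, here is a minimal mapper sketch built on [path]_org.apache.hadoop.hbase.mapreduce_; the class name and the emitted key are hypothetical and only meant to show the shape of the API.
+
+[source,java]
+----
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.mapreduce.TableMapper;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+
+// Hypothetical mapper: emits a count of 1 for every row it reads.
+public class MyRowCountMapper extends TableMapper<Text, IntWritable> {
+
+  private static final IntWritable ONE = new IntWritable(1);
+  private static final Text ROWS = new Text("rows");
+
+  @Override
+  protected void map(ImmutableBytesWritable row, Result value, Context context)
+      throws IOException, InterruptedException {
+    // The row key and Result are handed to us by TableInputFormat.
+    context.write(ROWS, ONE);
+  }
+}
+----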
+
+[[hbase.mapreduce.classpath]]
+== HBase, MapReduce, and the CLASSPATH
+
+By default, MapReduce jobs deployed to a MapReduce cluster do not have access to either the HBase configuration under [var]+$HBASE_CONF_DIR+ or the HBase classes.
+
+To give the MapReduce jobs the access they need, you could add [path]_hbase-site.xml_ to the [path]_$HADOOP_HOME/conf/_ directory and add the HBase JARs to the [path]_$HADOOP_HOME/lib/_ directory, then copy these changes across your cluster.
+Alternatively, you could edit [path]_$HADOOP_HOME/conf/hadoop-env.sh_ and add the HBase dependencies to the [var]+HADOOP_CLASSPATH+ variable.
+However, this approach is not recommended because it will pollute your Hadoop install with HBase references.
+It also requires you to restart the Hadoop cluster before Hadoop can use the HBase data.
+
+Since HBase 0.90.x, HBase adds its dependency JARs to the job configuration itself.
+The dependencies only need to be available on the local CLASSPATH.
+The following example runs the bundled HBase link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] MapReduce job against a table named [systemitem]+usertable+. If you have not set the environment variables expected in the command (the parts prefixed by a [literal]+$+ sign and curly braces), you can use the actual system paths instead.
+Be sure to use the correct version of the HBase JAR for your system.
+The backticks ([literal]+`+ symbols) cause the shell to execute the sub-commands, setting the CLASSPATH as part of the command.
+This example assumes you use a BASH-compatible shell. 
+
+----
+$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server-VERSION.jar rowcounter usertable
+----
+
+When the command runs, internally the HBase JAR finds the dependencies it needs -- zookeeper, guava, and others -- on the passed [var]+HADOOP_CLASSPATH+ and adds the JARs to the MapReduce job configuration.
+See the source at TableMapReduceUtil#addDependencyJars(org.apache.hadoop.mapreduce.Job) for how this is done. 
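+
+If you configure your own [code]+Job+ rather than using the bundled driver, the same mechanism can be invoked directly. The following is a minimal sketch with a hypothetical job name; note that the [code]+TableMapReduceUtil.initTable*Job+ helpers normally call [code]+addDependencyJars+ for you.
+
+[source,java]
+----
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.mapreduce.Job;
+
+public class JobSetup {
+  public static Job createJob() throws IOException {
+    Configuration conf = HBaseConfiguration.create();
+    Job job = Job.getInstance(conf, "my-hbase-job"); // hypothetical job name
+    // Ship HBase's dependency JARs with the job rather than editing the
+    // Hadoop installation's classpath.
+    TableMapReduceUtil.addDependencyJars(job);
+    return job;
+  }
+}
+----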
+
+[NOTE]
+====
+The example may not work if you are running HBase from its build directory rather than an installed location.
+You may see an error like the following:
+
+----
+java.lang.RuntimeException: java.lang.ClassNotFoundException: org.apache.hadoop.hbase.mapreduce.RowCounter$RowCounterMapper
+----
+
+If this occurs, try modifying the command as follows, so that it uses the HBase JARs from the [path]_target/_ directory within the build environment.
+
+----
+$ HADOOP_CLASSPATH=${HBASE_HOME}/hbase-server/target/hbase-server-VERSION-SNAPSHOT.jar:`${HBASE_HOME}/bin/hbase classpath` ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server/target/hbase-server-VERSION-SNAPSHOT.jar rowcounter usertable
+----
+====
+
+.Notice to MapReduce users of HBase 0.96.1 and above
+[CAUTION]
+====
+Some mapreduce jobs that use HBase fail to launch.
+The symptom is an exception similar to the following:
+
+----
+
+Exception in thread "main" java.lang.IllegalAccessError: class
+    com.google.protobuf.ZeroCopyLiteralByteString cannot access its superclass
+    com.google.protobuf.LiteralByteString
+    at java.lang.ClassLoader.defineClass1(Native Method)
+    at java.lang.ClassLoader.defineClass(ClassLoader.java:792)
+    at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
+    at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
+    at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
+    at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
+    at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
+    at java.security.AccessController.doPrivileged(Native Method)
+    at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
+    at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
+    at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
+    at
+    org.apache.hadoop.hbase.protobuf.ProtobufUtil.toScan(ProtobufUtil.java:818)
+    at
+    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.convertScanToString(TableMapReduceUtil.java:433)
+    at
+    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initTableMapperJob(TableMapReduceUtil.java:186)
+    at
+    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initTableMapperJob(TableMapReduceUtil.java:147)
+    at
+    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initTableMapperJob(TableMapReduceUtil.java:270)
+    at
+    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initTableMapperJob(TableMapReduceUtil.java:100)
+...
+----
+
+This is caused by an optimization introduced in link:https://issues.apache.org/jira/browse/HBASE-9867[HBASE-9867] that inadvertently introduced a classloader dependency. 
+
+This affects both jobs using the [code]+-libjars+ option and ``fat jar'' jobs, those which package their runtime dependencies in a nested [code]+lib+ folder.
+
+In order to satisfy the new classloader requirements, hbase-protocol.jar must be included in Hadoop's classpath.
+See <<hbase.mapreduce.classpath,hbase.mapreduce.classpath>> for current recommendations for resolving classpath errors.
+The following is included for historical purposes.
+
+This can be resolved system-wide by including a reference to the hbase-protocol.jar in hadoop's lib directory, via a symlink or by copying the jar into the new location.
+
+This can also be achieved on a per-job launch basis by including it in the [code]+HADOOP_CLASSPATH+ environment variable at job submission time.
+When launching jobs that package their dependencies, all three of the following job launching commands satisfy this requirement:
+
+----
+
+$ HADOOP_CLASSPATH=/path/to/hbase-protocol.jar:/path/to/hbase/conf hadoop jar MyJob.jar MyJobMainClass
+$ HADOOP_CLASSPATH=$(hbase mapredcp):/path/to/hbase/conf hadoop jar MyJob.jar MyJobMainClass
+$ HADOOP_CLASSPATH=$(hbase classpath) hadoop jar MyJob.jar MyJobMainClass
+----
+
+For jars that do not package their dependencies, the following command structure is necessary:
+
+----
+
+$ HADOOP_CLASSPATH=$(hbase mapredcp):/etc/hbase/conf hadoop jar MyApp.jar MyJobMainClass -libjars $(hbase mapredcp | tr ':' ',') ...
+----
+
+See also link:https://issues.apache.org/jira/browse/HBASE-10304[HBASE-10304] for further discussion of this issue.
+====
+
+== MapReduce Scan Caching
+
+TableMapReduceUtil now restores the option to set scanner caching (the number of rows which are cached before returning the result to the client) on the Scan object that is passed in.
+This functionality was lost due to a bug in HBase 0.95 (link:https://issues.apache.org/jira/browse/HBASE-11558[HBASE-11558]), which is fixed for HBase 0.98.5 and 0.96.3.
+The priority order for choosing the scanner caching is as follows:
+
+. Caching settings which are set on the scan object.
+. Caching settings which are specified via the configuration option +hbase.client.scanner.caching+, which can either be set manually in [path]_hbase-site.xml_ or via the helper method [code]+TableMapReduceUtil.setScannerCaching()+.
+. The default value [code]+HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING+, which is set to [literal]+100+.
+
+Optimizing the caching settings is a balance between the time the client waits for a result and the number of sets of results the client needs to receive.
+If the caching setting is too large, the client could end up waiting for a long time or the request could even time out.
+If the setting is too small, the scan needs to return results in several pieces.
+If you think of the scan as a shovel, a bigger cache setting is analogous to a bigger shovel, and a smaller cache setting is equivalent to more shoveling in order to fill the bucket.
+
+The list of priorities mentioned above allows you to set a reasonable default, and override it for specific operations.
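+
+As a minimal sketch of both levels (the job and value choices are placeholders):
+
+[source,java]
+----
+Configuration config = HBaseConfiguration.create();
+Job job = new Job(config, "ExampleScanCaching");
+TableMapReduceUtil.setScannerCaching(job, 200);   // job-wide default via hbase.client.scanner.caching
+
+Scan scan = new Scan();
+scan.setCaching(500);   // set on the Scan object, so it takes priority for this scan
+----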
+
+See the API documentation for link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html[Scan] for more details.
+
+== Bundled HBase MapReduce Jobs
+
+The HBase JAR also serves as a Driver for some bundled MapReduce jobs.
+To learn about the bundled MapReduce jobs, run the following command.
+
+----
+$ ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server-VERSION.jar
+An example program must be given as the first argument.
+Valid program names are:
+  copytable: Export a table from local cluster to peer cluster
+  completebulkload: Complete a bulk data load.
+  export: Write table data to HDFS.
+  import: Import data written by Export.
+  importtsv: Import data in TSV format.
+  rowcounter: Count rows in HBase table
+----
+
+Each of the valid program names is a bundled MapReduce job.
+To run one of the jobs, model your command after the following example.
+
+----
+$ ${HADOOP_HOME}/bin/hadoop jar ${HBASE_HOME}/hbase-server-VERSION.jar rowcounter myTable
+----
+
+== HBase as a MapReduce Job Data Source and Data Sink
+
+HBase can be used as a data source, link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html[TableInputFormat], and data sink, link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat] or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.html[MultiTableOutputFormat], for MapReduce jobs.
+When writing MapReduce jobs that read or write HBase, it is advisable to subclass link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapper.html[TableMapper] and/or link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableReducer.html[TableReducer].
+See the do-nothing pass-through classes link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.html[IdentityTableMapper] and link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.html[IdentityTableReducer] for basic usage.
+For a more involved example, see link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter] or review the [code]+org.apache.hadoop.hbase.mapreduce.TestTableMapReduce+ unit test.
+
+If you run MapReduce jobs that use HBase as a source or a sink, you need to specify the source and sink table and column names in your configuration.
+
+When you read from HBase, the [code]+TableInputFormat+ requests the list of regions from HBase and creates one map task per region, or [code]+mapreduce.job.maps+ map tasks, whichever number is smaller.
+If your job only has two maps, raise [code]+mapreduce.job.maps+ to a number greater than the number of regions.
+Maps will run on the adjacent TaskTracker if you are running a TaskTracker and RegionServer per node.
+When writing to HBase, it may make sense to avoid the Reduce step and write back into HBase from within your map.
+This approach works when your job does not need the sort and collation that MapReduce does on the map-emitted data.
+On insert, HBase 'sorts' so there is no point double-sorting (and shuffling data around your MapReduce cluster) unless you need to.
+If you do not need the Reduce, your map might emit counts of records processed for reporting at the end of the job, or set the number of Reduces to zero and use TableOutputFormat.
+If running the Reduce step makes sense in your case, you should typically use multiple reducers so that load is spread across the HBase cluster.
+
+A new HBase partitioner, the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.html[HRegionPartitioner], can run as many reducers as there are existing regions.
+The HRegionPartitioner is suitable when your table is large and your upload will not greatly alter the number of existing regions upon completion.
+Otherwise use the default partitioner. 
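+
+As a sketch of how the partitioner is wired in (the table and reducer names below are placeholders), [code]+TableMapReduceUtil.initTableReducerJob+ has an overload that accepts a partitioner class:
+
+[source,java]
+----
+TableMapReduceUtil.initTableReducerJob(
+	targetTable,               // output table
+	MyTableReducer.class,      // reducer class
+	job,
+	HRegionPartitioner.class); // partition map output by region, one reducer per region's keyspace
+----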
+
+== Writing HFiles Directly During Bulk Import
+
+If you are importing into a new table, you can bypass the HBase API and write your content directly to the filesystem, formatted into HBase data files (HFiles). Your import will run faster, perhaps an order of magnitude faster.
+For more on how this mechanism works, see <<arch.bulk.load,arch.bulk.load>>.
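+
+A rough sketch of the job wiring, assuming a mapper that emits [class]+ImmutableBytesWritable+/[class]+Put+ pairs (the table handle, region locator, and output path below are placeholders; see <<arch.bulk.load,arch.bulk.load>> for the complete procedure, including loading the generated files):
+
+[source,java]
+----
+Job job = new Job(HBaseConfiguration.create(), "ExampleBulkImport");
+job.setMapOutputKeyClass(ImmutableBytesWritable.class);
+job.setMapOutputValueClass(Put.class);
+// Configures the job to sort and partition its output into HFiles that match the
+// target table's current region boundaries.
+HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
+FileOutputFormat.setOutputPath(job, new Path("/tmp/hfiles"));   // adjust as required
+----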
+
+== RowCounter Example
+
+The included link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter]        MapReduce job uses [code]+TableInputFormat+ and does a count of all rows in the specified table.
+To run it, use the following command: 
+
+----
+$ ./bin/hadoop jar hbase-X.X.X.jar
+----
+
+This will invoke the HBase MapReduce Driver class.
+Select [literal]+rowcounter+ from the choice of jobs offered.
+This will print rowcounter usage advice to standard output.
+Specify the tablename, column to count, and output directory.
+If you have classpath errors, see <<hbase.mapreduce.classpath,hbase.mapreduce.classpath>>.
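+
+For example, a full invocation against a hypothetical table [literal]+usertable+, counting only rows that have data in the column [literal]+info:name+, might look like the following (the table and column names are placeholders):
+
+----
+$ ./bin/hadoop jar hbase-X.X.X.jar rowcounter usertable info:name
+----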
+
+[[splitter]]
+== Map-Task Splitting
+
+[[splitter.default]]
+=== The Default HBase MapReduce Splitter
+
+When link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html[TableInputFormat]          is used to source an HBase table in a MapReduce job, its splitter will make a map task for each region of the table.
+Thus, if there are 100 regions in the table, there will be 100 map-tasks for the job - regardless of how many column families are selected in the Scan.
+
+[[splitter.custom]]
+=== Custom Splitters
+
+For those interested in implementing custom splitters, see the method [code]+getSplits+ in link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.html[TableInputFormatBase].
+That is where the logic for map-task assignment resides. 
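+
+A minimal sketch of the pattern (the class name is a placeholder): subclass the input format, call [code]+super.getSplits()+ to get the per-region splits, and post-process the list before returning it.
+
+[source,java]
+----
+public class MyTableInputFormat extends TableInputFormat {
+
+  @Override
+  public List<InputSplit> getSplits(JobContext context) throws IOException {
+    List<InputSplit> splits = super.getSplits(context);
+    // filter, merge, or re-order the region-based splits here
+    return splits;
+  }
+}
+----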
+
+[[mapreduce.example]]
+== HBase MapReduce Examples
+
+[[mapreduce.example.read]]
+=== HBase MapReduce Read Example
+
+The following is an example of using HBase as a MapReduce source in a read-only manner.
+Specifically, there is a Mapper instance but no Reducer, and nothing is being emitted from the Mapper.
+The job would be defined as follows...
+
+[source,java]
+----
+
+Configuration config = HBaseConfiguration.create();
+Job job = new Job(config, "ExampleRead");
+job.setJarByClass(MyReadJob.class);     // class that contains mapper
+
+Scan scan = new Scan();
+scan.setCaching(500);        // 1 is the default in Scan, which will be bad for MapReduce jobs
+scan.setCacheBlocks(false);  // don't set to true for MR jobs
+// set other scan attrs
+...
+
+TableMapReduceUtil.initTableMapperJob(
+  tableName,        // input HBase table name
+  scan,             // Scan instance to control CF and attribute selection
+  MyMapper.class,   // mapper
+  null,             // mapper output key
+  null,             // mapper output value
+  job);
+job.setOutputFormatClass(NullOutputFormat.class);   // because we aren't emitting anything from mapper
+
+boolean b = job.waitForCompletion(true);
+if (!b) {
+  throw new IOException("error with job!");
+}
+----
+
+...and the mapper instance would extend link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapper.html[TableMapper]...
+
+[source,java]
+----
+
+public static class MyMapper extends TableMapper<Text, Text> {
+
+  public void map(ImmutableBytesWritable row, Result value, Context context) throws InterruptedException, IOException {
+    // process data for the row from the Result instance.
+   }
+}
+----
+
+[[mapreduce.example.readwrite]]
+=== HBase MapReduce Read/Write Example
+
+The following is an example of using HBase both as a source and as a sink with MapReduce.
+This example will simply copy data from one table to another.
+
+[source,java]
+----
+
+Configuration config = HBaseConfiguration.create();
+Job job = new Job(config,"ExampleReadWrite");
+job.setJarByClass(MyReadWriteJob.class);    // class that contains mapper
+
+Scan scan = new Scan();
+scan.setCaching(500);        // 1 is the default in Scan, which will be bad for MapReduce jobs
+scan.setCacheBlocks(false);  // don't set to true for MR jobs
+// set other scan attrs
+
+TableMapReduceUtil.initTableMapperJob(
+	sourceTable,      // input table
+	scan,	          // Scan instance to control CF and attribute selection
+	MyMapper.class,   // mapper class
+	null,	          // mapper output key
+	null,	          // mapper output value
+	job);
+TableMapReduceUtil.initTableReducerJob(
+	targetTable,      // output table
+	null,             // reducer class
+	job);
+job.setNumReduceTasks(0);
+
+boolean b = job.waitForCompletion(true);
+if (!b) {
+    throw new IOException("error with job!");
+}
+----
+
+It is worth explaining what [class]+TableMapReduceUtil+ is doing here, especially with the reducer. link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat] is being used as the outputFormat class, and several parameters are being set on the config (e.g., TableOutputFormat.OUTPUT_TABLE), as well as setting the reducer output key to [class]+ImmutableBytesWritable+ and the reducer value to [class]+Writable+.
+These could be set by the programmer on the job and conf, but [class]+TableMapReduceUtil+ tries to make things easier.
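+
+For illustration, a rough sketch of the manual equivalent of what [code]+initTableReducerJob+ sets up (not a complete list of everything the helper does):
+
+[source,java]
+----
+job.setOutputFormatClass(TableOutputFormat.class);
+job.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, targetTable);
+job.setOutputKeyClass(ImmutableBytesWritable.class);
+job.setOutputValueClass(Writable.class);
+----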
+
+The following is the example mapper, which creates a [class]+Put+ matching the input [class]+Result+ and emits it.
+Note: this is what the CopyTable utility does. 
+
+[source,java]
+----
+
+public static class MyMapper extends TableMapper<ImmutableBytesWritable, Put>  {
+
+	public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException, InterruptedException {
+		// this example is just copying the data from the source table...
+   		context.write(row, resultToPut(row,value));
+   	}
+
+  	private static Put resultToPut(ImmutableBytesWritable key, Result result) throws IOException {
+  		Put put = new Put(key.get());
+ 		for (KeyValue kv : result.raw()) {
+			put.add(kv);
+		}
+		return put;
+   	}
+}
+----
+
+There isn't actually a reducer step, so [class]+TableOutputFormat+ takes care of sending the [class]+Put+ to the target table. 
+
+This is just an example; developers could choose not to use [class]+TableOutputFormat+ and connect to the target table themselves, as in the following sketch.
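+
+This is a hedged sketch of that alternative (the target table name is a placeholder, and connection handling is deliberately minimal): the mapper opens its own connection in [code]+setup()+, writes through a [class]+BufferedMutator+, and closes everything in [code]+cleanup()+.
+
+[source,java]
+----
+public static class MyWritingMapper extends TableMapper<NullWritable, NullWritable> {
+
+	private Connection connection;
+	private BufferedMutator mutator;
+
+	public void setup(Context context) throws IOException {
+		connection = ConnectionFactory.createConnection(context.getConfiguration());
+		mutator = connection.getBufferedMutator(TableName.valueOf("targetTable")); // placeholder name
+	}
+
+	public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException {
+		// copy the source row into a Put and hand it to the mutator
+		Put put = new Put(row.get());
+		for (Cell cell : value.rawCells()) {
+			put.add(cell);
+		}
+		mutator.mutate(put);
+	}
+
+	public void cleanup(Context context) throws IOException {
+		mutator.close();
+		connection.close();
+	}
+}
+----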
+
+[[mapreduce.example.readwrite.multi]]
+=== HBase MapReduce Read/Write Example With Multi-Table Output
+
+TODO: example for [class]+MultiTableOutputFormat+. 
+
+[[mapreduce.example.summary]]
+=== HBase MapReduce Summary to HBase Example
+
+The following example uses HBase as a MapReduce source and sink with a summarization step.
+This example will count the number of distinct instances of a value in a table and write those summarized counts in another table. 
+
+[source,java]
+----
+Configuration config = HBaseConfiguration.create();
+Job job = new Job(config,"ExampleSummary");
+job.setJarByClass(MySummaryJob.class);     // class that contains mapper and reducer
+
+Scan scan = new Scan();
+scan.setCaching(500);        // 1 is the default in Scan, which will be bad for MapReduce jobs
+scan.setCacheBlocks(false);  // don't set to true for MR jobs
+// set other scan attrs
+
+TableMapReduceUtil.initTableMapperJob(
+	sourceTable,        // input table
+	scan,               // Scan instance to control CF and attribute selection
+	MyMapper.class,     // mapper class
+	Text.class,         // mapper output key
+	IntWritable.class,  // mapper output value
+	job);
+TableMapReduceUtil.initTableReducerJob(
+	targetTable,        // output table
+	MyTableReducer.class,    // reducer class
+	job);
+job.setNumReduceTasks(1);   // at least one, adjust as required
+
+boolean b = job.waitForCompletion(true);
+if (!b) {
+	throw new IOException("error with job!");
+}
+----          
+
+In this example mapper, a column with a String value is chosen as the value to summarize upon.
+This value is used as the key to emit from the mapper, and an [class]+IntWritable+ represents an instance counter. 
+
+[source,java]
+----
+public static class MyMapper extends TableMapper<Text, IntWritable>  {
+	public static final byte[] CF = "cf".getBytes();
+	public static final byte[] ATTR1 = "attr1".getBytes();
+
+	private final IntWritable ONE = new IntWritable(1);
+   	private Text text = new Text();
+
+   	public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException, InterruptedException {
+        	String val = new String(value.getValue(CF, ATTR1));
+          	text.set(val);     // we can only emit Writables...
+
+        	context.write(text, ONE);
+   	}
+}
+----          
+
+In the reducer, the "ones" are counted (just like any other MR example that does this), and then a [class]+Put+ is emitted.
+
+[source,java]
+----
+public static class MyTableReducer extends TableReducer<Text, IntWritable, ImmutableBytesWritable>  {
+	public static final byte[] CF = "cf".getBytes();
+	public static final byte[] COUNT = "count".getBytes();
+
+ 	public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
+    		int i = 0;
+    		for (IntWritable val : values) {
+    			i += val.get();
+    		}
+    		Put put = new Put(Bytes.toBytes(key.toString()));
+    		put.add(CF, COUNT, Bytes.toBytes(i));
+
+    		context.write(null, put);
+   	}
+}
+----        
+
+[[mapreduce.example.summary.file]]
+=== HBase MapReduce Summary to File Example
+
+This is very similar to the summary example above, with the exception that it uses HBase as a MapReduce source but HDFS as the sink.
+The differences are in the job setup and in the reducer.
+The mapper remains the same. 
+
+[source,java]
+----
+Configuration config = HBaseConfiguration.create();
+Job job = new Job(config,"ExampleSummaryToFile");
+job.setJarByClass(MySummaryFileJob.class);     // class that contains mapper and reducer
+
+Scan scan = new Scan();
+scan.setCaching(500);        // 1 is the default in Scan, which will be bad for MapReduce jobs
+scan.setCacheBlocks(false);  // don't set to true for MR jobs
+// set other scan attrs
+
+TableMapReduceUtil.initTableMapperJob(
+	sourceTable,        // input table
+	scan,               // Scan instance to control CF and attribute selection
+	MyMapper.class,     // mapper class
+	Text.class,         // mapper output key
+	IntWritable.class,  // mapper output value
+	job);
+job.setReducerClass(MyReducer.class);    // reducer class
+job.setNumReduceTasks(1);    // at least one, adjust as required
+FileOutputFormat.setOutputPath(job, new Path("/tmp/mr/mySummaryFile"));  // adjust directories as required
+
+boolean b = job.waitForCompletion(true);
+if (!b) {
+	throw new IOException("error with job!");
+}
+----
+
+As stated above, the previous Mapper can run unchanged with this example.
+As for the Reducer, it is a "generic" Reducer instead of extending TableReducer and emitting Puts.
+
+[source,java]
+----
+public static class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable>  {
+
+	public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
+		int i = 0;
+		for (IntWritable val : values) {
+			i += val.get();
+		}
+		context.write(key, new IntWritable(i));
+	}
+}
+----
+
+[[mapreduce.example.summary.noreducer]]
+=== HBase MapReduce Summary to HBase Without Reducer
+
+It is also possible to perform summaries without a reducer - if you use HBase as the reducer. 
+
+An HBase target table would need to exist for the job summary.
+The Table method [code]+incrementColumnValue+ would be used to atomically increment values.
+From a performance perspective, it might make sense to keep a Map of values with their counts to be incremented for each map-task, and make one update per key during the [code]+cleanup+ method of the mapper.
+However, your mileage may vary depending on the number of rows to be processed and the number of unique keys.
+
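+A minimal sketch of this approach follows (the summary-table handle and column names are placeholders; obtaining and closing the summary-table connection is elided):
+
+[source,java]
+----
+public static class MySummingMapper extends TableMapper<Text, IntWritable>  {
+	public static final byte[] CF = "cf".getBytes();
+	public static final byte[] ATTR1 = "attr1".getBytes();
+	public static final byte[] COUNT = "count".getBytes();
+
+	private Table summaryTable;                          // obtain from a Connection in setup()
+	private Map<String, Long> counts = new HashMap<String, Long>();
+
+	public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException, InterruptedException {
+		// buffer the counts in memory instead of emitting anything
+		String val = new String(value.getValue(CF, ATTR1));
+		Long current = counts.get(val);
+		counts.put(val, current == null ? 1L : current + 1);
+	}
+
+	public void cleanup(Context context) throws IOException {
+		// one atomic increment per distinct key, rather than one per row
+		for (Map.Entry<String, Long> entry : counts.entrySet()) {
+			summaryTable.incrementColumnValue(Bytes.toBytes(entry.getKey()), CF, COUNT, entry.getValue());
+		}
+	}
+}
+----
+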
+In the end, the summary results are in HBase. 
+
+[[mapreduce.example.summary.rdbms]]
+=== HBase MapReduce Summary to RDBMS
+
+Sometimes it is more appropriate to generate summaries to an RDBMS.
+For these cases, it is possible to generate summaries directly to an RDBMS via a custom reducer.
+The [code]+setup+ method can connect to an RDBMS (the connection information can be passed via custom parameters in the context) and the [code]+cleanup+ method can close the connection.
+
+It is critical to understand that the number of reducers for the job affects the summarization implementation, and you'll have to design this into your reducer.
+Specifically, whether it is designed to run as a singleton (one reducer) or as multiple reducers.
+Neither is right or wrong; it depends on your use case.
+Recognize that the more reducers that are assigned to the job, the more simultaneous connections to the RDBMS will be created - this will scale, but only to a point. 
+
+[source,java]
+----
+
+ public static class MyRdbmsReducer extends Reducer<Text, IntWritable, Text, IntWritable>  {
+
+	private Connection c = null;   // a java.sql.Connection, not an HBase client Connection
+
+	public void setup(Context context) {
+  		// create DB connection...
+  	}
+
+	public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
+		// do summarization
+		// in this example the keys are Text, but this is just an example
+	}
+
+	public void cleanup(Context context) {
+  		// close db connection
+  	}
+
+}
+----
+
+In the end, the summary results are written to your RDBMS table/s. 
+
+[[mapreduce.htable.access]]
+== Accessing Other HBase Tables in a MapReduce Job
+
+Although the framework currently allows one HBase table as input to a MapReduce job, other HBase tables can be accessed as lookup tables, etc., in a MapReduce job, by creating a Table instance in the setup method of the Mapper.
+[source,java]
+----
+public class MyMapper extends TableMapper<Text, LongWritable> {
+  private Connection connection;
+  private Table myOtherTable;
+
+  public void setup(Context context) throws IOException {
+    // Create a Connection to the cluster (or reuse an existing one) and save it,
+    // then obtain the lookup table from it. Close both in cleanup().
+    connection = ConnectionFactory.createConnection(context.getConfiguration());
+    myOtherTable = connection.getTable(TableName.valueOf("myOtherTable"));
+  }
+
+  public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException, InterruptedException {
+    // process Result...
+    // use 'myOtherTable' for lookups
+  }
+}
+----      
+
+[[mapreduce.specex]]
+== Speculative Execution
+
+It is generally advisable to turn off speculative execution for MapReduce jobs that use HBase as a source.
+This can either be done on a per-Job basis through properties, or for the entire cluster.
+Especially for longer running jobs, speculative execution will create duplicate map-tasks which will double-write your data to HBase; this is probably not what you want. 
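+
+For example, a per-job sketch (these are the MRv2 property names; the MRv1 equivalents differ):
+
+[source,java]
+----
+Configuration conf = job.getConfiguration();
+conf.setBoolean("mapreduce.map.speculative", false);
+conf.setBoolean("mapreduce.reduce.speculative", false);
+----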
+
+See <<spec.ex,spec.ex>> for more information. 

