hive-commits mailing list archives

From hashut...@apache.org
Subject hive git commit: HIVE-15515 : Remove the docs directory (Akira Ajisaka via Ashutosh Chauhan)
Date Thu, 30 Mar 2017 22:11:24 GMT
Repository: hive
Updated Branches:
  refs/heads/master dcb2831d9 -> db983d6c6


HIVE-15515 : Remove the docs directory (Akira Ajisaka via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <hashutosh@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/db983d6c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/db983d6c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/db983d6c

Branch: refs/heads/master
Commit: db983d6c6e082e142f6945a00e7b043fdfad6c9d
Parents: dcb2831
Author: Akira Ajisaka <aajisaka@apache.org>
Authored: Tue Mar 7 23:28:00 2017 -0800
Committer: Ashutosh Chauhan <hashutosh@apache.org>
Committed: Thu Mar 30 15:10:36 2017 -0700

----------------------------------------------------------------------
 docs/changes/ChangesFancyStyle.css              | 170 ----------
 docs/changes/ChangesSimpleStyle.css             |  49 ---
 docs/changes/changes2html.pl                    | 282 -----------------
 docs/site.css                                   | 305 ------------------
 docs/stylesheets/project.xml                    |  41 ---
 docs/stylesheets/site.vsl                       | 317 -------------------
 docs/velocity.properties                        |  17 -
 docs/xdocs/index.xml                            |  38 ---
 docs/xdocs/language_manual/cli.xml              | 208 ------------
 .../data-manipulation-statements.xml            | 234 --------------
 docs/xdocs/language_manual/joins.xml            | 212 -------------
 docs/xdocs/language_manual/var_substitution.xml | 130 --------
 .../working_with_bucketed_tables.xml            |  87 -----
 docs/xdocs/udf/reflect.xml                      |  51 ---
 packaging/src/main/assembly/src.xml             |   1 -
 pom.xml                                         |   1 -
 16 files changed, 2143 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/changes/ChangesFancyStyle.css
----------------------------------------------------------------------
diff --git a/docs/changes/ChangesFancyStyle.css b/docs/changes/ChangesFancyStyle.css
deleted file mode 100644
index 5eef241..0000000
--- a/docs/changes/ChangesFancyStyle.css
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * General
- */
-
-img { border: 0; }
-
-#content table {
-  border: 0;
-  width: 100%;
-}
-/*Hack to get IE to render the table at 100%*/
-* html #content table { margin-left: -3px; }
-
-#content th,
-#content td {
-  margin: 0;
-  padding: 0;
-  vertical-align: top;
-}
-
-.clearboth {
-  clear: both;
-}
-
-.note, .warning, .fixme {
-  border: solid black 1px;
-  margin: 1em 3em;
-}
-
-.note .label {
-  background: #369;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.note .content {
-  background: #F0F0FF;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.warning .label {
-  background: #C00;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.warning .content {
-  background: #FFF0F0;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.fixme .label {
-  background: #C6C600;
-  color: black;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.fixme .content {
-  padding: 5px 10px;
-}
-
-/**
- * Typography
- */
-
-body {
-  font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif;
-  font-size: 100%;
-}
-
-#content {
-  font-family: Georgia, Palatino, Times, serif;
-  font-size: 95%;
-}
-#tabs {
-  font-size: 70%;
-}
-#menu {
-  font-size: 80%;
-}
-#footer {
-  font-size: 70%;
-}
-
-h1, h2, h3, h4, h5, h6 {
-  font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif;
-  font-weight: bold;
-  margin-top: 1em;
-  margin-bottom: .5em;
-}
-
-h1 {
-    margin-top: 0;
-    margin-bottom: 1em;
-  font-size: 1.4em;
-  background-color: #73CAFF;
-}
-#content h1 {
-  font-size: 160%;
-  margin-bottom: .5em;
-}
-#menu h1 {
-  margin: 0;
-  padding: 10px;
-  background: #336699;
-  color: white;
-}
-h2 { 
-  font-size: 120%;
-  background-color: #73CAFF;
-}
-h3 { font-size: 100%; }
-h4 { font-size: 90%; }
-h5 { font-size: 80%; }
-h6 { font-size: 75%; }
-
-p {
-  line-height: 120%;
-  text-align: left;
-  margin-top: .5em;
-  margin-bottom: 1em;
-}
-
-#content li,
-#content th,
-#content td,
-#content li ul,
-#content li ol{
-  margin-top: .5em;
-  margin-bottom: .5em;
-}
-
-
-#content li li,
-#minitoc-area li{
-  margin-top: 0em;
-  margin-bottom: 0em;
-}
-
-#content .attribution {
-  text-align: right;
-  font-style: italic;
-  font-size: 85%;
-  margin-top: 1em;
-}
-
-.codefrag {
-  font-family: "Courier New", Courier, monospace;
-  font-size: 110%;
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/changes/ChangesSimpleStyle.css
----------------------------------------------------------------------
diff --git a/docs/changes/ChangesSimpleStyle.css b/docs/changes/ChangesSimpleStyle.css
deleted file mode 100644
index 407d0f1..0000000
--- a/docs/changes/ChangesSimpleStyle.css
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-body {
-  font-family: Courier New, monospace;
-  font-size: 10pt;
-}
-
-h1 {
-  font-family: Courier New, monospace;
-  font-size: 10pt;
-}
-
-h2 {
-  font-family: Courier New, monospace;
-  font-size: 10pt; 
-}
-
-h3 {
-  font-family: Courier New, monospace;
-  font-size: 10pt; 
-}
-
-a:link {
-  color: blue;
-}
-
-a:visited {
-  color: purple; 
-}
-
-li {
-  margin-top: 1em;
-  margin-bottom: 1em;
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/changes/changes2html.pl
----------------------------------------------------------------------
diff --git a/docs/changes/changes2html.pl b/docs/changes/changes2html.pl
deleted file mode 100644
index 03f0bbb..0000000
--- a/docs/changes/changes2html.pl
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/perl
-#
-# Transforms Lucene Java's CHANGES.txt into Changes.html
-#
-# Input is on STDIN, output is to STDOUT
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-use strict;
-use warnings;
-
-my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
-my $title = undef;
-my $release = undef;
-my $sections = undef;
-my $items = undef;
-my $first_relid = undef;
-my $second_relid = undef;
-my @releases = ();
-
-my @lines = <>;                        # Get all input at once
-
-#
-# Parse input and build hierarchical release structure in @releases
-#
-for (my $line_num = 0 ; $line_num <= $#lines ; ++$line_num) {
-  $_ = $lines[$line_num];
-  next unless (/\S/);                  # Skip blank lines
-
-  unless ($title) {
-    if (/\S/) {
-      s/^\s+//;                        # Trim leading whitespace
-      s/\s+$//;                        # Trim trailing whitespace
-    }
-    $title = $_;
-    next;
-  }
-
-  if (/^(Release)|(Trunk)/) {   # Release headings
-    $release = $_;
-    $sections = [];
-    push @releases, [ $release, $sections ];
-    ($first_relid = lc($release)) =~ s/\s+/_/g   if ($#releases == 0);
-    ($second_relid = lc($release)) =~ s/\s+/_/g  if ($#releases == 1);
-    $items = undef;
-    next;
-  }
-
-  # Section heading: 2 leading spaces, words all capitalized
-  if (/^  ([A-Z]+)\s*/) {
-    my $heading = $_;
-    $items = [];
-    push @$sections, [ $heading, $items ];
-    next;
-  }
-
-  # Handle earlier releases without sections - create a headless section
-  unless ($items) {
-    $items = [];
-    push @$sections, [ undef, $items ];
-  }
-
-  my $type;
-  if (@$items) { # A list item has been encountered in this section before
-    $type = $items->[0];  # 0th position of items array is list type
-  } else {
-    $type = get_list_type($_);
-    push @$items, $type;
-  }
-
-  if ($type eq 'numbered') { # The modern items list style
-    # List item boundary is another numbered item or an unindented line
-    my $line;
-    my $item = $_;
-    $item =~ s/^(\s{0,2}\d+\.\s*)//;       # Trim the leading item number
-    my $leading_ws_width = length($1);
-    $item =~ s/\s+$//;                     # Trim trailing whitespace
-    $item .= "\n";
-
-    while ($line_num < $#lines
-           and ($line = $lines[++$line_num]) !~ /^(?:\s{0,2}\d+\.\s*\S|\S)/) {
-      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
-      $line =~ s/\s+$//;                   # Trim trailing whitespace
-      $item .= "$line\n";
-    }
-    $item =~ s/\n+\Z/\n/;                  # Trim trailing blank lines
-    push @$items, $item;
-    --$line_num unless ($line_num == $#lines);
-  } elsif ($type eq 'paragraph') {         # List item boundary is a blank line
-    my $line;
-    my $item = $_;
-    $item =~ s/^(\s+)//;
-    my $leading_ws_width = defined($1) ? length($1) : 0;
-    $item =~ s/\s+$//;                     # Trim trailing whitespace
-    $item .= "\n";
-
-    while ($line_num < $#lines and ($line = $lines[++$line_num]) =~ /\S/) {
-      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
-      $line =~ s/\s+$//;                   # Trim trailing whitespace
-      $item .= "$line\n";
-    }
-    push @$items, $item;
-    --$line_num unless ($line_num == $#lines);
-  } else { # $type is one of the bulleted types
-    # List item boundary is another bullet or a blank line
-    my $line;
-    my $item = $_;
-    $item =~ s/^(\s*$type\s*)//;           # Trim the leading bullet
-    my $leading_ws_width = length($1);
-    $item =~ s/\s+$//;                     # Trim trailing whitespace
-    $item .= "\n";
-
-    while ($line_num < $#lines
-           and ($line = $lines[++$line_num]) !~ /^\s*(?:$type|\Z)/) {
-      $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace
-      $line =~ s/\s+$//;                   # Trim trailing whitespace
-      $item .= "$line\n";
-    }
-    push @$items, $item;
-    --$line_num unless ($line_num == $#lines);
-  }
-}
-
-#
-# Print HTML-ified version to STDOUT
-#
-print<<"__HTML_HEADER__";
-<!--
-**********************************************************
-** WARNING: This file is generated from CHANGES.txt by the 
-**          Perl script 'changes2html.pl'.
-**          Do *not* edit this file!
-**********************************************************
-          
-****************************************************************************
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-****************************************************************************
--->
-<html>
-<head>
-  <title>$title</title>
-  <link rel="stylesheet" href="ChangesFancyStyle.css" title="Fancy">
-  <link rel="alternate stylesheet" href="ChangesSimpleStyle.css" title="Simple">
-  <META http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
-  <SCRIPT>
-    function toggleList(e) {
-      element = document.getElementById(e).style;
-      element.display == 'none' ? element.display = 'block' : element.display='none';
-    }
-    function collapse() {
-      for (var i = 0; i < document.getElementsByTagName("ul").length; i++) {
-        var list = document.getElementsByTagName("ul")[i];
-        if (list.id != '$first_relid' && list.id != '$second_relid') {
-          list.style.display = "none";
-        }
-      }
-      for (var i = 0; i < document.getElementsByTagName("ol").length; i++) {
-        document.getElementsByTagName("ol")[i].style.display = "none"; 
-      }
-    }
-    window.onload = collapse;
-  </SCRIPT>
-</head>
-<body>
-
-<a href="http://hadoop.apache.org/hive/"><img class="logoImage" alt="Hive" src="images/hive-logo.jpg" title="SQL and Data Warehousing Platform on Hadoop"></a>
-<h1>$title</h1>
-
-__HTML_HEADER__
-
-my $heading;
-my $relcnt = 0;
-my $header = 'h2';
-for my $rel (@releases) {
-  if (++$relcnt == 3) {
-    $header = 'h3';
-    print "<h2><a href=\"javascript:toggleList('older')\">";
-    print "Older Releases";
-    print "</a></h2>\n";
-    print "<ul id=\"older\">\n"
-  }
-      
-  ($release, $sections) = @$rel;
-
-  # The first section heading is undefined for the older sectionless releases
-  my $has_release_sections = $sections->[0][0];
-
-  (my $relid = lc($release)) =~ s/\s+/_/g;
-  print "<$header><a href=\"javascript:toggleList('$relid')\">";
-  print "$release";
-  print "</a></$header>\n";
-  print "<ul id=\"$relid\">\n"
-    if ($has_release_sections);
-
-  for my $section (@$sections) {
-    ($heading, $items) = @$section;
-    (my $sectid = lc($heading)) =~ s/\s+/_/g;
-    my $numItemsStr = $#{$items} > 0 ? "($#{$items})" : "(none)";  
-
-    print "  <li><a href=\"javascript:toggleList('$relid.$sectid')\">",
-          ($heading || ''), "</a>&nbsp;&nbsp;&nbsp;$numItemsStr\n"
-      if ($has_release_sections);
-
-    my $list_type = $items->[0] || '';
-    my $list = ($has_release_sections || $list_type eq 'numbered' ? 'ol' : 'ul');
-    my $listid = $sectid ? "$relid.$sectid" : $relid;
-    print "    <$list id=\"$listid\">\n";
-
-    for my $itemnum (1..$#{$items}) {
-      my $item = $items->[$itemnum];
-      $item =~ s:&:&amp;:g;                            # Escape HTML metachars
-      $item =~ s:<:&lt;:g; 
-      $item =~ s:>:&gt;:g;
-
-      $item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;       # Separate attribution
-      $item =~ s:\n{2,}:\n<p/>\n:g;                    # Keep paragraph breaks
-      $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}  # Link to JIRA
-                {<a href="${jira_url_prefix}$1">$1</a>}g;
-      print "      <li>$item</li>\n";
-    }
-    print "    </$list>\n";
-    print "  </li>\n" if ($has_release_sections);
-  }
-  print "</ul>\n" if ($has_release_sections);
-}
-print "</ul>\n" if ($relcnt > 3);
-print "</body>\n</html>\n";
-
-
-#
-# Subroutine: get_list_type
-#
-# Takes one parameter:
-#
-#    - The first line of a sub-section/point
-#
-# Returns one scalar:
-#
-#    - The list type: 'numbered', 'paragraph', or one of the bulleted
-#      types '-' or '.'.
-#
-sub get_list_type {
-  my $first_list_item_line = shift;
-  my $type = 'paragraph'; # Default to paragraph type
-
-  if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) {
-    $type = 'numbered';
-  } elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) {
-    $type = $1;
-  }
-  return $type;
-}
-
-1;

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/site.css
----------------------------------------------------------------------
diff --git a/docs/site.css b/docs/site.css
deleted file mode 100644
index 49ca65a..0000000
--- a/docs/site.css
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.    
- */
-
-
-/** defined standard tags **/
-body {
-	background-color: #ffffff;
-	color: #000000;
-}
-
-a:link, a:active, a:visited {
-    color: #525D76;
-}
-
-
-h1 {
-	background-color: #525D76;
-	color: #ffffff;
-	font-family: arial,helvetica,sans-serif;
-	font-size: large;
-	padding-left:2px;
-}
-
-h2 {
-	background-color: #828DA6;
-	color: #ffffff;
-	font-family: arial,helvetica,sans-serif;
-	font-size: medium;
-	padding-left:2px;
-}
-
-table {
-	border: none;
-	border-spacing:0px;
-	border-collapse: collapse;
-}
-
-img {
-	border: none 0px;
-}
-
-/** define layout **/
-
-/** table used to force footer to end of page **/
-table#layout {
-	width:100%;
-}
-
-table#layout td {
-	padding:0px;
-}
-
-div#container {
-	width: 95%;
-	margin: 10px;
-	margin-left: 0;
-	margin-right: auto;
-	padding: 10px;
-}
-
-div#header {
-	padding: 5px;
-	margin: 0px;
-	margin-top:5px;
-	margin-bottom:5px;
-	height:80px;
-	border-bottom: 1px solid #333333;
-}
-
-div#menu {
-	float: left;
-	width: 200px;
-	margin: 0;
-	margin-left: 0px;
-	margin-right: 5px;
-
-	/** little higher margin since it doesn't start with a header **/
-	margin-top:10px;
-	margin-bottom:0px;
-
-	padding: 5px;
-}
-
-div#body {
-	margin-right:0px;
-	margin-left: 215px;
-	margin-top:5px;
-	margin-bottom:0px;
-
-	padding: 5px;
-
-}
-
-div#footer {
-
-	clear: both;
-
-	padding-top:15px;
-	margin-top:25px;
-	border-top: 1px solid #333333;
-
-
-	text-align:center;
-	color: #525D76;
-	font-style: italic;
-	font-size: smaller;
-}
-
-div#logo1 {
-	float:left;
-	margin-left:5px;
-	margin-top:10px;
-}
-
-
-div#logo2 {
-	float:right;
-	margin-top:10px;
-}
-
-
-/** define body tag redefinitions **/
-
-
-div#body th {
-	background-color: #039acc;
-	color: #000000;
-	font-family: arial,helvetica,sans-serif;
-	font-size: smaller;
-	vertical-align: top;
-	text-align:left;
-	border:1px #FFFFFF solid;
-	padding: 2px;
-}
-
-div#body td {
-	background-color: #a0ddf0;
-	color: #000000;
-	font-family: arial,helvetica,sans-serif;
-	font-size: smaller;
-	vertical-align: top;
-	text-align:left;
-	border:1px #FFFFFF solid;
-	padding: 2px;
-}
-
-
-div#body li {
-	 margin-top:3px;
-}
-
-/** define other body styles **/
-
-div.section {
-	margin-left: 25px;
-}
-
-div.subsection {
-	margin-left: 25px;
-}
-
-div.source {
-	margin-left:25px;
-	margin-top:20px;
-	margin-bottom:20px;
-	padding-left:4px;
-	padding-right:4px;
-	padding-bottom:4px;
-	padding-top:5px;
-
-	width: 600px;
-
-	border: 1px solid #333333;
-	background-color: #EEEEEE;
-	color: #333333;
-
-	/** bug: puts an extra line before the block in IE and after the block in FireFox **/
-	white-space: pre;
-
-	font-family: Courier;
-	font-size: smaller;
-	text-align: left;
-
-	overflow:auto;
-}
-
-
-div.license {
-	margin-left:0px;
-	margin-top:20px;
-	margin-bottom:20px;
-	padding:5px;
-
-	border: 1px solid #333333;
-	background-color: #EEEEEE;
-	color: #333333;
-
-	text-align: left;
-}
-
-/** define menu styles **/
-
-div.menusection {
-	margin-bottom:10px;
-}
-
-.menuheader {
-	font-weight:bold;
-	margin-bottom:0px;
-}
-
-div.menusection ul {
-	margin-top:5px;
-
-}
-div.menusection li {
-
-}
-
-
-
-
-/** printing **/
-@page Section1
-    {
-    size:8.5in 11.0in;
-    margin:1.0in .75in 1.0in .75in;
-}
-
-@media print {
-
-	/** make sure this fits the page **/
-	div#container {
-		width:100%;
-		min-height:0px;
-	}
-
-
-	div#menu {
-		display:none;
-	}
-
-	div#header {
-		display:none;
-	}
-
-	div#body {
-		margin-left:5px;
-	}
-
-
-	div.source {
-		width:95%;
-		margin-left:0px;
-	}
-
-	/** make a bit more room on the page **/
-	div.section {
-		margin-left: 0px;
-	}
-
-	div.subsection {
-		margin-left: 0px;
-	}
-
-	h1 {
-		background-color: #FFFFFF;
-		color: #000000;
-	}
-
-	h2 {
-		background-color: #FFFFFF;
-		color: #000000;
-	}
-
-	div#body td {
-		background-color: #FFFFFF;
-		color: #000000;
-		border: #333333 1px solid;
-	}
-
-	div#body th {
-		background-color: #FFFFFF;
-		color: #000000;
-		border: #333333 1px solid;
-		font-weight: bold;
-	}
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/stylesheets/project.xml
----------------------------------------------------------------------
diff --git a/docs/stylesheets/project.xml b/docs/stylesheets/project.xml
deleted file mode 100644
index 60bb75f..0000000
--- a/docs/stylesheets/project.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.    
--->
-
-<project name="Apache Hadoop Hive" href="http://hadoop.apache.org/hive">
-  <title>Hadoop Hive</title>
-  <logo href="images/hive-logo.jpg">Hadoop Hive</logo>
-  <body>
-    <menu name="Hadoop hive">
-      <item name="General"  href="/index.html" />
-    </menu>
-    <menu name="Hive Language Manual">
-      <item name="Data Manipulation Statements" href="/language_manual/data-manipulation-statements.html" />
-      <item name="Joins" href="/language_manual/joins.html" />
-      <item name="Cli" href="/language_manual/cli.html" />
-      <item name="Var Substitution" href="/language_manual/var_substitution.html" />
-    </menu>
-    <menu name="Developer Guide">
-      <item name="Issue Tracking (JIRA)" href="https://issues.apache.org/jira/browse/HIVE"/>
-    </menu>
-    <menu name="User Defined Functions">
-      <item name="reflect" href="/udf/reflect.html" />
-    </menu>
-  </body>
-</project>

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/stylesheets/site.vsl
----------------------------------------------------------------------
diff --git a/docs/stylesheets/site.vsl b/docs/stylesheets/site.vsl
deleted file mode 100644
index 9b23f40..0000000
--- a/docs/stylesheets/site.vsl
+++ /dev/null
@@ -1,317 +0,0 @@
-## Licensed to the Apache Software Foundation (ASF) under one
-## or more contributor license agreements.  See the NOTICE file
-## distributed with this work for additional information
-## regarding copyright ownership.  The ASF licenses this file
-## to you under the Apache License, Version 2.0 (the
-## "License"); you may not use this file except in compliance
-## with the License.  You may obtain a copy of the License at
-##
-##   http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing,
-## software distributed under the License is distributed on an
-## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-## KIND, either express or implied.  See the License for the
-## specific language governing permissions and limitations
-## under the License.    
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.    
--->
-
-<!-- start the processing -->
-#document()
-<!-- end the processing -->
-
-## This is where the macros live
-
-#macro ( table $table)
-<table>
-    #foreach ( $items in $table.getChildren() )
-        #if ($items.getName().equals("tr"))
-            #tr ($items)
-        #end
-    #end
-</table>
-#end
-
-#macro ( tr $tr)
-<tr>
-    #foreach ( $items in $tr.getChildren() )
-        #if ($items.getName().equals("td"))
-            #td ($items)
-        #elseif ($items.getName().equals("th"))
-            #th ($items)
-        #end
-    #end
-</tr>
-#end
-
-#macro ( td $value)
-#if ($value.getAttributeValue("colspan"))
-#set ($colspan = $value.getAttributeValue("colspan"))
-#end
-#if ($value.getAttributeValue("rowspan"))
-#set ($rowspan = $value.getAttributeValue("rowspan"))
-#end
-<td colspan="$!colspan" rowspan="$!rowspan">
-	#foreach ( $items in $value.getContent() )
-		#if($items.name)
-			#display($items)
-		#else
-			$items.value
-		#end
-	#end
-</td>
-#end
-
-#macro ( th $value)
-#if ($value.getAttributeValue("colspan"))
-#set ($colspan = $value.getAttributeValue("colspan"))
-#end
-#if ($value.getAttributeValue("rowspan"))
-#set ($rowspan = $value.getAttributeValue("rowspan"))
-#end
-<th colspan="$!colspan" rowspan="$!rowspan">
-	#foreach ( $items in $value.getContent() )
-		#if($items.name)
-			#display($items)
-		#else
-			$items.value
-		#end
-	#end
-</th>
-#end
-
-#macro ( projectanchor $name $value )
-#if ($value.startsWith("http://"))
-    <a href="$value">$name</a>
-#elseif ($value.startsWith("https://"))
-    <a href="$value">$name</a>
-#else
-    <a href="$relativePath$value">$name</a>
-#end
-#end
-
-#macro ( metaauthor $author $email )
-            <meta name="author" value="$author">
-            <meta name="email" value="$email">
-#end
-
-#macro ( image $value )
-#if ($value.getAttributeValue("width"))
-#set ($width=$value.getAttributeValue("width"))
-#end
-#if ($value.getAttributeValue("height"))
-#set ($height=$value.getAttributeValue("height"))
-#end
-#if ($value.getAttributeValue("align"))
-#set ($align=$value.getAttributeValue("align"))
-#end
-<img src="$relativePath$value.getAttributeValue("src")" width="$!width" height="$!height" align="$!align">
-#end
-
-#macro ( source $value)
-<div class="source">$escape.getText($value.getText())</div>
-#end
-
-
-## need these to catch special macros within lists
-#macro(list $node)
-<$node.getName()>
-	#foreach ( $items in $node.getChildren() )
-		#listitem($items)
-	#end
-</$node.getName()>
-#end
-
-#macro (listitem $node)
-<$node.getName()>
-## use getContent instead of getChildren
-## to include both text and nodes
-	#foreach ( $items in $node.getContent() )
-		#if($items.name)
-			#display($items)
-		#else
-			$items.value
-		#end
-	#end
-</$node.getName()>
-#end
-
-
-## # displays a basic node, calling macros if appropriate
-#macro ( display $node )
-		#if ($node.getName().equals("img"))
-			#image ($node)
-		#elseif ($node.getName().equals("source"))
-			#source ($node)
-		#elseif ($node.getName().equals("table"))
-			#table ($node)
-		#elseif ($node.getName().equals("ul"))
-			#list ($node)
-		#elseif ($node.getName().equals("ol"))
-			#list ($node)
-		#else
-			$node
-		#end
-#end
-
-#macro ( section $section)
-	<a name="#anchorName($section)"></a>
-	<h1>$section.getAttributeValue("name")</h1>
-
-	<div class="subsection">
-		#foreach ( $items in $section.getChildren() )
-			#if ($items.getName().equals("subsection"))
-				#subsection ($items)
-			#else
-				#display($items)
-			#end
-		#end
-	</div>
-#end
-
-#macro ( subsection $subsection)
-	<a name="#anchorName($subsection)"></a>
-	<h2>$subsection.getAttributeValue("name")</h2>
-	<div class="subsection">
-		#foreach ( $items in $subsection.getChildren() )
-			#display($items)
-		#end
-	</div>
-#end
-
-#macro ( anchorName $section)
-#if ($section.getAttributeValue("href"))
-$section.getAttributeValue("href")##
-#else
-$section.getAttributeValue("name")##
-#end
-#end
-
-#macro ( makeProject )
-
-    <!-- ============================================================ -->
-
-    #set ($menus = $project.getChild("body").getChildren("menu"))
-    #foreach ( $menu in $menus )
-    	<div class="menusection">
-    		<span class="menuheader">$menu.getAttributeValue("name")</span>
-			<ul>
-			#foreach ( $item in $menu.getChildren() )
-				#set ($name = $item.getAttributeValue("name"))
-				<li>#projectanchor($name $item.getAttributeValue("href"))</li>
-			#end
-			</ul>
-        </div>
-    #end
-#end
-
-#macro (getProjectImage)
-
-<div id="logo1">
-	<a href="http://hadoop.apache.org/hive/"><img src="${relativePath}/images/hive-logo.jpg" border="0"/></a>
-</div>
-
-
-#if ($project.getChild("logo"))
-
-<div id="logo2">
-
-#set ( $logoString = $project.getChild("logo").getAttributeValue("href") )
-#if ( $logoString.startsWith("/") )
-<a href="$project.getAttributeValue("href")"><img src="$relativePath$logoString" alt="$project.getChild("logo").getText()" border="0"/></a>
-#else
-<a href="$project.getAttributeValue("href")"><img src="$relativePath/$logoString" alt="$project.getChild("logo").getText()" border="0"/></a>
-#end
-
-</div>
-
-#end
-#end
-
-#macro (printMeta $metaElement)
-<meta #set ($attribs = $metaElement.getAttributes())
-#foreach ($a in $attribs) $a.getName()="$a.getValue()" #end />
-#end
-
-#macro (document)
-    <!-- ====================================================================== -->
-    <!-- GENERATED FILE, DO NOT EDIT, EDIT THE XML FILE IN xdocs INSTEAD! -->
-    <!-- Main Page Section -->
-    <!-- ====================================================================== -->
-    <html>
-        <head>
-            <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>
-
-            #set ($authors = $root.getChild("properties").getChildren("author"))
-            #foreach ( $au in $authors )
-                #metaauthor ( $au.getText() $au.getAttributeValue("email") )
-            #end
-
-           #set ($metas = $root.getChildren("meta"))
-
-            ##    Parse meta directives such as
-            ##    <meta name="keyword" content="apache, velocity, java"/>
-            #foreach ($meta in $metas) #printMeta($meta) #end
-
-            ##    Support for <base> tags.
-            #if ($root.getChild("properties").getChild("base"))
-              #set ($url = $root.getChild("properties").getChild("base").getAttributeValue("href"))
-              <base href="$url"/>
-            #end
-
-            <title>$project.getChild("title").getText() - $root.getChild("properties").getChild("title").getText()</title>
-
-			## use a relative CSS for when the page is displayed locally (will overwrite
-			## previous CSS settings)
-			<link rel="stylesheet" href="${relativePath}/site.css" type="text/css">
-        </head>
-
-        <body>
-
-			## use a table in order to force footer to end of page
-
-			<div id="container">
-
-				<div id="header">
-					#getProjectImage()
-				</div>
-
-				<div id="menu">
-					#makeProject()
-				</div>
-
-				<div id="body">
-					#set ($allSections = $root.getChild("body").getChildren("section"))
-					#foreach ( $section in $allSections )
-						#section ($section)
-					#end
-				</div>
-
-				<div id="footer">
-					Copyright &#169; 1999-2007, <a href="http://www.apache.org/">The Apache Software Foundation</a>.
-				</div>
-
-			</div>
-
-        </body>
-    </html>
-#end

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/velocity.properties
----------------------------------------------------------------------
diff --git a/docs/velocity.properties b/docs/velocity.properties
deleted file mode 100644
index 77ee2de..0000000
--- a/docs/velocity.properties
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-runtime.log=build/docs/velocity.log

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/xdocs/index.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/index.xml b/docs/xdocs/index.xml
deleted file mode 100644
index f1df3fa..0000000
--- a/docs/xdocs/index.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.    
--->
-<document>
-  <properties>
-    <title>Hadoop Hive</title>
-    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
-  </properties>
-  <body>
-    <section name="What is Hive?" href="WhatisHive?">
-      <p>Hive is a data warehouse infrastructure built on top of Hadoop. It provides tools to enable easy data ETL, a mechanism to put structure on the data, and the capability to query and analyze large data sets stored in Hadoop files. Hive defines a simple SQL-like query language, called QL, that enables users familiar with SQL to query the data. At the same time, this language also allows programmers who are familiar with the MapReduce framework to plug in their custom mappers and reducers to perform more sophisticated analysis that may not be supported by the built-in capabilities of the language.</p>
-
-<p>
-Hive does not mandate that data be read or written in the "Hive format"; there is no such thing. Hive works equally well on Thrift, control-delimited, or your own specialized data formats. Please see File Format and SerDe in the Developer Guide for details.</p>
-    </section>
-    <section name="What Hive is NOT" href="WhatHiveIsNot?">
-<p>Hive is based on Hadoop, which is a batch processing system. As a result, Hive does not and cannot promise low latencies on queries. The paradigm here is strictly one of submitting jobs and being notified when they are completed, as opposed to real-time queries. In contrast to systems such as Oracle, where analysis runs on a significantly smaller amount of data and proceeds iteratively with response times of less than a few minutes between iterations, Hive query response times can be on the order of several minutes even for the smallest jobs, while larger jobs (e.g., jobs processing terabytes of data) may in general run for hours.</p>
-
-<p>In summary, low latency performance is not the top priority of Hive's design principles. What Hive values most are scalability (scaling out with more machines added dynamically to the Hadoop cluster), extensibility (with the MapReduce framework and UDF/UDAF/UDTF), fault tolerance, and loose coupling with its input formats.</p>
-    </section>
-  </body>
-</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/xdocs/language_manual/cli.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/cli.xml b/docs/xdocs/language_manual/cli.xml
deleted file mode 100644
index aaa8e81..0000000
--- a/docs/xdocs/language_manual/cli.xml
+++ /dev/null
@@ -1,208 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
--->
-
-<document>
-
-  <properties>
-    <title>Hadoop Hive- Command Line Interface (CLI)</title>
-    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
-  </properties>
-
-  <body>
-<h3>Hive Cli</h3>
-<section name="Hive Command line Options" href="command_line_options">
-
-<p>Usage:</p>
-
-<source><![CDATA[Usage: hive [-hiveconf x=y]* [<-i filename>]* [<-f filename>|<-e query-string>] [-S]
-
-  -i <filename>             Initialization Sql from file (executed automatically and silently before any other commands)
-  -e 'quoted query string'  Sql from command line
-  -f <filename>             Sql from file
-  -S                        Silent mode in interactive shell where only data is emitted
-  -hiveconf x=y             Use this to set hive/hadoop configuration variables. 
-  
-   -e and -f cannot be specified together. In the absence of these options, interactive shell is started.  However, -i can be used with any other options.
-
-   To see this usage help, run hive -h
-]]></source>
-
-<ul>
-<li>Example of running a Query from the command line
-<source><![CDATA[$HIVE_HOME/bin/hive -e 'select a.col from tab1 a'
-   ]]></source>
-</li>
-
-<li>Example of setting hive configuration variables 
-<source><![CDATA[$HIVE_HOME/bin/hive -e 'select a.col from tab1 a' -hiveconf hive.exec.scratchdir=/home/my/hive_scratch  -hiveconf mapred.reduce.tasks=32
-   ]]></source>
-</li>
-
-<li>Example of dumping data out from a query into a file using silent mode 
-<source><![CDATA[$HIVE_HOME/bin/hive -S -e 'select a.col from tab1 a' > a.txt
-]]></source>
-</li>
-
-<li>Example of running a script non-interactively
-<source><![CDATA[$HIVE_HOME/bin/hive -f /home/my/hive-script.sql
-]]></source>
-</li>
-
-<li>Example of running an initialization script before entering interactive mode 
-<source><![CDATA[$HIVE_HOME/bin/hive -i /home/my/hive-init.sql
-]]></source>
-</li>
-
-</ul>
-</section>
-
-<section name="hiverc file " href="hiverc">
-<p>
-When invoked without the -i option, the CLI will attempt to load $HIVE_HOME/bin/.hiverc and $HOME/.hiverc as initialization files.
-</p>
-</section>
-
-<section name="Hive interactive shell commands" href="hive_interactive_shell_commands">
-When $HIVE_HOME/bin/hive is run without either the -e or -f option, it enters interactive shell mode.
-
-Use ";" (semicolon) to terminate commands. Comments in scripts can be specified using the "--" prefix. 
-
-<table border="1">
-
-<tr>
-<td><b>Command</b></td>
-<td><b>Description</b></td>
-</tr>
-
-<tr>
-<td>quit</td>
-<td>Use quit or exit to leave the interactive shell.</td>
-</tr>
-
-<tr>
-<td>set key=value</td>
-<td>Use this to set the value of a particular configuration variable. Note that if you misspell the variable name, the CLI will not show an error.</td>
-</tr>
-
-<tr>
-<td>set</td>
-<td>This will print a list of configuration variables that are overridden by the user or Hive.</td>
-</tr>
-
-
-<tr>
-<td>set -v </td>
-<td>This will print all hadoop and hive configuration variables.</td>
-</tr>
-
-
-<tr>
-<td>add FILE [file] [file]*</td>
-<td>Adds a file to the list of resources</td>
-</tr>
-
-<tr>
-<td>list FILE</td>
-<td>list all the files added to the distributed cache</td>
-</tr>
-
-<tr>
-<td>list FILE [file]*</td>
-<td>Check if given resources are already added to distributed cache</td>
-</tr>
-
-<tr>
-<td>! [cmd]</td>
-<td>Executes a shell command from the hive shell</td>
-</tr>
-
-<tr>
-<td>dfs [dfs cmd]</td>
-<td>Executes a dfs command from the hive shell</td>
-</tr>
-
-<tr>
-<td>[query]</td>
-<td>Executes a hive query and prints results to standard out</td>
-</tr>
-
-<tr>
-<td>source FILE</td>
-<td>Used to execute a script file inside the CLI.</td>
-</tr>
-
-</table>
-
-Sample Usage:
-
-<source><![CDATA[  hive> set  mapred.reduce.tasks=32;
-  hive> set;
-  hive> select a.* from tab1;
-  hive> !ls;
-  hive> dfs -ls;
-]]></source>
-
-</section>
-
-<section name="Logging" href="logging">
-<p>
-Hive uses log4j for logging. These logs are not emitted to the standard output by default but are instead captured to a log file specified by Hive's log4j properties file. By default Hive will use <i>hive-log4j2.properties</i> in the <i>conf/</i> directory of the hive installation which writes out logs to <i>/tmp/$USER/hive.log</i> and uses the <i>WARN</i> level.
-</p>
-<p>
-It is often desirable to emit the logs to the standard output and/or change the logging level for debugging purposes. These can be done from the command line as follows: </p>
-
-<source><![CDATA[$HIVE_HOME/bin/hive -hiveconf hive.root.logger=INFO,console ]]></source>
-<p>
-<i>hive.root.logger</i> specifies the logging level as well as the log destination. Specifying console as the target sends the logs to the standard error (instead of the log file).
-</p>
-</section>
-
-<section name="Hive Resources" href="Hive Resources">
-<p>
-Hive can manage the addition of resources to a session where those resources need to be made available at query execution time. Any locally accessible file can be added to the session. Once a file is added to a session, a Hive query can refer to this file by its name (in map/reduce/transform clauses), and the file is available locally at execution time on the entire Hadoop cluster. Hive uses Hadoop's Distributed Cache to distribute the added files to all the machines in the cluster at query execution time.</p>
-
-<source><![CDATA[   ADD { FILE[S] | JAR[S] | ARCHIVE[S] } <filepath1> [<filepath2>]*
-   LIST { FILE[S] | JAR[S] | ARCHIVE[S] } [<filepath1> <filepath2> ..]
-   DELETE { FILE[S] | JAR[S] | ARCHIVE[S] } [<filepath1> <filepath2> ..] ]]></source>
-
-<ul>
-<li>FILE resources are just added to the distributed cache. Typically, this might be something like a transform script to be executed.</li>
-<li>JAR resources are also added to the Java classpath. This is required in order to reference objects they contain such as UDF's. </li>
-<li>ARCHIVE resources are automatically unarchived as part of distributing them.  </li>
-</ul>
-
-<p>Example</p>
-
-<source><![CDATA[hive> add FILE /tmp/tt.py;
-hive> list FILES;
-/tmp/tt.py
-hive> from networks a  MAP a.networkid USING 'python tt.py' as nn where a.ds = '2009-01-04' limit  10; ]]></source>
-
-<p>It is not necessary to add files to the session if the files used in a transform script are already available on all machines in the Hadoop cluster under the same path name. For example: </p>
-
-<ul>
-<li>... MAP a.networkid USING 'wc -l' ...: here wc is an executable available on all machines</li>
-<li>... MAP a.networkid USING '/home/nfsserv1/hadoopscripts/tt.py' ...: here tt.py may be accessible via a nfs mount point that's configured identically on all the cluster nodes. </li>
-</ul>
-
-
-</section>
-</body>
-</document>
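
The hiverc section in the removed cli.xml above mentions $HIVE_HOME/bin/.hiverc and $HOME/.hiverc but never shows one. A minimal sketch of what such an initialization file might contain, assuming illustrative settings and a hypothetical UDF jar path (neither appears in the removed doc):

  -- $HOME/.hiverc (illustrative): HiveQL statements run silently before the
  -- interactive shell starts, exactly as if they were passed with -i.
  set hive.cli.print.header=true;        -- print column headers in query output
  set mapred.reduce.tasks=8;             -- assumed default reducer count
  ADD JAR /home/my/udfs/my-udfs.jar;     -- hypothetical jar with custom UDFs

Any statement the CLI accepts interactively is valid here; note that a misspelled variable name in a set command is accepted without error, as the interactive-commands table above points out.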

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/xdocs/language_manual/data-manipulation-statements.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/data-manipulation-statements.xml b/docs/xdocs/language_manual/data-manipulation-statements.xml
deleted file mode 100644
index 214a0dc..0000000
--- a/docs/xdocs/language_manual/data-manipulation-statements.xml
+++ /dev/null
@@ -1,234 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.    
--->
-
-<document>
-
-  <properties>
-    <title>Hadoop Hive- Data Manipulation Statements</title>
-    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
-  </properties>
-
-  <body>
-
-    <section name="Create Table Syntax" href="create_table_syntax">
-
-    <source><![CDATA[
-CREATE [EXTERNAL] TABLE [IF NOT EXISTS] table_name
-  [(col_name data_type [COMMENT col_comment], ...)]
-  [COMMENT table_comment]
-  [PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)]
-  [CLUSTERED BY (col_name, col_name, ...) [SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS]
-  [ROW FORMAT row_format]
-  [STORED AS file_format]
-  [LOCATION hdfs_path]
-  [TBLPROPERTIES (property_name=property_value, ...)]  
-  [AS select_statement]  
-
-CREATE [EXTERNAL] TABLE [IF NOT EXISTS] table_name
-  LIKE existing_table_name
-  [LOCATION hdfs_path]
-
-data_type
-  : primitive_type
-  | array_type
-  | map_type
-  | struct_type
-
-primitive_type
-  : TINYINT
-  | SMALLINT
-  | INT
-  | BIGINT
-  | BOOLEAN
-  | FLOAT
-  | DOUBLE
-  | STRING
-
-array_type
-  : ARRAY < data_type >
-
-map_type
-  : MAP < primitive_type, data_type >
-
-struct_type
-  : STRUCT < col_name : data_type [COMMENT col_comment], ...>
-
-row_format
-  : DELIMITED [FIELDS TERMINATED BY char] [COLLECTION ITEMS TERMINATED BY char]
-        [MAP KEYS TERMINATED BY char] [LINES TERMINATED BY char]
-  | SERDE serde_name [WITH SERDEPROPERTIES (property_name=property_value, property_name=property_value, ...)]
-
-file_format:
-  : SEQUENCEFILE
-  | TEXTFILE
-  | INPUTFORMAT input_format_classname OUTPUTFORMAT output_format_classname
-]]></source>
- 
-<p>
-CREATE TABLE creates a table with the given name. An error is thrown if a table or view with the same name already exists. You can use IF NOT EXISTS to skip the error.
-</p>
-
-<p>
-The EXTERNAL keyword lets you create a table and provide a LOCATION so that Hive does not use a default location for this table. This comes in handy if you already have data generated. When dropping an EXTERNAL table, data in the table is NOT deleted from the file system.
-</p>
-The LIKE form of CREATE TABLE allows you to copy an existing table definition exactly (without copying its data).
-
-<p>
-You can create tables with custom SerDe or using native SerDe. A native SerDe is used if ROW FORMAT is not specified or ROW FORMAT DELIMITED is specified. You can use the DELIMITED clause to read delimited files. Use the SERDE clause to create a table with custom SerDe. Refer to SerDe section of the User Guide for more information on SerDe.
-</p>
-
-<p>
-You must specify a list of columns for tables that use a native SerDe. Refer to the Types part of the User Guide for the allowable column types. A list of columns for tables that use a custom SerDe may be specified, but Hive will query the SerDe to determine the actual list of columns for this table.
-</p>
-
-<p>
-Use STORED AS TEXTFILE if the data needs to be stored as plain text files. Use STORED AS SEQUENCEFILE if the data needs to be compressed. Please read more about Hive/CompressedStorage if you are planning to keep data compressed in your Hive tables. Use INPUTFORMAT and OUTPUTFORMAT to specify the name of a corresponding InputFormat and OutputFormat class as a string literal, e.g. 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat'.
-</p>
-
-<p>
-Partitioned tables can be created using the PARTITIONED BY clause. A table can have one or more partition columns and a separate data directory is created for each distinct value combination in the partition columns. Further, tables or partitions can be bucketed using CLUSTERED BY columns, and data can be sorted within that bucket via SORT BY columns. This can improve performance on certain kinds of queries.
-</p>
-
-<p>
-Table names and column names are case insensitive but SerDe and property names are case sensitive. Table and column comments are string literals (single-quoted). The TBLPROPERTIES clause allows you to tag the table definition with your own metadata key/value pairs.
-</p>
-
-<p>A create table example:</p>
-  <source><![CDATA[CREATE TABLE page_view(viewTime INT, userid BIGINT,
-     page_url STRING, referrer_url STRING,
-     ip STRING COMMENT 'IP Address of the User')
- COMMENT 'This is the page view table'
- PARTITIONED BY(dt STRING, country STRING)
- STORED AS SEQUENCEFILE;]]></source>  
-
- <p>The statement above creates the page_view table with viewTime, userid, page_url, referrer_url, and ip columns (including comments). The table is also partitioned and data is stored in sequence files. The data format in the files is assumed to be field-delimited by ctrl-A and row-delimited by newline.
-  </p>
-
-</section>
-
-<section name="Create Table as Select (CTAS)" href="ctas?">
-
-  <p>
-  Tables can also be created and populated by the results of a query in one create-table-as-select (CTAS) statement. The table created by CTAS is atomic, meaning that the table is not seen by other users until all the query results are populated. So other users will either see the table with the complete results of the query or will not see the table at all.
-  </p>
-
-  <p>
-  There are two parts in CTAS: the SELECT part can be any SELECT statement supported by HiveQL. The CREATE part takes the resulting schema from the SELECT part and creates the target table with other table properties such as the SerDe and storage format. The only restriction in CTAS is that the target table cannot be a partitioned table (nor can it be an external table).
-  </p> 
-
-  <source><![CDATA[CREATE TABLE page_view(viewTime INT, userid BIGINT,
-     page_url STRING, referrer_url STRING,
-     ip STRING COMMENT 'IP Address of the User')
- COMMENT 'This is the page view table'
- PARTITIONED BY(dt STRING, country STRING)
- STORED AS SEQUENCEFILE;
-]]></source>
-
-</section>
-
-<section name="Using SerDes" href="SerDes">
-
-<p>
-This example CTAS statement creates the target table new_key_value_store with the 
-schema (new_key DOUBLE, key_value_pair STRING) derived from the results of the 
-SELECT statement. If the SELECT statement does not specify column aliases, the 
-column names will be automatically assigned to _col0, _col1, and _col2 etc. 
-In addition, the new target table is created using a specific SerDe and a storage 
-format independent of the source tables in the SELECT statement. 
-</p>
-
-<source><![CDATA[CREATE TABLE new_key_value_store
-   ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
-   STORED AS RCFile AS
-SELECT (key % 1024) new_key, concat(key, value) key_value_pair
-FROM key_value_store
-SORT BY new_key, key_value_pair;
-]]></source>
-
-<p>
-<b>Being able to select data from one table to another is one of the most
-powerful features of Hive. Hive handles the conversion of the data from the source
-format to the destination format as the query is being executed!</b>
-</p>
-
-</section>
-
-<section name="Bucketed Sorted Table" href="bucketed_sorted_table">
-
-<source><![CDATA[CREATE TABLE page_view(viewTime INT, userid BIGINT,
-     page_url STRING, referrer_url STRING,
-     ip STRING COMMENT 'IP Address of the User')
- COMMENT 'This is the page view table'
- PARTITIONED BY(dt STRING, country STRING)
- CLUSTERED BY(userid) SORTED BY(viewTime) INTO 32 BUCKETS
- ROW FORMAT DELIMITED
-   FIELDS TERMINATED BY '\001'
-   COLLECTION ITEMS TERMINATED BY '\002'
-   MAP KEYS TERMINATED BY '\003'
- STORED AS SEQUENCEFILE;
-]]></source>
-
-<p>In the example above, the page_view table is bucketed (clustered by) userid and within each bucket the data is sorted in increasing order of viewTime. Such an organization allows the user to do efficient sampling on the clustered column - in this case userid. The sorting property allows internal operators to take advantage of the better-known data structure while evaluating queries, also increasing efficiency. MAP KEYS and COLLECTION ITEMS keywords can be used if any of the columns are lists or maps.
-</p>
-
-<p>
-The CLUSTERED BY and SORTED BY creation commands do not affect how data is inserted into a table -- only how it is read. This means that users must be careful to insert data correctly by specifying the number of reducers to be equal to the number of buckets, and using CLUSTER BY and SORT BY commands in their query. See
-<a href="working_with_bucketed_tables.html">Working with Bucketed tables</a> to see how these
-are used. 
-</p>
-
-</section>
-
-<section name="External Tables" href="external_table?">
-
-<p>
-Unless a table is specified as EXTERNAL, it will be stored inside a folder specified by the
-configuration property hive.metastore.warehouse.dir.
-An EXTERNAL table points to any HDFS location for its storage. You still have to make sure that the data format specified matches the data.
- 
-</p>
-<source><![CDATA[CREATE EXTERNAL TABLE page_view(viewTime INT, userid BIGINT,
-     page_url STRING, referrer_url STRING,
-     ip STRING COMMENT 'IP Address of the User',
-     country STRING COMMENT 'country of origination')
- COMMENT 'This is the staging page view table'
- ROW FORMAT DELIMITED FIELDS TERMINATED BY '\054'
- STORED AS TEXTFILE
- LOCATION '<hdfs_location>';
- ]]></source>
-
-</section>
-
-<section name="Create Table ... Like" href="create_table_like?">
-
-<p>The statement below creates a new empty_key_value_store table whose definition exactly matches the existing key_value_store in all particulars other than the table name. The new table contains no rows.
-</p>
-
-<source><![CDATA[CREATE TABLE empty_key_value_store
-LIKE key_value_store;
-]]></source>
-
-</section>
-
-<section name="drop" href="drop">
-<p>Drop it like it is hot: DROP TABLE removes the table definition and, for managed (non-EXTERNAL) tables, the underlying data as well.</p>
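-
-<p>A minimal sketch, reusing the page_view table from the examples above:</p>
-
-<source><![CDATA[DROP TABLE IF EXISTS page_view;
-]]></source>
-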
-</section>
-  </body>
-</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/xdocs/language_manual/joins.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/joins.xml b/docs/xdocs/language_manual/joins.xml
deleted file mode 100644
index 190ecd4..0000000
--- a/docs/xdocs/language_manual/joins.xml
+++ /dev/null
@@ -1,212 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.    
--->
-
-<document>
-
-  <properties>
-    <title>Hadoop Hive- Joins</title>
-    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
-  </properties>
-
-  <body>
-
-    <section name="Join Syntax" href="join_syntax">
-
-    <source><![CDATA[join_table:
-    table_reference [INNER] JOIN table_factor [join_condition]
-  | table_reference {LEFT|RIGHT|FULL} [OUTER] JOIN table_reference join_condition
-  | table_reference LEFT SEMI JOIN table_reference join_condition
-
-table_reference:
-    table_factor
-  | join_table
-
-table_factor:
-    tbl_name [alias]
-  | table_subquery alias
-  | ( table_references )
-
-join_condition:
-    ON equality_expression ( AND equality_expression )*
-
-equality_expression: 
-    expression = expression
-]]></source>
-
-<p>
-Only equality joins, outer joins, and left semi joins are supported in Hive. Hive does not support join conditions that are not equality conditions as it is very difficult to express such conditions as a map/reduce job. Also, more than two tables can be joined in Hive. 
-</p>
-
-<b>Allowed Equality Joins</b>
-
-<source><![CDATA[SELECT a.* FROM a JOIN b ON (a.id = b.id) 
-]]></source>
-
-<source><![CDATA[SELECT a.* FROM a JOIN b ON (a.id = b.id AND a.department = b.department)
-]]></source>
-
-<b>Disallowed Joins</b>
-
-<source><![CDATA[SELECT a.* FROM a JOIN b ON (a.id <> b.id)
-]]></source>
-
-<p>Multiple tables can be joined in the same query:</p>
-
-<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key2)
-]]></source>
-
-</section>
-
-<section name="Join implementation with Map Reduce" href="join_map_reduce">
-
-<p>Hive converts joins over multiple tables into a single map/reduce job if, for every table, the same column is used in the join clauses. The query below is
-converted into a single map/reduce job because only the key1 column of b is involved in the join.</p>
-
-<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key1)]]></source>
-<i>It is worth noting that any number of tables can be joined in a single map/reduce job as long as they meet the above criterion.</i>
-
-<p>However, if the join columns are not the same for all tables, the query is converted into multiple map/reduce jobs.</p>
-
-<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key2)
-]]></source>
-
-<p>In this case the first map/reduce job joins a with b and the results are then joined with c in the second map/reduce job. </p>
-</section>
-
-<section name="Largest Table LAST" href="lagest_table_last">
-
-<p>In every map/reduce stage of the join, the last table in the sequence is streamed through the reducers whereas the others are buffered. Therefore, organizing the tables so that the largest table appears last in the sequence helps reduce the memory needed in the reducer for buffering the rows for a particular value of the join key. For example, in</p>
-
-<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key1)]]></source>
-
-<p>all three tables are joined in a single map/reduce job, and the values for a particular value of the key for tables a and b are buffered in memory in the reducers. Then, for each row retrieved from c, the join is computed with the buffered rows.</p>
-
-<p>For the query:</p>
-
-<source><![CDATA[SELECT a.val, b.val, c.val FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key2)]]></source>
-
-<p>there are two map/reduce jobs involved in computing the join. The first of these joins a with b and buffers the values of a while streaming the values of b through the reducers. The second job buffers the results of the first join while streaming the values of c through the reducers.</p>
-
-</section>
-
-<section name="Streamtable hint" href="stream_table_hint">
-
-<p>In every map/reduce stage of the join, the table to be streamed can be specified via a hint:</p>
-
-<source><![CDATA[SELECT /*+ STREAMTABLE(a) */ a.val, b.val, c.val 
-FROM a JOIN b ON (a.key = b.key1) JOIN c ON (c.key = b.key1)]]></source>
-
-<p>All three tables are joined in a single map/reduce job, and the values for a particular value of the key for tables b and c are buffered in memory in the reducers. Then, for each row retrieved from a, the join is computed with the buffered rows.
-</p>
-
-</section>
-
-<section name="Outer Joins" href="outer_joins">
-
-<p>LEFT, RIGHT, and FULL OUTER joins exist in order to provide more control over ON clauses for which there is no match. For example:</p>
-
-<source><![CDATA[SELECT a.val, b.val FROM a LEFT OUTER JOIN b ON (a.key=b.key)
-]]></source>
-
-<p>The above query will return a row for every row in a. This output row will be a.val,b.val when there is a b.key that equals a.key, and the output row will be a.val,NULL when there is no corresponding b.key. Rows from b which have no corresponding a.key will be dropped. The syntax "FROM a LEFT OUTER JOIN b" must be written on one line in order to understand how it works--a is to the LEFT of b in this query, and so all rows from a are kept; a RIGHT OUTER JOIN will keep all rows from b, and a FULL OUTER JOIN will keep all rows from a and all rows from b. OUTER JOIN semantics should conform to standard SQL specs.
-</p>
-
-<p>Joins occur BEFORE WHERE clauses. So, if you want to restrict the OUTPUT of a join, the requirement should be in the WHERE clause; otherwise it should be in the JOIN clause. A big point of confusion for this issue is partitioned tables:</p>
-
-<source><![CDATA[SELECT a.val, b.val FROM a LEFT OUTER JOIN b ON (a.key=b.key)
-  WHERE a.ds='2009-07-07' AND b.ds='2009-07-07']]></source>
-
-<p>will join a with b, producing a list of a.val and b.val. The WHERE clause, however, can also reference other columns of a and b that are in the output of the join and then filter them out. However, whenever a row from the JOIN has found a key for a and no key for b, all of the columns of b will be NULL, including the ds column. That is to say, you will filter out all rows of join output for which there was no valid b.key, and thus you have defeated your LEFT OUTER requirement. In other words, the LEFT OUTER part of the join is irrelevant if you reference any column of b in the WHERE clause. Instead, when OUTER JOINing, use this syntax:</p>
-
-<source><![CDATA[SELECT a.val, b.val FROM a LEFT OUTER JOIN b
-  ON (a.key=b.key AND b.ds='2009-07-07' AND a.ds='2009-07-07')]]></source>
-
-<p>Joins are NOT commutative! Joins are left-associative regardless of whether they are LEFT or RIGHT joins. </p>
-
-<source><![CDATA[SELECT a.val1, a.val2, b.val, c.val
-FROM a
-JOIN b ON (a.key = b.key)
-LEFT OUTER JOIN c ON (a.key = c.key)]]></source>
-
-<p>The above query first joins a on b, throwing away everything in a or b that does not have a corresponding key in the other table. The reduced table is then joined on c. This provides unintuitive results if there is a key that exists in both a and c, but not b: The whole row (including a.val1, a.val2, and a.key) is dropped in the "a JOIN b" step, so when the result of that is joined with c, any row with a c.key that had a corresponding a.key or b.key (but not both) will show up as NULL, NULL, NULL, c.val.</p>
-</section>
-
-<section name="Left Semi Join" href="left_semi_join">
-
-<p>LEFT SEMI JOIN implements the correlated IN/EXISTS subquery semantics in an efficient way. Since Hive currently does not support IN/EXISTS subqueries, you can rewrite your queries using LEFT SEMI JOIN. The restriction of using LEFT SEMI JOIN is that the right-hand-side table may only be referenced in the join condition (the ON clause), not in WHERE or SELECT clauses.</p>
-
-<p>This type of query</p>
-<source><![CDATA[SELECT a.key, a.value
-FROM a 
-WHERE a.key in 
-(SELECT b.key
-FROM B);]]></source>
-
-<p>Can be written as:</p>
-
-<source><![CDATA[SELECT a.key, a.val
-FROM a LEFT SEMI JOIN b on (a.key = b.key)]]></source>
-
-</section>
-
-<section name="Map Side Join" href="map_side_join">
-
-<p>If all but one of the tables being joined are small, the join can be performed as a map-only job. The query
-does not need a reducer. For every mapper of a, b is read completely. A restriction is that a <b>FULL/RIGHT OUTER JOIN b</b> cannot be performed.</p>
-
-<source><![CDATA[SELECT /*+ MAPJOIN(b) */ a.key, a.value
-FROM a join b on a.key = b.key]]></source>
-
-</section>
-
-<section name="Bucketed Map Join" href="bucket_map_join">
-
-<p>If the tables being joined are bucketized, and the number of buckets in one table is a multiple of the number of buckets in the other, the buckets can be joined with each other. If table A has 8 buckets and table B has 4 buckets, the following join:</p>
-
-<source><![CDATA[SELECT /*+ MAPJOIN(b) */ a.key, a.value
-FROM a join b on a.key = b.key]]></source>
-
-<p>can be done on the mapper only. Instead of fetching B completely for each mapper of A, only the required buckets are fetched. For the query above, the mapper processing bucket 1 of A will only fetch bucket 1 of B. This is not the default behavior, and it is governed by the following parameter:</p>
-
-<i>set hive.optimize.bucketmapjoin = true</i>
-
-<p>If the tables being joined are sorted and bucketized, and the number of buckets is the same, a sort-merge join can be performed. The corresponding buckets are joined with each other at the mapper. If both A and B have 4 buckets,</p>
-
-<source><![CDATA[ SELECT /*+ MAPJOIN(b) */ a.key, a.value
-FROM A a join B b on a.key = b.key]]></source>
-
-<p>can be done on the mapper only. The mapper for a bucket of A will traverse the corresponding bucket of B. This is not the default behavior, and the following parameters need to be set:</p>
-
-<source><![CDATA[set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set hive.optimize.bucketmapjoin = true;
-set hive.optimize.bucketmapjoin.sortedmerge = true;]]></source>
-
-</section>
-
-</body>
-
-
-
-</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/xdocs/language_manual/var_substitution.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/var_substitution.xml b/docs/xdocs/language_manual/var_substitution.xml
deleted file mode 100644
index 3f3b04b..0000000
--- a/docs/xdocs/language_manual/var_substitution.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
--->
-
-<document>
-
-  <properties>
-    <title>Hadoop Hive- Variable Substitution</title>
-    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
-  </properties>
-
-  <body>
-<h3>Hive Variable Substitution</h3>
-<section name="Introduction" href="Introduction">
-
-<p>Hive is used both for interactive queries and as part of larger scripted processes. The Hive variable substitution mechanism was 
-designed to avoid some of the code that was getting baked into the scripting languages on top of Hive. For example:</p>
-
-<source><![CDATA[$ a=b
-$ hive -e " describe $a "
-]]></source>
-
-<p>
-are becoming commonplace. This is frustrating, as Hive becomes closely coupled with scripting languages. The Hive
-startup time of a couple of seconds is non-trivial when doing thousands of manipulations across multiple hive -e invocations.</p>
-
-<p>
-Hive Variables combine the set capability you know and love with some limited yet powerful (evil laugh) substitution 
-ability. For example:</p>
-
-<source><![CDATA[$ bin/hive -hiveconf a=b -e 'set a; set hiveconf:a; \
-create table if not exists b (col int); describe ${hiveconf:a}'
-]]></source>
-
-<p>Results in:</p>
-<source><![CDATA[Hive history file=/tmp/edward/hive_job_log_edward_201011240906_1463048967.txt
-a=b
-hiveconf:a=b
-OK
-Time taken: 5.913 seconds
-OK
-col	int	
-Time taken: 0.754 seconds
-]]></source>
-
-</section>
-
-<section name="Using variables" href="using_variables">
-
-<p>There are three namespaces for variables: hiveconf, system, and env. hiveconf variables are set as normal:</p>
-
-<source><![CDATA[set x=myvalue
-]]></source>
-
-<p>However, they are retrieved using</p>
-
-<source><![CDATA[${hiveconf:x}
-]]></source>
- 
-<p>Annotated examples of usage from the test case ql/src/test/queries/clientpositive/set_processor_namespaces.q:</p>
-
-<source><![CDATA[set zzz=5;
---  sets zzz=5
-set zzz;
-
-set system:xxx=5;
-set system:xxx;
--- sets a system property xxx to 5
-
-set system:yyy=${system:xxx};
-set system:yyy;
--- sets yyy to the value of xxx
-
-set go=${hiveconf:zzz};
-set go;
--- sets go based on the value of zzz
-
-set hive.variable.substitute=false;
-set raw=${hiveconf:zzz};
-set raw;
--- with substitution disabled, raw is set to the literal string
-
-set hive.variable.substitute=true;
-
-EXPLAIN SELECT * FROM src where key=${hiveconf:zzz};
-SELECT * FROM src where key=${hiveconf:zzz};
--- use a variable in a query
-
-set a=1;
-set b=a;
-set c=${hiveconf:${hiveconf:b}};
-set c;
--- uses nested variables
-
-
-set jar=../lib/derby.jar;
-
-add file ${hiveconf:jar};
-list file;
-delete file ${hiveconf:jar};
-list file;
-]]></source>
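-
-<p>The env namespace mentioned above exposes shell environment variables, which are read-only
-inside Hive. A small sketch (the variable MY_KEY is hypothetical and must be exported before
-invoking hive; src is the same test table used in the examples above):</p>
-
-<source><![CDATA[$ export MY_KEY=238
-$ hive -e 'SELECT * FROM src WHERE key = ${env:MY_KEY}'
-]]></source>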
-</section>
-
-<section name="Disabling" href="disable">
-  <p>Variable substitution is on by default. If this causes an issue with an existing script, disable it:</p>
-
-<source><![CDATA[set hive.variable.substitute=false;
-]]></source>
-
-</section>
-
-</body>
-</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/xdocs/language_manual/working_with_bucketed_tables.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/working_with_bucketed_tables.xml b/docs/xdocs/language_manual/working_with_bucketed_tables.xml
deleted file mode 100644
index de4b599..0000000
--- a/docs/xdocs/language_manual/working_with_bucketed_tables.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
--->
-
-<document>
-
-  <properties>
-    <title>Hadoop Hive- Working with Bucketed Tables</title>
-    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
-  </properties>
-
-  <body>
-
- <section name="Defining Bucketed Tables" href="defining_bucketed_tables?">
-
-<p>
-This is a brief example of creating and populating bucketed tables. Bucketed tables 
-are fantastic in that they allow much more efficient sampling than non-bucketed 
-tables, and they may later allow for time-saving operations such as map-side joins. 
-However, the bucketing specified at table creation is not enforced when the table 
-is written to, and so it is possible for the table's metadata to advertise 
-properties which are not upheld by the table's actual layout. This should obviously 
-be avoided. Here's how to do it right.
-</p>
-<p>First there’s table creation:</p>
-
- <source><![CDATA[CREATE TABLE user_info_bucketed(user_id BIGINT, firstname STRING, lastname STRING)
-COMMENT 'A bucketed copy of user_info'
-PARTITIONED BY(ds STRING)
-CLUSTERED BY(user_id) INTO 256 BUCKETS;
- ]]></source>
-
-<p>Notice that we define user_id as the bucketing column.</p>
-</section>
-
-<section name="Populating Bucketed Tables" href="populating_bucketed_tables?">
-
- <source><![CDATA[set hive.enforce.bucketing = true;  
-FROM user_info
-INSERT OVERWRITE TABLE user_info_bucketed
-PARTITION (ds='2009-02-25')
-SELECT user_id, firstname, lastname WHERE ds='2009-02-25';
- ]]></source>
-
-<p>The command <strong>set hive.enforce.bucketing = true;</strong> allows the 
-correct number of reducers and the cluster by column to be automatically selected 
-based on the table definition. Otherwise, you would need to set the number of reducers to be 
-the same as the number of buckets with 
-<strong>set mapred.reduce.tasks = 256;</strong> and have a 
-<strong>CLUSTER BY ...</strong> clause in the select, as in the sketch below.</p>
-
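-<p>A minimal sketch of that manual alternative, under the same assumptions as above
-(user_info is the unbucketed source table):</p>
-
-<source><![CDATA[set mapred.reduce.tasks = 256;
-
-FROM user_info
-INSERT OVERWRITE TABLE user_info_bucketed
-PARTITION (ds='2009-02-25')
-SELECT user_id, firstname, lastname
-WHERE ds='2009-02-25'
-CLUSTER BY user_id;
-]]></source>
-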
-</section>
-
-<section name="Bucketing Explained" href="bucketing_explained?">
-<p>
-How does Hive distribute the rows across the buckets? In general, the bucket number is determined by the expression hash_function(bucketing_column) mod num_buckets. (There's a bitwise AND with 0x7FFFFFFF in there too, but that's not that important.) The hash_function depends on the type of the bucketing column. For an int, it's easy: hash_int(i) == i. For example, if user_id were an int, and there were 10 buckets, we would expect all user_id's that end in 0 to be in bucket 1, all user_id's that end in a 1 to be in bucket 2, and so on. For other datatypes, it's a little tricky. In particular, the hash of a BIGINT is not the same as the BIGINT itself. And the hash of a string or a complex datatype will be some number derived from the value, but not anything humanly recognizable. For example, if user_id were a STRING, then the user_id's in bucket 1 would probably not end in 0. In general, distributing rows based on the hash will give you an even distribution in the buckets.
-</p>
-
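-<p>As a sketch of the sampling benefit mentioned at the top of this page, a query such as the
-following can read a single bucket of user_info_bucketed rather than the whole partition
-(this uses the standard Hive TABLESAMPLE clause on the bucketing column):</p>
-
-<source><![CDATA[SELECT * FROM user_info_bucketed
-TABLESAMPLE(BUCKET 1 OUT OF 256 ON user_id)
-WHERE ds = '2009-02-25';
-]]></source>
-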
-</section>
-
-<section name="What can go wrong?" href="bucketing_gone_wrong?">
-<p>
-So, what can go wrong? As long as you 
-<strong>set hive.enforce.bucketing = true</strong>, and use the syntax above, 
-the tables should be populated properly. Things can go wrong if the bucketing 
-column type is different during the insert and on read, or if you manually 
-cluster by a value that's different from the table definition. 
-</p>
-</section>
-</body>
-</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/docs/xdocs/udf/reflect.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/udf/reflect.xml b/docs/xdocs/udf/reflect.xml
deleted file mode 100644
index 435f025..0000000
--- a/docs/xdocs/udf/reflect.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
--->
-
-<document>
-
-  <properties>
-    <title>Hadoop Hive- Reflect User Defined Function</title>
-    <author email="hive-user@hadoop.apache.org">Hadoop Hive Documentation Team</author>
-  </properties>
-
-  <body>
-<section name="Reflect (Generic) UDF" href="reflect">
-
-<p>A Java class and method often already exist to handle the exact function a user would like to use in Hive. Rather
-than having to write a wrapper UDF to call this method, the majority of these methods can be called using the reflect UDF. Reflect uses 
-Java reflection to instantiate and call methods of objects; it can also call static functions. The method must return a primitive type
-or a type that Hive knows how to serialize.
-</p>
-
-<source><![CDATA[SELECT reflect("java.lang.String", "valueOf", 1),
-       reflect("java.lang.String", "isEmpty"),
-       reflect("java.lang.Math", "max", 2, 3),
-       reflect("java.lang.Math", "min", 2, 3),
-       reflect("java.lang.Math", "round", 2.5),
-       reflect("java.lang.Math", "exp", 1.0),
-       reflect("java.lang.Math", "floor", 1.9)
-FROM src LIMIT 1;
-
-
-1	true	3	2	3	2.7182818284590455	1.0]]></source>
-
-</section>
-</body>
-</document>

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/packaging/src/main/assembly/src.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/src.xml b/packaging/src/main/assembly/src.xml
index 0529e90..820d379 100644
--- a/packaging/src/main/assembly/src.xml
+++ b/packaging/src/main/assembly/src.xml
@@ -67,7 +67,6 @@
         <include>contrib/**/*</include>
         <include>data/**/*</include>
         <include>dev-support/**/*</include>
-        <include>docs/**/*</include>
         <include>druid-handler/**/*</include>
         <include>jdbc-handler/**/*</include>
         <include>find-bugs/**/*</include>

http://git-wip-us.apache.org/repos/asf/hive/blob/db983d6c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index a6e5c64..108d019 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1135,7 +1135,6 @@
 	    <exclude>checkstyle/**</exclude>
 	    <exclude>bin/**</exclude>
 	    <exclude>itests/**</exclude>
-	    <exclude>docs/**</exclude>
             <exclude>**/README.md</exclude>
             <exclude>**/*.iml</exclude>
 	    <exclude>**/*.txt</exclude>


Mime
View raw message