diff --git a/docs/changes/ChangesFancyStyle.css b/docs/changes/ChangesFancyStyle.css deleted file mode 100644 index 5eef241..0000000 --- a/docs/changes/ChangesFancyStyle.css +++ /dev/null @@ -1,170 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ -/** - * General - */ - -img { border: 0; } - -#content table { - border: 0; - width: 100%; -} -/*Hack to get IE to render the table at 100%*/ -* html #content table { margin-left: -3px; } - -#content th, -#content td { - margin: 0; - padding: 0; - vertical-align: top; -} - -.clearboth { - clear: both; -} - -.note, .warning, .fixme { - border: solid black 1px; - margin: 1em 3em; -} - -.note .label { - background: #369; - color: white; - font-weight: bold; - padding: 5px 10px; -} -.note .content { - background: #F0F0FF; - color: black; - line-height: 120%; - font-size: 90%; - padding: 5px 10px; -} -.warning .label { - background: #C00; - color: white; - font-weight: bold; - padding: 5px 10px; -} -.warning .content { - background: #FFF0F0; - color: black; - line-height: 120%; - font-size: 90%; - padding: 5px 10px; -} -.fixme .label { - background: #C6C600; - color: black; - font-weight: bold; - padding: 5px 10px; -} -.fixme .content { - padding: 5px 10px; -} - -/** - * Typography - 
*/ - -body { - font-family: verdana, "Trebuchet MS", arial, helvetica, sans-serif; - font-size: 100%; -} - -#content { - font-family: Georgia, Palatino, Times, serif; - font-size: 95%; -} -#tabs { - font-size: 70%; -} -#menu { - font-size: 80%; -} -#footer { - font-size: 70%; -} - -h1, h2, h3, h4, h5, h6 { - font-family: "Trebuchet MS", verdana, arial, helvetica, sans-serif; - font-weight: bold; - margin-top: 1em; - margin-bottom: .5em; -} - -h1 { - margin-top: 0; - margin-bottom: 1em; - font-size: 1.4em; - background-color: 73CAFF -} -#content h1 { - font-size: 160%; - margin-bottom: .5em; -} -#menu h1 { - margin: 0; - padding: 10px; - background: #336699; - color: white; -} -h2 { - font-size: 120%; - background-color: 73CAFF -} -h3 { font-size: 100%; } -h4 { font-size: 90%; } -h5 { font-size: 80%; } -h6 { font-size: 75%; } - -p { - line-height: 120%; - text-align: left; - margin-top: .5em; - margin-bottom: 1em; -} - -#content li, -#content th, -#content td, -#content li ul, -#content li ol{ - margin-top: .5em; - margin-bottom: .5em; -} - - -#content li li, -#minitoc-area li{ - margin-top: 0em; - margin-bottom: 0em; -} - -#content .attribution { - text-align: right; - font-style: italic; - font-size: 85%; - margin-top: 1em; -} - -.codefrag { - font-family: "Courier New", Courier, monospace; - font-size: 110%; -} diff --git a/docs/changes/ChangesSimpleStyle.css b/docs/changes/ChangesSimpleStyle.css deleted file mode 100644 index 407d0f1..0000000 --- a/docs/changes/ChangesSimpleStyle.css +++ /dev/null @@ -1,49 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. 
You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -body { - font-family: Courier New, monospace; - font-size: 10pt; -} - -h1 { - font-family: Courier New, monospace; - font-size: 10pt; -} - -h2 { - font-family: Courier New, monospace; - font-size: 10pt; -} - -h3 { - font-family: Courier New, monospace; - font-size: 10pt; -} - -a:link { - color: blue; -} - -a:visited { - color: purple; -} - -li { - margin-top: 1em; - margin-bottom: 1em; -} diff --git a/docs/changes/changes2html.pl b/docs/changes/changes2html.pl deleted file mode 100644 index 03f0bbb..0000000 --- a/docs/changes/changes2html.pl +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/perl -# -# Transforms Lucene Java's CHANGES.txt into Changes.html -# -# Input is on STDIN, output is to STDOUT -# -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -use strict; -use warnings; - -my $jira_url_prefix = 'http://issues.apache.org/jira/browse/'; -my $title = undef; -my $release = undef; -my $sections = undef; -my $items = undef; -my $first_relid = undef; -my $second_relid = undef; -my @releases = (); - -my @lines = <>; # Get all input at once - -# -# Parse input and build hierarchical release structure in @releases -# -for (my $line_num = 0 ; $line_num <= $#lines ; ++$line_num) { - $_ = $lines[$line_num]; - next unless (/\S/); # Skip blank lines - - unless ($title) { - if (/\S/) { - s/^\s+//; # Trim leading whitespace - s/\s+$//; # Trim trailing whitespace - } - $title = $_; - next; - } - - if (/^(Release)|(Trunk)/) { # Release headings - $release = $_; - $sections = []; - push @releases, [ $release, $sections ]; - ($first_relid = lc($release)) =~ s/\s+/_/g if ($#releases == 0); - ($second_relid = lc($release)) =~ s/\s+/_/g if ($#releases == 1); - $items = undef; - next; - } - - # Section heading: 2 leading spaces, words all capitalized - if (/^ ([A-Z]+)\s*/) { - my $heading = $_; - $items = []; - push @$sections, [ $heading, $items ]; - next; - } - - # Handle earlier releases without sections - create a headless section - unless ($items) { - $items = []; - push @$sections, [ undef, $items ]; - } - - my $type; - if (@$items) { # A list item has been encountered in this section before - $type = $items->[0]; # 0th position of items array is list type - } else { - $type = get_list_type($_); - push @$items, $type; - } - - if ($type eq 'numbered') { # The modern items list style - # List item boundary is another numbered item or an unindented line - my $line; - my $item = $_; - $item =~ s/^(\s{0,2}\d+\.\s*)//; # Trim the leading item number - my $leading_ws_width = length($1); - $item =~ s/\s+$//; # Trim trailing whitespace - $item .= "\n"; - - while ($line_num < $#lines - and ($line = $lines[++$line_num]) !~ /^(?:\s{0,2}\d+\.\s*\S|\S)/) { - $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace - $line =~ 
s/\s+$//; # Trim trailing whitespace - $item .= "$line\n"; - } - $item =~ s/\n+\Z/\n/; # Trim trailing blank lines - push @$items, $item; - --$line_num unless ($line_num == $#lines); - } elsif ($type eq 'paragraph') { # List item boundary is a blank line - my $line; - my $item = $_; - $item =~ s/^(\s+)//; - my $leading_ws_width = defined($1) ? length($1) : 0; - $item =~ s/\s+$//; # Trim trailing whitespace - $item .= "\n"; - - while ($line_num < $#lines and ($line = $lines[++$line_num]) =~ /\S/) { - $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace - $line =~ s/\s+$//; # Trim trailing whitespace - $item .= "$line\n"; - } - push @$items, $item; - --$line_num unless ($line_num == $#lines); - } else { # $type is one of the bulleted types - # List item boundary is another bullet or a blank line - my $line; - my $item = $_; - $item =~ s/^(\s*$type\s*)//; # Trim the leading bullet - my $leading_ws_width = length($1); - $item =~ s/\s+$//; # Trim trailing whitespace - $item .= "\n"; - - while ($line_num < $#lines - and ($line = $lines[++$line_num]) !~ /^\s*(?:$type|\Z)/) { - $line =~ s/^\s{$leading_ws_width}//; # Trim leading whitespace - $line =~ s/\s+$//; # Trim trailing whitespace - $item .= "$line\n"; - } - push @$items, $item; - --$line_num unless ($line_num == $#lines); - } -} - -# -# Print HTML-ified version to STDOUT -# -print<<"__HTML_HEADER__"; - - - - $title - - - - - - - -Hive -

$title

- -__HTML_HEADER__ - -my $heading; -my $relcnt = 0; -my $header = 'h2'; -for my $rel (@releases) { - if (++$relcnt == 3) { - $header = 'h3'; - print "

"; - print "Older Releases"; - print "

\n"; - print "\n" if ($relcnt > 3); -print "\n\n"; - - -# -# Subroutine: get_list_type -# -# Takes one parameter: -# -# - The first line of a sub-section/point -# -# Returns one scalar: -# -# - The list type: 'numbered'; or one of the bulleted types '-', or '.' or -# 'paragraph'. -# -sub get_list_type { - my $first_list_item_line = shift; - my $type = 'paragraph'; # Default to paragraph type - - if ($first_list_item_line =~ /^\s{0,2}\d+\.\s+\S+/) { - $type = 'numbered'; - } elsif ($first_list_item_line =~ /^\s*([-.])\s+\S+/) { - $type = $1; - } - return $type; -} - -1; diff --git a/docs/images/hive-logo.jpg b/docs/images/hive-logo.jpg deleted file mode 100644 index 0f75cf1bd9859ba07f363bd04d3efef2a4046657..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2498 zcmbW%c{J4T9tZI6EX-gmV_zoASTiEiB+(cuM2#(3Mp;9nC|j27LKsVyRDKPHv1e2! zTUjEKP=qKl$u^}ZWi5C7?(h8WIrrQ@?!C`*-sgPId6x6M&hwmS`_1+gAY^W0W&(gf zAi$N=fo&F`V;oBM0su2JbpQ?k02ly4xBw8RQsD#w0sUD6VCc>uP?!h;GVXEKy#xJa%M+z#sMgjOIShMh+D+k4Tkm? z#l~JQf-4@r*DhxBYDsCIdq5lyuegMyl(e#ns@iUK+Z{*#)b(>aPLN6^Vuc*BLppN;l zzM-+{Nk`|?uI^_&&j*KI508w#85?JPnEp63J2$_;UjFud<;Ty}we^i17YKm-ll8ak z-&{zJ3k-!qps*bm2pqx*1PSF*(B>94vWB_&i=eTwaM8opi|)1aDDJaa5_1oD#Vf9a zV<|81(EgPDcd)qsOZGSHU#@Wg!Fgw3Bm@Z%fYlAPnvl*G6+PRLal1RH5WR^df>&%0 zk95XkjgkOlG&WTBxsCPd!OKRCudGi8xP(sjh|RCP@O(x&zO!9`+!Q3zKx z5ObDln410a>}ut0mt-8#&p!3Ua5Y1j;yIld>}}1qMWwtpp^I7@a{U#Q+_jQNqg$kL zVoO<=hg<34tatglDKX5{waH+=rz_)Oyn zgJ>5qOJSDkX8n21f~D2wdFsK4lx!Ll!sa4pzvz;;@kyoAF+kEu&%B<~M9m**gAhL1LsHB;R25Gvr>O zFsa`@C}10?qZpW+Y+pAyh`TM3(8@xNI!9{Geu&iB1_)&4bt||hTgfn`Qj}5w1gnP! 
zJ*Kr~Iuhw-zGx#ioi~oPr;7Po@C}3~Iuuj@9y8MEzV3BTC=ZwC{HeG;Xmu+DOYv(Y zy=1TOpJJvdtLBPh%EyYDk}Zw~Ds+GrsYYMm_)j*C3~-lRhc-LGUxlIQqxC6yuLo>F zGA(*)|KRAmuC1#4z-wx>ltNxLT8vQ4^HVYM6rn4lR3~G9mZM)?Hl2NPVcDY3V-NGe zi7GyzUEtkyW~$nBA!)@NlOYpFr<8qWFSaB+@e`Q;o;o1(KH8_fWQili6Os`KX=vbA$dikF6;L)fact*Ikq^naPq5^g|s8!T+ z_>sc0lf`l|cf;U>-B=vz+H0^h!QDuMFg`%{h;SL&vws3(x!jVZKEVA7@y+;+_eYO; zBV~)QnMh1~B8)%bX;%UIQWp6D!ULB4yZGxmc9SYubx@!}OJ#*ah#xK6T z%!j@M(vYrcYC~73q)KDKRjTUnAxl<%ej14_Rr;{un{5K`%;Ac6Z>Hw(m-J+rBL)SY zmn`T$nSG&Rk5c24&AtCnP>r#4HN=0K^|5jY&n|WfetUjvBTCgg4@(qW^6Xe3(YJv_ zFU4nu7lS@XX4Z;#bPo{IqXD+Qy-#&bQBGXRS)ow$BlA(#(rc|?qP=r#Q`$_YXQ5~E zSqgtY%D1E90^&Yp-E|6#dD$XOy32?OVhQKOdF<(gn>{gBs$MQ1aHTL;!<=>Yvp#3f z`kQ|(dTJw}`K$kaiWPHHRROG>_kPRmjYP zZEh55I0NKE)gHoG|EfAKP~_ve8x`%7u2081sB6o_Je!)LEMHHFZIliLT zpIX$5=_%zmngzk^GJ1jdo_-5GDs!#{^{`3rOQKQ&I(EOKce;13X@2W-8QWkbUs%Rh zf!tDbqKJq9xnwf~THu{wjN1Hu<~^nG`hlOMUi3Xy^Sdy>4NQu$dHB z6Nvg4GpH~ebo{sjE!Da<1>T)2DN<-9Dwq@a#$$z%y>jb)$E5U9eaZ(Dl_7_Q6k@|i z-{hFNw~?k-Xg4D;Q!yhF5*g)Y!IwrJeoXe1b(iNg^fNE)hB3_F@+6gAO?psL&G;QX z`obnVyV;;4F|@|bu`pLTxzXMPQ8sUE<|r%iYEOhdE1zGz$IB^Hab4W&-L$`kkuK^Z zb(nExVdC@)&h@*BS{nnRluGugoV|D>DLO4?-|Wk}=KS&+KMfky!D9ZlTWhaDrHs6s ze_z<0jJD=0m_CE)%_uVeovpkEowkdUitB;Rl(5LiPorwp6Ka=tjm?-&gdq|5$NIG=Dvjo3V-*?}sCyN7+;m}dBl(o#yQtFLm7y?5tN`cENjkNpGG C+F@P* diff --git a/docs/site.css b/docs/site.css deleted file mode 100644 index 49ca65a..0000000 --- a/docs/site.css +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - - -/** defined standard tags **/ -body { - background-color: #ffffff; - color: #000000; -} - -a:link, a:active, a:visited { - color: #525D76; -} - - -h1 { - background-color: #525D76; - color: #ffffff; - font-family: arial,helvetica,sanserif; - font-size: large; - padding-left:2px; -} - -h2 { - background-color: #828DA6; - color: #ffffff; - font-family: arial,helvetica,sanserif; - font-size: medium; - padding-left:2px; -} - -table { - border: none; - border-spacing:0px; - border-collapse: collapse; -} - -img { - border: none 0px; -} - -/** define layout **/ - -/** table used to force footer to end of page **/ -table#layout { - width:100%; -} - -table#layout td { - padding:0px; -} - -div#container { - width: 95%; - margin: 10px; - margin-left: 0; - margin-right: auto; - padding: 10px; -} - -div#header { - padding: 5px; - margin: 0px; - margin-top:5px; - margin-bottom:5px; - height:80px; - border-bottom: 1px solid #333333; -} - -div#menu { - float: left; - width: 200px; - margin: 0; - margin-left: 0px; - margin-right: 5px; - - /** little higher margin since it doesn't start with a header **/ - margin-top:10px; - margin-bottom:0px; - - padding: 5px; -} - -div#body { - margin-right:0px; - margin-left: 215px; - margin-top:5px; - margin-bottom:0px; - - padding: 5px; - -} - -div#footer { - - clear: both; - - padding-top:15px; - margin-top:25px; - border-top: 1px solid #333333; - - - text-align:center; - color: #525D76; - font-style: italic; - font-size: smaller; -} - -div#logo1 { - float:left; - margin-left:5px; - margin-top:10px; -} - - -div#logo2 
{ - float:right; - margin-top:10px; -} - - -/** define body tag redefinitions **/ - - -div#body th { - background-color: #039acc; - color: #000000; - font-family: arial,helvetica,sanserif; - font-size: smaller; - vertical-align: top; - text-align:left; - border:1px #FFFFFF solid; - padding: 2px; -} - -div#body td { - background-color: #a0ddf0; - color: #000000; - font-family: arial,helvetica,sanserif; - font-size: smaller; - vertical-align: top; - text-align:left; - border:1px #FFFFFF solid; - padding: 2px; -} - - -div#body li { - margin-top:3px; -} - -/** define other body styles **/ - -div.section { - margin-left: 25px; -} - -div.subsection { - margin-left: 25px; -} - -div.source { - margin-left:25px; - margin-top:20px; - margin-bottom:20px; - padding-left:4px; - padding-right:4px; - padding-bottom:4px; - padding-top:5px; - - width:600; - - border: 1px solid #333333; - background-color: #EEEEEE; - color: #333333; - - /** bug: puts a extra line before the block in IE and after the block in FireFox **/ - white-space: pre; - - font-family: Courier; - font-size: smaller; - text-align: left; - - overflow:auto; -} - - -div.license { - margin-left:0px; - margin-top:20px; - margin-bottom:20px; - padding:5px; - - border: 1px solid #333333; - background-color: #EEEEEE; - color: #333333; - - text-align: left; -} - -/** define menu styles **/ - -div.menusection { - margin-bottom:10px; -} - -.menuheader { - font-weight:bold; - margin-bottom:0px; -} - -div.menusection ul { - margin-top:5px; - -} -div.menusection li { - -} - - - - -/** printing **/ -@page Section1 - { - size:8.5in 11.0in; - margin:1.0in .75in 1.0in .75in; -} - -@media print { - - /** make sure this fits the page **/ - div#container { - width:100%; - min-height:0px; - } - - - div#menu { - display:none; - } - - div#header { - display:none; - } - - div#body { - margin-left:5px; - } - - - div.source { - width:95%; - margin-left:0px; - } - - /** make a bit more room on the page **/ - div.section { - margin-left: 
0px; - } - - div.subsection { - margin-left: 0px; - } - - h1 { - background-color: #FFFFFF; - color: #000000; - } - - h2 { - background-color: #FFFFFF; - color: #000000; - } - - div#body td { - background-color: #FFFFFF; - color: #000000; - border: #333333 1px solid; - } - - div#body th { - background-color: #FFFFFF; - color: #000000; - border: #333333 1px solid; - font-style:bold; - } - -} diff --git a/docs/stylesheets/project.xml b/docs/stylesheets/project.xml deleted file mode 100644 index 60bb75f..0000000 --- a/docs/stylesheets/project.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - Hadoop Hive - Hadoop Hive - - - - - - - - - - - - - - - - - - diff --git a/docs/stylesheets/site.vsl b/docs/stylesheets/site.vsl deleted file mode 100644 index 9b23f40..0000000 --- a/docs/stylesheets/site.vsl +++ /dev/null @@ -1,317 +0,0 @@ -## Licensed to the Apache Software Foundation (ASF) under one -## or more contributor license agreements. See the NOTICE file -## distributed with this work for additional information -## regarding copyright ownership. The ASF licenses this file -## to you under the Apache License, Version 2.0 (the -## "License"); you may not use this file except in compliance -## with the License. You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, -## software distributed under the License is distributed on an -## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -## KIND, either express or implied. See the License for the -## specific language governing permissions and limitations -## under the License. - - - - - -#document() - - -## This is where the macro's live - -#macro ( table $table) - - #foreach ( $items in $table.getChildren() ) - #if ($items.getName().equals("tr")) - #tr ($items) - #end - #end -
-#end - -#macro ( tr $tr) - - #foreach ( $items in $tr.getChildren() ) - #if ($items.getName().equals("td")) - #td ($items) - #elseif ($items.getName().equals("th")) - #th ($items) - #end - #end - -#end - -#macro ( td $value) -#if ($value.getAttributeValue("colspan")) -#set ($colspan = $value.getAttributeValue("colspan")) -#end -#if ($value.getAttributeValue("rowspan")) -#set ($rowspan = $value.getAttributeValue("rowspan")) -#end - - #foreach ( $items in $value.getContent() ) - #if($items.name) - #display($items) - #else - $items.value - #end - #end - -#end - -#macro ( th $value) -#if ($value.getAttributeValue("colspan")) -#set ($colspan = $value.getAttributeValue("colspan")) -#end -#if ($value.getAttributeValue("rowspan")) -#set ($rowspan = $value.getAttributeValue("rowspan")) -#end - - #foreach ( $items in $value.getContent() ) - #if($items.name) - #display($items) - #else - $items.value - #end - #end - -#end - -#macro ( projectanchor $name $value ) -#if ($value.startsWith("http://")) - $name -#elseif ($value.startsWith("https://")) - $name -#else - $name -#end -#end - -#macro ( metaauthor $author $email ) - - -#end - -#macro ( image $value ) -#if ($value.getAttributeValue("width")) -#set ($width=$value.getAttributeValue("width")) -#end -#if ($value.getAttributeValue("height")) -#set ($height=$value.getAttributeValue("height")) -#end -#if ($value.getAttributeValue("align")) -#set ($align=$value.getAttributeValue("align")) -#end - -#end - -#macro ( source $value) -
$escape.getText($value.getText())
-#end - - -## need these to catch special macros within lists -#macro(list $node) -<$node.getName()> - #foreach ( $items in $node.getChildren() ) - #listitem($items) - #end - -#end - -#macro (listitem $node) -<$node.getName()> -## use getContent instead of getChildren -## to include both text and nodes - #foreach ( $items in $node.getContent() ) - #if($items.name) - #display($items) - #else - $items.value - #end - #end - -#end - - -## # displays a basic node, calling macros if appropriate -#macro ( display $node ) - #if ($node.getName().equals("img")) - #image ($node) - #elseif ($node.getName().equals("source")) - #source ($node) - #elseif ($node.getName().equals("table")) - #table ($node) - #elseif ($node.getName().equals("ul")) - #list ($node) - #elseif ($node.getName().equals("ol")) - #list ($node) - #else - $node - #end -#end - -#macro ( section $section) - -

$section.getAttributeValue("name")

- -
- #foreach ( $items in $section.getChildren() ) - #if ($items.getName().equals("subsection")) - #subsection ($items) - #else - #display($items) - #end - #end -
-#end - -#macro ( subsection $subsection) - -

$subsection.getAttributeValue("name")

-
- #foreach ( $items in $subsection.getChildren() ) - #display($items) - #end -
-#end - -#macro ( anchorName $section) -#if ($section.getAttributeValue("href")) -$section.getAttributeValue("href")## -#else -$section.getAttributeValue("name")## -#end -#end - -#macro ( makeProject ) - - - - #set ($menus = $project.getChild("body").getChildren("menu")) - #foreach ( $menu in $menus ) - - #end -#end - -#macro (getProjectImage) - -
- -
- - -#if ($project.getChild("logo")) - -
- -#set ( $logoString = $project.getChild("logo").getAttributeValue("href") ) -#if ( $logoString.startsWith("/") ) -$project.getChild( -#else -$project.getChild( -#end - -
- -#end -#end - -#macro (printMeta $metaElement) - -#end - -#macro (document) - - - - - - - - - #set ($authors = $root.getChild("properties").getChildren("author")) - #foreach ( $au in $authors ) - #metaauthor ( $au.getText() $au.getAttributeValue("email") ) - #end - - #set ($metas = $root.getChildren("meta")) - - ## Parse meta directives such as - ## - #foreach ($meta in $metas) #printMeta($meta) #end - - ## Support for tags. - #if ($root.getChild("properties").getChild("base")) - #set ($url = $root.getChild("properties").getChild("base").getAttributeValue("href")) - - #end - - $project.getChild("title").getText() - $root.getChild("properties").getChild("title").getText() - - ## use a relative CSS for when the page is displayed locally (will overwrite - ## previous CSS settings) - - - - - - ## use a table in order to force footer to end of page - -
- - - - - -
- #set ($allSections = $root.getChild("body").getChildren("section")) - #foreach ( $section in $allSections ) - #section ($section) - #end -
- - - -
- - - -#end diff --git a/docs/velocity.properties b/docs/velocity.properties deleted file mode 100644 index 77ee2de..0000000 --- a/docs/velocity.properties +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -runtime.log=build/docs/velocity.log diff --git a/docs/xdocs/index.xml b/docs/xdocs/index.xml deleted file mode 100644 index f1df3fa..0000000 --- a/docs/xdocs/index.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - - Hadoop Hive - Hadoop Hive Documentation Team - - -
-

Hive is a data warehouse infrastructure built on top of Hadoop. It provides tools to enable easy data ETL, a mechanism to put structures on the data, and the capability to querying and analysis of large data sets stored in Hadoop files. Hive defines a simple SQL-like query language, called QL, that enables users familiar with SQL to query the data. At the same time, this language also allows programmers who are familiar with the MapReduce fromwork to be able to plug in their custom mappers and reducers to perform more sophisticated analysis that may not be supported by the built-in capabilities of the language.

- -

-Hive does not mandate read or written data be in the "Hive format"---there is no such thing. Hive works equally well on Thrift, control delimited, or your specialized data formats. Please see File Format and SerDe in Developer Guide for details.

-
-
-

Hive is based on Hadoop, which is a batch processing system. As a result, Hive does not and cannot promise low latencies on queries. The paradigm here is strictly of submitting jobs and being notified when the jobs are completed as opposed to real-time queries. In contrast to the systems such as Oracle where analysis is run on a significantly smaller amount of data, but the analysis proceeds much more iteratively with the response times between iterations being less than a few minutes, Hive queries response times for even the smallest jobs can be of the order of several minutes. However for larger jobs (e.g., jobs processing terabytes of data) in general they may run into hours.

- -

In summary, low latency performance is not the top-priority of Hive's design principles. What Hive values most are scalability (scale out with more machines added dynamically to the Hadoop cluster), extensibility (with MapReduce framework and UDF/UDAF/UDTF), fault-tolerance, and loose-coupling with its input formats.

-
- -
diff --git a/docs/xdocs/language_manual/cli.xml b/docs/xdocs/language_manual/cli.xml deleted file mode 100644 index aaa8e81..0000000 --- a/docs/xdocs/language_manual/cli.xml +++ /dev/null @@ -1,208 +0,0 @@ - - - - - - - Hadoop Hive- Command Line Interface (CLI) - Hadoop Hive Documentation Team - - - -

Hive Cli

-
- -

Usage:

- -]* [<-f filename>|<-e query-string>] [-S] - - -i Initialization Sql from file (executed automatically and silently before any other commands) - -e 'quoted query string' Sql from command line - -f Sql from file - -S Silent mode in interactive shell where only data is emitted - -hiveconf x=y Use this to set hive/hadoop configuration variables. - - -e and -f cannot be specified together. In the absence of these options, interactive shell is started. However, -i can be used with any other options. - - To see this usage help, run hive -h -]]> - -
    -
  • Example of running a Query from the command line - -
  • - -
  • Example of setting hive configuration variables - -
  • - -
  • Example of dumping data out from a query into a file using silent mode - a.txt -]]> -
  • - -
  • Example of running a script non-interactively - -
  • - -
  • Example of running an initialization script before entering interactive mode - -
  • - -
-
- -
-

-The cli when invoked without the -i option will attempt to load HIVE_HOME/bin/.hiverc and $HOME/.hiverc as initialization files. -

-
- -
-When $HIVE_HOME/bin/hive is run without either -e/-f option it enters interactive shell mode. - -Use ";" (semicolon) to terminate commands. Comments in scripts can be specified using the "--" prefix. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
CommandDescription
quitUse quit or exit to leave the interactive shell.
set key=valueUse this to set value of particular configuration variable. One thing to note here is that if you misspell the variable name, cli will not show an error.
setThis will print a list of configuration variables that are overridden by user or hive.
set -v This will print all hadoop and hive configuration variables.
add FILE [file] [file]*Adds a file to the list of resources
list FILElist all the files added to the distributed cache
list FILE [file]*Check if given resources are already added to distributed cache
! [cmd]Executes a shell command from the hive shell
dfs [dfs cmd]Executes a dfs command from the hive shell
[query]Executes a hive query and prints results to standard out
source FILEUsed to execute a script file inside the CLI.
- -Sample Usage: - - set mapred.reduce.tasks=32; - hive> set; - hive> select a.* from tab1; - hive> !ls; - hive> dfs -ls; -]]> - -
- -
-

-Hive uses log4j for logging. These logs are not emitted to the standard output by default but are instead captured to a log file specified by Hive's log4j properties file. By default Hive will use hive-log4j2.properties in the conf/ directory of the hive installation which writes out logs to /tmp/$USER/hive.log and uses the WARN level. -

-

-It is often desirable to emit the logs to the standard output and/or change the logging level for debugging purposes. These can be done from the command line as follows:

- - -

-hive.root.logger specifies the logging level as well as the log destination. Specifying console as the target sends the logs to the standard error (instead of the log file). -

-
- -
-

-Hive can manage the addition of resources to a session where those resources need to be made available at query execution time. Any locally accessible file can be added to the session. Once a file is added to a session, hive query can refer to this file by its name (in map/reduce/transform clauses) and this file is available locally at execution time on the entire hadoop cluster. Hive uses Hadoop's Distributed Cache to distribute the added files to all the machines in the cluster at query execution time.

- - []* - LIST { FILE[S] | JAR[S] | ARCHIVE[S] } [ ..] - DELETE { FILE[S] | JAR[S] | ARCHIVE[S] } [ ..] ]]> - -
    -
  • FILE resources are just added to the distributed cache. Typically, this might be something like a transform script to be executed.
  • -
  • JAR resources are also added to the Java classpath. This is required in order to reference objects they contain such as UDF's.
  • -
  • ARCHIVE resources are automatically unarchived as part of distributing them.
  • -
- -

Example

- - add FILE /tmp/tt.py; -hive> list FILES; -/tmp/tt.py -hive> from networks a MAP a.networkid USING 'python tt.py' as nn where a.ds = '2009-01-04' limit 10; ]]> - -

It is not neccessary to add files to the session if the files used in a transform script are already available on all machines in the hadoop cluster using the same path name. For example:

- -
    -
  • ... MAP a.networkid USING 'wc -l' ...: here wc is an executable available on all machines
  • -
  • ... MAP a.networkid USING '/home/nfsserv1/hadoopscripts/tt.py' ...: here tt.py may be accessible via a nfs mount point that's configured identically on all the cluster nodes.
  • -
- - -
- -
diff --git a/docs/xdocs/language_manual/data-manipulation-statements.xml b/docs/xdocs/language_manual/data-manipulation-statements.xml deleted file mode 100644 index 214a0dc..0000000 --- a/docs/xdocs/language_manual/data-manipulation-statements.xml +++ /dev/null @@ -1,234 +0,0 @@ - - - - - - - Hadoop Hive- Data Manipulation Statements - Hadoop Hive Documentation Team - - - - -
- - - -map_type - : MAP < primitive_type, data_type > - -struct_type - : STRUCT < col_name : data_type [COMMENT col_comment], ...> - -row_format - : DELIMITED [FIELDS TERMINATED BY char] [COLLECTION ITEMS TERMINATED BY char] - [MAP KEYS TERMINATED BY char] [LINES TERMINATED BY char] - | SERDE serde_name [WITH SERDEPROPERTIES (property_name=property_value, property_name=property_value, ...)] - -file_format: - : SEQUENCEFILE - | TEXTFILE - | INPUTFORMAT input_format_classname OUTPUTFORMAT output_format_classname -]]> - -

-CREATE TABLE creates a table with the given name. An error is thrown if a table or view with the same name already exists. You can use IF NOT EXISTS to skip the error. -

- -

-The EXTERNAL keyword lets you create a table and provide a LOCATION so that Hive does not use a default location for this table. This comes in handy if you already have data generated. When dropping an EXTERNAL table, data in the table is NOT deleted from the file system. -

-The LIKE form of CREATE TABLE allows you to copy an existing table definition exactly (without copying its data). - -

-You can create tables with custom SerDe or using native SerDe. A native SerDe is used if ROW FORMAT is not specified or ROW FORMAT DELIMITED is specified. You can use the DELIMITED clause to read delimited files. Use the SERDE clause to create a table with custom SerDe. Refer to SerDe section of the User Guide for more information on SerDe. -

- -

-You must specify a list of a columns for tables that use a native SerDe. Refer to the Types part of the User Guide for the allowable column types. A list of columns for tables that use a custom SerDe may be specified but Hive will query the SerDe to determine the actual list of columns for this table. -

- -

-Use STORED AS TEXTFILE if the data needs to be stored as plain text files. Use STORED AS SEQUENCEFILE if the data needs to be compressed. Please read more about Hive/CompressedStorage if you are planning to keep data compressed in your Hive tables. Use INPUTFORMAT and OUTPUTFORMAT to specify the name of a corresponding InputFormat and OutputFormat class as a string literal, e.g. 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat'. -

- -

-Partitioned tables can be created using the PARTITIONED BY clause. A table can have one or more partition columns and a separate data directory is created for each distinct value combination in the partition columns. Further, tables or partitions can be bucketed using CLUSTERED BY columns, and data can be sorted within that bucket via SORT BY columns. This can improve performance on certain kinds of queries. -

- -

-Table names and column names are case insensitive but SerDe and property names are case sensitive. Table and column comments are string literals (single-quoted). The TBLPROPERTIES clause allows you to tag the table definition with your own metadata key/value pairs. -

- -

A create table example:

- - -

The statement above creates the page_view table with viewTime, userid, page_url, referrer_url, and ip columns (including comments). The table is also partitioned and data is stored in sequence files. The data format in the files is assumed to be field-delimited by ctrl-A and row-delimited by newline. -

- -
- -
- -

- Tables can also be created and populated by the results of a query in one create-table-as-select (CTAS) statement. The table created by CTAS is atomic, meaning that the table is not seen by other users until all the query results are populated. So other users will either see the table with the complete results of the query or will not see the table at all. -

- -

- There are two parts in CTAS, the SELECT part can be any SELECT statement supported by HiveQL. The CREATE part of the CTAS takes the resulting schema from the SELECT part and creates the target table with other table properties such as the SerDe and storage format. The only restrictions in CTAS is that the target table cannot be a partitioned table (nor can it be an external table). -

- - - -
- -
- -

-This example CTAS statement creates the target table new_key_value_store with the -schema (new_key DOUBLE, key_value_pair STRING) derived from the results of the -SELECT statement. If the SELECT statement does not specify column aliases, the -column names will be automatically assigned to _col0, _col1, and _col2 etc. -In addition, the new target table is created using a specific SerDe and a storage -format independent of the source tables in the SELECT statement. -

- - - -

-Being able to select data from one table to another is one of the most -powerful features of Hive. Hive handles the conversion of the data from the source -format to the destination format as the query is being executed! -

- -
- -
- - - -

In the example above, the page_view table is bucketed (clustered by) userid and within each bucket the data is sorted in increasing order of viewTime. Such an organization allows the user to do efficient sampling on the clustered column - in this case userid. The sorting property allows internal operators to take advantage of the better-known data structure while evaluating queries, also increasing efficiency. MAP KEYS and COLLECTION ITEMS keywords can be used if any of the columns are lists or maps. -

- -

-The CLUSTERED BY and SORTED BY creation commands do not affect how data is inserted into a table -- only how it is read. This means that users must be careful to insert data correctly by specifying the number of reducers to be equal to the number of buckets, and using CLUSTER BY and SORT BY commands in their query. See -Working with Bucketed tables to see how these -are used. -

- -
- -
- -

-Unless a table is specified as EXTERNAL it will be stored inside a folder specified by the -configuration property hive.metastore.warehouse.dir. -EXTERNAL tables points to any hdfs location for its storage. You still have to make sure that the data is format is specified to match the data. - -

-'; - ]]> - -
- -
- -

The statement above creates a new empty_key_value_store table whose definition exactly matches the existing key_value_store in all particulars other than table name. The new table contains no rows. -

- - - -
- -
-

Drop it like it is hot

-
- -
diff --git a/docs/xdocs/language_manual/joins.xml b/docs/xdocs/language_manual/joins.xml deleted file mode 100644 index 190ecd4..0000000 --- a/docs/xdocs/language_manual/joins.xml +++ /dev/null @@ -1,212 +0,0 @@ - - - - - - - Hadoop Hive- Joins - Hadoop Hive Documentation Team - - - - -
- - - -

-Only equality joins, outer joins, and left semi joins are supported in Hive. Hive does not support join conditions that are not equality conditions as it is very difficult to express such conditions as a map/reduce job. Also, more than two tables can be joined in Hive. -

- -Allowed Equality Joins - - - - - -Disallowed Joins - - b.id) -]]> - -

Multiple Tables can be joined in the same query

- - - - - - - -
- -
- -

Hive converts joins over multiple tables into a single map/reduce job if for every table the same column is used in the join clauses. The query below is converted into a single map/reduce job, as only the key1 column of b is involved in the join.

- - -It is very interesting to note that any number of tables can be joined in single map/reduce process as long as they fit the above criteria. - -

However, if the join columns are not the same for all tables, the query is converted into multiple map/reduce jobs

- - - -

In this case the first map/reduce job joins a with b and the results are then joined with c in the second map/reduce job.

-
- -
- -

In every map/reduce stage of the join, the last table in the sequence is streamed through the reducers whereas the others are buffered. Therefore, it helps to reduce the memory needed in the reducer for buffering the rows for a particular value of the join key by organizing the tables such that the largest tables appear last in the sequence. e.g. in

- - - -

all the three tables are joined in a single map/reduce job and the values for a particular value of the key for tables a and b are buffered in the memory in the reducers. Then for each row retrieved from c, the join is computed with the buffered rows.

- -

For the query:

- - - -

* there are two map/reduce jobs involved in computing the join. The first of these joins a with b and buffers the values of a while streaming the values of b in the reducers. The second of these jobs buffers the results of the first join while streaming the values of c through the reducers.

- -
- -
- -

In every map/reduce stage of the join, the table to be streamed can be specified via a hint:

- - - -

All the three tables are joined in a single map/reduce job and the values for a particular value of the key for tables b and c are buffered in the memory in the reducers. Then for each row retrieved from a, the join is computed with the buffered rows. -

- -
- -
- -

LEFT, RIGHT, and FULL OUTER joins exist in order to provide more control over ON clauses for which there is no match. For example:

- - - -

The above query will return a row for every row in a. This output row will be a.val,b.val when there is a b.key that equals a.key, and the output row will be a.val,NULL when there is no corresponding b.key. Rows from b which have no corresponding a.key will be dropped. The syntax "FROM a LEFT OUTER JOIN b" must be written on one line in order to understand how it works--a is to the LEFT of b in this query, and so all rows from a are kept; a RIGHT OUTER JOIN will keep all rows from b, and a FULL OUTER JOIN will keep all rows from a and all rows from b. OUTER JOIN semantics should conform to standard SQL specs. -

- -

Joins occur BEFORE WHERE CLAUSES. So, if you want to restrict the OUTPUT of a join, a requirement should be in the WHERE clause, otherwise it should be in the JOIN clause. A big point of confusion for this issue is partitioned tables

- - - -

will join a on b, producing a list of a.val and b.val. The WHERE clause, however, can also reference other columns of a and b that are in the output of the join, and then filter them out. However, whenever a row from the JOIN has found a key for a and no key for b, all of the columns of b will be NULL, including the ds column. This is to say, you will filter out all rows of join output for which there was no valid b.key, and thus you have outsmarted your LEFT OUTER requirement. In other words, the LEFT OUTER part of the join is irrelevant if you reference any column of b in the WHERE clause. Instead, when OUTER JOINing, use this syntax:

- - - -

Joins are NOT commutative! Joins are left-associative regardless of whether they are LEFT or RIGHT joins.

- - - -

The above query first joins a on b, throwing away everything in a or b that does not have a corresponding key in the other table. The reduced table is then joined on c. This provides unintuitive results if there is a key that exists in both a and c, but not b: The whole row (including a.val1, a.val2, and a.key) is dropped in the "a JOIN b" step, so when the result of that is joined with c, any row with a c.key that had a corresponding a.key or b.key (but not both) will show up as NULL, NULL, NULL, c.val.

-
- -
- -

LEFT SEMI JOIN implements the correlated IN/EXISTS subquery semantics in an efficient way. Since Hive currently does not support IN/EXISTS subqueries, you can rewrite your queries using LEFT SEMI JOIN. The restrictions of using LEFT SEMI JOIN is that the right-hand-side table should only be referenced in the join condition (ON-clause), but not in WHERE- or SELECT-clauses etc.

- -

This type of query

- - -

Can be written as:

- - - -
- -
- -

If all but one of the tables being joined are small, the join can be performed as a map-only job. The query does not need a reducer. For every mapper of a, b is read completely. A restriction is that a FULL/RIGHT OUTER JOIN b cannot be performed.

- - - -
- -
- -

If the tables being joined are bucketized, and the buckets are a multiple of each other, the buckets can be joined with each other. If table A has 8 buckets and table B has 4 buckets, the following join:

- - - -

can be done on the mapper only. Instead of fetching B completely for each mapper of A, only the required buckets are fetched. For the query above, the mapper processing bucket 1 for A will only fetch bucket 1 of B. It is not the default behavior, and is governed by the following parameter

- -set hive.optimize.bucketmapjoin = true - -

If the tables being joined are sorted and bucketized, and the number of buckets is the same, a sort-merge join can be performed. The corresponding buckets are joined with each other at the mapper. If both A and B have 4 buckets

- - - -

can be done on the mapper only. The mapper for the bucket for A will traverse the corresponding bucket for B. This is not the default behavior, and the following parameters need to be set:

- - - -
- - - - - -
diff --git a/docs/xdocs/language_manual/var_substitution.xml b/docs/xdocs/language_manual/var_substitution.xml deleted file mode 100644 index 3f3b04b..0000000 --- a/docs/xdocs/language_manual/var_substitution.xml +++ /dev/null @@ -1,130 +0,0 @@ - - - - - - - Hadoop Hive- Variable Substitution - Hadoop Hive Documentation Team - - - -

Hive Variable Substitution

-
- -

Hive is used for both interactive queries as well as part. The hive variable substitution mechanism was -designed to avoid some of the code that was getting baked into the scripting language ontop of hive. For example:

- - - -

-are becoming common place. This is frustrating as hive becomes closely coupled with scripting languages. The hive -startup time of a couple seconds is non-trivial when doing thousands of manipulations multiple hive -e invocations.

- -

-Hive Variables combine the set capability you know and love with some limited yet powerful (evil laugh) substitution -ability. For example:

- - - -

Results in:

- - -
- -
- -

There are three namespaces for variables: hiveconf, system, and env. hiveconf variables are set as normal:

- - - -

However they are retrieved using

- - - -

Annotated examples of usage from the test case ql/src/test/queries/clientpositive/set_processor_namespaces.q

- - -
- -
-

Variable substitution is on by default. If this causes an issue with an already existing script, disable it.

- - - -
- - -
diff --git a/docs/xdocs/language_manual/working_with_bucketed_tables.xml b/docs/xdocs/language_manual/working_with_bucketed_tables.xml deleted file mode 100644 index de4b599..0000000 --- a/docs/xdocs/language_manual/working_with_bucketed_tables.xml +++ /dev/null @@ -1,87 +0,0 @@ - - - - - - - Hadoop Hive- Working with Bucketed Tables - Hadoop Hive Documentation Team - - - - -
- -

-This is a brief example on creating a populating bucketed tables. Bucketed tables -are fantastic in that they allow much more efficient sampling than do non-bucketed -tables, and they may later allow for time saving operations such as mapside joins. -However, the bucketing specified at table creation is not enforced when the table -is written to, and so it is possible for the table's metadata to advertise -properties which are not upheld by the table's actual layout. This should obviously -be avoided. Here's how to do it right. -

-

First there’s table creation:

- - - -

notice that we define user_id as the bucket column

-
- -
- - - -

The command set hive.enforce.bucketing = true; allows the -correct number of reducers and the cluster by column to be automatically selected -based on the table. Otherwise, you would need to set the number of reducers to be -the same as the number of buckets with -set mapred.reduce.tasks = 256; and have a -CLUSTER BY ... clause in the select.

- -
- -
-

-How does Hive distribute the rows across the buckets? In general, the bucket number is determined by the expression hash_function(bucketing_column) mod num_buckets. (There's a '0x7FFFFFFF in there too, but that's not that important). The hash_function depends on the type of the bucketing column. For an int, it's easy, hash_int(i) == i. For example, if user_id were an int, and there were 10 buckets, we would expect all user_id's that end in 0 to be in bucket 1, all user_id's that end in a 1 to be in bucket 2, etc. For other datatypes, it's a little tricky. In particular, the hash of a BIGINT is not the same as the BIGINT. And the hash of a string or a complex datatype will be some number that's derived from the value, but not anything humanly-recognizable. For example, if user_id were a STRING, then the user_id's in bucket 1 would probably not end in 0. In general, distributing rows based on the hash will give you a even distribution in the buckets. -

- -
- -
-

-So, what can go wrong? As long as you -set hive.enforce.bucketing = true, and use the syntax above, -the tables should be populated properly. Things can go wrong if the bucketing -column type is different during the insert and on read, or if you manually -cluster by a value that's different from the table definition. -

-
- -
diff --git a/docs/xdocs/udf/reflect.xml b/docs/xdocs/udf/reflect.xml deleted file mode 100644 index 435f025..0000000 --- a/docs/xdocs/udf/reflect.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - Hadoop Hive- Reflect User Defined Function - Hadoop Hive Documentation Team - - - -
- -

A java class and method often exists to handle the exact function a user would like to use in hive. Rather -then having to write a wrapper UDF to call this method, the majority of these methods can be called using reflect udf. Reflect uses -java reflection to instantiate and call methods of objects, it can also call static functions. The method must return a primative type -or a type that hive knows how to serialize. -

- - - -
- -
diff --git a/packaging/src/main/assembly/src.xml b/packaging/src/main/assembly/src.xml index 0529e90..820d379 100644 --- a/packaging/src/main/assembly/src.xml +++ b/packaging/src/main/assembly/src.xml @@ -67,7 +67,6 @@ contrib/**/* data/**/* dev-support/**/* - docs/**/* druid-handler/**/* jdbc-handler/**/* find-bugs/**/* diff --git a/pom.xml b/pom.xml index d8b4cf3..da6db67 100644 --- a/pom.xml +++ b/pom.xml @@ -1111,7 +1111,6 @@ checkstyle/** bin/** itests/** - docs/** **/README.md **/*.iml **/*.txt