Explorar o código

update readme and remove configs that do not apply

arpitgupta hai 10 anos
pai
achega
1eba57814b

+ 2 - 2
README.md

@@ -70,14 +70,14 @@ All of these steps should be carried out on your Hadoop cluster.
   	hive> source query55.sql;
   	```
 
-  Note that the database is named based on the Data Scale chosen in step 3. At Data Scale 10000, your database will be named tpcds_bin_partitioned_orc_10000. At Data Scale 1000 it would be named tpcds_bin_partitioned_orc_1000. You can always ```show databases``` to get a list of available databases.
+  Note that the database is named based on the Data Scale chosen in step 3. At Data Scale 10000, your database will be named tpcds_bin_partitioned_orc_10000. At Data Scale 1000 it would be named tpch_flat_orc_1000. You can always ```show databases``` to get a list of available databases.
 
   Similarly, if you generated 1 TB of TPC-H data during Step 5:
 
   	```
   	cd sample-queries-tpch
   	hive -i testbench.settings
-  	hive> use tpch_bin_partitioned_orc_1000;
+  	hive> use tpch_flat_orc_1000;
   	hive> source tpch_query1.sql;
   	```
 

+ 0 - 3
sample-queries-tpcds/testbench-withATS.settings

@@ -3,7 +3,6 @@ set fs.file.impl.disable.cache=true;
 set fs.hdfs.impl.disable.cache=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join=true;
-set hive.auto.convert.sortmerge.join.noconditionaltask=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.compactor.abortedtxn.threshold=1000;
 set hive.compactor.check.interval=300;
@@ -31,13 +30,11 @@ set hive.metastore.warehouse.dir=/apps/hive/warehouse;
 set hive.optimize.bucketmapjoin.sortedmerge=false;
 set hive.optimize.bucketmapjoin=true;
 set hive.optimize.index.filter=true;
-set hive.optimize.mapjoin.mapreduce=true;
 set hive.optimize.reducededuplication.min.reducer=4;
 set hive.optimize.reducededuplication=true;
 set hive.orc.splits.include.file.footer=false;
 set hive.security.authorization.enabled=false;
 set hive.security.metastore.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider;
-set hive.semantic.analyzer.factory.impl=org.apache.hivealog.cli.HCatSemanticAnalyzerFactory;
 set hive.server2.enable.doAs=false;
 set hive.server2.tez.default.queues=default;
 set hive.server2.tez.initialize.default.sessions=false;

+ 0 - 3
sample-queries-tpcds/testbench.settings

@@ -3,7 +3,6 @@ set fs.file.impl.disable.cache=true;
 set fs.hdfs.impl.disable.cache=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join=true;
-set hive.auto.convert.sortmerge.join.noconditionaltask=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.compactor.abortedtxn.threshold=1000;
 set hive.compactor.check.interval=300;
@@ -28,13 +27,11 @@ set hive.metastore.warehouse.dir=/apps/hive/warehouse;
 set hive.optimize.bucketmapjoin.sortedmerge=false;
 set hive.optimize.bucketmapjoin=true;
 set hive.optimize.index.filter=true;
-set hive.optimize.mapjoin.mapreduce=true;
 set hive.optimize.reducededuplication.min.reducer=4;
 set hive.optimize.reducededuplication=true;
 set hive.orc.splits.include.file.footer=false;
 set hive.security.authorization.enabled=false;
 set hive.security.metastore.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider;
-set hive.semantic.analyzer.factory.impl=org.apache.hivealog.cli.HCatSemanticAnalyzerFactory;
 set hive.server2.enable.doAs=false;
 set hive.server2.tez.default.queues=default;
 set hive.server2.tez.initialize.default.sessions=false;

+ 0 - 3
sample-queries-tpch/testbench-withATS.settings

@@ -3,7 +3,6 @@ set fs.file.impl.disable.cache=true;
 set fs.hdfs.impl.disable.cache=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join=true;
-set hive.auto.convert.sortmerge.join.noconditionaltask=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.compactor.abortedtxn.threshold=1000;
 set hive.compactor.check.interval=300;
@@ -31,13 +30,11 @@ set hive.metastore.warehouse.dir=/apps/hive/warehouse;
 set hive.optimize.bucketmapjoin.sortedmerge=false;
 set hive.optimize.bucketmapjoin=true;
 set hive.optimize.index.filter=true;
-set hive.optimize.mapjoin.mapreduce=true;
 set hive.optimize.reducededuplication.min.reducer=4;
 set hive.optimize.reducededuplication=true;
 set hive.orc.splits.include.file.footer=false;
 set hive.security.authorization.enabled=false;
 set hive.security.metastore.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider;
-set hive.semantic.analyzer.factory.impl=org.apache.hivealog.cli.HCatSemanticAnalyzerFactory;
 set hive.server2.enable.doAs=false;
 set hive.server2.tez.default.queues=default;
 set hive.server2.tez.initialize.default.sessions=false;

+ 0 - 3
sample-queries-tpch/testbench.settings

@@ -3,7 +3,6 @@ set fs.file.impl.disable.cache=true;
 set fs.hdfs.impl.disable.cache=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join=true;
-set hive.auto.convert.sortmerge.join.noconditionaltask=true;
 set hive.auto.convert.sortmerge.join=true;
 set hive.compactor.abortedtxn.threshold=1000;
 set hive.compactor.check.interval=300;
@@ -28,13 +27,11 @@ set hive.metastore.warehouse.dir=/apps/hive/warehouse;
 set hive.optimize.bucketmapjoin.sortedmerge=false;
 set hive.optimize.bucketmapjoin=true;
 set hive.optimize.index.filter=true;
-set hive.optimize.mapjoin.mapreduce=true;
 set hive.optimize.reducededuplication.min.reducer=4;
 set hive.optimize.reducededuplication=true;
 set hive.orc.splits.include.file.footer=false;
 set hive.security.authorization.enabled=false;
 set hive.security.metastore.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider;
-set hive.semantic.analyzer.factory.impl=org.apache.hivealog.cli.HCatSemanticAnalyzerFactory;
 set hive.server2.enable.doAs=false;
 set hive.server2.tez.default.queues=default;
 set hive.server2.tez.initialize.default.sessions=false;