-- load-partitioned.sql
  1. -- set hive.enforce.bucketing=true;
  2. -- set hive.enforce.sorting=true;
  3. set hive.exec.dynamic.partition.mode=nonstrict;
  4. set hive.exec.max.dynamic.partitions.pernode=100000;
  5. set hive.exec.max.dynamic.partitions=100000;
  6. set hive.exec.max.created.files=1000000;
  7. set hive.exec.parallel=true;
  8. set hive.exec.reducers.max=${REDUCERS};
  9. set hive.stats.autogather=true;
  10. set hive.optimize.sort.dynamic.partition=true;
  11. -- set mapred.job.reduce.input.buffer.percent=0.0;
  12. -- set mapreduce.input.fileinputformat.split.minsize=240000000;
  13. -- set mapreduce.input.fileinputformat.split.minsize.per.node=240000000;
  14. -- set mapreduce.input.fileinputformat.split.minsize.per.rack=240000000;
  15. set hive.optimize.sort.dynamic.partition=true;
  16. -- set hive.tez.java.opts=-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/;
  17. set tez.runtime.empty.partitions.info-via-events.enabled=true;
  18. set tez.runtime.report.partition.stats=true;
  19. -- fewer files for the NULL partition
  20. set hive.tez.auto.reducer.parallelism=true;
  21. set hive.tez.min.partition.factor=0.01;
  22. -- set mapred.map.child.java.opts=-server -Xmx2800m -Djava.net.preferIPv4Stack=true;
  23. -- set mapred.reduce.child.java.opts=-server -Xms1024m -Xmx3800m -Djava.net.preferIPv4Stack=true;
  24. -- set mapreduce.map.memory.mb=3072;
  25. -- set mapreduce.reduce.memory.mb=4096;
  26. -- set io.sort.mb=800;
  27. set hive.optimize.sort.dynamic.partition.threshold=0;