tpcds-setup-sandbox.sh

#!/bin/bash

# Requires the pre-built TPC-DS data generator jar.
if [ ! -f tpcds-gen/target/tpcds-gen-1.0-SNAPSHOT.jar ]; then
    echo "Build the data generator with build.sh first"
    exit 1
fi

# The hive CLI must be on the PATH.
which hive > /dev/null 2>&1
if [ $? -ne 0 ]; then
    echo "Script must be run where Hive is installed"
    exit 1
fi
set -x
set -e

# Tables in the TPC-DS schema.
LIST="date_dim time_dim item customer customer_demographics household_demographics customer_address store promotion warehouse ship_mode reason income_band call_center web_page catalog_page inventory store_sales store_returns web_sales web_returns web_site catalog_sales catalog_returns"

# Arguments: scale factor and target HDFS directory.
SCALE=$1
DIR=$2
BUCKETS=13
RETURN_BUCKETS=1
SPLIT=16
STORE_CLAUSES=( "orc" )
FILE_FORMATS=( "orc" )
SERDES=( "org.apache.hadoop.hive.ql.io.orc.OrcSerde" )
# Generate the raw data on HDFS if it is not already there.
hadoop dfs -ls ${DIR}/${SCALE} || (cd tpcds-gen; hadoop jar target/*.jar -d ${DIR}/${SCALE}/ -s ${SCALE})
hadoop dfs -ls ${DIR}/${SCALE}
# Generate the text/flat tables. These will later be converted to ORCFile.
if true; then
    for t in ${LIST}
    do
        hive -i settings/load.sql -f ddl/text/${t}.sql -d DB=tpcds_text_${SCALE} -d LOCATION=${DIR}/${SCALE}/${t}
    done
fi
# Generate a flat (unpartitioned) schema in ORCFile format.
i=0
for file in "${STORE_CLAUSES[@]}"
do
    for t in ${LIST}
    do
        hive -i settings/load.sql -f ddl/bin_flat/${t}.sql -d DB=tpcds_bin_flat_${FILE_FORMATS[$i]}_${SCALE} -d SOURCE=tpcds_text_${SCALE} -d BUCKETS=${BUCKETS} -d FILE="${file}" -d SERDE=${SERDES[$i]} -d SPLIT=${SPLIT}
    done
    i=$((i+1))
done
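
A minimal sketch of how the script would be invoked, assuming the generator jar has been built with build.sh and the current user can write to HDFS; the scale factor (2) and target directory (/tmp/tpcds-generate) below are illustrative placeholders, not values taken from the script:

    ./tpcds-setup-sandbox.sh 2 /tmp/tpcds-generate

The first argument becomes SCALE (the TPC-DS scale factor, roughly the data size in GB) and the second becomes DIR (the HDFS directory that receives the raw flat files). The script then creates a tpcds_text_<scale> database of external text tables over that data and a tpcds_bin_flat_orc_<scale> database holding the ORC copies.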