This is an automated email from the ASF dual-hosted git repository.

wzhou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 5835c9b994ad30b33542afa3c704d7a4a45aa24f
Author: jasonmfehr <[email protected]>
AuthorDate: Fri Mar 15 13:38:13 2024 -0700

    IMPALA-12913: Refactor Workload Management Custom Cluster Tests
    
    The custom cluster tests that assert the workload
    management functionality of inserting completed queries
    into the impala_query_log table were inefficient because
    they created their own database tables and added data to
    those tables.
    
    This patch updates these tests to use the existing tables
    in the functional database where possible. The few tests
    that need their own tables now have those tables set up in
    a database created by the pytest unique_database fixture
    instead of using the default database.
    
    A new table has also been added to the functional database.
    This table is named zipcode_timezones and contains two
    columns, the first having a few zipcodes and the second
    having their corresponding timezone. This table can be used
    to join the zipcode_incomes and alltimezones tables. This
    table is populated by a new csv file in the testdata
    directory.
    
    Change-Id: I1e3249a8f306cf43de0d6f6586711c779399e83b
    Reviewed-on: http://gerrit.cloudera.org:8080/21153
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 testdata/data/zipcodes_timezones.csv               | 605 ++++++++++++++
 .../functional/functional_schema_template.sql      |  14 +
 tests/custom_cluster/test_query_log.py             | 893 +++++++++------------
 tests/custom_cluster/test_sys_db.py                |  62 ++
 tests/util/workload_management.py                  |  14 +-
 5 files changed, 1092 insertions(+), 496 deletions(-)

diff --git a/testdata/data/zipcodes_timezones.csv 
b/testdata/data/zipcodes_timezones.csv
new file mode 100644
index 000000000..4a5e6ba16
--- /dev/null
+++ b/testdata/data/zipcodes_timezones.csv
@@ -0,0 +1,605 @@
+98001,US/Pacific
+98002,US/Pacific
+98003,US/Pacific
+98004,US/Pacific
+98005,US/Pacific
+98006,US/Pacific
+98007,US/Pacific
+98008,US/Pacific
+98010,US/Pacific
+98011,US/Pacific
+98012,US/Pacific
+98014,US/Pacific
+98019,US/Pacific
+98020,US/Pacific
+98021,US/Pacific
+98022,US/Pacific
+98023,US/Pacific
+98024,US/Pacific
+98025,US/Pacific
+98026,US/Pacific
+98027,US/Pacific
+98028,US/Pacific
+98029,US/Pacific
+98030,US/Pacific
+98031,US/Pacific
+98032,US/Pacific
+98033,US/Pacific
+98034,US/Pacific
+98036,US/Pacific
+98037,US/Pacific
+98038,US/Pacific
+98039,US/Pacific
+98040,US/Pacific
+98042,US/Pacific
+98043,US/Pacific
+98045,US/Pacific
+98047,US/Pacific
+98050,US/Pacific
+98051,US/Pacific
+98052,US/Pacific
+98053,US/Pacific
+98055,US/Pacific
+98056,US/Pacific
+98057,US/Pacific
+98058,US/Pacific
+98059,US/Pacific
+98065,US/Pacific
+98068,US/Pacific
+98070,US/Pacific
+98072,US/Pacific
+98074,US/Pacific
+98075,US/Pacific
+98077,US/Pacific
+98087,US/Pacific
+98092,US/Pacific
+98101,US/Pacific
+98102,US/Pacific
+98103,US/Pacific
+98104,US/Pacific
+98105,US/Pacific
+98106,US/Pacific
+98107,US/Pacific
+98108,US/Pacific
+98109,US/Pacific
+98110,US/Pacific
+98112,US/Pacific
+98115,US/Pacific
+98116,US/Pacific
+98117,US/Pacific
+98118,US/Pacific
+98119,US/Pacific
+98121,US/Pacific
+98122,US/Pacific
+98125,US/Pacific
+98126,US/Pacific
+98133,US/Pacific
+98134,US/Pacific
+98136,US/Pacific
+98144,US/Pacific
+98146,US/Pacific
+98148,US/Pacific
+98154,US/Pacific
+98155,US/Pacific
+98158,US/Pacific
+98164,US/Pacific
+98166,US/Pacific
+98168,US/Pacific
+98174,US/Pacific
+98177,US/Pacific
+98178,US/Pacific
+98188,US/Pacific
+98195,US/Pacific
+98198,US/Pacific
+98199,US/Pacific
+98201,US/Pacific
+98203,US/Pacific
+98204,US/Pacific
+98207,US/Pacific
+98208,US/Pacific
+98220,US/Pacific
+98221,US/Pacific
+98222,US/Pacific
+98223,US/Pacific
+98224,US/Pacific
+98225,US/Pacific
+98226,US/Pacific
+98229,US/Pacific
+98230,US/Pacific
+98232,US/Pacific
+98233,US/Pacific
+98235,US/Pacific
+98236,US/Pacific
+98237,US/Pacific
+98238,US/Pacific
+98239,US/Pacific
+98240,US/Pacific
+98241,US/Pacific
+98243,US/Pacific
+98244,US/Pacific
+98245,US/Pacific
+98247,US/Pacific
+98248,US/Pacific
+98249,US/Pacific
+98250,US/Pacific
+98251,US/Pacific
+98252,US/Pacific
+98253,US/Pacific
+98255,US/Pacific
+98256,US/Pacific
+98257,US/Pacific
+98258,US/Pacific
+98260,US/Pacific
+98261,US/Pacific
+98262,US/Pacific
+98263,US/Pacific
+98264,US/Pacific
+98266,US/Pacific
+98267,US/Pacific
+98270,US/Pacific
+98271,US/Pacific
+98272,US/Pacific
+98273,US/Pacific
+98274,US/Pacific
+98275,US/Pacific
+98276,US/Pacific
+98277,US/Pacific
+98278,US/Pacific
+98279,US/Pacific
+98280,US/Pacific
+98281,US/Pacific
+98282,US/Pacific
+98283,US/Pacific
+98284,US/Pacific
+98286,US/Pacific
+98288,US/Pacific
+98290,US/Pacific
+98292,US/Pacific
+98294,US/Pacific
+98295,US/Pacific
+98296,US/Pacific
+98297,US/Pacific
+98303,US/Pacific
+98304,US/Pacific
+98305,US/Pacific
+98310,US/Pacific
+98311,US/Pacific
+98312,US/Pacific
+98314,US/Pacific
+98315,US/Pacific
+98320,US/Pacific
+98321,US/Pacific
+98323,US/Pacific
+98325,US/Pacific
+98326,US/Pacific
+98327,US/Pacific
+98328,US/Pacific
+98329,US/Pacific
+98330,US/Pacific
+98331,US/Pacific
+98332,US/Pacific
+98333,US/Pacific
+98335,US/Pacific
+98336,US/Pacific
+98337,US/Pacific
+98338,US/Pacific
+98339,US/Pacific
+98340,US/Pacific
+98342,US/Pacific
+98345,US/Pacific
+98346,US/Pacific
+98349,US/Pacific
+98350,US/Pacific
+98351,US/Pacific
+98353,US/Pacific
+98354,US/Pacific
+98355,US/Pacific
+98356,US/Pacific
+98357,US/Pacific
+98358,US/Pacific
+98359,US/Pacific
+98360,US/Pacific
+98361,US/Pacific
+98362,US/Pacific
+98363,US/Pacific
+98364,US/Pacific
+98365,US/Pacific
+98366,US/Pacific
+98367,US/Pacific
+98368,US/Pacific
+98370,US/Pacific
+98371,US/Pacific
+98372,US/Pacific
+98373,US/Pacific
+98374,US/Pacific
+98375,US/Pacific
+98376,US/Pacific
+98377,US/Pacific
+98380,US/Pacific
+98381,US/Pacific
+98382,US/Pacific
+98383,US/Pacific
+98385,US/Pacific
+98387,US/Pacific
+98388,US/Pacific
+98390,US/Pacific
+98391,US/Pacific
+98392,US/Pacific
+98394,US/Pacific
+98396,US/Pacific
+98402,US/Pacific
+98403,US/Pacific
+98404,US/Pacific
+98405,US/Pacific
+98406,US/Pacific
+98407,US/Pacific
+98408,US/Pacific
+98409,US/Pacific
+98416,US/Pacific
+98418,US/Pacific
+98421,US/Pacific
+98422,US/Pacific
+98424,US/Pacific
+98430,US/Pacific
+98433,US/Pacific
+98438,US/Pacific
+98439,US/Pacific
+98443,US/Pacific
+98444,US/Pacific
+98445,US/Pacific
+98446,US/Pacific
+98447,US/Pacific
+98465,US/Pacific
+98466,US/Pacific
+98467,US/Pacific
+98498,US/Pacific
+98499,US/Pacific
+98501,US/Pacific
+98502,US/Pacific
+98503,US/Pacific
+98505,US/Pacific
+98506,US/Pacific
+98512,US/Pacific
+98513,US/Pacific
+98516,US/Pacific
+98520,US/Pacific
+98524,US/Pacific
+98526,US/Pacific
+98527,US/Pacific
+98528,US/Pacific
+98530,US/Pacific
+98531,US/Pacific
+98532,US/Pacific
+98533,US/Pacific
+98535,US/Pacific
+98536,US/Pacific
+98537,US/Pacific
+98538,US/Pacific
+98539,US/Pacific
+98541,US/Pacific
+98542,US/Pacific
+98544,US/Pacific
+98546,US/Pacific
+98547,US/Pacific
+98548,US/Pacific
+98550,US/Pacific
+98552,US/Pacific
+98555,US/Pacific
+98556,US/Pacific
+98557,US/Pacific
+98558,US/Pacific
+98559,US/Pacific
+98560,US/Pacific
+98562,US/Pacific
+98563,US/Pacific
+98564,US/Pacific
+98565,US/Pacific
+98566,US/Pacific
+98568,US/Pacific
+98569,US/Pacific
+98570,US/Pacific
+98571,US/Pacific
+98572,US/Pacific
+98575,US/Pacific
+98576,US/Pacific
+98577,US/Pacific
+98579,US/Pacific
+98580,US/Pacific
+98581,US/Pacific
+98582,US/Pacific
+98583,US/Pacific
+98584,US/Pacific
+98585,US/Pacific
+98586,US/Pacific
+98587,US/Pacific
+98588,US/Pacific
+98589,US/Pacific
+98590,US/Pacific
+98591,US/Pacific
+98592,US/Pacific
+98593,US/Pacific
+98595,US/Pacific
+98596,US/Pacific
+98597,US/Pacific
+98601,US/Pacific
+98602,US/Pacific
+98603,US/Pacific
+98604,US/Pacific
+98605,US/Pacific
+98606,US/Pacific
+98607,US/Pacific
+98609,US/Pacific
+98610,US/Pacific
+98611,US/Pacific
+98612,US/Pacific
+98613,US/Pacific
+98614,US/Pacific
+98616,US/Pacific
+98617,US/Pacific
+98619,US/Pacific
+98620,US/Pacific
+98621,US/Pacific
+98623,US/Pacific
+98624,US/Pacific
+98625,US/Pacific
+98626,US/Pacific
+98628,US/Pacific
+98629,US/Pacific
+98631,US/Pacific
+98632,US/Pacific
+98635,US/Pacific
+98638,US/Pacific
+98639,US/Pacific
+98640,US/Pacific
+98641,US/Pacific
+98642,US/Pacific
+98643,US/Pacific
+98644,US/Pacific
+98645,US/Pacific
+98647,US/Pacific
+98648,US/Pacific
+98649,US/Pacific
+98650,US/Pacific
+98651,US/Pacific
+98660,US/Pacific
+98661,US/Pacific
+98662,US/Pacific
+98663,US/Pacific
+98664,US/Pacific
+98665,US/Pacific
+98670,US/Pacific
+98671,US/Pacific
+98672,US/Pacific
+98673,US/Pacific
+98674,US/Pacific
+98675,US/Pacific
+98682,US/Pacific
+98683,US/Pacific
+98684,US/Pacific
+98685,US/Pacific
+98686,US/Pacific
+98801,US/Pacific
+98802,US/Pacific
+98811,US/Pacific
+98812,US/Pacific
+98813,US/Pacific
+98814,US/Pacific
+98815,US/Pacific
+98816,US/Pacific
+98817,US/Pacific
+98819,US/Pacific
+98821,US/Pacific
+98822,US/Pacific
+98823,US/Pacific
+98824,US/Pacific
+98826,US/Pacific
+98827,US/Pacific
+98828,US/Pacific
+98829,US/Pacific
+98830,US/Pacific
+98831,US/Pacific
+98832,US/Pacific
+98833,US/Pacific
+98834,US/Pacific
+98836,US/Pacific
+98837,US/Pacific
+98840,US/Pacific
+98841,US/Pacific
+98843,US/Pacific
+98844,US/Pacific
+98845,US/Pacific
+98846,US/Pacific
+98847,US/Pacific
+98848,US/Pacific
+98849,US/Pacific
+98850,US/Pacific
+98851,US/Pacific
+98852,US/Pacific
+98853,US/Pacific
+98855,US/Pacific
+98856,US/Pacific
+98857,US/Pacific
+98858,US/Pacific
+98859,US/Pacific
+98860,US/Pacific
+98862,US/Pacific
+98901,US/Pacific
+98902,US/Pacific
+98903,US/Pacific
+98908,US/Pacific
+98921,US/Pacific
+98922,US/Pacific
+98923,US/Pacific
+98925,US/Pacific
+98926,US/Pacific
+98930,US/Pacific
+98932,US/Pacific
+98933,US/Pacific
+98934,US/Pacific
+98935,US/Pacific
+98936,US/Pacific
+98937,US/Pacific
+98938,US/Pacific
+98939,US/Pacific
+98940,US/Pacific
+98941,US/Pacific
+98942,US/Pacific
+98943,US/Pacific
+98944,US/Pacific
+98946,US/Pacific
+98947,US/Pacific
+98948,US/Pacific
+98950,US/Pacific
+98951,US/Pacific
+98952,US/Pacific
+98953,US/Pacific
+99001,US/Pacific
+99003,US/Pacific
+99004,US/Pacific
+99005,US/Pacific
+99006,US/Pacific
+99008,US/Pacific
+99009,US/Pacific
+99011,US/Pacific
+99012,US/Pacific
+99013,US/Pacific
+99014,US/Pacific
+99016,US/Pacific
+99017,US/Pacific
+99018,US/Pacific
+99019,US/Pacific
+99020,US/Pacific
+99021,US/Pacific
+99022,US/Pacific
+99023,US/Pacific
+99025,US/Pacific
+99026,US/Pacific
+99027,US/Pacific
+99029,US/Pacific
+99030,US/Pacific
+99031,US/Pacific
+99032,US/Pacific
+99033,US/Pacific
+99034,US/Pacific
+99036,US/Pacific
+99037,US/Pacific
+99039,US/Pacific
+99040,US/Pacific
+99101,US/Pacific
+99102,US/Pacific
+99103,US/Pacific
+99105,US/Pacific
+99109,US/Pacific
+99110,US/Pacific
+99111,US/Pacific
+99113,US/Pacific
+99114,US/Pacific
+99115,US/Pacific
+99116,US/Pacific
+99117,US/Pacific
+99118,US/Pacific
+99119,US/Pacific
+99121,US/Pacific
+99122,US/Pacific
+99123,US/Pacific
+99124,US/Pacific
+99125,US/Pacific
+99126,US/Pacific
+99128,US/Pacific
+99129,US/Pacific
+99130,US/Pacific
+99131,US/Pacific
+99133,US/Pacific
+99134,US/Pacific
+99135,US/Pacific
+99136,US/Pacific
+99137,US/Pacific
+99138,US/Pacific
+99139,US/Pacific
+99140,US/Pacific
+99141,US/Pacific
+99143,US/Pacific
+99144,US/Pacific
+99146,US/Pacific
+99147,US/Pacific
+99148,US/Pacific
+99149,US/Pacific
+99150,US/Pacific
+99151,US/Pacific
+99152,US/Pacific
+99153,US/Pacific
+99154,US/Pacific
+99155,US/Pacific
+99156,US/Pacific
+99157,US/Pacific
+99158,US/Pacific
+99159,US/Pacific
+99160,US/Pacific
+99161,US/Pacific
+99163,US/Pacific
+99166,US/Pacific
+99167,US/Pacific
+99169,US/Pacific
+99170,US/Pacific
+99171,US/Pacific
+99173,US/Pacific
+99174,US/Pacific
+99176,US/Pacific
+99179,US/Pacific
+99180,US/Pacific
+99181,US/Pacific
+99185,US/Pacific
+99201,US/Pacific
+99202,US/Pacific
+99203,US/Pacific
+99204,US/Pacific
+99205,US/Pacific
+99206,US/Pacific
+99207,US/Pacific
+99208,US/Pacific
+99212,US/Pacific
+99216,US/Pacific
+99217,US/Pacific
+99218,US/Pacific
+99223,US/Pacific
+99224,US/Pacific
+99251,US/Pacific
+99258,US/Pacific
+99301,US/Pacific
+99320,US/Pacific
+99321,US/Pacific
+99322,US/Pacific
+99323,US/Pacific
+99324,US/Pacific
+99326,US/Pacific
+99328,US/Pacific
+99329,US/Pacific
+99330,US/Pacific
+99333,US/Pacific
+99335,US/Pacific
+99336,US/Pacific
+99337,US/Pacific
+99338,US/Pacific
+99341,US/Pacific
+99343,US/Pacific
+99344,US/Pacific
+99345,US/Pacific
+99346,US/Pacific
+99347,US/Pacific
+99348,US/Pacific
+99349,US/Pacific
+99350,US/Pacific
+99352,US/Pacific
+99353,US/Pacific
+99354,US/Pacific
+99356,US/Pacific
+99357,US/Pacific
+99359,US/Pacific
+99360,US/Pacific
+99361,US/Pacific
+99362,US/Pacific
+99363,US/Pacific
+99371,US/Pacific
+99401,US/Pacific
+99402,US/Pacific
+99403,US/Pacific
diff --git a/testdata/datasets/functional/functional_schema_template.sql 
b/testdata/datasets/functional/functional_schema_template.sql
index f4f9fa63d..aa6566c0c 100644
--- a/testdata/datasets/functional/functional_schema_template.sql
+++ b/testdata/datasets/functional/functional_schema_template.sql
@@ -1799,6 +1799,20 @@ partition by range(id)
 ---- DATASET
 functional
 ---- BASE_TABLE_NAME
+zipcode_timezones
+---- COLUMNS
+zip STRING
+timezone STRING
+---- ROW_FORMAT
+DELIMITED FIELDS TERMINATED BY ','
+---- DEPENDENT_LOAD
+INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name} SELECT * FROM 
{db_name}.{table_name};
+---- LOAD
+LOAD DATA LOCAL INPATH '{impala_home}/testdata/data/zipcodes_timezones.csv' 
OVERWRITE INTO TABLE {db_name}{db_suffix}.{table_name};
+====
+---- DATASET
+functional
+---- BASE_TABLE_NAME
 unsupported_timestamp_partition
 ---- CREATE_HIVE
 -- Create a table that is partitioned on an unsupported partition-column type
diff --git a/tests/custom_cluster/test_query_log.py 
b/tests/custom_cluster/test_query_log.py
index 48efbb78d..1d39b2da2 100644
--- a/tests/custom_cluster/test_query_log.py
+++ b/tests/custom_cluster/test_query_log.py
@@ -25,6 +25,7 @@ import tempfile
 from random import choice, randint
 from signal import SIGRTMIN
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
+from tests.common.test_dimensions import create_single_exec_option_dimension
 from tests.common.test_vector import ImpalaTestDimension
 from tests.util.retry import retry
 from tests.util.workload_management import assert_query, 
COMPRESSED_BYTES_SPILLED, \
@@ -38,11 +39,24 @@ class TestQueryLogTableBase(CustomClusterTestSuite):
 
   WM_DB = "sys"
   QUERY_TBL = "{0}.impala_query_log".format(WM_DB)
+  PROTOCOL_BEESWAX = "beeswax"
+  PROTOCOL_HS2 = "hs2"
 
   @classmethod
   def add_test_dimensions(cls):
     super(TestQueryLogTableBase, cls).add_test_dimensions()
-    cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('protocol', 
'beeswax', 'hs2'))
+    cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
+    cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('protocol',
+        cls.PROTOCOL_BEESWAX, cls.PROTOCOL_HS2))
+
+  def get_client(self, protocol):
+    """Retrieves the default Impala client for the specified protocol. This 
client is
+       automatically closed after the test completes."""
+    if protocol == self.PROTOCOL_BEESWAX:
+      return self.client
+    elif protocol == self.PROTOCOL_HS2:
+      return self.hs2_client
+    raise Exception("unknown protocol: {0}".format(protocol))
 
 
 class TestQueryLogTableBeeswax(TestQueryLogTableBase):
@@ -75,35 +89,33 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
   def test_query_log_table_lower_max_sql_plan(self, vector):
     """Asserts that lower limits on the sql and plan columns in the completed 
queries
        table are respected."""
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
     rand_long_str = "".join(choice(string.ascii_letters) for _ in
         range(self.MAX_SQL_PLAN_LEN))
 
-    try:
-      handle = client.execute_async("select '{0}'".format(rand_long_str))
-      query_id = handle.get_handle().id
-      client.wait_for_finished_timeout(handle, 10)
-      client.close_query(handle)
+    # Run the query async to avoid fetching results since fetching such a 
large result was
+    # causing the execution to take a very long time.
+    handle = client.execute_async("select '{0}'".format(rand_long_str))
+    query_id = handle.get_handle().id
+    client.wait_for_finished_timeout(handle, 10)
+    client.close_query(handle)
 
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 1, 60)
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 1, 60)
 
-      # Force Impala to process the inserts to the completed queries table.
-      client.execute("refresh " + self.QUERY_TBL)
+    # Force Impala to process the inserts to the completed queries table.
+    client.execute("refresh " + self.QUERY_TBL)
 
-      res = client.execute("select length(sql),plan from {0} where 
query_id='{1}'"
-          .format(self.QUERY_TBL, query_id))
-      assert res.success
-      assert len(res.data) == 1
+    res = client.execute("select length(sql),plan from {0} where 
query_id='{1}'"
+        .format(self.QUERY_TBL, query_id))
+    assert res.success
+    assert len(res.data) == 1
 
-      data = res.data[0].split("\t")
-      assert len(data) == 2
-      assert int(data[0]) == self.MAX_SQL_PLAN_LEN - 1, "incorrect sql 
statement length"
-      assert len(data[1]) == self.MAX_SQL_PLAN_LEN - data[1].count("\n") - 1, \
-          "incorrect plan length"
-
-    finally:
-      client.close()
+    data = res.data[0].split("\t")
+    assert len(data) == 2
+    assert int(data[0]) == self.MAX_SQL_PLAN_LEN - 1, "incorrect sql statement 
length"
+    assert len(data[1]) == self.MAX_SQL_PLAN_LEN - data[1].count("\n") - 1, \
+        "incorrect plan length"
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=1 "
@@ -115,36 +127,32 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
   def test_query_log_table_over_max_sql_plan(self, vector):
     """Asserts that very long queries have their corresponding plan and sql 
columns
        shortened in the completed queries table."""
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
     rand_long_str = "".join(choice(string.ascii_letters) for _ in 
range(16778200))
 
-    try:
-      client.set_configuration_option("MAX_STATEMENT_LENGTH_BYTES", 16780000)
-      handle = client.execute_async("select '{0}'".format(rand_long_str))
-      query_id = handle.get_handle().id
-      client.wait_for_finished_timeout(handle, 10)
-      client.close_query(handle)
-
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 1, 60)
+    client.set_configuration_option("MAX_STATEMENT_LENGTH_BYTES", 16780000)
+    handle = client.execute_async("select '{0}'".format(rand_long_str))
+    query_id = handle.get_handle().id
+    client.wait_for_finished_timeout(handle, 10)
+    client.close_query(handle)
 
-      # Force Impala to process the inserts to the completed queries table.
-      client.execute("refresh " + self.QUERY_TBL)
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 1, 60)
 
-      client.set_configuration_option("MAX_ROW_SIZE", 35000000)
-      res = client.execute("select length(sql),plan from {0} where 
query_id='{1}'"
-          .format(self.QUERY_TBL, query_id))
-      assert res.success
-      assert len(res.data) == 1
-      data = res.data[0].split("\t")
-      assert len(data) == 2
-      assert data[0] == "16777215"
+    # Force Impala to process the inserts to the completed queries table.
+    client.execute("refresh " + self.QUERY_TBL)
 
-      # Newline characters are not counted by Impala's length function.
-      assert len(data[1]) == 16777216 - data[1].count("\n") - 1
+    client.set_configuration_option("MAX_ROW_SIZE", 35000000)
+    res = client.execute("select length(sql),plan from {0} where 
query_id='{1}'"
+        .format(self.QUERY_TBL, query_id))
+    assert res.success
+    assert len(res.data) == 1
+    data = res.data[0].split("\t")
+    assert len(data) == 2
+    assert data[0] == "16777215"
 
-    finally:
-      client.close()
+    # Newline characters are not counted by Impala's length function.
+    assert len(data[1]) == 16777216 - data[1].count("\n") - 1
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=1 "
@@ -156,29 +164,26 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
                                     catalogd_args="--enable_workload_mgmt",
                                     impalad_graceful_shutdown=True)
   def test_query_log_table_no_query_log_select(self, vector):
-    """Asserts queries are written to the completed queries table when the 
query log is
-       turned off."""
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
-
-    try:
-      # Run a select query.
-      random_val = randint(1, 1000000)
-      select_sql = "select {0}".format(random_val)
-      res = client.execute(select_sql)
-      assert res.success
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 1, 60)
+    """Asserts queries are written to the completed queries table when the 
in-memory
+    query log is turned off."""
+    client = self.get_client(vector.get_value('protocol'))
+
+    # Run a select query.
+    random_val = randint(1, 1000000)
+    select_sql = "select {0}".format(random_val)
+    res = client.execute(select_sql)
+    assert res.success
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 1, 60)
 
-      # Force Impala to process the inserts to the completed queries table.
-      client.execute("refresh " + self.QUERY_TBL)
+    # Force Impala to process the inserts to the completed queries table.
+    client.execute("refresh " + self.QUERY_TBL)
 
-      actual = client.execute("select sql from {0} where 
query_id='{1}'".format(
-          self.QUERY_TBL, res.query_id))
-      assert actual.success
-      assert len(actual.data) == 1
-      assert actual.data[0] == select_sql
-    finally:
-      client.close()
+    actual = client.execute("select sql from {0} where query_id='{1}'".format(
+        self.QUERY_TBL, res.query_id))
+    assert actual.success
+    assert len(actual.data) == 1
+    assert actual.data[0] == select_sql
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=1 "
@@ -193,63 +198,38 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
   def test_query_log_table_query_cache(self, vector):
     """Asserts the values written to the query log table match the values from 
the
        query profile. Specifically focuses on the data cache metrics."""
-    tbl_name = "default.test_query_log_cache" + str(int(time()))
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
-
-    try:
-      # Create the test table.
-      create_tbl_sql = "create table {0} (id INT, product_name STRING) " \
-        "partitioned by (category INT)".format(tbl_name)
-      create_tbl_results = client.execute(create_tbl_sql)
-      assert create_tbl_results.success
-
-      # Insert some rows into the test table.
-      insert_sql = "insert into {0} (id,category,product_name) VALUES 
".format(tbl_name)
-      for i in range(1, 11):
-        for j in range(1, 11):
-          if i * j > 1:
-            insert_sql += ","
-
-          random_product_name = "".join(choice(string.ascii_letters)
-            for _ in range(10))
-          insert_sql += "({0},{1},'{2}')".format((i * j), i, 
random_product_name)
-
-      insert_results = client.execute(insert_sql)
-      assert insert_results.success
-
-      # Select all rows from the test table. Run the query multiple times to 
ensure data
-      # is cached.
-      select_sql = "select * from {0}".format(tbl_name)
-      for i in range(3):
-        res = client.execute(select_sql)
-        assert res.success
-        sleep(1)
+    client = self.get_client(vector.get_value('protocol'))
 
+    # Select all rows from the test table. Run the query multiple times to 
ensure data
+    # is cached.
+    warming_query_count = 3
+    select_sql = "select * from functional.tinytable"
+    for i in range(warming_query_count):
+      res = client.execute(select_sql)
+      assert res.success
       self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 5, 60)
+          "impala-server.completed-queries.written", i + 1, 60)
 
-      # Allow some time for the cache to be written to disk.
-      sleep(10)
+    # Wait for the cache to be written to disk.
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.io-mgr.remote-data-cache-num-writes", 1, 60)
 
-      # Run the same query again so results are read from the data cache.
-      res = client.execute(select_sql, fetch_profile_after_close=True)
-      assert res.success
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 6, 60)
+    # Run the same query again so results are read from the data cache.
+    res = client.execute(select_sql, fetch_profile_after_close=True)
+    assert res.success
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", warming_query_count + 1, 60)
 
-      data = assert_query(self.QUERY_TBL, client, "test_query_hist_2",
-          res.runtime_profile)
+    data = assert_query(self.QUERY_TBL, client, "test_query_hist_2",
+        res.runtime_profile)
 
-      # Since the assert_query function only asserts that the bytes read from 
cache
-      # column is equal to the bytes read from cache in the profile, there is 
a potential
-      # for this test to not actually assert anything different than other 
tests. Thus, an
-      # additional assert is needed to ensure that there actually was data 
read from the
-      # cache.
-      assert data[BYTES_READ_CACHE_TOTAL] != "0", "bytes read from cache total 
was " \
-          "zero, test did not assert anything"
-    finally:
-      client.execute("drop table if exists {0}".format(tbl_name))
-      client.close()
+    # Since the assert_query function only asserts that the bytes read from 
cache
+    # column is equal to the bytes read from cache in the profile, there is a 
potential
+    # for this test to not actually assert anything different than other 
tests. Thus, an
+    # additional assert is needed to ensure that there actually was data read 
from the
+    # cache.
+    assert data[BYTES_READ_CACHE_TOTAL] != "0", "bytes read from cache total 
was " \
+        "zero, test did not assert anything"
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=5 "
@@ -268,41 +248,38 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
     print("USING LOG DIRECTORY: {0}".format(self.LOG_DIR_MAX_WRITES))
 
     impalad = self.cluster.get_first_impalad()
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
 
-    try:
-      res = client.execute("drop table {0} purge".format(self.QUERY_TBL))
-      assert res.success
-      impalad.service.wait_for_metric_value(
-          "impala-server.completed-queries.scheduled-writes", 3, 60)
-      
impalad.service.wait_for_metric_value("impala-server.completed-queries.failure",
 3,
-          60)
+    res = client.execute("drop table {0} purge".format(self.QUERY_TBL))
+    assert res.success
+    impalad.service.wait_for_metric_value(
+        "impala-server.completed-queries.scheduled-writes", 3, 60)
+    
impalad.service.wait_for_metric_value("impala-server.completed-queries.failure",
 3,
+        60)
 
-      query_count = 0
-
-      # Allow time for logs to be written to disk.
-      sleep(5)
-
-      with open(os.path.join(self.LOG_DIR_MAX_WRITES, "impalad.ERROR")) as 
file:
-        for line in file:
-          if line.find('could not write completed query table="{0}" 
query_id="{1}"'
-                           .format(self.QUERY_TBL, res.query_id)) >= 0:
-            query_count += 1
-
-      assert query_count == 1
-
-      assert impalad.service.get_metric_value(
-        "impala-server.completed-queries.max-records-writes") == 0
-      assert impalad.service.get_metric_value(
-        "impala-server.completed-queries.queued") == 0
-      assert impalad.service.get_metric_value(
-        "impala-server.completed-queries.failure") == 3
-      assert impalad.service.get_metric_value(
-        "impala-server.completed-queries.scheduled-writes") == 4
-      assert impalad.service.get_metric_value(
-        "impala-server.completed-queries.written") == 0
-    finally:
-      client.close()
+    query_count = 0
+
+    # Allow time for logs to be written to disk.
+    sleep(5)
+
+    with open(os.path.join(self.LOG_DIR_MAX_WRITES, "impalad.ERROR")) as file:
+      for line in file:
+        if line.find('could not write completed query table="{0}" 
query_id="{1}"'
+                          .format(self.QUERY_TBL, res.query_id)) >= 0:
+          query_count += 1
+
+    assert query_count == 1
+
+    assert impalad.service.get_metric_value(
+      "impala-server.completed-queries.max-records-writes") == 0
+    assert impalad.service.get_metric_value(
+      "impala-server.completed-queries.queued") == 0
+    assert impalad.service.get_metric_value(
+      "impala-server.completed-queries.failure") == 3
+    assert impalad.service.get_metric_value(
+      "impala-server.completed-queries.scheduled-writes") == 4
+    assert impalad.service.get_metric_value(
+      "impala-server.completed-queries.written") == 0
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  "--query_log_max_queued={0} "
@@ -319,7 +296,7 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
        the maximum number of queued records it reached."""
 
     impalad = self.cluster.get_first_impalad()
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
 
     rand_str = "{0}-{1}".format(vector.get_value('protocol'), time())
 
@@ -328,38 +305,36 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
     test_sql_assert = "select '{0}', count(*) from {1} where sql='{2}'".format(
         rand_str, self.QUERY_TBL, test_sql.replace("'", r"\'"))
 
-    try:
-      for _ in range(0, self.FLUSH_MAX_RECORDS_QUERY_COUNT):
-        res = client.execute(test_sql)
-        assert res.success
-
-      # Running this query results in the number of queued completed queries 
to exceed
-      # the max and thus all completed queries will be written to the query 
log table.
-      res = client.execute(test_sql_assert)
-      assert res.success
-      assert 1 == len(res.data)
-      assert "0" == res.data[0].split("\t")[1]
-
-      # Wait until the completed queries have all been written out because the 
max queued
-      # count was exceeded.
-      impalad.service.wait_for_metric_value(
-          "impala-server.completed-queries.max-records-writes", 1, 60)
-
-      # Force Impala to process the inserts to the completed queries table.
-      sleep(5)
-      client.execute("refresh " + self.QUERY_TBL)
-
-      # This query will remain queued due to the long write interval and max 
queued
-      # records limit not being reached.
-      res = client.execute(r"select count(*) from {0} where sql like 'select 
\'{1}\'%'"
-          .format(self.QUERY_TBL, rand_str))
+    for _ in range(0, self.FLUSH_MAX_RECORDS_QUERY_COUNT):
+      res = client.execute(test_sql)
       assert res.success
-      assert 1 == len(res.data)
-      assert "3" == res.data[0]
-      impalad.service.wait_for_metric_value(
-          "impala-server.completed-queries.queued", 2, 60)
-    finally:
-      client.close()
+
+    # Running this query results in the number of queued completed queries to 
exceed
+    # the max and thus all completed queries will be written to the query log 
table.
+    res = client.execute(test_sql_assert)
+    assert res.success
+    assert 1 == len(res.data)
+    assert "0" == res.data[0].split("\t")[1]
+
+    # Wait until the completed queries have all been written out because the 
max queued
+    # count was exceeded.
+    impalad.service.wait_for_metric_value(
+        "impala-server.completed-queries.max-records-writes", 1, 60)
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 3, 60)
+
+    # Force Impala to process the inserts to the completed queries table.
+    client.execute("refresh " + self.QUERY_TBL)
+
+    # This query will remain queued due to the long write interval and max 
queued
+    # records limit not being reached.
+    res = client.execute(r"select count(*) from {0} where sql like 'select 
\'{1}\'%'"
+        .format(self.QUERY_TBL, rand_str))
+    assert res.success
+    assert 1 == len(res.data)
+    assert "3" == res.data[0]
+    impalad.service.wait_for_metric_value(
+        "impala-server.completed-queries.queued", 2, 60)
 
     assert impalad.service.get_metric_value(
         "impala-server.completed-queries.max-records-writes") == 1
@@ -381,10 +356,9 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
                                                   
"--blacklisted_dbs=information_schema",
                                     impalad_graceful_shutdown=True)
   def test_query_log_table_different_table(self, vector):
-    """Asserts that queries that have completed but are not yet written to the 
query
-       log table are flushed to the table before the coordinator exits."""
+    """Asserts that the completed queries table can be renamed."""
 
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
 
     try:
       res = client.execute("show tables in {0}".format(self.WM_DB))
@@ -401,7 +375,66 @@ class TestQueryLogTableBeeswax(TestQueryLogTableBase):
           .format(self.OTHER_TBL, self.DB)
     finally:
       client.execute("drop table {0}.{1} purge".format(self.WM_DB, 
self.OTHER_TBL))
-      client.close()
+
+  @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
+                                                 
"--query_log_write_interval_s=1 "
+                                                 "--shutdown_grace_period_s=10 
"
+                                                 "--shutdown_deadline_s=60",
+                                    cluster_size=3,
+                                    num_exclusive_coordinators=2,
+                                    catalogd_args="--enable_workload_mgmt",
+                                    impalad_graceful_shutdown=False)
+  def test_query_log_table_query_select_dedicate_coordinator(self, vector):
+    """Asserts the values written to the query log table match the values from 
the
+       query profile when dedicated coordinators are used."""
+    client = self.get_client(vector.get_value('protocol'))
+    test_sql = "select * from functional.tinytable"
+
+    # Select all rows from the test table.
+    res = client.execute(test_sql, fetch_profile_after_close=True)
+    assert res.success
+
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 1, 60)
+
+    client2 = self.create_client_for_nth_impalad(1, 
vector.get_value('protocol'))
+    try:
+      assert client2 is not None
+      assert_query(self.QUERY_TBL, client2, "",
+          res.runtime_profile)
+    finally:
+      client2.close()
+
+  @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
+                                                 
"--query_log_write_interval_s=1 "
+                                                 "--shutdown_grace_period_s=10 
"
+                                                 "--shutdown_deadline_s=60",
+                                    cluster_size=3,
+                                    num_exclusive_coordinators=2,
+                                    catalogd_args="--enable_workload_mgmt",
+                                    impalad_graceful_shutdown=False)
+  def test_query_log_table_query_select_mt_dop(self, vector):
+    """Asserts the values written to the query log table match the values from 
the
+       query profile when dedicated coordinators are used along with an MT_DOP 
setting
+       greater than 0."""
+    client = self.get_client(vector.get_value('protocol'))
+    test_sql = "select * from functional.tinytable"
+
+    # Select all rows from the test table.
+    client.set_configuration_option("MT_DOP", "4")
+    res = client.execute(test_sql, fetch_profile_after_close=True)
+    assert res.success
+
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 1, 60)
+
+    client2 = self.create_client_for_nth_impalad(1, 
vector.get_value('protocol'))
+    try:
+      assert client2 is not None
+      assert_query(self.QUERY_TBL, client2, "",
+          res.runtime_profile)
+    finally:
+      client2.close()
 
 
 class TestQueryLogTableHS2(TestQueryLogTableBase):
@@ -425,82 +458,25 @@ class TestQueryLogTableHS2(TestQueryLogTableBase):
   def test_query_log_table_query_multiple(self, vector):
     """Asserts the values written to the query log table match the values from 
the
        query profile for a query that reads from multiple tables."""
-    tbl_name = "default.test_query_log_" + str(int(time()))
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
+
+    # Select all rows from the test table.
+    client.set_configuration_option("MAX_MEM_ESTIMATE_FOR_ADMISSION", "10MB")
+    res = client.execute("select a.zip,a.income,b.timezone,c.timezone from "
+        "functional.zipcode_incomes a inner join functional.zipcode_timezones 
b on "
+        "a.zip = b.zip inner join functional.alltimezones c on b.timezone = 
c.timezone",
+        fetch_profile_after_close=True)
+    assert res.success
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 1, 60)
 
+    client2 = self.create_client_for_nth_impalad(1, 
vector.get_value('protocol'))
     try:
-      # Create the first test table.
-      create_tbl_sql = "create table {0}_products (id INT, product_name 
STRING)" \
-          .format(tbl_name)
-      create_tbl_results = client.execute(create_tbl_sql)
-      assert create_tbl_results.success
-
-      # Insert some rows into the test products table.
-      insert_sql = "insert into {0}_products (id,product_name) VALUES 
".format(tbl_name)
-      for i in range(1, 11):
-        for j in range(1, 11):
-          if i * j > 1:
-            insert_sql += ","
-
-          random_product_name = "".join(choice(string.ascii_letters) for _ in 
range(10))
-          insert_sql += "({0},'{1}')".format((i * j), random_product_name)
-
-      insert_results = client.execute(insert_sql)
-      assert insert_results.success
-
-      # Create the second test table.
-      create_tbl_sql = "create table {0}_customers (id INT, name STRING) " \
-          .format(tbl_name)
-      create_tbl_results = client.execute(create_tbl_sql)
-      assert create_tbl_results.success
-
-      # Insert rows into the test customers table.
-      insert_sql = "insert into {0}_customers (id,name) VALUES 
".format(tbl_name)
-      for i in range(1, 11):
-        if i > 1:
-          insert_sql += ","
-        rand_cust_name = "".join(choice(string.ascii_letters) for _ in 
range(10))
-        insert_sql += "({0},'{1}')".format(i, rand_cust_name)
-
-      insert_results = client.execute(insert_sql)
-      assert insert_results.success
-
-      # Create the third test table.
-      create_tbl_sql = "create table {0}_sales (id INT, product_id INT, " \
-          "customer_id INT) ".format(tbl_name)
-      create_tbl_results = client.execute(create_tbl_sql)
-      assert create_tbl_results.success
-
-      # Insert rows into the test sales table.
-      insert_sql = "insert into {0}_sales (id, product_id, customer_id) VALUES 
" \
-          .format(tbl_name)
-      for i in range(1, 1001):
-        if i != 1:
-          insert_sql += ","
-        insert_sql += "({0},{1},{2})".format(i * j, randint(1, 100), 
randint(1, 10))
-
-      insert_results = client.execute(insert_sql)
-      assert insert_results.success
-
-      # Select all rows from the test table.
-      client.set_configuration_option("MAX_MEM_ESTIMATE_FOR_ADMISSION", "10MB")
-      res = client.execute("select s.id, p.product_name, c.name from {0}_sales 
s "
-          "inner join {0}_products p on s.product_id=p.id "
-          "inner join {0}_customers c on s.customer_id=c.id".format(tbl_name),
-          fetch_profile_after_close=True)
-      assert res.success
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 7, 60)
-
-      client2 = self.create_client_for_nth_impalad(1, 
vector.get_value('protocol'))
       assert client2 is not None
       assert_query(self.QUERY_TBL, client2, "test_query_hist_mult", 
res.runtime_profile,
           max_mem_for_admission=10485760)
     finally:
-      client.execute("drop table if exists {0}_sales".format(tbl_name))
-      client.execute("drop table if exists {0}_customers".format(tbl_name))
-      client.execute("drop table if exists {0}_products".format(tbl_name))
-      client.close()
+      client2.close()
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=1 "
@@ -509,49 +485,33 @@ class TestQueryLogTableHS2(TestQueryLogTableBase):
                                                  "--shutdown_deadline_s=60",
                                     catalogd_args="--enable_workload_mgmt",
                                     impalad_graceful_shutdown=True)
-  def test_query_log_table_query_insert_select(self, vector):
+  def test_query_log_table_query_insert_select(self, vector, unique_database,
+      unique_name):
     """Asserts the values written to the query log table match the values from 
the
        query profile for a query that insert selects."""
-    tbl_name = "default.test_query_log_insert_select" + str(int(time()))
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
-
-    try:
-      # Create the source test table.
-      assert client.execute("create table {0}_source (id INT, product_name 
STRING) "
-          .format(tbl_name)).success, "could not create source table"
-
-      # Insert some rows into the test table.
-      insert_sql = "insert into {0}_source (id,product_name) VALUES " \
-          .format(tbl_name)
-      for i in range(1, 100):
-        if i > 1:
-          insert_sql += ","
-
-        random_product_name = "".join(choice(string.ascii_letters)
-          for _ in range(10))
-        insert_sql += "({0},'{1}')".format(i, random_product_name)
+    tbl_name = "{0}.{1}".format(unique_database, unique_name)
+    client = self.get_client(vector.get_value('protocol'))
 
-      assert client.execute(insert_sql).success, "could not insert rows"
+    # Create the destination test table.
+    assert client.execute("create table {0} (identifier INT, product_name 
STRING) "
+        .format(tbl_name)).success, "could not create source table"
 
-      # Create the destination test table.
-      assert client.execute("create table {0}_dest (id INT, product_name 
STRING) "
-          .format(tbl_name)).success, "could not create destination table"
+    # Insert select into the destination table.
+    res = client.execute("insert into {0} (identifier, product_name) select 
id, "
+        "string_col from functional.alltypes limit 50".format(tbl_name),
+        fetch_profile_after_close=True)
+    assert res.success, "could not insert select"
 
-      # Insert select from the source table to the destination table.
-      res = client.execute("insert into {0}_dest (id, product_name) select id, 
"
-          "product_name from {0}_source".format(tbl_name), 
fetch_profile_after_close=True)
-      assert res.success, "could not insert select"
-
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 4, 60)
+    # Include the two queries run by the unique_database fixture setup.
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 4, 60)
 
-      client2 = self.create_client_for_nth_impalad(2, 
vector.get_value('protocol'))
+    client2 = self.create_client_for_nth_impalad(2, 
vector.get_value('protocol'))
+    try:
       assert client2 is not None
       assert_query(self.QUERY_TBL, client2, "test_query_hist_3", 
res.runtime_profile)
     finally:
-      client.execute("drop table if exists {0}_source".format(tbl_name))
-      client.execute("drop table if exists {0}_dest".format(tbl_name))
-      client.close()
+      client2.close()
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=15 "
@@ -563,21 +523,18 @@ class TestQueryLogTableHS2(TestQueryLogTableBase):
     """Asserts that queries that have completed are written to the query log 
table
        after the specified write interval elapses."""
 
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
 
-    try:
-      query_count = 10
+    query_count = 10
 
-      for i in range(query_count):
-        res = client.execute("select sleep(1000)")
-        assert res.success
+    for i in range(query_count):
+      res = client.execute("select sleep(1000)")
+      assert res.success
 
-      # At least 10 seconds have already elapsed, wait up to 10 more seconds 
for the
-      # queries to be written to the completed queries table.
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-        "impala-server.completed-queries.written", query_count, 10)
-    finally:
-      client.close()
+    # At least 10 seconds have already elapsed, wait up to 10 more seconds for 
the
+    # queries to be written to the completed queries table.
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+      "impala-server.completed-queries.written", query_count, 10)
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=9999 "
@@ -590,7 +547,7 @@ class TestQueryLogTableHS2(TestQueryLogTableBase):
        log table are flushed to the table before the coordinator exits."""
 
     impalad = self.cluster.get_first_impalad()
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
 
     try:
       # Execute sql statements to ensure all get written to the query log 
table.
@@ -623,7 +580,6 @@ class TestQueryLogTableHS2(TestQueryLogTableBase):
 
       assert retry(func=assert_func, max_attempts=5, sleep_time_s=5)
     finally:
-      client.close()
       client2.close()
 
 
@@ -631,8 +587,6 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
   """Tests to assert the query log table is correctly populated when using all 
the
      client protocols."""
 
-  SCRATCH_DIR = tempfile.mkdtemp(prefix="scratch_dir")
-
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=1 "
                                                  
"--cluster_id=test_query_hist_2 "
@@ -640,27 +594,26 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
                                                  "--shutdown_deadline_s=60",
                                     catalogd_args="--enable_workload_mgmt",
                                     impalad_graceful_shutdown=True)
-  def test_query_log_table_ddl(self, vector):
+  def test_query_log_table_ddl(self, vector, unique_database, unique_name):
     """Asserts the values written to the query log table match the values from 
the
        query profile for a DDL query."""
-    tbl_name = "default.test_query_log_ddl_" + str(int(time()))
-    create_tbl_sql = "create table {0} (id INT, product_name STRING) " \
-        "partitioned by (category INT)".format(tbl_name)
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    create_tbl_sql = "create table {0}.{1} (id INT, product_name STRING) " \
+        "partitioned by (category INT)".format(unique_database, unique_name)
+    client = self.get_client(vector.get_value('protocol'))
 
-    try:
-      res = client.execute(create_tbl_sql, fetch_profile_after_close=True)
-      assert res.success
+    res = client.execute(create_tbl_sql, fetch_profile_after_close=True)
+    assert res.success
 
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 1, 60)
+    # Include the two queries run by the unique_database fixture setup.
+    self.cluster.get_first_impalad().service.wait_for_metric_value(
+        "impala-server.completed-queries.written", 3, 60)
 
-      client2 = self.create_client_for_nth_impalad(2, 
vector.get_value('protocol'))
+    client2 = self.create_client_for_nth_impalad(2, 
vector.get_value('protocol'))
+    try:
       assert client2 is not None
       assert_query(self.QUERY_TBL, client2, "test_query_hist_2", 
res.runtime_profile)
     finally:
-      client.execute("drop table if exists {0}".format(tbl_name))
-      client.close()
+      client2.close()
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=1 "
@@ -669,13 +622,11 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
                                                  "--shutdown_deadline_s=60",
                                     catalogd_args="--enable_workload_mgmt",
                                     impalad_graceful_shutdown=True)
-  def test_query_log_table_dml(self, vector):
+  def test_query_log_table_dml(self, vector, unique_database, unique_name):
     """Asserts the values written to the query log table match the values from 
the
        query profile for a DML query."""
-    tbl_name = "default.test_query_log_dml_" + str(int(time()))
-    create_tbl_sql = "create table {0} (id INT, product_name STRING) " \
-        "partitioned by (category INT)".format(tbl_name)
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    tbl_name = "{0}.{1}".format(unique_database, unique_name)
+    client = self.get_client(vector.get_value('protocol'))
 
     try:
       # Create the test table.
@@ -689,104 +640,18 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
       res = client.execute(insert_sql, fetch_profile_after_close=True)
       assert res.success
 
+      # Include the two queries run by the unique_database fixture setup.
       self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", 2, 60)
-
-      client2 = self.create_client_for_nth_impalad(2, 
vector.get_value('protocol'))
-      assert client2 is not None
-      assert_query(self.QUERY_TBL, client2, "test_query_hist_3", 
res.runtime_profile)
-    finally:
-      client.execute("drop table if exists {0}".format(tbl_name))
-      client.close()
-
-  @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
-                                                 
"--query_log_write_interval_s=1 "
-                                                 
"--cluster_id=test_query_hist_1 "
-                                                 "--shutdown_grace_period_s=10 
"
-                                                 "--shutdown_deadline_s=60 "
-                                                 "--scratch_dirs={0}:5G"
-                                                 .format(SCRATCH_DIR),
-                                    catalogd_args="--enable_workload_mgmt",
-                                    impalad_graceful_shutdown=True)
-  @pytest.mark.parametrize("buffer_pool_limit", [(None), ("16.05MB")])
-  def test_query_log_table_query_select(self, vector, buffer_pool_limit):
-    """Asserts the values written to the query log table match the values from 
the
-       query profile. If the buffer_pool_limit parameter is not None, then 
this test
-       requires that the query spills to disk to assert that the spill metrics 
are correct
-       in the completed queries table."""
-    tbl_name = "default.test_query_log_" + str(int(time()))
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
-    query_cnt = 0
-
-    try:
-      # Create the test table.
-      create_tbl_sql = "create table {0} (id INT, product_name STRING, 
create_dt STRING" \
-          ",descr STRING) partitioned by (category INT)".format(tbl_name)
-      print("CREATE TABLE SQL: {0}".format(create_tbl_sql))
-      create_tbl_results = client.execute(create_tbl_sql)
-      assert create_tbl_results.success
-      query_cnt += 1
-
-      # Insert some rows into the test table.
-      def __run_insert(values):
-        insert_results = client.execute("insert into {0} 
(id,category,product_name,"
-        "create_dt,descr) VALUES ({1})".format(tbl_name, values))
-        assert insert_results.success
-
-      # When buffer pool limit is not None, the test is forcing the query to 
spill. Thus,
-      # a large number of records is needed to force the spilling.
-      record_count_to_insert = 99
-      if buffer_pool_limit is not None:
-        record_count_to_insert = 24999
-
-      insert_vals = ""
-      for i in range(1, record_count_to_insert):
-        random_product_name = "".join(choice(string.ascii_letters) for _ in 
range(100))
-        random_dt = "{:}-{:0>2}-{:0>2}".format(randint(1982, 2022), randint(1, 
12),
-            randint(1, 31))
-        random_desc = "".join(choice(string.ascii_letters) for _ in 
range(1000))
-        insert_vals += "({0},{1},'{2}','{3}','{4}'),".format(i, (i % 50),
-           random_product_name, random_dt, random_desc)
-
-        if i % 500 == 0:
-          __run_insert(insert_vals[:-1])
-          query_cnt += 1
-          insert_vals = ""
-
-      __run_insert(insert_vals[:-1])
-      query_cnt += 1
-
-      # Set up query configuration
-      client.set_configuration_option("MAX_MEM_ESTIMATE_FOR_ADMISSION", "10MB")
-      if buffer_pool_limit is not None:
-        client.set_configuration_option("BUFFER_POOL_LIMIT", buffer_pool_limit)
-        client.set_configuration_option("SPOOL_QUERY_RESULTS", "TRUE")
-
-      # Select all rows from the test table.
-      res = client.execute("select * from {0} order by 
create_dt".format(tbl_name),
-          fetch_profile_after_close=True)
-      assert res.success
-      query_cnt += 1
-
-      self.cluster.get_first_impalad().service.wait_for_metric_value(
-          "impala-server.completed-queries.written", query_cnt, 60)
+          "impala-server.completed-queries.written", 4, 60)
 
       client2 = self.create_client_for_nth_impalad(2, 
vector.get_value('protocol'))
-      assert client2 is not None
-      data = assert_query(self.QUERY_TBL, client2, "test_query_hist_1",
-          res.runtime_profile, max_mem_for_admission=10485760)
-
-      if buffer_pool_limit is not None:
-        # Since the assert_query function only asserts that the compressed 
bytes spilled
-        # column is equal to the compressed bytes spilled in the profile, 
there is a
-        # potential for this test to not actually assert anything different 
than other
-        # tests. Thus, an additional assert is needed to ensure that there 
actually was
-        # data that was spilled.
-        assert data[COMPRESSED_BYTES_SPILLED] != "0", "compressed bytes 
spilled total " \
-            "was zero, test did not assert anything"
+      try:
+        assert client2 is not None
+        assert_query(self.QUERY_TBL, client2, "test_query_hist_3", 
res.runtime_profile)
+      finally:
+        client2.close()
     finally:
       client.execute("drop table if exists {0}".format(tbl_name))
-      client.close()
 
   @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                  
"--query_log_write_interval_s=1 "
@@ -798,7 +663,7 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
   def test_query_log_table_invalid_query(self, vector):
     """Asserts correct values are written to the completed queries table for a 
failed
        query. The query profile is used as the source of expected values."""
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
 
     # Assert an invalid query
     unix_now = time()
@@ -830,7 +695,7 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
                                     impalad_graceful_shutdown=True)
   def test_query_log_ignored_sqls(self, vector):
     """Asserts that expected queries are not written to the query log table."""
-    client = self.create_impala_client(protocol=vector.get_value('protocol'))
+    client = self.get_client(vector.get_value('protocol'))
 
     sqls = {}
     sqls["use default"] = False
@@ -873,38 +738,39 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
     sqls["select 1"] = True
 
     control_queries_count = 0
-    try:
-      for sql, experiment_control in sqls.items():
-        results = client.execute(sql)
-        assert results.success, "could not execute query '{0}'".format(sql)
-        sqls[sql] = results.query_id
-
-        # Ensure at least one sql statement was written to the completed 
queries table
-        # to avoid false negatives where the sql statements that are ignored 
are not
-        # written to the completed queries table because of another issue.
-        if experiment_control:
+    for sql, experiment_control in sqls.items():
+      results = client.execute(sql)
+      assert results.success, "could not execute query '{0}'".format(sql)
+      sqls[sql] = results.query_id
+
+      # Ensure at least one sql statement was written to the completed queries 
table
+      # to avoid false negatives where the sql statements that are ignored are 
not
+      # written to the completed queries table because of another issue. Does 
not check
+      # the completed-queries.written metric because, if another query that 
should not
+      # have been written to the completed queries was actually written, the 
metric will
+      # be wrong.
+      if experiment_control:
+        control_queries_count += 1
+        sql_results = None
+        for _ in range(6):
+          sql_results = client.execute("select * from {0} where 
query_id='{1}'".format(
+            self.QUERY_TBL, results.query_id))
           control_queries_count += 1
-          sql_results = None
-          for _ in range(6):
-            sql_results = client.execute("select * from {0} where 
query_id='{1}'".format(
-              self.QUERY_TBL, results.query_id))
-            control_queries_count += 1
-            if sql_results.success and len(sql_results.data) == 1:
-              break
-            else:
-              sleep(5)
-          assert sql_results.success
-          assert len(sql_results.data) == 1, "query not found in completed 
queries table"
-          sqls.pop(sql)
-
-      for sql, query_id in sqls.items():
-        log_results = client.execute("select * from {0} where query_id='{1}'"
-                                     .format(self.QUERY_TBL, query_id))
-        assert log_results.success
-        assert len(log_results.data) == 0, "found query in query log table: 
{0}" \
-                                               .format(sql)
-    finally:
-      client.close()
+          if sql_results.success and len(sql_results.data) == 1:
+            break
+          else:
+            # The query is not yet available in the completed queries table, 
wait before
+            # checking again.
+            sleep(5)
+        assert sql_results.success
+        assert len(sql_results.data) == 1, "query not found in completed 
queries table"
+        sqls.pop(sql)
+
+    for sql, query_id in sqls.items():
+      log_results = client.execute("select * from {0} where query_id='{1}'"
+                                    .format(self.QUERY_TBL, query_id))
+      assert log_results.success
+      assert len(log_results.data) == 0, "found query in query log table: 
{0}".format(sql)
 
     # Assert there was one query per sql item written to the query log table. 
The queries
     # inserted into the completed queries table are the queries used to assert 
the ignored
@@ -921,51 +787,28 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
                                     catalogd_args="--enable_workload_mgmt",
                                     impalad_graceful_shutdown=True)
  def test_query_log_table_sql_injection(self, vector):
    """Asserts that sql statements containing quote characters, semicolons, and
       comment markers are recorded intact and do not break (or inject into) the
       insert statements that workload management generates for the completed
       queries table."""
    client = self.get_client(vector.get_value('protocol'))
    impalad = self.cluster.get_first_impalad()

    # NOTE(review): the numeric argument to __run_sql_inject (1, 4, 7, 10, 13) looks
    # like a cumulative expected count of completed-queries writes, which makes the
    # ordering of these cases significant -- confirm against __run_sql_inject.

    # Try a sql injection attack with closing double quotes.
    sql1_str = "select * from functional.alltypes where string_col='product-2-3\"'"
    self.__run_sql_inject(impalad, client, sql1_str, "closing quotes", 1)

    # Try a sql injection attack with closing single quotes.
    # NOTE(review): this case re-uses both the sql1_str variable and the
    # "closing quotes" label from the previous case; a distinct label would make
    # failure messages unambiguous.
    sql1_str = "select * from functional.alltypes where string_col=\"product-2-3'\""
    self.__run_sql_inject(impalad, client, sql1_str, "closing quotes", 4)

    # Try a sql injection attack with a terminating quote and semicolon that
    # attempts to drop the completed queries table itself.
    sql2_str = "select 1'); drop table {0}; select('".format(self.QUERY_TBL)
    self.__run_sql_inject(impalad, client, sql2_str, "terminating semicolon", 7)

    # Attempt to cause an error using multiline comments. This statement is not
    # valid sql, so the final parameter indicates the query is expected to fail.
    sql3_str = "select 1' /* foo"
    self.__run_sql_inject(impalad, client, sql3_str, "multiline comments", 10, False)

    # Attempt to cause an error using single line comments. Also expected to fail.
    sql4_str = "select 1' -- foo"
    self.__run_sql_inject(impalad, client, sql4_str, "single line comments", 13, False)
 
   def __run_sql_inject(self, impalad, client, sql, test_case, expected_writes,
                        expect_success=True):
@@ -1005,3 +848,69 @@ class TestQueryLogTableAll(TestQueryLogTableBase):
       assert len(sql_verify.data) == 1, "did not find query '{0}' in query log 
" \
                                         "table for test case '{1}" \
                                         .format(esc_sql, test_case)
+
+
class TestQueryLogTableBufferPool(TestQueryLogTableBase):
  """Base class for all query log tests that set the buffer pool query option."""

  # Scratch directory for spill-to-disk; created once at class-definition time and
  # passed to the impalads via --scratch_dirs.
  SCRATCH_DIR = tempfile.mkdtemp(prefix="scratch_dir")

  @classmethod
  def add_test_dimensions(cls):
    # Run each test twice: once with no buffer pool limit and once with a limit
    # small enough ("14.97MB") to force the join query below to spill to disk.
    super(TestQueryLogTableBufferPool, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('buffer_pool_limit',
        None, "14.97MB"))

  @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt "
                                                 "--query_log_write_interval_s=1 "
                                                 "--cluster_id=test_query_hist_1 "
                                                 "--shutdown_grace_period_s=10 "
                                                 "--shutdown_deadline_s=60 "
                                                 "--scratch_dirs={0}:5G"
                                                 .format(SCRATCH_DIR),
                                    catalogd_args="--enable_workload_mgmt",
                                    impalad_graceful_shutdown=True)
  def test_query_log_table_query_select(self, vector):
    """Asserts the values written to the query log table match the values from the
       query profile. If the buffer_pool_limit parameter is not None, then this test
       requires that the query spills to disk to assert that the spill metrics are correct
       in the completed queries table."""
    buffer_pool_limit = vector.get_value('buffer_pool_limit')
    client = self.get_client(vector.get_value('protocol'))
    test_sql = "select * from functional.tinytable"

    # When buffer pool limit is not None, the test is forcing the query to spill. Thus,
    # a large number of records is needed to force the spilling.
    if buffer_pool_limit is not None:
      test_sql = "select a.*,b.*,c.* from " \
        "functional.zipcode_incomes a inner join functional.zipcode_timezones b on " \
        "a.zip = b.zip inner join functional.alltimezones c on b.timezone = c.timezone"

    # Set up query configuration. Capping the admission estimate lets the test later
    # assert the exact max_mem_for_admission value (10485760 bytes) in the log table.
    client.set_configuration_option("MAX_MEM_ESTIMATE_FOR_ADMISSION", "10MB")
    if buffer_pool_limit is not None:
      client.set_configuration_option("BUFFER_POOL_LIMIT", buffer_pool_limit)

    # Run the test query; the profile is needed afterwards to compare against the
    # row written to the completed queries table.
    res = client.execute(test_sql, fetch_profile_after_close=True)
    assert res.success

    # Wait until workload management reports the completed query was written.
    self.cluster.get_first_impalad().service.wait_for_metric_value(
        "impala-server.completed-queries.written", 1, 60)

    # Query the log table from a different impalad than the one that ran the query.
    client2 = self.create_client_for_nth_impalad(2, vector.get_value('protocol'))
    try:
      assert client2 is not None
      data = assert_query(self.QUERY_TBL, client2, "test_query_hist_1",
          res.runtime_profile, max_mem_for_admission=10485760)
    finally:
      client2.close()

    if buffer_pool_limit is not None:
      # Since the assert_query function only asserts that the compressed bytes spilled
      # column is equal to the compressed bytes spilled in the profile, there is a
      # potential for this test to not actually assert anything different than other
      # tests. Thus, an additional assert is needed to ensure that there actually was
      # data that was spilled.
      assert data[COMPRESSED_BYTES_SPILLED] != "0", "compressed bytes spilled total " \
          "was zero, test did not assert anything"
diff --git a/tests/custom_cluster/test_sys_db.py 
b/tests/custom_cluster/test_sys_db.py
new file mode 100644
index 000000000..a9d8f6833
--- /dev/null
+++ b/tests/custom_cluster/test_sys_db.py
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import, division, print_function
+
+from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
+from tests.common.test_dimensions import create_single_exec_option_dimension
+
+
class TestSysDb(CustomClusterTestSuite):
  """Tests that are specific to the 'sys' database."""

  # Name of the reserved database that holds workload management tables.
  SYS_DB_NAME = "sys"

  @classmethod
  def add_test_dimensions(cls):
    super(TestSysDb, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())

  @CustomClusterTestSuite.with_args()
  def test_query_log_table_create_sys_db_blocked(self, vector):
    """Asserts that the sys db cannot be created."""

    # The create must fail, and must fail specifically because the db is
    # blacklisted -- any other error would indicate a different problem.
    try:
      self.client.execute("create database {0}".format(self.SYS_DB_NAME))
      assert False, "database '{0}' should have failed to create but was created" \
          .format(self.SYS_DB_NAME)
    except ImpalaBeeswaxException as e:
      assert "Invalid db name: {0}. It has been blacklisted using --blacklisted_dbs" \
          .format(self.SYS_DB_NAME) in str(e), "database '{0}' failed to create but " \
          "for the wrong reason".format(self.SYS_DB_NAME)

  @CustomClusterTestSuite.with_args(impalad_args="--enable_workload_mgmt",
                                    catalogd_args="--enable_workload_mgmt")
  def test_query_log_table_create_table_sys_db_blocked(self, vector):
    """Asserts that no other tables can be created in the sys db."""

    table_name = "{0}.should_not_create".format(self.SYS_DB_NAME)

    # As above, the failure must cite the table blacklist; a generic failure
    # would not prove the protection is in place.
    try:
      self.client.execute("create table {0} (id STRING)".format(table_name))
      assert False, "table '{0}' should have failed to create but was created" \
          .format(table_name)
    except ImpalaBeeswaxException as e:
      assert "Query aborted:IllegalStateException: Can't create blacklisted table: {0}" \
          .format(table_name) in str(e), "table '{0}' failed to create but for the " \
          "wrong reason".format(table_name)
diff --git a/tests/util/workload_management.py 
b/tests/util/workload_management.py
index 0d6827baf..85e4797b5 100644
--- a/tests/util/workload_management.py
+++ b/tests/util/workload_management.py
@@ -297,17 +297,23 @@ def assert_query(query_tbl, client, expected_cluster_id, 
raw_profile=None, impal
   index += 1
   assert sql_results.column_labels[index] == PER_HOST_MEM_ESTIMATE
   ret_data[PER_HOST_MEM_ESTIMATE] = data[index]
-  if query_state_value == "EXCEPTION":
+  if query_state_value == "EXCEPTION" or query_type == "DDL":
     assert data[index] == "0", "per-host memory estimate incorrect"
   else:
-    if query_type != "DDL":
+    # First check the Estimated Per-Host Mem from the query profile. This 
value may not
+    # match though because certain query options can cause this value to 
diverge from the
+    # per-host memory estimate stored in the query history table.
+    est_perhost_mem = re.search(r'\n\s+Estimated Per-Host Mem:\s+(\d+)\n', 
profile_text)
+    assert est_perhost_mem is not None
+    if est_perhost_mem.group(1) != data[index]:
+      # The profile and db values diverged, use the Per-Host Resource 
Estimates field from
+      # the query profile as the expected value. Since query profile value is 
an estimate,
+      # it's not as good to use, but it's all we have available.
       perhost_mem_est = re.search(r'\nPer-Host Resource 
Estimates:\s+Memory\=(.*?)\n',
           profile_text)
       assert perhost_mem_est is not None
       assert_byte_str(expected_str=perhost_mem_est.group(1), 
actual_bytes=data[index],
           msg="per-host memory estimate incorrect", unit_combined=True)
-    else:
-      assert data[index] == "0", "per-host memory estimate not 0"
 
   # Dedicated Coordinator Memory Estimate
   # This value is different because it is the minimum of the query option

Reply via email to