hawq-commits mailing list archives

From h...@apache.org
Subject [1/3] incubator-hawq git commit: HAWQ-86. Fix and re-enable unit test for pxf and external storage
Date Mon, 02 Nov 2015 06:13:57 GMT
Repository: incubator-hawq
Updated Branches:
  refs/heads/master 6d743f10a -> 7c2f615d2


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/hd_work_mgr_distribute_work_2_gp_segments_test.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/hd_work_mgr_distribute_work_2_gp_segments_test.c b/src/backend/access/external/test_discard/hd_work_mgr_distribute_work_2_gp_segments_test.c
deleted file mode 100644
index 2518604..0000000
--- a/src/backend/access/external/test_discard/hd_work_mgr_distribute_work_2_gp_segments_test.c
+++ /dev/null
@@ -1,621 +0,0 @@
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "hd_work_mgr_mock.h"
-
-
-/*
- * In test__distribute_work_to_gp_segments we are testing the function distribute_work_2_gp_segments().
- * distribute_work_2_gp_segments() implements the algorithm that allocates the fragments of an external
- * data source to the Hawq segments for processing.
- * This unit test verifies the algorithm output, and ensures the following algorithm behaviour:
- * a. the number of fragments allocated is equal to the input number of fragments
- * b. distribution of work between segments: if two segments are required and there is more than
- *    one host, each segment will be on a different host
- * c. percent of local datanodes out of all datanodes
- * d. percent of local fragments out of all fragments
- * e. the number of actual working segments is bigger than half of the initial working segments
- */
-
-static void print_allocated_fragments(List **allocated_fragments, int total_segs);
-static char* print_one_allocated_data_fragment(AllocatedDataFragment *frag, int seg_index);
-static char* find_segment_ip_by_index(int seg_index);
-static char** create_cluster(int num_hosts);
-static void clean_cluster(char** cluster, int num_hosts);
-static char** create_array_of_segs(char **cluster, int num_hosts, int num_segments_on_host);
-static void clean_array_of_segs(char **array_of_segs, int number_of_segments);
-static bool* create_array_of_primaries(int number_of_segments);
-static void print_cluster(char** cluster, int num_hosts);
-static void print_segments_list();
-void clean_allocated_fragments(List **allocated_fragments, int total_segs);
-static void validate_total_fragments_allocated(List **allocated_fragments, int total_segs, int input_total_fragments);
-static void validate_max_load_per_segment(List **allocated_fragments, int total_segs, int working_segs, int input_total_fragments);
-static int calc_load_per_segment(int input_total_fragments, int working_segs);
-static void validate_all_working_segments_engagement(List **allocated_fragments, 
-													 int total_segs, 
-													 int working_segs, 
-													 int input_total_fragments,
-													 int num_hosts_in_cluster);
-static bool is_host_uniq(List** ips_list, char* ip);
-static List* spread_fragments_in_cluster(int number_of_fragments, 
-								  int number_of_hosts, 
-								  int replication_factor, 
-								  char **cluster,
-								  int cluster_size);
-
-/* test input data*/
-typedef struct sTestInputData
-{
-	int m_num_hosts_in_cluster; /* cluster size mustn't exceed 65025 - see function create_cluster() */
-	int m_num_data_fragments; /* number of fragments in the data we intend to allocate between the hawq segments */
-	/* 
- * number of datanodes that hold the 'queried' data - there is one datanode
-	 * on each cluster host - so there are <num_hosts_in_cluster> datanodes 
-	 */
-	int m_num_active_data_nodes; 
-	int m_num_of_fragment_replicas;
-	int m_num_segments_on_host;/* number of Hawq segments on each cluster host - we assume all cluster hosts have Hawq segments installed */
-	/* 
- * the subset of Hawq segments that will do the processing - not all the Hawq segments
-	 * in the cluster are involved.
-	 * This parameter plays the role of max_participants_allowed that is passed to map_hddata_2gp_segments()
-	 * in createplan.c
-	 */
-	int m_num_working_segs; 
-	bool m_enable_print_input_cluster;
-	bool m_enable_print_input_fragments;
-	bool m_enable_print_input_segments;
-	bool m_enable_print_allocated_fragments;	
-} TestInputData;
-
-static void test__distribute_work_to_gp_segments(TestInputData *input);
-/*
- * TRACING CAPABILITIES
- * The unit test validates the behaviour of the SUT function distribute_work_2_gp_segments() using
- * the assert_XXX_... functions. But in order to understand the behaviour of the allocation algorithm
- * it can be helpful to look at the various data structures involved. For this purpose we have
- * several print functions:
- * a. print_cluster(...)
- * b. print_fragment_list(...)
- * c. print_segments_list(...)
- * d. print_allocated_fragments(...)
- * All these trace functions have their output disabled by default. To enable the output of any print
- * function, set the respective m_enable_print_... boolean in the TestInputData passed to
- * test__distribute_work_to_gp_segments()
- */
-
-
-void
-test__distribute_work_to_gp_segments__big_cluster_few_active_nodes(void **state)
-{
-	TestInputData *input = (TestInputData*)palloc0(sizeof(TestInputData));
-	
-	input->m_num_hosts_in_cluster = 1000; /* cluster size mustn't exceed 65025 - see function create_cluster() */
-	input->m_num_data_fragments = 100; /* number of fragments in the data we intend to allocate between the hawq segments */
-	input->m_num_active_data_nodes = 10; /* number of datanodes that hold the 'queried' data - there is one datanode on each cluster host - so there are <num_hosts_in_cluster> datanodes */
-	input->m_num_of_fragment_replicas = 3;
-	input->m_num_segments_on_host = 4; /* number of Hawq segments on each cluster host - we assume all cluster hosts have Hawq segments installed */
-	input->m_num_working_segs = 64; /* the subset of Hawq segments that will do the processing - not all the Hawq segments in the cluster are involved */
-	input->m_enable_print_input_cluster = false;
-	input->m_enable_print_input_fragments = false;
-	input->m_enable_print_input_segments = false;
-	input->m_enable_print_allocated_fragments = false;
-	
-	test__distribute_work_to_gp_segments(input);
-	pfree(input);
-}
-
-void
-test__distribute_work_to_gp_segments__big_cluster_many_active_nodes(void **state)
-{
-	TestInputData *input = (TestInputData*)palloc0(sizeof(TestInputData));
-	
-	input->m_num_hosts_in_cluster = 1000; /* cluster size mustn't exceed 65025 - see function create_cluster() */
-	input->m_num_data_fragments = 100; /* number of fragments in the data we intend to allocate between the hawq segments */
-	input->m_num_active_data_nodes = 100; /* number of datanodes that hold the 'queried' data - there is one datanode on each cluster host - so there are <num_hosts_in_cluster> datanodes */
-	input->m_num_of_fragment_replicas = 3;
-	input->m_num_segments_on_host = 4; /* number of Hawq segments on each cluster host - we assume all cluster hosts have Hawq segments installed */
-	input->m_num_working_segs = 64; /* the subset of Hawq segments that will do the processing - not all the Hawq segments in the cluster are involved */
-	input->m_enable_print_input_cluster = false;
-	input->m_enable_print_input_fragments = false;
-	input->m_enable_print_input_segments = false;
-	input->m_enable_print_allocated_fragments = false;
-	
-	test__distribute_work_to_gp_segments(input);
-	pfree(input);
-}
-
-void
-test__distribute_work_to_gp_segments__small_cluster(void **state)
-{
-	TestInputData *input = (TestInputData*)palloc0(sizeof(TestInputData));
-	
-	input->m_num_hosts_in_cluster = 100; /* cluster size mustn't exceed 65025 - see function create_cluster() */
-	input->m_num_data_fragments = 100; /* number of fragments in the data we intend to allocate between the hawq segments */
-	input->m_num_active_data_nodes = 50; /* number of datanodes that hold the 'queried' data - there is one datanode on each cluster host - so there are <num_hosts_in_cluster> datanodes */
-	input->m_num_of_fragment_replicas = 3;
-	input->m_num_segments_on_host = 4; /* number of Hawq segments on each cluster host - we assume all cluster hosts have Hawq segments installed */
-	input->m_num_working_segs = 64; /* the subset of Hawq segments that will do the processing - not all the Hawq segments in the cluster are involved */
-	input->m_enable_print_input_cluster = false;
-	input->m_enable_print_input_fragments = false;
-	input->m_enable_print_input_segments = false;
-	input->m_enable_print_allocated_fragments = false;
-	
-	test__distribute_work_to_gp_segments(input);
-	pfree(input);
-}
-
-void
-test__distribute_work_to_gp_segments__small_cluster_many_active_nodes(void **state)
-{
-	TestInputData *input = (TestInputData*)palloc0(sizeof(TestInputData));
-	
-	input->m_num_hosts_in_cluster = 100; /* cluster size mustn't exceed 65025 - see function create_cluster() */
-	input->m_num_data_fragments = 100; /* number of fragments in the data we intend to allocate between the hawq segments */
-	input->m_num_active_data_nodes = 90; /* number of datanodes that hold the 'queried' data - there is one datanode on each cluster host - so there are <num_hosts_in_cluster> datanodes */
-	input->m_num_of_fragment_replicas = 3;
-	input->m_num_segments_on_host = 4; /* number of Hawq segments on each cluster host - we assume all cluster hosts have Hawq segments installed */
-	input->m_num_working_segs = 64; /* the subset of Hawq segments that will do the processing - not all the Hawq segments in the cluster are involved */
-	input->m_enable_print_input_cluster = false;
-	input->m_enable_print_input_fragments = false;
-	input->m_enable_print_input_segments = false;
-	input->m_enable_print_allocated_fragments = false;
-	
-	test__distribute_work_to_gp_segments(input);
-	pfree(input);
-}
-
-void
-test__distribute_work_to_gp_segments__small_cluster_few_replicas(void **state)
-{
-	TestInputData *input = (TestInputData*)palloc0(sizeof(TestInputData));
-	
-	input->m_num_hosts_in_cluster = 100; /* cluster size mustn't exceed 65025 - see function create_cluster() */
-	input->m_num_data_fragments = 100; /* number of fragments in the data we intend to allocate between the hawq segments */
-	input->m_num_active_data_nodes = 90; /* number of datanodes that hold the 'queried' data - there is one datanode on each cluster host - so there are <num_hosts_in_cluster> datanodes */
-	input->m_num_of_fragment_replicas = 2;
-	input->m_num_segments_on_host = 4; /* number of Hawq segments on each cluster host - we assume all cluster hosts have Hawq segments installed */
-	input->m_num_working_segs = 64; /* the subset of Hawq segments that will do the processing - not all the Hawq segments in the cluster are involved */
-	input->m_enable_print_input_cluster = false;
-	input->m_enable_print_input_fragments = false;
-	input->m_enable_print_input_segments = false;
-	input->m_enable_print_allocated_fragments = false;
-	
-	test__distribute_work_to_gp_segments(input);
-	pfree(input);
-}
-
-/*
- * Testing distribute_work_2_gp_segments
- */
-static void test__distribute_work_to_gp_segments(TestInputData *input)
-{
-	List **segs_allocated_data = NULL;
-	List * input_fragments_list = NIL;
-	char** array_of_segs = NULL;
-	bool *array_of_primaries;
-	int total_segs;
-	bool cluster_size_not_exceeded = input->m_num_hosts_in_cluster <=  65025;
-	
-	assert_true(cluster_size_not_exceeded);
-	/*  
-	 * 1. Initialize the test input parameters
-	 * We are testing an N hosts cluster. The size of the cluster is set in this section - section 1. 
-	 * Basic test assumptions:
-	 * a. There is one datanode on each host in the cluster
-	 * b. There are Hawq segments on each host in the cluster.
-	 * c. There is an equal number of Hawq segments on each host - hardcoded in this section
-	 */
-	int num_hosts_in_cluster = input->m_num_hosts_in_cluster; /* cluster size mustn't exceed 65025 - see function create_cluster() */
-	int num_data_fragments = input->m_num_data_fragments; /* number of fragments in the data we intend to allocate between the hawq segments */
-	int num_active_data_nodes = input->m_num_active_data_nodes; /* number of datanodes that hold the 'queried' data - there is one datanode on each cluster host - so there are <num_hosts_in_cluster> datanodes */
-	int num_of_fragment_replicas = input->m_num_of_fragment_replicas;
-	int num_segments_on_host = input->m_num_segments_on_host; /* number of Hawq segments on each cluster host - we assume all cluster hosts have Hawq segments installed */
-	int num_working_segs = input->m_num_working_segs; /* the subset of Hawq segments that will do the processing - not all the Hawq segments in the cluster are involved */
-	bool enable_print_input_cluster = input->m_enable_print_input_cluster;
-	bool enable_print_input_fragments = input->m_enable_print_input_fragments;
-	bool enable_print_input_segments = input->m_enable_print_input_segments;
-	bool enable_print_allocated_fragments = input->m_enable_print_allocated_fragments;
-		
-	/* 2. Create the cluster */
-	char **cluster = create_cluster(num_hosts_in_cluster);
-	
-	if (enable_print_input_cluster)
-		print_cluster(cluster, num_hosts_in_cluster);
-	 	
-	/* 3. Input - data fragments */
-	input_fragments_list = spread_fragments_in_cluster(num_data_fragments, /* number of fragments in the data we are about to allocate */
-													   num_active_data_nodes, /* hosts */
-													   num_of_fragment_replicas, /* replicas */
-													   cluster, /* the whole cluster*/
-													   num_hosts_in_cluster/* the number of hosts in the cluster */);
-	if (enable_print_input_fragments)
-		print_fragment_list(input_fragments_list); 
-	
-	/* 4. Input - hawq segments */
-	total_segs = num_hosts_in_cluster * num_segments_on_host;
-	array_of_segs = create_array_of_segs(cluster, num_hosts_in_cluster, num_segments_on_host);	
-	array_of_primaries = create_array_of_primaries(total_segs);
-		
-	buildCdbComponentDatabases(total_segs, array_of_segs, array_of_primaries);	
-	if (enable_print_input_segments)
-		print_segments_list();
-		
-	/* 5. The actual unit test of distribute_work_2_gp_segments() */
-	segs_allocated_data = distribute_work_2_gp_segments(input_fragments_list, total_segs, num_working_segs);
-	if (enable_print_allocated_fragments)
-		print_allocated_fragments(segs_allocated_data, total_segs);
-	
-	/* 6. The validations - verifying that the expected output was obtained */
-	validate_total_fragments_allocated(segs_allocated_data, total_segs, num_data_fragments);
-	validate_max_load_per_segment(segs_allocated_data, total_segs, num_working_segs, num_data_fragments);
-	validate_all_working_segments_engagement(segs_allocated_data, total_segs, num_working_segs, num_data_fragments, num_hosts_in_cluster);
-	
-	/* 7. Cleanup */
-	restoreCdbComponentDatabases();
-	clean_cluster(cluster, num_hosts_in_cluster);
-	clean_array_of_segs(array_of_segs, total_segs);
-	clean_allocated_fragments(segs_allocated_data, total_segs);
-	pfree(array_of_primaries);
-}
-
-/* create an array of segments based on the hosts in the cluster and the number of Hawq segments per host */
-static char** create_array_of_segs(char **cluster, int num_hosts, int num_segments_on_host)
-{
-	int i, j;
-	int total_segs = num_hosts * num_segments_on_host;
-	char **array_of_segs = (char**)palloc0(total_segs * sizeof(char *));
-	
-	for (i = 0; i < num_hosts; i++)
-	{
-		for (j = 0; j < num_segments_on_host; j++)
-		{
-			array_of_segs[i * num_segments_on_host + j] = pstrdup(cluster[i]);
-		}
-	}
-
-	return array_of_segs;
-}
-
-/* clean the array of Hawq segments */
-static void clean_array_of_segs(char **array_of_segs, int total_segments)
-{
-	int i;
-	
-	for (i = 0; i < total_segments; i++)
-		pfree(array_of_segs[i]);
-	pfree(array_of_segs);
-}
-
-static bool* create_array_of_primaries(int total_segments)
-{
-	int i;
-	bool *primaries = (bool*)palloc0(total_segments * sizeof(bool));
-	for (i = 0; i < total_segments; i++)
-		primaries[i] = true;
-		
-	return primaries;
-}
-
-/* gives an ip to each host in a num_hosts size cluster */
-static char** create_cluster(int num_hosts)
-{
-	char** cluster = (char**)palloc0(num_hosts * sizeof(char *));
-	int i;
-	char *prefix = "1.2.%d.%d";
-	int third_octet = 1; /* let's begin at 1 */
-	int fourth_octet = 1;
-	StringInfoData ip;
-	initStringInfo(&ip);
-	
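-	/* ips are generated as 1.2.X.Y with X and Y in 1..255, which caps the cluster at 255x255 = 65025 hosts */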
-	for (i = 0; i < num_hosts; i++)
-	{
-		appendStringInfo(&ip, prefix, third_octet, fourth_octet);
-		cluster[i] = pstrdup(ip.data);
-		/* this naming scheme will accommodate a cluster size up to 255x255 = 65025. */
-		fourth_octet++;
-		if (fourth_octet == 256)
-		{
-			fourth_octet = 1;
-			third_octet++;
-		}
-		resetStringInfo(&ip);
-	}
-	
-	return  cluster;
-}
-
-/* release memory */
-static void clean_cluster(char** cluster, int num_hosts)
-{
-	int i;
-	
-	for (i = 0; i < num_hosts; i++)
-	{
-		if (cluster[i])
-			pfree(cluster[i]);
-	}
-	pfree(cluster);
-}
-
-/* show the cluster */
-static void print_cluster(char** cluster, int num_hosts)
-{
-	int i;
-	StringInfoData msg;
-	initStringInfo(&msg);
-	
-	appendStringInfo(&msg, "cluster size: %d\n", num_hosts);
-	for (i = 0; i < num_hosts; i++)
-	{
-		if (cluster[i])
-			appendStringInfo(&msg, "cluster #%d:   %s\n", i + 1, cluster[i]);
-		else
-			appendStringInfo(&msg, "cluster naming error \n");
-	}
-	
-	elog(FRAGDEBUG, "%s", msg.data);
-	pfree(msg.data);
-}
-
-/* print, for each segment, its index and host ip */
-static void print_segments_list()
-{
-	StringInfoData msg;
-	CdbComponentDatabases *test_cdb = GpAliveSegmentsInfo.cdbComponentDatabases;
-	initStringInfo(&msg);
-	
-	for (int i = 0; i < test_cdb->total_segment_dbs; ++i)
-	{
-		CdbComponentDatabaseInfo* component = &test_cdb->segment_db_info[i];
-		appendStringInfo(&msg, "\nsegment -- index: %d, ip: %s", component->segindex, component->hostip);
-	}
-	
-	elog(FRAGDEBUG, "%s", msg.data);
-	pfree(msg.data);
-}
-
-/* returns the ip of the segment's host */
-static char* find_segment_ip_by_index(int seg_index)
-{	
-	CdbComponentDatabases *test_cdb = GpAliveSegmentsInfo.cdbComponentDatabases;
-	if (seg_index < 0 || seg_index >= test_cdb->total_segment_dbs)
-		assert_true(false);
-		
-	for (int i = 0; i < test_cdb->total_segment_dbs; ++i)
-	{
-		CdbComponentDatabaseInfo* seg = &test_cdb->segment_db_info[i];
-		if (seg->segindex == seg_index)
-			return seg->hostip;
-	}
-	
-	/* we assert if the given index was not found among the segments */
-	assert_true(false);
-	return NULL;
-}
-
-/* 
- * print the allocated fragments list 
- * allocated_fragments is an array of lists. The size of the array is total_segs.
- * The list located at index i in the array holds the fragments that will be processed
- * by Hawq segment i
- */
-static void print_allocated_fragments(List **allocated_fragments, int total_segs)
-{
-	StringInfoData msg;
-	initStringInfo(&msg);
-	appendStringInfo(&msg, "ALLOCATED FRAGMENTS FOR EACH SEGMENT:\n");
-	
-	for (int i = 0; i < total_segs; i++)
-	{
-		if (allocated_fragments[i])
-		{
-			ListCell *frags_cell = NULL;
-			foreach(frags_cell, allocated_fragments[i])
-			{
-				AllocatedDataFragment *frag = (AllocatedDataFragment*)lfirst(frags_cell);
-				appendStringInfo(&msg, "%s\n", print_one_allocated_data_fragment(frag, i));
-			}
-		}
-	}
-		
-	elog(FRAGDEBUG, "%s", msg.data);
-	if (msg.data)
-		pfree(msg.data);
-}
-
-/* print one allocated fragment */
-static char* print_one_allocated_data_fragment(AllocatedDataFragment *frag, int seg_index)
-{
-	StringInfoData msg;
-	initStringInfo(&msg);
-	char* seg_ip = find_segment_ip_by_index(seg_index);
-	if (!seg_ip)
-		seg_ip = "INVALID SEGMENT INDEX";
-	bool locality = (strcmp(frag->host, seg_ip) == 0) ? true : false;
-	
-	appendStringInfo(&msg, 
-	                 "locality: %d, segment number: %d , segment ip: %s --- fragment index: %d, datanode host: %s, file: %s", 
-	                 locality, seg_index, seg_ip, frag->index, frag->host, frag->source_name);
-	return msg.data;
-}
-
-/* release memory of allocated_fragments */
-void clean_allocated_fragments(List **allocated_fragments, int total_segs)
-{
-	for (int i = 0; i < total_segs; i++)
-		if (allocated_fragments[i])
-			free_allocated_frags(allocated_fragments[i]);
-	pfree(allocated_fragments);
-}
-
-/* calculate the optimal load distribution per segment */
-static int calc_load_per_segment(int input_total_fragments, int working_segs)
-{
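-	/* ceiling division: if the fragments don't divide evenly, each segment may take one extra fragment */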
-	return (input_total_fragments % working_segs) ? input_total_fragments / working_segs + 1
-	                                              : input_total_fragments / working_segs;
-}
-
-/*
- * test that a host is unique.
- * the function ensures that ips are unique by maintaining a set of ips;
- * the set is implemented as a linked list
- */
-static bool is_host_uniq(List** ips_list, char* ip)
-{
-	ListCell* cell;
-	foreach(cell, *ips_list)
-	{
-		char* foundip = (char*)lfirst(cell);
-		if (strcmp(foundip, ip) == 0)
-			return false;
-	}
-	
-	*ips_list = lappend(*ips_list, ip);
-	return true;
-}
-
-/* validate that all input blocks were allocated */
-static void validate_total_fragments_allocated(List **allocated_fragments, int total_segs, int input_total_fragments)
-{
-	int total_fragments_allocated = 0; 
-	
-	for (int i = 0; i < total_segs; i++)
-	{
-		if (allocated_fragments[i])
-			total_fragments_allocated += list_length(allocated_fragments[i]);
-	}
-	
-	assert_int_equal(total_fragments_allocated, input_total_fragments);
-}
-
-/* validate that the load per segment does not exceed the expected load */
-static void validate_max_load_per_segment(List **allocated_fragments, int total_segs, int working_segs, int input_total_fragments)
-{
-	int max_load = 0;
-	int load_per_segment =  calc_load_per_segment(input_total_fragments, working_segs);
-	
-	for (int i = 0; i < total_segs; i++)
-	{
-		if (allocated_fragments[i] && list_length(allocated_fragments[i]) > max_load)
-			max_load = list_length(allocated_fragments[i]);
-	}
-	
-	bool load_per_segment_not_exceeded = load_per_segment >=  max_load;
-	elog(FRAGDEBUG, "actual max_load: %d, expected load_per_segment: %d", max_load, load_per_segment);
-	assert_true(load_per_segment_not_exceeded);
-}
-
-/*
- * we validate that every working segment is engaged: when load_per_segment is
- * greater than one, every working segment has allocated fragments, and when
- * load_per_segment is 1, the number of segments that got work equals the
- * number of fragments
- */
-static void validate_all_working_segments_engagement(List **allocated_fragments, 
-													 int total_segs, 
-													 int working_segs, 
-													 int input_total_fragments,
-													 int num_hosts_in_cluster)
-{
-	List* ips_list = NIL;
-	ListCell* cell;
-	int total_segs_engaged = 0;
-	int load_per_segment =  calc_load_per_segment(input_total_fragments, working_segs);
-	bool require_full_distribution = num_hosts_in_cluster >= working_segs;
-	
-	for (int i = 0; i < total_segs; i++)
-		if (allocated_fragments[i] && list_length(allocated_fragments[i]) > 0)
-		{
-			char *ip;
-			bool isuniq;
-			total_segs_engaged++;
-			if (require_full_distribution)
-			{
-				ip = find_segment_ip_by_index(i);
-				isuniq = is_host_uniq(&ips_list, ip);
-				assert_true(isuniq);
-			}
-		}
-	
-	if (load_per_segment == 1)
-		assert_int_equal(total_segs_engaged, input_total_fragments);
-	else
-	{
-		bool total_segs_engaged_not_exceeded = total_segs_engaged <= working_segs;
-		assert_true(total_segs_engaged_not_exceeded);
-	}
-	
-	/* clean memory */
-	foreach(cell, ips_list)
-		pfree(lfirst(cell));
-	list_free(ips_list);
-}
-
-/*
- * Creates a list of DataFragment for one file ("file.txt").
- * The important thing here is the fragments' location. It is determined by the parameters:
- * replication_factor - number of copies of each fragment on the different hosts.
- * number_of_hosts - number of hosts
- * number_of_fragments - number of fragments in the file.
- * cluster - holds the ips of all hosts in the cluster
- *
- * Each fragment will have <replication_factor> hosts from the cluster
- */
-static List* 
-spread_fragments_in_cluster(int number_of_fragments, 
-								  int number_of_hosts, 
-								  int replication_factor, 
-								  char **cluster, 
-								  int cluster_size)
-{
-	int first_host, target_host;
-	List* fragments_list = NIL;
-	StringInfoData string_info;
-	initStringInfo(&string_info);
-	
-	/* pick the first host in the cluster that will host the data. The fragments will be spread from this host onward */
-	first_host = 0;
-	
-	target_host = first_host;
-	
-	for (int i = 0; i < number_of_fragments; ++i)
-	{
-		DataFragment* fragment = (DataFragment*) palloc0(sizeof(DataFragment));
-		
-		fragment->index = i;
-		fragment->source_name = pstrdup("file.txt");
-		
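-		/* spread this fragment's replicas across the active hosts; the (j + i) modulo rotation wraps around number_of_hosts */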
-		for (int j = 0; j < replication_factor; ++j)
-		{
-			FragmentHost* fhost = (FragmentHost*)palloc0(sizeof(FragmentHost));
-			appendStringInfoString(&string_info, cluster[target_host]);
-			fhost->ip = pstrdup(string_info.data);
-			resetStringInfo(&string_info);
-			fragment->replicas = lappend(fragment->replicas, fhost);
-			
-			target_host = ((j + i + first_host) % number_of_hosts);
-		}
-		assert_int_equal(list_length(fragment->replicas), replication_factor);
-		appendStringInfo(&string_info, "metadata %d", i);
-		fragment->fragment_md = pstrdup(string_info.data);
-		resetStringInfo(&string_info);
-		appendStringInfo(&string_info, "user data %d", i);
-		fragment->user_data = pstrdup(string_info.data);
-		resetStringInfo(&string_info);
-		fragments_list = lappend(fragments_list, fragment);
-	}
-	
-	pfree(string_info.data);
-	return fragments_list;
-}
-
-
-
-
-
-
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/hd_work_mgr_do_segment_clustering_by_host_test.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/hd_work_mgr_do_segment_clustering_by_host_test.c b/src/backend/access/external/test_discard/hd_work_mgr_do_segment_clustering_by_host_test.c
deleted file mode 100644
index be1cdef..0000000
--- a/src/backend/access/external/test_discard/hd_work_mgr_do_segment_clustering_by_host_test.c
+++ /dev/null
@@ -1,175 +0,0 @@
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "hd_work_mgr_mock.h"
-
-
-/*
- * check that element list_index in segment_list
- * has the expected hostip and segindex.
- */
-void check_segment_info(List* segment_list, int list_index,
-						const char* expected_hostip,
-						int expected_segindex)
-{
-
-	CdbComponentDatabaseInfo* seg_info =
-			(CdbComponentDatabaseInfo*)lfirst(list_nth_cell(segment_list, list_index));
-	assert_string_equal(seg_info->hostip, expected_hostip);
-	assert_int_equal(seg_info->segindex, expected_segindex);
-}
-
-/*
- * Test clustering of segments to hosts.
- * Environment: 10 segments over 3 hosts, all primary.
- */
-void 
-test__do_segment_clustering_by_host__10SegmentsOn3Hosts(void **state)
-{
-	List* groups = NIL;
-	ListCell* cell = NULL;
-	GpHost* gphost = NULL;
-	List* segs = NIL;
-	CdbComponentDatabaseInfo* seg_info = NULL;
-
-	char* array_of_segs[10] =
-		{"1.2.3.1", "1.2.3.1", "1.2.3.1", "1.2.3.1",
-		 "1.2.3.2", "1.2.3.2", "1.2.3.2",
-		 "1.2.3.3", "1.2.3.3", "1.2.3.3"
-	};
-	bool array_of_primaries[10] =
-	{
-		true, true, true, true,
-		true, true, true,
-		true, true, true
-	};
-	int number_of_segments = 10;
-	/* sanity */
-	assert_true(number_of_segments == (sizeof(array_of_segs) / sizeof(array_of_segs[0])));
-	assert_true(number_of_segments == (sizeof(array_of_primaries) / sizeof(array_of_primaries[0])));
-
-	buildCdbComponentDatabases(number_of_segments, array_of_segs, array_of_primaries);
-
-	CdbComponentDatabases *cdb = GpAliveSegmentsInfo.cdbComponentDatabases;
-
-	/* sanity for cdbComponentDatabases building */
-	assert_int_equal(cdb->total_segment_dbs, number_of_segments);
-	assert_string_equal(cdb->segment_db_info[4].hostip, array_of_segs[4]);
-
-	/* test do_segment_clustering_by_host */
-	groups = do_segment_clustering_by_host();
-
-	assert_int_equal(list_length(groups), 3);
-
-	cell = list_nth_cell(groups, 0);
-	gphost = (GpHost*)lfirst(cell);
-	assert_string_equal(gphost->ip, array_of_segs[0]);
-	assert_int_equal(list_length(gphost->segs), 4);
-	for (int i = 0; i < 4; ++i)
-	{
-		check_segment_info(gphost->segs, i, "1.2.3.1", i);
-	}
-
-	cell = list_nth_cell(groups, 1);
-	gphost = (GpHost*)lfirst(cell);
-	assert_string_equal(gphost->ip, "1.2.3.2");
-	assert_int_equal(list_length(gphost->segs), 3);
-	for (int i = 0; i < 3; ++i)
-	{
-		check_segment_info(gphost->segs, i, "1.2.3.2", i+4);
-	}
-
-	cell = list_nth_cell(groups, 2);
-	gphost = (GpHost*)lfirst(cell);
-	assert_string_equal(gphost->ip, "1.2.3.3");
-	assert_int_equal(list_length(gphost->segs), 3);
-	for (int i = 0; i < 3; ++i)
-	{
-		check_segment_info(gphost->segs, i, "1.2.3.3", i+7);
-	}
-
-	restoreCdbComponentDatabases();
-}
-
-/*
- * Test clustering of segments to hosts.
- * Environment: 10 segments over 3 hosts, some of them mirrors.
- */
-void
-test__do_segment_clustering_by_host__10SegmentsOn3HostsWithMirrors(void **state)
-{
-	List* groups = NIL;
-	ListCell* cell = NULL;
-	GpHost* gphost = NULL;
-	List* segs = NIL;
-	CdbComponentDatabaseInfo* seg_info = NULL;
-	CdbComponentDatabases *cdb = NULL;
-
-	char* array_of_segs[10] =
-	{
-		"1.2.3.1", "1.2.3.1", "1.2.3.1",
-		"1.2.3.2", "1.2.3.2", "1.2.3.2",
-		"1.2.3.3", "1.2.3.3", "1.2.3.3",
-		"1.2.3.1" /* another segment on the first host */
-	};
-	bool array_of_primaries[10] =
-	{
-		true, false, true,
-		true, true, true,
-		true, true, false,
-		true
-	};
-	int number_of_segments = 10;
-	/* sanity */
-	assert_true(number_of_segments == (sizeof(array_of_segs) / sizeof(array_of_segs[0])));
-	assert_true(number_of_segments == (sizeof(array_of_primaries) / sizeof(array_of_primaries[0])));
-
-	int array_for_host1[3] = {0, 2, 9};
-	int array_for_host2[3] = {3, 4, 5};
-	int array_for_host3[2] = {6, 7};
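-	/* expected primary segindexes grouped per host - the mirror segments (segindexes 1 and 8) should not appear */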
-
-	buildCdbComponentDatabases(number_of_segments, array_of_segs, array_of_primaries);
-
-	cdb = GpAliveSegmentsInfo.cdbComponentDatabases;
-
-	/* sanity for cdbComponentDatabases building */
-	assert_int_equal(cdb->total_segment_dbs, number_of_segments);
-	assert_string_equal(cdb->segment_db_info[4].hostip, array_of_segs[4]);
-
-	/* test do_segment_clustering_by_host */
-	groups = do_segment_clustering_by_host();
-
-	assert_int_equal(list_length(groups), 3);
-
-	cell = list_nth_cell(groups, 0);
-	gphost = (GpHost*)lfirst(cell);
-	assert_string_equal(gphost->ip, "1.2.3.1");
-	assert_int_equal(list_length(gphost->segs), 3);
-	for (int i = 0; i < 3; ++i)
-	{
-		check_segment_info(gphost->segs, i, "1.2.3.1", array_for_host1[i]);
-	}
-
-	cell = list_nth_cell(groups, 1);
-	gphost = (GpHost*)lfirst(cell);
-	assert_string_equal(gphost->ip, "1.2.3.2");
-	assert_int_equal(list_length(gphost->segs), 3);
-	for (int i = 0; i < 3; ++i)
-	{
-		check_segment_info(gphost->segs, i, "1.2.3.2", array_for_host2[i]);
-	}
-
-	cell = list_nth_cell(groups, 2);
-	gphost = (GpHost*)lfirst(cell);
-	assert_string_equal(gphost->ip, "1.2.3.3");
-	assert_int_equal(list_length(gphost->segs), 2);
-	for (int i = 0; i < 2; ++i)
-	{
-		check_segment_info(gphost->segs, i, "1.2.3.3", array_for_host3[i]);
-	}
-
-	restoreCdbComponentDatabases();
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/hd_work_mgr_mock.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/hd_work_mgr_mock.c b/src/backend/access/external/test_discard/hd_work_mgr_mock.c
deleted file mode 100644
index ec1eaf5..0000000
--- a/src/backend/access/external/test_discard/hd_work_mgr_mock.c
+++ /dev/null
@@ -1,53 +0,0 @@
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "hd_work_mgr_mock.h"
-
-/*
- * Helper functions to create and restore GpAliveSegmentsInfo.cdbComponentDatabases element
- * used by hd_work_mgr
- */
-
-/*
- * Builds an array of CdbComponentDatabaseInfo.
- * Each segment is assigned a sequence number and an ip.
- * segs_num - the number of segments
- * segs_hostips - array of the ip of each segment
- * primaries_map - array of which segments are primaries
- */
-void buildCdbComponentDatabases(int segs_num,
-								char* segs_hostips[],
-								bool primaries_map[])
-{
-	CdbComponentDatabases *test_cdb = palloc0(sizeof(CdbComponentDatabases));
-	CdbComponentDatabaseInfo* component = NULL;
-	test_cdb->total_segment_dbs = segs_num;
-	test_cdb->segment_db_info =
-			(CdbComponentDatabaseInfo *) palloc0(sizeof(CdbComponentDatabaseInfo) * test_cdb->total_segment_dbs);
-
-	for (int i = 0; i < test_cdb->total_segment_dbs; ++i)
-	{
-		component = &test_cdb->segment_db_info[i];
-		component->segindex = i;
-		component->role = primaries_map[i] ? SEGMENT_ROLE_PRIMARY : SEGMENT_ROLE_MIRROR;
-		component->hostip = pstrdup(segs_hostips[i]);
-	}
-
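-	/* stash the original values so restoreCdbComponentDatabases() can put them back */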
-	orig_cdb = GpAliveSegmentsInfo.cdbComponentDatabases;
-	orig_seg_count = GpAliveSegmentsInfo.aliveSegmentsCount;
-	GpAliveSegmentsInfo.cdbComponentDatabases = test_cdb;
-	GpAliveSegmentsInfo.aliveSegmentsCount = segs_num;
-}
-
-void restoreCdbComponentDatabases()
-{
-	/* free test CdbComponentDatabases */
-	if (GpAliveSegmentsInfo.cdbComponentDatabases)
-		freeCdbComponentDatabases(GpAliveSegmentsInfo.cdbComponentDatabases);
-
-	GpAliveSegmentsInfo.cdbComponentDatabases = orig_cdb;
-	GpAliveSegmentsInfo.aliveSegmentsCount = orig_seg_count;
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/hd_work_mgr_mock.h
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/hd_work_mgr_mock.h b/src/backend/access/external/test_discard/hd_work_mgr_mock.h
deleted file mode 100644
index 8728705..0000000
--- a/src/backend/access/external/test_discard/hd_work_mgr_mock.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef HD_WORK_MGR_MOCK_
-#define HD_WORK_MGR_MOCK_
-
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "../hd_work_mgr.c"
-
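-/* originals saved by buildCdbComponentDatabases() and restored by restoreCdbComponentDatabases() */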
-static CdbComponentDatabases *orig_cdb = NULL;
-static int orig_seg_count = -1;
-
-/*
- * Helper functions copied from backend/cdb/cdbutils.c
- */
-
-/*
- * _freeCdbComponentDatabases
- *
- * Releases the storage occupied by the CdbComponentDatabases
- * struct pointed to by the argument.
- */
-void
-_freeCdbComponentDatabases(CdbComponentDatabases *pDBs);
-
-/*
- * _freeCdbComponentDatabaseInfo:
- * Releases any storage allocated for members variables of a CdbComponentDatabaseInfo struct.
- */
-void
-_freeCdbComponentDatabaseInfo(CdbComponentDatabaseInfo *cdi);
-
-/*
- * Helper functions to create and restore GpAliveSegmentsInfo.cdbComponentDatabases element
- * used by hd_work_mgr
- */
-
-/*
- * Builds an array of CdbComponentDatabaseInfo.
- * Each segment is assigned a sequence number and an ip.
- * segs_num - the number of segments
- * segs_hostips - array of the ip of each segment
- * primaries_map - array of which segments are primaries
- */
-void buildCdbComponentDatabases(int segs_num,
-								char* segs_hostips[],
-								bool primaries_map[]);
-
-
-void restoreCdbComponentDatabases();
-
-#endif //HD_WORK_MGR_MOCK_

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/hd_work_mgr_test.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/hd_work_mgr_test.c b/src/backend/access/external/test_discard/hd_work_mgr_test.c
deleted file mode 100644
index 1258c5a..0000000
--- a/src/backend/access/external/test_discard/hd_work_mgr_test.c
+++ /dev/null
@@ -1,34 +0,0 @@
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "hd_work_mgr_mock.c"
-#include "hd_work_mgr_do_segment_clustering_by_host_test.c"
-#include "hd_work_mgr_allocate_fragments_to_datanodes_test.c"
-#include "hd_work_mgr_distribute_work_2_gp_segments_test.c"
-
-int 
-main(int argc, char* argv[]) 
-{
-	cmockery_parse_arguments(argc, argv);
-
-	const UnitTest tests[] = {
-			unit_test(test__do_segment_clustering_by_host__10SegmentsOn3Hosts),
-			unit_test(test__do_segment_clustering_by_host__10SegmentsOn3HostsWithMirrors),
-			unit_test(test__get_dn_processing_load),
-			unit_test(test__create_allocated_fragment__NoUserData),
-			unit_test(test__create_allocated_fragment__WithUserData),
-			unit_test(test__allocate_fragments_to_datanodes__4Fragments10Hosts3Replicates),
-			unit_test(test__allocate_fragments_to_datanodes__4Fragments3Hosts2Replicates),
-			unit_test(test__allocate_fragments_to_datanodes__4Fragments3Hosts1Replicates),
-			unit_test(test__allocate_fragments_to_datanodes__7Fragments10Hosts1Replicates),
-			unit_test(test__distribute_work_to_gp_segments__big_cluster_few_active_nodes),	
-			unit_test(test__distribute_work_to_gp_segments__big_cluster_many_active_nodes),
-			unit_test(test__distribute_work_to_gp_segments__small_cluster),
-			unit_test(test__distribute_work_to_gp_segments__small_cluster_many_active_nodes),
-			unit_test(test__distribute_work_to_gp_segments__small_cluster_few_replicas)
-	};
-	return run_tests(tests);
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/pxffilters_test.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/pxffilters_test.c b/src/backend/access/external/test_discard/pxffilters_test.c
deleted file mode 100644
index 8f96550..0000000
--- a/src/backend/access/external/test_discard/pxffilters_test.c
+++ /dev/null
@@ -1,556 +0,0 @@
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "../pxffilters.c"
-
-void
-test__supported_filter_type(void **state)
-{
-	Oid oids[] =
-	{
-		INT2OID,
-		INT4OID,
-		INT8OID,
-		FLOAT4OID,
-		FLOAT8OID,
-		NUMERICOID,
-		TEXTOID,
-		VARCHAROID,
-		BPCHAROID,
-		CHAROID,
-		BYTEAOID,
-		BOOLOID,
-		CIRCLEOID /* unsupported type */
-	};
-
-	int array_size = sizeof(oids) / sizeof(oids[0]);
-	bool result = false;
-	int i = 0;
-
-	/* supported types */
-	for (; i < array_size-1; ++i)
-	{
-		result = supported_filter_type(oids[i]);
-		assert_true(result);
-	}
-	/* unsupported type */
-	result = supported_filter_type(oids[i]);
-	assert_false(result);
-
-	/* go over pxf_supported_types array */
-	int nargs = sizeof(pxf_supported_types) / sizeof(Oid);
-	assert_int_equal(nargs, 12);
-	for (i = 0; i < nargs; ++i)
-	{
-		assert_true(supported_filter_type(pxf_supported_types[i]));
-	}
-
-}
-
-/*
- * const_value must be palloc'ed, it will be freed by const_to_str
- */
-void
-mock__const_to_str(Oid const_type, char* const_value)
-{
-	expect_value(getTypeOutputInfo, type, const_type);
-	expect_any(getTypeOutputInfo, typOutput);
-	expect_any(getTypeOutputInfo, typIsVarlena);
-	will_return(getTypeOutputInfo, NULL);
-
-	expect_any(OidOutputFunctionCall, functionId);
-	expect_any(OidOutputFunctionCall, val);
-	will_return(OidOutputFunctionCall, const_value);
-}
-
-void
-verify__const_to_str(bool is_null, char* const_value, Oid const_type, char* expected)
-{
-	StringInfo result = makeStringInfo();
-	char* value = NULL;
-	Const* input = (Const*) palloc0(sizeof(Const));
-	input->constisnull = is_null;
-	input->consttype = const_type;
-
-	/* need to prepare inner functions */
-	if (!is_null)
-	{
-		value = strdup(const_value); /* will be free'd by const_to_str */
-
-		mock__const_to_str(const_type, value);
-	}
-
-	/* no expected value means it's a negative test */
-	if (expected)
-	{
-		run__const_to_str(input, result, expected);
-	}
-	else
-	{
-		run__const_to_str__negative(input, result, value);
-		pfree(value); /* value was not freed by const_to_str because of the failure */
-	}
-
-	pfree(result->data);
-	pfree(result);
-	pfree(input);
-}
-
-void run__const_to_str(Const* input, StringInfo result, char* expected)
-{
-	const_to_str(input, result);
-	assert_string_equal(result->data, expected);
-}
-
-void run__const_to_str__negative(Const* input, StringInfo result, char* value)
-{
-
-	StringInfo err_msg = makeStringInfo();
-	appendStringInfo(err_msg,
-			"internal error in pxffilters.c:const_to_str. "
-			"Using unsupported data type (%d) (value %s)", input->consttype, value);
-
-	/* Setting the test -- code omitted -- */
-	PG_TRY();
-	{
-		/* This will throw a ereport(ERROR).*/
-		const_to_str(input, result);
-	}
-	PG_CATCH();
-	{
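-		/* presumably a dummy non-NULL CurrentMemoryContext so CopyErrorData() can run under the mocks */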
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the type of expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_INTERNAL_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, err_msg->data);
-
-		pfree(err_msg->data);
-		pfree(err_msg);
-
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-
-void
-test__const_to_str__null(void **state)
-{
-	verify__const_to_str(true, NULL, 1, "\"NULL\"");
-}
-
-void
-test__const_to_str__int(void **state)
-{
-	verify__const_to_str(false, "1234", INT2OID, "1234");
-	verify__const_to_str(false, "1234", INT4OID, "1234");
-	verify__const_to_str(false, "1234", INT8OID, "1234");
-	verify__const_to_str(false, "1.234", FLOAT4OID, "1.234");
-	verify__const_to_str(false, "1.234", FLOAT8OID, "1.234");
-	verify__const_to_str(false, "1234", NUMERICOID, "1234");
-}
-
-void
-test__const_to_str__text(void **state)
-{
-	verify__const_to_str(false, "that", TEXTOID, "\\\"that\\\"");
-	verify__const_to_str(false, "joke", VARCHAROID, "\\\"joke\\\"");
-	verify__const_to_str(false, "isn't", BPCHAROID, "\\\"isn't\\\"");
-	verify__const_to_str(false, "funny", CHAROID, "\\\"funny\\\"");
-	verify__const_to_str(false, "anymore", BYTEAOID, "\\\"anymore\\\"");
-}
-
-void
-test__const_to_str__boolean(void **state)
-{
-	verify__const_to_str(false, "t", BOOLOID, "\"true\"");
-	verify__const_to_str(false, "f", BOOLOID, "\"false\"");
-}
-
-void
-test__const_to_str__NegativeCircle(void **state)
-{
-	verify__const_to_str(false, "<3,3,9>", CIRCLEOID, NULL);
-}
-
-void
-test__opexpr_to_pxffilter__null(void **state)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-	OpExpr *expr = (OpExpr*) palloc0(sizeof(OpExpr));
-
-	assert_false(opexpr_to_pxffilter(NULL, NULL));
-	assert_false(opexpr_to_pxffilter(NULL, filter));
-	assert_false(opexpr_to_pxffilter(expr, NULL));
-
-	expr->args = NIL;
-	assert_false(opexpr_to_pxffilter(expr, filter));
-
-	pfree(filter);
-	pfree(expr);
-}
-
-void
-test__opexpr_to_pxffilter__unary_expr(void **state)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-	OpExpr *expr = (OpExpr*) palloc0(sizeof(OpExpr));
-	Var *arg = (Var*) palloc0(sizeof(Var));
-	arg->xpr.type = T_Var;
-
-	assert_false(opexpr_to_pxffilter(NULL, NULL));
-	assert_false(opexpr_to_pxffilter(NULL, filter));
-	assert_false(opexpr_to_pxffilter(expr, NULL));
-
-	expr->args = NIL;
-	expr->args = lappend(expr->args, arg);
-	assert_false(opexpr_to_pxffilter(expr, filter));
-
-	pfree(arg);
-	pfree(filter);
-	pfree(expr);
-}
-
-void
-compare_filters(PxfFilterDesc* result, PxfFilterDesc* expected)
-{
-	assert_int_equal(result->l.opcode, expected->l.opcode);
-	assert_int_equal(result->l.attnum, expected->l.attnum);
-	if (expected->l.conststr)
-		assert_string_equal(result->l.conststr->data, expected->l.conststr->data);
-	else
-		assert_true(result->l.conststr == NULL);
-
-	assert_true(result->r.opcode == expected->r.opcode);
-	assert_int_equal(result->r.attnum, expected->r.attnum);
-	if (expected->r.conststr)
-		assert_string_equal(result->r.conststr->data, expected->r.conststr->data);
-	else
-		assert_true(result->r.conststr == NULL);
-
-	assert_int_equal(result->op, expected->op);
-}
-
-PxfFilterDesc* build_filter(char lopcode, int lattnum, char* lconststr,
-							 char ropcode, int rattnum, char* rconststr,
-							 PxfOperatorCode op)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-
-	filter->l.opcode = lopcode;
-	filter->l.attnum = lattnum;
-	if (lconststr)
-	{
-		filter->l.conststr = makeStringInfo();
-		appendStringInfoString(filter->l.conststr, lconststr);
-	}
-
-	filter->r.opcode = ropcode;
-	filter->r.attnum = rattnum;
-	if (rconststr)
-	{
-		filter->r.conststr = makeStringInfo();
-		appendStringInfoString(filter->r.conststr, rconststr);
-	}
-
-	filter->op = op;
-
-	return filter;
-}
-
-Var* build_var(Oid oid, int attno) {
-	Var *arg_var = (Var*) palloc0(sizeof(Var));
-	arg_var->xpr.type = T_Var;
-	arg_var->vartype = oid;
-	arg_var->varattno = attno;
-	return arg_var;
-}
-
-Const* build_const(Oid oid, char* value)
-{
-	Const* arg_const = (Const*) palloc0(sizeof(Const));
-	arg_const->xpr.type = T_Const;
-	arg_const->constisnull = (value == NULL);
-	arg_const->consttype = oid;
-	if (value != NULL)
-	{
-		mock__const_to_str(oid, value);
-	}
-
-	return arg_const;
-}
-
-OpExpr* build_op_expr(void* left, void* right, int op)
-{
-	OpExpr *expr = (OpExpr*) palloc0(sizeof(OpExpr));
-	expr->args = NIL;
-	expr->args = lappend(expr->args, left);
-	expr->args = lappend(expr->args, right);
-
-	expr->opno = op;
-	expr->xpr.type = T_OpExpr;
-	return expr;
-}
-
-void run__opexpr_to_pxffilter__positive(Oid dbop, PxfOperatorCode expectedPxfOp)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-	Var *arg_var = build_var(INT2OID, 1);
-	char* const_value = strdup("1984"); /* will be free'd by const_to_str */
-	Const* arg_const = build_const(INT2OID, const_value);
-
-	OpExpr *expr = build_op_expr(arg_var, arg_const, dbop);
-	PxfFilterDesc* expected = build_filter(
-			PXF_ATTR_CODE, 1, NULL,
-			PXF_CONST_CODE, 0, "1984",
-			expectedPxfOp);
-
-	/* run test */
-	assert_true(opexpr_to_pxffilter(expr, filter));
-
-	compare_filters(filter, expected);
-
-	pxf_free_filter(expected);
-	pxf_free_filter(filter);
-
-	list_free_deep(expr->args); /* free all args */
-	pfree(expr);
-}
-
-void
-test__opexpr_to_pxffilter__intGT(void **state)
-{
-	run__opexpr_to_pxffilter__positive(520 /* int2gt */, PXFOP_GT);
-}
-
-void
-test__opexpr_to_pxffilter__allSupportedTypes(void **state)
-{
-	int nargs = sizeof(pxf_supported_opr) / sizeof(dbop_pxfop_map);
-	PxfOperatorCode pxfop = 0;
-	Oid dbop = InvalidOid;
-
-	for (int i = 0; i < nargs; ++i)
-	{
-		dbop = pxf_supported_opr[i].dbop;
-		pxfop = pxf_supported_opr[i].pxfop;
-		run__opexpr_to_pxffilter__positive(dbop, pxfop);
-	}
-}
-
-/* NOTE: this test does not reflect a real use case - when the query includes
- * 'is null' or 'is not null', the qualifier node is T_NullTest and not T_OpExpr */
-void
-test__opexpr_to_pxffilter__attributeIsNull(void **state)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-	Var *arg_var = build_var(INT2OID, 1);
-	Const* arg_const = build_const(INT2OID, NULL);
-	OpExpr *expr = build_op_expr(arg_var, arg_const, 94 /* int2eq */);
-
-	PxfFilterDesc* expected = build_filter(
-				PXF_ATTR_CODE, 1, NULL,
-				PXF_CONST_CODE, 0, "\"NULL\"",
-				PXFOP_EQ);
-
-	/* run test */
-	assert_true(opexpr_to_pxffilter(expr, filter));
-	compare_filters(filter, expected);
-
-	pxf_free_filter(filter);
-	pxf_free_filter(expected);
-
-	list_free_deep(expr->args); /* free all args */
-	pfree(expr);
-}
-
-/*
- * Test for a query with different types.
- * Types pairing are not checked, it is covered by the
- * supported operations which are type specific.
- */
-void
-test__opexpr_to_pxffilter__differentTypes(void **state)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-	Var *arg_var = build_var(INT2OID, 3);
-	char* const_value = strdup("13"); /* will be free'd by const_to_str */
-	Const *arg_const = build_const(INT8OID, const_value);
-	OpExpr *expr = build_op_expr(arg_const, arg_var, 1864 /* int28lt */);
-
-
-	/* run test */
-	assert_true(opexpr_to_pxffilter(expr, filter));
-	PxfFilterDesc *expected = build_filter(
-			PXF_CONST_CODE, 0, "13",
-			PXF_ATTR_CODE, 3, NULL,
-			PXFOP_LT);
-	compare_filters(filter, expected);
-
-	pxf_free_filter(filter);
-	list_free_deep(expr->args); /* free all args */
-	pfree(expr);
-}
-
-void
-test__opexpr_to_pxffilter__unsupportedTypeCircle(void **state)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-	Var *arg_var = build_var(CIRCLEOID, 8);
-	Const *arg_const = build_const(CIRCLEOID, NULL);
-	OpExpr *expr = build_op_expr(arg_const, arg_var, 0 /* whatever */);
-
-	/* run test */
-	assert_false(opexpr_to_pxffilter(expr, filter));
-
-	pxf_free_filter(filter);
-
-	list_free_deep(expr->args); /* free all args */
-	pfree(expr);
-}
-
-void
-test__opexpr_to_pxffilter__twoVars(void **state)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-	Var *arg_var_left = build_var(INT4OID, 8);
-	Var *arg_var_right = build_var(INT4OID, 9);
-	OpExpr *expr = build_op_expr(arg_var_left, arg_var_right, 0 /* whatever */);
-
-	/* run test */
-	assert_false(opexpr_to_pxffilter(expr, filter));
-
-	pxf_free_filter(filter);
-
-	list_free_deep(expr->args); /* free all args */
-	pfree(expr);
-}
-
-void
-test__opexpr_to_pxffilter__unsupportedOpNot(void **state)
-{
-	PxfFilterDesc *filter = (PxfFilterDesc*) palloc0(sizeof(PxfFilterDesc));
-	Var *arg_var = build_var(INT2OID, 3);
-	char* const_value = strdup("not"); /* will be free'd by const_to_str */
-	Const *arg_const = build_const(INT2OID, const_value);
-	OpExpr *expr = build_op_expr(arg_const, arg_var, 1877 /* int2not */);
-
-	/* run test */
-	assert_false(opexpr_to_pxffilter(expr, filter));
-
-	pxf_free_filter(filter);
-
-	list_free_deep(expr->args); /* free all args */
-	pfree(expr);
-}
-
-void
-test__pxf_serialize_filter_list__oneFilter(void **state)
-{
-	List* filter_list = NIL;
-
-	PxfFilterDesc* filter = build_filter(
-			PXF_ATTR_CODE, 1, NULL,
-			PXF_CONST_CODE, 0, "1984",
-			PXFOP_GT);
-	filter_list = lappend(filter_list, filter);
-
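-	/* as exercised by the assertions below, the serialized form appears to be
-	 * a<attnum-1> for an attribute, c<value> for a constant and o<opcode> for an
-	 * operator, e.g. attnum 1, const 1984, PXFOP_GT -> "a0c1984o2" */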
-	char* result = pxf_serialize_filter_list(filter_list);
-	assert_string_equal(result, "a0c1984o2");
-
-	pxf_free_filter_list(filter_list);
-	filter_list = NIL;
-	pfree(result);
-
-	filter = build_filter(
-			PXF_ATTR_CODE, 8, NULL,
-			PXF_CONST_CODE, 0, "\"George Orwell\"",
-			PXFOP_EQ);
-	filter_list = lappend(filter_list, filter);
-
-	result = pxf_serialize_filter_list(filter_list);
-	assert_string_equal(result, "a7c\"George Orwell\"o5");
-
-	pxf_free_filter_list(filter_list);
-	pfree(result);
-}
-
-void
-test__pxf_serialize_filter_list__manyFilters(void **state)
-{
-	char* result = NULL;
-	List* filter_list = NIL;
-
-	PxfFilterDesc* filter1 = build_filter(
-			PXF_ATTR_CODE, 2, NULL,
-			PXF_CONST_CODE, 0, "1983",
-			PXFOP_GT);
-	PxfFilterDesc* filter2 = build_filter(
-			PXF_ATTR_CODE, 3, NULL,
-			PXF_CONST_CODE, 0, "1985",
-			PXFOP_LT);
-	PxfFilterDesc* filter3 = build_filter(
-			PXF_ATTR_CODE, 4, NULL,
-			PXF_CONST_CODE, 0, "\"George Orwell\"",
-			PXFOP_EQ);
-	PxfFilterDesc* filter4 = build_filter(
-			PXF_ATTR_CODE, 5, NULL,
-			PXF_CONST_CODE, 0, "\"Winston\"",
-			PXFOP_GE);
-
-	filter_list = lappend(filter_list, filter1);
-	filter_list = lappend(filter_list, filter2);
-
-	result = pxf_serialize_filter_list(filter_list);
-	assert_string_equal(result, "a1c1983o2a2c1985o1o7");
-	pfree(result);
-
-	filter_list = lappend(filter_list, filter3);
-
-	result = pxf_serialize_filter_list(filter_list);
-	assert_string_equal(result, "a1c1983o2a2c1985o1o7a3c\"George Orwell\"o5o7");
-	pfree(result);
-
-	filter_list = lappend(filter_list, filter4);
-
-	result = pxf_serialize_filter_list(filter_list);
-	assert_string_equal(result, "a1c1983o2a2c1985o1o7a3c\"George Orwell\"o5o7a4c\"Winston\"o4o7");
-	pfree(result);
-
-	pxf_free_filter_list(filter_list);
-	filter_list = NIL;
-}
-
-int 
-main(int argc, char* argv[]) 
-{
-	cmockery_parse_arguments(argc, argv);
-
-	const UnitTest tests[] = {
-			unit_test(test__supported_filter_type),
-			unit_test(test__const_to_str__null),
-			unit_test(test__const_to_str__int),
-			unit_test(test__const_to_str__text),
-			unit_test(test__const_to_str__boolean),
-			unit_test(test__const_to_str__NegativeCircle),
-			unit_test(test__opexpr_to_pxffilter__null),
-			unit_test(test__opexpr_to_pxffilter__unary_expr),
-			unit_test(test__opexpr_to_pxffilter__intGT),
-			unit_test(test__opexpr_to_pxffilter__allSupportedTypes),
-			unit_test(test__opexpr_to_pxffilter__attributeIsNull),
-			unit_test(test__opexpr_to_pxffilter__differentTypes),
-			unit_test(test__opexpr_to_pxffilter__unsupportedTypeCircle),
-			unit_test(test__opexpr_to_pxffilter__twoVars),
-			unit_test(test__opexpr_to_pxffilter__unsupportedOpNot),
-			unit_test(test__pxf_serialize_filter_list__oneFilter),
-			unit_test(test__pxf_serialize_filter_list__manyFilters)
-	};
-	return run_tests(tests);
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/pxfheaders_test.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/pxfheaders_test.c b/src/backend/access/external/test_discard/pxfheaders_test.c
deleted file mode 100644
index 159d684..0000000
--- a/src/backend/access/external/test_discard/pxfheaders_test.c
+++ /dev/null
@@ -1,223 +0,0 @@
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "../pxfheaders.c"
-
-static GPHDUri *gphd_uri = NULL;
-static PxfInputData *input_data = NULL;
-static extvar_t *mock_extvar = NULL;
-
-static char *old_pxf_remote_service_login = NULL;
-static char *old_pxf_remote_service_secret = NULL;
-
-void
-test__build_http_header__remote_login_is_null(void **state)
-{
-	expect_external_vars();
-
-	expect_churl_headers("X-GP-SEGMENT-ID", mock_extvar->GP_SEGMENT_ID);
-	expect_churl_headers("X-GP-SEGMENT-COUNT", mock_extvar->GP_SEGMENT_COUNT);
-	expect_churl_headers("X-GP-XID", mock_extvar->GP_XID);
-	expect_churl_headers_alignment();
-	expect_churl_headers("X-GP-URL-HOST", gphd_uri->host);
-	expect_churl_headers("X-GP-URL-PORT", gphd_uri->port);
-	expect_churl_headers("X-GP-DATA-DIR", gphd_uri->data);
-	expect_churl_headers("X-GP-URI", gphd_uri->uri);
-	expect_churl_headers("X-GP-HAS-FILTER", "0");
-
-	build_http_header(input_data);
-}
-
-void
-test__build_http_header__remote_login_is_not_null(void **state)
-{
-	expect_external_vars();
-
-	expect_churl_headers("X-GP-SEGMENT-ID", mock_extvar->GP_SEGMENT_ID);
-	expect_churl_headers("X-GP-SEGMENT-COUNT", mock_extvar->GP_SEGMENT_COUNT);
-	expect_churl_headers("X-GP-XID", mock_extvar->GP_XID);
-	expect_churl_headers_alignment();
-	expect_churl_headers("X-GP-URL-HOST", gphd_uri->host);
-	expect_churl_headers("X-GP-URL-PORT", gphd_uri->port);
-	expect_churl_headers("X-GP-DATA-DIR", gphd_uri->data);
-	expect_churl_headers("X-GP-URI", gphd_uri->uri);
-	expect_churl_headers("X-GP-HAS-FILTER", "0");
-
-	pxf_remote_service_login = "not a valid login";
-	expect_churl_headers("X-GP-REMOTE-USER", pxf_remote_service_login);
-
-	build_http_header(input_data);
-}
-
-void
-test__build_http_header__remote_secret_is_not_null(void **state)
-{
-	expect_external_vars();
-
-	expect_churl_headers("X-GP-SEGMENT-ID", mock_extvar->GP_SEGMENT_ID);
-	expect_churl_headers("X-GP-SEGMENT-COUNT", mock_extvar->GP_SEGMENT_COUNT);
-	expect_churl_headers("X-GP-XID", mock_extvar->GP_XID);
-	expect_churl_headers_alignment();
-	expect_churl_headers("X-GP-URL-HOST", gphd_uri->host);
-	expect_churl_headers("X-GP-URL-PORT", gphd_uri->port);
-	expect_churl_headers("X-GP-DATA-DIR", gphd_uri->data);
-	expect_churl_headers("X-GP-URI", gphd_uri->uri);
-	expect_churl_headers("X-GP-HAS-FILTER", "0");
-
-	pxf_remote_service_secret = "password";
-	expect_churl_headers("X-GP-REMOTE-PASS", pxf_remote_service_secret);
-
-	build_http_header(input_data);
-}
-
-void
-test__build_http_header__remote_credentials_are_not_null(void **state)
-{
-	expect_external_vars();
-
-	expect_churl_headers("X-GP-SEGMENT-ID", mock_extvar->GP_SEGMENT_ID);
-	expect_churl_headers("X-GP-SEGMENT-COUNT", mock_extvar->GP_SEGMENT_COUNT);
-	expect_churl_headers("X-GP-XID", mock_extvar->GP_XID);
-	expect_churl_headers_alignment();
-	expect_churl_headers("X-GP-URL-HOST", gphd_uri->host);
-	expect_churl_headers("X-GP-URL-PORT", gphd_uri->port);
-	expect_churl_headers("X-GP-DATA-DIR", gphd_uri->data);
-	expect_churl_headers("X-GP-URI", gphd_uri->uri);
-	expect_churl_headers("X-GP-HAS-FILTER", "0");
-
-	pxf_remote_service_login = "not a valid login";
-	expect_churl_headers("X-GP-REMOTE-USER", pxf_remote_service_login);
-
-	pxf_remote_service_secret = "password";
-	expect_churl_headers("X-GP-REMOTE-PASS", pxf_remote_service_secret);
-
-	build_http_header(input_data);
-}
-
-/*
- * Add an expect clause for a churl_headers_append() call with the given
- * key and value (a minimal standalone sketch of this mocking pattern
- * follows this file's diff).
- */
-void
-expect_churl_headers(const char *key, const char *value)
-{
-	expect_value(churl_headers_append, headers, input_data->headers);
-	expect_string(churl_headers_append, key, key);
-	expect_string(churl_headers_append, value, value);
-	will_be_called(churl_headers_append);
-}
-
-/*
- * Add the X-GP-ALIGNMENT header.
- * To avoid copy-pasting the logic from the production code,
- * only the two common pointer sizes are supported;
- * anything else fails the test.
- */
-void 
-expect_churl_headers_alignment()
-{
-	if (sizeof(char*) == 4)
-		expect_churl_headers("X-GP-ALIGNMENT", "4");
-	else if (sizeof(char*) == 8)
-		expect_churl_headers("X-GP-ALIGNMENT", "8");
-	else
-		assert_false(true);
-}
-
-void
-common_setup (void** state)
-{
-	store_gucs();
-	setup_gphd_uri();
-	setup_input_data();
-	setup_external_vars();
-}
-
-void
-store_gucs()
-{
-	old_pxf_remote_service_login = pxf_remote_service_login;
-	old_pxf_remote_service_secret = pxf_remote_service_secret;
-}
-
-void
-setup_gphd_uri()
-{
-	gphd_uri = palloc0(sizeof(GPHDUri));
-	gphd_uri->host = "there's a place you're always welcome";
-	gphd_uri->port = "it's as nice as it can be";
-	gphd_uri->data = "everyone can get in";
-	gphd_uri->uri = "'cos it's absolutely free";
-}
-
-void
-setup_input_data()
-{
-	input_data = palloc0(sizeof(PxfInputData));
-	input_data->gphduri = gphd_uri;
-	input_data->headers = 0xBAADF00D;
-}
-
-void
-setup_external_vars()
-{
-	mock_extvar = palloc0(sizeof(extvar_t));
-
-	snprintf(mock_extvar->GP_SEGMENT_ID, sizeof(mock_extvar->GP_SEGMENT_ID), "badID");
-	snprintf(mock_extvar->GP_SEGMENT_COUNT, sizeof(mock_extvar->GP_SEGMENT_COUNT), "lots");
-	snprintf(mock_extvar->GP_XID, sizeof(mock_extvar->GP_XID), "badXID");
-}
-
-void expect_external_vars()
-{
-	expect_any(external_set_env_vars, extvar);
-	expect_string(external_set_env_vars, uri, gphd_uri->uri);
-	expect_value(external_set_env_vars, csv, false);
-	expect_value(external_set_env_vars, escape, NULL);
-	expect_value(external_set_env_vars, quote, NULL);
-	expect_value(external_set_env_vars, header, false);
-	expect_value(external_set_env_vars, scancounter, 0);
-	will_assign_memory(external_set_env_vars, extvar, mock_extvar, sizeof(extvar_t));
-	will_be_called(external_set_env_vars);
-}
-
-/*
- * Common resource cleanup
- */
-void 
-common_teardown (void** state)
-{
-	pfree(mock_extvar);
-	pfree(input_data);
-	pfree(gphd_uri);
-
-	/* Reset GUCs so tests won't have to */
-	restore_gucs();
-}
-
-void restore_gucs()
-{
-	pxf_remote_service_login = old_pxf_remote_service_login;
-	pxf_remote_service_secret = old_pxf_remote_service_secret;
-}
-
-int 
-main(int argc, char* argv[]) 
-{
-	cmockery_parse_arguments(argc, argv);
-
-	const UnitTest tests[] = 
-	{
-		unit_test_setup_teardown(test__build_http_header__remote_login_is_null, 
-								 common_setup, common_teardown),
-		unit_test_setup_teardown(test__build_http_header__remote_login_is_not_null, 
-								 common_setup, common_teardown),
-		unit_test_setup_teardown(test__build_http_header__remote_secret_is_not_null, 
-								 common_setup, common_teardown),
-		unit_test_setup_teardown(test__build_http_header__remote_credentials_are_not_null, 
-								 common_setup, common_teardown)
-	};
-
-	return run_tests(tests);
-}
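
These tests lean on one cmockery idiom throughout: queue one expectation per
argument of the mocked collaborator, then let the mock consume and verify the
expectations when the code under test calls it. As a minimal self-contained
sketch of that idiom -- all names here are hypothetical, and it sticks to the
stock expect_string()/check_expected() API rather than the will_be_called()
extension used above:

#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include "cmockery.h"

/* Hypothetical mock of an append-style collaborator: every argument is
 * checked against the expectation the test queued for it. */
static void headers_append(const char *key, const char *value)
{
	check_expected(key);
	check_expected(value);
}

/* Mirror of expect_churl_headers(): one expect_* call per argument. */
static void expect_header(const char *key, const char *value)
{
	expect_string(headers_append, key, key);
	expect_string(headers_append, value, value);
}

static void test__headers_append(void **state)
{
	expect_header("X-GP-URL-HOST", "localhost");
	/* calling the mock consumes the expectations; a leftover expectation
	 * or an unexpected call fails the test */
	headers_append("X-GP-URL-HOST", "localhost");
}

int main(int argc, char *argv[])
{
	cmockery_parse_arguments(argc, argv);
	const UnitTest tests[] = { unit_test(test__headers_append) };
	return run_tests(tests);
}

expect_churl_headers() above is the same shape, with churl_headers_append() as
the mocked collaborator and build_http_header() as the caller under test.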

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/pxfmasterapi_test.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/pxfmasterapi_test.c b/src/backend/access/external/test_discard/pxfmasterapi_test.c
deleted file mode 100644
index f28fd09..0000000
--- a/src/backend/access/external/test_discard/pxfmasterapi_test.c
+++ /dev/null
@@ -1,231 +0,0 @@
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "../pxfmasterapi.c"
-#include "lib/stringinfo.h"
-#include "utils/elog.h"
-
-/*
- * Tests for the HA failover mechanism.
- * pxfmasterapi.c contains several functions that issue REST calls to the PXF agent:
- * get_data_fragment_list(), get_datanode_rest_servers() and get_data_statistics().
- * All of these functions need to support the case where PXF is deployed on an HA
- * HDFS namenode cluster. In such a setup the namenode is actually represented by
- * two machines, one of which can become inactive.
- * For this reason the REST logic is wrapped in failover logic in rest_request(),
- * which all of the REST calls above go through. The tests below validate
- * rest_request() (a simplified standalone model of its retry policy follows this
- * file's diff).
- */
-
-/*
- * Trigger the first exception thrown from rest_request.
- * This function is used with will_be_called_with_sideeffect().
- */
-void
-FirstException(void)
-{
-	elog(ERROR, "first exception");
-}
-
-/*
- * Trigger the second exception thrown from rest_request.
- * This function is used with will_be_called_with_sideeffect().
- */
-void
-SecondException(void)
-{
-	elog(ERROR, "second exception");
-}
-
-/*
- * SUT: rest_request
- * call_rest throws an error while not in HA mode
- */
-void
-test__rest_request__callRestThrowsNoHA(void **state)
-{
-	GPHDUri *hadoop_uri = (GPHDUri*)  palloc0(sizeof(GPHDUri));
-	hadoop_uri->host = pstrdup("host1");
-	hadoop_uri->port = pstrdup("port1");
-	ClientContext* client_context =  (ClientContext*)  palloc0(sizeof(ClientContext));
-	char *restMsg = "empty message";
-
-	expect_any(call_rest, hadoop_uri);
-	expect_any(call_rest, client_context);
-	expect_any(call_rest, rest_msg);
-	will_be_called_with_sideeffect(call_rest, &FirstException, NULL);
-
-	/* test */
-	PG_TRY();
-	{
-		rest_request(hadoop_uri, client_context, restMsg);
-	}
-	PG_CATCH();
-	{
-		pfree(hadoop_uri->host);
-		pfree(hadoop_uri->port);
-		pfree(hadoop_uri);
-		pfree(client_context);
-
-		/* fake a memory context so CopyErrorData() can run in the test harness */
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error message */
-		assert_string_equal(edata->message, "first exception");
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * SUT: rest_request
- * call_rest throws an error while in HA mode,
- * and the failover method finds an active IP, so the second
- * call to call_rest does not throw an exception.
- */
-void
-test__rest_request__callRestThrowsHAFirstTime(void **state)
-{
-	GPHDUri *hadoop_uri = (GPHDUri*)  palloc0(sizeof(GPHDUri));
-	hadoop_uri->host = pstrdup("host1");
-	hadoop_uri->port = pstrdup("port1");
-	NNHAConf *ha_nodes = (NNHAConf*)  palloc0(sizeof(NNHAConf));
-    hadoop_uri->ha_nodes = ha_nodes;
-	ha_nodes->nodes = (char *[]){"host1", "host2"};
-	ha_nodes->restports = (char *[]){"port1", "port2"};
-	ha_nodes->numn = 2;
-
-	ClientContext* client_context =  (ClientContext*)  palloc0(sizeof(ClientContext));
-	char *restMsg = "empty message";
-
-	expect_any(call_rest, hadoop_uri);
-	expect_any(call_rest, client_context);
-	expect_any(call_rest, rest_msg);
-	will_be_called_with_sideeffect(call_rest, &FirstException, NULL);
-
-	/* the second call from ha_failover */
-	expect_any(call_rest, hadoop_uri);
-	expect_any(call_rest, client_context);
-	expect_any(call_rest, rest_msg);
-	will_be_called(call_rest);
-
-
-	/* test */
-	rest_request(hadoop_uri, client_context, restMsg);
-
-	pfree(hadoop_uri);
-	pfree(client_context);
-}
-
-/*
- * SUT: rest_request
- * call_rest throws an error while in HA mode,
- * and the failover method finds an active IP, so the second
- * call to call_rest is issued on the second IP. This call also throws
- * an exception - but this time the exception is not caught.
- */
-void
-test__rest_request__callRestThrowsHASecondTime(void **state)
-{
-	GPHDUri *hadoop_uri = (GPHDUri*)  palloc0(sizeof(GPHDUri));
-	hadoop_uri->host = pstrdup("host1");
-	hadoop_uri->port = pstrdup("port1");
-	NNHAConf *ha_nodes = (NNHAConf*)  palloc0(sizeof(NNHAConf));
-    hadoop_uri->ha_nodes = ha_nodes;
-	ha_nodes->nodes = (char *[]){"host1", "host2"};
-	ha_nodes->restports = (char *[]){"port1", "port2"};
-	ha_nodes->numn = 2;
-
-	ClientContext* client_context =  (ClientContext*)  palloc0(sizeof(ClientContext));
-	char *restMsg = "empty message";
-
-	expect_any(call_rest, hadoop_uri);
-	expect_any(call_rest, client_context);
-	expect_any(call_rest, rest_msg);
-	will_be_called_with_sideeffect(call_rest, &FirstException, NULL);
-
-	/* the second call from ha_failover */
-	expect_any(call_rest, hadoop_uri);
-	expect_any(call_rest, client_context);
-	expect_any(call_rest, rest_msg);
-	will_be_called_with_sideeffect(call_rest, &SecondException, NULL);
-
-
-	/* test */
-	PG_TRY();
-	{
-		rest_request(hadoop_uri, client_context, restMsg);
-	}
-	PG_CATCH();
-	{
-		pfree(hadoop_uri->host);
-		pfree(hadoop_uri->port);
-		pfree(hadoop_uri);
-		pfree(client_context);
-
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error message */
-		assert_string_equal(edata->message, "second exception");
-		/* the first exception was caught by rest_request() */
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * SUT: rest_request
- * the first call to call_rest succeeds, since the first IP is valid;
- * no exceptions are thrown.
- */
-void
-test__rest_request__callRestHASuccessFromTheFirstCall(void **state)
-{
-	GPHDUri *hadoop_uri = (GPHDUri*)  palloc0(sizeof(GPHDUri));
-	hadoop_uri->host = pstrdup("host1");
-	hadoop_uri->port = pstrdup("port1");
-	NNHAConf *ha_nodes = (NNHAConf*)  palloc0(sizeof(NNHAConf));
-    hadoop_uri->ha_nodes = ha_nodes;
-	ha_nodes->nodes = (char *[]){"host1", "host2"};
-	ha_nodes->restports = (char *[]){"port1", "port2"};
-	ha_nodes->numn = 2;
-
-	ClientContext* client_context =  (ClientContext*)  palloc0(sizeof(ClientContext));
-	char *restMsg = "empty message";
-
-	expect_any(call_rest, hadoop_uri);
-	expect_any(call_rest, client_context);
-	expect_any(call_rest, rest_msg);
-	will_be_called(call_rest);
-
-	/* test */
-	rest_request(hadoop_uri, client_context, restMsg);
-
-	pfree(hadoop_uri->host);
-	pfree(hadoop_uri->port);
-	pfree(hadoop_uri);
-	pfree(client_context);
-}
-
-
-int 
-main(int argc, char *argv[]) 
-{
-	cmockery_parse_arguments(argc, argv);
-
-	const UnitTest tests[] = {
-		    unit_test(test__rest_request__callRestThrowsNoHA),
-		    unit_test(test__rest_request__callRestThrowsHAFirstTime),
-		    unit_test(test__rest_request__callRestThrowsHASecondTime),
-		    unit_test(test__rest_request__callRestHASuccessFromTheFirstCall)
-	};
-	return run_tests(tests);
-}
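
rest_request() itself is not part of this diff, so only as a rough model: the
retry policy these tests pin down amounts to looping over the HA namenodes and
swallowing every failure except the last. In the sketch below all names are
hypothetical, and boolean returns stand in for the elog(ERROR)/PG_CATCH control
flow of the real code:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the NNHAConf node list and for call_rest. */
typedef struct { const char *host; const char *port; } HANode;
typedef bool (*rest_call_fn)(const HANode *node, const char *msg);

/* Try each namenode in turn. A failure on any node but the last is
 * swallowed and the next node is tried (the "first exception" case);
 * a failure on the last node propagates to the caller (the "second
 * exception" case). */
static bool rest_request_model(const HANode *nodes, int numn,
                               rest_call_fn call_rest, const char *msg)
{
	for (int i = 0; i < numn; i++)
	{
		if (call_rest(&nodes[i], msg))
			return true;	/* success: no (further) failover needed */
		/* failure: fall through and fail over to the next node */
	}
	return false;		/* every node failed; the caller reports it */
}

/* Fake call_rest: only the second namenode is reachable, mirroring the
 * callRestThrowsHAFirstTime scenario above. */
static bool only_second_node_up(const HANode *node, const char *msg)
{
	(void) msg;
	return strcmp(node->host, "host2") == 0;
}

int main(void)
{
	HANode nodes[] = { {"host1", "port1"}, {"host2", "port2"} };

	printf("request %s\n",
	       rest_request_model(nodes, 2, only_second_node_up, "empty message")
	           ? "succeeded after failover" : "failed on every node");
	return 0;
}

Under this model, callRestThrowsHAFirstTime is the run where nodes[0] fails and
nodes[1] succeeds, and callRestThrowsHASecondTime is the run where both fail.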

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7c2f615d/src/backend/access/external/test_discard/pxfuriparser_test.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test_discard/pxfuriparser_test.c b/src/backend/access/external/test_discard/pxfuriparser_test.c
deleted file mode 100644
index ecde5aa..0000000
--- a/src/backend/access/external/test_discard/pxfuriparser_test.c
+++ /dev/null
@@ -1,352 +0,0 @@
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include "cmockery.h"
-
-#include "c.h"
-#include "../pxfuriparser.c"
-
-
-/*
- * Test parsing of a valid URI as given in the LOCATION clause of a PXF
- * external table (a standalone sketch of the per-option validation rules
- * exercised below follows this file's diff).
- */
-void 
-test__parseGPHDUri__ValidURI(void **state)
-{
-	char* uri = "pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=SomeFragmenter&ACCESSOR=SomeAccessor&RESOLVER=SomeResolver&ANALYZER=SomeAnalyzer";
-	List* options = NIL;
-	ListCell* cell = NULL;
-	OptionData* option = NULL;
-
-	GPHDUri* parsed = parseGPHDUri(uri);
-
-	assert_true(parsed != NULL);
-	assert_string_equal(parsed->uri, uri);
-
-	assert_string_equal(parsed->protocol, "pxf");
-	assert_string_equal(parsed->host, "1.2.3.4");
-	assert_string_not_equal(parsed->port, "5678"); /* it should be pxf_service_port */
-	assert_string_equal(parsed->data, "some/path/and/table.tbl");
-
-	options = parsed->options;
-	assert_int_equal(list_length(options), 4);
-
-	cell = list_nth_cell(options, 0);
-	option = lfirst(cell);
-	assert_string_equal(option->key, "FRAGMENTER");
-	assert_string_equal(option->value, "SomeFragmenter");
-
-	cell = list_nth_cell(options, 1);
-	option = lfirst(cell);
-	assert_string_equal(option->key, "ACCESSOR");
-	assert_string_equal(option->value, "SomeAccessor");
-
-	cell = list_nth_cell(options, 2);
-	option = lfirst(cell);
-	assert_string_equal(option->key, "RESOLVER");
-	assert_string_equal(option->value, "SomeResolver");
-
-	cell = list_nth_cell(options, 3);
-	option = lfirst(cell);
-	assert_string_equal(option->key, "ANALYZER");
-	assert_string_equal(option->value, "SomeAnalyzer");
-
-	assert_true(parsed->fragments == NULL);
-
-	freeGPHDUri(parsed);
-}
-
-/*
- * Negative test: parsing of a URI without the protocol delimiter "://"
- */
-void
-test__parseGPHDUri__NegativeTestNoProtocol(void **state)
-{
-	char* uri_no_protocol = "pxf:/1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=HdfsDataFragmenter";
-
-	/* Set up the test */
-	PG_TRY();
-	{
-		/* This will throw an ereport(ERROR). */
-		GPHDUri* parsed = parseGPHDUri(uri_no_protocol);
-	}
-	PG_CATCH();
-	{
-		/* fake a memory context so CopyErrorData() can run in the test harness */
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_SYNTAX_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, "Invalid URI pxf:/1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=HdfsDataFragmenter");
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * Negative test: parsing of a URI without an options part
- */
-void
-test__parseGPHDUri__NegativeTestNoOptions(void **state)
-{
-	char* uri_no_options = "pxf://1.2.3.4:5678/some/path/and/table.tbl";
-
-	/* Set up the test */
-	PG_TRY();
-	{
-		/* This will throw an ereport(ERROR). */
-		GPHDUri* parsed = parseGPHDUri(uri_no_options);
-	}
-	PG_CATCH();
-	{
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_SYNTAX_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, "Invalid URI pxf://1.2.3.4:5678/some/path/and/table.tbl: missing options section");
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * Negative test: parsing of a URI with a missing '='
- */
-void
-test__parseGPHDUri__NegativeTestMissingEqual(void **state)
-{
-	char* uri_missing_equal = "pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER";
-
-	/* Set up the test */
-	PG_TRY();
-	{
-		/* This will throw an ereport(ERROR). */
-		GPHDUri* parsed = parseGPHDUri(uri_missing_equal);
-	}
-	PG_CATCH();
-	{
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_SYNTAX_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, "Invalid URI pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER: option 'FRAGMENTER' missing '='");
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * Negative test: parsing of a URI with a duplicate '='
- */
-void
-test__parseGPHDUri__NegativeTestDuplicateEquals(void **state)
-{
-	char* uri_duplicate_equals = "pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=HdfsDataFragmenter=DuplicateFragmenter";
-
-	/* Set up the test */
-	PG_TRY();
-	{
-		/* This will throw an ereport(ERROR). */
-		GPHDUri* parsed = parseGPHDUri(uri_duplicate_equals);
-	}
-	PG_CATCH();
-	{
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_SYNTAX_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, "Invalid URI pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=HdfsDataFragmenter=DuplicateFragmenter: option 'FRAGMENTER=HdfsDataFragmenter=DuplicateFragmenter' contains duplicate '='");
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * Negative test: parsing of a URI with a missing key
- */
-void
-test__parseGPHDUri__NegativeTestMissingKey(void **state)
-{
-	char* uri_missing_key = "pxf://1.2.3.4:5678/some/path/and/table.tbl?=HdfsDataFragmenter";
-
-	/* Set up the test */
-	PG_TRY();
-	{
-		/* This will throw an ereport(ERROR). */
-		GPHDUri* parsed = parseGPHDUri(uri_missing_key);
-	}
-	PG_CATCH();
-	{
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_SYNTAX_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, "Invalid URI pxf://1.2.3.4:5678/some/path/and/table.tbl?=HdfsDataFragmenter: option '=HdfsDataFragmenter' missing key before '='");
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * Negative test: parsing of a URI with a missing value
- */
-void
-test__parseGPHDUri__NegativeTestMissingValue(void **state)
-{
-	char* uri_missing_value = "pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=";
-
-	/* Set up the test */
-	PG_TRY();
-	{
-		/* This will throw an ereport(ERROR). */
-		GPHDUri* parsed = parseGPHDUri(uri_missing_value);
-	}
-	PG_CATCH();
-	{
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_SYNTAX_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, "Invalid URI pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=: option 'FRAGMENTER=' missing value after '='");
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * Test GPHDUri_verify_no_duplicate_options: valid URI
- */
-void
-test__GPHDUri_verify_no_duplicate_options__ValidURI(void **state)
-{
-	char* valid_uri = "pxf://1.2.3.4:5678/some/path/and/table.tbl?Profile=a&Analyzer=b";
-
-	/* Set up the test */
-	GPHDUri* parsed = parseGPHDUri(valid_uri);
-	GPHDUri_verify_no_duplicate_options(parsed);
-	freeGPHDUri(parsed);
-}
-
-/*
- * Negative test of GPHDUri_verify_no_duplicate_options: parsing of a URI with duplicate options
- */
-void
-test__GPHDUri_verify_no_duplicate_options__NegativeTestDuplicateOpts(void **state)
-{
-	char* uri_duplicate_opts = "pxf://1.2.3.4:5678/some/path/and/table.tbl?Profile=a&Analyzer=b&PROFILE=c";
-
-	/* Set up the test */
-	PG_TRY();
-	{
-		GPHDUri* parsed = parseGPHDUri(uri_duplicate_opts);
-		/* This will throw an ereport(ERROR). */
-		GPHDUri_verify_no_duplicate_options(parsed);
-	}
-	PG_CATCH();
-	{
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_SYNTAX_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, "Invalid URI pxf://1.2.3.4:5678/some/path/and/table.tbl?Profile=a&Analyzer=b&PROFILE=c: Duplicate option(s): PROFILE");
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-/*
- * Test GPHDUri_verify_core_options_exist with a valid URI
- */
-void
-test__GPHDUri_verify_core_options_exist__ValidURI(void **state)
-{
-	char* valid_uri = "pxf://1.2.3.4:5678/some/path/and/table.tbl?Fragmenter=1&Accessor=2&Resolver=3";
-
-	/* Set up the test */
-	GPHDUri* parsed = parseGPHDUri(valid_uri);
-	List *coreOptions = list_make3("FRAGMENTER", "ACCESSOR", "RESOLVER");
-	GPHDUri_verify_core_options_exist(parsed, coreOptions);
-	freeGPHDUri(parsed);
-	list_free(coreOptions);
-}
-
-/*
- * Negative test of GPHDUri_verify_core_options_exist: missing core options
- */
-void
-test__GPHDUri_verify_core_options_exist__NegativeTestMissingCoreOpts(void **state)
-{
-	char* missing_core_opts = "pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=a";
-	List *coreOptions;
-	/* Set up the test */
-	PG_TRY();
-	{
-		GPHDUri* parsed = parseGPHDUri(missing_core_opts);
-		coreOptions = list_make3("FRAGMENTER", "ACCESSOR", "RESOLVER");
-		/* This will throw an ereport(ERROR). */
-		GPHDUri_verify_core_options_exist(parsed, coreOptions);
-	}
-	PG_CATCH();
-	{
-		CurrentMemoryContext = 1;
-		ErrorData *edata = CopyErrorData();
-
-		/* Validate the expected error */
-		assert_true(edata->sqlerrcode == ERRCODE_SYNTAX_ERROR);
-		assert_true(edata->elevel == ERROR);
-		assert_string_equal(edata->message, "Invalid URI pxf://1.2.3.4:5678/some/path/and/table.tbl?FRAGMENTER=a: PROFILE or ACCESSOR and RESOLVER option(s) missing");
-		list_free(coreOptions);
-		return;
-	}
-	PG_END_TRY();
-
-	assert_true(false);
-}
-
-int 
-main(int argc, char* argv[]) 
-{
-	cmockery_parse_arguments(argc, argv);
-
-	const UnitTest tests[] = {
-			unit_test(test__parseGPHDUri__ValidURI),
-			unit_test(test__parseGPHDUri__NegativeTestNoProtocol),
-			unit_test(test__parseGPHDUri__NegativeTestNoOptions),
-			unit_test(test__parseGPHDUri__NegativeTestMissingEqual),
-			unit_test(test__parseGPHDUri__NegativeTestDuplicateEquals),
-			unit_test(test__parseGPHDUri__NegativeTestMissingKey),
-			unit_test(test__parseGPHDUri__NegativeTestMissingValue),
-			unit_test(test__GPHDUri_verify_no_duplicate_options__ValidURI),
-			unit_test(test__GPHDUri_verify_no_duplicate_options__NegativeTestDuplicateOpts),
-			unit_test(test__GPHDUri_verify_core_options_exist__ValidURI),
-			unit_test(test__GPHDUri_verify_core_options_exist__NegativeTestMissingCoreOpts)
-	};
-	return run_tests(tests);
-}
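
The negative tests above enumerate the malformed option shapes that
parseGPHDUri() must reject. The per-option rule they pin down -- exactly one
'=', with non-empty text on both sides -- can be checked in isolation; the
sketch below is a hypothetical standalone helper, not the parser's actual
code:

#include <stdio.h>
#include <string.h>

/* Return NULL if "key=value" is well formed, otherwise a description of
 * the defect, echoing the error fragments asserted in the tests above. */
static const char *validate_option(const char *opt)
{
	const char *eq = strchr(opt, '=');

	if (eq == NULL)
		return "missing '='";
	if (eq == opt)
		return "missing key before '='";
	if (eq[1] == '\0')
		return "missing value after '='";
	if (strchr(eq + 1, '=') != NULL)
		return "contains duplicate '='";
	return NULL;
}

int main(void)
{
	const char *samples[] = {
		"FRAGMENTER=SomeFragmenter",		/* valid */
		"FRAGMENTER",				/* NegativeTestMissingEqual */
		"=HdfsDataFragmenter",			/* NegativeTestMissingKey */
		"FRAGMENTER=",				/* NegativeTestMissingValue */
		"FRAGMENTER=HdfsDataFragmenter=Dup"	/* NegativeTestDuplicateEquals */
	};

	for (int i = 0; i < 5; i++)
	{
		const char *err = validate_option(samples[i]);
		printf("%-36s -> %s\n", samples[i], err ? err : "ok");
	}
	return 0;
}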

