nifi-commits mailing list archives

From jeremyd...@apache.org
Subject [34/51] [partial] nifi-minifi-cpp git commit: MINIFI-372: Replace leveldb with RocksDB
Date Mon, 09 Oct 2017 16:25:14 GMT
http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/TARGETS
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/TARGETS b/thirdparty/rocksdb/TARGETS
new file mode 100644
index 0000000..3fac4a7
--- /dev/null
+++ b/thirdparty/rocksdb/TARGETS
@@ -0,0 +1,533 @@
+
+import os
+
+TARGETS_PATH = os.path.dirname(__file__)
+REPO_PATH = TARGETS_PATH[(TARGETS_PATH.find('fbcode/') + len('fbcode/')):] + "/"
+BUCK_BINS = "buck-out/gen/" + REPO_PATH
+TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
+rocksdb_compiler_flags = [
+  "-fno-builtin-memcmp",
+  "-DROCKSDB_PLATFORM_POSIX",
+  "-DROCKSDB_LIB_IO_POSIX",
+  "-DROCKSDB_FALLOCATE_PRESENT",
+  "-DROCKSDB_MALLOC_USABLE_SIZE",
+  "-DROCKSDB_RANGESYNC_PRESENT",
+  "-DROCKSDB_SCHED_GETCPU_PRESENT",
+  "-DROCKSDB_SUPPORT_THREAD_LOCAL",
+  "-DOS_LINUX",
+  "-DROCKSDB_UBSAN_RUN",
+  # Flags to enable libs we include
+  "-DSNAPPY",
+  "-DZLIB",
+  "-DBZIP2",
+  "-DLZ4",
+  "-DZSTD",
+  "-DGFLAGS=gflags",
+  "-DNUMA",
+  "-DTBB",
+  # Needed to compile in fbcode
+  "-Wno-expansion-to-defined",
+]
+
+rocksdb_external_deps = [
+  ('bzip2', None, 'bz2'),
+  ('snappy', None, "snappy"),
+  ('zlib', None, 'z'),
+  ('gflags', None, 'gflags'),
+  ('lz4', None, 'lz4'),
+  ('zstd', None),
+  ('tbb', None),
+  ("numa", None, "numa"),
+  ("googletest", None, "gtest"),
+]
+
+rocksdb_preprocessor_flags = [
+  # Directories with files for #include
+  "-I" + REPO_PATH + "include/",
+  "-I" + REPO_PATH,
+]
+
+rocksdb_arch_preprocessor_flags = {
+  "x86_64": ["-DHAVE_SSE42"],
+}
+
+cpp_library(
+    name = "rocksdb_lib",
+    headers = AutoHeaders.RECURSIVE_GLOB,
+    srcs = [
+      "cache/clock_cache.cc",
+      "cache/lru_cache.cc",
+      "cache/sharded_cache.cc",
+      "db/builder.cc",
+      "db/c.cc",
+      "db/column_family.cc",
+      "db/compacted_db_impl.cc",
+      "db/compaction.cc",
+      "db/compaction_iterator.cc",
+      "db/compaction_job.cc",
+      "db/compaction_picker.cc",
+      "db/compaction_picker_universal.cc",
+      "db/convenience.cc",
+      "db/db_filesnapshot.cc",
+      "db/db_impl.cc",
+      "db/db_impl_write.cc",
+      "db/db_impl_compaction_flush.cc",
+      "db/db_impl_files.cc",
+      "db/db_impl_open.cc",
+      "db/db_impl_debug.cc",
+      "db/db_impl_experimental.cc",
+      "db/db_impl_readonly.cc",
+      "db/db_info_dumper.cc",
+      "db/db_iter.cc",
+      "db/dbformat.cc",
+      "db/event_helpers.cc",
+      "db/experimental.cc",
+      "db/external_sst_file_ingestion_job.cc",
+      "db/file_indexer.cc",
+      "db/flush_job.cc",
+      "db/flush_scheduler.cc",
+      "db/forward_iterator.cc",
+      "db/internal_stats.cc",
+      "db/log_reader.cc",
+      "db/log_writer.cc",
+      "db/malloc_stats.cc",
+      "db/managed_iterator.cc",
+      "db/memtable.cc",
+      "db/memtable_list.cc",
+      "db/merge_helper.cc",
+      "db/merge_operator.cc",
+      "db/range_del_aggregator.cc",
+      "db/repair.cc",
+      "db/snapshot_impl.cc",
+      "db/table_cache.cc",
+      "db/table_properties_collector.cc",
+      "db/transaction_log_impl.cc",
+      "db/version_builder.cc",
+      "db/version_edit.cc",
+      "db/version_set.cc",
+      "db/wal_manager.cc",
+      "db/write_batch.cc",
+      "db/write_batch_base.cc",
+      "db/write_controller.cc",
+      "db/write_thread.cc",
+      "env/env.cc",
+      "env/env_chroot.cc",
+      "env/env_encryption.cc",
+      "env/env_hdfs.cc",
+      "env/env_posix.cc",
+      "env/io_posix.cc",
+      "env/mock_env.cc",
+      "memtable/alloc_tracker.cc",
+      "memtable/hash_cuckoo_rep.cc",
+      "memtable/hash_linklist_rep.cc",
+      "memtable/hash_skiplist_rep.cc",
+      "memtable/skiplistrep.cc",
+      "memtable/vectorrep.cc",
+      "memtable/write_buffer_manager.cc",
+      "monitoring/histogram.cc",
+      "monitoring/histogram_windowing.cc",
+      "monitoring/instrumented_mutex.cc",
+      "monitoring/iostats_context.cc",
+      "monitoring/perf_context.cc",
+      "monitoring/perf_level.cc",
+      "monitoring/statistics.cc",
+      "monitoring/thread_status_impl.cc",
+      "monitoring/thread_status_updater.cc",
+      "monitoring/thread_status_updater_debug.cc",
+      "monitoring/thread_status_util.cc",
+      "monitoring/thread_status_util_debug.cc",
+      "options/cf_options.cc",
+      "options/db_options.cc",
+      "options/options.cc",
+      "options/options_helper.cc",
+      "options/options_parser.cc",
+      "options/options_sanity_check.cc",
+      "port/port_posix.cc",
+      "port/stack_trace.cc",
+      "table/adaptive_table_factory.cc",
+      "table/block.cc",
+      "table/block_based_filter_block.cc",
+      "table/block_based_table_builder.cc",
+      "table/block_based_table_factory.cc",
+      "table/block_based_table_reader.cc",
+      "table/block_builder.cc",
+      "table/block_prefix_index.cc",
+      "table/bloom_block.cc",
+      "table/cuckoo_table_builder.cc",
+      "table/cuckoo_table_factory.cc",
+      "table/cuckoo_table_reader.cc",
+      "table/flush_block_policy.cc",
+      "table/format.cc",
+      "table/full_filter_block.cc",
+      "table/get_context.cc",
+      "table/index_builder.cc",
+      "table/iterator.cc",
+      "table/merging_iterator.cc",
+      "table/meta_blocks.cc",
+      "table/partitioned_filter_block.cc",
+      "table/persistent_cache_helper.cc",
+      "table/plain_table_builder.cc",
+      "table/plain_table_factory.cc",
+      "table/plain_table_index.cc",
+      "table/plain_table_key_coding.cc",
+      "table/plain_table_reader.cc",
+      "table/sst_file_writer.cc",
+      "table/table_properties.cc",
+      "table/two_level_iterator.cc",
+      "tools/dump/db_dump_tool.cc",
+      "util/arena.cc",
+      "util/auto_roll_logger.cc",
+      "util/bloom.cc",
+      "util/build_version.cc",
+      "util/coding.cc",
+      "util/compaction_job_stats_impl.cc",
+      "util/comparator.cc",
+      "util/concurrent_arena.cc",
+      "util/crc32c.cc",
+      "util/delete_scheduler.cc",
+      "util/dynamic_bloom.cc",
+      "util/event_logger.cc",
+      "util/file_reader_writer.cc",
+      "util/file_util.cc",
+      "util/filename.cc",
+      "util/filter_policy.cc",
+      "util/hash.cc",
+      "util/log_buffer.cc",
+      "util/murmurhash.cc",
+      "util/random.cc",
+      "util/rate_limiter.cc",
+      "util/slice.cc",
+      "util/sst_file_manager_impl.cc",
+      "util/status.cc",
+      "util/status_message.cc",
+      "util/string_util.cc",
+      "util/sync_point.cc",
+      "util/thread_local.cc",
+      "util/threadpool_imp.cc",
+      "util/transaction_test_util.cc",
+      "util/xxhash.cc",
+      "utilities/backupable/backupable_db.cc",
+      "utilities/blob_db/blob_db.cc",
+      "utilities/blob_db/blob_db_impl.cc",
+      "utilities/blob_db/blob_file.cc",
+      "utilities/blob_db/blob_log_reader.cc",
+      "utilities/blob_db/blob_log_writer.cc",
+      "utilities/blob_db/blob_log_format.cc",
+      "utilities/blob_db/ttl_extractor.cc",
+      "utilities/cassandra/cassandra_compaction_filter.cc",
+      "utilities/cassandra/format.cc",
+      "utilities/cassandra/merge_operator.cc",
+      "utilities/checkpoint/checkpoint_impl.cc",
+      "utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc",
+      "utilities/convenience/info_log_finder.cc",
+      "utilities/date_tiered/date_tiered_db_impl.cc",
+      "utilities/debug.cc",
+      "utilities/document/document_db.cc",
+      "utilities/document/json_document.cc",
+      "utilities/document/json_document_builder.cc",
+      "utilities/env_mirror.cc",
+      "utilities/env_timed.cc",
+      "utilities/geodb/geodb_impl.cc",
+      "utilities/leveldb_options/leveldb_options.cc",
+      "utilities/lua/rocks_lua_compaction_filter.cc",
+      "utilities/memory/memory_util.cc",
+      "utilities/merge_operators/max.cc",
+      "utilities/merge_operators/put.cc",
+      "utilities/merge_operators/string_append/stringappend.cc",
+      "utilities/merge_operators/string_append/stringappend2.cc",
+      "utilities/merge_operators/uint64add.cc",
+      "utilities/option_change_migration/option_change_migration.cc",
+      "utilities/options/options_util.cc",
+      "utilities/persistent_cache/block_cache_tier.cc",
+      "utilities/persistent_cache/block_cache_tier_file.cc",
+      "utilities/persistent_cache/block_cache_tier_metadata.cc",
+      "utilities/persistent_cache/persistent_cache_tier.cc",
+      "utilities/persistent_cache/volatile_tier_impl.cc",
+      "utilities/redis/redis_lists.cc",
+      "utilities/simulator_cache/sim_cache.cc",
+      "utilities/spatialdb/spatial_db.cc",
+      "utilities/table_properties_collectors/compact_on_deletion_collector.cc",
+      "utilities/transactions/optimistic_transaction_db_impl.cc",
+      "utilities/transactions/optimistic_transaction.cc",
+      "utilities/transactions/transaction_base.cc",
+      "utilities/transactions/pessimistic_transaction_db.cc",
+      "utilities/transactions/transaction_db_mutex_impl.cc",
+      "utilities/transactions/pessimistic_transaction.cc",
+      "utilities/transactions/transaction_lock_mgr.cc",
+      "utilities/transactions/transaction_util.cc",
+      "utilities/transactions/write_prepared_txn.cc",
+      "utilities/ttl/db_ttl_impl.cc",
+      "utilities/write_batch_with_index/write_batch_with_index.cc",
+      "utilities/write_batch_with_index/write_batch_with_index_internal.cc",
+      "tools/ldb_cmd.cc",
+      "tools/ldb_tool.cc",
+      "tools/sst_dump_tool.cc",
+      "utilities/blob_db/blob_dump_tool.cc",
+    ],
+    deps = [],
+    preprocessor_flags = rocksdb_preprocessor_flags,
+    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+    compiler_flags = rocksdb_compiler_flags,
+    external_deps = rocksdb_external_deps,
+)
+
+cpp_library(
+    name = "rocksdb_test_lib",
+    headers = AutoHeaders.RECURSIVE_GLOB,
+    srcs = [
+      "table/mock_table.cc",
+      "util/fault_injection_test_env.cc",
+      "util/testharness.cc",
+      "util/testutil.cc",
+      "db/db_test_util.cc",
+      "utilities/cassandra/test_utils.cc",
+      "utilities/col_buf_encoder.cc",
+      "utilities/col_buf_decoder.cc",
+      "utilities/column_aware_encoding_util.cc",
+    ],
+    deps = [":rocksdb_lib"],
+    preprocessor_flags = rocksdb_preprocessor_flags,
+    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+    compiler_flags = rocksdb_compiler_flags,
+    external_deps = rocksdb_external_deps,
+)
+
+cpp_library(
+    name = "rocksdb_tools_lib",
+    headers = AutoHeaders.RECURSIVE_GLOB,
+    srcs = [
+      "tools/db_bench_tool.cc",
+      "util/testutil.cc",
+    ],
+    deps = [":rocksdb_lib"],
+    preprocessor_flags = rocksdb_preprocessor_flags,
+    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+    compiler_flags = rocksdb_compiler_flags,
+    external_deps = rocksdb_external_deps,
+)
+
+cpp_library(
+    name = "env_basic_test_lib",
+    headers = AutoHeaders.RECURSIVE_GLOB,
+    srcs = ["env/env_basic_test.cc"],
+    deps = [":rocksdb_test_lib"],
+    preprocessor_flags = rocksdb_preprocessor_flags,
+    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+    compiler_flags = rocksdb_compiler_flags,
+    external_deps = rocksdb_external_deps,
+)
+
+# [test_name, test_src, test_type]
+ROCKS_TESTS = [['arena_test', 'util/arena_test.cc', 'serial'],
+ ['auto_roll_logger_test', 'util/auto_roll_logger_test.cc', 'serial'],
+ ['autovector_test', 'util/autovector_test.cc', 'serial'],
+ ['backupable_db_test',
+  'utilities/backupable/backupable_db_test.cc',
+  'parallel'],
+ ['blob_db_test', 'utilities/blob_db/blob_db_test.cc', 'serial'],
+ ['block_based_filter_block_test',
+  'table/block_based_filter_block_test.cc',
+  'serial'],
+ ['block_test', 'table/block_test.cc', 'serial'],
+ ['bloom_test', 'util/bloom_test.cc', 'serial'],
+ ['c_test', 'db/c_test.c', 'serial'],
+ ['cache_test', 'cache/cache_test.cc', 'serial'],
+ ['cassandra_format_test',
+  'utilities/cassandra/cassandra_format_test.cc',
+  'serial'],
+ ['cassandra_functional_test',
+  'utilities/cassandra/cassandra_functional_test.cc',
+  'serial'],
+ ['cassandra_row_merge_test',
+  'utilities/cassandra/cassandra_row_merge_test.cc',
+  'serial'],
+ ['cassandra_serialize_test',
+  'utilities/cassandra/cassandra_serialize_test.cc',
+  'serial'],
+ ['checkpoint_test', 'utilities/checkpoint/checkpoint_test.cc', 'serial'],
+ ['cleanable_test', 'table/cleanable_test.cc', 'serial'],
+ ['coding_test', 'util/coding_test.cc', 'serial'],
+ ['column_aware_encoding_test',
+  'utilities/column_aware_encoding_test.cc',
+  'serial'],
+ ['column_family_test', 'db/column_family_test.cc', 'serial'],
+ ['compact_files_test', 'db/compact_files_test.cc', 'serial'],
+ ['compact_on_deletion_collector_test',
+  'utilities/table_properties_collectors/compact_on_deletion_collector_test.cc',
+  'serial'],
+ ['compaction_iterator_test', 'db/compaction_iterator_test.cc', 'serial'],
+ ['compaction_job_stats_test', 'db/compaction_job_stats_test.cc', 'serial'],
+ ['compaction_job_test', 'db/compaction_job_test.cc', 'serial'],
+ ['compaction_picker_test', 'db/compaction_picker_test.cc', 'serial'],
+ ['comparator_db_test', 'db/comparator_db_test.cc', 'serial'],
+ ['corruption_test', 'db/corruption_test.cc', 'serial'],
+ ['crc32c_test', 'util/crc32c_test.cc', 'serial'],
+ ['cuckoo_table_builder_test', 'table/cuckoo_table_builder_test.cc', 'serial'],
+ ['cuckoo_table_db_test', 'db/cuckoo_table_db_test.cc', 'serial'],
+ ['cuckoo_table_reader_test', 'table/cuckoo_table_reader_test.cc', 'serial'],
+ ['date_tiered_test', 'utilities/date_tiered/date_tiered_test.cc', 'serial'],
+ ['db_basic_test', 'db/db_basic_test.cc', 'serial'],
+ ['db_block_cache_test', 'db/db_block_cache_test.cc', 'serial'],
+ ['db_bloom_filter_test', 'db/db_bloom_filter_test.cc', 'serial'],
+ ['db_compaction_filter_test', 'db/db_compaction_filter_test.cc', 'parallel'],
+ ['db_compaction_test', 'db/db_compaction_test.cc', 'parallel'],
+ ['db_dynamic_level_test', 'db/db_dynamic_level_test.cc', 'serial'],
+ ['db_encryption_test', 'db/db_encryption_test.cc', 'serial'],
+ ['db_flush_test', 'db/db_flush_test.cc', 'serial'],
+ ['db_inplace_update_test', 'db/db_inplace_update_test.cc', 'serial'],
+ ['db_io_failure_test', 'db/db_io_failure_test.cc', 'serial'],
+ ['db_iter_test', 'db/db_iter_test.cc', 'serial'],
+ ['db_iterator_test', 'db/db_iterator_test.cc', 'serial'],
+ ['db_log_iter_test', 'db/db_log_iter_test.cc', 'serial'],
+ ['db_memtable_test', 'db/db_memtable_test.cc', 'serial'],
+ ['db_merge_operator_test', 'db/db_merge_operator_test.cc', 'serial'],
+ ['db_options_test', 'db/db_options_test.cc', 'serial'],
+ ['db_properties_test', 'db/db_properties_test.cc', 'serial'],
+ ['db_range_del_test', 'db/db_range_del_test.cc', 'serial'],
+ ['db_sst_test', 'db/db_sst_test.cc', 'parallel'],
+ ['db_statistics_test', 'db/db_statistics_test.cc', 'serial'],
+ ['db_table_properties_test', 'db/db_table_properties_test.cc', 'serial'],
+ ['db_tailing_iter_test', 'db/db_tailing_iter_test.cc', 'serial'],
+ ['db_test', 'db/db_test.cc', 'parallel'],
+ ['db_test2', 'db/db_test2.cc', 'serial'],
+ ['db_universal_compaction_test',
+  'db/db_universal_compaction_test.cc',
+  'parallel'],
+ ['db_wal_test', 'db/db_wal_test.cc', 'parallel'],
+ ['db_write_test', 'db/db_write_test.cc', 'serial'],
+ ['dbformat_test', 'db/dbformat_test.cc', 'serial'],
+ ['delete_scheduler_test', 'util/delete_scheduler_test.cc', 'serial'],
+ ['deletefile_test', 'db/deletefile_test.cc', 'serial'],
+ ['document_db_test', 'utilities/document/document_db_test.cc', 'serial'],
+ ['dynamic_bloom_test', 'util/dynamic_bloom_test.cc', 'serial'],
+ ['env_basic_test', 'env/env_basic_test.cc', 'serial'],
+ ['env_test', 'env/env_test.cc', 'serial'],
+ ['env_timed_test', 'utilities/env_timed_test.cc', 'serial'],
+ ['event_logger_test', 'util/event_logger_test.cc', 'serial'],
+ ['external_sst_file_basic_test',
+  'db/external_sst_file_basic_test.cc',
+  'serial'],
+ ['external_sst_file_test', 'db/external_sst_file_test.cc', 'parallel'],
+ ['fault_injection_test', 'db/fault_injection_test.cc', 'parallel'],
+ ['file_indexer_test', 'db/file_indexer_test.cc', 'serial'],
+ ['file_reader_writer_test', 'util/file_reader_writer_test.cc', 'serial'],
+ ['filelock_test', 'util/filelock_test.cc', 'serial'],
+ ['filename_test', 'db/filename_test.cc', 'serial'],
+ ['flush_job_test', 'db/flush_job_test.cc', 'serial'],
+ ['full_filter_block_test', 'table/full_filter_block_test.cc', 'serial'],
+ ['geodb_test', 'utilities/geodb/geodb_test.cc', 'serial'],
+ ['hash_table_test',
+  'utilities/persistent_cache/hash_table_test.cc',
+  'serial'],
+ ['hash_test', 'util/hash_test.cc', 'serial'],
+ ['heap_test', 'util/heap_test.cc', 'serial'],
+ ['histogram_test', 'monitoring/histogram_test.cc', 'serial'],
+ ['inlineskiplist_test', 'memtable/inlineskiplist_test.cc', 'parallel'],
+ ['iostats_context_test', 'monitoring/iostats_context_test.cc', 'serial'],
+ ['json_document_test', 'utilities/document/json_document_test.cc', 'serial'],
+ ['ldb_cmd_test', 'tools/ldb_cmd_test.cc', 'serial'],
+ ['listener_test', 'db/listener_test.cc', 'serial'],
+ ['log_test', 'db/log_test.cc', 'serial'],
+ ['lru_cache_test', 'cache/lru_cache_test.cc', 'serial'],
+ ['manual_compaction_test', 'db/manual_compaction_test.cc', 'parallel'],
+ ['memory_test', 'utilities/memory/memory_test.cc', 'serial'],
+ ['memtable_list_test', 'db/memtable_list_test.cc', 'serial'],
+ ['merge_helper_test', 'db/merge_helper_test.cc', 'serial'],
+ ['merge_test', 'db/merge_test.cc', 'serial'],
+ ['merger_test', 'table/merger_test.cc', 'serial'],
+ ['mock_env_test', 'env/mock_env_test.cc', 'serial'],
+ ['object_registry_test', 'utilities/object_registry_test.cc', 'serial'],
+ ['optimistic_transaction_test',
+  'utilities/transactions/optimistic_transaction_test.cc',
+  'serial'],
+ ['option_change_migration_test',
+  'utilities/option_change_migration/option_change_migration_test.cc',
+  'serial'],
+ ['options_file_test', 'db/options_file_test.cc', 'serial'],
+ ['options_settable_test', 'options/options_settable_test.cc', 'serial'],
+ ['options_test', 'options/options_test.cc', 'serial'],
+ ['options_util_test', 'utilities/options/options_util_test.cc', 'serial'],
+ ['partitioned_filter_block_test',
+  'table/partitioned_filter_block_test.cc',
+  'serial'],
+ ['perf_context_test', 'db/perf_context_test.cc', 'serial'],
+ ['persistent_cache_test',
+  'utilities/persistent_cache/persistent_cache_test.cc',
+  'parallel'],
+ ['plain_table_db_test', 'db/plain_table_db_test.cc', 'serial'],
+ ['prefix_test', 'db/prefix_test.cc', 'serial'],
+ ['range_del_aggregator_test', 'db/range_del_aggregator_test.cc', 'serial'],
+ ['rate_limiter_test', 'util/rate_limiter_test.cc', 'serial'],
+ ['reduce_levels_test', 'tools/reduce_levels_test.cc', 'serial'],
+ ['repair_test', 'db/repair_test.cc', 'serial'],
+ ['sim_cache_test', 'utilities/simulator_cache/sim_cache_test.cc', 'serial'],
+ ['skiplist_test', 'memtable/skiplist_test.cc', 'serial'],
+ ['slice_transform_test', 'util/slice_transform_test.cc', 'serial'],
+ ['spatial_db_test', 'utilities/spatialdb/spatial_db_test.cc', 'serial'],
+ ['sst_dump_test', 'tools/sst_dump_test.cc', 'serial'],
+ ['statistics_test', 'monitoring/statistics_test.cc', 'serial'],
+ ['stringappend_test',
+  'utilities/merge_operators/string_append/stringappend_test.cc',
+  'serial'],
+ ['table_properties_collector_test',
+  'db/table_properties_collector_test.cc',
+  'serial'],
+ ['table_test', 'table/table_test.cc', 'parallel'],
+ ['thread_list_test', 'util/thread_list_test.cc', 'serial'],
+ ['thread_local_test', 'util/thread_local_test.cc', 'serial'],
+ ['timer_queue_test', 'util/timer_queue_test.cc', 'serial'],
+ ['transaction_test', 'utilities/transactions/transaction_test.cc', 'serial'],
+ ['ttl_test', 'utilities/ttl/ttl_test.cc', 'serial'],
+ ['util_merge_operators_test',
+  'utilities/util_merge_operators_test.cc',
+  'serial'],
+ ['version_builder_test', 'db/version_builder_test.cc', 'serial'],
+ ['version_edit_test', 'db/version_edit_test.cc', 'serial'],
+ ['version_set_test', 'db/version_set_test.cc', 'serial'],
+ ['wal_manager_test', 'db/wal_manager_test.cc', 'serial'],
+ ['write_batch_test', 'db/write_batch_test.cc', 'serial'],
+ ['write_batch_with_index_test',
+  'utilities/write_batch_with_index/write_batch_with_index_test.cc',
+  'serial'],
+ ['write_buffer_manager_test',
+  'memtable/write_buffer_manager_test.cc',
+  'serial'],
+ ['write_callback_test', 'db/write_callback_test.cc', 'serial'],
+ ['write_controller_test', 'db/write_controller_test.cc', 'serial']]
+
+
+# Generate a test rule for each entry in ROCKS_TESTS
+for test_cfg in ROCKS_TESTS:
+    test_name = test_cfg[0]
+    test_cc = test_cfg[1]
+    ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
+    test_bin = test_name + "_bin"
+
+    cpp_binary (
+      name = test_bin,
+      srcs = [test_cc],
+      deps = [":rocksdb_test_lib"],
+      preprocessor_flags = rocksdb_preprocessor_flags,
+      arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+      compiler_flags = rocksdb_compiler_flags,
+      external_deps = rocksdb_external_deps,
+    )
+
+    custom_unittest(
+      name = test_name,
+      type = ttype,
+      deps = [":" + test_bin],
+      command = [TEST_RUNNER, BUCK_BINS + test_bin]
+    )
+
+custom_unittest(
+    name = "make_rocksdbjavastatic",
+    type = "simple",
+    command = ["internal_repo_rocksdb/make_rocksdbjavastatic.sh"],
+)
+
+custom_unittest(
+    name = "make_rocksdb_lite_release",
+    type = "simple",
+    command = ["internal_repo_rocksdb/make_rocksdb_lite_release.sh"],
+)

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/USERS.md
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/USERS.md b/thirdparty/rocksdb/USERS.md
new file mode 100644
index 0000000..7be093f
--- /dev/null
+++ b/thirdparty/rocksdb/USERS.md
@@ -0,0 +1,85 @@
+This document lists users of RocksDB and their use cases. If you are using RocksDB, please open a pull request and add yourself to the list.
+
+## Facebook
+At Facebook, we use RocksDB as the storage engine in multiple data management services and as a backend for many different stateful services, including:
+
+1. MyRocks -- https://github.com/MySQLOnRocksDB/mysql-5.6
+2. MongoRocks -- https://github.com/mongodb-partners/mongo-rocks
+3. ZippyDB -- Facebook's distributed key-value store with Paxos-style replication, built on top of RocksDB.[*] https://www.youtube.com/watch?v=DfiN7pG0D0k
+4. Laser -- a high-query-throughput, low (millisecond) latency key-value storage service built on top of RocksDB.[*]
+5. Dragon -- a distributed graph query engine. https://code.facebook.com/posts/1737605303120405/dragon-a-distributed-graph-query-engine/
+6. Stylus -- a low-level stream processing framework written in C++.[*]
+
+[*] https://research.facebook.com/publications/realtime-data-processing-at-facebook/
+
+## LinkedIn
+Two different use cases at LinkedIn use RocksDB as a storage engine:
+
+1. LinkedIn's follow feed for storing users' activities. Check out the blog post: https://engineering.linkedin.com/blog/2016/03/followfeed--linkedin-s-feed-made-faster-and-smarter
+2. Apache Samza, an open source framework for stream processing
+
+Learn more about those use cases in a Tech Talk by Ankit Gupta and Naveen Somasundaram: http://www.youtube.com/watch?v=plqVp_OnSzg
+
+## Yahoo
+Yahoo is using RocksDB as a storage engine for their biggest distributed data store Sherpa. Learn more about it here: http://yahooeng.tumblr.com/post/120730204806/sherpa-scales-new-heights
+
+## CockroachDB
+CockroachDB is an open-source geo-replicated transactional database (still in development). They are using RocksDB as their storage engine. Check out their github: https://github.com/cockroachdb/cockroach
+
+## DNANexus
+DNANexus is using RocksDB to speed up processing of genomics data.
+You can learn more from this great blog post by Mike Lin: http://devblog.dnanexus.com/faster-bam-sorting-with-samtools-and-rocksdb/
+
+## Iron.io
+Iron.io is using RocksDB as a storage engine for their distributed queueing system.
+Learn more from Tech Talk by Reed Allman: http://www.youtube.com/watch?v=HTjt6oj-RL4
+
+## Tango Me
+Tango is using RocksDB as a graph storage to store all users' connection data and other social activity data.
+
+## Turn
+Turn is using RocksDB as a storage layer for their key/value store, serving at peak 2.4MM QPS out of different datacenters.
+Check out our RocksDB Protobuf merge operator at: https://github.com/vladb38/rocksdb_protobuf
+
+## Santander UK/Cloudera Professional Services
+Check out their blog post: http://blog.cloudera.com/blog/2015/08/inside-santanders-near-real-time-data-ingest-architecture/
+
+## Airbnb
+Airbnb is using RocksDB as a storage engine for their personalized search service. You can learn more about it here: https://www.youtube.com/watch?v=ASQ6XMtogMs
+
+## Pinterest
+Pinterest's Object Retrieval System uses RocksDB for storage: https://www.youtube.com/watch?v=MtFEVEs_2Vo
+
+## Smyte
+[Smyte](https://www.smyte.com/) uses RocksDB as the storage layer for their core key-value storage, high-performance counters and time-windowed HyperLogLog services.
+
+## Rakuten Marketing
+[Rakuten Marketing](https://marketing.rakuten.com/) uses RocksDB as the disk cache layer for the real-time bidding service in their Performance DSP.
+
+## VWO, Wingify
+[VWO's](https://vwo.com/) Smart Code checker and URL helper uses RocksDB to store all the URLs where VWO's Smart Code is installed.
+
+## quasardb
+[quasardb](https://www.quasardb.net) is a high-performance, distributed, transactional key-value database that integrates well with in-memory analytics engines such as Apache Spark. 
+quasardb uses a heavily tuned RocksDB as its persistence layer.
+
+## Netflix
+[Netflix](http://techblog.netflix.com/2016/05/application-data-caching-using-ssds.html) uses RocksDB on AWS EC2 instances with local SSD drives to cache application data.
+
+## TiKV
+[TiKV](https://github.com/pingcap/tikv) is a GEO-replicated, high-performance, distributed, transactional key-value database. TiKV is powered by Rust and Raft. TiKV uses RocksDB as its persistence layer.
+
+## Apache Flink
+[Apache Flink](https://flink.apache.org/news/2016/03/08/release-1.0.0.html) uses RocksDB to store state locally on a machine.
+
+## Dgraph
+[Dgraph](https://github.com/dgraph-io/dgraph) is an open-source, scalable, distributed, low-latency, high-throughput graph database. They use RocksDB to store state locally on a machine.
+
+## Uber
+[Uber](http://eng.uber.com/cherami/) uses RocksDB as a durable and scalable task queue.
+
+## 360 Pika
+[360](http://www.360.cn/) [Pika](https://github.com/Qihoo360/pika) is a NoSQL store compatible with Redis. With huge volumes of data stored, Redis can hit a capacity bottleneck; Pika was born to solve that problem and is now widely used at many companies.
+
+## LzLabs
+LzLabs is using RocksDB as a storage engine in their multi-database distributed framework to store application configuration and user data.

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/Vagrantfile
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/Vagrantfile b/thirdparty/rocksdb/Vagrantfile
new file mode 100644
index 0000000..d7c2991
--- /dev/null
+++ b/thirdparty/rocksdb/Vagrantfile
@@ -0,0 +1,34 @@
+# Vagrant file
+Vagrant.configure("2") do |config|
+
+  config.vm.provider "virtualbox" do |v|
+    v.memory = 4096
+    v.cpus = 2
+  end
+
+  config.vm.define "ubuntu14" do |box|
+    box.vm.box = "ubuntu/trusty64"
+  end
+
+  config.vm.define "centos65" do |box|
+    box.vm.box = "chef/centos-6.5"
+  end
+
+  config.vm.define "FreeBSD10" do |box|
+    box.vm.guest = :freebsd
+    box.vm.box = "robin/freebsd-10"
+    # FreeBSD does not support 'mount_virtualbox_shared_folder', use NFS
+    box.vm.synced_folder ".", "/vagrant", :nfs => true, id: "vagrant-root"
+    box.vm.network "private_network", ip: "10.0.1.10"
+
+    # build everything after creating the VM; skip provisioning with --no-provision
+    box.vm.provision "shell", inline: <<-SCRIPT
+      pkg install -y gmake clang35
+      export CXX=/usr/local/bin/clang++35
+      cd /vagrant
+      gmake clean
+      gmake all OPT=-g
+    SCRIPT
+  end
+
+end

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/WINDOWS_PORT.md
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/WINDOWS_PORT.md b/thirdparty/rocksdb/WINDOWS_PORT.md
new file mode 100644
index 0000000..a0fe1fe
--- /dev/null
+++ b/thirdparty/rocksdb/WINDOWS_PORT.md
@@ -0,0 +1,228 @@
+# Microsoft Contribution Notes
+
+## Contributors
+* Alexander Zinoviev https://github.com/zinoale
+* Dmitri Smirnov https://github.com/yuslepukhin
+* Praveen Rao  https://github.com/PraveenSinghRao
+* Sherlock Huang  https://github.com/SherlockNoMad
+
+## Introduction
+RocksDB is a well-proven open source key-value persistent store, optimized for fast storage. It scales with the number of CPUs and storage IOPS to support IO-bound, in-memory and write-once workloads and, most importantly, is flexible enough to allow for innovation.
+
+As the Microsoft Bing team, we have been continuously pushing hard to improve the scalability and efficiency of the platform and, ultimately, Bing end-user satisfaction. We would like to explore the opportunity to embrace open source, RocksDB here, to use, enhance and customize it for our usage, and also to contribute back to the RocksDB community. Herein, we are pleased to offer this RocksDB port for the Windows platform.
+
+These notes describe some decisions and changes we had to make with regards to porting RocksDB on Windows. We hope this will help both reviewers and users of the Windows port.
+We are open for comments and improvements.
+
+## OS specifics
+All of the porting, testing and benchmarking was done on Windows Server 2012 R2 Datacenter 64-bit, but to the best of our knowledge no API we used during porting is unsupported on Windows releases after Vista.
+
+## Porting goals
+We strive to achieve the following goals:
+* make use of the existing porting interface of RocksDB
+* make minimal modifications within platform-independent code
+* make all unit tests pass in both debug and release builds
+  * Note: the latest introduction of SyncPoint seems to disable running db_test in Release.
+* make performance on par with published benchmarks, accounting for HW differences
+* keep the port code in line with the master branch, with no forking
+
+## Build system
+We have chosen CMake as a widely accepted build system to build the Windows port. It is very fast and convenient. 
+
+At the same time it generates Visual Studio projects that are usable both from the command line and from the IDE.
+
+The top-level CMakeLists.txt file contains a description of all targets and build rules. It also provides brief instructions on how to build the software for Windows. One more build-related file is thirdparty.inc, which also resides at the top level. This file must be edited to point to the actual location of the third-party libraries.
+We think that it would be beneficial to merge the existing make-based build system and the new cmake-based build system into a single one to use on all platforms.
+
+All building and testing was done for 64-bit. We have not conducted any testing for 32-bit and early reports indicate that it will not run on 32-bit.
+
+## C++ and STL notes
+We had to make some minimum changes within the portable files that either account for OS differences or the shortcomings of C++11 support in the current version of the MS compiler. Most or all of them are expected to be fixed in the upcoming compiler releases.
+
+We plan to use this port for our business purposes here at Bing, which provided the business justification for it. It also means that, at present, we are not free to choose the compiler version at will.
+
+* Certain headers that are not present and not necessary on Windows were simply guarded with `#ifndef OS_WIN` in a few places (`unistd.h`)
+* All POSIX-specific headers were replaced with port/port.h, which worked well
+* Replaced `dirent.h` with `port/dirent.h` (very few places), implementing the relevant interfaces within the `rocksdb::port` namespace
+* Replaced `sys/time.h` with `port/sys_time.h` (a few places) and implemented equivalents within `rocksdb::port`
+* The `printf` `%zu` specification is not supported on Windows. To imitate the existing standard we came up with a string macro `ROCKSDB_PRIszt`, which supplies `zu` on POSIX systems and `Iu` on Windows (see the sketch after this list)
+* In-class member initializers were moved into the constructors in some cases
+* `constexpr` is not supported. We had to replace `std::numeric_limits<>::max/min()` with the corresponding C macros for constants. Sometimes we had to make class members `static const` and place a definition within a .cc file.
+* `constexpr` for functions was replaced with a template specialization (one place)
+* Union members that have non-trivial constructors were replaced with `char[]` in one place, along with bug fixes (spatial experimental feature)
+* Zero-sized arrays are deemed a non-standard extension, so we converted them to size-1 arrays, which should work well for the purposes of these classes
+* `std::chrono` lacks nanosecond support (fixed in the upcoming release of the STL), so we had to use `QueryPerformanceCounter()` within env_win.cc
+* Function-local static initialization is still not thread-safe; we used `std::call_once` to mitigate this within WinEnv
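+
+As an illustration of the format-specifier item above, here is a minimal sketch of the portability macro; this is hypothetical code, not the actual RocksDB definitions, which live in the port/ headers:
+
+```cpp
+#include <cstdio>
+#include <cstddef>
+
+// Sketch of ROCKSDB_PRIszt: the size_t length modifier for printf-style APIs.
+#ifdef OS_WIN
+#define ROCKSDB_PRIszt "Iu"   // MSVC's size_t modifier
+#else
+#define ROCKSDB_PRIszt "zu"   // C99 size_t modifier
+#endif
+
+void LogEntryCount(std::size_t n) {
+  // String literals concatenate, so "%" ROCKSDB_PRIszt expands portably.
+  std::printf("entries: %" ROCKSDB_PRIszt "\n", n);
+}
+```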
+
+## Windows Environments notes
+We endeavored to make it functionally on par with posix_env. This means we replicated the functionality of the thread pool and other things as precisely as possible, including:
+* Replicate POSIX logic using `std::thread` primitives.
+* Implement all posix_env disk access functionality.
+* Set `use_os_buffer=false` to disable OS disk buffering for WinWritableFile and WinRandomAccessFile.
+* Replace `pread/pwrite` with `WriteFile/ReadFile` with `OVERLAPPED` structure.
+* Use `SetFileInformationByHandle` to compensate absence of `fallocate`.
+
+### In detail
+Even though Windows provides its own efficient thread-pool implementation, we chose to replicate the POSIX logic using `std::thread` primitives. This allows anyone to quickly detect changes within the POSIX source code and replicate them within the Windows env. This has proven to work very well. At the same time, anyone who wishes to replace the built-in thread pool can do so using RocksDB stackable environments.
+
+For disk access we implemented all of the functionality present within the posix_env, which includes memory-mapped files, random access, rate-limiter support, etc.
+The `use_os_buffer` flag on POSIX platforms currently denotes disabling read-ahead via the `fadvise` mechanism. Windows has no `fadvise` system call. What is more, it implements the disk cache in a way that differs greatly from Linux. It is not an uncommon practice on Windows to perform un-buffered disk access to gain control of memory consumption. We think that in our use case this may also be a good configuration option at the expense of disk throughput; to compensate, one may increase the configured in-memory cache size instead. Thus we have chosen `use_os_buffer=false` to disable OS disk buffering for `WinWritableFile` and `WinRandomAccessFile`. The OS imposes restrictions on the alignment of the disk offsets, the buffers used and the amount of data that is read/written when accessing files in un-buffered mode. When the option is true, the classes behave in a standard way. This allows writes and reads to be performed in cases where un-buffered access does not make sense, such as the WAL and MANIFEST.
+
+We have replaced `pread/pwrite` with `WriteFile/ReadFile` with an `OVERLAPPED` structure so we can atomically seek to the position of the disk operation while still performing it synchronously. Thus we are able to emulate the functionality of `pread/pwrite` reasonably well. The only difference is that the file pointer is not returned to its original position, but that hardly matters given the random nature of access.
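+
+A minimal sketch of the idea, assuming a file handle opened without `FILE_FLAG_OVERLAPPED` (names here are illustrative, not the actual WinEnv code):
+
+```cpp
+#include <windows.h>
+
+// Emulate pread(): read 'len' bytes at absolute file offset 'offset'. With a
+// synchronous handle, ReadFile honors the OVERLAPPED offset yet blocks until
+// the transfer completes, so no explicit seek is needed.
+bool PositionalRead(HANDLE file, void* buf, DWORD len,
+                    ULONGLONG offset, DWORD* bytes_read) {
+  OVERLAPPED ov = {};
+  ov.Offset = static_cast<DWORD>(offset & 0xFFFFFFFFull);
+  ov.OffsetHigh = static_cast<DWORD>(offset >> 32);
+  return ReadFile(file, buf, len, bytes_read, &ov) != FALSE;
+}
+```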
+
+We used `SetFileInformationByHandle` both to truncate files after writing a full final page to disk and to pre-allocate disk space for faster I/O, thus compensating for the absence of `fallocate`, although some differences remain. For example, the pre-allocated space is not filled with zeros as on Linux; on a positive note, however, the end-of-file position is also not modified after pre-allocation.
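+
+Roughly, the two uses can be expressed as follows (a sketch under the stated assumptions, not the exact port code):
+
+```cpp
+#include <windows.h>
+
+// Truncate (or extend) the file to 'size' bytes, standing in for ftruncate().
+bool TruncateTo(HANDLE file, LONGLONG size) {
+  FILE_END_OF_FILE_INFO eof;
+  eof.EndOfFile.QuadPart = size;
+  return SetFileInformationByHandle(file, FileEndOfFileInfo,
+                                    &eof, sizeof(eof)) != FALSE;
+}
+
+// Reserve disk space without moving end-of-file, standing in for fallocate().
+bool Preallocate(HANDLE file, LONGLONG size) {
+  FILE_ALLOCATION_INFO alloc;
+  alloc.AllocationSize.QuadPart = size;
+  return SetFileInformationByHandle(file, FileAllocationInfo,
+                                    &alloc, sizeof(alloc)) != FALSE;
+}
+```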
+
+RocksDB renames, copies and deletes files at will, even though they may be open with another handle at the same time. We had to relax the sharing mode and allow nearly all possible concurrent-access permissions.
+
+## Thread-Local Storage
+Thread-local storage plays a significant role in RocksDB performance. Rather than creating a separate implementation, we chose to create inline wrappers that forward `pthread_specific` calls to the Windows `Tls` interfaces within the `rocksdb::port` namespace. This leaves the meat of the existing logic intact and unchanged, and just as maintainable.
+
+To mitigate the lack of thread-local storage cleanup on thread exit, we added a limited amount of Windows-specific code within the same thread_local.cc file that injects a cleanup callback into a `"__tls"` structure within the `".CRT$XLB"` data segment. This approach guarantees that the callback is invoked regardless of whether RocksDB is used within an executable, a standalone DLL, or within another DLL.
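+
+The injection can be sketched as follows (illustrative names only; the real code lives in thread_local.cc):
+
+```cpp
+#include <windows.h>
+
+// Invoked by the CRT/loader on thread attach and detach events.
+static void NTAPI OnThreadEvent(PVOID, DWORD reason, PVOID) {
+  if (reason == DLL_THREAD_DETACH) {
+    // run the registered per-thread cleanup handlers here
+  }
+}
+
+// Planting the callback pointer in ".CRT$XLB" makes it fire whether RocksDB
+// is linked into an executable or a DLL.
+#pragma section(".CRT$XLB", read)
+extern "C" __declspec(allocate(".CRT$XLB"))
+PIMAGE_TLS_CALLBACK p_thread_callback = OnThreadEvent;
+```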
+
+## Jemalloc usage
+
+When RocksDB is used with jemalloc, the latter needs to be initialized before any of the C++ globals or statics. To accomplish that, we injected an initialization routine into `".CRT$XCT"` that is automatically invoked by the runtime before static objects are initialized. je-uninit is queued via `atexit()`.
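+
+Schematically (a hedged sketch; `je_init`/`je_uninit` stand for the jemalloc entry points referred to above and their exact signatures are assumptions here):
+
+```cpp
+#include <cstdlib>
+
+extern "C" void je_init();     // assumed jemalloc init hook
+extern "C" void je_uninit();   // assumed jemalloc teardown hook
+
+static void JemallocInit() {
+  je_init();
+  std::atexit(je_uninit);  // queue je-uninit, as described above
+}
+
+// ".CRT$XCT" entries run before the C++ static initializers.
+#pragma section(".CRT$XCT", read)
+__declspec(allocate(".CRT$XCT"))
+void (*p_jemalloc_init)() = JemallocInit;
+```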
+
+The jemalloc redirecting `new/delete` global operators are used by the linker provided certain conditions are met. See the build section in these notes.
+
+## Stack Trace and Unhandled Exception Handler
+
+We decided not to implement these two features because the hosting program, as a rule, already provides them. We experienced no inconvenience debugging issues in the debugger or analyzing process dumps when needed, and thus did not see this as a priority.
+
+## Performance results
+### Setup
+All of the benchmarks are run on the same set of machines. Here are the details of the test setup:
+* 2 Intel(R) Xeon(R) E5 2450 0 @ 2.10 GHz (total 16 cores)
+* 2 XK0480GDQPH SSD Device, total 894GB free disk
+* Machine has 128 GB of RAM
+* Operating System: Windows Server 2012 R2 Datacenter
+* 100 Million keys; each key is of size 10 bytes, each value is of size 800 bytes
+* total database size is ~76GB
+* The performance result is based on RocksDB 3.11.
+* The parameters used, unless specified, were exactly the same as published in the GitHub Wiki page. 
+
+### RocksDB on flash storage
+
+#### Test 1. Bulk Load of keys in Random Order
+
+Version 3.11 
+
+* Total Run Time: 17.6 min
+* Fillrandom: 5.480 micros/op 182465 ops/sec;  142.0 MB/s
+* Compact: 486056544.000 micros/op 0 ops/sec
+
+Version 3.10 
+
+* Total Run Time: 16.2 min 
+* Fillrandom: 5.018 micros/op 199269 ops/sec;  155.1 MB/s 
+* Compact: 441313173.000 micros/op 0 ops/sec; 
+
+
+#### Test 2. Bulk Load of keys in Sequential Order
+
+Version 3.11 
+
+* Fillseq: 4.944 micros/op 202k ops/sec;  157.4 MB/s
+
+Version 3.10
+
+* Fillseq: 4.105 micros/op 243.6k ops/sec;  189.6 MB/s 
+
+
+#### Test 3. Random Write
+
+Version 3.11 
+
+* Unbuffered I/O enabled
+* Overwrite: 52.661 micros/op 18.9k ops/sec;   14.8 MB/s
+
+Version 3.10
+
+* Unbuffered I/O enabled 
+* Overwrite: 52.661 micros/op 18.9k ops/sec; 
+
+
+#### Test 4. Random Read
+
+Version 3.11 
+
+* Unbuffered I/O enabled
+* Readrandom: 15.716 micros/op 63.6k ops/sec; 49.5 MB/s 
+
+Version 3.10
+
+* Unbuffered I/O enabled 
+* Readrandom: 15.548 micros/op 64.3k ops/sec; 
+
+
+#### Test 5. Multi-threaded read and single-threaded write
+
+Version 3.11
+
+* Unbuffered I/O enabled
+* Readwhilewriting: 25.128 micros/op 39.7k ops/sec; 
+
+Version 3.10
+
+* Unbuffered I/O enabled 
+* Readwhilewriting: 24.854 micros/op 40.2k ops/sec; 
+
+
+### RocksDB In Memory 
+
+#### Test 1. Point Lookup
+
+Version 3.11
+
+*80K writes/sec*
+* Write Rate Achieved: 40.5k write/sec;
+* Readwhilewriting: 0.314 micros/op 3187455 ops/sec;  364.8 MB/s (715454999 of 715454999 found)
+
+Version 3.10
+
+* Write Rate Achieved:  50.6k write/sec 
+* Readwhilewriting: 0.316 micros/op 3162028 ops/sec; (719576999 of 719576999 found) 
+
+
+*10K writes/sec*
+
+Version 3.11
+
+* Write Rate Achieved: 5.8k/s write/sec
+* Readwhilewriting: 0.246 micros/op 4062669 ops/sec;  464.9 MB/s (915481999 of 915481999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 5.8k/s write/sec 
+* Readwhilewriting: 0.244 micros/op 4106253 ops/sec; (927986999 of 927986999 found) 
+
+
+#### Test 2. Prefix Range Query
+
+Version 3.11
+
+*80K writes/sec*
+* Write Rate Achieved:  46.3k/s write/sec
+* Readwhilewriting: 0.362 micros/op 2765052 ops/sec;  316.4 MB/s (611549999 of 611549999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 45.8k/s write/sec 
+* Readwhilewriting: 0.317 micros/op 3154941 ops/sec; (708158999 of 708158999 found) 
+
+Version 3.11
+
+*10K writes/sec*
+* Write Rate Achieved: 5.78k write/sec
+* Readwhilewriting: 0.269 micros/op 3716692 ops/sec;  425.3 MB/s (837401999 of 837401999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 5.7k write/sec 
+* Readwhilewriting: 0.261 micros/op 3830152 ops/sec; (863482999 of 863482999 found) 
+
+
+We think there is still significant room to improve performance, which will be an ongoing effort for us.
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/appveyor.yml
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/appveyor.yml b/thirdparty/rocksdb/appveyor.yml
new file mode 100644
index 0000000..f582bb1
--- /dev/null
+++ b/thirdparty/rocksdb/appveyor.yml
@@ -0,0 +1,15 @@
+version: 1.0.{build}
+image: Visual Studio 2015
+before_build:
+- md %APPVEYOR_BUILD_FOLDER%\build
+- cd %APPVEYOR_BUILD_FOLDER%\build
+- cmake -G "Visual Studio 14 2015 Win64" -DOPTDBG=1 -DXPRESS=1 ..
+- cd ..
+build:
+  project: build\rocksdb.sln
+  parallel: true
+  verbosity: minimal
+test:
+test_script:
+- ps: build_tools\run_ci_db_test.ps1 -SuiteRun db_basic_test,db_test2,db_test,env_basic_test,env_test -Concurrency 8
+

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/arcanist_util/INTERNAL_ONLY_DIR
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/arcanist_util/INTERNAL_ONLY_DIR b/thirdparty/rocksdb/arcanist_util/INTERNAL_ONLY_DIR
new file mode 100644
index 0000000..e55aa3b
--- /dev/null
+++ b/thirdparty/rocksdb/arcanist_util/INTERNAL_ONLY_DIR
@@ -0,0 +1,2 @@
+arcanist_util is only used internally. If you want to change it, please check
+<internal_rocksdb_repo>/arcanist_util

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/buckifier/buckify_rocksdb.py
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/buckifier/buckify_rocksdb.py b/thirdparty/rocksdb/buckifier/buckify_rocksdb.py
new file mode 100644
index 0000000..a3c8be3
--- /dev/null
+++ b/thirdparty/rocksdb/buckifier/buckify_rocksdb.py
@@ -0,0 +1,172 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+from targets_builder import TARGETSBuilder
+from optparse import OptionParser
+import os
+import fnmatch
+import sys
+import tempfile
+
+from util import ColorString
+import util
+
+# tests to export as libraries for inclusion in other projects
+_EXPORTED_TEST_LIBS = ["env_basic_test"]
+
+# Parse src.mk files as a Dictionary of
+# VAR_NAME => list of files
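+# An illustrative src.mk fragment this parser expects (hypothetical paths):
+#   LIB_SOURCES = \
+#     db/db_impl.cc \
+#     db/db_iter.cc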
+def parse_src_mk(repo_path):
+    src_mk = repo_path + "/src.mk"
+    src_files = {}
+    for line in open(src_mk):
+        line = line.strip()
+        if len(line) == 0 or line[0] == '#':
+            continue
+        if '=' in line:
+            current_src = line.split('=')[0].strip()
+            src_files[current_src] = []
+        elif '.cc' in line:
+            src_path = line.split('.cc')[0].strip() + '.cc'
+            src_files[current_src].append(src_path)
+    return src_files
+
+
+# get all .cc / .c files
+def get_cc_files(repo_path):
+    cc_files = []
+    for root, dirnames, filenames in os.walk(repo_path):
+        root = root[(len(repo_path) + 1):]
+        if "java" in root:
+            # Skip java
+            continue
+        for filename in fnmatch.filter(filenames, '*.cc'):
+            cc_files.append(os.path.join(root, filename))
+        for filename in fnmatch.filter(filenames, '*.c'):
+            cc_files.append(os.path.join(root, filename))
+    return cc_files
+
+
+# Get tests from Makefile
+def get_tests(repo_path):
+    Makefile = repo_path + "/Makefile"
+
+    # Dictionary TEST_NAME => IS_PARALLEL
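+    # The Makefile section being parsed looks like (illustrative):
+    #   TESTS = \
+    #     arena_test \
+    #     autovector_test
+    # (a line without a trailing backslash ends the list)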
+    tests = {}
+
+    found_tests = False
+    for line in open(Makefile):
+        line = line.strip()
+        if line.startswith("TESTS ="):
+            found_tests = True
+        elif found_tests:
+            if line.endswith("\\"):
+                # remove the trailing \
+                line = line[:-1]
+                line = line.strip()
+                tests[line] = False
+            else:
+                # we consumed all the tests
+                break
+
+    found_parallel_tests = False
+    for line in open(Makefile):
+        line = line.strip()
+        if line.startswith("PARALLEL_TEST ="):
+            found_parallel_tests = True
+        elif found_parallel_tests:
+            if line.endswith("\\"):
+                # remove the trailing \
+                line = line[:-1]
+                line = line.strip()
+                tests[line] = True
+            else:
+                # we consumed all the parallel tests
+                break
+
+    return tests
+
+
+# Prepare TARGETS file for buck
+def generate_targets(repo_path):
+    print(ColorString.info("Generating TARGETS"))
+    # parsed src.mk file
+    src_mk = parse_src_mk(repo_path)
+    # get all .cc files
+    cc_files = get_cc_files(repo_path)
+    # get tests from Makefile
+    tests = get_tests(repo_path)
+
+    if src_mk is None or cc_files is None or tests is None:
+        return False
+
+    TARGETS = TARGETSBuilder("%s/TARGETS" % repo_path)
+    # rocksdb_lib
+    TARGETS.add_library(
+        "rocksdb_lib",
+        src_mk["LIB_SOURCES"] +
+        src_mk["TOOL_LIB_SOURCES"])
+    # rocksdb_test_lib
+    TARGETS.add_library(
+        "rocksdb_test_lib",
+        src_mk.get("MOCK_LIB_SOURCES", []) +
+        src_mk.get("TEST_LIB_SOURCES", []) +
+        src_mk.get("EXP_LIB_SOURCES", []),
+        [":rocksdb_lib"])
+    # rocksdb_tools_lib
+    TARGETS.add_library(
+        "rocksdb_tools_lib",
+        src_mk.get("BENCH_LIB_SOURCES", []) +
+        ["util/testutil.cc"],
+        [":rocksdb_lib"])
+
+    # generate a test rule for every test we found in the Makefile
+    for test in sorted(tests):
+        match_src = [src for src in cc_files if ("/%s.c" % test) in src]
+        if len(match_src) == 0:
+            print(ColorString.warning("Cannot find .cc file for %s" % test))
+            continue
+        elif len(match_src) > 1:
+            print(ColorString.warning("Found more than one .cc for %s" % test))
+            print(match_src)
+            continue
+
+        assert(len(match_src) == 1)
+        is_parallel = tests[test]
+        TARGETS.register_test(test, match_src[0], is_parallel)
+
+        if test in _EXPORTED_TEST_LIBS:
+            test_library = "%s_lib" % test
+            TARGETS.add_library(test_library, match_src, [":rocksdb_test_lib"])
+    TARGETS.flush_tests()
+
+    print(ColorString.info("Generated TARGETS Summary:"))
+    print(ColorString.info("- %d libs" % TARGETS.total_lib))
+    print(ColorString.info("- %d binarys" % TARGETS.total_bin))
+    print(ColorString.info("- %d tests" % TARGETS.total_test))
+    return True
+
+
+def get_rocksdb_path():
+    # rocksdb = {script_dir}/..
+    script_dir = os.path.dirname(sys.argv[0])
+    script_dir = os.path.abspath(script_dir)
+    rocksdb_path = os.path.abspath(
+        os.path.join(script_dir, "../"))
+
+    return rocksdb_path
+
+def exit_with_error(msg):
+    print(ColorString.error(msg))
+    sys.exit(1)
+
+
+def main():
+    # Generate TARGETS file for buck
+    ok = generate_targets(get_rocksdb_path())
+    if not ok:
+        exit_with_error("Failed to generate TARGETS files")
+
+if __name__ == "__main__":
+    main()

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/buckifier/targets_builder.py
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/buckifier/targets_builder.py b/thirdparty/rocksdb/buckifier/targets_builder.py
new file mode 100644
index 0000000..7d47d2d
--- /dev/null
+++ b/thirdparty/rocksdb/buckifier/targets_builder.py
@@ -0,0 +1,65 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import targets_cfg
+import pprint
+
+# TODO(tec): replace this with PrettyPrinter
+def pretty_list(lst, indent=6):
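+    # e.g. pretty_list(["a", "b"], indent=6) returns
+    #   '\n      "a",\n      "b",\n    '
+    # i.e. a quoted, comma-separated fragment ready to splice between [ and ].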
+    if lst is None or len(lst) == 0:
+        return ""
+
+    if len(lst) == 1:
+        return "\"%s\"" % lst[0]
+
+    separator = "\",\n%s\"" % (" " * indent)
+    res = separator.join(lst)
+    res = "\n" + (" " * indent) + "\"" + res + "\",\n" + (" " * (indent - 2))
+    return res
+
+
+class TARGETSBuilder:
+    def __init__(self, path):
+        self.path = path
+        self.targets_file = open(path, 'w')
+        self.targets_file.write(targets_cfg.rocksdb_target_header)
+        self.total_lib = 0
+        self.total_bin = 0
+        self.total_test = 0
+        self.tests_cfg = []
+
+    def __del__(self):
+        self.targets_file.close()
+
+    def add_library(self, name, srcs, deps=None, headers=None):
+        if headers is None:
+            headers = "AutoHeaders.RECURSIVE_GLOB"
+        self.targets_file.write(targets_cfg.library_template % (
+            name,
+            headers,
+            pretty_list(srcs),
+            pretty_list(deps)))
+        self.total_lib = self.total_lib + 1
+
+    def add_binary(self, name, srcs, deps=None):
+        self.targets_file.write(targets_cfg.binary_template % (
+            name,
+            pretty_list(srcs),
+            pretty_list(deps)))
+        self.total_bin = self.total_bin + 1
+
+    def register_test(self, test_name, src, is_parallel):
+        exec_mode = "serial"
+        if is_parallel:
+            exec_mode = "parallel"
+        self.tests_cfg.append([test_name, str(src), str(exec_mode)])
+
+        self.total_test = self.total_test + 1
+
+    def flush_tests(self):
+        self.targets_file.write(targets_cfg.unittests_template % (
+            pprint.PrettyPrinter().pformat(self.tests_cfg)
+        ))
+
+        self.tests_cfg = []

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/buckifier/targets_cfg.py
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/buckifier/targets_cfg.py b/thirdparty/rocksdb/buckifier/targets_cfg.py
new file mode 100644
index 0000000..33023a5
--- /dev/null
+++ b/thirdparty/rocksdb/buckifier/targets_cfg.py
@@ -0,0 +1,124 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+rocksdb_target_header = """
+import os
+
+TARGETS_PATH = os.path.dirname(__file__)
+REPO_PATH = "rocksdb/src/"
+BUCK_BINS = "buck-out/gen/" + REPO_PATH
+TEST_RUNNER = REPO_PATH + "buckifier/rocks_test_runner.sh"
+rocksdb_compiler_flags = [
+  "-fno-builtin-memcmp",
+  "-DROCKSDB_PLATFORM_POSIX",
+  "-DROCKSDB_LIB_IO_POSIX",
+  "-DROCKSDB_FALLOCATE_PRESENT",
+  "-DROCKSDB_MALLOC_USABLE_SIZE",
+  "-DROCKSDB_RANGESYNC_PRESENT",
+  "-DROCKSDB_SCHED_GETCPU_PRESENT",
+  "-DROCKSDB_SUPPORT_THREAD_LOCAL",
+  "-DOS_LINUX",
+  # Flags to enable libs we include
+  "-DSNAPPY",
+  "-DZLIB",
+  "-DBZIP2",
+  "-DLZ4",
+  "-DZSTD",
+  "-DGFLAGS=gflags",
+  "-DNUMA",
+  "-DTBB",
+  # Needed to compile in fbcode
+  "-Wno-expansion-to-defined",
+]
+
+rocksdb_external_deps = [
+  ('bzip2', None, 'bz2'),
+  ('snappy', None, "snappy"),
+  ('zlib', None, 'z'),
+  ('gflags', None, 'gflags'),
+  ('lz4', None, 'lz4'),
+  ('zstd', None),
+  ('tbb', None),
+  ("numa", None, "numa"),
+  ("googletest", None, "gtest"),
+]
+
+rocksdb_preprocessor_flags = [
+  # Directories with files for #include
+  "-I" + REPO_PATH + "include/",
+  "-I" + REPO_PATH,
+]
+
+rocksdb_arch_preprocessor_flags = {
+  "x86_64": ["-DHAVE_SSE42"],
+}
+"""
+
+
+library_template = """
+cpp_library(
+    name = "%s",
+    headers = %s,
+    srcs = [%s],
+    deps = [%s],
+    preprocessor_flags = rocksdb_preprocessor_flags,
+    arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+    compiler_flags = rocksdb_compiler_flags,
+    external_deps = rocksdb_external_deps,
+)
+"""
+
+binary_template = """
+cpp_binary(
+  name = "%s",
+  srcs = [%s],
+  deps = [%s],
+  preprocessor_flags = rocksdb_preprocessor_flags,
+  arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+  compiler_flags = rocksdb_compiler_flags,
+  external_deps = rocksdb_external_deps,
+)
+"""
+
+unittests_template = """
+# [test_name, test_src, test_type]
+ROCKS_TESTS = %s
+
+
+# Generate a test rule for each entry in ROCKS_TESTS
+for test_cfg in ROCKS_TESTS:
+    test_name = test_cfg[0]
+    test_cc = test_cfg[1]
+    ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
+    test_bin = test_name + "_bin"
+
+    cpp_binary (
+      name = test_bin,
+      srcs = [test_cc],
+      deps = [":rocksdb_test_lib"],
+      preprocessor_flags = rocksdb_preprocessor_flags,
+      arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
+      compiler_flags = rocksdb_compiler_flags,
+      external_deps = rocksdb_external_deps,
+    )
+
+    custom_unittest(
+      name = test_name,
+      type = ttype,
+      deps = [":" + test_bin],
+      command = [TEST_RUNNER, BUCK_BINS + test_bin]
+    )
+
+custom_unittest(
+    name = "make_rocksdbjavastatic",
+    type = "simple",
+    command = ["internal_repo_rocksdb/make_rocksdbjavastatic.sh"],
+)
+
+custom_unittest(
+    name = "make_rocksdb_lite_release",
+    type = "simple",
+    command = ["internal_repo_rocksdb/make_rocksdb_lite_release.sh"],
+)
+"""

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/buckifier/util.py
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/buckifier/util.py b/thirdparty/rocksdb/buckifier/util.py
new file mode 100644
index 0000000..350b733
--- /dev/null
+++ b/thirdparty/rocksdb/buckifier/util.py
@@ -0,0 +1,107 @@
+"""
+This module keeps commonly used components.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import subprocess
+import os
+import time
+
+class ColorString:
+    """ Generate colorful strings on terminal """
+    HEADER = '\033[95m'
+    BLUE = '\033[94m'
+    GREEN = '\033[92m'
+    WARNING = '\033[93m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+
+    @staticmethod
+    def _make_color_str(text, color):
+        return "".join([color, text.encode('utf-8'), ColorString.ENDC])
+
+    @staticmethod
+    def ok(text):
+        if ColorString.is_disabled:
+            return text
+        return ColorString._make_color_str(text, ColorString.GREEN)
+
+    @staticmethod
+    def info(text):
+        if ColorString.is_disabled:
+            return text
+        return ColorString._make_color_str(text, ColorString.BLUE)
+
+    @staticmethod
+    def header(text):
+        if ColorString.is_disabled:
+            return text
+        return ColorString._make_color_str(text, ColorString.HEADER)
+
+    @staticmethod
+    def error(text):
+        if ColorString.is_disabled:
+            return text
+        return ColorString._make_color_str(text, ColorString.FAIL)
+
+    @staticmethod
+    def warning(text):
+        if ColorString.is_disabled:
+            return text
+        return ColorString._make_color_str(text, ColorString.WARNING)
+
+    is_disabled = False
+
+
+def run_shell_command(shell_cmd, cmd_dir=None):
+    """ Run a single shell command.
+        @returns a tuple of shell command return code, stdout, stderr """
+
+    if cmd_dir is not None and not os.path.exists(cmd_dir):
+        run_shell_command("mkdir -p %s" % cmd_dir)
+
+    start = time.time()
+    print("\t>>> Running: " + shell_cmd)
+    p = subprocess.Popen(shell_cmd,
+                         shell=True,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE,
+                         cwd=cmd_dir)
+    stdout, stderr = p.communicate()
+    end = time.time()
+
+    # Report time if we spent more than 5 minutes executing a command
+    execution_time = end - start
+    if execution_time > (60 * 5):
+        mins = (execution_time // 60)
+        secs = (execution_time % 60)
+        print("\t>time spent: %d minutes %d seconds" % (mins, secs))
+
+    return p.returncode, stdout, stderr
+
+
+def run_shell_commands(shell_cmds, cmd_dir=None, verbose=False):
+    """ Execute a sequence of shell commands, which is equivalent to
+        running `cmd1 && cmd2 && cmd3`
+        @returns boolean indication if all commands succeeds.
+    """
+
+    if cmd_dir:
+        print("\t=== Set current working directory => %s" % cmd_dir)
+
+    for shell_cmd in shell_cmds:
+        ret_code, stdout, stderr = run_shell_command(shell_cmd, cmd_dir)
+        if stdout:
+            if verbose or ret_code != 0:
+                print(ColorString.info("stdout: \n"), stdout)
+        if stderr:
+            # Content on stderr is not necessarily an error message.
+            if verbose or ret_code != 0:
+                print(ColorString.error("stderr: \n"), stderr)
+        if ret_code != 0:
+            return False
+
+    return True
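
A hedged usage sketch of the helpers above; the commands and directory are illustrative, and the import assumes the module is reachable as buckifier.util:

    # Sketch only: driving ColorString and run_shell_commands together.
    from buckifier.util import ColorString, run_shell_commands

    ColorString.is_disabled = True  # e.g. when stdout is not a terminal

    # Equivalent to running `make clean && make -j4` in /tmp/build, which
    # run_shell_command creates on demand if it does not exist.
    ok = run_shell_commands(["make clean", "make -j4"],
                            cmd_dir="/tmp/build", verbose=True)
    print(ColorString.ok("build passed") if ok
          else ColorString.error("build failed"))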

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/build_tools/RocksDBCommonHelper.php
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/build_tools/RocksDBCommonHelper.php b/thirdparty/rocksdb/build_tools/RocksDBCommonHelper.php
new file mode 100644
index 0000000..41d1e21
--- /dev/null
+++ b/thirdparty/rocksdb/build_tools/RocksDBCommonHelper.php
@@ -0,0 +1,365 @@
+<?php
+// Copyright 2004-present Facebook. All Rights Reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+
+// Names of the environment variables that must be set by whatever triggers
+// continuous runs, so that the code at the end of this file executes and a
+// Sandcastle run starts.
+define("ENV_POST_RECEIVE_HOOK", "POST_RECEIVE_HOOK");
+define("ENV_HTTPS_APP_VALUE", "HTTPS_APP_VALUE");
+define("ENV_HTTPS_TOKEN_VALUE", "HTTPS_TOKEN_VALUE");
+
+define("PRIMARY_TOKEN_FILE", '/home/krad/.sandcastle');
+define("CONT_RUN_ALIAS", "leveldb");
+
+//////////////////////////////////////////////////////////////////////
+/*  Run tests in sandcastle */
+function postURL($diffID, $url) {
+  assert(strlen($diffID) > 0);
+  assert(is_numeric($diffID));
+  assert(strlen($url) > 0);
+
+  $cmd = 'echo \'{"diff_id": ' . $diffID . ', '
+         . '"name":"click here for sandcastle tests for D' . $diffID . '", '
+         . '"link":"' . $url . '"}\' | '
+         . 'arc call-conduit '
+         . 'differential.updateunitresults';
+  shell_exec($cmd);
+}
+
+function buildUpdateTestStatusCmd($diffID, $test, $status) {
+  assert(strlen($diffID) > 0);
+  assert(is_numeric($diffID));
+  assert(strlen($test) > 0);
+  assert(strlen($status) > 0);
+
+  $cmd = 'echo \'{"diff_id": ' . $diffID . ', '
+         . '"name":"' . $test . '", '
+         . '"result":"' . $status . '"}\' | '
+         . 'arc call-conduit '
+         . 'differential.updateunitresults';
+  return $cmd;
+}
+
+function updateTestStatus($diffID, $test) {
+  assert(strlen($diffID) > 0);
+  assert(is_numeric($diffID));
+  assert(strlen($test) > 0);
+
+  shell_exec(buildUpdateTestStatusCmd($diffID, $test, "waiting"));
+}
+
+function getSteps($applyDiff, $diffID, $username, $test) {
+  assert(strlen($username) > 0);
+  assert(strlen($test) > 0);
+
+  if ($applyDiff) {
+    assert(strlen($diffID) > 0);
+    assert(is_numeric($diffID));
+
+    $arcrc_content = (PHP_OS == "Darwin" ?
+        exec("cat ~/.arcrc | gzip -f | base64") :
+            exec("cat ~/.arcrc | gzip -f | base64 -w0"));
+    assert(strlen($arcrc_content) > 0);
+
+    // Sandcastle machines don't have arc set up. We copy the user certificate
+    // and use it to authenticate in Sandcastle.
+    $setup = array(
+      "name" => "Setup arcrc",
+      "shell" => "echo " . $arcrc_content . " | base64 --decode"
+                 . " | gzip -d > ~/.arcrc",
+      "user" => "root"
+    );
+
+    // arc demands certain permissions on its config file.
+    // Also fix the sticky-bit issue in Sandcastle.
+    $fix_permission = array(
+      "name" => "Fix environment",
+      "shell" => "chmod 600 ~/.arcrc && chmod +t /dev/shm",
+      "user" => "root"
+    );
+
+    // Construct the steps in the order of execution.
+    $steps[] = $setup;
+    $steps[] = $fix_permission;
+  }
+
+  // fbcode is a sub-repo. We cannot patch until we add it to the ignore list;
+  // otherwise Git thinks it is an uncommitted change.
+  $fix_git_ignore = array(
+    "name" => "Fix git ignore",
+    "shell" => "echo fbcode >> .git/info/exclude",
+    "user" => "root"
+  );
+
+  // This fixes "FATAL: ThreadSanitizer can not mmap the shadow memory"
+  // Source:
+  // https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual#FAQ
+  $fix_kernel_issue = array(
+    "name" => "Fix kernel issue with tsan",
+    "shell" => "echo 2 >/proc/sys/kernel/randomize_va_space",
+    "user" => "root"
+  );
+
+  $steps[] = $fix_git_ignore;
+  $steps[] = $fix_kernel_issue;
+
+  // This will be the command used to execute particular type of tests.
+  $cmd = "";
+
+  if ($applyDiff) {
+    // Patch the code (keep your fingers crossed).
+    $patch = array(
+      "name" => "Patch " . $diffID,
+      "shell" => "arc --arcrc-file ~/.arcrc "
+                  . "patch --nocommit --diff " . $diffID,
+      "user" => "root"
+    );
+
+    $steps[] = $patch;
+
+    updateTestStatus($diffID, $test);
+    $cmd = buildUpdateTestStatusCmd($diffID, $test, "running") . "; ";
+  }
+
+  // Run the actual command.
+  $cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " . $test
+           . "; exit_code=$?; ";
+
+  if ($applyDiff) {
+    $cmd = $cmd . "([[ \$exit_code -eq 0 ]] &&"
+                . buildUpdateTestStatusCmd($diffID, $test, "pass") . ")"
+                . "||" . buildUpdateTestStatusCmd($diffID, $test, "fail")
+                . "; ";
+  }
+
+  // Shell command to sort the tests by exit code and print
+  // the output of the log files.
+  $cat_sorted_logs = "
+    while read code log_file;
+      do echo \"################ cat \$log_file [exit_code : \$code] ################\";
+      cat \$log_file;
+    done < <(tail -n +2 LOG | sort -k7,7n -k4,4gr | awk '{print \$7,\$NF}')";
+
+  // Shell command to cat all log files
+  $cat_all_logs = "for f in `ls t/!(run-*)`; do echo \$f;cat \$f; done";
+
+  // If a LOG file exists, use it to cat the log files sorted by exit code;
+  // otherwise cat everything.
+  $logs_cmd = "if [ -f LOG ]; then {$cat_sorted_logs}; else {$cat_all_logs}; fi";
+
+  $cmd = $cmd . " cat /tmp/precommit-check.log"
+              . "; shopt -s extglob; {$logs_cmd}"
+              . "; shopt -u extglob; [[ \$exit_code -eq 0 ]]";
+  assert(strlen($cmd) > 0);
+
+  $run_test = array(
+    "name" => "Run " . $test,
+    "shell" => $cmd,
+    "user" => "root",
+    "parser" => "python build_tools/error_filter.py " . $test,
+  );
+
+  $steps[] = $run_test;
+
+  if ($applyDiff) {
+    // Clean up the user arc config we are using.
+    $cleanup = array(
+      "name" => "Arc cleanup",
+      "shell" => "rm -f ~/.arcrc",
+      "user" => "root"
+    );
+
+    $steps[] = $cleanup;
+  }
+
+  assert(count($steps) > 0);
+  return $steps;
+}
+
+function getSandcastleConfig() {
+  $sandcastle_config = array();
+
+  $cwd = getcwd();
+  $cwd_token_file = "{$cwd}/.sandcastle";
+  // This is the case where we're executed from a continuous run. Fetch the
+  // values from the environment.
+  if (getenv(ENV_POST_RECEIVE_HOOK)) {
+    $sandcastle_config[0] = getenv(ENV_HTTPS_APP_VALUE);
+    $sandcastle_config[1] = getenv(ENV_HTTPS_TOKEN_VALUE);
+  } else {
+    // This is a typical `[p]arc diff` case. Fetch the values from the specific
+    // configuration files.
+    for ($i = 0; $i < 50; $i++) {
+      if (file_exists(PRIMARY_TOKEN_FILE) ||
+          file_exists($cwd_token_file)) {
+        break;
+      }
+      // If we failed to fetch the tokens, sleep for 0.2 seconds and try again.
+      usleep(200000);
+    }
+    assert(file_exists(PRIMARY_TOKEN_FILE) ||
+           file_exists($cwd_token_file));
+
+    // Try the primary location first, followed by a secondary.
+    if (file_exists(PRIMARY_TOKEN_FILE)) {
+      $cmd = 'cat ' . PRIMARY_TOKEN_FILE;
+    } else {
+      $cmd = 'cat ' . $cwd_token_file;
+    }
+
+    assert(strlen($cmd) > 0);
+    $sandcastle_config = explode(':', rtrim(shell_exec($cmd)));
+  }
+
+  // In this case be very explicit about the implications.
+  if (count($sandcastle_config) != 2) {
+    echo "Sandcastle configuration files don't contain valid information " .
+         "or the necessary environment variables aren't defined. Unable " .
+         "to validate the code changes.";
+    exit(1);
+  }
+
+  assert(strlen($sandcastle_config[0]) > 0);
+  assert(strlen($sandcastle_config[1]) > 0);
+  assert(count($sandcastle_config) > 0);
+
+  return $sandcastle_config;
+}
+
+// This function can be called either from the `[p]arc diff` command or during
+// the Git post-receive hook.
+function startTestsInSandcastle($applyDiff, $workflow, $diffID) {
+  // The default assert options don't terminate on failure, but that's what we
+  // want here: we intentionally use assertions as "terminate on failure"
+  // invariants.
+  assert_options(ASSERT_BAIL, true);
+
+  // In the case of a diff, we'll send notifications to the author. Otherwise
+  // they'll go to the entire team, because failures indicate that build
+  // quality has regressed.
+  $username = $applyDiff ? exec("whoami") : CONT_RUN_ALIAS;
+  assert(strlen($username) > 0);
+
+  if ($applyDiff) {
+    assert($workflow);
+    assert(strlen($diffID) > 0);
+    assert(is_numeric($diffID));
+  }
+
+  // List of tests we want to run in Sandcastle.
+  $tests = array("unit", "unit_non_shm", "unit_481", "clang_unit", "tsan",
+                 "asan", "lite_test", "valgrind", "release", "release_481",
+                 "clang_release", "clang_analyze", "code_cov",
+                 "java_build", "no_compression", "unity", "ubsan");
+
+  $send_email_template = array(
+    'type' => 'email',
+    'triggers' => array('fail'),
+    'emails' => array($username . '@fb.com'),
+  );
+
+  // Construct a job definition for each test and add it to the master plan.
+  foreach ($tests as $test) {
+    $stepName = "RocksDB diff " . $diffID . " test " . $test;
+
+    if (!$applyDiff) {
+      $stepName = "RocksDB continuous integration test " . $test;
+    }
+
+    $arg[] = array(
+      "name" => $stepName,
+      "report" => array($send_email_template),
+      "steps" => getSteps($applyDiff, $diffID, $username, $test)
+    );
+  }
+
+  // We cannot submit the parallel-execution master plan to Sandcastle
+  // directly and need to supply the job plan as a determinator. So we
+  // construct a small job that emits the master job plan, which Sandcastle
+  // will parse and execute. Why compress the job definitions? Otherwise we
+  // run over the maximum string size.
+  $cmd = "echo " . base64_encode(json_encode($arg))
+         . (PHP_OS == "Darwin" ?
+             " | gzip -f | base64" :
+                 " | gzip -f | base64 -w0");
+  assert(strlen($cmd) > 0);
+
+  $arg_encoded = shell_exec($cmd);
+  assert(strlen($arg_encoded) > 0);
+
+  $runName = "Run diff " . $diffID . "for user " . $username;
+
+  if (!$applyDiff) {
+    $runName = "RocksDB continuous integration build and test run";
+  }
+
+  $command = array(
+    "name" => $runName,
+    "steps" => array()
+  );
+
+  $command["steps"][] = array(
+    "name" => "Generate determinator",
+    "shell" => "echo " . $arg_encoded . " | base64 --decode | gzip -d"
+               . " | base64 --decode",
+    "determinator" => true,
+    "user" => "root"
+  );
+
+  // Submit to Sandcastle.
+  $url = 'https://interngraph.intern.facebook.com/sandcastle/create';
+
+  $job = array(
+    'command' => 'SandcastleUniversalCommand',
+    'args' => $command,
+    'capabilities' => array(
+      'vcs' => 'rocksdb-int-git',
+      'type' => 'lego',
+    ),
+    'hash' => 'origin/master',
+    'user' => $username,
+    'alias' => 'rocksdb-precommit',
+    'tags' => array('rocksdb'),
+    'description' => 'Rocksdb precommit job',
+  );
+
+  // Fetch the configuration necessary to submit a successful HTTPS request.
+  $sandcastle_config = getSandcastleConfig();
+
+  $app = $sandcastle_config[0];
+  $token = $sandcastle_config[1];
+
+  $cmd = 'curl -s -k -F app=' . $app . ' '
+          . '-F token=' . $token . ' -F job=\'' . json_encode($job)
+          .'\' "' . $url . '"';
+
+  $output = shell_exec($cmd);
+  assert(strlen($output) > 0);
+
+  // Extract Sandcastle URL from the response.
+  preg_match('/url": "(.+)"/', $output, $sandcastle_url);
+
+  assert(count($sandcastle_url) > 0, "Unable to submit Sandcastle request.");
+  assert(strlen($sandcastle_url[1]) > 0, "Unable to extract Sandcastle URL.");
+
+  if ($applyDiff) {
+    echo "\nSandcastle URL: " . $sandcastle_url[1] . "\n";
+    // Ask Phabricator to display it on the diff UI.
+    postURL($diffID, $sandcastle_url[1]);
+  } else {
+    echo "Continuous integration started Sandcastle tests. You can look at ";
+    echo "the progress at:\n" . $sandcastle_url[1] . "\n";
+  }
+}
+
+// The continuous-run script will set the environment variable, and based on
+// that we'll trigger the execution of tests in Sandcastle. In that case we
+// don't need to apply any diffs, and there's no associated workflow either.
+if (getenv(ENV_POST_RECEIVE_HOOK)) {
+  startTestsInSandcastle(
+    false /* $applyDiff */,
+    NULL /* $workflow */,
+    NULL /* $diffID */);
+}
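
The "Generate determinator" step works because the encoding built above is exactly reversible: the job plan is JSON-encoded, base64-encoded, gzip-compressed, and base64-encoded once more, and the generated shell step peels the layers off in reverse. A small Python sketch of that round trip, with a made-up payload:

    # Sketch of the determinator encoding used above
    # (json -> base64 -> gzip -> base64) and its inverse.
    import base64, gzip, json

    job_plan = [{"name": "RocksDB diff 12345 test unit", "steps": []}]  # hypothetical

    inner = base64.b64encode(json.dumps(job_plan).encode("utf-8"))
    wire = base64.b64encode(gzip.compress(inner))  # what `... | gzip -f | base64` yields

    # Mirrors `base64 --decode | gzip -d | base64 --decode` from the step above.
    decoded = json.loads(base64.b64decode(gzip.decompress(base64.b64decode(wire))))
    assert decoded == job_plan  # lossless round trip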

http://git-wip-us.apache.org/repos/asf/nifi-minifi-cpp/blob/48867732/thirdparty/rocksdb/build_tools/amalgamate.py
----------------------------------------------------------------------
diff --git a/thirdparty/rocksdb/build_tools/amalgamate.py b/thirdparty/rocksdb/build_tools/amalgamate.py
new file mode 100755
index 0000000..548b1e8
--- /dev/null
+++ b/thirdparty/rocksdb/build_tools/amalgamate.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+
+# amalgamate.py creates an amalgamation from a unity build.
+# It can be run with either Python 2 or 3.
+# An amalgamation consists of a header that includes the contents of all public
+# headers and a source file that includes the contents of all source files and
+# private headers.
+#
+# This script works by starting with the unity build file and recursively expanding
+# #include directives. If the #include is found in a public include directory,
+# that header is expanded into the amalgamation header.
+#
+# A particular header is only expanded once, so this script will
+# break if there are multiple inclusions of the same header that are expected to
+# expand differently. Similarly, this type of code causes issues:
+#
+# #ifdef FOO
+#   #include "bar.h"
+#   // code here
+# #else
+#   #include "bar.h"            // oops, doesn't get expanded
+#   // different code here
+# #endif
+#
+# The solution is to move the include out of the #ifdef.
+
+from __future__ import print_function
+
+import argparse
+from os import path
+import re
+import sys
+
+include_re = re.compile('^[ \t]*#include[ \t]+"(.*)"[ \t]*$')
+included = set()
+excluded = set()
+
+def find_header(name, abs_path, include_paths):
+    samedir = path.join(path.dirname(abs_path), name)
+    if path.exists(samedir):
+        return samedir
+    for include_path in include_paths:
+        include_path = path.join(include_path, name)
+        if path.exists(include_path):
+            return include_path
+    return None
+
+def expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths):
+    if include_path in included:
+        return False
+
+    included.add(include_path)
+    with open(include_path) as f:
+        print('#line 1 "{}"'.format(include_path), file=source_out)
+        process_file(f, include_path, source_out, header_out, include_paths, public_include_paths)
+    return True
+
+def process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths):
+    for (line, text) in enumerate(f):
+        m = include_re.match(text)
+        if m:
+            filename = m.groups()[0]
+            # first check private headers
+            include_path = find_header(filename, abs_path, include_paths)
+            if include_path:
+                if include_path in excluded:
+                    source_out.write(text)
+                    expanded = False
+                else:
+                    expanded = expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths)
+            else:
+                # now try public headers
+                include_path = find_header(filename, abs_path, public_include_paths)
+                if include_path:
+                    # found public header
+                    expanded = False
+                    if include_path in excluded:
+                        source_out.write(text)
+                    else:
+                        expand_include(include_path, f, abs_path, header_out, None, public_include_paths, [])
+                else:
+                    sys.exit("unable to find {}, included in {} on line {}".format(filename, abs_path, line + 1))
+
+            if expanded:
+                print('#line {} "{}"'.format(line+1, abs_path), file=source_out)
+        elif text != "#pragma once\n":
+            source_out.write(text)
+
+def main():
+    parser = argparse.ArgumentParser(description="Transform a unity build into an amalgamation")
+    parser.add_argument("source", help="source file")
+    parser.add_argument("-I", action="append", dest="include_paths", help="include paths for private headers")
+    parser.add_argument("-i", action="append", dest="public_include_paths", help="include paths for public headers")
+    parser.add_argument("-x", action="append", dest="excluded", help="excluded header files")
+    parser.add_argument("-o", dest="source_out", help="output C++ file", required=True)
+    parser.add_argument("-H", dest="header_out", help="output C++ header file", required=True)
+    args = parser.parse_args()
+
+    include_paths = list(map(path.abspath, args.include_paths or []))
+    public_include_paths = list(map(path.abspath, args.public_include_paths or []))
+    excluded.update(map(path.abspath, args.excluded or []))
+    filename = args.source
+    abs_path = path.abspath(filename)
+    with open(filename) as f, open(args.source_out, 'w') as source_out, open(args.header_out, 'w') as header_out:
+        print('#line 1 "{}"'.format(filename), file=source_out)
+        print('#include "{}"'.format(header_out.name), file=source_out)
+        process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths)
+
+if __name__ == "__main__":
+    main()
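
For orientation: the script is driven entirely by its flags (-I for private include paths, -i for public ones, -x to exclude headers, -o/-H for the output source and header). To make the resolution order concrete, here is a short Python sketch of what happens to one matched include line; all paths in it are hypothetical:

    # Sketch only: how an `#include "..."` line is resolved, mirroring
    # include_re and find_header above. The paths are made up.
    import re

    include_re = re.compile('^[ \t]*#include[ \t]+"(.*)"[ \t]*$')
    m = include_re.match('#include "util/coding.h"')
    assert m and m.groups()[0] == "util/coding.h"

    # For /src/rocksdb/db/db_impl.cc processed with -I /src/rocksdb,
    # find_header probes /src/rocksdb/db/util/coding.h (the including file's
    # own directory) first, then /src/rocksdb/util/coding.h, returning the
    # first path that exists; private (-I) paths are consulted before
    # public (-i) ones.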

