diff --git a/src/common/storage/src/merge.rs b/src/common/storage/src/merge.rs
index f9519f9cba819..9f4fe94080bc4 100644
--- a/src/common/storage/src/merge.rs
+++ b/src/common/storage/src/merge.rs
@@ -23,18 +23,6 @@ pub struct MutationStatus {
 }
 
 impl MutationStatus {
-    pub fn add_insert_rows(&mut self, insert_rows: u64) {
-        self.insert_rows += insert_rows;
-    }
-
-    pub fn add_deleted_rows(&mut self, deleted_rows: u64) {
-        self.deleted_rows += deleted_rows
-    }
-
-    pub fn add_update_rows(&mut self, update_rows: u64) {
-        self.update_rows += update_rows
-    }
-
     pub fn merge_mutation_status(&mut self, mutation_status: MutationStatus) {
         self.insert_rows += mutation_status.insert_rows;
         self.deleted_rows += mutation_status.deleted_rows;
diff --git a/src/query/service/src/interpreters/hook/compact_hook.rs b/src/query/service/src/interpreters/hook/compact_hook.rs
index 8f29626214218..e2fd0f300b88c 100644
--- a/src/query/service/src/interpreters/hook/compact_hook.rs
+++ b/src/query/service/src/interpreters/hook/compact_hook.rs
@@ -106,8 +106,10 @@ async fn do_hook_compact(
     };
 
     // keep the original progress value
-    let progress = ctx.get_write_progress();
-    let progress_value = progress.as_ref().get_values();
+    let write_progress = ctx.get_write_progress();
+    let write_progress_value = write_progress.as_ref().get_values();
+    let scan_progress = ctx.get_scan_progress();
+    let scan_progress_value = scan_progress.as_ref().get_values();
 
     match GlobalIORuntime::instance().block_on({
         compact_table(ctx, compact_target, compaction_limits, lock_opt)
@@ -119,7 +121,8 @@
         }
 
         // reset the progress value
-        progress.set(&progress_value);
+        write_progress.set(&write_progress_value);
+        scan_progress.set(&scan_progress_value);
 
         metrics_inc_compact_hook_compact_time_ms(&trace_ctx.operation_name, compact_start_at.elapsed().as_millis() as u64);
     }
diff --git a/src/query/service/src/interpreters/interpreter_insert.rs b/src/query/service/src/interpreters/interpreter_insert.rs
index f9d951033bd9b..1a057fdaf79e7 100644
--- a/src/query/service/src/interpreters/interpreter_insert.rs
+++ b/src/query/service/src/interpreters/interpreter_insert.rs
@@ -18,7 +18,11 @@ use databend_common_catalog::lock::LockTableOption;
 use databend_common_catalog::table::TableExt;
 use databend_common_exception::ErrorCode;
 use databend_common_exception::Result;
+use databend_common_expression::types::UInt64Type;
+use databend_common_expression::DataBlock;
 use databend_common_expression::DataSchema;
+use databend_common_expression::FromData;
+use databend_common_expression::SendableDataBlockStream;
 use databend_common_pipeline_sources::AsyncSourcer;
 use databend_common_sql::executor::physical_plans::DistributedInsertSelect;
 use databend_common_sql::executor::physical_plans::MutationKind;
@@ -43,6 +47,7 @@ use crate::pipelines::ValueSource;
 use crate::schedulers::build_query_pipeline_without_render_result_set;
 use crate::sessions::QueryContext;
 use crate::sessions::TableContext;
+use crate::stream::DataBlockStream;
 
 pub struct InsertInterpreter {
     ctx: Arc<QueryContext>,
@@ -259,4 +264,13 @@
 
         Ok(build_res)
     }
+
+    fn inject_result(&self) -> Result<SendableDataBlockStream> {
+        let binding = self.ctx.get_mutation_status();
+        let status = binding.read();
+        let blocks = vec![DataBlock::new_from_columns(vec![UInt64Type::from_data(
+            vec![status.insert_rows],
+        )])];
+        Ok(Box::pin(DataBlockStream::create(None, blocks)))
+    }
 }
diff --git a/src/query/service/src/interpreters/interpreter_mutation.rs b/src/query/service/src/interpreters/interpreter_mutation.rs
index 6fc39817c2b4a..fe1dde3c7c874 100644
--- a/src/query/service/src/interpreters/interpreter_mutation.rs
+++ b/src/query/service/src/interpreters/interpreter_mutation.rs
@@ -14,18 +14,20 @@
 
 use std::sync::Arc;
 
-use databend_common_base::base::ProgressValues;
 use databend_common_catalog::lock::LockTableOption;
 use databend_common_catalog::plan::Partitions;
 use databend_common_catalog::plan::PartitionsShuffleKind;
 use databend_common_catalog::table::TableExt;
 use databend_common_exception::ErrorCode;
 use databend_common_exception::Result;
-use databend_common_expression::types::UInt32Type;
+use databend_common_expression::types::UInt64Type;
 use databend_common_expression::DataBlock;
 use databend_common_expression::DataSchemaRef;
 use databend_common_expression::FromData;
 use databend_common_expression::SendableDataBlockStream;
+use databend_common_pipeline_core::processors::ProcessorPtr;
+use databend_common_pipeline_sinks::EmptySink;
+use databend_common_pipeline_sources::EmptySource;
 use databend_common_sql::binder::MutationStrategy;
 use databend_common_sql::binder::MutationType;
 use databend_common_sql::executor::physical_plans::create_push_down_filters;
@@ -36,6 +38,7 @@ use databend_common_sql::executor::PhysicalPlanBuilder;
 use databend_common_sql::optimizer::SExpr;
 use databend_common_sql::plans;
 use databend_common_sql::plans::Mutation;
+use databend_common_storage::MutationStatus;
 use databend_common_storages_factory::Table;
 use databend_common_storages_fuse::operations::TruncateMode;
 use databend_common_storages_fuse::FuseTable;
@@ -249,14 +252,10 @@
         let mut columns = Vec::new();
         for field in self.schema.as_ref().fields() {
             match field.name().as_str() {
-                plans::INSERT_NAME => {
-                    columns.push(UInt32Type::from_data(vec![status.insert_rows as u32]))
-                }
-                plans::UPDATE_NAME => {
-                    columns.push(UInt32Type::from_data(vec![status.update_rows as u32]))
-                }
+                plans::INSERT_NAME => columns.push(UInt64Type::from_data(vec![status.insert_rows])),
+                plans::UPDATE_NAME => columns.push(UInt64Type::from_data(vec![status.update_rows])),
                 plans::DELETE_NAME => {
-                    columns.push(UInt32Type::from_data(vec![status.deleted_rows as u32]))
+                    columns.push(UInt64Type::from_data(vec![status.deleted_rows]))
                 }
                 _ => unreachable!(),
             }
@@ -270,8 +269,6 @@
         fuse_table: &FuseTable,
         snapshot: &Option<Arc<TableSnapshot>>,
     ) -> Result<Option<PipelineBuildResult>> {
-        let mut build_res = PipelineBuildResult::create();
-
        // Check if the filter is a constant.
         let mut truncate_table = mutation.truncate_table;
         if let Some(filter) = &mutation.direct_filter
@@ -288,7 +285,7 @@
                 truncate_table = true;
             } else if !filter_result {
                 // The update/delete condition is always false, do nothing.
-                return Ok(Some(build_res));
+                return self.no_effect_mutation();
             }
         }
 
@@ -299,20 +296,21 @@
         // Check if table is empty.
         let Some(snapshot) = snapshot else {
             // No snapshot, no mutation.
-            return Ok(Some(build_res));
+            return self.no_effect_mutation();
         };
         if snapshot.summary.row_count == 0 {
             // Empty snapshot, no mutation.
-            return Ok(Some(build_res));
+            return self.no_effect_mutation();
         }
 
         if mutation.mutation_type == MutationType::Delete {
             if truncate_table {
-                let progress_values = ProgressValues {
-                    rows: snapshot.summary.row_count as usize,
-                    bytes: snapshot.summary.uncompressed_byte_size as usize,
-                };
-                self.ctx.get_write_progress().incr(&progress_values);
+                let mut build_res = PipelineBuildResult::create();
+                self.ctx.add_mutation_status(MutationStatus {
+                    insert_rows: 0,
+                    deleted_rows: snapshot.summary.row_count,
+                    update_rows: 0,
+                });
                 // deleting the whole table... just a truncate
                 fuse_table
                     .do_truncate(
@@ -330,6 +328,15 @@
         }
     }
 
+    fn no_effect_mutation(&self) -> Result<Option<PipelineBuildResult>> {
+        let mut build_res = PipelineBuildResult::create();
+        build_res.main_pipeline.add_source(EmptySource::create, 1)?;
+        build_res
+            .main_pipeline
+            .add_sink(|input| Ok(ProcessorPtr::create(EmptySink::create(input))))?;
+        Ok(Some(build_res))
+    }
+
     async fn mutation_source_partitions(
         &self,
         mutation: &Mutation,
diff --git a/src/query/service/tests/it/servers/flight_sql/testdata/query.txt b/src/query/service/tests/it/servers/flight_sql/testdata/query.txt
index f4179e58e52d6..193d2fd23d006 100644
--- a/src/query/service/tests/it/servers/flight_sql/testdata/query.txt
+++ b/src/query/service/tests/it/servers/flight_sql/testdata/query.txt
@@ -46,7 +46,11 @@ create table test1(a int, b string)
 ---------- Input ----------
 insert into table test1(a, b) values (1, 'x'), (2, 'y')
 ---------- Output ---------
-2
++-------------------------+
+| number of rows inserted |
++-------------------------+
+| 2                       |
++-------------------------+
 ---------- Input ----------
 select * from test1
 ---------- Output ---------
diff --git a/src/query/service/tests/it/servers/http/http_query_handlers.rs b/src/query/service/tests/it/servers/http/http_query_handlers.rs
index 76bbee3e9f42f..fbaa1d814436e 100644
--- a/src/query/service/tests/it/servers/http/http_query_handlers.rs
+++ b/src/query/service/tests/it/servers/http/http_query_handlers.rs
@@ -710,7 +710,7 @@ async fn test_insert() -> Result<()> {
 
     let sqls = vec![
         ("create table t(a int) engine=fuse", 0, 0),
-        ("insert into t(a) values (1),(2)", 0, 2),
+        ("insert into t(a) values (1),(2)", 1, 2),
         ("select * from t", 2, 0),
     ];
 
@@ -1323,7 +1323,7 @@
         ),
         (
             "INSERT INTO objects_test1 VALUES (1, parse_json('{\"a\": 1, \"b\": [1,2,3]}'), parse_json('{\"1\": 2}'));",
-            0,
+            1,
         ),
         (
             "SELECT id, object_keys(obj), object_keys(var) FROM objects_test1;",
@@ -1349,9 +1349,9 @@ async fn test_multi_partition() -> Result<()> {
 
     let sqls = vec![
         ("create table tb2(id int, c1 varchar) Engine=Fuse;", 0),
-        ("insert into tb2 values(1, 'mysql'),(1, 'databend')", 0),
-        ("insert into tb2 values(2, 'mysql'),(2, 'databend')", 0),
-        ("insert into tb2 values(3, 'mysql'),(3, 'databend')", 0),
+        ("insert into tb2 values(1, 'mysql'),(1, 'databend')", 1),
+        ("insert into tb2 values(2, 'mysql'),(2, 'databend')", 1),
+        ("insert into tb2 values(3, 'mysql'),(3, 'databend')", 1),
         ("select * from tb2;", 6),
     ];
 
@@ -1682,7 +1682,7 @@ async fn test_has_result_set() -> Result<()> {
 
     let sqls = vec![
         ("create table tb2(id int, c1 varchar) Engine=Fuse;", false),
-        ("insert into tb2 values(1, 'mysql'),(1, 'databend')", false),
+        ("insert into tb2 values(1, 'mysql'),(1, 'databend')", true),
         ("select * from tb2;", true),
     ];
 
diff --git a/src/query/sql/src/planner/plans/insert.rs b/src/query/sql/src/planner/plans/insert.rs
index 7685f4c6c382d..a0420a28694b4 100644
--- a/src/query/sql/src/planner/plans/insert.rs
+++ b/src/query/sql/src/planner/plans/insert.rs
@@ -15,9 +15,13 @@
 use std::sync::Arc;
 
 use databend_common_ast::ast::FormatTreeNode;
+use databend_common_expression::types::DataType;
+use databend_common_expression::types::NumberDataType;
 use databend_common_expression::types::StringType;
 use databend_common_expression::DataBlock;
+use databend_common_expression::DataField;
 use databend_common_expression::DataSchemaRef;
+use databend_common_expression::DataSchemaRefExt;
 use databend_common_expression::FromData;
 use databend_common_expression::Scalar;
 use databend_common_expression::TableSchemaRef;
@@ -28,6 +32,7 @@ use serde::Serialize;
 
 use super::Plan;
 use crate::plans::CopyIntoTablePlan;
+use crate::INSERT_NAME;
 
 #[derive(Clone, Debug, EnumAsInner)]
 pub enum InsertInputSource {
@@ -116,6 +121,13 @@
         result.push(DataBlock::new_from_columns(vec![formatted_plan]));
         Ok(vec![DataBlock::concat(&result)?])
     }
+
+    pub fn schema(&self) -> DataSchemaRef {
+        DataSchemaRefExt::create(vec![DataField::new(
+            INSERT_NAME,
+            DataType::Number(NumberDataType::UInt64),
+        )])
+    }
 }
 
 pub(crate) fn format_insert_source(
diff --git a/src/query/sql/src/planner/plans/plan.rs b/src/query/sql/src/planner/plans/plan.rs
index e886fefba18f4..dba3003b023be 100644
--- a/src/query/sql/src/planner/plans/plan.rs
+++ b/src/query/sql/src/planner/plans/plan.rs
@@ -494,6 +494,7 @@
             Plan::CallProcedure(plan) => plan.schema(),
             Plan::InsertMultiTable(plan) => plan.schema(),
             Plan::DescUser(plan) => plan.schema(),
+            Plan::Insert(plan) => plan.schema(),
 
             _ => Arc::new(DataSchema::empty()),
         }
diff --git a/src/query/storages/fuse/src/operations/common/processors/transform_serialize_block.rs b/src/query/storages/fuse/src/operations/common/processors/transform_serialize_block.rs
index 0aa86194ca689..5064f96e729c8 100644
--- a/src/query/storages/fuse/src/operations/common/processors/transform_serialize_block.rs
+++ b/src/query/storages/fuse/src/operations/common/processors/transform_serialize_block.rs
@@ -31,6 +31,7 @@ use databend_common_pipeline_core::processors::Processor;
 use databend_common_pipeline_core::processors::ProcessorPtr;
 use databend_common_pipeline_core::PipeItem;
 use databend_common_sql::executor::physical_plans::MutationKind;
+use databend_common_storage::MutationStatus;
 use databend_storages_common_index::BloomIndex;
 use opendal::Operator;
 
@@ -304,6 +305,14 @@
         match std::mem::replace(&mut self.state, State::Consume) {
             State::Serialized { serialized, index } => {
                 let block_meta = BlockWriter::write_down(&self.dal, serialized).await?;
+                let progress_values = ProgressValues {
+                    rows: block_meta.row_count as usize,
+                    bytes: block_meta.block_size as usize,
+                };
+                self.block_builder
+                    .ctx
+                    .get_write_progress()
+                    .incr(&progress_values);
 
                 let mutation_log_data_block = if let Some(index) = index {
                     // we are replacing the block represented by the `index`
@@ -313,19 +322,18 @@
                     })
                 } else {
                     // appending new data block
-                    let progress_values = ProgressValues {
-                        rows: block_meta.row_count as usize,
-                        bytes: block_meta.block_size as usize,
-                    };
-                    self.block_builder
-                        .ctx
-                        .get_write_progress()
-                        .incr(&progress_values);
-
-                    if let Some(tid) = self.table_id {
-                        self.block_builder
-                            .ctx
-                            .update_multi_table_insert_status(tid, block_meta.row_count);
+                    if matches!(self.kind, MutationKind::Insert) {
+                        if let Some(tid) = self.table_id {
+                            self.block_builder
+                                .ctx
+                                .update_multi_table_insert_status(tid, block_meta.row_count);
+                        } else {
+                            self.block_builder.ctx.add_mutation_status(MutationStatus {
+                                insert_rows: block_meta.row_count,
+                                update_rows: 0,
+                                deleted_rows: 0,
+                            });
+                        }
                     }
 
                     if matches!(self.kind, MutationKind::Recluster) {
diff --git a/src/query/storages/fuse/src/operations/merge_into/mutator/matched_mutator.rs b/src/query/storages/fuse/src/operations/merge_into/mutator/matched_mutator.rs
index de77c1dcef0eb..7a727eb10133b 100644
--- a/src/query/storages/fuse/src/operations/merge_into/mutator/matched_mutator.rs
+++ b/src/query/storages/fuse/src/operations/merge_into/mutator/matched_mutator.rs
@@ -20,7 +20,6 @@ use std::time::Instant;
 
 use ahash::AHashMap;
 use databend_common_base::base::tokio::sync::Semaphore;
-use databend_common_base::base::ProgressValues;
 use databend_common_base::runtime::GlobalIORuntime;
 use databend_common_base::runtime::TrySpawn;
 use databend_common_catalog::plan::build_origin_block_row_num;
@@ -66,7 +65,6 @@ use crate::operations::BlockMetaIndex;
 use crate::FuseTable;
 
 struct AggregationContext {
-    ctx: Arc<dyn TableContext>,
     data_accessor: Operator,
     write_settings: WriteSettings,
     read_settings: ReadSettings,
@@ -135,7 +133,6 @@
 
         Ok(Self {
             aggregation_ctx: Arc::new(AggregationContext {
-                ctx: ctx.clone(),
                 write_settings,
                 read_settings,
                 data_accessor,
@@ -147,7 +144,7 @@
             segment_reader,
             block_mutation_row_offset: HashMap::new(),
             segment_locations: AHashMap::from_iter(segment_locations),
-            ctx: ctx.clone(),
+            ctx,
             target_build_optimization,
             meta_indexes: HashSet::new(),
         })
@@ -393,11 +390,6 @@
             "apply update and delete to segment idx {}, block idx {}",
             segment_idx, block_idx,
         );
-        let progress_values = ProgressValues {
-            rows: modified_offsets.len(),
-            bytes: 0,
-        };
-        self.ctx.get_write_progress().incr(&progress_values);
         let mut origin_data_block = read_block(
             self.write_settings.storage_format,
             &self.block_reader,
diff --git a/src/query/storages/fuse/src/operations/mutation/processors/compact_source.rs b/src/query/storages/fuse/src/operations/mutation/processors/compact_source.rs
index f88ef1197fb08..981734970bcdb 100644
--- a/src/query/storages/fuse/src/operations/mutation/processors/compact_source.rs
+++ b/src/query/storages/fuse/src/operations/mutation/processors/compact_source.rs
@@ -15,6 +15,7 @@
 use std::sync::Arc;
 use std::time::Instant;
 
+use databend_common_base::base::Progress;
 use databend_common_base::base::ProgressValues;
 use databend_common_catalog::plan::gen_mutation_stream_meta;
 use databend_common_catalog::table_context::TableContext;
@@ -124,7 +125,7 @@ impl PrefetchAsyncSource for CompactSource {
 }
 
 pub struct CompactTransform {
-    ctx: Arc<dyn TableContext>,
+    scan_progress: Arc<Progress>,
     block_reader: Arc<BlockReader>,
     storage_format: FuseStorageFormat,
     stream_ctx: Option<StreamContext>,
@@ -138,7 +139,7 @@ impl CompactTransform {
         stream_ctx: Option<StreamContext>,
     ) -> Self {
         Self {
-            ctx,
+            scan_progress: ctx.get_scan_progress(),
             block_reader,
             storage_format,
             stream_ctx,
@@ -168,6 +169,10 @@ impl BlockMetaTransform<CompactSourceMeta> for CompactTransform {
                     data,
                 )?;
 
+                self.scan_progress.incr(&ProgressValues {
+                    rows: block.num_rows(),
+                    bytes: block.memory_size(),
+                });
                 if let Some(stream_ctx) = &self.stream_ctx {
                     let stream_meta = gen_mutation_stream_meta(None, &meta.location.0)?;
                     block = stream_ctx.apply(block, &stream_meta)?;
@@ -188,12 +193,6 @@
                     ClusterStatsGenType::Generally,
                 )));
                 let new_block = block.add_meta(Some(meta))?;
-
-                let progress_values = ProgressValues {
-                    rows: new_block.num_rows(),
-                    bytes: new_block.memory_size(),
-                };
-                self.ctx.get_write_progress().incr(&progress_values);
                 Ok(vec![new_block])
             }
             CompactSourceMeta::Extras(extra) => {
diff --git a/src/query/storages/fuse/src/operations/mutation/processors/mutation_source.rs b/src/query/storages/fuse/src/operations/mutation/processors/mutation_source.rs
index 3e834f1c41596..fd47de5a810c1 100644
--- a/src/query/storages/fuse/src/operations/mutation/processors/mutation_source.rs
+++ b/src/query/storages/fuse/src/operations/mutation/processors/mutation_source.rs
@@ -177,7 +177,11 @@
                     chunks,
                     &self.storage_format,
                 )?;
-                let num_rows = data_block.num_rows();
+                let rows = data_block.num_rows();
+                self.ctx.get_scan_progress().incr(&ProgressValues {
+                    rows,
+                    bytes: data_block.memory_size(),
+                });
 
                 let fuse_part = FuseBlockPartInfo::from_part(&part)?;
                 if let Some(filter) = self.filter.as_ref() {
@@ -195,7 +199,7 @@
                     let affect_rows = match &predicates {
                         Value::Scalar(v) => {
                             if *v {
-                                num_rows
+                                rows
                             } else {
                                 0
                             }
@@ -208,7 +212,7 @@
 
                     match self.action {
                         MutationAction::Deletion => {
-                            if affect_rows == num_rows {
+                            if affect_rows == rows {
                                 // all the rows should be removed.
                                 let meta = Box::new(SerializeDataMeta::SerializeBlock(
                                     SerializeBlock::create(
@@ -222,7 +226,7 @@
                                 );
                             } else {
                                 if self.block_reader.update_stream_columns {
-                                    let row_num = build_origin_block_row_num(num_rows);
+                                    let row_num = build_origin_block_row_num(rows);
                                     data_block.add_column(row_num);
                                 }
 
@@ -268,7 +272,7 @@
                             self.state = State::Output(self.ctx.get_partition(), DataBlock::empty());
                         }
                     } else {
-                        self.update_mutation_status(num_rows);
+                        self.update_mutation_status(rows);
                         self.state = State::PerformOperator(data_block, fuse_part.location.clone());
                     }
                 }
@@ -287,6 +291,12 @@
                     &self.storage_format,
                 )?;
 
+                // Record the remaining memory size; the rows have already been recorded.
+                self.ctx.get_scan_progress().incr(&ProgressValues {
+                    rows: 0,
+                    bytes: remain_block.memory_size(),
+                });
+
                 let remain_block = if let Some(filter) = filter {
                     // for deletion.
                     remain_block.filter_boolean_value(&filter)?
@@ -414,12 +424,6 @@
 
 impl MutationSource {
     fn update_mutation_status(&self, num_rows: usize) {
-        let progress_values = ProgressValues {
-            rows: num_rows,
-            bytes: 0,
-        };
-        self.ctx.get_write_progress().incr(&progress_values);
-
         let (update_rows, deleted_rows) = if self.action == MutationAction::Update {
             (num_rows as u64, 0)
         } else {
diff --git a/src/query/storages/fuse/src/operations/replace_into/mutator/merge_into_mutator.rs b/src/query/storages/fuse/src/operations/replace_into/mutator/merge_into_mutator.rs
index 640f0853078a2..f142e7dc45dd9 100644
--- a/src/query/storages/fuse/src/operations/replace_into/mutator/merge_into_mutator.rs
+++ b/src/query/storages/fuse/src/operations/replace_into/mutator/merge_into_mutator.rs
@@ -18,7 +18,6 @@ use std::time::Instant;
 
 use ahash::AHashMap;
 use databend_common_base::base::tokio::sync::Semaphore;
-use databend_common_base::base::ProgressValues;
 use databend_common_base::runtime::GlobalIORuntime;
 use databend_common_base::runtime::TrySpawn;
 use databend_common_catalog::plan::gen_mutation_stream_meta;
@@ -462,17 +461,6 @@
             return Ok(None);
         }
 
-        let progress_values = ProgressValues {
-            rows: delete_nums,
-            // ignore bytes.
-            bytes: 0,
-        };
-
-        self.block_builder
-            .ctx
-            .get_write_progress()
-            .incr(&progress_values);
-
         // shortcut: whole block deletion
         if delete_nums == block_meta.row_count as usize {
             info!("whole block deletion");
diff --git a/tests/sqllogictests/suites/base/03_common/03_0025_delete_from.test b/tests/sqllogictests/suites/base/03_common/03_0025_delete_from.test
index 8099a2b1792be..72ce5252a18e4 100644
--- a/tests/sqllogictests/suites/base/03_common/03_0025_delete_from.test
+++ b/tests/sqllogictests/suites/base/03_common/03_0025_delete_from.test
@@ -10,14 +10,20 @@ USE db1
 statement ok
 CREATE TABLE IF NOT EXISTS t(c1 Int, c2 Int )
 
-statement ok
+query I
 INSERT INTO t VALUES(1,2)
+----
+1
 
-statement ok
+query I
 INSERT INTO t VALUES(3,4)
+----
+1
 
-statement ok
+query I
 delete from t where c1 > 3
+----
+0
 
 query B
 select count(*) = 2 from t
@@ -26,8 +32,10 @@ select count(*) = 2 from t
 
 
 
-statement ok
+query I
 delete from t where c1 = 1
+----
+1
 
 query B
 select count(*) = 0 from t where c1 = 1
@@ -42,11 +50,15 @@ select count(*) = 1 from t where c1 <> 1
 statement ok
 INSERT INTO t VALUES(5,6)
 
-statement ok
+query I
 delete from t as t1 where t1.c2 = 6
+----
+1
 
-statement ok
+query I
 delete from t
+----
+1
 
 query B
 select count(*) = 0 from t
@@ -61,11 +73,15 @@ drop table t all
 statement ok
 create table t (c Int null)
 
-statement ok
+query I
 insert into t values (1),(2),(NULL)
+----
+3
 
-statement ok
+query I
 delete from t where 1 = 0
+----
+0
 
 query B
 select count(*) = 3 from t
@@ -74,8 +90,10 @@ select count(*) = 3 from t
 
 
 
-statement ok
+query I
 delete from t where c = 1
+----
+1
 
 query B
 select count(*) = 2 from t
@@ -91,8 +109,10 @@ select count(*) = 1 from t where c IS NULL
 
 
 
-statement ok
+query I
 delete from t where c IS NULL
+----
+1
 
 query B
 select count(*) = 0 from t where c IS NULL
@@ -108,19 +128,25 @@ select count(*) = 1 from t where c IS NOT NULL
 
-statement ok
+query I
 delete from t where 1 = 1
+----
+1
 
 query B
 select count(*) = 0 from t
 ----
 1
 
-statement ok
+query I
 insert into t values (1), (NULL)
+----
+2
 
-statement ok
+query I
 delete from t where c >= 1
+----
+1
 
 query T
 select * from t
 ----
 NULL
 
-statement ok
+query I
 insert into t values (1),(2),(NULL)
+----
+3
 
 query B
 select count(*) = 0 from t
@@ -151,8 +179,10 @@ insert into t values (1),(3)
 statement ok
 insert into t values (2),(4)
 
-statement ok
+query I
 delete from t where c > 2;
+----
+2
 
 query B
 select count(*) = 2 from t
diff --git a/tests/suites/0_stateless/03_dml/03_0015_insert_into_select2.result b/tests/suites/0_stateless/03_dml/03_0015_insert_into_select2.result
index d00491fd7e5bb..19920de3d3ca2 100644
--- a/tests/suites/0_stateless/03_dml/03_0015_insert_into_select2.result
+++ b/tests/suites/0_stateless/03_dml/03_0015_insert_into_select2.result
@@ -1 +1,3 @@
+2
+2
 1
diff --git a/tests/suites/0_stateless/10_drivers/10_0000_python_mysql_driver.result b/tests/suites/0_stateless/10_drivers/10_0000_python_mysql_driver.result
index e69de29bb2d1d..6ed281c757a96 100644
--- a/tests/suites/0_stateless/10_drivers/10_0000_python_mysql_driver.result
+++ b/tests/suites/0_stateless/10_drivers/10_0000_python_mysql_driver.result
@@ -0,0 +1,2 @@
+1
+1
diff --git a/tests/suites/0_stateless/18_rbac/18_0012_temp_table.result b/tests/suites/0_stateless/18_rbac/18_0012_temp_table.result
index f007b33dfc4bc..ac8703d0df12e 100644
--- a/tests/suites/0_stateless/18_rbac/18_0012_temp_table.result
+++ b/tests/suites/0_stateless/18_rbac/18_0012_temp_table.result
@@ -7,6 +7,7 @@
 mysql: [Warning] Using a password on the command line interface can be insecure.
 ERROR 1105 (HY000) at line 1: PermissionDenied. Code: 1063, Text = Permission denied: privilege [Create] is required on 'default'.'test'.* for user 'owner'@'%' with roles [role1]. Note: Please ensure that your current role have the appropriate permissions to create a new Database|Table|UDF|Stage..
 mysql: [Warning] Using a password on the command line interface can be insecure.
+2
 1
 2
 1
@@ -15,6 +16,7 @@
 mysql: [Warning] Using a password on the command line interface can be insecure.
 3 3 3
 NULL
+1
 2
 1
 mysql: [Warning] Using a password on the command line interface can be insecure.
diff --git a/tests/suites/0_stateless/20+_others/20_0014_sort_spill.result b/tests/suites/0_stateless/20+_others/20_0014_sort_spill.result
index 9a551562af15f..a0967d9ce6914 100644
--- a/tests/suites/0_stateless/20+_others/20_0014_sort_spill.result
+++ b/tests/suites/0_stateless/20+_others/20_0014_sort_spill.result
@@ -1,10 +1,14 @@
 ==TEST GLOBAL SORT==
+3
+1
 0
 1
 NULL
+1
 ==Test if the spill is activated==
 2
 ==Enable sort_spilling_bytes_threshold_per_proc==
+1
 0
 1
 NULL
@@ -15,11 +19,13 @@ NULL
 2 8
 1 9
 ==Test abc==
+2
 one
 Two
 4
 1
 ==Test xy==
+4
 2 NULL
 2 5
 NULL 6
@@ -32,9 +38,11 @@ NULL 6
 4 8
 NULL 6
 2 5
+1
 ==Test a==
 16
 ==Test b==
+1
 2 NULL
 2 5
 4 8
@@ -56,11 +64,14 @@ NULL NULL
 2 NULL
 4 8
 ==TEST TOP-N SORT==
+1
 ==Test c==
 0
 ==Test d==
 1
+1
 ==Test e==
+1
 2 NULL
 2 5
 4 8
@@ -71,4 +82,5 @@ NULL 6
 NULL NULL
 2 5
 ==Test f==
+1
 9
diff --git a/tests/suites/5_ee/01_vacuum/01_0000_ee_vacuum.py b/tests/suites/5_ee/01_vacuum/01_0000_ee_vacuum.py
index cedc32dc5bd67..0ed3b191b2ce6 100755
--- a/tests/suites/5_ee/01_vacuum/01_0000_ee_vacuum.py
+++ b/tests/suites/5_ee/01_vacuum/01_0000_ee_vacuum.py
@@ -26,6 +26,8 @@ def insert_data(name):
     while value < 20:
         sql = "insert into table gc_test values(%d);" % value
         mycursor.execute(sql)
+        res = mycursor.fetchall()
+        assert res == [(1,)]
         value += 1