Rename GP parallel wording to CBDB parallel.
GP doesn't have this parallel feature, so it is more appropriate
to rename it to our CBDB parallel wording.

Rename the gp_parallel files to cbdb_parallel files, and rename
the GP parallel functions accordingly.

Authored-by: Zhang Mingli [email protected]
avamingli authored and tuhaihe committed Dec 6, 2023
1 parent 5a663d7 commit 5f9190e
Showing 13 changed files with 34 additions and 34 deletions.
2 changes: 1 addition & 1 deletion src/backend/access/common/session.c
@@ -46,7 +46,7 @@

/* This backend's current session. */
Session *CurrentSession = NULL;
-/* gp style parallelism session. */
+/* CBDB style parallelism session. */
Session *ParallelSession = NULL;
/*
* Set up CurrentSession to point to an empty Session object.
20 changes: 10 additions & 10 deletions src/backend/access/transam/parallel.c
@@ -69,8 +69,8 @@
/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC 0x50477c7c

-/* Magic number for gp style parallel context TOC. */
-#define GP_PARALLEL_MAGIC 0x50477d7d
+/* Magic number for CBDB style parallel context TOC. */
+#define CBDB_PARALLEL_MAGIC 0x50477d7d

/*
* Magic numbers for per-context parallel state sharing. Higher-level code
@@ -1692,7 +1692,7 @@ GpFetchParallelDSMEntry(ParallelEntryTag tag, int plan_node_id)
else
{
Assert(ParallelSession->segment);
-toc = shm_toc_attach(GP_PARALLEL_MAGIC, dsm_segment_address(ParallelSession->segment));
+toc = shm_toc_attach(CBDB_PARALLEL_MAGIC, dsm_segment_address(ParallelSession->segment));
}

Assert(toc != NULL);
@@ -1749,15 +1749,15 @@ void GpDestroyParallelDSMEntry()
}

void
-AtEOXact_GP_Parallel()
+AtEOXact_CBDB_Parallel()
{
GpDestroyParallelDSMEntry();
}

void
-AtProcExit_GP_Parallel(int code, Datum arg)
+AtProcExit_CBDB_Parallel(int code, Datum arg)
{
-AtEOXact_GP_Parallel();
+AtEOXact_CBDB_Parallel();
}

GpParallelDSMEntry *
@@ -1818,14 +1818,14 @@ GpInsertParallelDSMHash(PlanState *planstate)
dsm_segment* seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);

if (seg != NULL)
-toc = shm_toc_create(GP_PARALLEL_MAGIC,
+toc = shm_toc_create(CBDB_PARALLEL_MAGIC,
dsm_segment_address(seg),
segsize);
else
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errhint("create dsm for gp style parallel workers failed.")));
errhint("create dsm for CBDB style parallel workers failed.")));

BarrierInit(&entry->build_barrier, parallel_workers);
entry->handle = dsm_segment_handle(seg);
@@ -1866,7 +1866,7 @@ GpInsertParallelDSMHash(PlanState *planstate)
if (!init)
{
/* should ensure that no shared memory is pinned before the process exits. */
-before_shmem_exit(AtProcExit_GP_Parallel, 0);
+before_shmem_exit(AtProcExit_CBDB_Parallel, 0);
init = true;
}
}
@@ -1878,7 +1878,7 @@ GpInsertParallelDSMHash(PlanState *planstate)
ParallelSession->segment = seg;

/* Attach to DSA area that can be used by the leader and all workers. */
-shm_toc* toc = shm_toc_attach(GP_PARALLEL_MAGIC, dsm_segment_address(seg));
+shm_toc* toc = shm_toc_attach(CBDB_PARALLEL_MAGIC, dsm_segment_address(seg));
char* area_space = shm_toc_lookup(toc, PARALLEL_KEY_GP_DSA, false);
dsa_area* area = dsa_attach_in_place(area_space, seg);

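Taken together, the parallel.c hunks follow one create/attach pattern: the first backend creates a DSM segment whose table of contents is stamped with CBDB_PARALLEL_MAGIC, and later participants attach by the same magic number. A condensed sketch of that flow, with the hash-table bookkeeping, barrier setup, and error handling of GpInsertParallelDSMHash elided (segsize is assumed to be computed beforehand):

    /* Creator: allocate the segment and stamp its table of contents. */
    dsm_segment *seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
    shm_toc    *toc = shm_toc_create(CBDB_PARALLEL_MAGIC,
                                     dsm_segment_address(seg), segsize);

    /* Attacher: locate the TOC by the same magic number, then join the
       shared DSA area published under PARALLEL_KEY_GP_DSA. */
    shm_toc  *atoc = shm_toc_attach(CBDB_PARALLEL_MAGIC,
                                    dsm_segment_address(ParallelSession->segment));
    char     *area_space = shm_toc_lookup(atoc, PARALLEL_KEY_GP_DSA, false);
    dsa_area *area = dsa_attach_in_place(area_space, ParallelSession->segment);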
8 changes: 4 additions & 4 deletions src/backend/access/transam/xact.c
@@ -2816,8 +2816,8 @@ CommitTransaction(void)
if (IsInParallelMode())
AtEOXact_Parallel(true);

-/* Clean up GP style parallel workers which we might have. */
-AtEOXact_GP_Parallel();
+/* Clean up CBDB style parallel workers which we might have. */
+AtEOXact_CBDB_Parallel();

/* Shut down the deferred-trigger manager */
AfterTriggerEndXact(true);
@@ -3546,8 +3546,8 @@ AbortTransaction(void)
s->parallelModeLevel = 0;
}

-/* Clean up GP style parallel workers which we might have. */
-AtEOXact_GP_Parallel();
+/* Clean up CBDB style parallel workers which we might have. */
+AtEOXact_CBDB_Parallel();

/*
* do abort processing
6 changes: 3 additions & 3 deletions src/backend/cdb/cdbpath.c
@@ -2950,7 +2950,7 @@ can_elide_explicit_motion(PlannerInfo *root, Index rti, Path *subpath,
* parallel plan with Motion(1:6), but it still can't be processed by multiple
* workers or be duplicated in every worker as the inner path.
*
- * All locus test cases are in gp_parallel, see final join locus examples there.
+ * All locus test cases are in cbdb_parallel, see final join locus examples there.
*/
CdbPathLocus
cdbpath_motion_for_parallel_join(PlannerInfo *root,
@@ -3137,7 +3137,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
/*
* SegmentGeneralWorkers parallel join SegmentGeneralWorkers when parallel_aware
* generates a SegmentGeneralWorkers locus.
- * see ex 5_P_5_5 in gp_parallel.sql
+ * see ex 5_P_5_5 in cbdb_parallel.sql
*/
if (outer.ok_to_replicate && inner.ok_to_replicate)
return outer.locus;
@@ -3156,7 +3156,7 @@ cdbpath_motion_for_parallel_join(PlannerInfo *root,
* SegmentGeneralWorkers JOIN SegmentGeneral without shared hash table.
* And the join locus is SegmentGeneralWorkers.
* Then we can return the outer locus as join will set workers as outer locus.
- * See ex 5_4_5 in gp_parallel.sql
+ * See ex 5_4_5 in cbdb_parallel.sql
*/
if (outer.ok_to_replicate && inner.ok_to_replicate)
return outer.locus;
2 changes: 1 addition & 1 deletion src/backend/executor/execMain.c
@@ -2540,7 +2540,7 @@ ExecutePlan(EState *estate,
EnterParallelMode();

/*
- * GP style parallelism won't interfere PG style parallel mechanism.
+ * CBDB style parallelism won't interfere PG style parallel mechanism.
* So we will pass if use_parallel_mode is true, which means there exists a
* Gather/GatherMerge node.
*/
8 changes: 4 additions & 4 deletions src/backend/optimizer/README.cbdb.parallel
@@ -14,11 +14,11 @@ Cloudberry Database treats all workers equally. They work together to
execute a plan node, with some sync mechanism to keep things right,
e.g. creating a shared hash table.

-That's called GP style. GP style launches workers as non-parallel
+That's called CBDB style. CBDB style launches workers as non-parallel
plan, except that it expands the Gang size by a factor if a top path
node has parallel_workers > 1.

-The reasons we choose GP style but not PG style or mix them is complex.
+The reasons we choose CBDB style but not PG style or mix them is complex.

We encounter lots of problems when mixing them together and we don't
have enough time to enable both and don't know how much the benefit we
@@ -41,15 +41,15 @@ The Gather node should be Hashed locus in that situation. But things
become complex when joining with other locus and if there is a Motion
node below that.

-3. GP style could parallelize plan as late as possible until the final
+3. CBDB style could parallelize plan as late as possible until the final
Gather(to QD or to QE in the middle), but PG style will Gather workers
in apply_scanjoin_target_to_path. PG style can't generate the final
scan/join target in parallel workers. This is PG's last opportunity to
use any partial paths that exist.

It will empty partial_pathlist; all paths are moved to pathlist, so they
couldn't participate in a later parallel join as the outer path, e.g.:
-parallel_aware hash join with a shared table. But GP style could keep
+parallel_aware hash join with a shared table. But CBDB style could keep
partial path in partial_pathlist because we have a Gather Motion on
the top.

2 changes: 1 addition & 1 deletion src/backend/optimizer/plan/planner.c
@@ -2504,7 +2504,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Limit parallel:
* PG doesn't have to handle limit here because all partial paths have been Gathered
* into pathlist, and the subpath of Limit node could be parallel.
- * For our GP style, we don't have Gather node and keep the partial path in partial_pathlist
+ * For our CBDB style, we don't have Gather node and keep the partial path in partial_pathlist
* until the last step if possible.
* When we generate two phase limit path or limit has sub partial path,
* the Limit node on QEs could be parallel.
4 changes: 2 additions & 2 deletions src/include/access/parallel.h
@@ -125,8 +125,8 @@ extern void* GpFetchParallelDSMEntry(ParallelEntryTag tag, int plan_node_id);

extern void GpDestroyParallelDSMEntry(void);

-extern void AtEOXact_GP_Parallel(void);
+extern void AtEOXact_CBDB_Parallel(void);

-extern void AtProcExit_GP_Parallel(int code, Datum arg);
+extern void AtProcExit_CBDB_Parallel(int code, Datum arg);

#endif /* PARALLEL_H */
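For reference, the xact.c and parallel.c hunks above wire these renamed hooks in two places. A condensed sketch of the call pattern already shown in this commit (no new API; the surrounding context is elided):

    /* Registered once per backend before shared memory is torn down
       (see GpInsertParallelDSMHash in parallel.c): */
    before_shmem_exit(AtProcExit_CBDB_Parallel, 0);

    /* Invoked at end of transaction, from both CommitTransaction()
       and AbortTransaction() (see xact.c): */
    AtEOXact_CBDB_Parallel();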
@@ -1,6 +1,6 @@
--
--- GP PARALLEL
--- Test GP style parallel plan.
+-- CBDB PARALLEL
+-- Test CBDB style parallel plan.
-- GUCs should be set with LOCAL so they do not disturb other parallel plans.
-- Should not use force_parallel_mode as it will ignore the plan and check results only.
-- We want to check plans in this file!
2 changes: 1 addition & 1 deletion src/test/regress/expected/select_parallel.out
@@ -1,6 +1,6 @@
--
-- PARALLEL
--- We have GP style parallel now, open this file in parallel mode.
+-- We have CBDB style parallel now, open this file in parallel mode.
--
set enable_parallel = on;
set optimizer = off;
4 changes: 2 additions & 2 deletions src/test/regress/greenplum_schedule
@@ -250,8 +250,8 @@ test: uao_dml/uao_dml_column
test: ao_locks
test: freeze_aux_tables

-# gp parallel test
-test: gp_parallel
+# cbdb parallel test
+test: cbdb_parallel

# These cannot run in parallel, because they check that VACUUM FULL shrinks table size.
# A concurrent session could hold back the xid horizon and prevent old tuples from being
@@ -1,6 +1,6 @@
--
--- GP PARALLEL
--- Test GP style parallel plan.
+-- CBDB PARALLEL
+-- Test CBDB style parallel plan.
-- GUCs should be set with LOCAL so they do not disturb other parallel plans.
-- Should not use force_parallel_mode as it will ignore the plan and check results only.
-- We want to check plans in this file!
2 changes: 1 addition & 1 deletion src/test/regress/sql/select_parallel.sql
@@ -1,6 +1,6 @@
--
-- PARALLEL
--- We have GP style parallel now, open this file in parallel mode.
+-- We have CBDB style parallel now, open this file in parallel mode.
--

set enable_parallel = on;
