[SPARK-50578][PYTHON][SS] Add support for new version of state metadata for TransformWithStateInPandas

### What changes were proposed in this pull request?

Enable the TransformWithStateInPandas operator to write the new versions of state metadata and state schema. This enables the state metadata source and state data source readers, and will also support future schema evolution changes.

To achieve this, this PR adds a new implementation of a driver-side Python runner, because Spark needs to get the state schema on the driver during planning inside `IncrementalExecution`. We also start another state server in the new driver-side Python runner to handle the API calls made in init().

### Why are the changes needed?

This matches the new versions of state metadata and state schema implemented on the Scala side of TransformWithState.

### Does this PR introduce _any_ user-facing change?

No. However, users can now get results from the state metadata source reader and the state data source reader using the same API as Scala. For example, state metadata can now be read as follows:

```
metadata_df = spark.read.format("state-metadata").load(checkpoint_path)
```

And state rows can be read with the state data source as follows:

```
list_state_df = spark.read.format("statestore") \
    .option("path", checkpoint_path) \
    .option("stateVarName", "listState") \
    .load()
```

### How was this patch tested?

Added unit tests in `python/pyspark/sql/tests/pandas/test_pandas_transform_with_state.py`, which verify that the state metadata and state schema files are written correctly by reading them back with the state metadata source reader and the state data source reader.

### Was this patch authored or co-authored using generative AI tooling?

No.

Closes apache#49156 from jingz-db/python-metadata.

Lead-authored-by: jingz-db <[email protected]>
Co-authored-by: Jing Zhan <[email protected]>
Signed-off-by: Jungtaek Lim <[email protected]>
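For context, here is a minimal sketch (not part of this commit) of a TransformWithStateInPandas query whose processor registers a list state named "listState" in init(); the declarations made in init() are what the new driver-side PRE_INIT pass captures into the state schema and state metadata files. The processor name, column names, and input/output schemas below are made up, and the imports and parameter names assume the public StatefulProcessor API that this feature builds on:

```python
# Illustrative sketch only -- not code from this commit. Processor name, column
# names, and schemas are hypothetical; the API surface is assumed to be the
# StatefulProcessor / transformWithStateInPandas API.
import pandas as pd
from pyspark.sql.streaming import StatefulProcessor, StatefulProcessorHandle


class ListStateProcessor(StatefulProcessor):
    def init(self, handle: StatefulProcessorHandle) -> None:
        # State variables declared here are what the driver-side PRE_INIT pass
        # records into the new state schema and state metadata files.
        self.list_state = handle.getListState("listState", "value STRING")

    def handleInputRows(self, key, rows, timer_values):
        # Append every incoming value to the list state for this grouping key.
        count = 0
        for pdf in rows:
            for v in pdf["value"]:
                self.list_state.appendValue((str(v),))
                count += 1
        yield pd.DataFrame({"id": [key[0]], "appended": [count]})

    def close(self) -> None:
        pass


# Running a query with this processor (checkpoint_path is the same path later
# passed to the state-metadata / statestore readers shown above):
#
# (df.groupBy("id")
#    .transformWithStateInPandas(
#        statefulProcessor=ListStateProcessor(),
#        outputStructType="id STRING, appended LONG",
#        outputMode="Update",
#        timeMode="None",
#    )
#    .writeStream.option("checkpointLocation", checkpoint_path)
#    ...)
```

After such a query has run against checkpoint_path, the two reader snippets above return the state metadata and the contents of "listState", respectively.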
1 parent aac494e, commit c920210. Showing 16 changed files with 887 additions and 166 deletions.
python/pyspark/sql/streaming/stateful_processor_util.py

```
@@ -25,3 +25,4 @@ class TransformWithStateInPandasFuncMode(Enum):
     PROCESS_DATA = 1
     PROCESS_TIMER = 2
     COMPLETE = 3
+    PRE_INIT = 4
```
python/pyspark/sql/streaming/transform_with_state_driver_worker.py (new file: 102 additions, 0 deletions)
```python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import json
from typing import Any, Iterator, TYPE_CHECKING

from pyspark.util import local_connect_and_auth
from pyspark.serializers import (
    write_int,
    read_int,
    UTF8Deserializer,
    CPickleSerializer,
)
from pyspark import worker
from pyspark.util import handle_worker_exception
from typing import IO
from pyspark.worker_util import check_python_version
from pyspark.sql.streaming.stateful_processor_api_client import StatefulProcessorApiClient
from pyspark.sql.streaming.stateful_processor_util import TransformWithStateInPandasFuncMode
from pyspark.sql.types import StructType

if TYPE_CHECKING:
    from pyspark.sql.pandas._typing import (
        DataFrameLike as PandasDataFrameLike,
    )

pickle_ser = CPickleSerializer()
utf8_deserializer = UTF8Deserializer()


def main(infile: IO, outfile: IO) -> None:
    check_python_version(infile)

    log_name = "Streaming TransformWithStateInPandas Python worker"
    print(f"Starting {log_name}.\n")

    def process(
        processor: StatefulProcessorApiClient,
        mode: TransformWithStateInPandasFuncMode,
        key: Any,
        input: Iterator["PandasDataFrameLike"],
    ) -> None:
        print(f"{log_name} Starting execution of UDF: {func}.\n")
        func(processor, mode, key, input)
        print(f"{log_name} Completed execution of UDF: {func}.\n")

    try:
        func, return_type = worker.read_command(pickle_ser, infile)
        print(
            f"{log_name} finish init stage of Python runner. Received UDF from JVM: {func}, "
            f"received return type of UDF: {return_type}.\n"
        )
        # send signal for getting args
        write_int(0, outfile)
        outfile.flush()

        # This driver runner will only be used on the first batch of a query,
        # and the following code block should be only run once for each query run
        state_server_port = read_int(infile)
        key_schema = StructType.fromJson(json.loads(utf8_deserializer.loads(infile)))
        print(
            f"{log_name} received parameters for UDF. State server port: {state_server_port}, "
            f"key schema: {key_schema}.\n"
        )

        stateful_processor_api_client = StatefulProcessorApiClient(state_server_port, key_schema)
        process(
            stateful_processor_api_client,
            TransformWithStateInPandasFuncMode.PRE_INIT,
            None,
            iter([]),
        )
        write_int(0, outfile)
        outfile.flush()
    except Exception as e:
        handle_worker_exception(e, outfile)
        outfile.flush()


if __name__ == "__main__":
    # Read information about how to connect back to the JVM from the environment.
    java_port = int(os.environ["PYTHON_WORKER_FACTORY_PORT"])
    auth_secret = os.environ["PYTHON_WORKER_FACTORY_SECRET"]
    (sock_file, sock) = local_connect_and_auth(java_port, auth_secret)
    write_int(os.getpid(), sock_file)
    sock_file.flush()
    main(sock_file, sock_file)
```
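For orientation, a rough sketch of what the pickled UDF received by this driver worker plausibly does when invoked with PRE_INIT: it only runs the user's init(), so the state-variable declarations flow through the StatefulProcessorApiClient to the state server started by the JVM, which records the state schema. This is not code from the commit; the wrapper function and the handle construction are assumptions:

```python
# Rough sketch, not code from this commit. It assumes StatefulProcessorHandle
# wraps a StatefulProcessorApiClient; the wrapper function itself is hypothetical.
from pyspark.sql.streaming.stateful_processor import StatefulProcessorHandle
from pyspark.sql.streaming.stateful_processor_util import TransformWithStateInPandasFuncMode


def make_driver_side_udf(stateful_processor):
    def udf(api_client, mode, key, rows):
        if mode == TransformWithStateInPandasFuncMode.PRE_INIT:
            # Only init() runs on the driver: each getValueState / getListState /
            # getMapState call goes through the StatefulProcessorApiClient to the
            # state server the JVM started, so the driver can write the new state
            # schema and state metadata files before executors run.
            stateful_processor.init(StatefulProcessorHandle(api_client))
        # PROCESS_DATA / PROCESS_TIMER / COMPLETE are handled by the regular
        # executor-side worker, not by this driver runner.
    return udf
```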