Skip to content

sql_utils

Sql utils are used to interact with databases.

StoredProcedureNotDefinedError

Bases: Exception

Raised when the upsert stored procedure does not exist.

Source code in physical_operations_utils/sql_utils.py
672
673
674
675
class StoredProcedureNotDefinedError(Exception):
    """Raised when the upsert stored procedure does not exist."""

execute_sql_queries_with_retry(db_connection_engine, queries, bulk_commit)

Executes a list of SQL queries with optional bulk commit and retry logic.

Connects to the database using the provided SQLAlchemy engine and executes each query in the given list. If bulk_commit is True, all queries are committed together after execution; otherwise, each query is committed individually. Retries up to 3 times with a 2-second wait between attempts in case of transient OperationalErrors.

Parameters:

Name Type Description Default
db_connection_engine Engine

The SQLAlchemy engine used to connect to the database.

required
queries List[str]

A list of SQL queries to execute.

required
bulk_commit bool

Whether to commit all queries in a single transaction.

required

Returns:

Name Type Description
int int

The number of rows affected by the last executed query.

Raises:

Type Description
OperationalError

If the connection or query execution fails after retries.

Example
from physical_operations_utils.sql_utils import execute_sql_queries_with_retry
from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

engine = get_db_connection(f"common_trading_data_{environment}").engine
queries = [
    "UPDATE my_table SET value = 1 WHERE id = 10",
    "DELETE FROM my_table WHERE value IS NULL"
]
rowcount = execute_sql_queries_with_retry(engine, queries, bulk_commit=True)
print(f"{rowcount} rows affected by the last query.")
Source code in physical_operations_utils/sql_utils.py
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
@retry(
    wait=wait_fixed(2),
    stop=stop_after_attempt(3),
)
def execute_sql_queries_with_retry(
    db_connection_engine: Engine,
    queries: List[str],
    bulk_commit: bool,
) -> int:
    """
    Executes a list of SQL queries with optional bulk commit and retry logic.

    Connects to the database using the provided SQLAlchemy engine and executes each
    query in the given list. If `bulk_commit` is True, all queries are committed
    together after execution; otherwise, each query is committed individually.
    Retries up to 3 times with a 2-second wait between attempts in case of
    transient `OperationalError`s.

    Args:
        db_connection_engine (sqlalchemy.engine.Engine): The SQLAlchemy engine used to connect to the database.
        queries (List[str]): A list of SQL queries to execute.
        bulk_commit (bool): Whether to commit all queries in a single transaction.

    Returns:
        int: The number of rows affected by the last executed query, or 0 when
            `queries` is empty.

    Raises:
        sqlalchemy.exc.OperationalError: If the connection or query execution fails after retries.

    Example:
        ```python
        from physical_operations_utils.sql_utils import execute_sql_queries_with_retry
        from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

        engine = get_db_connection(f"common_trading_data_{environment}").engine
        queries = [
            "UPDATE my_table SET value = 1 WHERE id = 10",
            "DELETE FROM my_table WHERE value IS NULL"
        ]
        rowcount = execute_sql_queries_with_retry(engine, queries, bulk_commit=True)
        print(f"{rowcount} rows affected by the last query.")
        ```
    """
    # Guard against an empty query list: without this, `res` would be unbound
    # at the `return res.rowcount` below and raise UnboundLocalError.
    if not queries:
        return 0
    try:
        with db_connection_engine.connect() as conn:
            for query in queries:
                res = conn.execute(text(query))
                if not bulk_commit:
                    conn.commit()
            if bulk_commit:
                conn.commit()
            # rowcount of the last executed statement only.
            return res.rowcount
    except OperationalError as e:
        logging.error(f"OperationalError: {e}")
        logging.error(f"Original exception: {e.orig}")
        logging.error(f"Arguments: {e.orig.args}")
        raise e

fast_upsert_df_with_retry(engine, table_name, df, batch_size=5000, logger=None)

Performs a fast upsert of a DataFrame into the database using a stored procedure with retry logic for transient errors.

The function validates that a stored procedure named FAST_UPSERT_{table_name} exists in the database. It then creates a temporary table, inserts the DataFrame data into it in batches, and calls the stored procedure to perform the upsert operation.

If the data frame contains the columns start_time_lb_utc and stop_time_lb_utc, it validates that they are in UTC datetime format. If the data frame contains variable_id and start_time_lb_utc, it sorts the data frame by those columns before inserting. This is optimized for clustered indices on those columns in the target table.

Parameters:

Name Type Description Default
engine Engine

A SQLAlchemy Engine connected to the target database.

required
table_name str

The name of the target table for the upsert operation.

required
df DataFrame

The DataFrame containing the data to be upserted.

required
batch_size int

The number of rows to insert in each batch. Defaults to 5000.

5000

Returns:

Type Description
None

None

Raises:

Type Description
StoredProcedureNotDefinedError

If the required stored procedure does not exist in the database.

OperationalError

If there is an error executing the SQL commands that is not recoverable after retries.

ValueError

If the DataFrame columns do not have the expected datetime format.

Example
from physical_operations_utils.sql_utils import fast_upsert_df_with_retry
from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

engine = get_db_connection(f"common_trading_data_{environment}").engine

df = pd.DataFrame({
    "variable_id": ["var1", "var2"],
    "start_time_lb_utc": [pd.Timestamp("2024-01-01T00:00:00Z"), pd.Timestamp("2024-01-01T01:00:00Z")],
    "stop_time_lb_utc": [pd.Timestamp("2024-01-01T00:15:00Z"), pd.Timestamp("2024-01-01T01:15:00Z")],
    "variable_value": [100, 200],
    "variable_unit": ["unit", "unit"],
    "resolution_seconds": [900, 900],
    "fcst_creation_time_utc": [pd.Timestamp("2024-01-01T00:05:00Z"), pd.Timestamp("2024-01-01T01:05:00Z")]
})

fast_upsert_df_with_retry(engine, "my_table", df)
Source code in physical_operations_utils/sql_utils.py
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
@retry(
    stop=stop_after_attempt(2),
    wait=wait_fixed(1),
    retry=retry_if_exception_type((pyodbc.OperationalError, pyodbc.InterfaceError)),
)
def fast_upsert_df_with_retry(
    engine: Engine,
    table_name: str,
    df: pd.DataFrame,
    batch_size: int = 5000,
    logger: Logger | None = None,
) -> None:
    """
    Performs a fast upsert of a DataFrame into the database using a stored procedure with retry logic for transient errors.

    The function validates that a stored procedure named `FAST_UPSERT_{table_name}` exists in the database.
    It then creates a temporary table, inserts the DataFrame data into it in batches, and calls the stored
    procedure to perform the upsert operation.

    If the data frame contains the columns `start_time_lb_utc` and `stop_time_lb_utc`, it validates that they are in UTC datetime format.
    If the data frame contains `variable_id` and `start_time_lb_utc`, it sorts the data frame by those columns before inserting. This
    is optimized for clustered indices on those columns in the target table.

    Parameters:
        engine (Engine): A SQLAlchemy Engine connected to the target database.
        table_name (str): The name of the target table for the upsert operation.
        df (pd.DataFrame): The DataFrame containing the data to be upserted.
        batch_size (int): The number of rows to insert in each batch. Defaults to 5000.
        logger (Logger | None): Optional logger; a default one is created when omitted.

    Returns:
        None

    Raises:
        StoredProcedureNotDefinedError: If the required stored procedure does not exist in the database.
        OperationalError: If there is an error executing the SQL commands that is not recoverable after retries.
        ValueError: If the DataFrame columns do not have the expected datetime format.

    Example:
        ```python
        from physical_operations_utils.sql_utils import fast_upsert_df_with_retry
        from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

        engine = get_db_connection(f"common_trading_data_{environment}").engine

        df = pd.DataFrame({
            "variable_id": ["var1", "var2"],
            "start_time_lb_utc": [pd.Timestamp("2024-01-01T00:00:00Z"), pd.Timestamp("2024-01-01T01:00:00Z")],
            "stop_time_lb_utc": [pd.Timestamp("2024-01-01T00:15:00Z"), pd.Timestamp("2024-01-01T01:15:00Z")],
            "variable_value": [100, 200],
            "variable_unit": ["unit", "unit"],
            "resolution_seconds": [900, 900],
            "fcst_creation_time_utc": [pd.Timestamp("2024-01-01T00:05:00Z"), pd.Timestamp("2024-01-01T01:05:00Z")]
        })

        fast_upsert_df_with_retry(engine, "my_table", df)
        ```
    """
    if not logger:
        logger = get_logger(
            team="physical_operations",
            application_name="physical_operations_utils",
            job_id="fast_upsert_df_with_retry",
        )

    # Work on a deep copy so the caller's DataFrame is never mutated.
    df = df.copy(deep=True)
    for col in [
        "start_time_lb_utc",
        "stop_time_lb_utc",
        "fcst_creation_time_utc",
        "db_updated_utc",
    ]:
        if col in df.columns:
            validate_df_column_is_utc_datetime(df, col)

    # Align with clustered index ("resolution_seconds", "start_time_lb_utc", "variable_id") if those columns exist - this can significantly speed up the upsert
    default_clustered_index = ["resolution_seconds", "start_time_lb_utc", "variable_id"]
    if all(col in df.columns for col in default_clustered_index):
        df = df.sort_values(by=default_clustered_index)

    df = df.where(pd.notnull(df), None)  # Convert NaN -> None

    # Force Python-native scalars (critical for pyodbc)
    df = df.astype(object)
    for col in df.columns:
        df[col] = df[col].apply(lambda x: x.item() if hasattr(x, "item") else x)

    schema = _get_table_schema(engine, table_name)

    if set(df.columns) != set(schema.keys()):
        raise ValueError(
            f"DataFrame columns {df.columns.tolist()} do not match target table schema columns {list(schema.keys())}"
        )

    for col, meta in schema.items():
        if not meta["is_nullable"] and df[col].isnull().any():
            logger.error(
                f"Column '{col}' contains null values but is defined as NOT NULL in the database schema. Offending rows:\n{df[df[col].isnull()]}"
            )
            raise ValueError(
                f"Column '{col}' contains null values but is defined as NOT NULL in the database schema."
            )

    col_defs = _build_temp_table_columns(schema)
    sizes = _build_setinputsizes_for_numeric_columns(schema, list(df.columns))

    sp_name = f"FAST_UPSERT_{table_name}"
    temp_table_name = f"#Incoming_{table_name}"

    # Fix: raise the dedicated exception documented in the docstring (and
    # defined for exactly this purpose) instead of a generic RuntimeError.
    if not _stored_procedure_exists(engine, sp_name):
        raise StoredProcedureNotDefinedError(f"Stored procedure '{sp_name}' does not exist.")

    create_temp_sql = (
        f"CREATE TABLE {temp_table_name} (\n" + ",\n".join(col_defs) + "\n);"
    )

    # Insert values placeholder string: ?, ?, ?, ...
    placeholders = ", ".join(["?"] * len(df.columns))
    insert_sql = f"""
        INSERT INTO {temp_table_name} ({", ".join(df.columns)})
        VALUES ({placeholders})
    """

    rows = [tuple(row) for row in df.itertuples(index=False, name=None)]
    # Convert any nan in rows to None to ensure they are correctly inserted as NULL in SQL Server
    rows = [tuple(None if pd.isna(x) else x for x in row) for row in rows]
    conn = engine.raw_connection()
    try:
        cursor = conn.cursor()
        cursor.execute(create_temp_sql)

        cursor.fast_executemany = True
        # Only call setinputsizes when at least one column actually has a size hint.
        if any(sizes):
            cursor.setinputsizes(sizes)

        for i in range(0, len(rows), batch_size):
            batch = rows[i : i + batch_size]
            cursor.executemany(insert_sql, batch)

        cursor.execute(f"EXEC {sp_name};")
        cursor.execute(f"DROP TABLE {temp_table_name};")
        conn.commit()

    finally:
        conn.close()

get_existing_table_names(db_connection_engine)

Retrieves the names of all existing base tables from a database.

Connects to the database using the provided SQLAlchemy engine and queries the INFORMATION_SCHEMA.TABLES view to get the names of all base tables. Retries up to 3 times with a 2-second wait between attempts in case of transient OperationalErrors.

Parameters:

Name Type Description Default
db_connection_engine Engine

The SQLAlchemy engine used to connect to the database.

required

Returns:

Type Description
List[str]

List[str]: A list of base table names present in the database.

Raises:

Type Description
OperationalError

If the connection or query fails after retries.

Example
from physical_operations_utils.sql_utils import get_existing_table_names
from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

engine = get_db_connection(f"common_trading_data_{environment}").engine
table_names = get_existing_table_names(engine)
print(table_names)
Source code in physical_operations_utils/sql_utils.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
@retry(
    wait=wait_fixed(2),
    stop=stop_after_attempt(3),
)
def get_existing_table_names(db_connection_engine: Engine) -> List[str]:
    """
    Retrieves the names of all existing base tables from a database.

    Queries the `INFORMATION_SCHEMA.TABLES` view over the given SQLAlchemy engine
    and collects the names of all base tables. Retries up to 3 times with a
    2-second wait between attempts in case of transient `OperationalError`s.

    Args:
        db_connection_engine (sqlalchemy.engine.Engine): The SQLAlchemy engine used to connect to the database.

    Returns:
        List[str]: A list of base table names present in the database.

    Raises:
        sqlalchemy.exc.OperationalError: If the connection or query fails after retries.

    Example:
        ```python
        from physical_operations_utils.sql_utils import get_existing_table_names
        from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

        engine = get_db_connection(f"common_trading_data_{environment}").engine
        table_names = get_existing_table_names(engine)
        print(table_names)
        ```
    """
    base_table_query = text(
        "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE';"
    )
    try:
        with db_connection_engine.connect() as conn:
            # Each row has a single column: the table name.
            return [row[0] for row in conn.execute(base_table_query).fetchall()]
    except OperationalError as e:
        logging.error(f"OperationalError: {e}")
        logging.error(f"Original exception: {e.orig}")
        logging.error(f"Arguments: {e.orig.args}")
        raise e

get_latest_task_execution_time_utc(platform_conn_engine, application_name, job_id)

Retrieves the last successful execution timestamp of a task from the platform task execution tracking table.

The function queries the core_last_successful_task_execution table for a specific application and job ID and returns the timestamp of the last successful execution in UTC. If no execution is found, it returns None. If more than one row is returned by the query, a ValueError is raised.

Parameters:

Name Type Description Default
platform_conn_engine Engine

SQLAlchemy engine to the platform database.

required
application_name str

Name of the application that executed the task.

required
job_id str

Identifier for the specific job/task.

required

Returns:

Type Description
datetime | None

datetime.datetime | None: The UTC datetime of the last successful execution, or None if not found.

Raises:

Type Description
ValueError

If more than one row is returned by the query.

Exception

If the database connection or query fails after retries.

Example
from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection
from physical_operations_utils.environment_utils import setup_environment
from physical_operations_utils.sql_utils import get_latest_task_execution_time_utc

environment = setup_environment()
engine = get_db_connection(f"physical_trading_platorm_django_{environment}")
last_run = get_latest_task_execution_time_utc(engine, "my_app", "my_task")

if last_run:
    print(f"Last successful execution: {last_run}")
else:
    print("No previous execution found.")
Source code in physical_operations_utils/sql_utils.py
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
@retry(
    wait=wait_fixed(2),
    stop=stop_after_attempt(3),
    reraise=True,
)
def get_latest_task_execution_time_utc(
    platform_conn_engine: Engine, application_name: str, job_id: str
) -> datetime.datetime | None:
    """
    Retrieves the last successful execution timestamp of a task from the platform task execution tracking table.

    The function queries the `core_last_successful_task_execution` table for a specific application and job ID and
    returns the timestamp of the last successful execution in UTC. If no execution is found, it returns `None`.
    If more than one row is returned by the query, a `ValueError` is raised.

    Args:
        platform_conn_engine (Engine): SQLAlchemy engine to the platform database.
        application_name (str): Name of the application that executed the task.
        job_id (str): Identifier for the specific job/task.

    Returns:
        datetime.datetime | None: The UTC datetime of the last successful execution, or `None` if not found.

    Raises:
        ValueError: If more than one row is returned by the query.
        Exception: If the database connection or query fails after retries.

    Example:
        ```python
        from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection
        from physical_operations_utils.environment_utils import setup_environment
        from physical_operations_utils.sql_utils import get_latest_task_execution_time_utc

        environment = setup_environment()
        engine = get_db_connection(f"physical_trading_platorm_django_{environment}")
        last_run = get_latest_task_execution_time_utc(engine, "my_app", "my_task")

        if last_run:
            print(f"Last successful execution: {last_run}")
        else:
            print("No previous execution found.")
        ```
    """
    query = "SELECT last_successful_execution FROM core_last_successful_task_execution WHERE application_name LIKE :application_name AND job_id LIKE :job_id"
    params = {"application_name": application_name, "job_id": job_id}
    with platform_conn_engine.connect() as conn:
        # Fix: materialize the result and count fetched rows instead of using
        # `rowcount` — DBAPI drivers commonly report -1 for SELECT statements,
        # which made both the >1 check and the ==0 check unreliable.
        rows = conn.execute(text(query), params).fetchall()
        if len(rows) > 1:
            raise ValueError(
                "More than one row found in query to core_last_successful_task_execution"
            )
        if not rows:
            return None
        last_executed: datetime.datetime = rows[0][0]
        return last_executed.astimezone(UTC_TIMEZONE)

load_data_with_caching(db_engine, table_name, query, params, force_reload)

Loads data from a database with caching support.

If caching is enabled and the table has been previously loaded, it returns the cached DataFrame. Otherwise, it fetches the data from the database.

Parameters:

Name Type Description Default
db_engine Engine

The database engine.

required
table_name str

The name of the table being queried.

required
query str

The SQL query to execute.

required
params dict

Query parameters.

required
force_reload bool

If True, forces data reloading instead of using cache.

required

Returns:

Type Description
DataFrame

pd.DataFrame: A pandas DataFrame containing the query results.

Example
from physical_operations_utils.sql_utils import load_data_with_caching
from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

conn = get_db_connection(f"common_trading_data_{environment}")
query = "SELECT * FROM my_table WHERE date = :date"
params = {"date": "2024-02-01"}

df = load_data_with_caching(conn.engine, "my_table", query, params, force_reload=False)
print(df.head())
Source code in physical_operations_utils/sql_utils.py
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
@retry(
    wait=wait_fixed(2),
    stop=stop_after_attempt(5),
)
def load_data_with_caching(
    db_engine: Engine,
    table_name: str,
    query: str,
    params: dict,
    force_reload: bool,
) -> pd.DataFrame:
    """
    Loads data from a database with caching support.

    On a cache miss (or when `force_reload` is True) the data is fetched from the
    database and stored in the module-level cache keyed by `table_name`; otherwise
    the cached DataFrame is reused. A deep copy is always returned so callers
    cannot mutate the cached data.

    Args:
        db_engine (sqlalchemy.engine.Engine): The database engine.
        table_name (str): The name of the table being queried (used as the cache key).
        query (str): The SQL query to execute.
        params (dict): Query parameters.
        force_reload (bool): If True, forces data reloading instead of using cache.

    Returns:
        pd.DataFrame: A pandas DataFrame containing the query results.

    Example:
        ```python
        from physical_operations_utils.sql_utils import load_data_with_caching
        from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

        conn = get_db_connection(f"common_trading_data_{environment}")
        query = "SELECT * FROM my_table WHERE date = :date"
        params = {"date": "2024-02-01"}

        df = load_data_with_caching(conn.engine, "my_table", query, params, force_reload=False)
        print(df.head())
        ```
    """
    needs_fetch = force_reload or table_name not in _TABLE_TO_DATA_MAP
    if needs_fetch:
        fresh_data = pandas_read_sql_with_retry(
            db_engine=db_engine, query=query, params=params
        )
        _TABLE_TO_DATA_MAP[table_name] = fresh_data
    # Deep copy so mutations by the caller never leak back into the cache.
    return _TABLE_TO_DATA_MAP[table_name].copy(deep=True)

log_task_execution(platform_conn_engine, application_name, job_id, comment=None)

Record the last successful execution timestamp (and optional comment) of a scheduled task into the core_last_successful_task_execution table.

This function builds a single-row pandas DataFrame containing: - application_name and job_id to identify the task, - an optional free-text comment, - last_successful_execution set to the current UTC time (from get_utc_now_custom_precision). It then upserts that row into SQL Server using upsert_into_mssql, matching on job_id.

Parameters:

Name Type Description Default
platform_conn_engine Engine

SQLAlchemy Engine connected to the platform database.

required
application_name str

The name of the application or schema (e.g. "nomination").

required
job_id str

A unique identifier for the task (e.g. "monitor variable checker").

required
comment str | None

An optional note or comment about this execution. Defaults to None.

None

Returns:

Type Description
None

None

Raises:

Type Description
OperationalError

If the upsert fails at the database level and is not recoverable.

ValueError

If the DataFrame or key columns are malformed.

Example
log_task_execution(
    platform_conn_engine=engine,
    application_name="nomination",
    job_id="monitor variable checker",
    comment="Ran via cron every 5 minutes"
)
Source code in physical_operations_utils/sql_utils.py
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
def log_task_execution(
    platform_conn_engine: Engine,
    application_name: str,
    job_id: str,
    comment: str | None = None,
) -> None:
    """
    Record the last successful execution timestamp (and optional comment) of a
    scheduled task into the `core_last_successful_task_execution` table.

    Builds a single-row pandas DataFrame containing:
    - `application_name` and `job_id` to identify the task,
    - an optional free-text `comment`,
    - `last_successful_execution` taken from `get_utc_now_custom_precision()`.
    That row is then upserted into SQL Server via `upsert_into_mssql`.

    Parameters:
        platform_conn_engine (sqlalchemy.Engine):
            SQLAlchemy Engine connected to the platform database.
        application_name (str):
            The name of the application or schema (e.g. "nomination").
        job_id (str):
            A unique identifier for the task (e.g. "monitor variable checker").
        comment (str | None, optional):
            An optional note or comment about this execution. Defaults to None.

    Returns:
        None

    Raises:
        sqlalchemy.exc.OperationalError:
            If the upsert fails at the database level and is not recoverable.
        ValueError:
            If the DataFrame or key columns are malformed.

    Example:
        ```python
        log_task_execution(
            platform_conn_engine=engine,
            application_name="nomination",
            job_id="monitor variable checker",
            comment="Ran via cron every 5 minutes"
        )
        ```
    """
    execution_time = get_utc_now_custom_precision()

    record = pd.DataFrame(
        {
            "application_name": [application_name],
            "job_id": [job_id],
            "comment": [comment],
            "last_successful_execution": [execution_time],
        }
    )

    # NOTE(review): only job_id is used as the match key even though the row
    # also carries application_name — confirm whether application_name should
    # be part of key_columns as well.
    upsert_into_mssql(
        db_connection_engine=platform_conn_engine,
        df=record,
        table_name="core_last_successful_task_execution",
        column_names=[
            "application_name",
            "job_id",
            "last_successful_execution",
            "comment",
        ],
        key_columns=["job_id"],
    )
    logging.warning("Task execution timestamps logged successfully.")

pandas_read_sql_with_retry(db_engine, query, params=None)

Executes an SQL query with automatic retry on operational failures, making 3 attempts with a 2-second wait between attempts.

Parameters:

Name Type Description Default
db_engine Engine

A SQLAlchemy database Engine instance.

required
query str

The SQL query to execute.

required
params dict

Dictionary of query parameters.

None

Returns: pd.DataFrame: A pandas DataFrame containing the query result.

Raises:

Type Description
OperationalError

There is an error in the SQL statement.

Example
from physical_operations_utils.sql_utils import pandas_read_sql_with_retry
from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

ctd_conn = get_db_connection(f"common_trading_data_{environment}")

params={"variable_id": "SE3", "resolution_seconds": 900}
query = "SELECT * FROM my_table WHERE variable_id = :variable_id AND resolution_seconds = :resolution_seconds"

df = pandas_read_sql_with_retry(ctd_conn.engine, query, params)
print(df.head())
Source code in physical_operations_utils/sql_utils.py
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
@retry(
    wait=wait_fixed(2),
    stop=stop_after_attempt(3),
    before=before_log(logger, logging.DEBUG),
    after=after_log(logger, logging.ERROR),
)
def pandas_read_sql_with_retry(
    db_engine: Engine, query: str, params: dict | None = None
) -> pd.DataFrame:
    """
    Executes an SQL query with automatic retry on operational failures, making 3 attempts with a 2-second wait between attempts.

    Parameters:
        db_engine (Engine): A SQLAlchemy database Engine instance.
        query (str): The SQL query to execute.
        params (dict | None, optional): Dictionary of query parameters. Defaults to None (no parameters).

    Returns:
        pd.DataFrame: A pandas DataFrame containing the query result.

    Raises:
        OperationalError: There is an error in the SQL statement.

    Example:
        ```python
        from physical_operations_utils.sql_utils import pandas_read_sql_with_retry
        from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

        ctd_conn = get_db_connection(f"common_trading_data_{environment}")

        params={"variable_id": "SE3", "resolution_seconds": 900}
        query = "SELECT * FROM my_table WHERE variable_id = :variable_id AND resolution_seconds = :resolution_seconds"

        df = pandas_read_sql_with_retry(ctd_conn.engine, query, params)
        print(df.head())
        ```
    """
    # Normalise a missing params argument to an empty dict for pd.read_sql.
    params = params or {}

    try:
        with db_engine.connect() as connection:
            return pd.read_sql(text(query), connection, params=params)
    except OperationalError as e:
        # Log the wrapped DBAPI error details before re-raising so the retry
        # decorator (and ultimately the caller) still sees the original error.
        logging.error(f"OperationalError: {e}")
        logging.error(f"Original exception: {e.orig}")
        logging.error(f"Arguments: {e.orig.args}")
        raise e

upsert_into_mssql(db_connection_engine, df, table_name, column_names, key_columns)

Performs an upsert (merge) of a pandas DataFrame into a Microsoft SQL Server table with automatic retries (3 attempts with a 2-second wait).

This function takes a DataFrame and attempts to insert or update rows in the specified table based on the provided key columns. The SQL query is constructed using parameterized bindings to prevent SQL injection, and column and table identifiers are validated before query generation. If the DataFrame is large, it is split into chunks to prevent exceeding parameter or query limits. The chunk size depends on the number of columns. A single transaction is used for all chunks to ensure atomicity, and operational failures are retried up to 3 times with a fixed delay between attempts.

The function returns the total number of rows affected across all chunks, as reported by the database.

Parameters:

Name Type Description Default
db_connection_engine Engine

A SQLAlchemy Engine object as contained in AzureSqlConnection objects.

required
df DataFrame

The pandas DataFrame containing data to upsert.

required
table_name str

The name of the target SQL Server table.

required
column_names list

List of column names to be inserted or updated.

required
key_columns list

List of column names used to determine matches (i.e., primary or unique keys).

required

Returns:

Name Type Description
int int

The total number of rows affected by the upsert operation.

Raises:

Type Description
OperationalError

Raised when an error occurs during SQL execution that is not recoverable after retries.

ValueError

Raised when the input table or column names are invalid.

Example
from physical_operations_utils.sql_utils import upsert_into_mssql
from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

df = pd.DataFrame({"id": [1, 2], "value": ["A", "B"]})

affected = upsert_into_mssql(
    db_connection_engine=get_db_connection(f"common_trading_data_{environment}").engine,
    df=df,
    table_name="my_table",
    column_names=["id", "value"],
    key_columns=["id"]
)

print(f"{affected} rows affected.")
Source code in physical_operations_utils/sql_utils.py
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
@retry(
    wait=wait_fixed(2),
    stop=stop_after_attempt(3),
    before=before_log(logger, logging.DEBUG),
    after=after_log(logger, logging.ERROR),
)
def upsert_into_mssql(
    db_connection_engine: Engine,
    df: pd.DataFrame,
    table_name: str,
    column_names: list,
    key_columns: list,
) -> int:
    """
    Performs an upsert (merge) of a pandas DataFrame into a Microsoft SQL Server table
    with automatic retries (3 attempts with a 2 second wait between them).

    This function takes a DataFrame and attempts to insert or update rows in the specified table based on the
    provided key columns. The SQL query is constructed using parameterized bindings to prevent SQL injection,
    and column and table identifiers are validated before query generation. If the DataFrame is large, it is
    split into chunks to prevent exceeding parameter or query limits. The chunk size depends on the number of columns.
    A single transaction is used for all chunks to ensure atomicity, and operational failures are retried up to
    3 times with a fixed 2-second delay between attempts.

    The function returns the total number of rows affected across all chunks, as reported by the database.

    Parameters:
        db_connection_engine (sqlalchemy.Engine): A SQLAlchemy Engine object as contained in AzureSqlConnection objects.
        df (pd.DataFrame): The pandas DataFrame containing data to upsert.
        table_name (str): The name of the target SQL Server table.
        column_names (list): List of column names to be inserted or updated.
        key_columns (list): List of column names used to determine matches (i.e., primary or unique keys).

    Returns:
        int: The total number of rows affected by the upsert operation (0 if ``df`` is empty).

    Raises:
        OperationalError: Raised when an error occurs during SQL execution that is not recoverable after retries.
        ValueError: Raised when the input table or column names are invalid.

    Example:
        ```python
        from physical_operations_utils.sql_utils import upsert_into_mssql
        from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

        df = pd.DataFrame({"id": [1, 2], "value": ["A", "B"]})

        affected = upsert_into_mssql(
            db_connection_engine=get_db_connection(f"common_trading_data_{environment}").engine,
            df=df,
            table_name="my_table",
            column_names=["id", "value"],
            key_columns=["id"]
        )

        print(f"{affected} rows affected.")
        ```
    """
    try:
        # Validate identifiers up front so no unchecked string ever reaches the SQL text.
        safe_table_name = _validate_identifiers(table_name)
        safe_column_names = [_validate_identifiers(col) for col in column_names]
        safe_key_columns = [_validate_identifiers(col) for col in key_columns]

        # Work on a copy restricted to the requested columns so the caller's frame is untouched.
        df = df[safe_column_names].copy(deep=True)
        # Force object dtype for float/int/datetime columns so NaN/NaT can be replaced by None below.
        for col in df.columns:
            if df[col].dtype.kind in {"f", "i", "M"}:  # float, int, datetime
                df[col] = df[col].astype(object)
        # Replace all pandas NaN/NaT with Python None to enable correct conversion to NULL in SQL.
        df = df.where(pd.notnull(df), None)

        if df.empty:
            # Nothing to upsert; return an int to honour the declared return type
            # (previously returned None here).
            return 0

        # Cap the chunk size so each MERGE stays below SQL Server's limit of 2100 parameters per query.
        chunk_size = min(500, max(1, 2000 // len(column_names)))

        affected_rows = 0
        # One transaction for all chunks so the whole upsert is atomic.
        with db_connection_engine.begin() as conn:
            for chunk_start in range(0, len(df), chunk_size):
                chunk_df = df.iloc[chunk_start : chunk_start + chunk_size]
                values = chunk_df[safe_column_names].values.tolist()
                if not values:
                    continue

                merge_sql, all_params = _generate_merge_query_and_parameters(
                    table_name=safe_table_name,
                    column_names=safe_column_names,
                    key_columns=safe_key_columns,
                    values=values,
                )

                result = conn.execute(text(merge_sql), all_params)
                # rowcount can be -1/None for some drivers; count those as 0.
                affected_rows += result.rowcount or 0
        return affected_rows

    except OperationalError as e:
        # Use the module logger (consistent with the retry decorator's before/after hooks).
        logger.error(f"OperationalError: {e}")
        logger.error(f"Original exception: {e.orig}")
        logger.error(f"Arguments: {e.orig.args}")
        raise e

write_to_application_log(platform_conn_engine, application_name, job_id, messages, print_log=False)

Writes a list of log messages to the core application log table with automatic retry on operational failures.

This function inserts one row per message into the core_application_log table in the physical_trading_platform_django database. Each row includes the application name, job ID, log message, and a UTC timestamp. The function uses parameterized SQL to safely insert values, and retries the entire operation up to 3 times in case of transient operational errors.

The function returns the total number of rows successfully inserted.

Parameters:

Name Type Description Default
platform_conn_engine Engine

A SQLAlchemy Engine object for the platform database.

required
application_name str

The name of the application writing to the log.

required
job_id str

A unique identifier for the job or task writing the log.

required
messages List[str]

A list of log messages to write.

required
print_log bool

A flag to determine if log messages are printed using logging.warning. Defaults to False.

False

Returns:

Name Type Description
int int

The total number of rows written to the log table.

Raises:

Type Description
OperationalError

Raised when a database error occurs that is not resolved after 3 retry attempts.

Example
from physical_operations_utils.sql_utils import write_to_application_log
from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

row_count = write_to_application_log(
    platform_conn_engine=get_db_connection(f"physical_trading_platform_django_{environment}").engine,
    application_name="my_pipeline",
    job_id="job_20250328_xyz",
    messages=["Job started", "Job finished successfully"],
    print_log=True,
)

print(f"{row_count} log entries inserted.")
Source code in physical_operations_utils/sql_utils.py
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
@retry(
    wait=wait_fixed(2),
    stop=stop_after_attempt(3),
)
def write_to_application_log(
    platform_conn_engine: Engine,
    application_name: str,
    job_id: str,
    messages: List[str],
    print_log: bool = False,
) -> int:
    """
    Writes a list of log messages to the core application log table with automatic retry on operational failures.

    This function inserts one row per message into the `core_application_log` table in the physical_trading_platform_django database.
    Each row includes the application name, job ID, log message, and a UTC timestamp. The function uses
    parameterized SQL to safely insert values, and retries the entire operation up to 3 times in case of
    transient operational errors.

    The function returns the total number of rows successfully inserted.

    Parameters:
        platform_conn_engine (Engine): A SQLAlchemy Engine object for the platform database.
        application_name (str): The name of the application writing to the log.
        job_id (str): A unique identifier for the job or task writing the log.
        messages (List[str]): A list of log messages to write.
        print_log (bool): A flag to determine if log messages are printed using logging.warning. Defaults to False.

    Returns:
        int: The total number of rows written to the log table.

    Raises:
        OperationalError: Raised when a database error occurs that is not resolved after 3 retry attempts.

    Example:
        ```python
        from physical_operations_utils.sql_utils import write_to_application_log
        from physical_operations_utils.azure_utils.azure_sql_connection import get_db_connection

        row_count = write_to_application_log(
            platform_conn_engine=get_db_connection(f"physical_trading_platform_django_{environment}").engine,
            application_name="my_pipeline",
            job_id="job_20250328_xyz",
            messages=["Job started", "Job finished successfully"],
            print_log=True,
        )

        print(f"{row_count} log entries inserted.")
        ```
    """
    # Echo messages locally before attempting the DB write, so they are visible
    # even if the insert below fails on every retry.
    if print_log:
        for msg in messages:
            logging.warning(msg)
    try:
        query_text = text(
            """
        INSERT INTO core_application_log (
            application_name,
            job_id,
            message,
            db_updated_utc
        ) VALUES (
            :application_name,
            :job_id,
            :message,
            :db_updated_utc
        );"""
        )
        row_count = 0
        with platform_conn_engine.connect() as conn:
            for message in messages:
                params = {
                    "application_name": application_name,
                    "job_id": job_id,
                    "message": message,
                    # Timestamp each row individually at insert time.
                    "db_updated_utc": get_utc_now_custom_precision(),
                }
                result = conn.execute(query_text, parameters=params)
                # rowcount can be -1/None for some drivers; count those as 0.
                row_count += result.rowcount or 0
            # Single commit so all messages land (or fail) together.
            conn.commit()
        return row_count
    except OperationalError as e:
        logging.error(f"OperationalError: {e}")
        logging.error(f"Original exception: {e.orig}")
        logging.error(f"Arguments: {e.orig.args}")
        raise e