Skip to content

Commit 766ea85

Browse files
committed
cleanup
1 parent cb5ae54 commit 766ea85

File tree

3 files changed

+17
-35
lines changed

3 files changed

+17
-35
lines changed

mssql_python/constants.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -101,12 +101,10 @@ class ConstantsDDBC(Enum):
101101
SQL_ROW_SUCCESS_WITH_INFO = 1
102102
SQL_ROW_NOROW = 100
103103
SQL_ATTR_CURSOR_TYPE = 6
104-
SQL_ATTR_CONCURRENCY = 7
105104
SQL_CURSOR_FORWARD_ONLY = 0
106105
SQL_CURSOR_STATIC = 3
107106
SQL_CURSOR_KEYSET_DRIVEN = 2
108107
SQL_CURSOR_DYNAMIC = 3
109-
SQL_CONCUR_READ_ONLY = 1
110108
SQL_NULL_DATA = -1
111109
SQL_C_DEFAULT = 99
112110
SQL_ATTR_ROW_BIND_TYPE = 5

mssql_python/cursor.py

Lines changed: 7 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -723,13 +723,8 @@ def _build_converter_map(self):
723723
if desc is None:
724724
converter_map.append(None)
725725
continue
726-
727-
# Get SQL type from description
728-
sql_type = desc[1] # type_code is at index 1 in description tuple
729-
730-
# Try to get a converter for this type
726+
sql_type = desc[1]
731727
converter = self.connection.get_output_converter(sql_type)
732-
733728
# If no converter found for the SQL type, try the WVARCHAR converter as a fallback
734729
if converter is None:
735730
from mssql_python.constants import ConstantsDDBC
@@ -1044,7 +1039,7 @@ def execute(
10441039
if self.description: # If we have column descriptions, it's likely a SELECT
10451040
self.rowcount = -1
10461041
self._reset_rownumber()
1047-
# Pre-build column map and converter map for performance
1042+
# Pre-build column map and converter map
10481043
self._cached_column_map = {col_desc[0]: i for i, col_desc in enumerate(self.description)}
10491044
self._cached_converter_map = self._build_converter_map()
10501045
else:
@@ -1766,7 +1761,7 @@ def fetchone(self) -> Union[None, Row]:
17661761

17671762
self.rowcount = self._next_row_index
17681763

1769-
# Build column map once and cache it for performance
1764+
# Build column map once and cache it
17701765
if self._cached_column_map is None and self.description:
17711766
self._cached_column_map = {col_desc[0]: i for i, col_desc in enumerate(self.description)}
17721767
column_map = self._cached_column_map or getattr(self, '_column_name_map', None)
@@ -1817,11 +1812,11 @@ def fetchmany(self, size: int = None) -> List[Row]:
18171812
else:
18181813
self.rowcount = self._next_row_index
18191814

1820-
# Build column map once and cache it for better performance
1815+
# Build column map once and cache it
18211816
if self._cached_column_map is None and self.description:
18221817
self._cached_column_map = {col_desc[0]: i for i, col_desc in enumerate(self.description)}
18231818

1824-
# Convert raw data to Row objects using optimized constructor
1819+
# Convert raw data to Row objects
18251820
column_map = self._cached_column_map or getattr(self, '_column_name_map', None)
18261821
return [Row(row_data, column_map, self) for row_data in rows_data]
18271822
except Exception as e:
@@ -1859,11 +1854,11 @@ def fetchall(self) -> List[Row]:
18591854
else:
18601855
self.rowcount = self._next_row_index
18611856

1862-
# Build column map once and cache it for better performance
1857+
# Build column map once and cache it
18631858
if self._cached_column_map is None and self.description:
18641859
self._cached_column_map = {col_desc[0]: i for i, col_desc in enumerate(self.description)}
18651860

1866-
# Convert raw data to Row objects using optimized constructor
1861+
# Convert raw data to Row objects
18671862
column_map = self._cached_column_map or getattr(self, '_column_name_map', None)
18681863
return [Row(row_data, column_map, self) for row_data in rows_data]
18691864
except Exception as e:

mssql_python/pybind/ddbc_bindings.cpp

Lines changed: 10 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -94,8 +94,6 @@ namespace PythonObjectCache {
9494
}
9595
}
9696

97-
98-
9997
//-------------------------------------------------------------------------------------------------
10098
// Class definitions
10199
//-------------------------------------------------------------------------------------------------
@@ -1474,7 +1472,7 @@ SQLRETURN SQLExecDirect_wrap(SqlHandlePtr StatementHandle, const std::wstring& Q
14741472
DriverLoader::getInstance().loadDriver(); // Load the driver
14751473
}
14761474

1477-
// Configure forward-only cursor for optimal performance
1475+
// Configure forward-only cursor
14781476
if (SQLSetStmtAttr_ptr && StatementHandle && StatementHandle->get()) {
14791477
SQLSetStmtAttr_ptr(StatementHandle->get(),
14801478
SQL_ATTR_CURSOR_TYPE,
@@ -1611,7 +1609,7 @@ SQLRETURN SQLExecute_wrap(const SqlHandlePtr statementHandle,
16111609
LOG("Statement handle is null or empty");
16121610
}
16131611

1614-
// Configure forward-only cursor for optimal performance
1612+
// Configure forward-only cursor
16151613
if (SQLSetStmtAttr_ptr && hStmt) {
16161614
SQLSetStmtAttr_ptr(hStmt,
16171615
SQL_ATTR_CURSOR_TYPE,
@@ -3182,7 +3180,6 @@ SQLRETURN FetchBatchData(SQLHSTMT hStmt, ColumnBuffers& buffers, py::list& colum
31823180
py::list& rows, SQLUSMALLINT numCols, SQLULEN& numRowsFetched, const std::vector<SQLUSMALLINT>& lobColumns) {
31833181
LOG("Fetching data in batches");
31843182
SQLRETURN ret = SQLFetchScroll_ptr(hStmt, SQL_FETCH_NEXT, 0);
3185-
// SQLRETURN ret = SQLFetch_ptr(hStmt);
31863183
if (ret == SQL_NO_DATA) {
31873184
LOG("No data to fetch");
31883185
return ret;
@@ -3204,15 +3201,12 @@ SQLRETURN FetchBatchData(SQLHSTMT hStmt, ColumnBuffers& buffers, py::list& colum
32043201
const auto& columnMeta = columnNames[col].cast<py::dict>();
32053202
columnInfos[col].dataType = columnMeta["DataType"].cast<SQLSMALLINT>();
32063203
columnInfos[col].columnSize = columnMeta["ColumnSize"].cast<SQLULEN>();
3207-
columnInfos[col].isLob = std::find(lobColumns.begin(), lobColumns.end(), col + 1) != lobColumns.end(); // col+1 because lobColumns uses 1-based indexing
3208-
3209-
// Pre-compute processed column size and fetch buffer size for char/wchar types
3204+
columnInfos[col].isLob = std::find(lobColumns.begin(), lobColumns.end(), col + 1) != lobColumns.end();
32103205
columnInfos[col].processedColumnSize = columnInfos[col].columnSize;
32113206
HandleZeroColumnSizeAtFetch(columnInfos[col].processedColumnSize);
32123207
columnInfos[col].fetchBufferSize = columnInfos[col].processedColumnSize + 1; // +1 for null terminator
32133208
}
32143209

3215-
// Cache expensive module imports and operations outside the loops
32163210
static const std::string defaultSeparator = ".";
32173211
std::string decimalSeparator = GetDecimalSeparator(); // Cache decimal separator
32183212
bool isDefaultDecimalSeparator = (decimalSeparator == defaultSeparator);
@@ -3226,9 +3220,9 @@ SQLRETURN FetchBatchData(SQLHSTMT hStmt, ColumnBuffers& buffers, py::list& colum
32263220
// Create row container pre-allocated with known column count
32273221
py::list row(numCols);
32283222
for (SQLUSMALLINT col = 1; col <= numCols; col++) {
3229-
const ColumnInfo& colInfo = columnInfos[col - 1];
3230-
SQLSMALLINT dataType = colInfo.dataType;
3231-
SQLLEN dataLen = buffers.indicators[col - 1][i];
3223+
const ColumnInfo& colInfo = columnInfos[col - 1];
3224+
SQLSMALLINT dataType = colInfo.dataType;
3225+
SQLLEN dataLen = buffers.indicators[col - 1][i];
32323226
if (dataLen == SQL_NULL_DATA) {
32333227
row[col - 1] = py::none();
32343228
continue;
@@ -3332,7 +3326,6 @@ SQLRETURN FetchBatchData(SQLHSTMT hStmt, ColumnBuffers& buffers, py::list& colum
33323326

33333327
// Use pre-cached decimal separator
33343328
if (isDefaultDecimalSeparator) {
3335-
// Direct py::str creation without intermediate string
33363329
row[col - 1] = PythonObjectCache::get_decimal_class()(py::str(rawData, decimalDataLen));
33373330
} else {
33383331
std::string numStr(rawData, decimalDataLen);
@@ -3629,8 +3622,6 @@ SQLRETURN FetchMany_wrap(SqlHandlePtr StatementHandle, py::list& rows, int fetch
36293622
// Reset attributes before returning to avoid using stack pointers later
36303623
SQLSetStmtAttr_ptr(hStmt, SQL_ATTR_ROW_ARRAY_SIZE, (SQLPOINTER)1, 0);
36313624
SQLSetStmtAttr_ptr(hStmt, SQL_ATTR_ROWS_FETCHED_PTR, NULL, 0);
3632-
3633-
// Process each column (data is now in buffers at index [0])
36343625
return ret;
36353626
}
36363627

@@ -3661,8 +3652,8 @@ SQLRETURN FetchAll_wrap(SqlHandlePtr StatementHandle, py::list& rows) {
36613652
return ret;
36623653
}
36633654

3664-
// Define a memory limit (1.5 GB)
3665-
const size_t memoryLimit = 1536ULL * 1024 * 1024;
3655+
// Define a memory limit (1 GB)  — NOTE(review): the value below is 1ULL * 1024 * 1024 = 1 MiB, not 1 GiB; a `* 1024` appears to be missing (previous value was 1536ULL * 1024 * 1024 = 1.5 GiB). Confirm intent.
3656+
const size_t memoryLimit = 1ULL * 1024 * 1024;
36663657
size_t totalRowSize = calculateRowSize(columnNames, numCols);
36673658

36683659
// Calculate fetch size based on the total row size and memory limit
@@ -3690,15 +3681,13 @@ SQLRETURN FetchAll_wrap(SqlHandlePtr StatementHandle, py::list& rows) {
36903681
// If the row size is larger than the memory limit, fetch one row at a time
36913682
fetchSize = 1;
36923683
} else if (numRowsInMemLimit > 0 && numRowsInMemLimit <= 100) {
3693-
fetchSize = 500;
3694-
// } else if (numRowsInMemLimit > 100 && numRowsInMemLimit <= 10000) {
3695-
// fetchSize = 500;
3684+
// If between 1-100 rows fit in memoryLimit, fetch 100 rows at a time
3685+
fetchSize = 100;
36963686
} else {
36973687
fetchSize = 1000;
36983688
}
36993689
LOG("Fetching data in batch sizes of {}", fetchSize);
37003690

3701-
// fetchSize = 1;
37023691
std::vector<SQLUSMALLINT> lobColumns;
37033692
for (SQLSMALLINT i = 0; i < numCols; i++) {
37043693
auto colMeta = columnNames[i].cast<py::dict>();

0 commit comments

Comments
 (0)