diff --git odbc/src/cpp/HiveConnection.h odbc/src/cpp/HiveConnection.h index 3b2e2b1..b035107 100644 --- odbc/src/cpp/HiveConnection.h +++ odbc/src/cpp/HiveConnection.h @@ -48,10 +48,12 @@ using namespace apache::thrift::transport; * @see DBCloseConnection() */ struct HiveConnection { - HiveConnection(shared_ptr c, shared_ptr t) : - client(c), transport(t) {} + HiveConnection(shared_ptr c, shared_ptr t, + shared_ptr s, shared_ptr d) : client(c), transport(t), socket(s), dbName(d) {} shared_ptr client; shared_ptr transport; + shared_ptr socket; + shared_ptr dbName; }; diff --git odbc/src/cpp/HiveResultSet.cpp odbc/src/cpp/HiveResultSet.cpp index d3d375e..a469f01 100644 --- odbc/src/cpp/HiveResultSet.cpp +++ odbc/src/cpp/HiveResultSet.cpp @@ -17,51 +17,73 @@ */ #include +#include #include "ThriftHive.h" +#include #include "HiveResultSet.h" #include "hiveclienthelper.h" #include "thriftserverconstants.h" +/* MSVC has _snprintf() and it does NOT guarantee NULL termination */ +#ifdef WIN32 +#define snprintf _snprintf +#endif + +static bool idxCmp(Apache::Hadoop::Hive::Index idx1, Apache::Hadoop::Hive::Index idx2); /************************************************************************************************* * HiveQueryResultSet Subclass Definition ************************************************************************************************/ -HiveQueryResultSet::HiveQueryResultSet(int max_buf_rows) { +HiveQueryResultSet::HiveQueryResultSet(int max_buf_rows, int fetch_row_size) { m_connection = NULL; - m_serial_rowset.reset(); + m_serial_rowset = NULL; assert(max_buf_rows > 0); - m_max_buffered_rows = max_buf_rows; + setArraySize(max_buf_rows, fetch_row_size); m_fetch_idx = -1; + m_resultset_idx = -1; m_has_results = false; m_fetch_attempted = false; - /* Allocate the necessary amount of memory to prevent resizing */ - m_result_set_data.reserve(max_buf_rows); } HiveQueryResultSet::~HiveQueryResultSet() { /* Nothing to deallocate */ } -HiveReturn HiveQueryResultSet::initialize(HiveConnection* connection, char* err_buf, +HiveReturn HiveQueryResultSet::initialize(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len) { assert(connection != NULL); m_connection = connection; - m_serial_rowset.reset(); m_fetch_idx = -1; m_has_results = false; m_fetch_attempted = false; - return initializeSchema(err_buf, err_buf_len); + return initializeSchema(hive_error, err_buf_len); } -HiveReturn HiveQueryResultSet::fetchNext(char* err_buf, size_t err_buf_len) { - m_fetch_idx++; +void HiveQueryResultSet::setArraySize(int max_buf_rows, int fetch_row_size) { + // setup the resultset fields based on the resultset size + m_max_buffered_rows = max_buf_rows; + m_fetch_row_size = fetch_row_size; + if (m_serial_rowset) { + delete [] m_serial_rowset; + } + m_serial_rowset = new HiveSerializedRowSet[fetch_row_size]; + for (int cnt = 0 ; cnt < fetch_row_size; cnt++) + m_serial_rowset[cnt].reset(); + /* Allocate the necessary amount of memory to prevent resizing */ + m_result_set_data.reserve(max_buf_rows); +} + +HiveReturn HiveQueryResultSet::fetchNext(hive_err_info *hive_error, size_t err_buf_len, int *num_rows) { + int fetch_size, cnt; + + m_fetch_idx ++; if (m_fetch_idx >= (int) m_result_set_data.size()) /* If there are no more buffered rows... 
*/ { /* Repopulate the result buffer */ - if (fetchNewResults(err_buf, err_buf_len) == HIVE_ERROR) { + if (fetchNewResults(hive_error, err_buf_len) == HIVE_ERROR) { return HIVE_ERROR; } /* Set the cursor to point at the first element (fetchNewResults would have reset its position)*/ @@ -70,18 +92,33 @@ HiveReturn HiveQueryResultSet::fetchNext(char* err_buf, size_t err_buf_len) { return HIVE_NO_MORE_DATA; /* No more data to fetch */ } } - m_serial_rowset.reset(); /* Remove old row data before saving next */ - m_serial_rowset.initialize(m_schema, m_result_set_data[m_fetch_idx]); + /* Set the cursor to point at the first element client re*/ + m_resultset_idx = 0; + m_resultset_available = min((size_t)m_fetch_row_size, (size_t)(m_result_set_data.size()-m_fetch_idx)); + *num_rows = m_resultset_available; + // make sure not to read beyond the size of available data + for (cnt = 0; cnt < m_resultset_available; cnt++) { + m_serial_rowset[cnt].reset(); /* Remove old row data before saving next */ + m_serial_rowset[cnt].initialize(m_schema, m_result_set_data[m_fetch_idx+cnt]); + } + m_fetch_idx += cnt - 1; // move the read position to end of the current set + return HIVE_SUCCESS; } -HiveReturn HiveQueryResultSet::hasResults(int* results, char* err_buf, size_t err_buf_len) { +// position the cursor back to the last accessed row +void HiveQueryResultSet::seekPrior() { + if (m_fetch_idx >= 0) + m_fetch_idx -= m_resultset_available; +} + +HiveReturn HiveQueryResultSet::hasResults(int* results, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(results == NULL, __FUNCTION__, - "Pointer to has_results (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to has_results (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); if (!m_fetch_attempted) { - if (fetchNewResults(err_buf, err_buf_len) == HIVE_ERROR) { + if (fetchNewResults(hive_error, err_buf_len) == HIVE_ERROR) { return HIVE_ERROR; /* An error must have occurred */ } } @@ -90,9 +127,9 @@ HiveReturn HiveQueryResultSet::hasResults(int* results, char* err_buf, size_t er return HIVE_SUCCESS; } -HiveReturn HiveQueryResultSet::getColumnCount(size_t* col_count, char* err_buf, size_t err_buf_len) { +HiveReturn HiveQueryResultSet::getColumnCount(size_t* col_count, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(col_count == NULL, __FUNCTION__, - "Pointer to col_count (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to col_count (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); /* If m_schema has been initialized, then m_schema.fieldSchemas must be populated */ *col_count = m_schema.fieldSchemas.size(); @@ -101,15 +138,15 @@ HiveReturn HiveQueryResultSet::getColumnCount(size_t* col_count, char* err_buf, } HiveReturn HiveQueryResultSet::createColumnDesc(size_t column_idx, - HiveColumnDesc** column_desc_ptr, char* err_buf, + HiveColumnDesc** column_desc_ptr, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(column_desc_ptr == NULL, __FUNCTION__, - "Pointer to column_desc (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to column_desc (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(m_schema.fieldSchemas.empty(), __FUNCTION__, - "Resultset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Resultset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= m_schema.fieldSchemas.size(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column 
index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); *column_desc_ptr = new HiveColumnDesc(); (*column_desc_ptr)->initialize(m_schema.fieldSchemas[column_idx]); @@ -117,28 +154,34 @@ HiveReturn HiveQueryResultSet::createColumnDesc(size_t column_idx, } HiveRowSet& HiveQueryResultSet::getRowSet() { - return m_serial_rowset; + return m_serial_rowset[m_resultset_idx]; +} + +// position to next row in the fetch array. wrapp around to start +void HiveQueryResultSet::seekNextRow() +{ + m_resultset_idx = (++m_resultset_idx) % m_resultset_available; } -HiveReturn HiveQueryResultSet::initializeSchema(char* err_buf, size_t err_buf_len) { +HiveReturn HiveQueryResultSet::initializeSchema(hive_err_info *hive_error, size_t err_buf_len) { try { m_connection->client->getSchema(m_schema); } catch (Apache::Hadoop::Hive::HiveServerException& ex) { - RETURN_FAILURE(__FUNCTION__, ex.what(), err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, ex.SQLState.c_str(), ex.errorCode, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); } catch (...) { - RETURN_FAILURE(__FUNCTION__, - "Unknown Hive get result schema error.", err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, "", -1, + "Unknown Hive get result schema error.", hive_error, err_buf_len, HIVE_ERROR); } /* TODO: hard code this in for now because m_schema.properties not properly implemented; * but remove this when it is implemented */ - m_schema.properties[FIELD_DELIM] = "\t"; + m_schema.properties[FIELD_DELIM] = DEFAULT_FIELD_DELIM; m_schema.properties[SERIALIZATION_NULL_FORMAT] = DEFAULT_NULL_FORMAT; /* TODO: replace the real null representation with 'NULL' because of a bug in the Hive Server * fetch function; remove this when Hive Server has been fixed to not replace the actual null * rep with NULL. */ - m_schema.properties[SERIALIZATION_NULL_FORMAT] = "NULL"; + m_schema.properties[SERIALIZATION_NULL_FORMAT] = DEFAULT_SERIALIZATION_NULL_FORMAT; /* Verify the presence of known m_schema properties */ assert(m_schema.properties.find(FIELD_DELIM) != m_schema.properties.end()); @@ -147,7 +190,7 @@ HiveReturn HiveQueryResultSet::initializeSchema(char* err_buf, size_t err_buf_le return HIVE_SUCCESS; } -HiveReturn HiveQueryResultSet::fetchNewResults(char* err_buf, size_t err_buf_len) { +HiveReturn HiveQueryResultSet::fetchNewResults(hive_err_info *hive_error, size_t err_buf_len) { m_result_set_data.clear(); /* Empty the original buffer just to be safe */ assert(m_connection != NULL); assert(m_connection->client != NULL); @@ -155,10 +198,10 @@ HiveReturn HiveQueryResultSet::fetchNewResults(char* err_buf, size_t err_buf_len try { m_connection->client->fetchN(m_result_set_data, m_max_buffered_rows); } catch (Apache::Hadoop::Hive::HiveServerException& ex) { - RETURN_FAILURE(__FUNCTION__, ex.what(), err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, ex.SQLState.c_str(), ex.errorCode, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); } catch (...) 
{ - RETURN_FAILURE(__FUNCTION__, - "Unknown Hive FetchN error.", err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, "", -1, + "Unknown Hive FetchN error.", hive_error, err_buf_len, HIVE_ERROR); } /* This indicates that a Hive server fetch call has successfully executed */ @@ -172,6 +215,166 @@ HiveReturn HiveQueryResultSet::fetchNewResults(char* err_buf, size_t err_buf_len } +/************************************************************************************************* + * HiveLocalResultSet Subclass Definition + ************************************************************************************************/ +HiveLocalResultSet::HiveLocalResultSet(int max_buf_rows, int resultset_size, const char *localResultSet[], + int localResultSize) : HiveQueryResultSet(max_buf_rows,resultset_size) { + // save the given local resultset + for (int cnt = localResultSize-1 ; cnt >= 0 ; cnt--) { + m_local_result_set_data.push_back(localResultSet[cnt]); + } +} + +HiveReturn HiveLocalResultSet::initialize(HiveConnection* connection, hive_err_info *hive_error, + size_t err_buf_len, const char *pSchema[][3], int numCols) { + HiveReturn rc; + + assert(connection != NULL); + m_connection = connection; + for (int cnt = 0; cnt < m_fetch_row_size; cnt++) + m_serial_rowset[cnt].reset(); + if ((rc = execQuery(connection, hive_error, err_buf_len)) != HIVE_SUCCESS) + return rc; + m_fetch_idx = -1; + m_has_results = true; + m_fetch_attempted = true; + return initializeSchema(hive_error, err_buf_len, pSchema, numCols); +} + +HiveReturn HiveLocalResultSet::initializeSchema(hive_err_info *hive_error, size_t err_buf_len, + const char *pSchema[][3], int numCols) { + /* Initialize the schema values needed for this resultset. + * OK to hardcode because these fields should never change */ + m_schema.properties[SERIALIZATION_NULL_FORMAT] = DEFAULT_NULL_FORMAT; + + Apache::Hadoop::Hive::FieldSchema tmp_field_schema; + for (unsigned int idx = 0; idx < numCols; idx++) { + tmp_field_schema.name = pSchema[idx][0]; + tmp_field_schema.type = pSchema[idx][1]; + /* Makes a copy of this tmp FieldSchema */ + m_schema.fieldSchemas.push_back(tmp_field_schema); + } + + /* TODO: hard code this in for now because m_schema.properties not properly implemented; + * but remove this when it is implemented */ + m_schema.properties[FIELD_DELIM] = DEFAULT_FIELD_DELIM; + m_schema.properties[SERIALIZATION_NULL_FORMAT] = DEFAULT_NULL_FORMAT; + + m_schema.properties[SERIALIZATION_NULL_FORMAT] = DEFAULT_SERIALIZATION_NULL_FORMAT ; + + return HIVE_SUCCESS; +} + +HiveReturn HiveLocalResultSet::execQuery(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len) +{ + return HIVE_SUCCESS; +} + +HiveReturn HiveLocalResultSet::fetchNewResults(hive_err_info *hive_error, size_t err_buf_len) { + + m_fetch_idx = -1; /* Reset the cursor b/c the old index no longer has any meaning */ + m_result_set_data.clear(); /* Empty the original buffer just to be safe */ + + int batch_size = min((size_t)m_max_buffered_rows, (size_t)m_local_result_set_data.size()); + if (batch_size > 0) { + // save the resultset rows into the QueryResultSet data vector that fits into the application fetch buffer + for (int cnt = 0; cnt < batch_size; cnt++) { + m_result_set_data.push_back(m_local_result_set_data.back()); + m_local_result_set_data.pop_back(); + } + m_has_results = true; // has more results + } + else { + m_has_results = false; // no more results + } + + return HIVE_SUCCESS; +} + 
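With the block-fetch changes above, a caller drains a HiveQueryResultSet in batches: fetchNext() now stages up to the client fetch size and reports the staged count through num_rows, while getRowSet() and seekNextRow() walk the staged array. The sketch below is illustrative only and is not part of this patch; the names resultset, err, err_len and the 256-byte field buffer are assumptions, and hive_err_info is the error structure introduced elsewhere in this change.

    hive_err_info err;                 /* error-info struct introduced by this change */
    size_t err_len = 0;                /* err_buf_len is retained by the new signatures */
    int num_rows = 0;
    size_t col_count = 0, field_len = 0;
    int is_null = 0;
    char field[256];                   /* illustrative per-field output buffer */

    resultset->getColumnCount(&col_count, &err, err_len);
    while (resultset->fetchNext(&err, err_len, &num_rows) == HIVE_SUCCESS) {
      for (int row = 0; row < num_rows; ++row) {
        HiveRowSet& rowset = resultset->getRowSet();   /* row at the current array index */
        for (size_t col = 0; col < col_count; ++col) {
          rowset.getFieldAsCString(col, field, sizeof(field), &field_len,
                                   &is_null, &err, err_len);
        }
        resultset->seekNextRow();      /* wrap-around advance within the staged rows */
      }
    }
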
+/************************************************************************************************* + * HiveStatsResultSet Subclass Definition + ************************************************************************************************/ + +HiveReturn HiveStatsResultSet::execQuery(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len) +{ + int colPos, idxPos; + string *curRow; + + assert(connection != NULL); + assert(connection->client != NULL); + try { + m_connection->client->get_indexes(m_indexes, *(connection->dbName), m_tableName, 100); + } catch (Apache::Hadoop::Hive::MetaException& ex) { + // MetaException has only error message, no SQLState or native error code + RETURN_FAILURE(__FUNCTION__, "", -1, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); + } catch (Apache::Hadoop::Hive::UnknownTableException& ex) { + RETURN_FAILURE(__FUNCTION__, "", -1, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); + } catch (Apache::Hadoop::Hive::UnknownDBException& ex) { + RETURN_FAILURE(__FUNCTION__, "", -1, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); + } catch (...) { + RETURN_FAILURE(__FUNCTION__, "", -1, + "Unknown Hive get fields error.", hive_error, err_buf_len, HIVE_ERROR); + } + + m_result_set_data.clear(); /* Empty the original buffer just to be safe */ + sort(m_indexes.begin(), m_indexes.end(),idxCmp); // sort the index info on index name + // construct the resultset from the get_index reply + for (idxPos = 0; idxPos < m_indexes.size(); idxPos ++) { + for (colPos = 0; colPos < m_indexes[idxPos].sd.cols.size(); colPos++) { + curRow = new string(); + constructRow(curRow, idxPos, colPos); + m_result_set_data.push_back(*curRow); + } + } + if (m_result_set_data.size() == 0 ) + m_has_results = false; // no more results + else + m_has_results = true; // more data available + m_fetch_idx = -1; // Reset the cursor b/c the old index no longer has any meaning + + return HIVE_SUCCESS; +} + +void HiveStatsResultSet::constructRow(string *currRow, int idxPos, int colPos) { + + char ord[8]; + + //DEFAULT_FIELD_DELIM + // The resultset is created when the object was constructed. 
subsequent fetchindicates end of result + currRow->append(m_indexes[idxPos].dbName); // 1 - TABLE_CAT + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(DEFAULT_SERIALIZATION_NULL_FORMAT); // 2 - TABLE_SCHEM + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(m_indexes[idxPos].origTableName); // 3 - TABLE_NAME + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append("1"); // 4 - NON_UNIQUE + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(DEFAULT_SERIALIZATION_NULL_FORMAT); // 5 - INDEX_QUALIFIER + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(m_indexes[idxPos].indexName); // 6 - INDEX_NAME + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append("3"); // 7 - TYPE : SQL_INDEX_OTHER + currRow->append(DEFAULT_FIELD_DELIM); + sprintf(ord, "%d", colPos+1); + currRow->append(ord); // 8 - ORDINAL_POSITION + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(m_indexes[idxPos].sd.cols[colPos].name); // 9 - COLUMN_NAME + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(DEFAULT_SERIALIZATION_NULL_FORMAT); // 10 - ASC_OR_DESC + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(DEFAULT_SERIALIZATION_NULL_FORMAT); // 11 - CARDINALITY + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(DEFAULT_SERIALIZATION_NULL_FORMAT); // 12 - PAGES + currRow->append(DEFAULT_FIELD_DELIM); + currRow->append(DEFAULT_SERIALIZATION_NULL_FORMAT); // 13 - FILTER_CONDITION +} + +// compare function for index object +static bool idxCmp(Apache::Hadoop::Hive::Index idx1, Apache::Hadoop::Hive::Index idx2) +{ + return (idx1.indexName < idx2.indexName); +} /************************************************************************************************* * HiveTablesResultSet Subclass Definition @@ -205,32 +408,32 @@ HiveTablesResultSet::~HiveTablesResultSet() { } HiveReturn HiveTablesResultSet::initialize(HiveConnection* connection, - const char* tbl_search_pattern, char* err_buf, + const char* tbl_search_pattern, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(connection == NULL, __FUNCTION__, - "Hive connection cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive connection cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(connection->client == NULL, __FUNCTION__, - "Hive connection client cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive connection client cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(tbl_search_pattern == NULL, __FUNCTION__, - "Table search pattern cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Table search pattern cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); m_fetch_idx = -1; try { - /* Just use the default database name for now b/c Hive does not yet support multiple - * databases */ - connection->client->get_tables(m_tables, DEFAULT_DATABASE, tbl_search_pattern); + connection->client->get_tables(m_tables, *(connection->dbName), tbl_search_pattern); } catch (Apache::Hadoop::Hive::MetaException& ex) { - RETURN_FAILURE(__FUNCTION__, ex.what(), err_buf, err_buf_len, HIVE_ERROR); + // MetaException only has message, no SQLState or native error code + RETURN_FAILURE(__FUNCTION__, "", -1, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); } catch (...) 
{ - RETURN_FAILURE(__FUNCTION__, - "Unknown Hive get tables error.", err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, "", -1, + "Unknown Hive get tables error.", hive_error, err_buf_len, HIVE_ERROR); } /* Sort the table names */ sort(m_tables.begin(), m_tables.end()); - return initializeSchema(err_buf, err_buf_len); + + return initializeSchema(hive_error, err_buf_len); } -HiveReturn HiveTablesResultSet::fetchNext(char* err_buf, size_t err_buf_len) { +HiveReturn HiveTablesResultSet::fetchNext(hive_err_info *hive_error, size_t err_buf_len, int *num_rows) { m_fetch_idx++; if (m_fetch_idx >= (int) m_tables.size()) /* If there are no more tables */ { @@ -239,26 +442,33 @@ HiveReturn HiveTablesResultSet::fetchNext(char* err_buf, size_t err_buf_len) { } /* Populate m_curr_row_data with the latest row information */ - if (constructCurrentRow(err_buf, err_buf_len) == HIVE_ERROR) { + if (constructCurrentRow(hive_error, err_buf_len) == HIVE_ERROR) { return HIVE_ERROR; /* An error must have occurred */ } m_vecstring_rowset.reset(); /* Remove old rowset data before saving next */ m_vecstring_rowset.initialize(m_schema, &m_curr_row_data); + *num_rows = 1; return HIVE_SUCCESS; } -HiveReturn HiveTablesResultSet::hasResults(int* results, char* err_buf, size_t err_buf_len) { +// position the cursor back to the last accessed row +void HiveTablesResultSet::seekPrior() { + if (m_fetch_idx >= 0) + m_fetch_idx --; +} + +HiveReturn HiveTablesResultSet::hasResults(int* results, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(results == NULL, __FUNCTION__, - "Pointer to has_results (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to has_results (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); *results = (m_tables.size() > 0) ? 
1 : 0; /* Just check the vector length because no caching */ return HIVE_SUCCESS; } -HiveReturn HiveTablesResultSet::getColumnCount(size_t* col_count, char* err_buf, size_t err_buf_len) { +HiveReturn HiveTablesResultSet::getColumnCount(size_t* col_count, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(col_count == NULL, __FUNCTION__, - "Pointer to col_count (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to col_count (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); /* If m_schema has been initialized, then m_schema.fieldSchemas must be populated */ *col_count = m_schema.fieldSchemas.size(); @@ -266,15 +476,15 @@ HiveReturn HiveTablesResultSet::getColumnCount(size_t* col_count, char* err_buf, } HiveReturn HiveTablesResultSet::createColumnDesc(size_t column_idx, - HiveColumnDesc** column_desc_ptr, char* err_buf, + HiveColumnDesc** column_desc_ptr, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(column_desc_ptr == NULL, __FUNCTION__, - "Pointer to column_desc (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to column_desc (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(m_schema.fieldSchemas.empty(), __FUNCTION__, - "Resultset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Resultset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= m_schema.fieldSchemas.size(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); *column_desc_ptr = new HiveColumnDesc(); (*column_desc_ptr)->initialize(m_schema.fieldSchemas[column_idx]); @@ -285,7 +495,11 @@ HiveRowSet& HiveTablesResultSet::getRowSet() { return m_vecstring_rowset; } -HiveReturn HiveTablesResultSet::initializeSchema(char* err_buf, size_t err_buf_len) { +void HiveTablesResultSet::seekNextRow() +{ +} + +HiveReturn HiveTablesResultSet::initializeSchema(hive_err_info *hive_error, size_t err_buf_len) { /* Initialize the schema values needed for this resultset. 
* OK to hardcode because these fields should never change */ m_schema.properties[SERIALIZATION_NULL_FORMAT] = DEFAULT_NULL_FORMAT; @@ -304,7 +518,7 @@ HiveReturn HiveTablesResultSet::initializeSchema(char* err_buf, size_t err_buf_l return HIVE_SUCCESS; } -HiveReturn HiveTablesResultSet::constructCurrentRow(char* err_buf, size_t err_buf_len) { +HiveReturn HiveTablesResultSet::constructCurrentRow(hive_err_info *hive_error, size_t err_buf_len) { /* Clear out the previous row data just to be safe */ m_curr_row_data.clear(); int column_num; @@ -329,7 +543,6 @@ HiveReturn HiveTablesResultSet::constructCurrentRow(char* err_buf, size_t err_bu return HIVE_SUCCESS; } - /************************************************************************************************* * HiveColumnsResultSet Subclass Definition ************************************************************************************************/ @@ -380,16 +593,16 @@ HiveColumnsResultSet::~HiveColumnsResultSet() { HiveReturn HiveColumnsResultSet::initialize(HiveConnection* connection, const char* tbl_search_pattern, - const char* col_search_pattern, char* err_buf, + const char* col_search_pattern, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(connection == NULL, __FUNCTION__, - "Hive connection cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive connection cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(connection->client == NULL, __FUNCTION__, - "Hive connection client cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive connection client cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(tbl_search_pattern == NULL, __FUNCTION__, - "Table search pattern cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Table search pattern cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(col_search_pattern == NULL, __FUNCTION__, - "Column search pattern cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Column search pattern cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); /* TODO: col_search_pattern is not currently supported; arg is ignored for now; * either add support in Hive Server or here */ @@ -399,26 +612,25 @@ HiveReturn HiveColumnsResultSet::initialize(HiveConnection* connection, m_col_fetch_idx = -1; try { - /* Just use the default database name for now b/c Hive does not yet support multiple - * databases */ - connection->client->get_tables(m_tables, DEFAULT_DATABASE, tbl_search_pattern); + connection->client->get_tables(m_tables, *(connection->dbName), tbl_search_pattern); } catch (Apache::Hadoop::Hive::MetaException& ex) { - RETURN_FAILURE(__FUNCTION__, ex.what(), err_buf, err_buf_len, HIVE_ERROR); + // MetaException has only error message, no SQLState or native error code + RETURN_FAILURE(__FUNCTION__, "", -1, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); } catch (...) 
{ - RETURN_FAILURE(__FUNCTION__, - "Unknown Hive get tables error.", err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, "", -1, + "Unknown Hive get tables error.", hive_error, err_buf_len, HIVE_ERROR); } /* Sort the table names */ sort(m_tables.begin(), m_tables.end()); - return initializeSchema(err_buf, err_buf_len); + return initializeSchema(hive_error, err_buf_len); } -HiveReturn HiveColumnsResultSet::fetchNext(char* err_buf, size_t err_buf_len) { +HiveReturn HiveColumnsResultSet::fetchNext(hive_err_info *hive_error, size_t err_buf_len, int *num_rows) { m_col_fetch_idx++; /* If there are no more columns in the current table */ if (m_col_fetch_idx >= (int) m_columns.size()) { - HiveReturn retval = getNextTableFields(err_buf, err_buf_len); + HiveReturn retval = getNextTableFields(hive_error, err_buf_len); if (retval != HIVE_SUCCESS) { /* Prevent the m_col_fetch_idx from wrapping around after too many calls */ m_col_fetch_idx--; @@ -432,28 +644,35 @@ HiveReturn HiveColumnsResultSet::fetchNext(char* err_buf, size_t err_buf_len) { } /* Populate m_curr_row_data with the latest row information */ - if (constructCurrentRow(err_buf, err_buf_len) == HIVE_ERROR) { + if (constructCurrentRow(hive_error, err_buf_len) == HIVE_ERROR) { return HIVE_ERROR; /* An error must have occurred */ } m_vecstring_rowset.reset(); /* Remove old rowset data before saving next */ m_vecstring_rowset.initialize(m_schema, &m_curr_row_data); + *num_rows = 0; return HIVE_SUCCESS; } -HiveReturn HiveColumnsResultSet::hasResults(int* results, char* err_buf, size_t err_buf_len) { +// position the cursor back to the last accessed row +void HiveColumnsResultSet::seekPrior() { + if (m_col_fetch_idx >= 0) + m_col_fetch_idx--; +} + +HiveReturn HiveColumnsResultSet::hasResults(int* results, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(results == NULL, __FUNCTION__, - "Pointer to has_results (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to has_results (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); /* If there are tables, then there must be columns to fetch */ *results = (m_tables.size() > 0) ? 
1 : 0; return HIVE_SUCCESS; } -HiveReturn HiveColumnsResultSet::getColumnCount(size_t* col_count, char* err_buf, +HiveReturn HiveColumnsResultSet::getColumnCount(size_t* col_count, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(col_count == NULL, __FUNCTION__, - "Pointer to col_count (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to col_count (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); /* If m_schema has been initialized, then m_schema.fieldSchemas must be populated */ *col_count = m_schema.fieldSchemas.size(); @@ -461,15 +680,15 @@ HiveReturn HiveColumnsResultSet::getColumnCount(size_t* col_count, char* err_buf } HiveReturn HiveColumnsResultSet::createColumnDesc(size_t column_idx, - HiveColumnDesc** column_desc_ptr, char* err_buf, + HiveColumnDesc** column_desc_ptr, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(column_desc_ptr == NULL, __FUNCTION__, - "Pointer to column_desc (output) cannot be NULL.", err_buf, err_buf_len, + "Pointer to column_desc (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(m_schema.fieldSchemas.empty(), __FUNCTION__, - "Resultset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Resultset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= m_schema.fieldSchemas.size(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); *column_desc_ptr = new HiveColumnDesc(); (*column_desc_ptr)->initialize(m_schema.fieldSchemas[column_idx]); @@ -480,7 +699,11 @@ HiveRowSet& HiveColumnsResultSet::getRowSet() { return m_vecstring_rowset; } -HiveReturn HiveColumnsResultSet::getNextTableFields(char* err_buf, size_t err_buf_len) { +void HiveColumnsResultSet::seekNextRow() +{ +} + +HiveReturn HiveColumnsResultSet::getNextTableFields(hive_err_info *hive_error, size_t err_buf_len) { /* Clear out the field schemas for the previous table */ m_columns.clear(); @@ -495,24 +718,24 @@ HiveReturn HiveColumnsResultSet::getNextTableFields(char* err_buf, size_t err_bu assert(m_connection != NULL); assert(m_connection->client != NULL); try { - /* Just use the default database name for now b/c Hive does not yet support multiple databases */ - m_connection->client->get_schema(m_columns, DEFAULT_DATABASE, m_tables[m_tbl_fetch_idx]); + m_connection->client->get_schema(m_columns, *(m_connection->dbName), m_tables[m_tbl_fetch_idx]); } catch (Apache::Hadoop::Hive::MetaException& ex) { - RETURN_FAILURE(__FUNCTION__, ex.what(), err_buf, err_buf_len, HIVE_ERROR); + // MetaException has only error message, no SQLState or native error code + RETURN_FAILURE(__FUNCTION__, "", -1, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); } catch (Apache::Hadoop::Hive::UnknownTableException& ex) { - RETURN_FAILURE(__FUNCTION__, ex.what(), err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, "", -1, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); } catch (Apache::Hadoop::Hive::UnknownDBException& ex) { - RETURN_FAILURE(__FUNCTION__, ex.what(), err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, "", -1, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); } catch (...) 
{ - RETURN_FAILURE(__FUNCTION__, - "Unknown Hive get fields error.", err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, "", -1, + "Unknown Hive get fields error.", hive_error, err_buf_len, HIVE_ERROR); } assert(m_columns.size() > 0); /* Every table must have at least one column */ return HIVE_SUCCESS; } -HiveReturn HiveColumnsResultSet::initializeSchema(char* err_buf, size_t err_buf_len) { +HiveReturn HiveColumnsResultSet::initializeSchema(hive_err_info *hive_error, size_t err_buf_len) { /* Initialize the schema values needed for this resultset. * OK to hardcode because these fields should never change */ m_schema.properties[SERIALIZATION_NULL_FORMAT] = DEFAULT_NULL_FORMAT; @@ -531,7 +754,7 @@ HiveReturn HiveColumnsResultSet::initializeSchema(char* err_buf, size_t err_buf_ return HIVE_SUCCESS; } -HiveReturn HiveColumnsResultSet::constructCurrentRow(char* err_buf, size_t err_buf_len) { +HiveReturn HiveColumnsResultSet::constructCurrentRow(hive_err_info *hive_error, size_t err_buf_len) { /* Clear out the previous row data just to be safe */ m_curr_row_data.clear(); @@ -561,6 +784,7 @@ HiveReturn HiveColumnsResultSet::constructCurrentRow(char* err_buf, size_t err_b case 5: // If Col5: DATA_TYPE snprintf(string_buffer, sizeof(string_buffer), "%i", (*m_fpHiveToSQLType)(column_desc.getHiveType())); + string_buffer[sizeof(string_buffer)-1] = '\0'; m_curr_row_data.push_back(string_buffer); break; @@ -570,11 +794,13 @@ HiveReturn HiveColumnsResultSet::constructCurrentRow(char* err_buf, size_t err_b case 7: // If Col7: COLUMN_SIZE snprintf(string_buffer, sizeof(string_buffer), "%zu", column_desc.getMaxDisplaySize()); + string_buffer[sizeof(string_buffer)-1] = '\0'; m_curr_row_data.push_back(string_buffer); break; case 8: // If Col8: BUFFER_LENGTH snprintf(string_buffer, sizeof(string_buffer), "%zu", column_desc.getFieldByteSize()); + string_buffer[sizeof(string_buffer)-1] = '\0'; m_curr_row_data.push_back(string_buffer); break; @@ -591,11 +817,13 @@ HiveReturn HiveColumnsResultSet::constructCurrentRow(char* err_buf, size_t err_b case 14: // If Col14: SQL_DATA_TYPE snprintf(string_buffer, sizeof(string_buffer), "%i", (*m_fpHiveToSQLType)(column_desc.getHiveType())); + string_buffer[sizeof(string_buffer)-1] = '\0'; m_curr_row_data.push_back(string_buffer); break; case 17: // If Col17: ORDINAL_POSITION snprintf(string_buffer, sizeof(string_buffer), "%i", m_col_fetch_idx + 1); + string_buffer[sizeof(string_buffer)-1] = '\0'; m_curr_row_data.push_back(string_buffer); break; @@ -613,4 +841,3 @@ HiveReturn HiveColumnsResultSet::constructCurrentRow(char* err_buf, size_t err_b assert(m_curr_row_data.size() == 18); return HIVE_SUCCESS; } - diff --git odbc/src/cpp/HiveResultSet.h odbc/src/cpp/HiveResultSet.h index 25eabc4..2a9f2f4 100644 --- odbc/src/cpp/HiveResultSet.h +++ odbc/src/cpp/HiveResultSet.h @@ -28,6 +28,7 @@ #define __hive_resultset_h__ #include +#include #include "hive_metastore_types.h" @@ -53,13 +54,16 @@ using namespace std; class HiveResultSet { public: virtual ~HiveResultSet() {} ///< The constructor should be defined independently by each subclass - virtual HiveReturn fetchNext(char* err_buf, size_t err_buf_len) =0; - virtual HiveReturn hasResults(int* results, char* err_buf, size_t err_buf_len) =0; - virtual HiveReturn getColumnCount(size_t* col_count, char* err_buf, size_t err_buf_len) =0; + virtual HiveReturn fetchNext(hive_err_info *hive_error, size_t err_buf_len, int *num_rows) =0; + virtual HiveReturn hasResults(int* results, hive_err_info *hive_error, size_t 
err_buf_len) =0; + virtual HiveReturn getColumnCount(size_t* col_count, hive_err_info *hive_error, size_t err_buf_len) =0; virtual HiveReturn createColumnDesc(size_t column_idx, HiveColumnDesc** column_desc_ptr, - char* err_buf, size_t err_buf_len) =0; + hive_err_info *hive_error, size_t err_buf_len) =0; + virtual void seekPrior() =0; /// The rowset will ONLY be valid after fetchNext has been called at least once virtual HiveRowSet& getRowSet() =0; + virtual void seekNextRow() =0; + }; @@ -78,32 +82,87 @@ class HiveResultSet { */ class HiveQueryResultSet: public HiveResultSet { public: - HiveQueryResultSet(int max_buf_rows); + HiveQueryResultSet(int max_buf_rows, int resultset_size); virtual ~HiveQueryResultSet(); - HiveReturn initialize(HiveConnection* connection, char* err_buf, size_t err_buf_len); - HiveReturn fetchNext(char* err_buf, size_t err_buf_len); - HiveReturn hasResults(int* results, char* err_buf, size_t err_buf_len); - HiveReturn getColumnCount(size_t* col_count, char* err_buf, size_t err_buf_len); - HiveReturn createColumnDesc(size_t column_idx, HiveColumnDesc** column_desc_ptr, char* err_buf, + HiveReturn initialize(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len); + HiveReturn fetchNext(hive_err_info *hive_error, size_t err_buf_len, int *num_rows); + HiveReturn hasResults(int* results, hive_err_info *hive_error, size_t err_buf_len); + HiveReturn getColumnCount(size_t* col_count, hive_err_info *hive_error, size_t err_buf_len); + HiveReturn createColumnDesc(size_t column_idx, HiveColumnDesc** column_desc_ptr, hive_err_info *hive_error, size_t err_buf_len); HiveRowSet& getRowSet(); + void seekNextRow(); + void setArraySize(int max_buf_rows, int resultset_size); + virtual void seekPrior(); - private: + protected: HiveConnection* m_connection; ///< Hive connection handle - HiveSerializedRowSet m_serial_rowset; ///< Rowset associated with the current fetched row (if any) + HiveSerializedRowSet *m_serial_rowset; ///< Rowset associated with the current fetched row (if any) int m_max_buffered_rows; ///< Max number of rows to buffer in client memory + int m_fetch_row_size; ///< Fetch size specified by client + int m_resultset_available; ///< Rows actually copied in the fetch array by the last fetch operation + int m_resultset_idx; ///< current read position in the client result set int m_fetch_idx; ///< Last row fetched by the client bool m_has_results; ///< Indicates that at least one result row has been successfully fetched bool m_fetch_attempted; ///< Indicates that a Hive server fetch call has successfully executed vector m_result_set_data; ///< Vector of serialized rows Apache::Hadoop::Hive::Schema m_schema; ///< Schema of the result table - HiveReturn initializeSchema(char* err_buf, size_t err_buf_len); - HiveReturn fetchNewResults(char* err_buf, size_t err_buf_len); + HiveReturn initializeSchema(hive_err_info *hive_error, size_t err_buf_len); + virtual HiveReturn fetchNewResults(hive_err_info *hive_error, size_t err_buf_len); }; /************************************************************************************************* + * HiveLocalResultSet Subclass Declaration + ************************************************************************************************/ + + /** + * @brief A container for the local resultsets of catalog functions. + * + * Container class for a catalog functions that are entirely processed by the driver. + * The caller can specify an array of result rows and schema to match. 
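 + * + * For illustration (placeholder names), the rows are supplied as + * pre-serialized, field-delimited strings and the schema as + * {name, type, comment} triples, of which only name and type are read here: + * + *   static const char *schema[][3] = { {"TABLE_TYPE", "string", ""} }; + *   static const char *rows[]      = { "TABLE", "VIEW" }; + *   HiveLocalResultSet rs(maxBufRows, fetchRowSize, rows, 2); + *   rs.initialize(conn, &err, errLen, schema, 1);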
+ */ + +class HiveLocalResultSet: public HiveQueryResultSet { + public: + HiveLocalResultSet(int max_buf_rows, int resultset_size) : + HiveQueryResultSet(max_buf_rows, resultset_size) {}; + HiveLocalResultSet(int max_buf_rows, int resultset_size, const char *localResultSet[], int localResultSize); + + HiveReturn initialize(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len, + const char *pSchema[][3], int numCols); + protected: + virtual HiveReturn execQuery(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len); + + private: + vector m_local_result_set_data; ///< local/static result data + + HiveReturn initializeSchema(hive_err_info *hive_error, size_t err_buf_len, + const char *pSchema[][3], int numCols); + HiveReturn fetchNewResults(hive_err_info *hive_error, size_t err_buf_len); +}; + +/************************************************************************************************* + * HiveStatsResultSet Subclass Declaration + ************************************************************************************************/ + +/** + * @brief A container for resultsets describing the database index information. + */ +class HiveStatsResultSet : public HiveLocalResultSet { + public: + HiveStatsResultSet(int max_buf_rows, int resultset_size, char *tableName) : + HiveLocalResultSet(max_buf_rows, resultset_size), m_tableName(tableName) {}; + private : + string m_tableName; + vector m_indexes; + + HiveReturn execQuery(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len); + void constructRow(string *currRow, int idxPos, int colPos); +}; + +/************************************************************************************************* * HiveTablesResultSet Subclass Declaration ************************************************************************************************/ @@ -119,14 +178,16 @@ class HiveTablesResultSet: public HiveResultSet { public: HiveTablesResultSet(); virtual ~HiveTablesResultSet(); - HiveReturn initialize(HiveConnection* connection, const char* tbl_search_pattern, char* err_buf, + HiveReturn initialize(HiveConnection* connection, const char* tbl_search_pattern, hive_err_info *hive_error, size_t err_buf_len); - HiveReturn fetchNext(char* err_buf, size_t err_buf_len); - HiveReturn hasResults(int* results, char* err_buf, size_t err_buf_len); - HiveReturn getColumnCount(size_t* col_count, char* err_buf, size_t err_buf_len); - HiveReturn createColumnDesc(size_t column_idx, HiveColumnDesc** column_desc_ptr, char* err_buf, + HiveReturn fetchNext(hive_err_info *hive_error, size_t err_buf_len, int *num_rows); + HiveReturn hasResults(int* results, hive_err_info *hive_error, size_t err_buf_len); + HiveReturn getColumnCount(size_t* col_count, hive_err_info *hive_error, size_t err_buf_len); + HiveReturn createColumnDesc(size_t column_idx, HiveColumnDesc** column_desc_ptr, hive_err_info *hive_error, size_t err_buf_len); HiveRowSet& getRowSet(); + void seekNextRow(); + virtual void seekPrior(); private: int m_fetch_idx; ///< Last row fetched by the client @@ -137,8 +198,8 @@ class HiveTablesResultSet: public HiveResultSet { vector m_tables; ///< Vector of table names Apache::Hadoop::Hive::Schema m_schema; ///< Schema of the result table - HiveReturn initializeSchema(char* err_buf, size_t err_buf_len); - HiveReturn constructCurrentRow(char* err_buf, size_t err_buf_len); + HiveReturn initializeSchema(hive_err_info *hive_error, size_t err_buf_len); + HiveReturn constructCurrentRow(hive_err_info *hive_error, size_t 
err_buf_len); }; @@ -160,13 +221,15 @@ class HiveColumnsResultSet: public HiveResultSet { HiveColumnsResultSet(int(*fpHiveToSQLType)(HiveType)); virtual ~HiveColumnsResultSet(); HiveReturn initialize(HiveConnection* connection, const char* tbl_search_pattern, - const char* col_search_pattern, char* err_buf, size_t err_buf_len); - HiveReturn fetchNext(char* err_buf, size_t err_buf_len); - HiveReturn hasResults(int* results, char* err_buf, size_t err_buf_len); - HiveReturn getColumnCount(size_t* col_count, char* err_buf, size_t err_buf_len); - HiveReturn createColumnDesc(size_t column_idx, HiveColumnDesc** column_desc_ptr, char* err_buf, + const char* col_search_pattern, hive_err_info *hive_error, size_t err_buf_len); + HiveReturn fetchNext(hive_err_info *hive_error, size_t err_buf_len, int *num_rows); + HiveReturn hasResults(int* results, hive_err_info *hive_error, size_t err_buf_len); + HiveReturn getColumnCount(size_t* col_count, hive_err_info *hive_error, size_t err_buf_len); + HiveReturn createColumnDesc(size_t column_idx, HiveColumnDesc** column_desc_ptr, hive_err_info *hive_error, size_t err_buf_len); HiveRowSet& getRowSet(); + void seekNextRow(); + virtual void seekPrior(); private: HiveConnection* m_connection; ///< Hive connection handle @@ -181,9 +244,9 @@ class HiveColumnsResultSet: public HiveResultSet { HiveStringVectorRowSet m_vecstring_rowset; Apache::Hadoop::Hive::Schema m_schema; ///< Schema of the result table - HiveReturn getNextTableFields(char* err_buf, size_t err_buf_len); - HiveReturn initializeSchema(char* err_buf, size_t err_buf_len); - HiveReturn constructCurrentRow(char* err_buf, size_t err_buf_len); + HiveReturn getNextTableFields(hive_err_info *hive_error, size_t err_buf_len); + HiveReturn initializeSchema(hive_err_info *hive_error, size_t err_buf_len); + HiveReturn constructCurrentRow(hive_err_info *hive_error, size_t err_buf_len); }; diff --git odbc/src/cpp/HiveRowSet.cpp odbc/src/cpp/HiveRowSet.cpp index 3de6124..9970d35 100644 --- odbc/src/cpp/HiveRowSet.cpp +++ odbc/src/cpp/HiveRowSet.cpp @@ -18,6 +18,7 @@ #include #include +#include #include "HiveRowSet.h" #include "hiveclienthelper.h" @@ -31,10 +32,14 @@ HiveRowSet::HiveRowSet() { m_is_completely_read = false; m_bytes_read = 0; m_last_column_fetched = 0; + m_field_buffer = m_default_field_buffer; + m_field_buffer_len = MAX_BYTE_LENGTH; m_field_buffer[0] = '\0'; } HiveRowSet::~HiveRowSet() { + if (m_field_buffer != m_default_field_buffer) + delete m_field_buffer; } void HiveRowSet::reset() { @@ -46,37 +51,47 @@ void HiveRowSet::reset() { specialized_reset(); /* Call the specialized subclass reset method */ } +// resize the m_field_buffer if required +void HiveRowSet::ensureFieldBufferSize(int newLen) { + if (newLen <= m_field_buffer_len) + return; + if (m_field_buffer != m_default_field_buffer) + delete m_field_buffer; + m_field_buffer = new char[newLen+1]; + m_field_buffer_len = newLen; +} + void HiveRowSet::initFieldBuffer() { /* m_field_buffer should always correspond to the field indicated by m_last_column_fetched*/ extractField(m_last_column_fetched); } -HiveReturn HiveRowSet::getFieldDataLen(size_t column_idx, size_t* col_len, char* err_buf, +HiveReturn HiveRowSet::getFieldDataLen(size_t column_idx, size_t* col_len, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(col_len == NULL, __FUNCTION__, - "Pointer to col_len (output) cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Pointer to col_len (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); 
RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__, - "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Rowset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); *col_len = getFieldLen(column_idx); return HIVE_SUCCESS; } HiveReturn HiveRowSet::getFieldAsCString(size_t column_idx, char* buffer, size_t buffer_len, - size_t* data_byte_size, int* is_null_value, char* err_buf, + size_t* data_byte_size, int* is_null_value, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(buffer == NULL, __FUNCTION__, - "Column data output buffer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Column data output buffer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(is_null_value == NULL, __FUNCTION__, - "Column data is_null_value (output) cannot be NULL.", err_buf, err_buf_len, + "Column data is_null_value (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__, - "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Rowset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(buffer_len == 0, __FUNCTION__, - "Output buffer cannot have a size of zero.", err_buf, err_buf_len, HIVE_ERROR); + "Output buffer cannot have a size of zero.", hive_error, err_buf_len, HIVE_ERROR); if (m_last_column_fetched != column_idx) { extractField(column_idx); @@ -110,7 +125,7 @@ HiveReturn HiveRowSet::getFieldAsCString(size_t column_idx, char* buffer, size_t const char* src_str_ptr = m_field_buffer + m_bytes_read; /* The total number of bytes to read (+1 null terminator) should be no more than the * size of the field buffer */ - assert(m_bytes_read + bytes_remaining + 1 <= sizeof(m_field_buffer)); + assert(m_bytes_read + bytes_remaining <= m_field_buffer_len); /* Copy as many characters as possible from the read location */ size_t bytes_copied = safe_strncpy(buffer, src_str_ptr, min(buffer_len, bytes_remaining + 1)); // +1 for null terminator /* bytes_copied does not count the null terminator */ @@ -124,16 +139,16 @@ HiveReturn HiveRowSet::getFieldAsCString(size_t column_idx, char* buffer, size_t } HiveReturn HiveRowSet::getFieldAsDouble(size_t column_idx, double* buffer, int* is_null_value, - char* err_buf, size_t err_buf_len) { + hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(buffer == NULL, __FUNCTION__, - "Column data output buffer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Column data output buffer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(is_null_value == NULL, __FUNCTION__, - "Column data is_null_value (output) cannot be NULL.", err_buf, err_buf_len, + "Column data is_null_value (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__, - "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Rowset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index 
out of bounds.", hive_error, err_buf_len, HIVE_ERROR); if (m_last_column_fetched != column_idx) { /* Reset if this column was not fetched on the last attempt */ @@ -158,16 +173,16 @@ HiveReturn HiveRowSet::getFieldAsDouble(size_t column_idx, double* buffer, int* } HiveReturn HiveRowSet::getFieldAsInt(size_t column_idx, int* buffer, int* is_null_value, - char* err_buf, size_t err_buf_len) { + hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(buffer == NULL, __FUNCTION__, - "Column data output buffer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Column data output buffer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(is_null_value == NULL, __FUNCTION__, - "Column data is_null_value (output) cannot be NULL.", err_buf, err_buf_len, + "Column data is_null_value (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__, - "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Rowset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); if (m_last_column_fetched != column_idx) { extractField(column_idx); @@ -191,16 +206,16 @@ HiveReturn HiveRowSet::getFieldAsInt(size_t column_idx, int* buffer, int* is_nul } HiveReturn HiveRowSet::getFieldAsLong(size_t column_idx, long* buffer, int* is_null_value, - char* err_buf, size_t err_buf_len) { + hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(buffer == NULL, __FUNCTION__, - "Column data output buffer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Column data output buffer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(is_null_value == NULL, __FUNCTION__, - "Column data is_null_value (output) cannot be NULL.", err_buf, err_buf_len, + "Column data is_null_value (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__, - "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Rowset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); if (m_last_column_fetched != column_idx) { extractField(column_idx); @@ -224,16 +239,16 @@ HiveReturn HiveRowSet::getFieldAsLong(size_t column_idx, long* buffer, int* is_n } HiveReturn HiveRowSet::getFieldAsULong(size_t column_idx, unsigned long* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len) { + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(buffer == NULL, __FUNCTION__, - "Column data output buffer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Column data output buffer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(is_null_value == NULL, __FUNCTION__, - "Column data is_null_value (output) cannot be NULL.", err_buf, err_buf_len, + "Column data is_null_value (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__, - "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Rowset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= getColumnCount(), 
__FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); if (m_last_column_fetched != column_idx) { extractField(column_idx); @@ -257,16 +272,16 @@ HiveReturn HiveRowSet::getFieldAsULong(size_t column_idx, unsigned long* buffer, } HiveReturn HiveRowSet::getFieldAsI64(size_t column_idx, int64_t* buffer, int* is_null_value, - char* err_buf, size_t err_buf_len) { + hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(buffer == NULL, __FUNCTION__, - "Column data output buffer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Column data output buffer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(is_null_value == NULL, __FUNCTION__, - "Column data is_null_value (output) cannot be NULL.", err_buf, err_buf_len, + "Column data is_null_value (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__, - "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Rowset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); if (m_last_column_fetched != column_idx) { extractField(column_idx); @@ -290,16 +305,16 @@ HiveReturn HiveRowSet::getFieldAsI64(size_t column_idx, int64_t* buffer, int* is } HiveReturn HiveRowSet::getFieldAsI64U(size_t column_idx, uint64_t* buffer, int* is_null_value, - char* err_buf, size_t err_buf_len) { + hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(buffer == NULL, __FUNCTION__, - "Column data output buffer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Column data output buffer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(is_null_value == NULL, __FUNCTION__, - "Column data is_null_value (output) cannot be NULL.", err_buf, err_buf_len, + "Column data is_null_value (output) cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(getColumnCount() == 0, __FUNCTION__, - "Rowset contains zero columns.", err_buf, err_buf_len, HIVE_ERROR); + "Rowset contains zero columns.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(column_idx >= getColumnCount(), __FUNCTION__, - "Column index out of bounds.", err_buf, err_buf_len, HIVE_ERROR); + "Column index out of bounds.", hive_error, err_buf_len, HIVE_ERROR); if (m_last_column_fetched != column_idx) { extractField(column_idx); @@ -390,20 +405,16 @@ size_t HiveSerializedRowSet::getFieldLen(size_t column_idx) { assert(m_field_offsets[column_idx + 1] > m_field_offsets[column_idx]); len = m_field_offsets[column_idx + 1] - m_field_offsets[column_idx] - 1; } - /* Enforce the constraint that no data exceed MAX_BYTE_LENGTH */ - len = min(len, (size_t) MAX_BYTE_LENGTH); return len; } void HiveSerializedRowSet::extractField(size_t column_idx) { assert(column_idx < getColumnCount()); assert(m_row_weak_ptr != NULL); - /* The field buffer should always be large enough to hold the field */ - assert(getFieldLen(column_idx) < sizeof(m_field_buffer)); - /* Just safety precaution to prevent buffer overflow */ - /* Reduce buffer size by one to save space for null terminator */ - size_t extract_len = min(getFieldLen(column_idx), sizeof(m_field_buffer) - 1); - size_t copied = m_row_weak_ptr->copy(m_field_buffer, extract_len, m_field_offsets[column_idx]); + 
ensureFieldBufferSize(getFieldLen(column_idx)); + size_t extract_len = min(getFieldLen(column_idx), (size_t)m_field_buffer_len); + size_t copied = m_row_weak_ptr->copy(m_field_buffer, extract_len, + m_field_offsets[column_idx]); assert(copied == extract_len); /* Make sure the buffer is null terminated */ m_field_buffer[extract_len] = '\0'; @@ -451,15 +462,14 @@ size_t HiveStringVectorRowSet::getFieldLen(size_t column_idx) { assert(column_idx < getColumnCount()); assert(m_fields_weak_ptr != NULL); size_t len = m_fields_weak_ptr->at(column_idx).length(); - /* Enforce the constraint that no data exceed MAX_BYTE_LENGTH */ - len = min(len, (size_t) MAX_BYTE_LENGTH); return len; } void HiveStringVectorRowSet::extractField(size_t column_idx) { assert(column_idx < getColumnCount()); assert(m_fields_weak_ptr != NULL); - safe_strncpy(m_field_buffer, m_fields_weak_ptr->at(column_idx).c_str(), sizeof(m_field_buffer)); + ensureFieldBufferSize(getFieldLen(column_idx)); + safe_strncpy(m_field_buffer, m_fields_weak_ptr->at(column_idx).c_str(), m_field_buffer_len); } diff --git odbc/src/cpp/HiveRowSet.h odbc/src/cpp/HiveRowSet.h index ca6e6af..8f6b5c3 100644 --- odbc/src/cpp/HiveRowSet.h +++ odbc/src/cpp/HiveRowSet.h @@ -53,27 +53,29 @@ class HiveRowSet { HiveRowSet(); virtual ~HiveRowSet(); void reset(); ///< Not overrideable, implement specialized_reset() instead - HiveReturn getFieldDataLen(size_t column_idx, size_t* col_len, char* err_buf, size_t err_buf_len); + void ensureFieldBufferSize(int newLen); + HiveReturn getFieldDataLen(size_t column_idx, size_t* col_len, hive_err_info *hive_error, size_t err_buf_len); HiveReturn getFieldAsCString(size_t column_idx, char* buffer, size_t buffer_len, - size_t* data_byte_size, int* is_null_value, char* err_buf, + size_t* data_byte_size, int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); - HiveReturn getFieldAsDouble(size_t column_idx, double* buffer, int* is_null_value, char* err_buf, + HiveReturn getFieldAsDouble(size_t column_idx, double* buffer, int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); - HiveReturn getFieldAsInt(size_t column_idx, int* buffer, int* is_null_value, char* err_buf, + HiveReturn getFieldAsInt(size_t column_idx, int* buffer, int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); - HiveReturn getFieldAsLong(size_t column_idx, long* buffer, int* is_null_value, char* err_buf, + HiveReturn getFieldAsLong(size_t column_idx, long* buffer, int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); HiveReturn getFieldAsULong(size_t column_idx, unsigned long* buffer, int* is_null_value, - char* err_buf, size_t err_buf_len); - HiveReturn getFieldAsI64(size_t column_idx, int64_t* buffer, int* is_null_value, char* err_buf, + hive_err_info *hive_error, size_t err_buf_len); + HiveReturn getFieldAsI64(size_t column_idx, int64_t* buffer, int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); - HiveReturn getFieldAsI64U(size_t column_idx, uint64_t* buffer, int* is_null_value, char* err_buf, + HiveReturn getFieldAsI64U(size_t column_idx, uint64_t* buffer, int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); protected: - /// Forces all data retrieved to be no more than MAX_BYTE_LENGTH - char m_field_buffer[MAX_BYTE_LENGTH + 1]; - + /// retrieved data + char m_default_field_buffer[MAX_BYTE_LENGTH + 1]; + char *m_field_buffer; + int m_field_buffer_len; /** * @brief Initializes m_field_buffer with the field indicated by m_last_column_fetched. 
* diff --git odbc/src/cpp/Makefile.am odbc/src/cpp/Makefile.am new file mode 100644 index 0000000..9e57cd8 --- /dev/null +++ odbc/src/cpp/Makefile.am @@ -0,0 +1,77 @@ +# +# Copyright 2005 The Apache Software Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# libhiveclient is a convencience library for libhiveodbc +# It gets rolled into libhiveodbc at link time. +noinst_LTLIBRARIES = libhiveclient.la + +library_includedir=$(includedir) +library_include_HEADERS=hiveclient.h hiveconstants.h + +AM_CPPFLAGS = -I. -Igen-cpp $(LTDINCL) $(THRIFT_CPPFLAGS) $(BOOST_CPPFLAGS) +AM_LDFLAGS = -no-undefined + +if HAVE_WIN32 + AM_LDFLAGS += -avoid-version -Wl,--kill-at -Wl,--strip-all +endif + +libhiveclient_la_LDFLAGS = $(AM_LDFLAGS) $(THRIFT_LDFLAGS) $(BOOST_LDFLAGS) + +libhiveclient_la_CPPFLAGS = $(AM_CPPFLAGS) + +libhiveclient_la_SOURCES = \ + hiveconstants.h \ + hiveclienthelper.cpp \ + hiveclienthelper.h \ + HiveRowSet.cpp \ + hiveclient.cpp \ + hiveclient.h \ + HiveColumnDesc.cpp \ + HiveColumnDesc.h \ + HiveConnection.h \ + HiveResultSet.cpp \ + HiveResultSet.h \ + HiveRowSet.h \ + thriftserverconstants.h \ + gen-cpp/FacebookService.cpp \ + gen-cpp/FacebookService.h \ + gen-cpp/fb303_constants.cpp \ + gen-cpp/fb303_constants.h \ + gen-cpp/fb303_types.cpp \ + gen-cpp/fb303_types.h \ + gen-cpp/hive_metastore_constants.cpp \ + gen-cpp/hive_metastore_constants.h \ + gen-cpp/hive_metastore_types.cpp \ + gen-cpp/hive_metastore_types.h \ + gen-cpp/hive_service_constants.cpp \ + gen-cpp/hive_service_constants.h \ + gen-cpp/hive_service_types.cpp \ + gen-cpp/hive_service_types.h \ + gen-cpp/queryplan_constants.cpp \ + gen-cpp/queryplan_constants.h \ + gen-cpp/queryplan_types.cpp \ + gen-cpp/queryplan_types.h \ + gen-cpp/ThriftHive.cpp \ + gen-cpp/ThriftHive.h \ + gen-cpp/ThriftHiveMetastore.cpp \ + gen-cpp/ThriftHiveMetastore.h + +gen-thrift: + rm -rf gen-cpp/ + ${THRIFT_COMPILER} --gen cpp if/fb303.thrift + ${THRIFT_COMPILER} --gen cpp $(HIVE_ROOT)/metastore/if/hive_metastore.thrift + ${THRIFT_COMPILER} --gen cpp $(HIVE_ROOT)/ql/if/queryplan.thrift + ${THRIFT_COMPILER} --gen cpp $(HIVE_ROOT)/service/if/hive_service.thrift diff --git odbc/src/cpp/Makefile.in odbc/src/cpp/Makefile.in new file mode 100644 index 0000000..ec116df --- /dev/null +++ odbc/src/cpp/Makefile.in @@ -0,0 +1,759 @@ +# Makefile.in generated by automake 1.11.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, +# Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +# +# Copyright 2005 The Apache Software Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +@HAVE_WIN32_TRUE@am__append_1 = -avoid-version -Wl,--kill-at -Wl,--strip-all +subdir = src/cpp +DIST_COMMON = $(library_include_HEADERS) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/ax_boost_base.m4 \ + $(top_srcdir)/m4/find_apr.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +LTLIBRARIES = $(noinst_LTLIBRARIES) +libhiveclient_la_LIBADD = +am_libhiveclient_la_OBJECTS = libhiveclient_la-hiveclienthelper.lo \ + libhiveclient_la-HiveRowSet.lo libhiveclient_la-hiveclient.lo \ + libhiveclient_la-HiveColumnDesc.lo \ + libhiveclient_la-HiveResultSet.lo \ + libhiveclient_la-FacebookService.lo \ + libhiveclient_la-fb303_constants.lo \ + libhiveclient_la-fb303_types.lo \ + libhiveclient_la-hive_metastore_constants.lo \ + libhiveclient_la-hive_metastore_types.lo \ + libhiveclient_la-hive_service_constants.lo \ + libhiveclient_la-hive_service_types.lo \ + libhiveclient_la-queryplan_constants.lo \ + libhiveclient_la-queryplan_types.lo \ + libhiveclient_la-ThriftHive.lo \ + libhiveclient_la-ThriftHiveMetastore.lo +libhiveclient_la_OBJECTS = $(am_libhiveclient_la_OBJECTS) +libhiveclient_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(libhiveclient_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/config/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +COMPILE = 
$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(libhiveclient_la_SOURCES) +DIST_SOURCES = $(libhiveclient_la_SOURCES) +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__installdirs = "$(DESTDIR)$(library_includedir)" +HEADERS = $(library_include_HEADERS) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +APR_CFLAGS = @APR_CFLAGS@ +APR_CPPFLAGS = @APR_CPPFLAGS@ +APR_INCLUDES = @APR_INCLUDES@ +APR_LDFLAGS = @APR_LDFLAGS@ +APR_LIB = @APR_LIB@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +ODBC_CPPFLAGS = @ODBC_CPPFLAGS@ +ODBC_LIB = @ODBC_LIB@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +THRIFT_COMPILER = @THRIFT_COMPILER@ +THRIFT_CPPFLAGS = @THRIFT_CPPFLAGS@ +THRIFT_INCLUDE = @THRIFT_INCLUDE@ +THRIFT_LDFLAGS = @THRIFT_LDFLAGS@ +THRIFT_LIBDIR = @THRIFT_LIBDIR@ +VERSION = @VERSION@ +VER_INFO = @VER_INFO@ +WINDRES = @WINDRES@ +abs_builddir = 
@abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +lt_ECHO = @lt_ECHO@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ + +# libhiveclient is a convencience library for libhiveodbc +# It gets rolled into libhiveodbc at link time. +noinst_LTLIBRARIES = libhiveclient.la +library_includedir = $(includedir) +library_include_HEADERS = hiveclient.h hiveconstants.h +AM_CPPFLAGS = -I. -Igen-cpp $(LTDINCL) $(THRIFT_CPPFLAGS) $(BOOST_CPPFLAGS) +AM_LDFLAGS = -no-undefined $(am__append_1) +libhiveclient_la_LDFLAGS = $(AM_LDFLAGS) $(THRIFT_LDFLAGS) $(BOOST_LDFLAGS) +libhiveclient_la_CPPFLAGS = $(AM_CPPFLAGS) +libhiveclient_la_SOURCES = \ + hiveconstants.h \ + hiveclienthelper.cpp \ + hiveclienthelper.h \ + HiveRowSet.cpp \ + hiveclient.cpp \ + hiveclient.h \ + HiveColumnDesc.cpp \ + HiveColumnDesc.h \ + HiveConnection.h \ + HiveResultSet.cpp \ + HiveResultSet.h \ + HiveRowSet.h \ + thriftserverconstants.h \ + gen-cpp/FacebookService.cpp \ + gen-cpp/FacebookService.h \ + gen-cpp/fb303_constants.cpp \ + gen-cpp/fb303_constants.h \ + gen-cpp/fb303_types.cpp \ + gen-cpp/fb303_types.h \ + gen-cpp/hive_metastore_constants.cpp \ + gen-cpp/hive_metastore_constants.h \ + gen-cpp/hive_metastore_types.cpp \ + gen-cpp/hive_metastore_types.h \ + gen-cpp/hive_service_constants.cpp \ + gen-cpp/hive_service_constants.h \ + gen-cpp/hive_service_types.cpp \ + gen-cpp/hive_service_types.h \ + gen-cpp/queryplan_constants.cpp \ + gen-cpp/queryplan_constants.h \ + gen-cpp/queryplan_types.cpp \ + gen-cpp/queryplan_types.h \ + gen-cpp/ThriftHive.cpp \ + gen-cpp/ThriftHive.h \ + gen-cpp/ThriftHiveMetastore.cpp \ + gen-cpp/ThriftHiveMetastore.h + +all: all-am + +.SUFFIXES: +.SUFFIXES: .cpp .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/cpp/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/cpp/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libhiveclient.la: $(libhiveclient_la_OBJECTS) $(libhiveclient_la_DEPENDENCIES) + $(libhiveclient_la_LINK) $(libhiveclient_la_OBJECTS) $(libhiveclient_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-FacebookService.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-HiveColumnDesc.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-HiveResultSet.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-HiveRowSet.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-ThriftHive.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-ThriftHiveMetastore.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-fb303_constants.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-fb303_types.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-hive_metastore_constants.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-hive_metastore_types.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-hive_service_constants.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-hive_service_types.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-hiveclient.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-hiveclienthelper.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-queryplan_constants.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libhiveclient_la-queryplan_types.Plo@am__quote@ + +.cpp.o: +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< + +.cpp.obj: +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cpp.lo: +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< + +libhiveclient_la-hiveclienthelper.lo: hiveclienthelper.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-hiveclienthelper.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-hiveclienthelper.Tpo -c -o libhiveclient_la-hiveclienthelper.lo `test -f 'hiveclienthelper.cpp' || echo '$(srcdir)/'`hiveclienthelper.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-hiveclienthelper.Tpo $(DEPDIR)/libhiveclient_la-hiveclienthelper.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='hiveclienthelper.cpp' object='libhiveclient_la-hiveclienthelper.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-hiveclienthelper.lo `test -f 'hiveclienthelper.cpp' || echo '$(srcdir)/'`hiveclienthelper.cpp + +libhiveclient_la-HiveRowSet.lo: HiveRowSet.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-HiveRowSet.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-HiveRowSet.Tpo -c -o libhiveclient_la-HiveRowSet.lo `test -f 'HiveRowSet.cpp' || echo '$(srcdir)/'`HiveRowSet.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-HiveRowSet.Tpo $(DEPDIR)/libhiveclient_la-HiveRowSet.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='HiveRowSet.cpp' object='libhiveclient_la-HiveRowSet.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-HiveRowSet.lo `test -f 'HiveRowSet.cpp' || echo '$(srcdir)/'`HiveRowSet.cpp + +libhiveclient_la-hiveclient.lo: hiveclient.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-hiveclient.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-hiveclient.Tpo -c -o libhiveclient_la-hiveclient.lo `test -f 'hiveclient.cpp' || echo '$(srcdir)/'`hiveclient.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-hiveclient.Tpo $(DEPDIR)/libhiveclient_la-hiveclient.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='hiveclient.cpp' object='libhiveclient_la-hiveclient.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-hiveclient.lo `test -f 'hiveclient.cpp' || echo '$(srcdir)/'`hiveclient.cpp + +libhiveclient_la-HiveColumnDesc.lo: HiveColumnDesc.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-HiveColumnDesc.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-HiveColumnDesc.Tpo -c -o libhiveclient_la-HiveColumnDesc.lo `test -f 'HiveColumnDesc.cpp' || echo '$(srcdir)/'`HiveColumnDesc.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-HiveColumnDesc.Tpo $(DEPDIR)/libhiveclient_la-HiveColumnDesc.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='HiveColumnDesc.cpp' object='libhiveclient_la-HiveColumnDesc.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-HiveColumnDesc.lo `test -f 'HiveColumnDesc.cpp' || echo '$(srcdir)/'`HiveColumnDesc.cpp + +libhiveclient_la-HiveResultSet.lo: HiveResultSet.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-HiveResultSet.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-HiveResultSet.Tpo -c -o libhiveclient_la-HiveResultSet.lo `test -f 'HiveResultSet.cpp' || echo '$(srcdir)/'`HiveResultSet.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-HiveResultSet.Tpo $(DEPDIR)/libhiveclient_la-HiveResultSet.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='HiveResultSet.cpp' object='libhiveclient_la-HiveResultSet.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-HiveResultSet.lo `test -f 'HiveResultSet.cpp' || echo '$(srcdir)/'`HiveResultSet.cpp + +libhiveclient_la-FacebookService.lo: gen-cpp/FacebookService.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-FacebookService.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-FacebookService.Tpo -c -o libhiveclient_la-FacebookService.lo `test -f 'gen-cpp/FacebookService.cpp' || echo '$(srcdir)/'`gen-cpp/FacebookService.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-FacebookService.Tpo $(DEPDIR)/libhiveclient_la-FacebookService.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/FacebookService.cpp' object='libhiveclient_la-FacebookService.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-FacebookService.lo `test -f 'gen-cpp/FacebookService.cpp' || echo '$(srcdir)/'`gen-cpp/FacebookService.cpp + +libhiveclient_la-fb303_constants.lo: gen-cpp/fb303_constants.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-fb303_constants.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-fb303_constants.Tpo -c -o libhiveclient_la-fb303_constants.lo `test -f 'gen-cpp/fb303_constants.cpp' || echo '$(srcdir)/'`gen-cpp/fb303_constants.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-fb303_constants.Tpo $(DEPDIR)/libhiveclient_la-fb303_constants.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/fb303_constants.cpp' object='libhiveclient_la-fb303_constants.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-fb303_constants.lo `test -f 'gen-cpp/fb303_constants.cpp' || echo '$(srcdir)/'`gen-cpp/fb303_constants.cpp + +libhiveclient_la-fb303_types.lo: gen-cpp/fb303_types.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-fb303_types.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-fb303_types.Tpo -c -o libhiveclient_la-fb303_types.lo `test -f 'gen-cpp/fb303_types.cpp' || echo '$(srcdir)/'`gen-cpp/fb303_types.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-fb303_types.Tpo $(DEPDIR)/libhiveclient_la-fb303_types.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/fb303_types.cpp' object='libhiveclient_la-fb303_types.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-fb303_types.lo `test -f 'gen-cpp/fb303_types.cpp' || echo '$(srcdir)/'`gen-cpp/fb303_types.cpp + +libhiveclient_la-hive_metastore_constants.lo: gen-cpp/hive_metastore_constants.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-hive_metastore_constants.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-hive_metastore_constants.Tpo -c -o libhiveclient_la-hive_metastore_constants.lo `test -f 'gen-cpp/hive_metastore_constants.cpp' || echo '$(srcdir)/'`gen-cpp/hive_metastore_constants.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-hive_metastore_constants.Tpo $(DEPDIR)/libhiveclient_la-hive_metastore_constants.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/hive_metastore_constants.cpp' object='libhiveclient_la-hive_metastore_constants.lo' libtool=yes @AMDEPBACKSLASH@ 
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-hive_metastore_constants.lo `test -f 'gen-cpp/hive_metastore_constants.cpp' || echo '$(srcdir)/'`gen-cpp/hive_metastore_constants.cpp + +libhiveclient_la-hive_metastore_types.lo: gen-cpp/hive_metastore_types.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-hive_metastore_types.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-hive_metastore_types.Tpo -c -o libhiveclient_la-hive_metastore_types.lo `test -f 'gen-cpp/hive_metastore_types.cpp' || echo '$(srcdir)/'`gen-cpp/hive_metastore_types.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-hive_metastore_types.Tpo $(DEPDIR)/libhiveclient_la-hive_metastore_types.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/hive_metastore_types.cpp' object='libhiveclient_la-hive_metastore_types.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-hive_metastore_types.lo `test -f 'gen-cpp/hive_metastore_types.cpp' || echo '$(srcdir)/'`gen-cpp/hive_metastore_types.cpp + +libhiveclient_la-hive_service_constants.lo: gen-cpp/hive_service_constants.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-hive_service_constants.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-hive_service_constants.Tpo -c -o libhiveclient_la-hive_service_constants.lo `test -f 'gen-cpp/hive_service_constants.cpp' || echo '$(srcdir)/'`gen-cpp/hive_service_constants.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-hive_service_constants.Tpo $(DEPDIR)/libhiveclient_la-hive_service_constants.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/hive_service_constants.cpp' object='libhiveclient_la-hive_service_constants.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-hive_service_constants.lo `test -f 'gen-cpp/hive_service_constants.cpp' || echo '$(srcdir)/'`gen-cpp/hive_service_constants.cpp + +libhiveclient_la-hive_service_types.lo: gen-cpp/hive_service_types.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-hive_service_types.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-hive_service_types.Tpo -c -o libhiveclient_la-hive_service_types.lo `test -f 'gen-cpp/hive_service_types.cpp' || echo 
'$(srcdir)/'`gen-cpp/hive_service_types.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-hive_service_types.Tpo $(DEPDIR)/libhiveclient_la-hive_service_types.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/hive_service_types.cpp' object='libhiveclient_la-hive_service_types.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-hive_service_types.lo `test -f 'gen-cpp/hive_service_types.cpp' || echo '$(srcdir)/'`gen-cpp/hive_service_types.cpp + +libhiveclient_la-queryplan_constants.lo: gen-cpp/queryplan_constants.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-queryplan_constants.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-queryplan_constants.Tpo -c -o libhiveclient_la-queryplan_constants.lo `test -f 'gen-cpp/queryplan_constants.cpp' || echo '$(srcdir)/'`gen-cpp/queryplan_constants.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-queryplan_constants.Tpo $(DEPDIR)/libhiveclient_la-queryplan_constants.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/queryplan_constants.cpp' object='libhiveclient_la-queryplan_constants.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-queryplan_constants.lo `test -f 'gen-cpp/queryplan_constants.cpp' || echo '$(srcdir)/'`gen-cpp/queryplan_constants.cpp + +libhiveclient_la-queryplan_types.lo: gen-cpp/queryplan_types.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-queryplan_types.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-queryplan_types.Tpo -c -o libhiveclient_la-queryplan_types.lo `test -f 'gen-cpp/queryplan_types.cpp' || echo '$(srcdir)/'`gen-cpp/queryplan_types.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-queryplan_types.Tpo $(DEPDIR)/libhiveclient_la-queryplan_types.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/queryplan_types.cpp' object='libhiveclient_la-queryplan_types.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-queryplan_types.lo `test -f 'gen-cpp/queryplan_types.cpp' || echo '$(srcdir)/'`gen-cpp/queryplan_types.cpp + +libhiveclient_la-ThriftHive.lo: gen-cpp/ThriftHive.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT 
libhiveclient_la-ThriftHive.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-ThriftHive.Tpo -c -o libhiveclient_la-ThriftHive.lo `test -f 'gen-cpp/ThriftHive.cpp' || echo '$(srcdir)/'`gen-cpp/ThriftHive.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-ThriftHive.Tpo $(DEPDIR)/libhiveclient_la-ThriftHive.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/ThriftHive.cpp' object='libhiveclient_la-ThriftHive.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-ThriftHive.lo `test -f 'gen-cpp/ThriftHive.cpp' || echo '$(srcdir)/'`gen-cpp/ThriftHive.cpp + +libhiveclient_la-ThriftHiveMetastore.lo: gen-cpp/ThriftHiveMetastore.cpp +@am__fastdepCXX_TRUE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libhiveclient_la-ThriftHiveMetastore.lo -MD -MP -MF $(DEPDIR)/libhiveclient_la-ThriftHiveMetastore.Tpo -c -o libhiveclient_la-ThriftHiveMetastore.lo `test -f 'gen-cpp/ThriftHiveMetastore.cpp' || echo '$(srcdir)/'`gen-cpp/ThriftHiveMetastore.cpp +@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/libhiveclient_la-ThriftHiveMetastore.Tpo $(DEPDIR)/libhiveclient_la-ThriftHiveMetastore.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='gen-cpp/ThriftHiveMetastore.cpp' object='libhiveclient_la-ThriftHiveMetastore.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libhiveclient_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libhiveclient_la-ThriftHiveMetastore.lo `test -f 'gen-cpp/ThriftHiveMetastore.cpp' || echo '$(srcdir)/'`gen-cpp/ThriftHiveMetastore.cpp + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-library_includeHEADERS: $(library_include_HEADERS) + @$(NORMAL_INSTALL) + test -z "$(library_includedir)" || $(MKDIR_P) "$(DESTDIR)$(library_includedir)" + @list='$(library_include_HEADERS)'; test -n "$(library_includedir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(library_includedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(library_includedir)" || exit $$?; \ + done + +uninstall-library_includeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(library_include_HEADERS)'; test -n "$(library_includedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + test -n "$$files" || exit 0; \ + echo " ( cd '$(DESTDIR)$(library_includedir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(library_includedir)" && rm -f $$files + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) 
$(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(library_includedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-library_includeHEADERS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-library_includeHEADERS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-noinstLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am \ + install-library_includeHEADERS install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-library_includeHEADERS + + +gen-thrift: + rm -rf gen-cpp/ + ${THRIFT_COMPILER} --gen cpp if/fb303.thrift + ${THRIFT_COMPILER} --gen cpp $(HIVE_ROOT)/metastore/if/hive_metastore.thrift + ${THRIFT_COMPILER} --gen cpp $(HIVE_ROOT)/ql/if/queryplan.thrift + ${THRIFT_COMPILER} --gen cpp $(HIVE_ROOT)/service/if/hive_service.thrift + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git odbc/src/cpp/hiveclient.cpp odbc/src/cpp/hiveclient.cpp index 450eb0b..9437c4d 100644 --- odbc/src/cpp/hiveclient.cpp +++ odbc/src/cpp/hiveclient.cpp @@ -17,6 +17,7 @@ */ #include +#include #include #include #include @@ -41,61 +42,83 @@ using namespace apache::thrift::protocol; using namespace apache::thrift::transport; + /***************************************************************** * Global Hive Client Functions (usable as C callback functions) *****************************************************************/ HiveConnection* DBOpenConnection(const char* database, const char* host, int port, int framed, - char* err_buf, size_t err_buf_len) { + hive_err_info *hive_error, size_t err_buf_len, unsigned int timeout) { // TODO: add in database selection when Hive supports this feature - shared_ptr socket(new TSocket(host, port)); - shared_ptr transport; + boost::shared_ptr socket(new TSocket(host, port)); + boost::shared_ptr transport; + HiveConnection* hiveConn; + +#if defined(_WIN32) || defined(_WIN64) + TWinsockSingleton::create(); +#endif /* WINxx */ if (framed) { - shared_ptr framedSocket(new TFramedTransport(socket)); + boost::shared_ptr framedSocket(new TFramedTransport(socket)); transport = framedSocket; } else { - shared_ptr bufferedSocket(new TBufferedTransport(socket)); + boost::shared_ptr bufferedSocket(new TBufferedTransport(socket)); transport = bufferedSocket; } - shared_ptr protocol(new TBinaryProtocol(transport)); - shared_ptr client(new Apache::Hadoop::Hive::ThriftHiveClient(protocol)); + boost::shared_ptr protocol(new TBinaryProtocol(transport)); + boost::shared_ptr client(new Apache::Hadoop::Hive::ThriftHiveClient(protocol)); + boost::shared_ptr dbName(new string(database)); try { - transport->open(); + if (timeout) + socket->setConnTimeout(timeout); + transport->open(); + if (timeout) + socket->setConnTimeout(0); } catch (TTransportException& ttx) { - RETURN_FAILURE(__FUNCTION__, ttx.what(), err_buf, err_buf_len, NULL); + RETURN_FAILURE(__FUNCTION__, "", -1, ttx.what(), hive_error, err_buf_len, NULL); } catch (...) { - RETURN_FAILURE(__FUNCTION__, - "Unable to connect to Hive server.", err_buf, err_buf_len, NULL); + RETURN_FAILURE(__FUNCTION__, "", -1, + "Unable to connect to Hive server.", hive_error, err_buf_len, NULL); } - HiveConnection* conn = new HiveConnection(client, transport); - return conn; + hiveConn = new HiveConnection(client, transport, socket, dbName); + /* switch to non-default database if specified */ + if (strcmp(database, DEFAULT_DATABASE)) { + char *dbQry = new char[strlen(database) + 5]; + HiveResultSet *tmpResult; + + sprintf (dbQry, "use %s", database); + if (DBExecute(hiveConn, dbQry, &tmpResult, 1, 1, hive_error, err_buf_len, 0) != HIVE_SUCCESS) + return NULL; + } + return hiveConn; } -HiveReturn DBCloseConnection(HiveConnection* connection, char* err_buf, size_t err_buf_len) { +HiveReturn DBCloseConnection(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(connection == NULL, __FUNCTION__, - "Hive connection cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive connection cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(connection->transport == NULL, __FUNCTION__, - "Hive connection transport cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive connection transport cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); try { connection->transport->close(); } catch (...) { /* Ignore the exception, we just want to clean up everything... 
*/ } delete connection; + return HIVE_SUCCESS; } HiveReturn DBExecute(HiveConnection* connection, const char* query, HiveResultSet** resultset_ptr, - int max_buf_rows, char* err_buf, size_t err_buf_len) { + int max_buf_rows, int fetch_row_size, hive_err_info *hive_error, size_t err_buf_len, + int qry_timeout) { RETURN_ON_ASSERT(connection == NULL, __FUNCTION__, - "Hive connection cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive connection cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(connection->client == NULL, __FUNCTION__, - "Hive connection client cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive connection client cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); RETURN_ON_ASSERT(query == NULL, __FUNCTION__, - "Query string cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Query string cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); // TODO: remove string query_str(query); @@ -110,146 +133,361 @@ HiveReturn DBExecute(HiveConnection* connection, const char* query, HiveResultSe /* Pass the query onto the Hive server for execution */ /* Query execution is kept separate from the resultset b/c results may not always be needed (i.e. DML) */ try { - connection->client->execute(query_str); /* This is currently implemented as a blocking operation */ + if (qry_timeout) + { + connection->socket->setSendTimeout(qry_timeout); + connection->socket->setRecvTimeout(qry_timeout); + } + connection->client->execute(query_str); /* This is currently implemented as a blocking operation */ + if (qry_timeout) + { + connection->socket->setSendTimeout(0); + connection->socket->setRecvTimeout(0); + } } catch (Apache::Hadoop::Hive::HiveServerException& ex) { - RETURN_FAILURE(__FUNCTION__, ex.what(), err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, ex.SQLState.c_str(), ex.errorCode, ex.message.c_str(), hive_error, err_buf_len, HIVE_ERROR); + } catch (TTransportException& ttx) { + if (ttx.getType() == ttx.TIMED_OUT) { + RETURN_FAILURE(__FUNCTION__, "", -1, ttx.what(), hive_error, err_buf_len, HIVE_TIMEOUT); + } + else { + RETURN_FAILURE(__FUNCTION__, "", -1, ttx.what(), hive_error, err_buf_len, HIVE_NETWORK_ERROR); + } } catch (...) 
{ - RETURN_FAILURE(__FUNCTION__, - "Unknown Hive query execution error.", err_buf, err_buf_len, HIVE_ERROR); + RETURN_FAILURE(__FUNCTION__, "", -1, + "Unknown Hive query execution error.", hive_error, err_buf_len, HIVE_ERROR); } /* resultset_ptr may be NULL if the caller does not care about the result */ if (resultset_ptr != NULL) { - HiveQueryResultSet* query_resultset = new HiveQueryResultSet(max_buf_rows); + HiveQueryResultSet* query_resultset = new HiveQueryResultSet(max_buf_rows, fetch_row_size); *resultset_ptr = query_resultset; /* Store into generic HiveResultSet pointer */ - return query_resultset->initialize(connection, err_buf, err_buf_len); + return query_resultset->initialize(connection, hive_error, err_buf_len); } + + return HIVE_SUCCESS; +} + +// reset the array fetch properties of the resultset */ +HiveReturn DBSetBulkAttr(HiveResultSet* resultset_ptr,int max_buf_rows, int fetch_row_size) { + + HiveQueryResultSet* query_resultset = (HiveQueryResultSet*) resultset_ptr; + query_resultset->setArraySize(max_buf_rows, fetch_row_size); return HIVE_SUCCESS; } HiveReturn DBTables(HiveConnection* connection, const char* tbl_search_pattern, - HiveResultSet** resultset_ptr, char* err_buf, size_t err_buf_len) { + HiveResultSet** resultset_ptr, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset_ptr == NULL, __FUNCTION__, - "Resultset pointer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Resultset pointer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); HiveTablesResultSet* tables_resultset = new HiveTablesResultSet(); *resultset_ptr = tables_resultset; /* Store into generic HiveResultSet pointer */ - return tables_resultset->initialize(connection, tbl_search_pattern, err_buf, err_buf_len); + return tables_resultset->initialize(connection, tbl_search_pattern, hive_error, err_buf_len); } HiveReturn DBColumns(HiveConnection* connection, int(*fpHiveToSQLType)(HiveType), const char* tbl_search_pattern, const char* col_search_pattern, - HiveResultSet** resultset_ptr, char* err_buf, size_t err_buf_len) { + HiveResultSet** resultset_ptr, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset_ptr == NULL, __FUNCTION__, - "Resultset pointer cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Resultset pointer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); HiveColumnsResultSet* columns_resultset = new HiveColumnsResultSet(fpHiveToSQLType); *resultset_ptr = columns_resultset; /* Store into generic HiveResultSet pointer */ - return columns_resultset->initialize(connection, tbl_search_pattern, col_search_pattern, err_buf, + return columns_resultset->initialize(connection, tbl_search_pattern, col_search_pattern, hive_error, err_buf_len); } -HiveReturn DBCloseResultSet(HiveResultSet* resultset, char* err_buf, size_t err_buf_len) { +/* + * g_typeinfo_schema: An array of arrays of C strings used to define expected + * resultset schema for HiveTypeinfoResultSet. All values will be stored as + * C strings (even numbers) as they will eventually be converted to their + * proper types. + * Note that the SQLGetTypeInfo() has two formats in ODBC2 and ODBC3. 
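/*
 * Editorial sketch (not part of the patch): a hypothetical caller driving the bulk
 * fetch API introduced above. DBExecute() now takes a fetch_row_size and a query
 * timeout (forwarded to the socket send/recv timeouts, so presumably milliseconds, as
 * with other TSocket timeouts), and DBFetch() reports how many rows of the fetched
 * block are available through num_rows. The hive_err_info layout, the per-row cursor
 * semantics of DBSeekNextRow(), and the table name "sample_tbl" are assumptions, so
 * treat this strictly as an illustration of the call sequence.
 */
static HiveReturn scanTable(HiveConnection* conn, hive_err_info* err, size_t err_len) {
  HiveResultSet* rs = NULL;
  /* Buffer up to 1000 rows per server round trip, expose 100 rows per DBFetch(),
     and allow roughly 30 seconds for query execution. */
  if (DBExecute(conn, "SELECT * FROM sample_tbl", &rs, 1000, 100,
                err, err_len, 30000) != HIVE_SUCCESS) {
    return HIVE_ERROR;
  }
  int num_rows = 0;
  HiveReturn rc;
  while ((rc = DBFetch(rs, err, err_len, &num_rows)) == HIVE_SUCCESS) {
    for (int i = 0; i < num_rows; ++i) {
      /* ...read the current row's columns here, then advance within the block... */
      DBSeekNextRow(rs, err, err_len);
    }
  }
  DBCloseResultSet(rs, err, err_len);
  return (rc == HIVE_NO_MORE_DATA) ? HIVE_SUCCESS : rc;
}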
+ */ + +static const char *g_typeinfo_schema_2[NUM_ODBC2_COLS][3] = { + {"TYPE_NAME", STRING_TYPE_NAME, "" }, + {"DATA_TYPE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT }, + {"PRECISION", INT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"LITERAL_PREFIX", STRING_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"LITERAL_SUFFIX", STRING_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"CREATE_PARAMS", STRING_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"NULLABLE", SMALLINT_TYPE_NAME, "1"}, + {"CASE_SENSITIVE", SMALLINT_TYPE_NAME, "0"}, + {"SEARCHABLE", SMALLINT_TYPE_NAME, "3"}, + {"UNSIGNED_ATTRIBUTE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"MONEY", SMALLINT_TYPE_NAME, "0"}, + {"AUTO_INCREMENT", SMALLINT_TYPE_NAME, "0"}, + {"LOCAL_TYPE_NAME", STRING_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"MINIMUM_SCALE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"MAXIMUM_SCALE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT} +}; + + +static const char *g_typeinfo_schema_3[NUM_ODBC3_COLS][3] = { + {"TYPE_NAME", STRING_TYPE_NAME, ""}, + {"DATA_TYPE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"COLUMN_SIZE", INT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"LITERAL_PREFIX", STRING_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"LITERAL_SUFFIX", STRING_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"CREATE_PARAMS", STRING_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"NULLABLE", SMALLINT_TYPE_NAME, "1"}, + {"CASE_SENSITIVE", SMALLINT_TYPE_NAME, "0"}, + {"SEARCHABLE", SMALLINT_TYPE_NAME, "3"}, + {"UNSIGNED_ATTRIBUTE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"FIXED_PREC_SCALE", SMALLINT_TYPE_NAME, "0"}, + {"AUTO_UNIQUE_VALUE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"LOCAL_TYPE_NAME", STRING_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"MINIMUM_SCALE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"MAXIMUM_SCALE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"SQL_DATA_TYPE", SMALLINT_TYPE_NAME, ""}, + {"SQL_DATETIME_SUB", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"NUM_PREC_RADIX", INT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"INTERVAL_PRECISION", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT} +}; + +HiveReturn DBGetTypeInfo(HiveConnection* connection, short is_ODBC2, const char *resultSet[], + int resultSetSize, HiveResultSet** resultset_ptr, + hive_err_info *hive_error, size_t err_buf_len) { + RETURN_ON_ASSERT(resultset_ptr == NULL, __FUNCTION__, + "Resultset pointer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + HiveLocalResultSet* type_resultset; + HiveReturn rc; + + type_resultset = new HiveLocalResultSet(1, 1, resultSet, resultSetSize); + *resultset_ptr = type_resultset; /* Store into generic HiveResultSet pointer */ + + if (is_ODBC2) + return type_resultset->initialize(connection, hive_error, err_buf_len, g_typeinfo_schema_2, NUM_ODBC2_COLS); + else + return type_resultset->initialize(connection, hive_error, err_buf_len, g_typeinfo_schema_3, NUM_ODBC3_COLS); +} + +// resultset schema for SQLPrimaryKey() +static const char *g_pkey_schema[][3] = { + {"TABLE_CAT", STRING_TYPE_NAME, ""}, + {"TABLE_SCHEM", STRING_TYPE_NAME, ""}, + {"TABLE_NAME" , STRING_TYPE_NAME, ""}, + {"COLUMN_NAME", STRING_TYPE_NAME, ""}, + {"KEY_SEQ", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"PK_NAME", STRING_TYPE_NAME, ""}, +}; + +HiveReturn DBPrimaryKeys(HiveConnection* connection, HiveResultSet** resultset_ptr, + hive_err_info *hive_error, size_t err_buf_len) { + RETURN_ON_ASSERT(resultset_ptr == NULL, __FUNCTION__, + "Resultset pointer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + + HiveLocalResultSet* query_resultset = new HiveLocalResultSet(1, 1, NULL, 0); + *resultset_ptr = query_resultset; /* Store 
into generic HiveResultSet pointer */ + return query_resultset->initialize(connection, hive_error, err_buf_len, g_pkey_schema, LENGTH(g_pkey_schema)); +} + +// resultset schema for SQLForeignKeys() +static const char *g_fkey_schema[][3] = { + {"PKTABLE_CAT", STRING_TYPE_NAME, ""}, + {"PKTABLE_SCHEM", STRING_TYPE_NAME, ""}, + {"PKTABLE_NAME" , STRING_TYPE_NAME, ""}, + {"PKCOLUMN_NAME", STRING_TYPE_NAME, ""}, + {"FKTABLE_CAT", STRING_TYPE_NAME, ""}, + {"FKTABLE_SCHEM", STRING_TYPE_NAME, ""}, + {"FKTABLE_NAME" , STRING_TYPE_NAME, ""}, + {"FKCOLUMN_NAME", STRING_TYPE_NAME, ""}, + {"KEY_SEQ", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"UPDATE_RULE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"DELETE_RULE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"FK_NAME", STRING_TYPE_NAME, ""}, + {"PK_NAME", STRING_TYPE_NAME, ""}, + {"DEFERRABILITY", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT} +}; + +HiveReturn DBForeignKeys(HiveConnection* connection, HiveResultSet** resultset_ptr, + hive_err_info *hive_error, size_t err_buf_len) { + RETURN_ON_ASSERT(resultset_ptr == NULL, __FUNCTION__, + "Resultset pointer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + + HiveLocalResultSet* query_resultset = new HiveLocalResultSet(1, 1, NULL, 0); + *resultset_ptr = query_resultset; /* Store into generic HiveResultSet pointer */ + return query_resultset->initialize(connection, hive_error, err_buf_len, g_fkey_schema, LENGTH(g_fkey_schema)); +} + +// resultset schema for SQLSpecialColumns() +static const char *g_spcols_schema[][3] = { + {"SCOPE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"COLUMN_NAME", STRING_TYPE_NAME, ""}, + {"DATA_TYPE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"TYPE_NAME", STRING_TYPE_NAME, ""}, + {"COLUMN_SIZE", INT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"BUFFER_LENGTH", INT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"DECIMAL_DIGITS", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + {"PSEUDO_COLUMN", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT} +}; + +HiveReturn DBSpecialColumns(HiveConnection* connection, HiveResultSet** resultset_ptr, + hive_err_info *hive_error, size_t err_buf_len) { + RETURN_ON_ASSERT(resultset_ptr == NULL, __FUNCTION__, + "Resultset pointer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + + HiveLocalResultSet* query_resultset = new HiveLocalResultSet(1, 1, NULL, 0); + *resultset_ptr = query_resultset; /* Store into generic HiveResultSet pointer */ + return query_resultset->initialize(connection, hive_error, err_buf_len, g_spcols_schema, LENGTH(g_spcols_schema)); +} + + +/** + * Columns for result set of SQLStatistics(). 
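+ * The columns mirror the ODBC SQLStatistics() result set. Rows are supplied by
+ * HiveStatsResultSet when the operation is supported (see DBStatistics() below);
+ * otherwise the result set is returned empty.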
+ */ + +static const char * g_stats_schema[][3] = { + { "TABLE_CAT", STRING_TYPE_NAME, ""}, + { "TABLE_SCHEM", STRING_TYPE_NAME, ""}, + { "TABLE_NAME", STRING_TYPE_NAME, ""}, + { "NON_UNIQUE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + { "INDEX_QUALIFIER", STRING_TYPE_NAME, ""}, + { "INDEX_NAME", STRING_TYPE_NAME, ""}, + { "TYPE", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + { "ORDINAL_POSITION", SMALLINT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + { "COLUMN_NAME", STRING_TYPE_NAME, ""}, + { "ASC_OR_DESC", STRING_TYPE_NAME, ""}, + { "CARDINALITY", INT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + { "PAGES", INT_TYPE_NAME, DEFAULT_NULL_FORMAT}, + { "FILTER_CONDITION", STRING_TYPE_NAME, ""} +}; + +HiveReturn DBStatistics(HiveConnection* connection, HiveResultSet** resultset_ptr, char *tabName, + short isSupported, hive_err_info *hive_error, size_t err_buf_len) { + HiveLocalResultSet* query_resultset; + + RETURN_ON_ASSERT(resultset_ptr == NULL, __FUNCTION__, + "Resultset pointer cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + + if (isSupported) + query_resultset = new HiveStatsResultSet(1, 1, tabName); + else + query_resultset = new HiveLocalResultSet(1, 1); + *resultset_ptr = query_resultset; /* Store into generic HiveResultSet pointer */ + return query_resultset->initialize(connection, hive_error, err_buf_len, g_stats_schema, LENGTH(g_stats_schema)); +} + +HiveReturn DBCloseResultSet(HiveResultSet* resultset, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); delete resultset; return HIVE_SUCCESS; } -HiveReturn DBFetch(HiveResultSet* resultset, char* err_buf, size_t err_buf_len) { +HiveReturn DBFetch(HiveResultSet* resultset, hive_err_info *hive_error, size_t err_buf_len, int *num_rows) { + HiveReturn rc; + RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->fetchNext(err_buf, err_buf_len); + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + rc = resultset->fetchNext(hive_error, err_buf_len, num_rows); + return rc; } -HiveReturn DBHasResults(HiveResultSet* resultset, int* has_results, char* err_buf, +HiveReturn DBSeekPrior(HiveResultSet* resultset, hive_err_info *hive_error, size_t err_buf_len) { + RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + resultset->seekPrior(); + return HIVE_SUCCESS; +} + +HiveReturn DBHasResults(HiveResultSet* resultset, int* has_results, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->hasResults(has_results, err_buf, err_buf_len); + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->hasResults(has_results, hive_error, err_buf_len); +} + +HiveReturn DBSeekNextRow(HiveResultSet* resultset, hive_err_info *hive_error,size_t err_buf_len) { + RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + resultset->seekNextRow(); + return HIVE_SUCCESS; } -HiveReturn DBGetColumnCount(HiveResultSet* resultset, size_t* col_count, char* err_buf, +HiveReturn DBGetColumnCount(HiveResultSet* resultset, size_t* col_count, hive_err_info *hive_error, size_t err_buf_len) { 
RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->getColumnCount(col_count, err_buf, err_buf_len); + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->getColumnCount(col_count, hive_error, err_buf_len); } HiveReturn DBCreateColumnDesc(HiveResultSet* resultset, size_t column_idx, - HiveColumnDesc** column_desc_ptr, char* err_buf, size_t err_buf_len) { + HiveColumnDesc** column_desc_ptr, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->createColumnDesc(column_idx, column_desc_ptr, err_buf, err_buf_len); + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->createColumnDesc(column_idx, column_desc_ptr, hive_error, err_buf_len); } HiveReturn DBGetFieldDataLen(HiveResultSet* resultset, size_t column_idx, size_t* col_len, - char* err_buf, size_t err_buf_len) { + hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->getRowSet().getFieldDataLen(column_idx, col_len, err_buf, err_buf_len); + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->getRowSet().getFieldDataLen(column_idx, col_len, hive_error, err_buf_len); } HiveReturn DBGetFieldAsCString(HiveResultSet* resultset, size_t column_idx, char* buffer, size_t buffer_len, size_t* data_byte_size, int* is_null_value, - char* err_buf, size_t err_buf_len) { + hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); return resultset->getRowSet().getFieldAsCString(column_idx, buffer, buffer_len, data_byte_size, - is_null_value, err_buf, err_buf_len); + is_null_value, hive_error, err_buf_len); } HiveReturn DBGetFieldAsDouble(HiveResultSet* resultset, size_t column_idx, double* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len) { + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->getRowSet().getFieldAsDouble(column_idx, buffer, is_null_value, err_buf, + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->getRowSet().getFieldAsDouble(column_idx, buffer, is_null_value, hive_error, err_buf_len); } HiveReturn DBGetFieldAsInt(HiveResultSet* resultset, size_t column_idx, int* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len) { + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->getRowSet().getFieldAsInt(column_idx, buffer, is_null_value, err_buf, + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->getRowSet().getFieldAsInt(column_idx, buffer, is_null_value, hive_error, err_buf_len); } HiveReturn DBGetFieldAsLong(HiveResultSet* resultset, size_t column_idx, long* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len) { + int* is_null_value, hive_err_info 
*hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->getRowSet().getFieldAsLong(column_idx, buffer, is_null_value, err_buf, + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->getRowSet().getFieldAsLong(column_idx, buffer, is_null_value, hive_error, err_buf_len); } HiveReturn DBGetFieldAsULong(HiveResultSet* resultset, size_t column_idx, unsigned long* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len) { + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->getRowSet().getFieldAsULong(column_idx, buffer, is_null_value, err_buf, + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->getRowSet().getFieldAsULong(column_idx, buffer, is_null_value, hive_error, err_buf_len); } HiveReturn DBGetFieldAsI64(HiveResultSet* resultset, size_t column_idx, int64_t* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len) { + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->getRowSet().getFieldAsI64(column_idx, buffer, is_null_value, err_buf, + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->getRowSet().getFieldAsI64(column_idx, buffer, is_null_value, hive_error, err_buf_len); } HiveReturn DBGetFieldAsI64U(HiveResultSet* resultset, size_t column_idx, uint64_t* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len) { + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(resultset == NULL, __FUNCTION__, - "Hive resultset cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); - return resultset->getRowSet().getFieldAsI64U(column_idx, buffer, is_null_value, err_buf, + "Hive resultset cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); + return resultset->getRowSet().getFieldAsI64U(column_idx, buffer, is_null_value, hive_error, err_buf_len); } -HiveReturn DBCloseColumnDesc(HiveColumnDesc* column_desc, char* err_buf, size_t err_buf_len) { +HiveReturn DBCloseColumnDesc(HiveColumnDesc* column_desc, hive_err_info *hive_error, size_t err_buf_len) { RETURN_ON_ASSERT(column_desc == NULL, __FUNCTION__, - "Hive column descriptor cannot be NULL.", err_buf, err_buf_len, HIVE_ERROR); + "Hive column descriptor cannot be NULL.", hive_error, err_buf_len, HIVE_ERROR); delete column_desc; return HIVE_SUCCESS; } diff --git odbc/src/cpp/hiveclient.def odbc/src/cpp/hiveclient.def new file mode 100644 index 0000000..8ea68fa --- /dev/null +++ odbc/src/cpp/hiveclient.def @@ -0,0 +1,72 @@ +:################################################################### +: Licensed to the Apache Software Foundation (ASF) under one +: or more contributor license agreements. See the NOTICE file +: distributed with this work for additional information +: regarding copyright ownership. The ASF licenses this file +: to you under the Apache License, Version 2.0 (the +: "License"); you may not use this file except in compliance +: with the License. 
You may obtain a copy of the License at +: +: http://www.apache.org/licenses/LICENSE-2.0 +: +: Unless required by applicable law or agreed to in writing, +: software distributed under the License is distributed on an +: "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +: KIND, either express or implied. See the License for the +: specific language governing permissions and limitations +: under the License. +:################################################################### +EXPORTS +DBOpenConnection +DBCloseConnection +DBTables +DBColumns +DBGetFieldAsCString +DBGetFieldDataLen +DBGetFieldAsDouble +DBGetFieldAsI64 +DBGetFieldAsI64U +DBGetFieldAsLong +DBGetFieldAsULong +DBGetFieldAsInt +DBHasResults +DBFetch +DBGetIsNullable +DBGetHiveType +DBGetColumnType +DBGetColumnName +DBCloseColumnDesc +DBCreateColumnDesc +DBGetColumnCount +DBGetTypeInfo +DBExecute +DBSetBulkAttr +DBOpenConnection +DBCloseConnection +DBTables +DBColumns +DBGetFieldAsCString +DBGetFieldDataLen +DBGetFieldAsDouble +DBGetFieldAsI64 +DBGetFieldAsI64U +DBGetFieldAsLong +DBGetFieldAsULong +DBGetFieldAsInt +DBHasResults +DBFetch +DBGetIsNullable +DBGetHiveType +DBGetColumnType +DBGetColumnName +DBCloseColumnDesc +DBCreateColumnDesc +DBGetColumnCount +DBExecute +DBGetFieldByteSize +DBSeekNextRow +DBPrimaryKeys +DBForeignKeys +DBSpecialColumns +DBStatistics +DBSeekPrior diff --git odbc/src/cpp/hiveclient.h odbc/src/cpp/hiveclient.h index f1af670..9c0ba6d 100644 --- odbc/src/cpp/hiveclient.h +++ odbc/src/cpp/hiveclient.h @@ -1,4 +1,4 @@ -/**************************************************************************//** +/**************************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -61,10 +61,10 @@ #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif -#include #include #include "hiveconstants.h" +#include "thriftserverconstants.h" /****************************************************************************** @@ -82,6 +82,9 @@ extern "C" { #endif // __cplusplus +static const int HIVECLIENT_BUFFERED_SOCKET = 0; +static const int HIVECLIENT_FRAMED_SOCKET = 1; + /****************************************************************************** * Global Hive Client Functions (usable as C callback functions) *****************************************************************************/ @@ -102,12 +105,13 @@ extern "C" { * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. * NULL can be used if the caller does not care about the error message. * @param err_buf_len Size of the err_buf buffer. + * @param timeout connection timeout. * * @return A HiveConnection object representing the established database connection, * or NULL if an error occurred. Error messages will be stored in err_buf. */ HiveConnection* DBOpenConnection(const char* database, const char* host, int port, int framed, - char* err_buf, size_t err_buf_len); + hive_err_info *hive_error, size_t err_buf_len, unsigned int timeout); /** * @brief Disconnects from a Hive database. 
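As an illustration of the reworked connection entry point, the sketch below opens and closes a connection using the hive_err_info structure in place of the old err_buf pointer. The database, host, port, and timeout values are placeholders, the timeout units are not spelled out in this header, err_buf_len is assumed to size the message buffer inside the structure (mirroring the old err_buf/err_buf_len pair), and the transport constant simply picks one of the two socket types declared above.

#include <stdio.h>
#include "hiveclient.h"

/* Sketch only: open a Hive connection with the new signature and close it.
 * Connection parameters below are placeholders, not values from this change. */
static int connect_example(void)
{
  hive_err_info err = {{0}, {0}, 0};   /* message buffer, SQLSTATE, native error code */
  HiveConnection* conn;

  conn = DBOpenConnection("default", "localhost", 10000,
                          HIVECLIENT_FRAMED_SOCKET,
                          &err, sizeof(err.err_buf),
                          30 /* connection timeout; units per driver documentation */);
  if (conn == NULL) {
    /* The error structure now carries the SQLSTATE and native error code
     * in addition to the message text. */
    fprintf(stderr, "connect failed: SQLSTATE=%s native=%d msg=%s\n",
            err.sql_state, err.native_err, err.err_buf);
    return -1;
  }

  /* ... run queries here ... */

  return (DBCloseConnection(conn, &err, sizeof(err.err_buf)) == HIVE_SUCCESS) ? 0 : -1;
}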
@@ -126,7 +130,7 @@ HiveConnection* DBOpenConnection(const char* database, const char* host, int por * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred * (error messages will be stored in err_buf) */ -HiveReturn DBCloseConnection(HiveConnection* connection, char* err_buf, size_t err_buf_len); +HiveReturn DBCloseConnection(HiveConnection* connection, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Execute a query. @@ -143,15 +147,28 @@ HiveReturn DBCloseConnection(HiveConnection* connection, char* err_buf, size_t e * result, or NULL if the result is not needed. * @param max_buf_rows Maximum number of rows to buffer in the new HiveResultSet for the query * results + * @param resultset_size Result size of a single fetch specified by the client * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. * NULL can be used if the caller does not care about the error message. * @param err_buf_len Size of the err_buf buffer. + * @param qry_timeout client timeout in sec. * * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred * (error messages will be stored in err_buf) */ HiveReturn DBExecute(HiveConnection* connection, const char* query, HiveResultSet** resultset_ptr, - int max_buf_rows, char* err_buf, size_t err_buf_len); + int max_buf_rows, int resultset_size, hive_err_info *hive_error, size_t err_buf_len, + int qry_timeout); + +/** + * @brief change the bulk fetch attributes of the given resultset + * @param resultset_ptr A pointer to a HiveResultSet pointer which will be associated with the + * result, or NULL if the result is not needed. + * @param max_buf_rows Maximum number of rows to buffer in the new HiveResultSet for the query + * results + * @param resultset_size Result size of a single fetch specified by the client + */ +HiveReturn DBSetBulkAttr(HiveResultSet* resultset_ptr,int max_buf_rows, int fetch_row_size); /** * @brief Query for database tables. @@ -174,7 +191,7 @@ HiveReturn DBExecute(HiveConnection* connection, const char* query, HiveResultSe * (error messages will be stored in err_buf) */ HiveReturn DBTables(HiveConnection* connection, const char* tbl_search_pattern, - HiveResultSet** resultset_ptr, char* err_buf, size_t err_buf_len); + HiveResultSet** resultset_ptr, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Query for columns in table(s). @@ -201,7 +218,96 @@ HiveReturn DBTables(HiveConnection* connection, const char* tbl_search_pattern, */ HiveReturn DBColumns(HiveConnection* connection, int (*fpHiveToSQLType)(HiveType), const char* tbl_search_pattern, const char* col_search_pattern, - HiveResultSet** resultset_ptr, char* err_buf, size_t err_buf_len); + HiveResultSet** resultset_ptr, hive_err_info *hive_error, size_t err_buf_len); + +/** + * @brief Query for types in database(s). + * + * Gets a resultset containing the set of one or all data types supported in the database. + * Caller takes ownership of returned HiveResultSet and is responsible for deallocating the object by + * calling DBCloseResultSet. + * + * @see DBCloseResultSet() + * + * @param connection A HiveConnection object associated with a database connection. + * @param isODBC2 A boolean to indicate if its ODBC 2.0 connection. + * @param resultSet Pointer to array of hard coded resultset for data type query. Note that currectly + * is hardcode in the driver. + * @param resultSetSize Number of rows in the given resultset, 1 or all. 
+ * @param resultset_ptr A pointer to a HiveResultSet pointer which will be associated with the + * result. + * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. + * NULL can be used if the caller does not care about the error message. + * @param err_buf_len Size of the err_buf buffer. + * + * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred + * (error messages will be stored in err_buf) + */ +HiveReturn DBGetTypeInfo(HiveConnection* connection, short isODBC2, const char *resultSet[], + int resultSetSize, HiveResultSet** resultset_ptr, + hive_err_info *hive_error, size_t err_buf_len); +/* + * @param connection A HiveConnection object associated with a database connection. + * + * @param resultset_ptr A pointer to a HiveResultSet pointer which will be associated with the + * result. + * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. + * NULL can be used if the caller does not care about the error message. + * @param err_buf_len Size of the err_buf buffer. + * + * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred + * (error messages will be stored in err_buf) + */ +HiveReturn DBPrimaryKeys(HiveConnection* connection, HiveResultSet** resultset_ptr, + hive_err_info *hive_error, size_t err_buf_len); + +/* + * @param connection A HiveConnection object associated with a database connection. + * + * @param resultset_ptr A pointer to a HiveResultSet pointer which will be associated with the + * result. + * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. + * NULL can be used if the caller does not care about the error message. + * @param err_buf_len Size of the err_buf buffer. + * + * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred + * (error messages will be stored in err_buf) + */ +HiveReturn DBForeignKeys(HiveConnection* connection, HiveResultSet** resultset_ptr, + hive_err_info *hive_error, size_t err_buf_len); + +/* + * @param connection A HiveConnection object associated with a database connection. + * + * @param resultset_ptr A pointer to a HiveResultSet pointer which will be associated with the + * result. + * @param tabName Table name to get the index info + * @param isSupported Whether the requested operation is supported. For unsupported cases, + * an empty resultset will be returned. + * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. + * NULL can be used if the caller does not care about the error message. + * @param err_buf_len Size of the err_buf buffer. + * + * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred + * (error messages will be stored in err_buf) + */ +HiveReturn DBSpecialColumns(HiveConnection* connection, HiveResultSet** resultset_ptr, + hive_err_info *hive_error, size_t err_buf_len); + +/* + * @param connection A HiveConnection object associated with a database connection. + * + * @param resultset_ptr A pointer to a HiveResultSet pointer which will be associated with the + * result. + * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. + * NULL can be used if the caller does not care about the error message. + * @param err_buf_len Size of the err_buf buffer. 
+ * + * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred + * (error messages will be stored in err_buf) + */ +HiveReturn DBStatistics(HiveConnection* connection, HiveResultSet** resultset_ptr, char *tabName, + short isSupported, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Destroys any specified HiveResultSet object. @@ -217,7 +323,7 @@ HiveReturn DBColumns(HiveConnection* connection, int (*fpHiveToSQLType)(HiveType * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred * (error messages will be stored in err_buf) */ -HiveReturn DBCloseResultSet(HiveResultSet* resultset, char* err_buf, size_t err_buf_len); +HiveReturn DBCloseResultSet(HiveResultSet* resultset, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Fetches the next unfetched row in a HiveResultSet. @@ -229,13 +335,26 @@ HiveReturn DBCloseResultSet(HiveResultSet* resultset, char* err_buf, size_t err_ * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. * NULL can be used if the caller does not care about the error message. * @param err_buf_len Size of the err_buf buffer. - * + * @param num_rows pointer to number of rows fetched by the operations * @return HIVE_SUCCESS if successful, * HIVE_ERROR if an error occurred, * HIVE_NO_MORE_DATA if there are no more rows to fetch. * (error messages will be stored in err_buf) */ -HiveReturn DBFetch(HiveResultSet* resultset, char* err_buf, size_t err_buf_len); +HiveReturn DBFetch(HiveResultSet* resultset, hive_err_info *hive_error, size_t err_buf_len, int *num_rows); + +/** + * @brief Positions the cursor to prior row/batch + * + * + * @param resultset A HiveResultSet from which to fetch rows. + * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. + * NULL can be used if the caller does not care about the error message. + * @param err_buf_len Size of the err_buf buffer. + * @return HIVE_SUCCESS if successful, + * HIVE_ERROR if an error occurred + */ +HiveReturn DBSeekPrior(HiveResultSet* resultset, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Check for results. @@ -251,10 +370,25 @@ HiveReturn DBFetch(HiveResultSet* resultset, char* err_buf, size_t err_buf_len); * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred * (error messages will be stored in err_buf) */ -HiveReturn DBHasResults(HiveResultSet* resultset, int* has_results, char* err_buf, +HiveReturn DBHasResults(HiveResultSet* resultset, int* has_results, hive_err_info *hive_error, size_t err_buf_len); /** + * @move to next row in the fetch array resultset. + * + * move the current row index in the current client fetch array resultset. + * + * @param resultset A HiveResultSet from which to check for results. + * @param err_buf Buffer to receive an error message if HIVE_ERROR is returned. + * NULL can be used if the caller does not care about the error message. + * @param err_buf_len Size of the err_buf buffer. + * + * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred + * (error messages will be stored in err_buf) + */ +HiveReturn DBSeekNextRow(HiveResultSet* resultset, hive_err_info *hive_error,size_t err_buf_len); + +/** * @brief Determines the number of columns in the HiveResultSet. * * @param resultset A HiveResultSet from which to retrieve the column count. 
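To show how the bulk-fetch pieces added here fit together, a sketch of a fetch loop follows: DBExecute() sets the server-side buffer and per-fetch batch size, DBFetch() reports how many rows arrived through num_rows, and DBSeekNextRow() advances within the batch. The query text, sizes, and timeout are arbitrary, and the loop assumes the first row of a batch is already current when DBFetch() returns; if a driver build behaves differently, one extra DBSeekNextRow() call would be needed up front.

#include <stdio.h>
#include "hiveclient.h"

/* Sketch of a batched fetch loop over an already-open connection. */
static HiveReturn fetch_all_rows(HiveConnection* conn)
{
  hive_err_info err = {{0}, {0}, 0};
  HiveResultSet* rs = NULL;
  HiveReturn rc;
  int num_rows = 0;
  int i;

  /* Buffer up to 1000 rows from the server; hand back 100 rows per DBFetch(). */
  rc = DBExecute(conn, "SELECT * FROM sample_table", &rs,
                 1000 /* max_buf_rows */, 100 /* resultset_size */,
                 &err, sizeof(err.err_buf), 60 /* qry_timeout in seconds */);
  if (rc != HIVE_SUCCESS) {
    fprintf(stderr, "execute failed: %s\n", err.err_buf);
    return rc;
  }

  while ((rc = DBFetch(rs, &err, sizeof(err.err_buf), &num_rows)) == HIVE_SUCCESS) {
    for (i = 0; i < num_rows; i++) {
      /* ... read the current row's columns here (see the field getters below) ... */
      if (i + 1 < num_rows) {
        DBSeekNextRow(rs, &err, sizeof(err.err_buf)); /* step to the next row in the batch */
      }
    }
  }

  DBCloseResultSet(rs, &err, sizeof(err.err_buf));
  return (rc == HIVE_NO_MORE_DATA) ? HIVE_SUCCESS : rc;
}

On an existing result set, DBSetBulkAttr() can change max_buf_rows and the per-fetch batch size, and DBSeekPrior() repositions the cursor back to the previously fetched row or batch.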
@@ -266,7 +400,7 @@ HiveReturn DBHasResults(HiveResultSet* resultset, int* has_results, char* err_bu * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred * (error messages will be stored in err_buf) */ -HiveReturn DBGetColumnCount(HiveResultSet* resultset, size_t* col_count, char* err_buf, +HiveReturn DBGetColumnCount(HiveResultSet* resultset, size_t* col_count, hive_err_info *hive_error, size_t err_buf_len); /** @@ -290,7 +424,7 @@ HiveReturn DBGetColumnCount(HiveResultSet* resultset, size_t* col_count, char* e * (error messages will be stored in err_buf) */ HiveReturn DBCreateColumnDesc(HiveResultSet* resultset, size_t column_idx, - HiveColumnDesc** column_desc_ptr, char* err_buf, size_t err_buf_len); + HiveColumnDesc** column_desc_ptr, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Find the size of a field as a string. @@ -309,7 +443,7 @@ HiveReturn DBCreateColumnDesc(HiveResultSet* resultset, size_t column_idx, * (error messages will be stored in err_buf) */ HiveReturn DBGetFieldDataLen(HiveResultSet* resultset, size_t column_idx, size_t* col_len, - char* err_buf, size_t err_buf_len); + hive_err_info *hive_error, size_t err_buf_len); /** * @brief Get a field as a C string. @@ -337,7 +471,7 @@ HiveReturn DBGetFieldDataLen(HiveResultSet* resultset, size_t column_idx, size_t */ HiveReturn DBGetFieldAsCString(HiveResultSet* resultset, size_t column_idx, char* buffer, size_t buffer_len, size_t* data_byte_size, int* is_null_value, - char* err_buf, size_t err_buf_len); + hive_err_info *hive_error, size_t err_buf_len); /** * @brief Get a field as a double. @@ -359,7 +493,7 @@ HiveReturn DBGetFieldAsCString(HiveResultSet* resultset, size_t column_idx, char * HIVE_NO_MORE_DATA if this field has already been fetched */ HiveReturn DBGetFieldAsDouble(HiveResultSet* resultset, size_t column_idx, double* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len); + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Get a field as an int. @@ -381,7 +515,7 @@ HiveReturn DBGetFieldAsDouble(HiveResultSet* resultset, size_t column_idx, doubl * HIVE_NO_MORE_DATA if this field has already been fetched */ HiveReturn DBGetFieldAsInt(HiveResultSet* resultset, size_t column_idx, int* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len); + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Get a field as a long int. @@ -403,7 +537,7 @@ HiveReturn DBGetFieldAsInt(HiveResultSet* resultset, size_t column_idx, int* buf * HIVE_NO_MORE_DATA if this field has already been fetched */ HiveReturn DBGetFieldAsLong(HiveResultSet* resultset, size_t column_idx, long* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len); + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Get a field as an unsigned long int. @@ -425,7 +559,7 @@ HiveReturn DBGetFieldAsLong(HiveResultSet* resultset, size_t column_idx, long* b * HIVE_NO_MORE_DATA if this field has already been fetched */ HiveReturn DBGetFieldAsULong(HiveResultSet* resultset, size_t column_idx, unsigned long* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len); + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Get a field as an int64_t. 
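As a companion to the loop above, here is a sketch of reading individual columns from the current row with the revised getter signatures. The column indexes and the fixed text buffer are arbitrary; DBGetFieldDataLen() is shown only to illustrate how a caller could size a buffer dynamically.

#include <stdio.h>
#include "hiveclient.h"

/* Sketch: read column 0 as a string and column 1 as an int from the current row. */
static void read_current_row(HiveResultSet* rs)
{
  hive_err_info err = {{0}, {0}, 0};
  char text[256];
  size_t text_bytes = 0;
  size_t needed = 0;
  int value = 0;
  int is_null = 0;

  /* How many bytes the field needs as a string (could size a heap buffer). */
  if (DBGetFieldDataLen(rs, 0, &needed, &err, sizeof(err.err_buf)) == HIVE_SUCCESS) {
    printf("col0 length: %u bytes\n", (unsigned) needed);
  }

  if (DBGetFieldAsCString(rs, 0, text, sizeof(text), &text_bytes, &is_null,
                          &err, sizeof(err.err_buf)) == HIVE_SUCCESS) {
    printf("col0=%s\n", is_null ? "(null)" : text);
  }

  if (DBGetFieldAsInt(rs, 1, &value, &is_null,
                      &err, sizeof(err.err_buf)) == HIVE_SUCCESS) {
    printf("col1=%d%s\n", value, is_null ? " (null)" : "");
  }
}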
@@ -447,7 +581,7 @@ HiveReturn DBGetFieldAsULong(HiveResultSet* resultset, size_t column_idx, unsign * HIVE_NO_MORE_DATA if this field has already been fetched */ HiveReturn DBGetFieldAsI64(HiveResultSet* resultset, size_t column_idx, int64_t* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len); + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Get a field as an uint64_t. @@ -469,7 +603,7 @@ HiveReturn DBGetFieldAsI64(HiveResultSet* resultset, size_t column_idx, int64_t* * HIVE_NO_MORE_DATA if this field has already been fetched */ HiveReturn DBGetFieldAsI64U(HiveResultSet* resultset, size_t column_idx, uint64_t* buffer, - int* is_null_value, char* err_buf, size_t err_buf_len); + int* is_null_value, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Destroys a HiveColumnDesc object. @@ -484,7 +618,7 @@ HiveReturn DBGetFieldAsI64U(HiveResultSet* resultset, size_t column_idx, uint64_ * @return HIVE_SUCCESS if successful, or HIVE_ERROR if an error occurred * (error messages will be stored in err_buf) */ -HiveReturn DBCloseColumnDesc(HiveColumnDesc* column_desc, char* err_buf, size_t err_buf_len); +HiveReturn DBCloseColumnDesc(HiveColumnDesc* column_desc, hive_err_info *hive_error, size_t err_buf_len); /** * @brief Get a column name. diff --git odbc/src/cpp/hiveclienthelper.h odbc/src/cpp/hiveclienthelper.h index 5814a03..d4666d3 100644 --- odbc/src/cpp/hiveclienthelper.h +++ odbc/src/cpp/hiveclienthelper.h @@ -27,6 +27,7 @@ #ifndef __hive_client_helper_h__ #define __hive_client_helper_h__ +#include #include #include "hiveconstants.h" @@ -50,14 +51,22 @@ * * Macro will work for both 32 and 64 bit architectures */ +#if defined(WIN32) || defined(WIN64) +#define ATOI64(val) int64_t(_strtoi64(val, NULL, 10)) +#else #define ATOI64(val) int64_t(strtoll(val, NULL, 10)) +#endif /** * @brief A macro that converts a string to an unsigned 64 bit integer. * * Macro will work for both 32 and 64 bit architectures */ +#if defined(WIN32) || defined(WIN64) +#define ATOI64U(val) uint64_t(_strtoui64(val, NULL, 10)) +#else #define ATOI64U(val) uint64_t(strtoull(val, NULL, 10)) +#endif /** * @brief Convert a Macro'ed value to a string. @@ -79,10 +88,10 @@ * 2. saves the message to err_buf * 3. returns the specified ret_val */ -#define RETURN_ON_ASSERT(condition, funct_name, error_msg, err_buf, err_buf_len, ret_val) { \ +#define RETURN_ON_ASSERT(condition, funct_name, error_msg, hive_error, err_buf_len, ret_val) { \ if (condition) { \ cerr << funct_name << ": " << error_msg << endl << flush; \ - safe_strncpy(err_buf, error_msg, err_buf_len); \ + safe_strncpy((hive_error)->err_buf, (error_msg), (err_buf_len)); \ return ret_val; \ } \ } @@ -93,10 +102,13 @@ * 2. saves the message to err_buf * 3. 
returns the specified ret_val */ -#define RETURN_FAILURE(funct_name, error_msg, err_buf, err_buf_len, ret_val) { \ - RETURN_ON_ASSERT(true, funct_name, error_msg, err_buf, err_buf_len, ret_val); \ +#define RETURN_FAILURE(funct_name, err_state, err_code, error_msg, hive_error, err_buf_len, ret_val) { \ + safe_strncpy((hive_error)->sql_state, (err_state), 6); \ + (hive_error)->native_err = err_code; \ + RETURN_ON_ASSERT(true, funct_name, error_msg, hive_error, err_buf_len, ret_val); \ } + /***************************************************************** * Global Helper Functions *****************************************************************/ diff --git odbc/src/cpp/hiveconstants.h odbc/src/cpp/hiveconstants.h index 72f1049..96b5750 100644 --- odbc/src/cpp/hiveconstants.h +++ odbc/src/cpp/hiveconstants.h @@ -28,11 +28,11 @@ #define __hive_constants_h__ /// Maximum length of a Hive Client error message -static const int MAX_HIVE_ERR_MSG_LEN = 128; +#define MAX_HIVE_ERR_MSG_LEN 1024 /// Maximum length of a column name -static const int MAX_COLUMN_NAME_LEN = 64; +#define MAX_COLUMN_NAME_LEN 64 /// Maximum length of a column type name -static const int MAX_COLUMN_TYPE_LEN = 64; +#define MAX_COLUMN_TYPE_LEN 64 /* Default connection parameters */ @@ -43,8 +43,13 @@ static const char* DEFAULT_HOST = "localhost"; /// Default Hive Server port static const char* DEFAULT_PORT = "10000"; /// Default connection socket type -static const char* DEFAULT_FRAMED = "0"; +static const char* DEFAULT_FRAMED = "1"; +#define TYPEINFO_RESULT_SIZE 8 +#define NUM_ODBC2_COLS 15 /* ODBC 2.0 resultset for SQLGetTypeInfo() will have 15 columns */ +#define NUM_ODBC3_COLS 19 /* ODBC 3.0 resultset for SQLGetTypeInfo() will have 19 columns */ +#define PKEYS_NUM_COLS 6 + /** * Enumeration of known Hive data types */ @@ -77,7 +82,16 @@ enum HiveReturn HIVE_ERROR, HIVE_NO_MORE_DATA, HIVE_SUCCESS_WITH_MORE_DATA, - HIVE_STILL_EXECUTING + HIVE_STILL_EXECUTING, + HIVE_TIMEOUT, + HIVE_NETWORK_ERROR }; +/* Error information from Hive server */ +typedef struct _hive_err_info { + char err_buf[MAX_HIVE_ERR_MSG_LEN]; + char sql_state[6]; + int native_err; +} hive_err_info; + #endif // __hive_constants_h__ diff --git odbc/src/cpp/if/fb303.thrift odbc/src/cpp/if/fb303.thrift new file mode 100644 index 0000000..ee1cc5a --- /dev/null +++ odbc/src/cpp/if/fb303.thrift @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/** + * fb303.thrift + */ + +namespace cpp facebook.fb303 + +/** + * Common status reporting mechanism across all services + */ +enum fb_status { + DEAD = 0, + STARTING = 1, + ALIVE = 2, + STOPPING = 3, + STOPPED = 4, + WARNING = 5, +} + +/** + * Standard base service + */ +service FacebookService { + + /** + * Returns a descriptive name of the service + */ + string getName(), + + /** + * Returns the version of the service + */ + string getVersion(), + + /** + * Gets the status of this service + */ + fb_status getStatus(), + + /** + * User friendly description of status, such as why the service is in + * the dead or warning state, or what is being started or stopped. + */ + string getStatusDetails(), + + /** + * Gets the counters for this service + */ + map getCounters(), + + /** + * Gets the value of a single counter + */ + i64 getCounter(1: string key), + + /** + * Sets an option + */ + void setOption(1: string key, 2: string value), + + /** + * Gets an option + */ + string getOption(1: string key), + + /** + * Gets all options + */ + map getOptions(), + + /** + * Returns a CPU profile over the given time interval (client and server + * must agree on the profile format). + */ + string getCpuProfile(1: i32 profileDurationInSec), + + /** + * Returns the unix time that the server has been running since + */ + i64 aliveSince(), + + /** + * Tell the server to reload its configuration, reopen log files, etc + */ + oneway void reinitialize(), + + /** + * Suggest a shutdown to the server + */ + oneway void shutdown(), + +} diff --git odbc/src/cpp/if/hive_metastore.thrift odbc/src/cpp/if/hive_metastore.thrift new file mode 100644 index 0000000..4048354 --- /dev/null +++ odbc/src/cpp/if/hive_metastore.thrift @@ -0,0 +1,405 @@ +#!/usr/local/bin/thrift -java +# +# Thrift Service that the MetaStore is built on +# + +include "fb303.thrift" + +namespace cpp Apache.Hadoop.Hive + +const string DDL_TIME = "transient_lastDdlTime" + +struct Version { + 1: string version, + 2: string comments +} + +struct FieldSchema { + 1: string name, // name of the field + 2: string type, // type of the field. 
primitive types defined above, specify list, map for lists & maps + 3: string comment +} + +struct Type { + 1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types + 2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE) + 3: optional string type2, // val type if the name is 'map' (MAP_TYPE) + 4: optional list fields // if the name is one of the user defined types +} + +enum HiveObjectType { + GLOBAL = 1, + DATABASE = 2, + TABLE = 3, + PARTITION = 4, + COLUMN = 5, +} + +enum PrincipalType { + USER = 1, + ROLE = 2, + GROUP = 3, +} + +struct HiveObjectRef{ + 1: HiveObjectType objectType, + 2: string dbName, + 3: string objectName, + 4: list partValues, + 5: string columnName, +} + +struct PrivilegeGrantInfo { + 1: string privilege, + 2: i32 createTime, + 3: string grantor, + 4: PrincipalType grantorType, + 5: bool grantOption, +} + +struct HiveObjectPrivilege { + 1: HiveObjectRef hiveObject, + 2: string principalName, + 3: PrincipalType principalType, + 4: PrivilegeGrantInfo grantInfo, +} + +struct PrivilegeBag { + 1: list privileges, +} + +struct PrincipalPrivilegeSet { + 1: map> userPrivileges, // user name -> privilege grant info + 2: map> groupPrivileges, // group name -> privilege grant info + 3: map> rolePrivileges, //role name -> privilege grant info +} + +struct Role { + 1: string roleName, + 2: i32 createTime, + 3: string ownerName, +} + +// namespace for tables +struct Database { + 1: string name, + 2: string description, + 3: string locationUri, + 4: map parameters, // properties associated with the database + 5: optional PrincipalPrivilegeSet privileges +} + +// This object holds the information needed by SerDes +struct SerDeInfo { + 1: string name, // name of the serde, table name by default + 2: string serializationLib, // usually the class that implements the extractor & loader + 3: map parameters // initialization parameters +} + +// sort order of a column (column name along with asc(1)/desc(0)) +struct Order { + 1: string col, // sort column name + 2: i32 order // asc(1) or desc(0) +} + +// this object holds all the information about physical storage of the data belonging to a table +struct StorageDescriptor { + 1: list cols, // required (refer to types defined above) + 2: string location, // defaults to //tablename + 3: string inputFormat, // SequenceFileInputFormat (binary) or TextInputFormat` or custom format + 4: string outputFormat, // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format + 5: bool compressed, // compressed or not + 6: i32 numBuckets, // this must be specified if there are any dimension columns + 7: SerDeInfo serdeInfo, // serialization and deserialization information + 8: list bucketCols, // reducer grouping columns and clustering columns and bucketing columns` + 9: list sortCols, // sort order of the data in each bucket + 10: map parameters // any user supplied key value hash +} + +// table information +struct Table { + 1: string tableName, // name of the table + 2: string dbName, // database name ('default') + 3: string owner, // owner of this table + 4: i32 createTime, // creation time of the table + 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on) + 6: i32 retention, // retention time + 7: StorageDescriptor sd, // storage descriptor of the table + 8: list partitionKeys, // partition keys of the table. 
only primitive types are supported + 9: map parameters, // to store comments or any other user level parameters + 10: string viewOriginalText, // original view text, null for non-view + 11: string viewExpandedText, // expanded view text, null for non-view + 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE + 13: optional PrincipalPrivilegeSet privileges, +} + +struct Partition { + 1: list values // string value is converted to appropriate partition key type + 2: string dbName, + 3: string tableName, + 4: i32 createTime, + 5: i32 lastAccessTime, + 6: StorageDescriptor sd, + 7: map parameters, + 8: optional PrincipalPrivilegeSet privileges +} + +struct Index { + 1: string indexName, // unique with in the whole database namespace + 2: string indexHandlerClass, // reserved + 3: string dbName, + 4: string origTableName, + 5: i32 createTime, + 6: i32 lastAccessTime, + 7: string indexTableName, + 8: StorageDescriptor sd, + 9: map parameters, + 10: bool deferredRebuild +} + +// schema of the table/query results etc. +struct Schema { + // column names, types, comments + 1: list fieldSchemas, // delimiters etc + 2: map properties +} + +exception MetaException { + 1: string message +} + +exception UnknownTableException { + 1: string message +} + +exception UnknownDBException { + 1: string message +} + +exception AlreadyExistsException { + 1: string message +} + +exception InvalidObjectException { + 1: string message +} + +exception NoSuchObjectException { + 1: string message +} + +exception IndexAlreadyExistsException { + 1: string message +} + +exception InvalidOperationException { + 1: string message +} + +exception ConfigValSecurityException { + 1: string message +} + +/** +* This interface is live. +*/ +service ThriftHiveMetastore extends fb303.FacebookService +{ + void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) + Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2) + void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) + list get_databases(1:string pattern) throws(1:MetaException o1) + list get_all_databases() throws(1:MetaException o1) + void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // returns the type with given name (make seperate calls for the dependent types if needed) + Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2) + bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) + bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2) + map get_type_all(1:string name) + throws(1:MetaException o2) + + // Gets a list of FieldSchemas describing the columns of a particular table + list get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), + + // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table + list get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + + // create a Hive table. 
Following fields must be set + // tableName + // database (only 'default' for now until Hive QL supports databases) + // owner (not needed, but good to have for tracking purposes) + // sd.cols (list of field schemas) + // sd.inputFormat (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat) + // sd.outputFormat (SequenceFileInputFormat (binary) or TextInputFormat) + // sd.serdeInfo.serializationLib (SerDe class name eg org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe + // * See notes on DDL_TIME + void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) + // drops the table and all the partitions associated with it if the table has partitions + // delete data (including partitions) if deleteData is set to true + void drop_table(1:string dbname, 2:string name, 3:bool deleteData) + throws(1:NoSuchObjectException o1, 2:MetaException o3) + list get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1) + list get_all_tables(1: string db_name) throws (1: MetaException o1) + + Table get_table(1:string dbname, 2:string tbl_name) + throws (1:MetaException o1, 2:NoSuchObjectException o2) + // alter table applies to only future partitions not for existing partitions + // * See notes on DDL_TIME + void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl) + throws (1:InvalidOperationException o1, 2:MetaException o2) + + // the following applies to only tables that have partitions + // * See notes on DDL_TIME + Partition add_partition(1:Partition new_part) + throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + Partition append_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) + throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name) + throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + bool drop_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:bool deleteData) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, + 4: string user_name, 5: list group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2) + + Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // returns all the partitions for this table in reverse chronological order. + // If max parts is given then it will return only that many. 
+ list get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + list get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, + 4: string user_name, 5: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + + list get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) + throws(1:MetaException o2) + + // get_partition*_ps methods allow filtering by a partial partition specification, + // as needed for dynamic partitions. The values that are not restricted should + // be empty strings. Nulls were considered (instead of "") but caused errors in + // generated Python code. The size of part_vals may be smaller than the + // number of partition columns - the unspecified values are considered the same + // as "". + list get_partitions_ps(1:string db_name 2:string tbl_name + 3:list part_vals, 4:i16 max_parts=-1) + throws(1:MetaException o1) + list get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1, + 5: string user_name, 6: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + + list get_partition_names_ps(1:string db_name, + 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) + throws(1:MetaException o1) + + // get the partitions matching the given partition filter + list get_partitions_by_filter(1:string db_name 2:string tbl_name + 3:string filter, 4:i16 max_parts=-1) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // get partitions give a list of partition names + list get_partitions_by_names(1:string db_name 2:string tbl_name 3:list names) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // changes the partition to the new partition object. partition is identified from the part values + // in the new_part + // * See notes on DDL_TIME + void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part) + throws(1:InvalidOperationException o1, 2:MetaException o2) + + // gets the value of the configuration key in the metastore server. returns + // defaultValue if the key does not exist. if the configuration key does not + // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is + // thrown. 
+ string get_config_value(1:string name, 2:string defaultValue) + throws(1:ConfigValSecurityException o1) + + // converts a partition name into a partition values array + list partition_name_to_vals(1: string part_name) + throws(1: MetaException o1) + // converts a partition name into a partition specification (a mapping from + // the partition cols to the values) + map partition_name_to_spec(1: string part_name) + throws(1: MetaException o1) + + //index + Index add_index(1:Index new_index, 2: Table index_table) + throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx) + throws (1:InvalidOperationException o1, 2:MetaException o2) + bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + list get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + list get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1) + throws(1:MetaException o2) + + //authorization privileges + + bool create_role(1:Role role) throws(1:MetaException o1) + bool drop_role(1:string role_name) throws(1:MetaException o1) + list get_role_names() throws(1:MetaException o1) + bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type, + 4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1) + bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type) + throws(1:MetaException o1) + list list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1) + + PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name, + 3: list group_names) throws(1:MetaException o1) + list list_privileges(1:string principal_name, 2:PrincipalType principal_type, + 3: HiveObjectRef hiveObject) throws(1:MetaException o1) + + bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) + bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) + + //Authentication (delegation token) interfaces + + // get metastore server delegation token for use from the map/reduce tasks to authenticate + // to metastore server + string get_delegation_token(1:string token_owner, 2:string renewer_kerberos_principal_name) + throws (1:MetaException o1) + + // method to renew delegation token obtained from metastore server + i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1) + + // method to cancel delegation token obtained from metastore server + void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1) +} + +// * Note about the DDL_TIME: When creating or altering a table or a partition, +// if the DDL_TIME is not set, the current time will be used. + +// For storing info about archived partitions in parameters + +// Whether the partition is archived +const string IS_ARCHIVED = "is_archived", +// The original location of the partition, before archiving. After archiving, +// this directory will contain the archive. 
When the partition +// is dropped, this directory will be deleted +const string ORIGINAL_LOCATION = "original_location", + +// these should be needed only for backward compatibility with filestore +const string META_TABLE_COLUMNS = "columns", +const string META_TABLE_COLUMN_TYPES = "columns.types", +const string BUCKET_FIELD_NAME = "bucket_field_name", +const string BUCKET_COUNT = "bucket_count", +const string FIELD_TO_DIMENSION = "field_to_dimension", +const string META_TABLE_NAME = "name", +const string META_TABLE_DB = "db", +const string META_TABLE_LOCATION = "location", +const string META_TABLE_SERDE = "serde", +const string META_TABLE_PARTITION_COLUMNS = "partition_columns", +const string FILE_INPUT_FORMAT = "file.inputformat", +const string FILE_OUTPUT_FORMAT = "file.outputformat", +const string META_TABLE_STORAGE = "storage_handler", + + + diff --git odbc/src/cpp/if/hive_service.thrift odbc/src/cpp/if/hive_service.thrift new file mode 100644 index 0000000..6fec5c1 --- /dev/null +++ odbc/src/cpp/if/hive_service.thrift @@ -0,0 +1,85 @@ +#!/usr/local/bin/thrift -java + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Thrift Service that the hive service is built on +# + +# +# TODO: include/thrift is shared among different components. It +# should not be under metastore. + +include "fb303.thrift" +include "hive_metastore.thrift" +include "queryplan.thrift" + +namespace cpp Apache.Hadoop.Hive + +// Enumeration of JobTracker.State +enum JobTrackerState { + INITIALIZING = 1, + RUNNING = 2, +} + +// Map-Reduce cluster status information +struct HiveClusterStatus { + 1: i32 taskTrackers, + 2: i32 mapTasks, + 3: i32 reduceTasks, + 4: i32 maxMapTasks, + 5: i32 maxReduceTasks, + 6: JobTrackerState state, +} + +exception HiveServerException { + 1: string message + 2: i32 errorCode + 3: string SQLState +} + +# Interface for Thrift Hive Server +service ThriftHive extends hive_metastore.ThriftHiveMetastore { + # Execute a query. Takes a HiveQL string + void execute(1:string query) throws(1:HiveServerException ex) + + # Fetch one row. This row is the serialized form + # of the result of the query + string fetchOne() throws(1:HiveServerException ex) + + # Fetch a given number of rows or remaining number of + # rows whichever is smaller. 
+ list fetchN(1:i32 numRows) throws(1:HiveServerException ex) + + # Fetch all rows of the query result + list fetchAll() throws(1:HiveServerException ex) + + # Get a schema object with fields represented with native Hive types + hive_metastore.Schema getSchema() throws(1:HiveServerException ex) + + # Get a schema object with fields represented with Thrift DDL types + hive_metastore.Schema getThriftSchema() throws(1:HiveServerException ex) + + # Get the status information about the Map-Reduce cluster + HiveClusterStatus getClusterStatus() throws(1:HiveServerException ex) + + # Get the queryplan annotated with counter information + queryplan.QueryPlan getQueryPlan() throws(1:HiveServerException ex) + + # clean up last Hive query (releasing locks etc.) + void clean() +} diff --git odbc/src/cpp/if/queryplan.thrift odbc/src/cpp/if/queryplan.thrift new file mode 100644 index 0000000..e437d75 --- /dev/null +++ odbc/src/cpp/if/queryplan.thrift @@ -0,0 +1,106 @@ +namespace cpp Apache.Hadoop.Hive + +enum AdjacencyType { CONJUNCTIVE, DISJUNCTIVE } +struct Adjacency { +1: string node, +2: list children, +3: AdjacencyType adjacencyType, +} + +enum NodeType { OPERATOR, STAGE } +struct Graph { +1: NodeType nodeType, +2: list roots, +3: list adjacencyList, +} + +#Represents a operator along with its counters +enum OperatorType { + JOIN, + MAPJOIN, + EXTRACT, + FILTER, + FORWARD, + GROUPBY, + LIMIT, + SCRIPT, + SELECT, + TABLESCAN, + FILESINK, + REDUCESINK, + UNION, + UDTF, + LATERALVIEWJOIN, + LATERALVIEWFORWARD, + HASHTABLESINK, + HASHTABLEDUMMY, +} + +struct Operator { +1: string operatorId, +2: OperatorType operatorType, +3: map operatorAttributes, +4: map operatorCounters, +5: bool done, +6: bool started, +} + +# Represents whether it is a map-reduce job or not. In future, different tasks can add their dependencies +# The operator graph shows the operator tree +enum TaskType { MAP, REDUCE, OTHER } +struct Task { +1: string taskId, +2: TaskType taskType +3: map taskAttributes, +4: map taskCounters, +5: optional Graph operatorGraph, +6: optional list operatorList, +7: bool done, +8: bool started, +} + +# Represents a Stage - unfortunately, it is represented as Task in ql/exec +enum StageType { + CONDITIONAL, + COPY, + DDL, + MAPRED, + EXPLAIN, + FETCH, + FUNC, + MAPREDLOCAL, + MOVE, + STATS, +} + +struct Stage { +1: string stageId, +2: StageType stageType, +3: map stageAttributes, +4: map stageCounters, +5: list taskList, +6: bool done, +7: bool started, +} + +# Represents a query - +# The graph maintains the stage dependency.In case of conditional tasks, it is represented as if only +# one of the dependencies need to be executed +struct Query { +1: string queryId, +2: string queryType, +3: map queryAttributes, +4: map queryCounters, +5: Graph stageGraph, +6: list stageList, +7: bool done, +8: bool started, +} + +# List of all queries - each query maintains if it is done or started +# This can be used to track all the queries in the session +struct QueryPlan { +1: list queries, +2: bool done, +3: bool started, +} diff --git odbc/src/cpp/thriftserverconstants.h odbc/src/cpp/thriftserverconstants.h index fe4bac4..46cc763 100644 --- odbc/src/cpp/thriftserverconstants.h +++ odbc/src/cpp/thriftserverconstants.h @@ -1,5 +1,4 @@ -/**************************************************************************//** - * +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -8,20 +7,19 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * - ****************************************************************************** - * + */ + +/** * @file thriftserverconstants.h * @brief Provides constants necessary for Hive Client interaction with Hive Server - * - *****************************************************************************/ + */ #ifndef __thrift_server_constants_h__ @@ -36,6 +34,14 @@ static const int MAX_BYTE_LENGTH = 334; /// Default null format string representation static const char* DEFAULT_NULL_FORMAT = "\\N"; +/// Default null format string representation +#define DEFAULT_FIELD_DELIM "\t" + +/* TODO: replace the real null representation with 'NULL' because of a bug in the Hive Server + * fetch function; remove this when Hive Server has been fixed to not replace the actual null + * rep with NULL. */ +#define DEFAULT_SERIALIZATION_NULL_FORMAT "NULL" + /// Schema map property key for field delimiters static const char* FIELD_DELIM = "field.delim"; /// Schema map property key for null format diff --git odbc/src/driver/Makefile.am odbc/src/driver/Makefile.am new file mode 100644 index 0000000..6b57165 --- /dev/null +++ odbc/src/driver/Makefile.am @@ -0,0 +1,43 @@ +# +# Copyright 2005 The Apache Software Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +lib_LTLIBRARIES = libhiveodbc.la + +AM_CPPFLAGS = -I. -I../cpp $(LTDINCL) $(ODBC_CPPFLAGS) -DWITHOUT_WINTERFACE=1 +AM_LDFLAGS = -no-undefined + +if HAVE_WIN32 + AM_LDFLAGS += -avoid-version -Wl,--kill-at -Wl,--strip-all +endif + +VERSION_INFO = `grep -v '^\#' $(srcdir)/libtool-version` + +libhiveodbc_la_LDFLAGS = $(AM_LDFLAGS) -version-info $(VERSION_INFO) + +libhiveodbc_la_LIBADD = ../cpp/libhiveclient.la $(ODBC_LIB) + +libhiveodbc_la_SOURCES = hiveodbc.c hiveodbc.h + +libhiveodbc_la_DEPENDENCIES = + +if HAVE_WIN32 +libhiveodbc_la_DEPENDENCIES += hiveodbc_win32_rc.$(OBJEXT) +libhiveodbc_la_LDFLAGS += -Wl,hiveodbc_win32_rc.$(OBJEXT) +endif + +hiveodbc_win32_rc.$(OBJEXT): hiveodbc_win32_rc.rc + $(WINDRES) -i $< -o $@ + diff --git odbc/src/driver/Makefile.in odbc/src/driver/Makefile.in new file mode 100644 index 0000000..25a36cc --- /dev/null +++ odbc/src/driver/Makefile.in @@ -0,0 +1,568 @@ +# Makefile.in generated by automake 1.11.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, +# Inc. 
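The DEFAULT_FIELD_DELIM and DEFAULT_SERIALIZATION_NULL_FORMAT constants added to thriftserverconstants.h above describe the wire format of a fetched row: column values separated by tabs, with the literal string "NULL" standing in for SQL NULL until the server-side fetch bug mentioned in the TODO is fixed. A minimal, self-contained sketch of that decoding step follows; it is an illustration of the format, not the HiveSerializedRowSet code from the client library.

    // Illustrative only: split one serialized Hive row on the field delimiter
    // and flag fields carrying the server-side null marker.
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    int main() {
      const std::string row = "1\tNULL\thello";     // as returned by fetchOne()
      std::vector<std::string> fields;
      std::stringstream ss(row);
      std::string field;
      while (std::getline(ss, field, '\t')) {        // DEFAULT_FIELD_DELIM
        fields.push_back(field);
      }
      for (size_t i = 0; i < fields.size(); ++i) {
        bool is_null = (fields[i] == "NULL");        // DEFAULT_SERIALIZATION_NULL_FORMAT
        std::cout << "col " << i << ": "
                  << (is_null ? "<SQL NULL>" : fields[i]) << "\n";
      }
      return 0;
    }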
+# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Copyright 2005 The Apache Software Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +@HAVE_WIN32_TRUE@am__append_1 = -avoid-version -Wl,--kill-at -Wl,--strip-all +@HAVE_WIN32_TRUE@am__append_2 = hiveodbc_win32_rc.$(OBJEXT) +@HAVE_WIN32_TRUE@am__append_3 = -Wl,hiveodbc_win32_rc.$(OBJEXT) +subdir = src/driver +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/ax_boost_base.m4 \ + $(top_srcdir)/m4/find_apr.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__installdirs = "$(DESTDIR)$(libdir)" +LTLIBRARIES = $(lib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am_libhiveodbc_la_OBJECTS = hiveodbc.lo 
+libhiveodbc_la_OBJECTS = $(am_libhiveodbc_la_OBJECTS) +libhiveodbc_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(libhiveodbc_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/config/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(libhiveodbc_la_SOURCES) +DIST_SOURCES = $(libhiveodbc_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +APR_CFLAGS = @APR_CFLAGS@ +APR_CPPFLAGS = @APR_CPPFLAGS@ +APR_INCLUDES = @APR_INCLUDES@ +APR_LDFLAGS = @APR_LDFLAGS@ +APR_LIB = @APR_LIB@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +ODBC_CPPFLAGS = @ODBC_CPPFLAGS@ +ODBC_LIB = @ODBC_LIB@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +THRIFT_COMPILER = @THRIFT_COMPILER@ +THRIFT_CPPFLAGS = @THRIFT_CPPFLAGS@ +THRIFT_INCLUDE = @THRIFT_INCLUDE@ +THRIFT_LDFLAGS = @THRIFT_LDFLAGS@ +THRIFT_LIBDIR = @THRIFT_LIBDIR@ +VERSION = @VERSION@ +VER_INFO = @VER_INFO@ +WINDRES = @WINDRES@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ 
+host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +lt_ECHO = @lt_ECHO@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +lib_LTLIBRARIES = libhiveodbc.la +AM_CPPFLAGS = -I. -I../cpp $(LTDINCL) $(ODBC_CPPFLAGS) -DWITHOUT_WINTERFACE=1 +AM_LDFLAGS = -no-undefined $(am__append_1) +VERSION_INFO = `grep -v '^\#' $(srcdir)/libtool-version` +libhiveodbc_la_LDFLAGS = $(AM_LDFLAGS) -version-info $(VERSION_INFO) \ + $(am__append_3) +libhiveodbc_la_LIBADD = ../cpp/libhiveclient.la $(ODBC_LIB) +libhiveodbc_la_SOURCES = hiveodbc.c hiveodbc.h +libhiveodbc_la_DEPENDENCIES = $(am__append_2) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/driver/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/driver/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-libLTLIBRARIES: $(lib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ + } + +uninstall-libLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ + done + +clean-libLTLIBRARIES: + -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) + @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed 
-e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libhiveodbc.la: $(libhiveodbc_la_OBJECTS) $(libhiveodbc_la_DEPENDENCIES) + $(libhiveodbc_la_LINK) -rpath $(libdir) $(libhiveodbc_la_OBJECTS) $(libhiveodbc_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hiveodbc.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo 
"$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(libdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-libLTLIBRARIES + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-libLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libLTLIBRARIES clean-libtool ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am \ + install-libLTLIBRARIES install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-libLTLIBRARIES + + +hiveodbc_win32_rc.$(OBJEXT): hiveodbc_win32_rc.rc + $(WINDRES) -i $< -o $@ + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git odbc/src/driver/hiveodbc.c odbc/src/driver/hiveodbc.c new file mode 100644 index 0000000..3923453 --- /dev/null +++ odbc/src/driver/hiveodbc.c @@ -0,0 +1,12295 @@ +/**************************************************************************** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ****************************************************************************/ + +/* + Original sqliteodbc license: + + This software is copyrighted by Christian Werner + and other authors. The following terms apply to all files associated + with the software unless explicitly disclaimed in individual files. 
+ + The authors hereby grant permission to use, copy, modify, distribute, + and license this software and its documentation for any purpose, provided + that existing copyright notices are retained in all copies and that this + notice is included verbatim in any distributions. No written agreement, + license, or royalty fee is required for any of the authorized uses. + Modifications to this software may be copyrighted by their authors + and need not follow the licensing terms described here, provided that + the new terms are clearly indicated on the first page of each file where + they apply. + + IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY + FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY + DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE + IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE + NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR + MODIFICATIONS. +*/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "hiveodbc.h" + +#ifndef WITHOUT_WINTERFACE + #define WINTERFACE +#endif + +#ifdef WINTERFACE + #include +#endif + +#if defined(_WIN32) || defined(_WIN64) + #include "hiveodbc_win32_rc.h" + #define ODBC_INI "ODBC.INI" +#else + #define ODBC_INI ".odbc.ini" +#endif + +#ifndef COLATTRIBUTE_LAST_ARG_TYPE + #ifdef _WIN64 + #define COLATTRIBUTE_LAST_ARG_TYPE SQLLEN * + #else + #define COLATTRIBUTE_LAST_ARG_TYPE SQLPOINTER + #endif +#endif + +#ifndef SETSTMTOPTION_LAST_ARG_TYPE + #define SETSTMTOPTION_LAST_ARG_TYPE SQLROWCOUNT +#endif + +#undef min +#define min(a, b) ((a) < (b) ? (a) : (b)) + +#undef max +#define max(a, b) ((a) < (b) ? (b) : (a)) + +#ifndef PTRDIFF_T + #define PTRDIFF_T int +#endif + +#define array_size(x) (sizeof (x) / sizeof (x[0])) + +#define stringify1(s) #s +#define stringify(s) stringify1(s) + +#define verinfo(maj, min, lev) ((maj) << 16 | (min) << 8 | (lev)) + +/* Column types for static string column descriptions (SQLTables etc.) 
*/ + +#if defined(WINTERFACE) && !defined(_WIN32) && !defined(_WIN64) + #define SCOL_VARCHAR SQL_WVARCHAR + #define SCOL_CHAR SQL_WCHAR +#else + #define SCOL_VARCHAR SQL_VARCHAR + #define SCOL_CHAR SQL_CHAR +#endif + +#define ENV_MAGIC 0x53544145 +#define DBC_MAGIC 0x53544144 +#define DEAD_MAGIC 0xdeadbeef + +static const char *xdigits = "0123456789ABCDEFabcdef"; + +#ifdef MEMORY_DEBUG +static void * +xmalloc_(int n, char *file, int line) +{ + int nn = n + 4 * sizeof (long); + long *p; + + p = malloc(nn); + if (!p) { + #if (MEMORY_DEBUG > 1) + fprintf(stderr, "malloc\t%d\tNULL\t%s:%d\n", n, file, line); + #endif + return NULL; + } + p[0] = 0xdead1234; + nn = nn / sizeof (long) - 1; + p[1] = n; + p[nn] = 0xdead5678; + #if (MEMORY_DEBUG > 1) + fprintf(stderr, "malloc\t%d\t%p\t%s:%d\n", n, &p[2], file, line); + #endif + return (void *) &p[2]; +} + + + +static void * +xrealloc_(void *old, int n, char *file, int line) +{ + int nn = n + 4 * sizeof (long), nnn; + long *p, *pp; + + if (n == 0 || !old) { + return xmalloc_(n, file, line); + } + p = &((long *) old)[-2]; + if (p[0] != 0xdead1234) { + fprintf(stderr, "*** low end corruption @ %p\n", old); + abort(); + } + nnn = p[1] + 4 * sizeof (long); + nnn = nnn / sizeof (long) - 1; + if (p[nnn] != 0xdead5678) { + fprintf(stderr, "*** high end corruption @ %p\n", old); + abort(); + } + pp = realloc(p, nn); + if (!pp) { + #if (MEMORY_DEBUG > 1) + fprintf(stderr, "realloc\t%p,%d\tNULL\t%s:%d\n", old, n, file, line); + #endif + return NULL; + } + #if (MEMORY_DEBUG > 1) + fprintf(stderr, "realloc\t%p,%d\t%p\t%s:%d\n", old, n, &pp[2], file, line); + #endif + p = pp; + p[1] = n; + nn = nn / sizeof (long) - 1; + p[nn] = 0xdead5678; + return (void *) &p[2]; +} + +static void +xfree_(void *x, char *file, int line) +{ + long *p; + int n; + + if (!x) { + return; + } + p = &((long *) x)[-2]; + if (p[0] != 0xdead1234) { + fprintf(stderr, "*** low end corruption @ %p\n", x); + abort(); + } + n = p[1] + 4 * sizeof (long); + n = n / sizeof (long) - 1; + if (p[n] != 0xdead5678) { + fprintf(stderr, "*** high end corruption @ %p\n", x); + abort(); + } + #if (MEMORY_DEBUG > 1) + fprintf(stderr, "free\t%p\t\t%s:%d\n", x, file, line); + #endif + free(p); +} + +static void +xfree__(void *x) +{ + xfree_(x, "unknown location", 0); +} + +static char * +xstrdup_(const char *str, char *file, int line) +{ + char *p; + + if (!str) { + #if (MEMORY_DEBUG > 1) + fprintf(stderr, "strdup\tNULL\tNULL\t%s:%d\n", file, line); + #endif + return NULL; + } + p = xmalloc_(strlen(str) + 1, file, line); + if (p) { + strcpy(p, str); + } + #if (MEMORY_DEBUG > 1) + fprintf(stderr, "strdup\t%p\t%p\t%s:%d\n", str, p, file, line); + #endif + return p; +} + + #define xmalloc(x) xmalloc_(x, __FILE__, __LINE__) + #define xrealloc(x,y) xrealloc_(x, y, __FILE__, __LINE__) + #define xfree(x) xfree_(x, __FILE__, __LINE__) + #define xstrdup(x) xstrdup_(x, __FILE__, __LINE__) + +#else + + #define xmalloc(x) malloc(x) + #define xrealloc(x,y) realloc(x, y) + #define xfree(x) free(x) + #define xstrdup(x) strdup_(x) + +#endif /* #ifdef MEMORY_DEBUG */ + + +#if defined(_WIN32) || defined(_WIN64) + + #define vsnprintf _vsnprintf + #define snprintf _snprintf + #define strcasecmp _stricmp + #define strncasecmp _strnicmp + +static HINSTANCE NEAR hModule; /* Saved module handle for resources */ + +#endif + + +#if defined(_WIN32) || defined(_WIN64) + +/* + * SQLHENV, SQLHDBC, and SQLHSTMT synchronization + * is done using a critical section in ENV structure. 
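 *
 * The HDBC_LOCK/HSTMT_LOCK macros below bail out with SQL_INVALID_HANDLE on
 * stale handles and otherwise enter that critical section; the corresponding
 * UNLOCK macros leave it.  Driver entry points are expected to bracket their
 * real work with them, roughly like this (illustrative sketch only, not a
 * function copied from this patch; drvfreestmt is a placeholder helper name):
 *
 *   SQLRETURN SQL_API
 *   SQLFreeStmt(SQLHSTMT stmt, SQLUSMALLINT opt)
 *   {
 *       SQLRETURN ret;
 *
 *       HSTMT_LOCK(stmt);              // may return SQL_INVALID_HANDLE
 *       ret = drvfreestmt(stmt, opt);
 *       HSTMT_UNLOCK(stmt);
 *       return ret;
 *   }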
+ */ + + #define HDBC_LOCK(hdbc) \ +{ \ + DBC *d; \ + \ + if ((hdbc) == SQL_NULL_HDBC) { \ + return SQL_INVALID_HANDLE; \ + } \ + d = (DBC *) (hdbc); \ + if (d->magic != DBC_MAGIC || !d->env) { \ + return SQL_INVALID_HANDLE; \ + } \ + if (d->env->magic != ENV_MAGIC) { \ + return SQL_INVALID_HANDLE; \ + } \ + EnterCriticalSection(&d->env->cs); \ + d->env->owner = GetCurrentThreadId(); \ + TRACE_MSG("HDBC_LOCK(dbc=%p, env=%p) %s\n", d, d->env, __FUNCTION__); \ +} + + #define HDBC_UNLOCK(hdbc) \ + if ((hdbc) != SQL_NULL_HDBC) { \ + DBC *d; \ + \ + d = (DBC *) (hdbc); \ + if (d->magic == DBC_MAGIC && d->env && \ + d->env->magic == ENV_MAGIC) { \ + d->env->owner = 0; \ + LeaveCriticalSection(&d->env->cs); \ + TRACE_MSG("HDBC_UNLOCK(dbc=%p, env=%p) %s\n", d, d->env, __FUNCTION__); \ + } \ + } + + #define HSTMT_LOCK(hstmt) \ +{ \ + DBC *d; \ + \ + if ((hstmt) == SQL_NULL_HSTMT) { \ + return SQL_INVALID_HANDLE; \ + } \ + d = (DBC *) ((STMT *) (hstmt))->dbc; \ + if (d->magic != DBC_MAGIC || !d->env) { \ + return SQL_INVALID_HANDLE; \ + } \ + if (d->env->magic != ENV_MAGIC) { \ + return SQL_INVALID_HANDLE; \ + } \ + EnterCriticalSection(&d->env->cs); \ + d->env->owner = GetCurrentThreadId(); \ + TRACE_MSG("HSTMT_LOCK(stmt=%p, dbc=%p, env=%p) %s\n", hstmt, d, d->env, __FUNCTION__); \ +} + + #define HSTMT_UNLOCK(hstmt) \ + if ((hstmt) != SQL_NULL_HSTMT) { \ + DBC *d; \ + \ + d = (DBC *) ((STMT *) (hstmt))->dbc; \ + if (d->magic == DBC_MAGIC && d->env && \ + d->env->magic == ENV_MAGIC) { \ + d->env->owner = 0; \ + LeaveCriticalSection(&d->env->cs); \ + TRACE_MSG("HSTMT_UNLOCK(stmt=%p, dbc=%p, env=%p) %s\n", hstmt, d, d->env, __FUNCTION__); \ + } \ + } + +#else /* #if defined(_WIN32) || defined(_WIN64) */ + +/* + * On UN*X assume that we are single-threaded or + * the driver manager provides serialization for us. + * + * In iODBC (3.52.x) serialization can be turned + * on using the DSN property "ThreadManager=yes". + * + * In unixODBC that property is named + * "Threading=0-3" and takes one of these values: + * + * 0 - no protection + * 1 - statement level protection + * 2 - connection level protection + * 3 - environment level protection + * + * unixODBC 2.2.11 uses environment level protection + * by default when it has been built with pthread + * support. 
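 *
 * For example, with unixODBC that protection is requested in the driver's
 * odbcinst.ini section (the driver name and library path below are
 * placeholders, not values shipped with this patch):
 *
 *   [HiveODBC]
 *   Description = Hive ODBC driver
 *   Driver      = /usr/local/lib/libhiveodbc.so
 *   Threading   = 3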
+ */ + + #define HDBC_LOCK(hdbc) + #define HDBC_UNLOCK(hdbc) + #define HSTMT_LOCK(hdbc) + #define HSTMT_UNLOCK(hdbc) + +#endif /* defined(_WIN32) || defined(_WIN64) */ + +#if defined(ENABLE_NVFS) && ENABLE_NVFS + extern void nvfs_init(void); + extern const char *nvfs_makevfs(const char *); +#endif + +/* + * tolower() replacement w/o locale + */ + +static const char upper_chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; +static const char lower_chars[] = "abcdefghijklmnopqrstuvwxyz"; + +static int +TOLOWER(int c) +{ + if (c) { + char *p = strchr(upper_chars, c); + + if (p) { + c = lower_chars[p - upper_chars]; + } + } + return c; +} + +/* + * isdigit() replacement w/o ctype.h + */ + +static const char digit_chars[] = "0123456789"; + +#define ISDIGIT(c) \ + ((c) && strchr(digit_chars, (c)) != NULL) + +/* + * isspace() replacement w/o ctype.h + */ + +static const char space_chars[] = " \f\n\r\t\v"; + +#define ISSPACE(c) \ + ((c) && strchr(space_chars, (c)) != NULL) + + +#ifdef HIVE_ODBC_TRACE +static const char *TRACE_FILE_PATH = + #if defined(_WIN32) || defined(_WIN64) + "C:\\odbchivetrace.txt"; + #else + "/tmp/odbchivetrace.txt"; + #endif + +#define TRACE_FUNC_START() \ +do { \ + FILE *trace_file = fopen(TRACE_FILE_PATH, "a+"); \ + if (NULL != trace_file) { \ + fprintf(trace_file, "Entering %s\n", __FUNCTION__); \ + fflush(trace_file); \ + fclose(trace_file); \ + } \ +} while (0) + +#define TRACE_MSG(_fmt, ...) \ +do { \ +FILE *trace_file = fopen(TRACE_FILE_PATH, "a+"); \ +if (NULL != trace_file) { \ + fprintf(trace_file, "[%s, %d] " _fmt, __FUNCTION__, __LINE__, __VA_ARGS__); \ + fflush(trace_file); \ +fclose(trace_file); \ + } \ +} while (0) + +#else + #define TRACE_FUNC_START() do {} while (0) + #define TRACE_MSG(_fmt, ...) do {} while (0) +#endif + +/* the result cache size is multiple of user specified resultset size */ +#define SET_FETCH_ROW_SIZE(s,f) \ + { \ + if (MAX_BUFFERED_RESULT_ROWS > (s)) \ + (f) = (MAX_BUFFERED_RESULT_ROWS/(s)) * (s); \ + else \ + (f) = (s); \ + } + +/* + * Forward declarations of static functions. + */ + +static void dbtraceapi(DBC *d, char *fn, const char *sql); +static void freedyncols(STMT *s); +static void freeresult(STMT *s, int clrcols); +static void freerows(char **rowp); +static void unbindcols(STMT *s); + +static SQLRETURN drvexecute(SQLHSTMT stmt, int initial); +static SQLRETURN freestmt(HSTMT stmt); +static SQLRETURN mkbindcols(STMT *s, int ncols); +static SQLRETURN setupdyncols(STMT *s); +static SQLRETURN setupparbuf(STMT *s, BINDPARM *p); + +#if (defined(_WIN32) || defined(_WIN64)) && defined(WINTERFACE) +/* MS Access hack part 1 (reserved error -7748) */ +static COL *statSpec2P, *statSpec3P; +#endif + +#if (MEMORY_DEBUG < 1) +/** + * Duplicate string using xmalloc(). + * @param str string to be duplicated + * @result pointer to new string or NULL + */ + +static char * +strdup_(const char *str) +{ + char *p = NULL; + + if (str) { + p = xmalloc(strlen(str) + 1); + if (p) { + strcpy(p, str); + } + } + return p; +} +#endif + +#ifdef WINTERFACE + +/** + * Return length of UNICODE string. + * @param str UNICODE string + * @result length of string + */ + +static int +uc_strlen(SQLWCHAR *str) +{ + int len = 0; + + if (str) { + while (*str) { + ++len; + ++str; + } + } + return len; +} + +/** + * Copy UNICODE string like strncpy(). 
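 *
 * Note: exactly like strncpy(), the destination is only NUL-terminated when
 * the source is shorter than len; e.g. copying a 6-character source with
 * len == 4 stores 4 SQLWCHARs and no terminator.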
+ * @param dest destination area + * @param src source area + * @param len length of source area + * @return pointer to destination area + */ + +static SQLWCHAR * +uc_strncpy(SQLWCHAR *dest, SQLWCHAR *src, int len) +{ + int i = 0; + + while (i < len) { + if (!src[i]) { + break; + } + dest[i] = src[i]; + ++i; + } + if (i < len) { + dest[i] = 0; + } + return dest; +} + +/** + * Make UNICODE string from UTF8 string into buffer. + * @param str UTF8 string to be converted + * @param len length of str or -1 + * @param uc destination area to receive UNICODE string + * @param ucLen byte length of destination area + */ + +static void +uc_from_utf_buf(unsigned char *str, int len, SQLWCHAR *uc, int ucLen) +{ + ucLen = ucLen / sizeof (SQLWCHAR); + if (!uc || ucLen < 0) { + return; + } + if (len < 0) { + len = ucLen * 5; + } + uc[0] = 0; + if (str) { + int i = 0; + + while (i < len && *str && i < ucLen) { + unsigned char c = str[0]; + + if (c < 0x80) { + uc[i++] = c; + ++str; + } else if (c <= 0xc1 || c >= 0xf5) { + /* illegal, ignored */ + ++str; + } else if (c < 0xe0) { + if ((str[1] & 0xc0) == 0x80) { + unsigned long t = ((c & 0x1f) << 6) | (str[1] & 0x3f); + + uc[i++] = t; + str += 2; + } else { + uc[i++] = c; + ++str; + } + } else if (c < 0xf0) { + if ((str[1] & 0xc0) == 0x80 && (str[2] & 0xc0) == 0x80) { + unsigned long t = ((c & 0x0f) << 12) | + ((str[1] & 0x3f) << 6) | (str[2] & 0x3f); + + uc[i++] = t; + str += 3; + } else { + uc[i++] = c; + ++str; + } + } else if (c < 0xf8) { + if ((str[1] & 0xc0) == 0x80 + && (str[2] & 0xc0) == 0x80 + && (str[3] & 0xc0) == 0x80) { + unsigned long t = ((c & 0x03) << 18) | + ((str[1] & 0x3f) << 12) | ((str[2] & 0x3f) << 6) | + (str[4] & 0x3f); + + if (sizeof (SQLWCHAR) == 2 * sizeof (char) && + t >= 0x10000) { + t -= 0x10000; + uc[i++] = 0xd800 | (t & 0x3ff); + if (i >= ucLen) { + break; + } + t = 0xdc00 | ((t >> 10) & 0x3ff); + } + uc[i++] = t; + str += 4; + } else { + uc[i++] = c; + ++str; + } + } else if (c < 0xfc) { + if ((str[1] & 0xc0) == 0x80 && (str[2] & 0xc0) == 0x80 && + (str[3] & 0xc0) == 0x80 && (str[4] & 0xc0) == 0x80) { + unsigned long t = ((c & 0x01) << 24) | + ((str[1] & 0x3f) << 18) | ((str[2] & 0x3f) << 12) | + ((str[4] & 0x3f) << 6) | (str[5] & 0x3f); + + if (sizeof (SQLWCHAR) == 2 * sizeof (char) && + t >= 0x10000) { + t -= 0x10000; + uc[i++] = 0xd800 | (t & 0x3ff); + if (i >= ucLen) { + break; + } + t = 0xdc00 | ((t >> 10) & 0x3ff); + } + uc[i++] = t; + str += 5; + } else { + uc[i++] = c; + ++str; + } + } else { + /* ignore */ + ++str; + } + } + if (i < ucLen) { + uc[i] = 0; + } + } +} + +/** + * Make UNICODE string from UTF8 string. + * @param str UTF8 string to be converted + * @param len length of UTF8 string + * @return alloc'ed UNICODE string to be free'd by uc_free() + */ + +static SQLWCHAR * +uc_from_utf(unsigned char *str, int len) +{ + SQLWCHAR *uc = NULL; + int ucLen; + + if (str) { + if (len == SQL_NTS) { + len = strlen((char *) str); + } + ucLen = sizeof (SQLWCHAR) * (len + 1); + uc = xmalloc(ucLen); + if (uc) { + uc_from_utf_buf(str, len, uc, ucLen); + } + } + return uc; +} + +/** + * Make UTF8 string from UNICODE string. 
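 *
 * Example: U+00E9 is emitted as the two bytes 0xC3 0xA9, i.e.
 * 0xC0 | (0x00E9 >> 6) and 0x80 | (0x00E9 & 0x3F); surrogate pairs in
 * 16-bit SQLWCHAR input are recombined into one code point before encoding.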
+ * @param str UNICODE string to be converted + * @param len length of UNICODE string in bytes + * @return alloc'ed UTF8 string to be free'd by uc_free() + */ + +static char * +uc_to_utf(SQLWCHAR *str, int len) +{ + int i; + char *cp, *ret = NULL; + + if (!str) { + return ret; + } + if (len == SQL_NTS) { + len = uc_strlen(str); + } else { + len = len / sizeof (SQLWCHAR); + } + cp = xmalloc(len * 6 + 1); + if (!cp) { + return ret; + } + ret = cp; + for (i = 0; i < len; i++) { + unsigned long c = str[i]; + + if (sizeof (SQLWCHAR) == 2 * sizeof (char)) { + c &= 0xffff; + } + if (c < 0x80) { + *cp++ = c; + } else if (c < 0x800) { + *cp++ = 0xc0 | ((c >> 6) & 0x1f); + *cp++ = 0x80 | (c & 0x3f); + } else if (c < 0x10000) { + if (sizeof (SQLWCHAR) == 2 * sizeof (char) && + c >= 0xd800 && c <= 0xdbff && i + 1 < len) { + unsigned long c2 = str[i + 1] & 0xffff; + + if (c2 >= 0xdc00 && c <= 0xdfff) { + c = ((c & 0x3ff) | ((c2 & 0x3ff) << 10)) + 0x10000; + *cp++ = 0xf0 | ((c >> 18) & 0x07); + *cp++ = 0x80 | ((c >> 12) & 0x3f); + *cp++ = 0x80 | ((c >> 6) & 0x3f); + *cp++ = 0x80 | (c & 0x3f); + ++i; + continue; + } + } + *cp++ = 0xe0 | ((c >> 12) & 0x0f); + *cp++ = 0x80 | ((c >> 6) & 0x3f); + *cp++ = 0x80 | (c & 0x3f); + } else if (c < 0x200000) { + *cp++ = 0xf0 | ((c >> 18) & 0x07); + *cp++ = 0x80 | ((c >> 12) & 0x3f); + *cp++ = 0x80 | ((c >> 6) & 0x3f); + *cp++ = 0x80 | (c & 0x3f); + } else if (c < 0x4000000) { + *cp++ = 0xf8 | ((c >> 24) & 0x03); + *cp++ = 0x80 | ((c >> 18) & 0x3f); + *cp++ = 0x80 | ((c >> 12) & 0x3f); + *cp++ = 0x80 | ((c >> 6) & 0x3f); + *cp++ = 0x80 | (c & 0x3f); + } else if (c < 0x80000000) { + *cp++ = 0xfc | ((c >> 31) & 0x01); + *cp++ = 0x80 | ((c >> 24) & 0x3f); + *cp++ = 0x80 | ((c >> 18) & 0x3f); + *cp++ = 0x80 | ((c >> 12) & 0x3f); + *cp++ = 0x80 | ((c >> 6) & 0x3f); + *cp++ = 0x80 | (c & 0x3f); + } + } + *cp = '\0'; + return ret; +} + +/** + * Make UTF8 string from UNICODE string. + * @param str UNICODE string to be converted + * @param len length of UNICODE string in characters + * @return alloc'ed UTF8 string to be free'd by uc_free() + */ + +static char * +uc_to_utf_c(SQLWCHAR *str, int len) +{ + if (len != SQL_NTS) { + len = len * sizeof (SQLWCHAR); + } + return uc_to_utf(str, len); +} + +#endif /* WINTERFACE */ + +#if defined(WINTERFACE) || defined(_WIN32) || defined(_WIN64) + +/** + * Free converted UTF8 or UNICODE string. + * @param str string to be free'd + */ + +static void +uc_free(void *str) +{ + if (str) { + xfree(str); + } +} + +#endif + +#if defined(_WIN32) || defined(_WIN64) + +/** + * Convert multibyte, current code page string to UTF8 string, + * @param str multibyte string to be converted + * @param len length of multibyte string + * @return alloc'ed UTF8 string to be free'd by uc_free() + */ + +static char * +wmb_to_utf(char *str, int len) +{ + WCHAR *wstr; + OSVERSIONINFO ovi; + int nchar, is2k, cp = CP_OEMCP; + + ovi.dwOSVersionInfoSize = sizeof (ovi); + GetVersionEx(&ovi); + is2k = ovi.dwPlatformId == VER_PLATFORM_WIN32_NT && ovi.dwMajorVersion > 4; + if (AreFileApisANSI()) { + cp = is2k ? 
CP_THREAD_ACP : CP_ACP; + } + nchar = MultiByteToWideChar(cp, 0, str, len, NULL, 0); + wstr = xmalloc((nchar + 1) * sizeof (WCHAR)); + if (!wstr) { + return NULL; + } + wstr[0] = 0; + nchar = MultiByteToWideChar(cp, 0, str, len, wstr, nchar); + wstr[nchar] = 0; + str = xmalloc((nchar + 1) * 7); + if (!str) { + xfree(wstr); + return NULL; + } + str[0] = '\0'; + nchar = WideCharToMultiByte(CP_UTF8, 0, wstr, -1, str, nchar * 7, 0, 0); + str[nchar] = '\0'; + xfree(wstr); + return str; +} + +/** + * Convert UTF8 string to multibyte, current code page string, + * @param str UTF8 string to be converted + * @param len length of UTF8 string + * @return alloc'ed multibyte string to be free'd by uc_free() + */ + +static char * +utf_to_wmb(char *str, int len) +{ + WCHAR *wstr; + OSVERSIONINFO ovi; + int nchar, is2k, cp = CP_OEMCP; + + ovi.dwOSVersionInfoSize = sizeof (ovi); + GetVersionEx(&ovi); + is2k = ovi.dwPlatformId == VER_PLATFORM_WIN32_NT && ovi.dwMajorVersion > 4; + if (AreFileApisANSI()) { + cp = is2k ? CP_THREAD_ACP : CP_ACP; + } + nchar = MultiByteToWideChar(CP_UTF8, 0, str, len, NULL, 0); + wstr = xmalloc((nchar + 1) * sizeof (WCHAR)); + if (!wstr) { + return NULL; + } + wstr[0] = 0; + nchar = MultiByteToWideChar(CP_UTF8, 0, str, len, wstr, nchar); + wstr[nchar] = 0; + str = xmalloc((nchar + 1) * 7); + if (!str) { + xfree(wstr); + return NULL; + } + str[0] = '\0'; + nchar = WideCharToMultiByte(cp, 0, wstr, -1, str, nchar * 7, 0, 0); + str[nchar] = '\0'; + xfree(wstr); + return str; +} + + #ifdef WINTERFACE + +/** + * Convert multibyte, current code page string to UNICODE string, + * @param str multibyte string to be converted + * @param len length of multibyte string + * @return alloc'ed UNICODE string to be free'd by uc_free() + */ + +static WCHAR * +wmb_to_uc(char *str, int len) +{ + WCHAR *wstr; + OSVERSIONINFO ovi; + int nchar, is2k, cp = CP_OEMCP; + + ovi.dwOSVersionInfoSize = sizeof (ovi); + GetVersionEx(&ovi); + is2k = ovi.dwPlatformId == VER_PLATFORM_WIN32_NT && ovi.dwMajorVersion > 4; + if (AreFileApisANSI()) { + cp = is2k ? CP_THREAD_ACP : CP_ACP; + } + nchar = MultiByteToWideChar(cp, 0, str, len, NULL, 0); + wstr = xmalloc((nchar + 1) * sizeof (WCHAR)); + if (!wstr) { + return NULL; + } + wstr[0] = 0; + nchar = MultiByteToWideChar(cp, 0, str, len, wstr, nchar); + wstr[nchar] = 0; + return wstr; +} + +/** + * Convert UNICODE string to multibyte, current code page string, + * @param str UNICODE string to be converted + * @param len length of UNICODE string + * @return alloc'ed multibyte string to be free'd by uc_free() + */ + +static char * +uc_to_wmb(WCHAR *wstr, int len) +{ + char *str; + OSVERSIONINFO ovi; + int nchar, is2k, cp = CP_OEMCP; + + ovi.dwOSVersionInfoSize = sizeof (ovi); + GetVersionEx(&ovi); + is2k = ovi.dwPlatformId == VER_PLATFORM_WIN32_NT && ovi.dwMajorVersion > 4; + if (AreFileApisANSI()) { + cp = is2k ? CP_THREAD_ACP : CP_ACP; + } + nchar = WideCharToMultiByte(cp, 0, wstr, len, NULL, 0, 0, 0); + str = xmalloc((nchar + 1) * 2); + if (!str) { + return NULL; + } + str[0] = '\0'; + nchar = WideCharToMultiByte(cp, 0, wstr, len, str, nchar * 2, 0, 0); + str[nchar] = '\0'; + return str; +} + + #endif /* WINTERFACE */ + +#endif /* _WIN32 || _WIN64 */ + + +#ifdef USE_DLOPEN_FOR_GPPS + + #include + + #define SQLGetPrivateProfileString(A,B,C,D,E,F) drvgpps(d,A,B,C,D,E,F) + +/* + * EXPERIMENTAL: SQLGetPrivateProfileString infrastructure using + * dlopen(), in theory this makes the driver independent from the + * driver manager, i.e. 
the same driver binary can run with iODBC + * and unixODBC. + */ + +static void +drvgetgpps(DBC *d) +{ + void *lib; + int (*gpps)(); + + lib = dlopen("libodbcinst.so.1", RTLD_LAZY); + if (!lib) { + lib = dlopen("libodbcinst.so", RTLD_LAZY); + } + if (!lib) { + lib = dlopen("libiodbcinst.so.2", RTLD_LAZY); + } + if (!lib) { + lib = dlopen("libiodbcinst.so", RTLD_LAZY); + } + if (lib) { + gpps = (int (*)()) dlsym(lib, "SQLGetPrivateProfileString"); + if (!gpps) { + dlclose(lib); + return; + } + d->instlib = lib; + d->gpps = gpps; + } +} + +static void +drvrelgpps(DBC *d) +{ + if (d->instlib) { + dlclose(d->instlib); + d->instlib = 0; + } +} + +static int +drvgpps(DBC *d, char *sect, char *ent, char *def, char *buf, + int bufsiz, char *fname) +{ + if (d->gpps) { + return d->gpps(sect, ent, def, buf, bufsiz, fname); + } + strncpy(buf, def, bufsiz); + buf[bufsiz - 1] = '\0'; + return 1; +} +#else + #include + #define drvgetgpps(d) + #define drvrelgpps(d) +#endif /* USE_DLOPEN_FOR_GPPS */ + + +/** + * Set error message and SQL state on DBC + * @param d database connection pointer + * @param naterr native error code + * @param msg error message + * @param st SQL state + */ + +#if defined(__GNUC__) && (__GNUC__ >= 2) +static void setstatd(DBC *, int, char *, char *, ...) +__attribute__((format (printf, 3, 5))); +#endif + +static void +setstatd(DBC *d, int naterr, char *msg, char *st, ...) +{ + va_list ap; + + if (!d) { + return; + } + d->naterr = naterr; + d->logmsg[0] = '\0'; + if (msg) { + int count; + + va_start(ap, st); + count = vsnprintf((char *) d->logmsg, sizeof (d->logmsg), msg, ap); + va_end(ap); + if (count < 0) { + d->logmsg[sizeof (d->logmsg) - 1] = '\0'; + } + } + if (!st) { + st = "?????"; + } + strncpy(d->sqlstate, st, 5); + d->sqlstate[5] = '\0'; +} + +/** + * Set error message and SQL state on statement + * @param s statement pointer + * @param naterr native error code + * @param msg error message + * @param st SQL state + */ + +#if defined(__GNUC__) && (__GNUC__ >= 2) +static void setstat(STMT *, int, char *, char *, ...) +__attribute__((format (printf, 3, 5))); +#endif + +static void +setstat(STMT *s, int naterr, char *msg, char *st, ...) +{ + va_list ap; + + if (!s) { + return; + } + s->naterr = naterr; + s->logmsg[0] = '\0'; + if (msg) { + int count; + + va_start(ap, st); + count = vsnprintf((char *) s->logmsg, sizeof (s->logmsg), msg, ap); + va_end(ap); + if (count < 0) { + s->logmsg[sizeof (s->logmsg) - 1] = '\0'; + } + } + if (!st) { + st = "?????"; + } + strncpy(s->sqlstate, st, 5); + s->sqlstate[5] = '\0'; +} + +/** + * Report IM001 (not implemented) SQL error code for HDBC. + * @param dbc database connection handle + * @result ODBC error code + */ + +static SQLRETURN +drvunimpldbc(HDBC dbc) +{ + DBC *d; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + setstatd(d, -1, "not supported", "IM001"); + return SQL_ERROR; +} + +/** + * Report IM001 (not implemented) SQL error code for HSTMT. + * @param stmt statement handle + * @result ODBC error code + */ + +static SQLRETURN +drvunimplstmt(HSTMT stmt) +{ + STMT *s; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + setstat(s, -1, "not supported", "IM001"); + return SQL_ERROR; +} + +/** + * Free memory given pointer to memory pointer. 
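 *
 * The referenced pointer is reset to NULL after freeing, so calling this
 * twice on the same location, or with a NULL pointer, is harmless.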
+ * @param x pointer to pointer to memory to be free'd + */ + +static void +freep(void *x) +{ + if (x && ((char **) x)[0]) { + xfree(((char **) x)[0]); + ((char **) x)[0] = NULL; + } +} + +/** + * Report S1000 (out of memory) SQL error given STMT. + * @param s statement pointer + * @result ODBC error code + */ + +static SQLRETURN +nomem(STMT *s) +{ + setstat(s, -1, "out of memory", (*s->ov3) ? "HY000" : "S1000"); + return SQL_ERROR; +} + +/** + * Report S1000 (not connected) SQL error given STMT. + * @param s statement pointer + * @result ODBC error code + */ + +static SQLRETURN +noconn(STMT *s) +{ + setstat(s, -1, "not connected", (*s->ov3) ? "HY000" : "S1000"); + return SQL_ERROR; +} + +/** + * Internal locale neutral strtod function. + * @param data pointer to string + * @param endp pointer for ending character + * @result double value + */ + +static double +ln_strtod(const char *data, char **endp) +{ +#if defined(HAVE_LOCALECONV) || defined(_WIN32) || defined(_WIN64) + struct lconv *lc; + char buf[128], *p, *end; + double value; + + lc = localeconv(); + if (lc && lc->decimal_point && lc->decimal_point[0] && + lc->decimal_point[0] != '.') { + strncpy(buf, data, sizeof (buf) - 1); + buf[sizeof (buf) - 1] = '\0'; + p = strchr(buf, '.'); + if (p) { + *p = lc->decimal_point[0]; + } + p = buf; + } else { + p = (char *) data; + } + value = strtod(p, &end); + end = (char *) data + (end - p); + if (endp) { + *endp = end; + } + return value; +#else + return strtod(data, endp); +#endif +} + +/** + * Strip quotes from quoted string in-place. + * @param str string + */ + +static char * +unquote(char *str) +{ + if (str) { + int len = strlen(str); + + if (len > 1) { + if ((str[0] == '\'' && str[len - 1] == '\'') || + (str[0] == '"' && str[len - 1] == '"') || + (str[0] == '[' && str[len - 1] == ']')) { + str[len - 1] = '\0'; + strcpy(str, str + 1); + } + } + } + return str; +} + +/** + * Unescape search pattern for e.g. table name in + * catalog functions. Replacements in string are done in-place. + * @param str string + * @result number of pattern characters in string or 0 + */ + +static int +unescpat(char *str) +{ + char *p, *q; + int count = 0; + + p = str; + while ((q = strchr(p, '_')) != NULL) { + if (q == str || q[-1] != '\\') { + count++; + } + p = q + 1; + } + p = str; + while ((q = strchr(p, '%')) != NULL) { + if (q == str || q[-1] != '\\') { + count++; + } + p = q + 1; + } + p = str; + while ((q = strchr(p, '\\')) != NULL) { + if (q[1] == '\\' || q[1] == '_' || q[1] == '%') { + strcpy(q, q + 1); + } + p = q + 1; + } + return count; +} + +/** + * SQL LIKE string match with optional backslash escape handling. + * @param str string + * @param pat pattern + * @param esc when true, treat literally "\\" as "\", "\?" 
as "?", "\_" as "_" + * @result true when pattern matched + */ + +static int +namematch(char *str, char *pat, int esc) +{ + int cp, ch; + + while (1) { + cp = TOLOWER(*pat); + if (cp == '\0') { + if (*str != '\0') { + goto nomatch; + } + break; + } + if (*str == '\0' && cp != '%') { + goto nomatch; + } + if (cp == '%') { + while (*pat == '%') { + ++pat; + } + cp = TOLOWER(*pat); + if (cp == '\0') { + break; + } + while (1) { + if (cp != '_' && cp != '\\') { + while (*str) { + ch = TOLOWER(*str); + if (ch == cp) { + break; + } + ++str; + } + } + if (namematch(str, pat, esc)) { + goto match; + } + if (*str == '\0') { + goto nomatch; + } + ch = TOLOWER(*str); + ++str; + } + } + if (cp == '_') { + pat++; + str++; + continue; + } + if (esc && cp == '\\' && + (pat[1] == '\\' || pat[1] == '%' || pat[1] == '_')) { + ++pat; + cp = TOLOWER(*pat); + } + ch = TOLOWER(*str++); + ++pat; + if (ch != cp) { + goto nomatch; + } + } +match: + return 1; +nomatch: + return 0; +} + + + +/** + * Free counted array of char pointers. + * @param rowp pointer to char pointer array + * + * The -1-th element of the array holds the array size. + * All non-NULL pointers of the array and then the array + * itself are free'd. + */ + +static void +freerows(char **rowp) +{ + PTRDIFF_T size, i; + + if (!rowp) { + return; + } + --rowp; + size = (PTRDIFF_T) rowp[0]; + for (i = 1; i <= size; i++) { + freep(&rowp[i]); + } + freep(&rowp); +} + +/** + * Map SQL field type from string to ODBC integer type code. + * @param typename field type string + * @param nosign pointer to indicator for unsigned field or NULL + * @param ov3 boolean, true for SQL_OV_ODBC3 + * @param nowchar boolean, for WINTERFACE don't use WCHAR + * @result SQL data type + */ + +static int +mapsqltype(const char *typename, int *nosign, int ov3, int nowchar) +{ + char *p, *q; + int testsign = 0, result; + +#ifdef WINTERFACE + result = nowchar ? SQL_VARCHAR : SQL_WVARCHAR; +#else + result = SQL_VARCHAR; +#endif + if (!typename) { + return result; + } + q = p = xmalloc(strlen(typename) + 1); + if (!p) { + return result; + } + strcpy(p, typename); + while (*q) { + *q = TOLOWER(*q); + ++q; + } + if (strncmp(p, "inter", 5) == 0) { + } else if (strncmp(p, "int", 3) == 0 || + strncmp(p, "mediumint", 9) == 0) { + testsign = 1; + result = SQL_INTEGER; + } else if (strncmp(p, "numeric", 7) == 0) { + result = SQL_DOUBLE; + } else if (strncmp(p, "tinyint", 7) == 0) { + testsign = 1; + result = SQL_TINYINT; + } else if (strncmp(p, "smallint", 8) == 0) { + testsign = 1; + result = SQL_SMALLINT; + } else if (strncmp(p, "float", 5) == 0) { + result = SQL_DOUBLE; + } else if (strncmp(p, "double", 6) == 0 || + strncmp(p, "real", 4) == 0) { + result = SQL_DOUBLE; + } else if (strncmp(p, "timestamp", 9) == 0) { +#ifdef SQL_TYPE_TIMESTAMP + result = ov3 ? SQL_TYPE_TIMESTAMP : SQL_TIMESTAMP; +#else + result = SQL_TIMESTAMP; +#endif + } else if (strncmp(p, "datetime", 8) == 0) { +#ifdef SQL_TYPE_TIMESTAMP + result = ov3 ? SQL_TYPE_TIMESTAMP : SQL_TIMESTAMP; +#else + result = SQL_TIMESTAMP; +#endif + } else if (strncmp(p, "time", 4) == 0) { +#ifdef SQL_TYPE_TIME + result = ov3 ? SQL_TYPE_TIME : SQL_TIME; +#else + result = SQL_TIME; +#endif + } else if (strncmp(p, "date", 4) == 0) { +#ifdef SQL_TYPE_DATE + result = ov3 ? SQL_TYPE_DATE : SQL_DATE; +#else + result = SQL_DATE; +#endif +#ifdef SQL_LONGVARCHAR + } else if (strncmp(p, "text", 4) == 0 || + strncmp(p, "memo", 4) == 0) { + #ifdef WINTERFACE + result = nowchar ? 
SQL_LONGVARCHAR : SQL_WLONGVARCHAR; + #else + result = SQL_LONGVARCHAR; + #endif + #ifdef WINTERFACE + } else if (strncmp(p, "wtext", 5) == 0 || + strncmp(p, "wvarchar", 8) == 0 || + strncmp(p, "longwvarchar", 12) == 0) { + result = SQL_WLONGVARCHAR; + #endif +#endif +#ifdef SQL_BIT + } else if (strncmp(p, "bool", 4) == 0 || + strncmp(p, "bit", 3) == 0) { + result = SQL_BIT; +#endif +#ifdef SQL_BIGINT + } else if (strncmp(p, "bigint", 6) == 0) { + result = SQL_BIGINT; +#endif + } else if (strncmp(p, "blob", 4) == 0) { + result = SQL_BINARY; + } else if (strncmp(p, "varbinary", 9) == 0) { + result = SQL_VARBINARY; + } else if (strncmp(p, "longvarbinary", 13) == 0) { + result = SQL_LONGVARBINARY; + } + if (nosign) { + if (testsign) { + *nosign = strstr(p, "unsigned") != NULL; + } else { + *nosign = 1; + } + } + xfree(p); + return result; +} + +/** + * Get maximum display size and number of digits after decimal point + * from field type specification. + * @param typename field type specification + * @param sqltype target SQL data type + * @param mp pointer to maximum display size or NULL + * @param dp pointer to number of digits after decimal point or NULL + */ + +static void +getmd(const char *typename, int sqltype, int *mp, int *dp) +{ + int m = 0, d = 0; + + switch (sqltype) { + case SQL_INTEGER: + m = 10; + d = 9; + break; + case SQL_TINYINT: + m = 4; + d = 3; + break; + case SQL_SMALLINT: + m = 6; + d = 5; + break; + case SQL_FLOAT: + m = 25; + d = 24; + break; + case SQL_DOUBLE: + m = 54; + d = 53; + break; + case SQL_VARCHAR: + m = 255; + d = 0; + break; +#ifdef WINTERFACE + #ifdef SQL_WVARCHAR + case SQL_WVARCHAR: + m = 255; + d = 0; + break; + #endif +#endif +#ifdef SQL_TYPE_DATE + case SQL_TYPE_DATE: +#endif + case SQL_DATE: + m = 10; + d = 0; + break; +#ifdef SQL_TYPE_TIME + case SQL_TYPE_TIME: +#endif + case SQL_TIME: + m = 8; + d = 0; + break; +#ifdef SQL_TYPE_TIMESTAMP + case SQL_TYPE_TIMESTAMP: +#endif + case SQL_TIMESTAMP: + m = 32; + d = 0; + break; +#ifdef SQL_LONGVARCHAR + case SQL_LONGVARCHAR : + m = 65536; + d = 0; + break; +#endif +#ifdef WINTERFACE + #ifdef SQL_WLONGVARCHAR + case SQL_WLONGVARCHAR: + m = 65536; + d = 0; + break; + #endif +#endif + case SQL_VARBINARY: + m = 255; + d = 0; + break; + case SQL_LONGVARBINARY: + m = 65536; + d = 0; + break; +#ifdef SQL_BIGINT + case SQL_BIGINT: + m = 20; + d = 19; + break; +#endif +#ifdef SQL_BIT + case SQL_BIT: + m = 1; + d = 1; + break; +#endif + } + if (m && typename) { + int mm, dd; + + if (sscanf(typename, "%*[^(](%d)", &mm) == 1) { + m = d = mm; + } else if (sscanf(typename, "%*[^(](%d,%d)", &mm, &dd) == 2) { + m = mm; + d = dd; + } + } + if (mp) { + *mp = m; + } + if (dp) { + *dp = d; + } +} + +/** + * Map SQL_C_DEFAULT to proper C type. + * @param type input C type + * @param stype input SQL type + * @param nosign 0=signed, 0>unsigned, 0 0) ? SQL_C_ULONG : SQL_C_LONG; + break; + case SQL_TINYINT: + type = (nosign > 0) ? SQL_C_UTINYINT : SQL_C_TINYINT; + break; + case SQL_SMALLINT: + type = (nosign > 0) ? 
SQL_C_USHORT : SQL_C_SHORT; + break; + case SQL_FLOAT: + type = SQL_C_FLOAT; + break; + case SQL_DOUBLE: + type = SQL_C_DOUBLE; + break; + case SQL_TIMESTAMP: + type = SQL_C_TIMESTAMP; + break; + case SQL_TIME: + type = SQL_C_TIME; + break; + case SQL_DATE: + type = SQL_C_DATE; + break; +#ifdef SQL_C_TYPE_TIMESTAMP + case SQL_TYPE_TIMESTAMP: + type = SQL_C_TYPE_TIMESTAMP; + break; +#endif +#ifdef SQL_C_TYPE_TIME + case SQL_TYPE_TIME: + type = SQL_C_TYPE_TIME; + break; +#endif +#ifdef SQL_C_TYPE_DATE + case SQL_TYPE_DATE: + type = SQL_C_TYPE_DATE; + break; +#endif +#ifdef WINTERFACE + case SQL_WVARCHAR: + case SQL_WCHAR: + #ifdef SQL_WLONGVARCHAR + case SQL_WLONGVARCHAR: + #endif + type = nowchar ? SQL_C_CHAR : SQL_C_WCHAR; + break; +#endif + case SQL_BINARY: + case SQL_VARBINARY: + case SQL_LONGVARBINARY: + type = SQL_C_BINARY; + break; +#ifdef SQL_BIT + case SQL_BIT: + type = SQL_C_BIT; + break; +#endif +#ifdef SQL_BIGINT + case SQL_BIGINT: + type = (nosign > 0) ? SQL_C_UBIGINT : SQL_C_SBIGINT; + break; +#endif + default: +#ifdef WINTERFACE + type = nowchar ? SQL_C_CHAR : SQL_C_WCHAR; +#else + type = SQL_C_CHAR; +#endif + break; + } + } + return type; +} + +/** + * Fixup query string with optional parameter markers. + * @param sql original query string + * @param sqlLen length of query string or SQL_NTS + * @param nparam output number of parameters + * @param isselect output indicator for SELECT statement + * @param errmsg output error message + * @result newly allocated string containing query string for Hive or NULL + */ + +static char * +fixupsql(char *sql, int sqlLen, int *nparam, int *isselect, char **errmsg) +{ + char *q = sql, *qz = NULL, *p, *inq = NULL, *out; + int np = 0, isddl = -1, size; + + *errmsg = NULL; + if (sqlLen != SQL_NTS) { + qz = q = xmalloc(sqlLen + 1); + if (!qz) { + return NULL; + } + memcpy(q, sql, sqlLen); + q[sqlLen] = '\0'; + size = sqlLen * 4; + } else { + size = strlen(sql) * 4; + } + size += sizeof (char *) - 1; + size &= ~(sizeof (char *) - 1); + p = xmalloc(size); + if (!p) { +errout: + freep(&qz); + return NULL; + } + memset(p, 0, size); + out = p; + while (*q) { + switch (*q) { + case '\'': + case '\"': + if (q == inq) { + inq = NULL; + } else if (!inq) { + inq = q + 1; + + while (*inq) { + if (*inq == *q) { + if (inq[1] == *q) { + inq++; + } else { + break; + } + } + inq++; + } + } + *p++ = *q; + break; + case '?': + *p++ = *q; + if (!inq) { + np++; + } + break; + case ';': + if (!inq) { + if (isddl < 0) { + char *qq = out; + + while (*qq && ISSPACE(*qq)) { + ++qq; + } + if (*qq && *qq != ';') { + size = strlen(qq); + if ((size >= 5) && + (strncasecmp(qq, "create", 5) == 0)) { + isddl = 1; + } else if ((size >= 4) && + (strncasecmp(qq, "drop", 4) == 0)) { + isddl = 1; + } else { + isddl = 0; + } + } + } + if (isddl == 0) { + char *qq = q; + + do { + ++qq; + } while (*qq && ISSPACE(*qq)); + if (*qq && *qq != ';') { + freep(&out); + *errmsg = "only one SQL statement allowed"; + goto errout; + } + } + } + *p++ = *q; + break; + case '{': + /* deal with {d 'YYYY-MM-DD'}, {t ...}, and {ts ...} */ + if (!inq) { + char *end = q + 1; + + while (*end && *end != '}') { + ++end; + } + if (*end == '}') { + char *start = q + 1; + char *end2 = end - 1; + + while (start < end2 && *start != '\'') { + ++start; + } + while (end2 > start && *end2 != '\'') { + --end2; + } + if (*start == '\'' && *end2 == '\'') { + while (start <= end2) { + *p++ = *start; + ++start; + } + q = end; + break; + } + } + } + /* FALL THROUGH */ + default: + *p++ = *q; + } + ++q; + } + 
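
mapdeftype(), completed just above, is what later resolves SQL_C_DEFAULT bindings in drvputdata(). In short (editor's sketch):

    int ct;

    ct = mapdeftype(SQL_C_DEFAULT, SQL_INTEGER, 0, 0);  /* -> SQL_C_LONG */
    ct = mapdeftype(SQL_C_DEFAULT, SQL_INTEGER, 1, 0);  /* nosign > 0 -> SQL_C_ULONG */
    ct = mapdeftype(SQL_C_DEFAULT, SQL_VARCHAR, 0, 1);  /* nowchar forces SQL_C_CHAR,
                                                           even when WINTERFACE is set */
    ct = mapdeftype(SQL_C_LONG, SQL_INTEGER, 0, 0);     /* non-default C types pass
                                                           through unchanged */
    (void) ct;
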
freep(&qz); + *p = '\0'; + if (nparam) { + *nparam = np; + } + if (isselect) { + if (isddl > 0) { + *isselect = 0; + } else { + p = out; + while (*p && ISSPACE(*p)) { + ++p; + } + size = strlen(p); + *isselect = (size >= 6) && (strncasecmp(p, "select", 6) == 0); + } + } + return out; +} + +/** + * Find column given name in string array. + * @param cols string array + * @param ncols number of strings + * @param name column name + * @result >= 0 on success, -1 on error + */ + +static int +findcol(char **cols, int ncols, char *name) +{ + int i; + + if (cols) { + for (i = 0; i < ncols; i++) { + if (strcmp(cols[i], name) == 0) { + return i; + } + } + } + return -1; +} + + +/** + * Return number of month days. + * @param year + * @param month 1..12 + * @result number of month days or 0 + */ + +static int +getmdays(int year, int month) +{ + static const int mdays[] = { + 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 + }; + int mday; + + if (month < 1) { + return 0; + } + mday = mdays[(month - 1) % 12]; + if (mday == 28 && year % 4 == 0 && + (!(year % 100 == 0) || year % 400 == 0)) { + mday++; + } + return mday; +} + +/** + * Convert string to ODBC DATE_STRUCT. + * @param str string to be converted + * @param ds output DATE_STRUCT + * @result 0 on success, -1 on error + * + * Strings of the format 'YYYYMMDD' or 'YYYY-MM-DD' or + * 'YYYY/MM/DD' are converted to a DATE_STRUCT. + */ + +static int +str2date(char *str, DATE_STRUCT *ds) +{ + int i, err = 0; + char *p, *q; + + ds->year = ds->month = ds->day = 0; + p = str; + while (*p && !ISDIGIT(*p)) { + ++p; + } + q = p; + i = 0; + while (*q && !ISDIGIT(*q)) { + ++i; + ++q; + } + if (i >= 8) { + char buf[8]; + + strncpy(buf, p + 0, 4); + buf[4] = '\0'; + ds->year = strtol(buf, NULL, 10); + strncpy(buf, p + 4, 2); + buf[2] = '\0'; + ds->month = strtol(buf, NULL, 10); + strncpy(buf, p + 6, 2); + buf[2] = '\0'; + ds->day = strtol(buf, NULL, 10); + goto done; + } + i = 0; + while (i < 3) { + int n; + + q = NULL; + n = strtol(p, &q, 10); + if (!q || q == p) { + if (*q == '\0') { + if (i == 0) { + err = 1; + } + goto done; + } + } + if (*q == '-' || *q == '/' || *q == '\0' || i == 2) { + switch (i) { + case 0: + ds->year = n; + break; + case 1: + ds->month = n; + break; + case 2: + ds->day = n; + break; + } + ++i; + if (*q) { + ++q; + } + } else { + i = 0; + while (*q && !ISDIGIT(*q)) { + ++q; + } + } + p = q; + } +done: + /* final check for overflow */ + if (err || + ds->month < 1 || ds->month > 12 || + ds->day < 1 || ds->day > getmdays(ds->year, ds->month)) { + return -1; + } + return 0; +} + +/** + * Convert string to ODBC TIME_STRUCT. + * @param str string to be converted + * @param ts output TIME_STRUCT + * @result 0 on success, -1 on error + * + * Strings of the format 'HHMMSS' or 'HH:MM:SS' + * are converted to a TIME_STRUCT. 
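
fixupsql() is the statement preprocessor: it counts '?' markers outside of quoted strings, rejects multi-statement input unless it is DDL, unwraps the ODBC {d/t/ts '...'} escapes, and flags SELECT statements. A sketch of inputs and outcomes (the SQL text is made up, error handling trimmed):

    char *errmsg = NULL;
    int nparam, isselect;
    char *out;

    out = fixupsql((char *) "SELECT * FROM t WHERE a = ? AND b = {d '2011-07-04'}",
                   SQL_NTS, &nparam, &isselect, &errmsg);
    /* nparam == 1, isselect == 1, the date escape collapses to '2011-07-04' */
    freep(&out);

    out = fixupsql((char *) "DELETE FROM t; DELETE FROM u", SQL_NTS,
                   &nparam, &isselect, &errmsg);
    /* out == NULL, errmsg == "only one SQL statement allowed" */
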
+ */ + +static int +str2time(char *str, TIME_STRUCT *ts) +{ + int i, err = 0; + char *p, *q; + + ts->hour = ts->minute = ts->second = 0; + p = str; + while (*p && !ISDIGIT(*p)) { + ++p; + } + q = p; + i = 0; + while (*q && ISDIGIT(*q)) { + ++i; + ++q; + } + if (i >= 6) { + char buf[4]; + + strncpy(buf, p + 0, 2); + buf[2] = '\0'; + ts->hour = strtol(buf, NULL, 10); + strncpy(buf, p + 2, 2); + buf[2] = '\0'; + ts->minute = strtol(buf, NULL, 10); + strncpy(buf, p + 4, 2); + buf[2] = '\0'; + ts->second = strtol(buf, NULL, 10); + goto done; + } + i = 0; + while (i < 3) { + int n; + + q = NULL; + n = strtol(p, &q, 10); + if (!q || q == p) { + if (*q == '\0') { + if (i == 0) { + err = 1; + } + goto done; + } + } + if (*q == ':' || *q == '\0' || i == 2) { + switch (i) { + case 0: + ts->hour = n; + break; + case 1: + ts->minute = n; + break; + case 2: + ts->second = n; + break; + } + ++i; + if (*q) { + ++q; + } + } else { + i = 0; + while (*q && !ISDIGIT(*q)) { + ++q; + } + } + p = q; + } +done: + /* final check for overflow */ + if (err || ts->hour > 23 || ts->minute > 59 || ts->second > 59) { + return -1; + } + return 0; +} + +/** + * Convert string to ODBC TIMESTAMP_STRUCT. + * @param str string to be converted + * @param tss output TIMESTAMP_STRUCT + * @result 0 on success, -1 on error + * + * Strings of the format 'YYYYMMDDhhmmssff' or 'YYYY-MM-DD hh:mm:ss ff' + * or 'YYYY/MM/DD hh:mm:ss ff' or 'hh:mm:ss ff YYYY-MM-DD' are + * converted to a TIMESTAMP_STRUCT. The ISO8601 formats + * YYYY-MM-DDThh:mm:ss[.f]Z + * YYYY-MM-DDThh:mm:ss[.f]shh:mm + * are also supported. In case a time zone field is present, + * the resulting TIMESTAMP_STRUCT is expressed in UTC. + */ + +static int +str2timestamp(char *str, TIMESTAMP_STRUCT *tss) +{ + int i, m, n, err = 0; + char *p, *q, in = '\0'; + + tss->year = tss->month = tss->day = 0; + tss->hour = tss->minute = tss->second = 0; + tss->fraction = 0; + p = str; + while (*p && !ISDIGIT(*p)) { + ++p; + } + q = p; + i = 0; + while (*q && ISDIGIT(*q)) { + ++i; + ++q; + } + if (i >= 14) { + char buf[16]; + + strncpy(buf, p + 0, 4); + buf[4] = '\0'; + tss->year = strtol(buf, NULL, 10); + strncpy(buf, p + 4, 2); + buf[2] = '\0'; + tss->month = strtol(buf, NULL, 10); + strncpy(buf, p + 6, 2); + buf[2] = '\0'; + tss->day = strtol(buf, NULL, 10); + strncpy(buf, p + 8, 2); + buf[2] = '\0'; + tss->hour = strtol(buf, NULL, 10); + strncpy(buf, p + 10, 2); + buf[2] = '\0'; + tss->minute = strtol(buf, NULL, 10); + strncpy(buf, p + 12, 2); + buf[2] = '\0'; + tss->second = strtol(buf, NULL, 10); + if (i > 14) { + m = i - 14; + strncpy(buf, p + 14, m); + while (m < 9) { + buf[m] = '0'; + ++m; + } + buf[m] = '\0'; + tss->fraction = strtol(buf, NULL, 0); + } + m = 7; + goto done; + } + m = i = 0; + while ((m & 7) != 7) { + q = NULL; + n = strtol(p, &q, 10); + if (!q || q == p) { + if (*q == '\0') { + if (m < 1) { + err = 1; + } + goto done; + } + } + if (in == '\0') { + switch (*q) { + case '-': + case '/': + if ((m & 1) == 0) { + in = *q; + i = 0; + } + break; + case ':': + if ((m & 2) == 0) { + in = *q; + i = 0; + } + break; + case ' ': + case '.': + break; + default: + in = '\0'; + i = 0; + break; + } + } + switch (in) { + case '-': + case '/': + switch (i) { + case 0: + tss->year = n; + break; + case 1: + tss->month = n; + break; + case 2: + tss->day = n; + break; + } + if (++i >= 3) { + i = 0; + m |= 1; + if (!(m & 2)) { + m |= 8; + } + goto skip; + } else { + ++q; + } + break; + case ':': + switch (i) { + case 0: + tss->hour = n; + break; + case 1: + tss->minute = n; + break; 
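
A few representative inputs for the two converters above (editor's illustration; out-of-range values are rejected with -1):

    DATE_STRUCT ds;
    TIME_STRUCT ts;

    str2date("2011-07-04", &ds);        /* year 2011, month 7, day 4 */
    str2date("2011/07/04", &ds);        /* same, with '/' separators */
    str2time("13:45:07", &ts);          /* hour 13, minute 45, second 7 */
    str2time("134507", &ts);            /* compact HHMMSS form */
    if (str2date("2011-02-30", &ds) < 0) {
        /* February 30th fails the getmdays() range check */
    }
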
+ case 2: + tss->second = n; + break; + } + if (++i >= 3) { + i = 0; + m |= 2; + if (*q == '.') { + in = '.'; + goto skip2; + } + if (*q == ' ') { + if ((m & 1) == 0) { + char *e = NULL; + int dummy; + + dummy = strtol(q + 1, &e, 10); + if (e && *e == '-') { + goto skip; + } + } + in = '.'; + goto skip2; + } + goto skip; + } else { + ++q; + } + break; + case '.': + if (++i >= 1) { + int ndig = q - p; + + if (p[0] == '+' || p[0] == '-') { + ndig--; + } + while (ndig < 9) { + n = n * 10; + ++ndig; + } + tss->fraction = n; + m |= 4; + i = 0; + } + default: +skip: + in = '\0'; +skip2: + while (*q && !ISDIGIT(*q)) { + ++q; + } + } + p = q; + } + if ((m & 7) > 1 && (m & 8)) { + /* ISO8601 timezone */ + if (p > str && ISDIGIT(*p)) { + int nn, sign; + + q = p - 1; + if (*q != '+' && *q != '-') { + goto done; + } + sign = (*q == '+') ? -1 : 1; + q = NULL; + n = strtol(p, &q, 10); + if (!q || *q++ != ':' || !ISDIGIT(*q)) { + goto done; + } + p = q; + q = NULL; + nn = strtol(p, &q, 0); + tss->minute += nn * sign; + if ((SQLSMALLINT) tss->minute < 0) { + tss->hour -= 1; + tss->minute += 60; + } else if (tss->minute >= 60) { + tss->hour += 1; + tss->minute -= 60; + } + tss->hour += n * sign; + if ((SQLSMALLINT) tss->hour < 0) { + tss->day -= 1; + tss->hour += 24; + } else if (tss->hour >= 24) { + tss->day += 1; + tss->hour -= 24; + } + if ((short) tss->day < 1 || tss->day >= 28) { + int mday, pday, pmon; + + mday = getmdays(tss->year, tss->month); + pmon = tss->month - 1; + if (pmon < 1) { + pmon = 12; + } + pday = getmdays(tss->year, pmon); + if ((SQLSMALLINT) tss->day < 1) { + tss->month -= 1; + tss->day = pday; + } else if (tss->day > mday) { + tss->month += 1; + tss->day = 1; + } + if ((SQLSMALLINT) tss->month < 1) { + tss->year -= 1; + tss->month = 12; + } else if (tss->month > 12) { + tss->year += 1; + tss->month = 1; + } + } + } + } +done: + /* Replace missing year/month/day with current date */ + if (!err && (m & 1) == 0) { +#ifdef _WIN32 + SYSTEMTIME t; + + GetLocalTime(&t); + tss->year = t.wYear; + tss->month = t.wMonth; + tss->day = t.wDay; +#else + struct timeval tv; + struct tm tm; + + gettimeofday(&tv, NULL); + tm = *localtime(&tv.tv_sec); + tss->year = tm.tm_year + 1900; + tss->month = tm.tm_mon + 1; + tss->day = tm.tm_mday; +#endif + } + /* Normalize fraction */ + if (tss->fraction < 0) { + tss->fraction = 0; + } + /* Final check for overflow */ + if (err || + tss->month < 1 || tss->month > 12 || + tss->day < 1 || tss->day > getmdays(tss->year, tss->month) || + tss->hour > 23 || tss->minute > 59 || tss->second > 59) { + return -1; + } + return ((m & 7) < 1) ? -1 : 0; +} + +/** + * Get boolean flag from string. 
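
str2timestamp(), finished just above, additionally folds an ISO8601 time-zone suffix into UTC and substitutes the current local date when only a time is given. A sketch (illustrative values):

    TIMESTAMP_STRUCT tss;

    str2timestamp("2011-07-04 13:45:07", &tss);
    /* -> 2011-07-04 13:45:07, fraction 0 */

    str2timestamp("2011-07-04T13:45:07.5Z", &tss);
    /* -> fraction 500000000 (the field counts nanoseconds) */

    str2timestamp("2011-07-04T01:30:00.0+02:00", &tss);
    /* -> 2011-07-03 23:30:00 UTC: the +02:00 offset is subtracted */

    str2timestamp("13:45:07", &tss);
    /* -> today's local date, 13:45:07 */
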
+ * @param string string to be inspected + * @result true or false + */ + +static int +getbool(char *string) +{ + if (string) { + return string[0] && strchr("Yy123456789Tt", string[0]) != NULL; + } + return 0; +} + + +/** + * Trace function for HiveClient API calls + * @param d pointer to database connection handle + * @param fn HiveClient function name + * @param sql SQL string + */ + +static void +dbtraceapi(DBC *d, char *fn, const char *sql) +{ + if (fn && d->trace) { + if (sql) { + fprintf(d->trace, "-- %s: %s\n", fn, sql); + } else { + fprintf(d->trace, "-- %s\n", fn); + } + fflush(d->trace); + } +} + +/** + * Trace function for Hive return codes + * @param d pointer to database connection handle + * @param rc Hive return code + * @param err error string or NULL + */ +/* TODO: check for HIVE_SUCCESS_WITH_MORE_DATA */ +static void +dbtracerc(DBC *d, int rc, char *err) +{ + if (rc != HIVE_SUCCESS && d->trace) { + fprintf(d->trace, "-- HIVE ERROR CODE %d", rc); + fprintf(d->trace, err ? ": %s\n" : "\n", err); + fflush(d->trace); + } +} + + +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLBulkOperations(SQLHSTMT stmt, SQLSMALLINT oper) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} + +#ifndef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLDataSources(SQLHENV env, SQLUSMALLINT dir, SQLCHAR *srvname, + SQLSMALLINT buflen1, SQLSMALLINT *lenp1, + SQLCHAR *desc, SQLSMALLINT buflen2, SQLSMALLINT *lenp2) +{ + if (env == SQL_NULL_HENV) { + return SQL_INVALID_HANDLE; + } + return SQL_ERROR; +} +#endif + +#ifdef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLDataSourcesW(SQLHENV env, SQLUSMALLINT dir, SQLWCHAR *srvname, + SQLSMALLINT buflen1, SQLSMALLINT *lenp1, + SQLWCHAR *desc, SQLSMALLINT buflen2, SQLSMALLINT *lenp2) +{ + if (env == SQL_NULL_HENV) { + return SQL_INVALID_HANDLE; + } + return SQL_ERROR; +} +#endif + +#ifndef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLDrivers(SQLHENV env, SQLUSMALLINT dir, SQLCHAR *drvdesc, + SQLSMALLINT descmax, SQLSMALLINT *desclenp, + SQLCHAR *drvattr, SQLSMALLINT attrmax, SQLSMALLINT *attrlenp) +{ + if (env == SQL_NULL_HENV) { + return SQL_INVALID_HANDLE; + } + return SQL_ERROR; +} +#endif + +#ifdef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLDriversW(SQLHENV env, SQLUSMALLINT dir, SQLWCHAR *drvdesc, + SQLSMALLINT descmax, SQLSMALLINT *desclenp, + SQLWCHAR *drvattr, SQLSMALLINT attrmax, SQLSMALLINT *attrlenp) +{ + if (env == SQL_NULL_HENV) { + return SQL_INVALID_HANDLE; + } + return SQL_ERROR; +} +#endif + +#ifndef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLBrowseConnect(SQLHDBC dbc, SQLCHAR *connin, SQLSMALLINT conninLen, + SQLCHAR *connout, SQLSMALLINT connoutMax, + SQLSMALLINT *connoutLen) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvunimpldbc(dbc); + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLBrowseConnectW(SQLHDBC dbc, SQLWCHAR *connin, SQLSMALLINT conninLen, + SQLWCHAR *connout, SQLSMALLINT connoutMax, + SQLSMALLINT *connoutLen) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvunimpldbc(dbc); + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +/** + * Internal put (partial) parameter data into executing statement. 
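
getbool() treats a string as true when its first character is one of "Yy123456789Tt" (editor's sketch):

    getbool("yes");    /* 1 */
    getbool("true");   /* 1 */
    getbool("0");      /* 0 */
    getbool(NULL);     /* 0 */
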
+ * @param stmt statement handle + * @param data pointer to data + * @param len length of data + * @result ODBC error code + */ + +static SQLRETURN +drvputdata(SQLHSTMT stmt, SQLPOINTER data, SQLLEN len) +{ + STMT *s; + int i, dlen, done = 0; + BINDPARM *p; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (!s->query || s->nparams <= 0) { +seqerr: + setstat(s, -1, "sequence error", "HY010"); + return SQL_ERROR; + } + for (i = 0; i < s->nparams; i++) { + p = &s->bindparms[i]; + if (p->need > 0) { + int type = mapdeftype(p->type, p->stype, -1, s->nowchar[0]); + + if (len == SQL_NULL_DATA) { + freep(&p->parbuf); + p->param = NULL; + p->len = SQL_NULL_DATA; + p->need = -1; + } else if (type != SQL_C_CHAR +#ifdef WINTERFACE + && type != SQL_C_WCHAR +#endif + && type != SQL_C_BINARY) { + int size = 0; + + switch (type) { + case SQL_C_TINYINT: + case SQL_C_UTINYINT: + case SQL_C_STINYINT: +#ifdef SQL_BIT + case SQL_C_BIT: +#endif + size = sizeof (char); + break; + case SQL_C_SHORT: + case SQL_C_USHORT: + case SQL_C_SSHORT: + size = sizeof (short); + break; + case SQL_C_LONG: + case SQL_C_ULONG: + case SQL_C_SLONG: + size = sizeof (long); + break; +#ifdef SQL_BIGINT + case SQL_C_UBIGINT: + case SQL_C_SBIGINT: + size = sizeof (SQLBIGINT); + break; +#endif + case SQL_C_FLOAT: + size = sizeof (float); + break; + case SQL_C_DOUBLE: + size = sizeof (double); + break; +#ifdef SQL_C_TYPE_DATE + case SQL_C_TYPE_DATE: +#endif + case SQL_C_DATE: + size = sizeof (DATE_STRUCT); + break; +#ifdef SQL_C_TYPE_DATE + case SQL_C_TYPE_TIME: +#endif + case SQL_C_TIME: + size = sizeof (TIME_STRUCT); + break; +#ifdef SQL_C_TYPE_DATE + case SQL_C_TYPE_TIMESTAMP: +#endif + case SQL_C_TIMESTAMP: + size = sizeof (TIMESTAMP_STRUCT); + break; + } + freep(&p->parbuf); + p->parbuf = xmalloc(size); + if (!p->parbuf) { + return nomem(s); + } + p->param = p->parbuf; + memcpy(p->param, data, size); + p->len = size; + p->need = -1; + } else if (len == SQL_NTS && ( + type == SQL_C_CHAR +#ifdef WINTERFACE + || type == SQL_C_WCHAR +#endif + )) { + char *dp = data; + +#ifdef WINTERFACE + if (type == SQL_C_WCHAR) { + dp = uc_to_utf(data, len); + if (!dp) { + return nomem(s); + } + } +#endif + dlen = strlen(dp); + freep(&p->parbuf); + p->parbuf = xmalloc(dlen + 1); + if (!p->parbuf) { +#ifdef WINTERFACE + if (dp != data) { + uc_free(dp); + } +#endif + return nomem(s); + } + p->param = p->parbuf; + strcpy(p->param, dp); +#ifdef WINTERFACE + if (dp != data) { + uc_free(dp); + } +#endif + p->len = dlen; + p->need = -1; + } else if (len < 0) { + setstat(s, -1, "invalid length", "HY090"); + return SQL_ERROR; + } else { + dlen = min(p->len - p->offs, len); + if (!p->param) { + setstat(s, -1, "no memory for parameter", "HY013"); + return SQL_ERROR; + } + memcpy((char *) p->param + p->offs, data, dlen); + p->offs += dlen; + if (p->offs >= p->len) { +#ifdef WINTERFACE + if (type == SQL_C_WCHAR) { + char *dp = uc_to_utf(p->param, p->len); + char *np; + int nlen; + + if (!dp) { + return nomem(s); + } + nlen = strlen(dp); + np = xmalloc(nlen + 1); + if (!np) { + uc_free(dp); + return nomem(s); + } + strcpy(np, dp); + uc_free(dp); + if (p->param == p->parbuf) { + freep(&p->parbuf); + } + p->parbuf = p->param = np; + p->len = nlen; + } else { + *((char *) p->param + p->len) = '\0'; + } + p->need = (type == SQL_C_CHAR || type == SQL_C_WCHAR) + ? -1 : 0; +#else + *((char *) p->param + p->len) = '\0'; + p->need = (type == SQL_C_CHAR) ? 
-1 : 0; +#endif +#if defined(_WIN32) || defined(_WIN64) + if (p->type == SQL_C_WCHAR && + (p->stype == SQL_VARCHAR || + p->stype == SQL_LONGVARCHAR) && + p->len == p->coldef * sizeof (SQLWCHAR)) { + /* fix for MS-Access */ + p->len = p->coldef; + } +#endif + } + } + done = 1; + break; + } + } + if (!done) { + goto seqerr; + } + return SQL_SUCCESS; +} + +/** + * Put (partial) parameter data into executing statement. + * @param stmt statement handle + * @param data pointer to data + * @param len length of data + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLPutData(SQLHSTMT stmt, SQLPOINTER data, SQLLEN len) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvputdata(stmt, data, len); + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Clear out parameter bindings, if any. + * @param s statement pointer + */ + +static SQLRETURN +freeparams(STMT *s) +{ + if (s->bindparms) { + int n; + + for (n = 0; n < s->nbindparms; n++) { + freep(&s->bindparms[n].parbuf); + memset(&s->bindparms[n], 0, sizeof (BINDPARM)); + } + } + return SQL_SUCCESS; +} + + +/** + * Internal bind parameter on HSTMT. + * @param stmt statement handle + * @param pnum parameter number, starting at 1 + * @param iotype input/output type of parameter + * @param buftype type of host variable + * @param ptype + * @param coldef + * @param scale + * @param data pointer to host variable + * @param buflen length of host variable + * @param len output length pointer + * @result ODBC error code + */ + +static SQLRETURN +drvbindparam(SQLHSTMT stmt, SQLUSMALLINT pnum, SQLSMALLINT iotype, + SQLSMALLINT buftype, SQLSMALLINT ptype, SQLUINTEGER coldef, + SQLSMALLINT scale, + SQLPOINTER data, SQLINTEGER buflen, SQLLEN *len) +{ + STMT *s; + BINDPARM *p; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (pnum == 0) { + setstat(s, -1, "invalid parameter", (*s->ov3) ? "07009" : "S1093"); + return SQL_ERROR; + } + if (!data && (!len || (*len != SQL_NULL_DATA && + *len > SQL_LEN_DATA_AT_EXEC_OFFSET))) { + setstat(s, -1, "invalid buffer", "HY003"); + return SQL_ERROR; + } + if (len && *len < 0 && *len > SQL_LEN_DATA_AT_EXEC_OFFSET && + *len != SQL_NTS && *len != SQL_NULL_DATA) { + setstat(s, -1, "invalid length reference", "HY009"); + return SQL_ERROR; + } + --pnum; + if (s->bindparms) { + if (pnum >= s->nbindparms) { + BINDPARM *newparms; + + newparms = xrealloc(s->bindparms, + (pnum + 1) * sizeof (BINDPARM)); + if (!newparms) { +outofmem: + return nomem(s); + } + s->bindparms = newparms; + memset(&s->bindparms[s->nbindparms], 0, + (pnum + 1 - s->nbindparms) * sizeof (BINDPARM)); + s->nbindparms = pnum + 1; + } + } else { + int npar = max(10, pnum + 1); + + s->bindparms = xmalloc(npar * sizeof (BINDPARM)); + if (!s->bindparms) { + goto outofmem; + } + memset(s->bindparms, 0, npar * sizeof (BINDPARM)); + s->nbindparms = npar; + } + p = &s->bindparms[pnum]; + p->type = buftype; + p->stype = ptype; + p->coldef = coldef; + p->scale = scale; + p->max = buflen; + p->inc = buflen; + p->lenp = p->lenp0 = len; + p->offs = 0; + p->len = 0; + p->param0 = data; + freep(&p->parbuf); + p->param = p->param0; + p->bound = 1; + p->need = 0; + if (p->lenp && *p->lenp <= SQL_LEN_DATA_AT_EXEC_OFFSET) { + p->need = 1; + } + return SQL_SUCCESS; +} + +/** + * Bind parameter on HSTMT. 
+ * @param stmt statement handle + * @param pnum parameter number, starting at 1 + * @param iotype input/output type of parameter + * @param buftype type of host variable + * @param ptype + * @param coldef + * @param scale + * @param data pointer to host variable + * @param buflen length of host variable + * @param len output length pointer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLBindParameter(SQLHSTMT stmt, SQLUSMALLINT pnum, SQLSMALLINT iotype, + SQLSMALLINT buftype, SQLSMALLINT ptype, SQLULEN coldef, + SQLSMALLINT scale, + SQLPOINTER data, SQLLEN buflen, SQLLEN *len) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvbindparam(stmt, pnum, iotype, buftype, ptype, coldef, + scale, data, buflen, len); + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Bind parameter on HSTMT. + * @param stmt statement handle + * @param pnum parameter number, starting at 1 + * @param vtype input/output type of parameter + * @param ptype + * @param lenprec + * @param scale + * @param val pointer to host variable + * @param lenp output length pointer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLBindParam(SQLHSTMT stmt, SQLUSMALLINT pnum, SQLSMALLINT vtype, + SQLSMALLINT ptype, SQLULEN lenprec, + SQLSMALLINT scale, SQLPOINTER val, + SQLLEN *lenp) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvbindparam(stmt, pnum, SQL_PARAM_INPUT, vtype, ptype, + lenprec, scale, val, 0, lenp); + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Return number of parameters. + * @param stmt statement handle + * @param nparam output parameter count + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLNumParams(SQLHSTMT stmt, SQLSMALLINT *nparam) +{ + STMT *s; + SQLSMALLINT dummy; + + HSTMT_LOCK(stmt); + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (!nparam) { + nparam = &dummy; + } + *nparam = s->nparams; + HSTMT_UNLOCK(stmt); + return SQL_SUCCESS; +} + +/** + * Setup parameter buffer for deferred parameter. + * @param s pointer to STMT + * @param p pointer to BINDPARM + * @result ODBC error code (success indicated by SQL_NEED_DATA) + */ + +static SQLRETURN +setupparbuf(STMT *s, BINDPARM *p) +{ + if (!p->parbuf) { + p->len = SQL_LEN_DATA_AT_EXEC(*p->lenp); + if (p->len < 0 && p->len != SQL_NTS && + p->len != SQL_NULL_DATA) { + setstat(s, -1, "invalid length", "HY009"); + return SQL_ERROR; + } + if (p->len >= 0) { + p->parbuf = xmalloc(p->len + 1); + if (!p->parbuf) { + return nomem(s); + } + p->param = p->parbuf; + } else { + p->param = NULL; + } + } + return SQL_NEED_DATA; +} + +/** + * Retrieve next parameter for sending data to executing query. + * @param stmt statement handle + * @param pind pointer to output parameter indicator + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLParamData(SQLHSTMT stmt, SQLPOINTER *pind) +{ + STMT *s; + int i; + SQLPOINTER dummy; + SQLRETURN ret; + + HSTMT_LOCK(stmt); + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (!pind) { + pind = &dummy; + } + for (i = 0; i < s->nparams; i++) { + BINDPARM *p = &s->bindparms[i]; + + if (p->need > 0) { + *pind = (SQLPOINTER) p->param0; + ret = setupparbuf(s, p); + goto done; + } + } + ret = drvexecute(stmt, 0); +done: + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Return information about parameter. 
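
Together, SQLBindParameter(), SQLParamData() and SQLPutData() above implement the usual ODBC data-at-execution handshake: a parameter bound with an SQL_LEN_DATA_AT_EXEC(total) indicator is supplied later, chunk by chunk, and the final SQLParamData() call lets the statement run. A client-side sketch; the statement text, table name and token value are made up, hstmt is assumed to be an allocated statement handle, error checks are trimmed, and whether SQL_NEED_DATA comes back from SQLExecDirect() or SQLExecute() depends on drvexecute(), which is outside this hunk:

    const char part1[] = "first half, ";
    const char part2[] = "second half";
    SQLLEN ind = SQL_LEN_DATA_AT_EXEC((SQLLEN) (sizeof (part1) + sizeof (part2) - 2));
    SQLPOINTER token;
    SQLRETURN rc;

    SQLBindParameter(hstmt, 1, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_LONGVARCHAR,
                     0, 0, (SQLPOINTER) 1 /* echoed back by SQLParamData() */,
                     0, &ind);
    rc = SQLExecDirect(hstmt, (SQLCHAR *) "INSERT INTO t VALUES (?)", SQL_NTS);
    if (rc == SQL_NEED_DATA && SQLParamData(hstmt, &token) == SQL_NEED_DATA) {
        SQLPutData(hstmt, (SQLPOINTER) part1, (SQLLEN) (sizeof (part1) - 1));
        SQLPutData(hstmt, (SQLPOINTER) part2, (SQLLEN) (sizeof (part2) - 1));
        rc = SQLParamData(hstmt, &token);   /* all data supplied; execution proceeds */
    }
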
+ * @param stmt statement handle + * @param pnum parameter number, starting at 1 + * @param dtype output type indicator + * @param size output size indicator + * @param decdigits output number of digits + * @param nullable output NULL allowed indicator + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLDescribeParam(SQLHSTMT stmt, SQLUSMALLINT pnum, SQLSMALLINT *dtype, + SQLULEN *size, SQLSMALLINT *decdigits, SQLSMALLINT *nullable) +{ + STMT *s; + SQLRETURN ret = SQL_ERROR; + + HSTMT_LOCK(stmt); + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + --pnum; + if (pnum >= s->nparams) { + setstat(s, -1, "invalid parameter index", + (*s->ov3) ? "HY000" : "S1000"); + goto done; + } + if (dtype) { +#ifdef SQL_LONGVARCHAR + #ifdef WINTERFACE + *dtype = s->nowchar[0] ? SQL_LONGVARCHAR : SQL_WLONGVARCHAR; + #else + *dtype = SQL_LONGVARCHAR; + #endif +#else + #ifdef WINTERFACE + *dtype = s->nowchar[0] ? SQL_VARCHAR : SQL_WVARCHAR; + #else + *dtype = SQL_VARCHAR; + #endif +#endif + } + if (size) { +#ifdef SQL_LONGVARCHAR + *size = 65536; +#else + *size = 255; +#endif + } + if (decdigits) { + *decdigits = 0; + } + if (nullable) { + *nullable = SQL_NULLABLE; + } + ret = SQL_SUCCESS; +done: + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Set information on parameter. + * @param stmt statement handle + * @param par parameter number, starting at 1 + * @param type type of host variable + * @param sqltype + * @param coldef + * @param scale + * @param val pointer to host variable + * @param nval output length pointer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetParam(SQLHSTMT stmt, SQLUSMALLINT par, SQLSMALLINT type, + SQLSMALLINT sqltype, SQLULEN coldef, + SQLSMALLINT scale, SQLPOINTER val, SQLLEN *nval) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvbindparam(stmt, par, SQL_PARAM_INPUT, + type, sqltype, coldef, scale, val, + SQL_SETPARAM_VALUE_MAX, nval); + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLParamOptions(SQLHSTMT stmt, SQLULEN rows, SQLULEN *rowp) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} + +#ifndef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLGetDescField(SQLHDESC handle, SQLSMALLINT recno, + SQLSMALLINT fieldid, SQLPOINTER value, + SQLINTEGER buflen, SQLINTEGER *strlen) +{ + /* Not implemented */ + return SQL_ERROR; +} +#endif + +#ifdef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLGetDescFieldW(SQLHDESC handle, SQLSMALLINT recno, + SQLSMALLINT fieldid, SQLPOINTER value, + SQLINTEGER buflen, SQLINTEGER *strlen) +{ + /* Not implemented */ + return SQL_ERROR; +} +#endif + +#ifndef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLSetDescField(SQLHDESC handle, SQLSMALLINT recno, + SQLSMALLINT fieldid, SQLPOINTER value, + SQLINTEGER buflen) +{ + /* Not implemented */ + return SQL_ERROR; +} +#endif + +#ifdef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLSetDescFieldW(SQLHDESC handle, SQLSMALLINT recno, + SQLSMALLINT fieldid, SQLPOINTER value, + SQLINTEGER buflen) +{ + /* Not implemented */ + return SQL_ERROR; +} +#endif + +#ifndef WINTERFACE +/** + * Function not implemented. 
+ */ + +SQLRETURN SQL_API +SQLGetDescRec(SQLHDESC handle, SQLSMALLINT recno, + SQLCHAR *name, SQLSMALLINT buflen, + SQLSMALLINT *strlen, SQLSMALLINT *type, + SQLSMALLINT *subtype, SQLLEN *len, + SQLSMALLINT *prec, SQLSMALLINT *scale, + SQLSMALLINT *nullable) +{ + /* Not implemented */ + return SQL_ERROR; +} +#endif + +#ifdef WINTERFACE +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLGetDescRecW(SQLHDESC handle, SQLSMALLINT recno, + SQLWCHAR *name, SQLSMALLINT buflen, + SQLSMALLINT *strlen, SQLSMALLINT *type, + SQLSMALLINT *subtype, SQLLEN *len, + SQLSMALLINT *prec, SQLSMALLINT *scale, + SQLSMALLINT *nullable) +{ + /* Not implemented */ + return SQL_ERROR; +} +#endif + +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLSetDescRec(SQLHDESC handle, SQLSMALLINT recno, + SQLSMALLINT type, SQLSMALLINT subtype, + SQLLEN len, SQLSMALLINT prec, + SQLSMALLINT scale, SQLPOINTER data, + SQLLEN *strlen, SQLLEN *indicator) +{ + /* Not implemented */ + return SQL_ERROR; +} + +/** + * Setup empty result set from constant column specification. + * @param stmt statement handle + * @param colspec column specification array (default, ODBC2) + * @param ncols number of columns (default, ODBC2) + * @param colspec3 column specification array (ODBC3) + * @param ncols3 number of columns (ODBC3) + * @param nret returns number of columns + * @result ODBC error code + */ + +static SQLRETURN +mkresultset(HSTMT stmt, COL *colspec, int ncols, COL *colspec3, + int ncols3, int *nret) +{ + STMT *s; + DBC *d; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (s->dbc == SQL_NULL_HDBC) { +noconn: + return noconn(s); + } + d = (DBC *) s->dbc; + if (!d->hive_conn) { + goto noconn; + } + freeresult(s, 0); + if (colspec3 && *s->ov3) { + s->ncols = ncols3; + s->cols = colspec3; + } else { + s->ncols = ncols; + s->cols = colspec; + } + mkbindcols(s, s->ncols); + s->nowchar[1] = 1; + s->nrows = 0; + s->rowp = -1; + s->isselect = -1; + if (nret) { + *nret = s->ncols; + } + return SQL_SUCCESS; +} + + +#if !defined(WINTERFACE) || (defined(HAVE_UNIXODBC) && HAVE_UNIXODBC) +/** + * Retrieve privileges on tables and/or views. + * @param stmt statement handle + * @param catalog catalog name/pattern or NULL + * @param catalogLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLTablePrivileges(SQLHSTMT stmt, + SQLCHAR *catalog, SQLSMALLINT catalogLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#if !defined(HAVE_UNIXODBC) || !HAVE_UNIXODBC + #ifdef WINTERFACE +/** + * Retrieve privileges on tables and/or views (UNICODE version). 
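
mkresultset() is used by catalog entry points such as SQLProcedures() further down to return an empty but well-formed result set: it installs a constant column specification (ODBC2 or ODBC3 flavour, chosen by the connection's ov3 flag) and reports zero rows. A sketch of a caller; the COL initializers follow the { db, table, column, type, size } layout of the procSpec2/procSpec3 arrays later in this file, and SQLFoo is a made-up entry point:

    static COL fooSpec2[] = {
        { "SYSTEM", "FOO", "FOO_QUALIFIER", SCOL_VARCHAR, 50 },
        { "SYSTEM", "FOO", "FOO_NAME",      SCOL_VARCHAR, 255 }
    };
    static COL fooSpec3[] = {
        { "SYSTEM", "FOO", "FOO_CAT",  SCOL_VARCHAR, 50 },
        { "SYSTEM", "FOO", "FOO_NAME", SCOL_VARCHAR, 255 }
    };

    /* inside the hypothetical SQLFoo(): */
    ret = mkresultset(stmt, fooSpec2, array_size(fooSpec2),
                      fooSpec3, array_size(fooSpec3), NULL);
    /* the application sees an empty result set with fully described columns */
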
+ * @param stmt statement handle + * @param catalog catalog name/pattern or NULL + * @param catalogLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLTablePrivilegesW(SQLHSTMT stmt, + SQLWCHAR *catalog, SQLSMALLINT catalogLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *table, SQLSMALLINT tableLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} + #endif +#endif + + +#if !defined(WINTERFACE) || (defined(HAVE_UNIXODBC) && HAVE_UNIXODBC) +/** + * Retrieve privileges on columns. + * @param stmt statement handle + * @param catalog catalog name/pattern or NULL + * @param catalogLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param column column name or NULL + * @param columnLen length of column name or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLColumnPrivileges(SQLHSTMT stmt, + SQLCHAR *catalog, SQLSMALLINT catalogLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen, + SQLCHAR *column, SQLSMALLINT columnLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#if !defined(HAVE_UNIXODBC) || !HAVE_UNIXODBC + #ifdef WINTERFACE +/** + * Retrieve privileges on columns (UNICODE version). + * @param stmt statement handle + * @param catalog catalog name/pattern or NULL + * @param catalogLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param column column name or NULL + * @param columnLen length of column name or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLColumnPrivilegesW(SQLHSTMT stmt, + SQLWCHAR *catalog, SQLSMALLINT catalogLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *table, SQLSMALLINT tableLen, + SQLWCHAR *column, SQLSMALLINT columnLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} + #endif +#endif + + +/** + * Internal retrieve information about indexed columns. 
+ * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @result ODBC error code + */ +static SQLRETURN +drvprimarykey(SQLHSTMT stmt, + SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen) +{ + STMT *s; + DBC *d; + SQLRETURN ret; + HiveReturn rc; + hive_err_info hive_error; + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (SQL_NULL_HDBC == s->dbc) { + return noconn(s); + } + d = (DBC *) s->dbc; + if (NULL == d->hive_conn) { + return noconn(s); + } + rc = DBPrimaryKeys(d->hive_conn, &(s->hive_resultset), + &hive_error, sizeof(hive_error.err_buf)); + + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + freeresult(s, 0); + return SQL_ERROR; + } + + ret = setupdyncols(s); + if (SQL_SUCCESS != ret) { + /* TODO: log error */ + freeresult(s, 1); + return ret; + } + + return SQL_SUCCESS; +} + + +#ifndef WINTERFACE +/** + * Retrieve information about indexed columns. + * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLPrimaryKeys(SQLHSTMT stmt, + SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvprimarykey(stmt, cat, catLen, + schema, schemaLen, table, tableLen); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve information about indexed columns (UNICODE version). + * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLPrimaryKeysW(SQLHSTMT stmt, + SQLWCHAR *cat, SQLSMALLINT catLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *table, SQLSMALLINT tableLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifndef WINTERFACE +/** + * Retrieve information about indexed columns. + * @param stmt statement handle + * @param id type of information, e.g. 
best row id + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param scope + * @param nullable + * @result ODBC error code + */ +static SQLRETURN +drvspecialcolumns(SQLHSTMT stmt, SQLUSMALLINT id, + SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen, + SQLUSMALLINT scope, SQLUSMALLINT nullable) +{ + STMT *s; + DBC *d; + SQLRETURN ret; + HiveReturn rc; + hive_err_info hive_error; + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (SQL_NULL_HDBC == s->dbc) { + return noconn(s); + } + d = (DBC *) s->dbc; + if (NULL == d->hive_conn) { + return noconn(s); + } + rc = DBSpecialColumns(d->hive_conn, &(s->hive_resultset), + &hive_error, sizeof(hive_error.err_buf)); + + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + freeresult(s, 0); + return SQL_ERROR; + } + + ret = setupdyncols(s); + if (SQL_SUCCESS != ret) { + /* TODO: log error */ + freeresult(s, 1); + return ret; + } + + return SQL_SUCCESS; +} + + +SQLRETURN SQL_API +SQLSpecialColumns(SQLHSTMT stmt, SQLUSMALLINT id, + SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen, + SQLUSMALLINT scope, SQLUSMALLINT nullable) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvspecialcolumns(stmt, id, cat, catLen, + schema, schemaLen,table, tableLen, + scope, nullable); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve information about indexed columns (UNICODE version). + * @param stmt statement handle + * @param id type of information, e.g. best row id + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param scope + * @param nullable + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSpecialColumnsW(SQLHSTMT stmt, SQLUSMALLINT id, + SQLWCHAR *cat, SQLSMALLINT catLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *table, SQLSMALLINT tableLen, + SQLUSMALLINT scope, SQLUSMALLINT nullable) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + + +#ifndef WINTERFACE +/** + * Retrieve information about primary/foreign keys. 
+ * @param stmt statement handle + * @param PKcatalog primary key catalog name/pattern or NULL + * @param PKcatalogLen length of PKcatalog or SQL_NTS + * @param PKschema primary key schema name/pattern or NULL + * @param PKschemaLen length of PKschema or SQL_NTS + * @param PKtable primary key table name/pattern or NULL + * @param PKtableLen length of PKtable or SQL_NTS + * @param FKcatalog foreign key catalog name/pattern or NULL + * @param FKcatalogLen length of FKcatalog or SQL_NTS + * @param FKschema foreign key schema name/pattern or NULL + * @param FKschemaLen length of FKschema or SQL_NTS + * @param FKtable foreign key table name/pattern or NULL + * @param FKtableLen length of FKtable or SQL_NTS + * @result ODBC error code + */ + +static SQLRETURN +drvforeignkey(SQLHSTMT stmt, + SQLCHAR *PKcatalog, SQLSMALLINT PKcatalogLen, + SQLCHAR *PKschema, SQLSMALLINT PKschemaLen, + SQLCHAR *PKtable, SQLSMALLINT PKtableLen, + SQLCHAR *FKcatalog, SQLSMALLINT FKcatalogLen, + SQLCHAR *FKschema, SQLSMALLINT FKschemaLen, + SQLCHAR *FKtable, SQLSMALLINT FKtableLen) +{ + STMT *s; + DBC *d; + SQLRETURN ret; + HiveReturn rc; + hive_err_info hive_error; + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (SQL_NULL_HDBC == s->dbc) { + return noconn(s); + } + d = (DBC *) s->dbc; + if (NULL == d->hive_conn) { + return noconn(s); + } + rc = DBForeignKeys(d->hive_conn, &(s->hive_resultset), + &hive_error, sizeof(hive_error.err_buf)); + + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + freeresult(s, 0); + return SQL_ERROR; + } + + ret = setupdyncols(s); + if (SQL_SUCCESS != ret) { + /* TODO: log error */ + freeresult(s, 1); + return ret; + } + + return SQL_SUCCESS; +} + +SQLRETURN SQL_API +SQLForeignKeys(SQLHSTMT stmt, + SQLCHAR *PKcatalog, SQLSMALLINT PKcatalogLen, + SQLCHAR *PKschema, SQLSMALLINT PKschemaLen, + SQLCHAR *PKtable, SQLSMALLINT PKtableLen, + SQLCHAR *FKcatalog, SQLSMALLINT FKcatalogLen, + SQLCHAR *FKschema, SQLSMALLINT FKschemaLen, + SQLCHAR *FKtable, SQLSMALLINT FKtableLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvforeignkey(stmt, PKcatalog, PKcatalogLen, + PKschema, PKschemaLen, PKtable, PKtableLen, + FKcatalog, FKcatalogLen, FKschema, FKschemaLen, + FKtable, FKtableLen); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve information about primary/foreign keys (UNICODE version). 
+ * @param stmt statement handle + * @param PKcatalog primary key catalog name/pattern or NULL + * @param PKcatalogLen length of PKcatalog or SQL_NTS + * @param PKschema primary key schema name/pattern or NULL + * @param PKschemaLen length of PKschema or SQL_NTS + * @param PKtable primary key table name/pattern or NULL + * @param PKtableLen length of PKtable or SQL_NTS + * @param FKcatalog foreign key catalog name/pattern or NULL + * @param FKcatalogLen length of FKcatalog or SQL_NTS + * @param FKschema foreign key schema name/pattern or NULL + * @param FKschemaLen length of FKschema or SQL_NTS + * @param FKtable foreign key table name/pattern or NULL + * @param FKtableLen length of FKtable or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLForeignKeysW(SQLHSTMT stmt, + SQLWCHAR *PKcatalog, SQLSMALLINT PKcatalogLen, + SQLWCHAR *PKschema, SQLSMALLINT PKschemaLen, + SQLWCHAR *PKtable, SQLSMALLINT PKtableLen, + SQLWCHAR *FKcatalog, SQLSMALLINT FKcatalogLen, + SQLWCHAR *FKschema, SQLSMALLINT FKschemaLen, + SQLWCHAR *FKtable, SQLSMALLINT FKtableLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + + +/** + * Commit or rollback transaction. + * @param type type of handle + * @param handle HDBC, HENV, or HSTMT handle + * @param comptype SQL_COMMIT or SQL_ROLLBACK + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLEndTran(SQLSMALLINT type, SQLHANDLE handle, SQLSMALLINT comptype) +{ + /* not supported */ + return SQL_ERROR; +} + +/** + * Commit or rollback transaction. + * @param env environment handle or NULL + * @param dbc database connection handle or NULL + * @param type SQL_COMMIT or SQL_ROLLBACK + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLTransact(SQLHENV env, SQLHDBC dbc, SQLUSMALLINT type) +{ + /* not supported */ + return SQL_ERROR; +} + +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLCopyDesc(SQLHDESC source, SQLHDESC target) +{ + return SQL_ERROR; +} + +#ifndef WINTERFACE +/** + * Translate SQL string. + * @param stmt statement handle + * @param sqlin input string + * @param sqlinLen length of input string + * @param sql output string + * @param sqlMax max space in output string + * @param sqlLen value return for length of output string + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLNativeSql(SQLHSTMT stmt, SQLCHAR *sqlin, SQLINTEGER sqlinLen, + SQLCHAR *sql, SQLINTEGER sqlMax, SQLINTEGER *sqlLen) +{ + int outLen = 0; + SQLRETURN ret = SQL_SUCCESS; + + HSTMT_LOCK(stmt); + if (sqlinLen == SQL_NTS) { + sqlinLen = strlen((char *) sqlin); + } + if (sql) { + if (sqlMax > 0) { + strncpy((char *) sql, (char *) sqlin, sqlMax - 1); + sqlin[sqlMax - 1] = '\0'; + outLen = min(sqlMax - 1, sqlinLen); + } + } else { + outLen = sqlinLen; + } + if (sqlLen) { + *sqlLen = outLen; + } + if (sql && outLen < sqlinLen) { + setstat((STMT *) stmt, -1, "data right truncated", "01004"); + ret = SQL_SUCCESS_WITH_INFO; + } + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Translate SQL string (UNICODE version). 
+ * @param stmt statement handle + * @param sqlin input string + * @param sqlinLen length of input string + * @param sql output string + * @param sqlMax max space in output string + * @param sqlLen value return for length of output string + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLNativeSqlW(SQLHSTMT stmt, SQLWCHAR *sqlin, SQLINTEGER sqlinLen, + SQLWCHAR *sql, SQLINTEGER sqlMax, SQLINTEGER *sqlLen) +{ + int outLen = 0; + SQLRETURN ret = SQL_SUCCESS; + + HSTMT_LOCK(stmt); + if (sqlinLen == SQL_NTS) { + sqlinLen = uc_strlen(sqlin); + } + if (sql) { + if (sqlMax > 0) { + uc_strncpy(sql, sqlin, sqlMax - 1); + sqlin[sqlMax - 1] = 0; + outLen = min(sqlMax - 1, sqlinLen); + } + } else { + outLen = sqlinLen; + } + if (sqlLen) { + *sqlLen = outLen; + } + if (sql && outLen < sqlinLen) { + setstat((STMT *) stmt, -1, "data right truncated", "01004"); + ret = SQL_SUCCESS_WITH_INFO; + } + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Columns for result set of SQLProcedures(). + */ + +static COL procSpec2[] = { + { "SYSTEM", "PROCEDURE", "PROCEDURE_QUALIFIER", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCEDURE", "PROCEDURE_OWNER", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCEDURE", "PROCEDURE_NAME", SCOL_VARCHAR, 255 }, + { "SYSTEM", "PROCEDURE", "NUM_INPUT_PARAMS", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCEDURE", "NUM_OUTPUT_PARAMS", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCEDURE", "NUM_RESULT_SETS", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCEDURE", "REMARKS", SCOL_VARCHAR, 255 }, + { "SYSTEM", "PROCEDURE", "PROCEDURE_TYPE", SQL_SMALLINT, 5 } +}; + +static COL procSpec3[] = { + { "SYSTEM", "PROCEDURE", "PROCEDURE_CAT", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCEDURE", "PROCEDURE_SCHEM", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCEDURE", "PROCEDURE_NAME", SCOL_VARCHAR, 255 }, + { "SYSTEM", "PROCEDURE", "NUM_INPUT_PARAMS", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCEDURE", "NUM_OUTPUT_PARAMS", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCEDURE", "NUM_RESULT_SETS", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCEDURE", "REMARKS", SCOL_VARCHAR, 255 }, + { "SYSTEM", "PROCEDURE", "PROCEDURE_TYPE", SQL_SMALLINT, 5 } +}; + +#ifndef WINTERFACE +/** + * Retrieve information about stored procedures. + * @param stmt statement handle + * @param catalog catalog name/pattern or NULL + * @param catalogLen length of catalog or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema or SQL_NTS + * @param proc procedure name/pattern or NULL + * @param procLen length of proc or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLProcedures(SQLHSTMT stmt, + SQLCHAR *catalog, SQLSMALLINT catalogLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *proc, SQLSMALLINT procLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = mkresultset(stmt, procSpec2, array_size(procSpec2), + procSpec3, array_size(procSpec3), NULL); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve information about stored procedures (UNICODE version). 
+ * @param stmt statement handle + * @param catalog catalog name/pattern or NULL + * @param catalogLen length of catalog or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema or SQL_NTS + * @param proc procedure name/pattern or NULL + * @param procLen length of proc or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLProceduresW(SQLHSTMT stmt, + SQLWCHAR *catalog, SQLSMALLINT catalogLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *proc, SQLSMALLINT procLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = mkresultset(stmt, procSpec2, array_size(procSpec2), + procSpec3, array_size(procSpec3), NULL); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Columns for result set of SQLProcedureColumns(). + */ + +static COL procColSpec2[] = { + { "SYSTEM", "PROCCOL", "PROCEDURE_QUALIFIER", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "PROCEDURE_OWNER", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "PROCEDURE_NAME", SCOL_VARCHAR, 255 }, + { "SYSTEM", "PROCCOL", "COLUMN_NAME", SCOL_VARCHAR, 255 }, + { "SYSTEM", "PROCCOL", "COLUMN_TYPE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "DATA_TYPE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "TYPE_NAME", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "PRECISION", SQL_INTEGER, 10 }, + { "SYSTEM", "PROCCOL", "LENGTH", SQL_INTEGER, 10 }, + { "SYSTEM", "PROCCOL", "SCALE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "RADIX", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "NULLABLE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "REMARKS", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "COLUMN_DEF", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "SQL_DATA_TYPE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "SQL_DATETIME_SUB", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "CHAR_OCTET_LENGTH", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "ORDINAL_POSITION", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "IS_NULLABLE", SCOL_VARCHAR, 50 } +}; + +static COL procColSpec3[] = { + { "SYSTEM", "PROCCOL", "PROCEDURE_CAT", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "PROCEDURE_SCHEM", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "PROCEDURE_NAME", SCOL_VARCHAR, 255 }, + { "SYSTEM", "PROCCOL", "COLUMN_NAME", SCOL_VARCHAR, 255 }, + { "SYSTEM", "PROCCOL", "COLUMN_TYPE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "DATA_TYPE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "TYPE_NAME", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "COLUMN_SIZE", SQL_INTEGER, 10 }, + { "SYSTEM", "PROCCOL", "BUFFER_LENGTH", SQL_INTEGER, 10 }, + { "SYSTEM", "PROCCOL", "DECIMAL_DIGITS", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "NUM_PREC_RADIX", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "NULLABLE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "REMARKS", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "COLUMN_DEF", SCOL_VARCHAR, 50 }, + { "SYSTEM", "PROCCOL", "SQL_DATA_TYPE", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "SQL_DATETIME_SUB", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "CHAR_OCTET_LENGTH", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "ORDINAL_POSITION", SQL_SMALLINT, 5 }, + { "SYSTEM", "PROCCOL", "IS_NULLABLE", SCOL_VARCHAR, 50 } +}; + +#ifndef WINTERFACE +/** + * Retrieve information about columns in result set of stored procedures. 
+ * @param stmt statement handle + * @param catalog catalog name/pattern or NULL + * @param catalogLen length of catalog or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema or SQL_NTS + * @param proc procedure name/pattern or NULL + * @param procLen length of proc or SQL_NTS + * @param column column name/pattern or NULL + * @param columnLen length of column or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLProcedureColumns(SQLHSTMT stmt, + SQLCHAR *catalog, SQLSMALLINT catalogLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *proc, SQLSMALLINT procLen, + SQLCHAR *column, SQLSMALLINT columnLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = mkresultset(stmt, procColSpec2, array_size(procColSpec2), + procColSpec3, array_size(procColSpec3), NULL); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve information about columns in result + * set of stored procedures (UNICODE version). + * @param stmt statement handle + * @param catalog catalog name/pattern or NULL + * @param catalogLen length of catalog or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema or SQL_NTS + * @param proc procedure name/pattern or NULL + * @param procLen length of proc or SQL_NTS + * @param column column name/pattern or NULL + * @param columnLen length of column or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLProcedureColumnsW(SQLHSTMT stmt, + SQLWCHAR *catalog, SQLSMALLINT catalogLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *proc, SQLSMALLINT procLen, + SQLWCHAR *column, SQLSMALLINT columnLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = mkresultset(stmt, procColSpec2, array_size(procColSpec2), + procColSpec3, array_size(procColSpec3), NULL); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Get information of HENV. + * @param env environment handle + * @param attr attribute to be retrieved + * @param val output buffer + * @param len length of output buffer + * @param lenp output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetEnvAttr(SQLHENV env, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER len, SQLINTEGER *lenp) +{ + ENV *e; + SQLRETURN ret = SQL_ERROR; + + if (env == SQL_NULL_HENV) { + return SQL_INVALID_HANDLE; + } + e = (ENV *) env; + if (!e || e->magic != ENV_MAGIC) { + return SQL_INVALID_HANDLE; + } +#if defined(_WIN32) || defined(_WIN64) + EnterCriticalSection(&e->cs); + e->owner = GetCurrentThreadId(); +#endif + switch (attr) { + case SQL_ATTR_CONNECTION_POOLING: + ret = SQL_ERROR; + break; + case SQL_ATTR_CP_MATCH: + ret = SQL_NO_DATA; + break; + case SQL_ATTR_OUTPUT_NTS: + if (val) { + *((SQLINTEGER *) val) = SQL_TRUE; + } + if (lenp) { + *lenp = sizeof (SQLINTEGER); + } + ret = SQL_SUCCESS; + break; + case SQL_ATTR_ODBC_VERSION: + if (val) { + *((SQLINTEGER *) val) = e->ov3 ? SQL_OV_ODBC3 : SQL_OV_ODBC2; + } + if (lenp) { + *lenp = sizeof (SQLINTEGER); + } + ret = SQL_SUCCESS; + break; + } +#if defined(_WIN32) || defined(_WIN64) + e->owner = 0; + LeaveCriticalSection(&e->cs); +#endif + return ret; +} + +/** + * Set information in HENV. 
+ * @param env environment handle + * @param attr attribute to be retrieved + * @param val parameter buffer + * @param len length of parameter + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetEnvAttr(SQLHENV env, SQLINTEGER attr, SQLPOINTER val, SQLINTEGER len) +{ + ENV *e; + SQLRETURN ret = SQL_ERROR; + + if (env == SQL_NULL_HENV) { + return SQL_INVALID_HANDLE; + } + e = (ENV *) env; + if (!e || e->magic != ENV_MAGIC) { + return SQL_INVALID_HANDLE; + } +#if defined(_WIN32) || defined(_WIN64) + EnterCriticalSection(&e->cs); + e->owner = GetCurrentThreadId(); +#endif + switch (attr) { + case SQL_ATTR_CONNECTION_POOLING: + ret = SQL_SUCCESS; + break; + case SQL_ATTR_CP_MATCH: + ret = SQL_NO_DATA; + break; + case SQL_ATTR_OUTPUT_NTS: + if (val == (SQLPOINTER) SQL_TRUE) { + ret = SQL_SUCCESS; + } + break; + case SQL_ATTR_ODBC_VERSION: + if (!val) { + break; + } + if (val == (SQLPOINTER) SQL_OV_ODBC2) { + e->ov3 = 0; + ret = SQL_SUCCESS; + } + if (val == (SQLPOINTER) SQL_OV_ODBC3) { + e->ov3 = 1; + ret = SQL_SUCCESS; + } + break; + } +#if defined(_WIN32) || defined(_WIN64) + e->owner = 0; + LeaveCriticalSection(&e->cs); +#endif + return ret; +} + +/** + * Internal get error message given handle (HENV, HDBC, or HSTMT). + * @param htype handle type + * @param handle HENV, HDBC, or HSTMT + * @param recno + * @param sqlstate output buffer for SQL state + * @param nativeerr output buffer of native error code + * @param msg output buffer for error message + * @param buflen length of output buffer + * @param msglen output length + * @result ODBC error code + */ + +static SQLRETURN +drvgetdiagrec(SQLSMALLINT htype, SQLHANDLE handle, SQLSMALLINT recno, + SQLCHAR *sqlstate, SQLINTEGER *nativeerr, SQLCHAR *msg, + SQLSMALLINT buflen, SQLSMALLINT *msglen) +{ + DBC *d = NULL; + STMT *s = NULL; + int len, naterr; + char *logmsg, *sqlst; + SQLRETURN ret = SQL_ERROR; + + if (handle == SQL_NULL_HANDLE) { + return SQL_INVALID_HANDLE; + } + if (sqlstate) { + sqlstate[0] = '\0'; + } + if (msg && buflen > 0) { + msg[0] = '\0'; + } + if (msglen) { + *msglen = 0; + } + if (nativeerr) { + *nativeerr = 0; + } + switch (htype) { + case SQL_HANDLE_ENV: + case SQL_HANDLE_DESC: + return SQL_NO_DATA; + case SQL_HANDLE_DBC: + HDBC_LOCK((SQLHDBC) handle); + d = (DBC *) handle; + logmsg = (char *) d->logmsg; + sqlst = d->sqlstate; + naterr = d->naterr; + break; + case SQL_HANDLE_STMT: + HSTMT_LOCK((SQLHSTMT) handle); + s = (STMT *) handle; + logmsg = (char *) s->logmsg; + sqlst = s->sqlstate; + naterr = s->naterr; + break; + default: + return SQL_INVALID_HANDLE; + } + if (buflen < 0) { + goto done; + } + if (recno > 1) { + ret = SQL_NO_DATA; + goto done; + } + len = strlen(logmsg); + if (len == 0) { + ret = SQL_NO_DATA; + goto done; + } + if (nativeerr) { + *nativeerr = naterr; + } + if (sqlstate) { + strcpy((char *) sqlstate, sqlst); + } + if (msglen) { + *msglen = len; + } + if (len >= buflen) { + if (msg && buflen > 0) { + strncpy((char *) msg, logmsg, buflen); + msg[buflen - 1] = '\0'; + logmsg[0] = '\0'; + } + } else if (msg) { + strcpy((char *) msg, logmsg); + logmsg[0] = '\0'; + } + ret = SQL_SUCCESS; +done: + switch (htype) { + case SQL_HANDLE_DBC: + HDBC_UNLOCK((SQLHDBC) handle); + break; + case SQL_HANDLE_STMT: + HSTMT_UNLOCK((SQLHSTMT) handle); + break; + } + return ret; +} + +#if !defined(WINTERFACE) || (defined(HAVE_UNIXODBC) && HAVE_UNIXODBC) +/** + * Get error message given handle (HENV, HDBC, or HSTMT). 
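
Of the environment attributes handled above, SQL_ATTR_ODBC_VERSION is the one the rest of the driver keys off (it sets e->ov3). A client-side sketch, with error checking omitted and handle allocation assumed to live elsewhere in the driver:

    SQLHENV henv;
    SQLINTEGER ver;
    SQLINTEGER len;

    SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv);
    SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER) SQL_OV_ODBC3, 0);
    SQLGetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, &ver, sizeof (ver), &len);
    /* ver == SQL_OV_ODBC3, len == sizeof (SQLINTEGER) */
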
+ * @param htype handle type + * @param handle HENV, HDBC, or HSTMT + * @param recno + * @param sqlstate output buffer for SQL state + * @param nativeerr output buffer of native error code + * @param msg output buffer for error message + * @param buflen length of output buffer + * @param msglen output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetDiagRec(SQLSMALLINT htype, SQLHANDLE handle, SQLSMALLINT recno, + SQLCHAR *sqlstate, SQLINTEGER *nativeerr, SQLCHAR *msg, + SQLSMALLINT buflen, SQLSMALLINT *msglen) +{ + return drvgetdiagrec(htype, handle, recno, sqlstate, + nativeerr, msg, buflen, msglen); +} +#endif + +#if !defined(HAVE_UNIXODBC) || !HAVE_UNIXODBC + #ifdef WINTERFACE +/** + * Get error message given handle (HENV, HDBC, or HSTMT) + * (UNICODE version). + * @param htype handle type + * @param handle HENV, HDBC, or HSTMT + * @param recno + * @param sqlstate output buffer for SQL state + * @param nativeerr output buffer of native error code + * @param msg output buffer for error message + * @param buflen length of output buffer + * @param msglen output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetDiagRecW(SQLSMALLINT htype, SQLHANDLE handle, SQLSMALLINT recno, + SQLWCHAR *sqlstate, SQLINTEGER *nativeerr, SQLWCHAR *msg, + SQLSMALLINT buflen, SQLSMALLINT *msglen) +{ + char state[16]; + SQLSMALLINT len; + SQLRETURN ret; + + ret = drvgetdiagrec(htype, handle, recno, (SQLCHAR *) state, + nativeerr, (SQLCHAR *) msg, buflen, &len); + if (ret == SQL_SUCCESS) { + if (sqlstate) { + uc_from_utf_buf((SQLCHAR *) state, -1, sqlstate, + 6 * sizeof (SQLWCHAR)); + } + if (msg) { + if (len > 0) { + SQLWCHAR *m = NULL; + + m = uc_from_utf((unsigned char *) msg, len); + if (m) { + if (buflen) { + buflen /= sizeof (SQLWCHAR); + uc_strncpy(msg, m, buflen); + m[len] = 0; + len = min(buflen, uc_strlen(m)); + } else { + len = uc_strlen(m); + } + uc_free(m); + } else { + len = 0; + } + } + if (len <= 0) { + len = 0; + if (buflen > 0) { + msg[0] = 0; + } + } + } else { + /* estimated length !!! */ + len *= sizeof (SQLWCHAR); + } + if (msglen) { + *msglen = len; + } + } else if (ret == SQL_NO_DATA) { + if (sqlstate) { + sqlstate[0] = 0; + } + if (msg) { + if (buflen > 0) { + msg[0] = 0; + } + } + if (msglen) { + *msglen = 0; + } + } + return ret; +} + #endif +#endif + +/** + * Get error record given handle (HDBC or HSTMT). 
+ * @param htype handle type + * @param handle HDBC or HSTMT + * @param recno diag record number for which info to be retrieved + * @param id diag id for which info to be retrieved + * @param info output buffer for error message + * @param buflen length of output buffer + * @param stringlen output length + * @result ODBC error code + */ + +static SQLRETURN +drvgetdiagfield(SQLSMALLINT htype, SQLHANDLE handle, SQLSMALLINT recno, + SQLSMALLINT id, SQLPOINTER info, + SQLSMALLINT buflen, SQLSMALLINT *stringlen) +{ + DBC *d = NULL; + STMT *s = NULL; + int len, naterr; + char *logmsg, *sqlst, *clrmsg = NULL; + SQLRETURN ret = SQL_ERROR; + + if (handle == SQL_NULL_HANDLE) { + return SQL_INVALID_HANDLE; + } + if (stringlen) { + *stringlen = 0; + } + switch (htype) { + case SQL_HANDLE_ENV: + case SQL_HANDLE_DESC: + return SQL_NO_DATA; + case SQL_HANDLE_DBC: + HDBC_LOCK((SQLHDBC) handle); + d = (DBC *) handle; + logmsg = (char *) d->logmsg; + sqlst = d->sqlstate; + naterr = d->naterr; + break; + case SQL_HANDLE_STMT: + HSTMT_LOCK((SQLHSTMT) handle); + s = (STMT *) handle; + d = (DBC *) s->dbc; + logmsg = (char *) s->logmsg; + sqlst = s->sqlstate; + naterr = s->naterr; + break; + default: + return SQL_INVALID_HANDLE; + } + if (buflen < 0) { + goto done; + } + if (recno > 1) { + ret = SQL_NO_DATA; + goto done; + } + switch (id) { + case SQL_DIAG_CLASS_ORIGIN: + logmsg = "ISO 9075"; + if (sqlst[0] == 'I' && sqlst[1] == 'M') { + logmsg = "ODBC 3.0"; + } + break; + case SQL_DIAG_SUBCLASS_ORIGIN: + logmsg = "ISO 9075"; + if (sqlst[0] == 'I' && sqlst[1] == 'M') { + logmsg = "ODBC 3.0"; + } else if (sqlst[0] == 'H' && sqlst[1] == 'Y') { + logmsg = "ODBC 3.0"; + } else if (sqlst[0] == '2' || sqlst[0] == '0' || sqlst[0] == '4') { + logmsg = "ODBC 3.0"; + } + break; + case SQL_DIAG_CONNECTION_NAME: + case SQL_DIAG_SERVER_NAME: + logmsg = d->dsn ? d->dsn : "No DSN"; + break; + case SQL_DIAG_SQLSTATE: + logmsg = sqlst; + break; + case SQL_DIAG_MESSAGE_TEXT: + clrmsg = logmsg; + break; + case SQL_DIAG_NUMBER: + naterr = 1; + /* fall through */ + case SQL_DIAG_NATIVE: + len = strlen(logmsg); + if (len == 0) { + ret = SQL_NO_DATA; + goto done; + } + if (info) { + *((SQLINTEGER *) info) = naterr; + } + ret = SQL_SUCCESS; + goto done; + default: + goto done; + } + if (info && buflen > 0) { + ((char *) info)[0] = '\0'; + } + len = strlen(logmsg); + if (len == 0) { + ret = SQL_NO_DATA; + goto done; + } + if (stringlen) { + *stringlen = len; + } + if (len >= buflen) { + if (info && buflen > 0) { + if (stringlen) { + *stringlen = buflen - 1; + } + strncpy((char *) info, logmsg, buflen); + ((char *) info)[buflen - 1] = '\0'; + } + } else if (info) { + strcpy((char *) info, logmsg); + } + if (clrmsg) { + *clrmsg = '\0'; + } + ret = SQL_SUCCESS; +done: + switch (htype) { + case SQL_HANDLE_DBC: + HDBC_UNLOCK((SQLHDBC) handle); + break; + case SQL_HANDLE_STMT: + HSTMT_UNLOCK((SQLHSTMT) handle); + break; + } + return ret; +} + +#ifndef WINTERFACE +/** + * Get error record given handle (HDBC or HSTMT). 
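+ *
+ * Illustrative sketch (not part of this change): individual fields of
+ * diagnostic record 1 can be read without fetching the whole record,
+ * e.g. the SQLSTATE and the message text on a statement handle.
+ *
+ *   SQLCHAR state[6], text[256];
+ *   SQLSMALLINT len;
+ *   SQLGetDiagField(SQL_HANDLE_STMT, stmt, 1, SQL_DIAG_SQLSTATE,
+ *                   state, sizeof (state), &len);
+ *   SQLGetDiagField(SQL_HANDLE_STMT, stmt, 1, SQL_DIAG_MESSAGE_TEXT,
+ *                   text, sizeof (text), &len);
+ *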
+ * @param htype handle type + * @param handle HDBC or HSTMT + * @param recno diag record number for which info to be retrieved + * @param id diag id for which info to be retrieved + * @param info output buffer for error message + * @param buflen length of output buffer + * @param stringlen output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetDiagField(SQLSMALLINT htype, SQLHANDLE handle, SQLSMALLINT recno, + SQLSMALLINT id, SQLPOINTER info, + SQLSMALLINT buflen, SQLSMALLINT *stringlen) +{ + return drvgetdiagfield(htype, handle, recno, id, info, buflen, stringlen); +} +#endif + +#ifdef WINTERFACE +/** + * Get error record given handle (HDBC or HSTMT). + * @param htype handle type + * @param handle HDBC or HSTMT + * @param recno diag record number for which info to be retrieved + * @param id diag id for which info to be retrieved + * @param info output buffer for error message + * @param buflen length of output buffer + * @param stringlen output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetDiagFieldW(SQLSMALLINT htype, SQLHANDLE handle, SQLSMALLINT recno, + SQLSMALLINT id, SQLPOINTER info, + SQLSMALLINT buflen, SQLSMALLINT *stringlen) +{ + SQLSMALLINT len; + SQLRETURN ret; + + ret = drvgetdiagfield(htype, handle, recno, id, info, buflen, &len); + if (ret == SQL_SUCCESS) { + if (info) { + switch (id) { + case SQL_DIAG_CLASS_ORIGIN: + case SQL_DIAG_SUBCLASS_ORIGIN: + case SQL_DIAG_CONNECTION_NAME: + case SQL_DIAG_SERVER_NAME: + case SQL_DIAG_SQLSTATE: + case SQL_DIAG_MESSAGE_TEXT: + if (len > 0) { + SQLWCHAR *m = NULL; + + m = uc_from_utf((unsigned char *) info, len); + if (m) { + if (buflen) { + buflen /= sizeof (SQLWCHAR); + uc_strncpy(info, m, buflen); + m[len] = 0; + len = min(buflen, uc_strlen(m)); + } else { + len = uc_strlen(m); + } + uc_free(m); + len *= sizeof (SQLWCHAR); + } else { + len = 0; + } + } + if (len <= 0) { + len = 0; + if (buflen > 0) { + ((SQLWCHAR *) info)[0] = 0; + } + } + } + } else { + switch (id) { + case SQL_DIAG_CLASS_ORIGIN: + case SQL_DIAG_SUBCLASS_ORIGIN: + case SQL_DIAG_CONNECTION_NAME: + case SQL_DIAG_SERVER_NAME: + case SQL_DIAG_SQLSTATE: + case SQL_DIAG_MESSAGE_TEXT: + len *= sizeof (SQLWCHAR); + break; + } + } + if (stringlen) { + *stringlen = len; + } + } + return ret; +} +#endif + +/** + * Internal get option of HSTMT. + * @param stmt statement handle + * @param attr attribute to be retrieved + * @param val output buffer + * @param bufmax length of output buffer + * @param buflen output length + * @result ODBC error code + */ + +static SQLRETURN +drvgetstmtattr(SQLHSTMT stmt, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER bufmax, SQLINTEGER *buflen) +{ + STMT *s = (STMT *) stmt; + SQLUINTEGER *uval = (SQLUINTEGER *) val; + + switch (attr) { + case SQL_QUERY_TIMEOUT: + *uval = s->query_timeout; + return SQL_SUCCESS; + case SQL_ATTR_CURSOR_TYPE: + *uval = s->curtype; + return SQL_SUCCESS; + case SQL_ATTR_CURSOR_SCROLLABLE: + *uval = (s->curtype != SQL_CURSOR_FORWARD_ONLY) ? 
+        SQL_SCROLLABLE : SQL_NONSCROLLABLE;
+    return SQL_SUCCESS;
+#ifdef SQL_ATTR_CURSOR_SENSITIVITY
+    case SQL_ATTR_CURSOR_SENSITIVITY:
+    *uval = SQL_UNSPECIFIED;
+    return SQL_SUCCESS;
+#endif
+    case SQL_ATTR_ROW_NUMBER:
+    /* FIXME not implemented */
+    return SQL_ERROR;
+    case SQL_ATTR_ASYNC_ENABLE:
+    *uval = SQL_ASYNC_ENABLE_OFF;
+    return SQL_SUCCESS;
+    case SQL_CONCURRENCY:
+    *uval = SQL_CONCUR_LOCK;
+    return SQL_SUCCESS;
+    case SQL_ATTR_RETRIEVE_DATA:
+    *uval = s->retr_data;
+    return SQL_SUCCESS;
+    case SQL_ROWSET_SIZE:
+    case SQL_ATTR_ROW_ARRAY_SIZE:
+    *uval = s->rowset_size;
+    return SQL_SUCCESS;
+/* Get/set column descriptors would be useful for passing the output of one stmt as input to another.
+ * Since Hive doesn't support parametric (prepare, bind, execute) stmt execution, there's no benefit
+ * in supporting get/set column descriptors.
+ */
+    case SQL_ATTR_IMP_ROW_DESC:
+    case SQL_ATTR_APP_ROW_DESC:
+    case SQL_ATTR_IMP_PARAM_DESC:
+    case SQL_ATTR_APP_PARAM_DESC:
+    *((SQLHDESC *) val) = (SQLHDESC) DEAD_MAGIC;
+    return SQL_SUCCESS;
+    case SQL_ATTR_ROW_STATUS_PTR:
+    *((SQLUSMALLINT **) val) = s->row_status;
+    return SQL_SUCCESS;
+    case SQL_ATTR_ROWS_FETCHED_PTR:
+    *((SQLUINTEGER **) val) = s->row_count;
+    return SQL_SUCCESS;
+    case SQL_ATTR_USE_BOOKMARKS: {
+    STMT *s = (STMT *) stmt;
+
+    *(SQLUINTEGER *) val = s->bkmrk ? SQL_UB_ON : SQL_UB_OFF;
+    return SQL_SUCCESS;
+    }
+    case SQL_ATTR_PARAM_BIND_OFFSET_PTR:
+    *((SQLUINTEGER **) val) = s->parm_bind_offs;
+    return SQL_SUCCESS;
+    case SQL_ATTR_PARAM_BIND_TYPE:
+    *((SQLUINTEGER *) val) = s->parm_bind_type;
+    return SQL_SUCCESS;
+    case SQL_ATTR_PARAM_OPERATION_PTR:
+    *((SQLUSMALLINT **) val) = s->parm_oper;
+    return SQL_SUCCESS;
+    case SQL_ATTR_PARAM_STATUS_PTR:
+    *((SQLUSMALLINT **) val) = s->parm_status;
+    return SQL_SUCCESS;
+    case SQL_ATTR_PARAMS_PROCESSED_PTR:
+    *((SQLUINTEGER **) val) = s->parm_proc;
+    return SQL_SUCCESS;
+    case SQL_ATTR_PARAMSET_SIZE:
+    *((SQLUINTEGER *) val) = s->paramset_size;
+    return SQL_SUCCESS;
+    case SQL_ATTR_ROW_BIND_TYPE:
+    *(SQLUINTEGER *) val = s->bind_type;
+    return SQL_SUCCESS;
+    case SQL_ATTR_ROW_BIND_OFFSET_PTR:
+    *((SQLUINTEGER **) val) = s->bind_offs;
+    return SQL_SUCCESS;
+    case SQL_ATTR_MAX_ROWS:
+    *((SQLUINTEGER *) val) = s->max_rows;
+    return SQL_SUCCESS;
+    case SQL_ATTR_MAX_LENGTH:
+    *((SQLINTEGER *) val) = 1000000000;
+    return SQL_SUCCESS;
+    case SQL_ATTR_FETCH_BOOKMARK_PTR:
+    /* bookmarks not supported, report a NULL pointer to the caller */
+    *((SQLPOINTER *) val) = NULL;
+    return SQL_SUCCESS;
+    case SQL_ATTR_KEYSET_SIZE:
+    *((SQLINTEGER *) val) = 0;
+    return SQL_SUCCESS;
+    }
+    return drvunimplstmt(stmt);
+}
+
+#if (defined(HAVE_UNIXODBC) && HAVE_UNIXODBC) || !defined(WINTERFACE)
+/**
+ * Get option of HSTMT.
+ * @param stmt statement handle
+ * @param attr attribute to be retrieved
+ * @param val output buffer
+ * @param bufmax length of output buffer
+ * @param buflen output length
+ * @result ODBC error code
+ */
+
+SQLRETURN SQL_API
+SQLGetStmtAttr(SQLHSTMT stmt, SQLINTEGER attr, SQLPOINTER val,
+               SQLINTEGER bufmax, SQLINTEGER *buflen)
+{
+    SQLRETURN ret;
+
+    HSTMT_LOCK(stmt);
+    ret = drvgetstmtattr(stmt, attr, val, bufmax, buflen);
+    HSTMT_UNLOCK(stmt);
+    return ret;
+}
+#endif
+
+#ifdef WINTERFACE
+/**
+ * Get option of HSTMT (UNICODE version).
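+ *
+ * Illustrative sketch (not part of this change, same semantics as the
+ * ANSI entry point above): reading an integer statement attribute such
+ * as the current rowset size. Note that drvgetstmtattr() writes integer
+ * attributes through a 32-bit SQLUINTEGER pointer.
+ *
+ *   SQLUINTEGER rows = 0;
+ *   SQLGetStmtAttr(stmt, SQL_ATTR_ROW_ARRAY_SIZE, &rows, 0, NULL);
+ *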
+ * @param stmt statement handle + * @param attr attribute to be retrieved + * @param val output buffer + * @param bufmax length of output buffer + * @param buflen output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetStmtAttrW(SQLHSTMT stmt, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER bufmax, SQLINTEGER *buflen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvgetstmtattr(stmt, attr, val, bufmax, buflen); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Internal set option on HSTMT. + * @param stmt statement handle + * @param attr attribute to be set + * @param val input buffer (attribute value) + * @param buflen length of input buffer + * @result ODBC error code + */ + +static SQLRETURN +drvsetstmtattr(SQLHSTMT stmt, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER buflen) +{ + STMT *s = (STMT *) stmt; + + switch (attr) { + case SQL_ATTR_CURSOR_TYPE: + /* Hive doesn't support scroll cursors. */ + if (val != (SQLPOINTER) SQL_CURSOR_FORWARD_ONLY) + goto e01s02; + return SQL_SUCCESS; + case SQL_ATTR_CURSOR_SCROLLABLE: + if (val != (SQLPOINTER) SQL_CURSOR_FORWARD_ONLY) + goto e01s02; + return SQL_SUCCESS; + case SQL_ATTR_ASYNC_ENABLE: + if (val != (SQLPOINTER) SQL_ASYNC_ENABLE_OFF) { +e01s02: + setstat(s, -1, "option value changed", "01S02"); + return SQL_SUCCESS_WITH_INFO; + } + return SQL_SUCCESS; + case SQL_CONCURRENCY: + if (val != (SQLPOINTER) SQL_CONCUR_LOCK) { + goto e01s02; + } + return SQL_SUCCESS; +#ifdef SQL_ATTR_CURSOR_SENSITIVITY + case SQL_ATTR_CURSOR_SENSITIVITY: + if (val != (SQLPOINTER) SQL_UNSPECIFIED) { + goto e01s02; + } + return SQL_SUCCESS; +#endif + case SQL_ATTR_QUERY_TIMEOUT: + s->query_timeout = (SQLUINTEGER)val; + return SQL_SUCCESS; + case SQL_ATTR_RETRIEVE_DATA: + if (val != (SQLPOINTER) SQL_RD_ON && + val != (SQLPOINTER) SQL_RD_OFF) { + goto e01s02; + } + s->retr_data = (PTRDIFF_T) val; + return SQL_SUCCESS; + case SQL_ROWSET_SIZE: + case SQL_ATTR_ROW_ARRAY_SIZE: + if ((PTRDIFF_T) val < 1) { + setstat(s, -1, "invalid rowset size", "HY000"); + return SQL_ERROR; + } else { + SQLUSMALLINT *rst = &s->row_status1; + + if ((PTRDIFF_T) val > 1) { + rst = xmalloc(sizeof (SQLUSMALLINT) * (PTRDIFF_T) val); + if (!rst) { + return nomem(s); + } + } + if (s->row_status0 != &s->row_status1) { + freep(&s->row_status0); + } + s->row_status0 = rst; + s->rowset_size = (PTRDIFF_T) val; + if (s->hive_resultset) { + int fetch_row_size; + + SET_FETCH_ROW_SIZE(s->rowset_size, fetch_row_size); + DBSetBulkAttr(s->hive_resultset, s->rowset_size, fetch_row_size); + } + } + return SQL_SUCCESS; + case SQL_ATTR_ROW_STATUS_PTR: + s->row_status = (SQLUSMALLINT *) val; + return SQL_SUCCESS; + case SQL_ATTR_ROWS_FETCHED_PTR: + /* if ROWS_FETCHED_PTR is being set to null, + * then reset it back to local count + * the fetch assumes that its non null + */ + if (val == (SQLUINTEGER *)NULL) + val = &s->row_count0; + s->row_count = (SQLUINTEGER *) val; + return SQL_SUCCESS; + case SQL_ATTR_PARAM_BIND_OFFSET_PTR: + s->parm_bind_offs = (SQLUINTEGER *) val; + return SQL_SUCCESS; + case SQL_ATTR_PARAM_BIND_TYPE: + s->parm_bind_type = (PTRDIFF_T) val; + return SQL_SUCCESS; + case SQL_ATTR_PARAM_OPERATION_PTR: + s->parm_oper = (SQLUSMALLINT *) val; + return SQL_SUCCESS; + case SQL_ATTR_PARAM_STATUS_PTR: + s->parm_status = (SQLUSMALLINT *) val; + return SQL_SUCCESS; + case SQL_ATTR_PARAMS_PROCESSED_PTR: + s->parm_proc = (SQLUINTEGER *) val; + return SQL_SUCCESS; + case SQL_ATTR_PARAMSET_SIZE: + if ((PTRDIFF_T) val < 1) { + goto e01s02; + } + s->paramset_size = (PTRDIFF_T) 
val; + s->paramset_count = 0; + return SQL_SUCCESS; + case SQL_ATTR_ROW_BIND_TYPE: + s->bind_type = (PTRDIFF_T) val; + return SQL_SUCCESS; + case SQL_ATTR_ROW_BIND_OFFSET_PTR: + s->bind_offs = (SQLUINTEGER *) val; + return SQL_SUCCESS; + case SQL_ATTR_USE_BOOKMARKS: + if (val != (SQLPOINTER) SQL_UB_OFF && + val != (SQLPOINTER) SQL_UB_ON) { + goto e01s02; + } + s->bkmrk = val == (SQLPOINTER) SQL_UB_ON; + return SQL_SUCCESS; + case SQL_ATTR_MAX_ROWS: + s->max_rows = (PTRDIFF_T) val; + return SQL_SUCCESS; + case SQL_ATTR_MAX_LENGTH: + if (val != (SQLPOINTER) 1000000000) { + goto e01s02; + } + return SQL_SUCCESS; + case SQL_ATTR_ENABLE_AUTO_IPD: + /* Hive doesn't support prepare. There no descriptor information available just after prepare call */ + if (val != (SQLPOINTER) SQL_FALSE) { + goto e01s02; + } + return SQL_SUCCESS; + case SQL_ATTR_FETCH_BOOKMARK_PTR: + /* Hive doesn't support scroll cursors */ + if (val != (SQLPOINTER) NULL) { + goto e01s02; + } + return SQL_SUCCESS; + case SQL_ATTR_KEYSET_SIZE: + /* Hive doesn't mixed cursors */ + if (val != (SQLPOINTER) 0) { + goto e01s02; + } + return SQL_SUCCESS; + } + return drvunimplstmt(stmt); +} + +#if (defined(HAVE_UNIXODBC) && HAVE_UNIXODBC) || !defined(WINTERFACE) +/** + * Set option on HSTMT. + * @param stmt statement handle + * @param attr attribute to be set + * @param val input buffer (attribute value) + * @param buflen length of input buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetStmtAttr(SQLHSTMT stmt, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER buflen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvsetstmtattr(stmt, attr, val, buflen); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Set option on HSTMT (UNICODE version). + * @param stmt statement handle + * @param attr attribute to be set + * @param val input buffer (attribute value) + * @param buflen length of input buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetStmtAttrW(SQLHSTMT stmt, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER buflen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvsetstmtattr(stmt, attr, val, buflen); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Internal get option of HSTMT. + * @param stmt statement handle + * @param opt option to be retrieved + * @param param output buffer + * @result ODBC error code + */ + +static SQLRETURN +drvgetstmtoption(SQLHSTMT stmt, SQLUSMALLINT opt, SQLPOINTER param) +{ + STMT *s = (STMT *) stmt; + SQLUINTEGER *ret = (SQLUINTEGER *) param; + + /* TODO: SQLGetStmtOption was deprecated in ODBC 3.X, replaced + with SQLGetStmtAttr. This function should be removed and replaced + with a call to drvgetstmtattr + */ + + switch (opt) { + case SQL_QUERY_TIMEOUT: + *ret = s->query_timeout; + return SQL_SUCCESS; + case SQL_CURSOR_TYPE: + *ret = s->curtype; + return SQL_SUCCESS; + case SQL_ROW_NUMBER: + return SQL_ERROR; + case SQL_ASYNC_ENABLE: + *ret = SQL_ASYNC_ENABLE_OFF; + return SQL_SUCCESS; + case SQL_CONCURRENCY: + *ret = SQL_CONCUR_LOCK; + return SQL_SUCCESS; + case SQL_ATTR_RETRIEVE_DATA: + *ret = s->retr_data; + return SQL_SUCCESS; + case SQL_ROWSET_SIZE: + case SQL_ATTR_ROW_ARRAY_SIZE: + *ret = s->rowset_size; + return SQL_SUCCESS; + case SQL_ATTR_MAX_ROWS: + *ret = s->max_rows; + return SQL_SUCCESS; + case SQL_ATTR_MAX_LENGTH: + *ret = 1000000000; + return SQL_SUCCESS; + } + return drvunimplstmt(stmt); +} + +/** + * Get option of HSTMT. 
+ * @param stmt statement handle + * @param opt option to be retrieved + * @param param output buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetStmtOption(SQLHSTMT stmt, SQLUSMALLINT opt, SQLPOINTER param) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvgetstmtoption(stmt, opt, param); + HSTMT_UNLOCK(stmt); + return ret; +} + +#ifdef WINTERFACE +/** + * Get option of HSTMT (UNICODE version). + * @param stmt statement handle + * @param opt option to be retrieved + * @param param output buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetStmtOptionW(SQLHSTMT stmt, SQLUSMALLINT opt, SQLPOINTER param) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvgetstmtoption(stmt, opt, param); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Internal set option on HSTMT. + * @param stmt statement handle + * @param opt option to be set + * @param param input buffer (option value) + * @result ODBC error code + */ + +static SQLRETURN +drvsetstmtoption(SQLHSTMT stmt, SQLUSMALLINT opt, SQLUINTEGER param) +{ + STMT *s = (STMT *) stmt; + + switch (opt) { + case SQL_CURSOR_TYPE: + /* Hive doesn't support scrollable cursors */ + if (param != SQL_CURSOR_FORWARD_ONLY) + goto e01s02; + return SQL_SUCCESS; + case SQL_ASYNC_ENABLE: + if (param != SQL_ASYNC_ENABLE_OFF) { + goto e01s02; + } + return SQL_SUCCESS; + case SQL_CONCURRENCY: + if (param != SQL_CONCUR_LOCK) { + goto e01s02; + } + return SQL_SUCCESS; + case SQL_QUERY_TIMEOUT: + s->query_timeout = (SQLUINTEGER)param; + return SQL_SUCCESS; + case SQL_RETRIEVE_DATA: + if (param != SQL_RD_ON && param != SQL_RD_OFF) { +e01s02: + setstat(s, -1, "option value changed", "01S02"); + return SQL_SUCCESS_WITH_INFO; + } + s->retr_data = (int) param; + return SQL_SUCCESS; + case SQL_ROWSET_SIZE: + case SQL_ATTR_ROW_ARRAY_SIZE: + if (param < 1) { + setstat(s, -1, "invalid rowset size", "HY000"); + return SQL_ERROR; + } else { + SQLUSMALLINT *rst = &s->row_status1; + + if (param > 1) { + rst = xmalloc(sizeof (SQLUSMALLINT) * param); + if (!rst) { + return nomem(s); + } + } + if (s->row_status0 != &s->row_status1) { + freep(&s->row_status0); + } + s->row_status0 = rst; + s->rowset_size = param; + if (s->hive_resultset) { + int fetch_row_size; + + SET_FETCH_ROW_SIZE(s->rowset_size, fetch_row_size); + DBSetBulkAttr(s->hive_resultset, s->rowset_size, fetch_row_size); + } + } + return SQL_SUCCESS; + case SQL_ATTR_MAX_ROWS: + s->max_rows = param; + return SQL_SUCCESS; + case SQL_ATTR_MAX_LENGTH: + if (param != 1000000000) { + goto e01s02; + } + return SQL_SUCCESS; + } + return drvunimplstmt(stmt); +} + +/** + * Set option on HSTMT. + * @param stmt statement handle + * @param opt option to be set + * @param param input buffer (option value) + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetStmtOption(SQLHSTMT stmt, SQLUSMALLINT opt, + SETSTMTOPTION_LAST_ARG_TYPE param) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvsetstmtoption(stmt, opt, (SQLUINTEGER) param); + HSTMT_UNLOCK(stmt); + return ret; +} + +#ifdef WINTERFACE +/** + * Set option on HSTMT (UNICODE version). + * @param stmt statement handle + * @param opt option to be set + * @param param input buffer (option value) + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetStmtOptionW(SQLHSTMT stmt, SQLUSMALLINT opt, + SETSTMTOPTION_LAST_ARG_TYPE param) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvsetstmtoption(stmt, opt, (SQLUINTEGER) param); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Internal set position on result in HSTMT. 
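+ *
+ * Illustrative sketch (not part of this change): only SQL_POSITION is
+ * supported, so after a block fetch the application can at most
+ * reposition the cursor within the current rowset, e.g. onto row 3.
+ *
+ *   SQLSetPos(stmt, 3, SQL_POSITION, SQL_LOCK_NO_CHANGE);
+ *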
+ * @param stmt statement handle + * @param row row to be positioned + * @param op operation code + * @param lock locking type + * @result ODBC error code + */ + +static SQLRETURN +drvsetpos(SQLHSTMT stmt, SQLSETPOSIROW row, SQLUSMALLINT op, SQLUSMALLINT lock) +{ + STMT *s = (STMT *) stmt; + + if (op != SQL_POSITION) { + return drvunimplstmt(stmt); + } + if (!s->rows || row <= 0 || row > s->nrows) { + setstat(s, -1, "row out of range", (*s->ov3) ? "HY107" : "S1107"); + return SQL_ERROR; + } + s->rowp = row - 1; + return SQL_SUCCESS; +} + +/** + * Set position on result in HSTMT. + * @param stmt statement handle + * @param row row to be positioned + * @param op operation code + * @param lock locking type + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetPos(SQLHSTMT stmt, SQLSETPOSIROW row, SQLUSMALLINT op, SQLUSMALLINT lock) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvsetpos(stmt, row, op, lock); + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Function not implemented. + */ + +SQLRETURN SQL_API +SQLSetScrollOptions(SQLHSTMT stmt, SQLUSMALLINT concur, SQLLEN rowkeyset, + SQLUSMALLINT rowset) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} + +#define strmak(dst, src, max, lenp) { \ + int len = strlen(src); \ + int cnt = min(len + 1, max); \ + strncpy(dst, src, cnt); \ + *lenp = (cnt > len) ? len : cnt; \ +} + +/** + * Internal return information about what this ODBC driver supports. + * @param dbc database connection handle + * @param type type of information to be retrieved + * @param val output buffer + * @param valMax length of output buffer + * @param valLen output length + * @result ODBC error code + */ + +static SQLRETURN +drvgetinfo(SQLHDBC dbc, SQLUSMALLINT type, SQLPOINTER val, SQLSMALLINT valMax, + SQLSMALLINT *valLen) +{ + DBC *d; + char dummyc[16]; + SQLSMALLINT dummy; +#if defined(_WIN32) || defined(_WIN64) + char drvname[301]; +#else + char *drvname = DEFAULT_DRIVER_NAME; +#endif + char server_name[300]; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + if (valMax) { + valMax--; + } + if (!valLen) { + valLen = &dummy; + } + if (!val) { + val = dummyc; + valMax = sizeof (dummyc) - 1; + } + switch (type) { + case SQL_MAX_USER_NAME_LEN: + *((SQLSMALLINT *) val) = 16; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_USER_NAME: + strmak(val, "", valMax, valLen); + break; + case SQL_DRIVER_ODBC_VER: +#if 0 + strmak(val, (*d->ov3) ? 
"03.00" : "02.50", valMax, valLen); +#else + strmak(val, "03.00", valMax, valLen); +#endif + break; + case SQL_ACTIVE_CONNECTIONS: + case SQL_ACTIVE_STATEMENTS: + *((SQLSMALLINT *) val) = 0; + *valLen = sizeof (SQLSMALLINT); + break; +#ifdef SQL_ASYNC_MODE + case SQL_ASYNC_MODE: + *((SQLUINTEGER *) val) = SQL_AM_NONE; + *valLen = sizeof (SQLUINTEGER); + break; +#endif +#ifdef SQL_CREATE_TABLE + case SQL_CREATE_TABLE: + *((SQLUINTEGER *) val) = SQL_CT_CREATE_TABLE; + *valLen = sizeof (SQLUINTEGER); + break; +#endif +#ifdef SQL_CREATE_VIEW + case SQL_CREATE_VIEW: + *((SQLUINTEGER *) val) = SQL_CV_CREATE_VIEW; + *valLen = sizeof (SQLUINTEGER); + break; +#endif +#ifdef SQL_DDL_INDEX + case SQL_DDL_INDEX: + *((SQLUINTEGER *) val) = SQL_DI_CREATE_INDEX | SQL_DI_DROP_INDEX; + *valLen = sizeof (SQLUINTEGER); + break; +#endif +#ifdef SQL_DROP_TABLE + case SQL_DROP_TABLE: + *((SQLUINTEGER *) val) = SQL_DT_DROP_TABLE; + *valLen = sizeof (SQLUINTEGER); + break; +#endif +#ifdef SQL_DROP_VIEW + case SQL_DROP_VIEW: + *((SQLUINTEGER *) val) = SQL_DV_DROP_VIEW; + *valLen = sizeof (SQLUINTEGER); + break; +#endif +#ifdef SQL_INDEX_KEYWORDS + case SQL_INDEX_KEYWORDS: + *((SQLUINTEGER *) val) = SQL_IK_NONE; + *valLen = sizeof (SQLUINTEGER); + break; +#endif + case SQL_DATA_SOURCE_NAME: + strmak(val, d->dsn ? d->dsn : "", valMax, valLen); + break; + case SQL_DRIVER_NAME: +#if defined(_WIN32) || defined(_WIN64) + GetModuleFileName(hModule, drvname, sizeof (drvname)); +#endif + strmak(val, drvname, valMax, valLen); + break; + case SQL_DRIVER_VER: + strmak(val, VERSION, valMax, valLen); + break; + case SQL_FETCH_DIRECTION: + *((SQLUINTEGER *) val) = SQL_FD_FETCH_NEXT; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_ODBC_VER: + strmak(val, (*d->ov3) ? "03.00" : "02.50", valMax, valLen); + break; +#ifdef NEVER + case SQL_ODBC_SAG_CLI_CONFORMANCE: + *((SQLSMALLINT *) val) = SQL_OSCC_NOT_COMPLIANT; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_STANDARD_CLI_CONFORMANCE: + *((SQLUINTEGER *) val) = SQL_SCC_XOPEN_CLI_VERSION1; + *valLen = sizeof (SQLUINTEGER); + break; +#endif /* NEVER */ + case SQL_SERVER_NAME: + if (d->dsn_host && d->dsn_port) + sprintf(server_name, "%s:%s", d->dsn_host, d->dsn_port); + else + sprintf(server_name, "Hive Server"); + strmak(val, server_name, valMax, valLen); + break; + case SQL_DATABASE_NAME: + strmak(val, d->dbname ? 
d->dbname : "", valMax, valLen); + break; + case SQL_SEARCH_PATTERN_ESCAPE: + strmak(val, "\\", valMax, valLen); + break; +#ifdef NEVER + case SQL_ODBC_SQL_CONFORMANCE: + *((SQLSMALLINT *) val) = SQL_OSC_MINIMUM; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_ODBC_API_CONFORMANCE: + *((SQLSMALLINT *) val) = SQL_OAC_LEVEL1; + *valLen = sizeof (SQLSMALLINT); + break; +#endif /* NEVER */ + case SQL_DBMS_NAME: + strmak(val, "Apache Hive", valMax, valLen); + break; + case SQL_DBMS_VER: + strmak(val, HIVE_VERSION, valMax, valLen); + break; + case SQL_COLUMN_ALIAS: + case SQL_NEED_LONG_DATA_LEN: + strmak(val, "Y", valMax, valLen); + break; + case SQL_ROW_UPDATES: + case SQL_ACCESSIBLE_PROCEDURES: + case SQL_PROCEDURES: + case SQL_EXPRESSIONS_IN_ORDERBY: + case SQL_ODBC_SQL_OPT_IEF: + case SQL_ORDER_BY_COLUMNS_IN_SELECT: + case SQL_ACCESSIBLE_TABLES: + case SQL_MULT_RESULT_SETS: + case SQL_MULTIPLE_ACTIVE_TXN: + case SQL_MAX_ROW_SIZE_INCLUDES_LONG: + strmak(val, "N", valMax, valLen); + break; + case SQL_LIKE_ESCAPE_CLAUSE: + case SQL_OUTER_JOINS: + strmak(val, "Y", valMax, valLen); + break; +#ifdef SQL_CATALOG_NAME + case SQL_CATALOG_NAME: +#if defined(_WIN32) || defined(_WIN64) + strmak(val, d->xcelqrx ? "Y" : "N", valMax, valLen); +#else + strmak(val, "N", valMax, valLen); +#endif + break; +#endif + case SQL_DATA_SOURCE_READ_ONLY: + strmak(val, "N", valMax, valLen); + break; +#ifdef SQL_OJ_CAPABILITIES + case SQL_OJ_CAPABILITIES: + *((SQLUINTEGER *) val) = SQL_OJ_LEFT|SQL_OJ_RIGHT|SQL_OJ_FULL|SQL_OJ_NESTED| \ + SQL_OJ_NOT_ORDERED| SQL_OJ_INNER; + *valLen = sizeof (SQLUINTEGER); + break; +#endif +#ifdef SQL_MAX_IDENTIFIER_LEN + case SQL_MAX_IDENTIFIER_LEN: + /* TODO : need to verify the max object name in Hive */ + *((SQLUSMALLINT *) val) = 255; + *valLen = sizeof (SQLUSMALLINT); + break; +#endif + case SQL_CONCAT_NULL_BEHAVIOR: + *((SQLSMALLINT *) val) = SQL_CB_NULL; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_CURSOR_COMMIT_BEHAVIOR: + case SQL_CURSOR_ROLLBACK_BEHAVIOR: + *((SQLSMALLINT *) val) = SQL_CB_DELETE; + *valLen = sizeof (SQLSMALLINT); + break; +#ifdef SQL_CURSOR_SENSITIVITY + case SQL_CURSOR_SENSITIVITY: + *((SQLUINTEGER *) val) = SQL_UNSPECIFIED; + *valLen = sizeof (SQLUINTEGER); + break; +#endif + case SQL_DEFAULT_TXN_ISOLATION: + *((SQLUINTEGER *) val) = SQL_TXN_READ_UNCOMMITTED; + *valLen = sizeof (SQLUINTEGER); + break; +#ifdef SQL_DESCRIBE_PARAMETER + case SQL_DESCRIBE_PARAMETER: + strmak(val, "N", valMax, valLen); + break; +#endif + case SQL_TXN_ISOLATION_OPTION: + *((SQLUINTEGER *) val) = SQL_TXN_READ_UNCOMMITTED; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_IDENTIFIER_CASE: + *((SQLSMALLINT *) val) = SQL_IC_MIXED; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_IDENTIFIER_QUOTE_CHAR: + strmak(val, "\"", valMax, valLen); + break; + case SQL_MAX_TABLE_NAME_LEN: + case SQL_MAX_COLUMN_NAME_LEN: + *((SQLSMALLINT *) val) = 128; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_MAX_CURSOR_NAME_LEN: + *((SQLSMALLINT *) val) = 0; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_MAX_PROCEDURE_NAME_LEN: + *((SQLSMALLINT *) val) = 128; + break; + case SQL_MAX_QUALIFIER_NAME_LEN: + case SQL_MAX_OWNER_NAME_LEN: + /* TODO. 
verify the max table/column name length for Hive */ + *((SQLSMALLINT *) val) = 128; + break; + case SQL_OWNER_TERM: + strmak(val, "", valMax, valLen); + break; + case SQL_PROCEDURE_TERM: + strmak(val, "PROCEDURE", valMax, valLen); + break; + case SQL_QUALIFIER_NAME_SEPARATOR: + strmak(val, ".", valMax, valLen); + break; + case SQL_QUALIFIER_TERM: +#if defined(_WIN32) || defined(_WIN64) + strmak(val, d->xcelqrx ? "catalog" : "", valMax, valLen); +#else + strmak(val, "", valMax, valLen); +#endif + break; + case SQL_QUALIFIER_USAGE: + *((SQLUINTEGER *) val) = 0; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_SCROLL_CONCURRENCY: + *((SQLUINTEGER *) val) = SQL_SCCO_LOCK; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_SCROLL_OPTIONS: + *((SQLUINTEGER *) val) = SQL_SO_STATIC | SQL_SO_FORWARD_ONLY; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_TABLE_TERM: + strmak(val, "TABLE", valMax, valLen); + break; + case SQL_TXN_CAPABLE: + *((SQLSMALLINT *) val) = SQL_TC_NONE; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_CONVERT_FUNCTIONS: + *((SQLUINTEGER *) val) = 0; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_SYSTEM_FUNCTIONS: + case SQL_NUMERIC_FUNCTIONS: + case SQL_STRING_FUNCTIONS: + case SQL_TIMEDATE_FUNCTIONS: + *((SQLUINTEGER *) val) = 0; + *valLen = sizeof (SQLUINTEGER); + break; + /* Hive doesn't support scalar convert functions */ + case SQL_CONVERT_BIGINT: + case SQL_CONVERT_BIT: + case SQL_CONVERT_CHAR: + case SQL_CONVERT_DECIMAL: + case SQL_CONVERT_DOUBLE: + case SQL_CONVERT_FLOAT: + case SQL_CONVERT_INTEGER: + case SQL_CONVERT_LONGVARCHAR: + case SQL_CONVERT_NUMERIC: + case SQL_CONVERT_REAL: + case SQL_CONVERT_SMALLINT: + case SQL_CONVERT_TINYINT: + case SQL_CONVERT_VARCHAR: + case SQL_CONVERT_DATE: + case SQL_CONVERT_TIME: + case SQL_CONVERT_TIMESTAMP: + *((SQLUINTEGER *) val) = 0; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_CONVERT_BINARY: + case SQL_CONVERT_VARBINARY: + case SQL_CONVERT_LONGVARBINARY: + *((SQLUINTEGER *) val) = 0; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_POSITIONED_STATEMENTS: + case SQL_LOCK_TYPES: + *((SQLUINTEGER *) val) = 0; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_BOOKMARK_PERSISTENCE: + *((SQLUINTEGER *) val) = SQL_BP_DROP; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_UNION: + *((SQLUINTEGER *) val) = SQL_U_UNION | SQL_U_UNION_ALL; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_OWNER_USAGE: + case SQL_SUBQUERIES: + case SQL_TIMEDATE_ADD_INTERVALS: + case SQL_TIMEDATE_DIFF_INTERVALS: + *((SQLUINTEGER *) val) = 0; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_QUOTED_IDENTIFIER_CASE: + *((SQLUSMALLINT *) val) = SQL_IC_SENSITIVE; + *valLen = sizeof (SQLUSMALLINT); + break; + case SQL_POS_OPERATIONS: + *((SQLUINTEGER *) val) = 0; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_ALTER_TABLE: + *((SQLUINTEGER *) val) = SQL_AT_ADD_COLUMN|SQL_AT_DROP_COLUMN ; + *valLen = sizeof (SQLUINTEGER); + break; + case SQL_CORRELATION_NAME: + *((SQLSMALLINT *) val) = SQL_CN_DIFFERENT; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_NON_NULLABLE_COLUMNS: + *((SQLSMALLINT *) val) = SQL_NNC_NULL; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_NULL_COLLATION: + *((SQLSMALLINT *) val) = SQL_NC_END; + *valLen = sizeof (SQLSMALLINT); + break; + case SQL_MAX_COLUMNS_IN_GROUP_BY: + case SQL_MAX_COLUMNS_IN_ORDER_BY: + case SQL_MAX_COLUMNS_IN_SELECT: + case SQL_MAX_COLUMNS_IN_TABLE: + case SQL_MAX_ROW_SIZE: + case SQL_MAX_TABLES_IN_SELECT: + *((SQLSMALLINT *) val) = 0; + *valLen = 
sizeof (SQLSMALLINT);
+    break;
+    case SQL_MAX_BINARY_LITERAL_LEN:
+    case SQL_MAX_CHAR_LITERAL_LEN:
+    *((SQLUINTEGER *) val) = 0;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_MAX_COLUMNS_IN_INDEX:
+    *((SQLSMALLINT *) val) = 0;
+    *valLen = sizeof (SQLSMALLINT);
+    break;
+    case SQL_MAX_INDEX_SIZE:
+    *((SQLUINTEGER *) val) = 0;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+#ifdef SQL_MAX_IDENTIFIER_LENGTH
+    case SQL_MAX_IDENTIFIER_LENGTH:
+    *((SQLUINTEGER *) val) = 255;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+#endif
+    case SQL_MAX_STATEMENT_LEN:
+    *((SQLUINTEGER *) val) = 16384;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_QUALIFIER_LOCATION:
+    *((SQLSMALLINT *) val) = SQL_QL_START;
+    *valLen = sizeof (SQLSMALLINT);
+    break;
+    case SQL_GETDATA_EXTENSIONS:
+    *((SQLUINTEGER *) val) =
+        SQL_GD_ANY_COLUMN | SQL_GD_ANY_ORDER | SQL_GD_BOUND;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_STATIC_SENSITIVITY:
+    *((SQLUINTEGER *) val) = 0;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_FILE_USAGE:
+    *((SQLSMALLINT *) val) = SQL_FILE_NOT_SUPPORTED;
+    *valLen = sizeof (SQLSMALLINT);
+    break;
+    case SQL_GROUP_BY:
+    *((SQLSMALLINT *) val) = SQL_GB_NO_RELATION;
+    *valLen = sizeof (SQLSMALLINT);
+    break;
+    case SQL_KEYWORDS:
+    /* TODO : Add more keywords from the Hive grammar */
+    strmak(val, "CREATE,SELECT,DROP,INSERT,VIEW,GROUP,BY,"
+           "INTO,VALUES,TABLE,INDEX,FROM,SET,WHERE,AND,OVERWRITE",
+           valMax, valLen);
+    break;
+    case SQL_SPECIAL_CHARACTERS:
+#ifdef SQL_COLLATION_SEQ
+    case SQL_COLLATION_SEQ:
+#endif
+    strmak(val, "", valMax, valLen);
+    break;
+    case SQL_BATCH_SUPPORT:
+    *((SQLUINTEGER *) val) = SQL_BS_SELECT_EXPLICIT;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_BATCH_ROW_COUNT:
+    case SQL_PARAM_ARRAY_ROW_COUNTS:
+    *((SQLUINTEGER *) val) = 0;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1:
+    *((SQLUINTEGER *) val) = SQL_CA1_NEXT;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_STATIC_CURSOR_ATTRIBUTES1:
+    *((SQLUINTEGER *) val) = SQL_CA1_NEXT;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2:
+    case SQL_STATIC_CURSOR_ATTRIBUTES2:
+    *((SQLUINTEGER *) val) = SQL_CA2_READ_ONLY_CONCURRENCY;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_KEYSET_CURSOR_ATTRIBUTES1:
+    case SQL_KEYSET_CURSOR_ATTRIBUTES2:
+    case SQL_DYNAMIC_CURSOR_ATTRIBUTES1:
+    case SQL_DYNAMIC_CURSOR_ATTRIBUTES2:
+    *((SQLUINTEGER *) val) = 0;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+    case SQL_ODBC_INTERFACE_CONFORMANCE:
+    *((SQLUINTEGER *) val) = SQL_OIC_CORE;
+    *valLen = sizeof (SQLUINTEGER);
+    break;
+
+    default:
+    setstatd(d, -1, "unsupported info option %d",
+             (*d->ov3) ? "HYC00" : "S1C00", type);
+    return SQL_ERROR;
+    }
+    return SQL_SUCCESS;
+}
+
+#if (defined(HAVE_UNIXODBC) && HAVE_UNIXODBC) || !defined(WINTERFACE)
+/**
+ * Return information about what this ODBC driver supports.
+ * @param dbc database connection handle
+ * @param type type of information to be retrieved
+ * @param val output buffer
+ * @param valMax length of output buffer
+ * @param valLen output length
+ * @result ODBC error code
+ */
+
+SQLRETURN SQL_API
+SQLGetInfo(SQLHDBC dbc, SQLUSMALLINT type, SQLPOINTER val, SQLSMALLINT valMax,
+           SQLSMALLINT *valLen)
+{
+    SQLRETURN ret;
+
+    HDBC_LOCK(dbc);
+    ret = drvgetinfo(dbc, type, val, valMax, valLen);
+    HDBC_UNLOCK(dbc);
+    return ret;
+}
+#endif
+
+#ifdef WINTERFACE
+/**
+ * Return information about what this ODBC driver supports.
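+ *
+ * Illustrative sketch (not part of this change, the ANSI entry point
+ * behaves the same): querying a string info type and a small-integer
+ * info type served by drvgetinfo() above.
+ *
+ *   SQLCHAR name[64];
+ *   SQLSMALLINT len;
+ *   SQLUSMALLINT txn;
+ *   SQLGetInfo(dbc, SQL_DBMS_NAME, name, sizeof (name), &len);
+ *   SQLGetInfo(dbc, SQL_TXN_CAPABLE, &txn, sizeof (txn), &len);
+ *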
+ * @param dbc database connection handle + * @param type type of information to be retrieved + * @param val output buffer + * @param valMax length of output buffer + * @param valLen output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetInfoW(SQLHDBC dbc, SQLUSMALLINT type, SQLPOINTER val, SQLSMALLINT valMax, + SQLSMALLINT *valLen) +{ + SQLRETURN ret; + SQLSMALLINT len = 0; + + HDBC_LOCK(dbc); + ret = drvgetinfo(dbc, type, val, valMax, &len); + HDBC_UNLOCK(dbc); + if (ret == SQL_SUCCESS) { + SQLWCHAR *v = NULL; + + switch (type) { + case SQL_USER_NAME: + case SQL_DRIVER_ODBC_VER: + case SQL_DATA_SOURCE_NAME: + case SQL_DRIVER_NAME: + case SQL_DRIVER_VER: + case SQL_ODBC_VER: + case SQL_SERVER_NAME: + case SQL_DATABASE_NAME: + case SQL_SEARCH_PATTERN_ESCAPE: + case SQL_DBMS_NAME: + case SQL_DBMS_VER: + case SQL_NEED_LONG_DATA_LEN: + case SQL_ROW_UPDATES: + case SQL_ACCESSIBLE_PROCEDURES: + case SQL_PROCEDURES: + case SQL_EXPRESSIONS_IN_ORDERBY: + case SQL_ODBC_SQL_OPT_IEF: + case SQL_LIKE_ESCAPE_CLAUSE: + case SQL_ORDER_BY_COLUMNS_IN_SELECT: + case SQL_OUTER_JOINS: + case SQL_COLUMN_ALIAS: + case SQL_ACCESSIBLE_TABLES: + case SQL_MULT_RESULT_SETS: + case SQL_MULTIPLE_ACTIVE_TXN: + case SQL_MAX_ROW_SIZE_INCLUDES_LONG: + case SQL_DATA_SOURCE_READ_ONLY: +#ifdef SQL_DESCRIBE_PARAMETER + case SQL_DESCRIBE_PARAMETER: +#endif + case SQL_IDENTIFIER_QUOTE_CHAR: + case SQL_OWNER_TERM: + case SQL_PROCEDURE_TERM: + case SQL_QUALIFIER_NAME_SEPARATOR: + case SQL_QUALIFIER_TERM: + case SQL_TABLE_TERM: + case SQL_KEYWORDS: + case SQL_SPECIAL_CHARACTERS: +#ifdef SQL_CATALOG_NAME + case SQL_CATALOG_NAME: +#endif +#ifdef SQL_COLLATION_SEQ + case SQL_COLLATION_SEQ: +#endif + if (val) { + if (len > 0) { + v = uc_from_utf((SQLCHAR *) val, len); + if (v) { + int vmax = valMax / sizeof (SQLWCHAR); + + uc_strncpy(val, v, vmax); + v[len] = 0; + len = min(vmax, uc_strlen(v)); + uc_free(v); + len *= sizeof (SQLWCHAR); + } else { + len = 0; + } + } + if (len <= 0) { + len = 0; + if (valMax >= sizeof (SQLWCHAR)) { + *((SQLWCHAR *)val) = 0; + } + } + } else { + len = 0; + } + break; + } + if (valLen) { + *valLen = len; + } + } + return ret; +} +#endif + +/** + * Return information about supported ODBC API functions. 
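+ *
+ * Illustrative sketch (not part of this change): support for a single
+ * API can be probed directly, or the ODBC 3.x bitmap can be fetched
+ * once and tested with the SQL_FUNC_EXISTS macro from sqlext.h.
+ *
+ *   SQLUSMALLINT sup;
+ *   SQLUSMALLINT bits[SQL_API_ODBC3_ALL_FUNCTIONS_SIZE];
+ *   SQLGetFunctions(dbc, SQL_API_SQLSETPOS, &sup);
+ *   SQLGetFunctions(dbc, SQL_API_ODBC3_ALL_FUNCTIONS, bits);
+ *   if (SQL_FUNC_EXISTS(bits, SQL_API_SQLGETDIAGFIELD) == SQL_TRUE) {
+ *       ...
+ *   }
+ *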
+ * @param dbc database connection handle + * @param func function code to be retrieved + * @param flags output indicator + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetFunctions(SQLHDBC dbc, SQLUSMALLINT func, + SQLUSMALLINT *flags) +{ + DBC *d; + int i; + SQLUSMALLINT exists[100]; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + for (i = 0; i < array_size(exists); i++) { + exists[i] = SQL_FALSE; + } + exists[SQL_API_SQLALLOCCONNECT] = SQL_TRUE; + exists[SQL_API_SQLFETCH] = SQL_TRUE; + exists[SQL_API_SQLALLOCENV] = SQL_TRUE; + exists[SQL_API_SQLFREECONNECT] = SQL_TRUE; + exists[SQL_API_SQLALLOCSTMT] = SQL_TRUE; + exists[SQL_API_SQLFREEENV] = SQL_TRUE; + exists[SQL_API_SQLBINDCOL] = SQL_TRUE; + exists[SQL_API_SQLFREESTMT] = SQL_TRUE; + exists[SQL_API_SQLCANCEL] = SQL_FALSE; + exists[SQL_API_SQLGETCURSORNAME] = SQL_TRUE; + exists[SQL_API_SQLCOLATTRIBUTES] = SQL_TRUE; + exists[SQL_API_SQLNUMRESULTCOLS] = SQL_TRUE; + exists[SQL_API_SQLCONNECT] = SQL_TRUE; + exists[SQL_API_SQLPREPARE] = SQL_TRUE; + exists[SQL_API_SQLDESCRIBECOL] = SQL_TRUE; + exists[SQL_API_SQLROWCOUNT] = SQL_TRUE; + exists[SQL_API_SQLDISCONNECT] = SQL_TRUE; + exists[SQL_API_SQLSETCURSORNAME] = SQL_FALSE; + exists[SQL_API_SQLERROR] = SQL_TRUE; + exists[SQL_API_SQLSETPARAM] = SQL_TRUE; + exists[SQL_API_SQLEXECDIRECT] = SQL_TRUE; + exists[SQL_API_SQLTRANSACT] = SQL_FALSE; + exists[SQL_API_SQLEXECUTE] = SQL_TRUE; + exists[SQL_API_SQLBINDPARAMETER] = SQL_TRUE; + exists[SQL_API_SQLGETTYPEINFO] = SQL_TRUE; + exists[SQL_API_SQLCOLUMNS] = SQL_TRUE; + exists[SQL_API_SQLPARAMDATA] = SQL_TRUE; + exists[SQL_API_SQLDRIVERCONNECT] = SQL_TRUE; + exists[SQL_API_SQLPUTDATA] = SQL_TRUE; + exists[SQL_API_SQLGETCONNECTOPTION] = SQL_TRUE; + exists[SQL_API_SQLSETCONNECTOPTION] = SQL_TRUE; + exists[SQL_API_SQLGETDATA] = SQL_TRUE; + exists[SQL_API_SQLSETSTMTOPTION] = SQL_TRUE; + exists[SQL_API_SQLGETFUNCTIONS] = SQL_TRUE; + exists[SQL_API_SQLSPECIALCOLUMNS] = SQL_TRUE; + exists[SQL_API_SQLGETINFO] = SQL_TRUE; + exists[SQL_API_SQLSTATISTICS] = SQL_TRUE; + exists[SQL_API_SQLGETSTMTOPTION] = SQL_TRUE; + exists[SQL_API_SQLTABLES] = SQL_TRUE; + exists[SQL_API_SQLBROWSECONNECT] = SQL_FALSE; + exists[SQL_API_SQLNUMPARAMS] = SQL_TRUE; + exists[SQL_API_SQLCOLUMNPRIVILEGES] = SQL_FALSE; + exists[SQL_API_SQLPARAMOPTIONS] = SQL_FALSE; + exists[SQL_API_SQLDATASOURCES] = SQL_TRUE; + exists[SQL_API_SQLPRIMARYKEYS] = SQL_TRUE; + exists[SQL_API_SQLDESCRIBEPARAM] = SQL_TRUE; + exists[SQL_API_SQLPROCEDURECOLUMNS] = SQL_TRUE; + exists[SQL_API_SQLDRIVERS] = SQL_FALSE; + exists[SQL_API_SQLPROCEDURES] = SQL_TRUE; + exists[SQL_API_SQLEXTENDEDFETCH] = SQL_TRUE; + exists[SQL_API_SQLSETPOS] = SQL_TRUE; + exists[SQL_API_SQLFOREIGNKEYS] = SQL_TRUE; + exists[SQL_API_SQLSETSCROLLOPTIONS] = SQL_FALSE; + exists[SQL_API_SQLMORERESULTS] = SQL_FALSE; + exists[SQL_API_SQLTABLEPRIVILEGES] = SQL_FALSE; + exists[SQL_API_SQLNATIVESQL] = SQL_FALSE; + if (func == SQL_API_ALL_FUNCTIONS) { + memcpy(flags, exists, sizeof (exists)); + } else if (func == SQL_API_ODBC3_ALL_FUNCTIONS) { + int i; +#define SET_EXISTS(x) \ + flags[(x) >> 4] |= (1 << ((x) & 0xF)) +#define CLR_EXISTS(x) \ + flags[(x) >> 4] &= ~(1 << ((x) & 0xF)) + + memset(flags, 0, + sizeof (SQLUSMALLINT) * SQL_API_ODBC3_ALL_FUNCTIONS_SIZE); + for (i = 0; i < array_size(exists); i++) { + if (exists[i]) { + flags[i >> 4] |= (1 << (i & 0xF)); + } + } + SET_EXISTS(SQL_API_SQLFETCHSCROLL); + SET_EXISTS(SQL_API_SQLALLOCHANDLE); + SET_EXISTS(SQL_API_SQLFREEHANDLE); + 
SET_EXISTS(SQL_API_SQLGETSTMTATTR); + SET_EXISTS(SQL_API_SQLSETSTMTATTR); + SET_EXISTS(SQL_API_SQLGETCONNECTATTR); + SET_EXISTS(SQL_API_SQLSETCONNECTATTR); + SET_EXISTS(SQL_API_SQLGETENVATTR); + SET_EXISTS(SQL_API_SQLSETENVATTR); + SET_EXISTS(SQL_API_SQLBINDPARAM); +#if !defined(HAVE_UNIXODBC) || !HAVE_UNIXODBC + /* + * Some unixODBC versions have problems with + * SQLError() vs. SQLGetDiagRec() with loss + * of error/warning messages. + */ + SET_EXISTS(SQL_API_SQLGETDIAGREC); +#endif + SET_EXISTS(SQL_API_SQLGETDIAGFIELD); + } else { + if (func < array_size(exists)) { + *flags = exists[func]; + } else { + switch (func) { + case SQL_API_SQLALLOCHANDLE: + case SQL_API_SQLFREEHANDLE: + case SQL_API_SQLGETSTMTATTR: + case SQL_API_SQLSETSTMTATTR: + case SQL_API_SQLGETCONNECTATTR: + case SQL_API_SQLSETCONNECTATTR: + case SQL_API_SQLGETENVATTR: + case SQL_API_SQLSETENVATTR: + case SQL_API_SQLBINDPARAM: +#if !defined(HAVE_UNIXODBC) || !HAVE_UNIXODBC + /* + * Some unixODBC versions have problems with + * SQLError() vs. SQLGetDiagRec() with loss + * of error/warning messages. + */ + case SQL_API_SQLGETDIAGREC: +#endif + case SQL_API_SQLGETDIAGFIELD: + *flags = SQL_TRUE; + break; + default: + *flags = SQL_FALSE; + } + } + } + return SQL_SUCCESS; +} + +/** + * Internal allocate HENV. + * @param env pointer to environment handle + * @result ODBC error code + */ + +static SQLRETURN +drvallocenv(SQLHENV *env) +{ + ENV *e; + + if (env == NULL) { + return SQL_INVALID_HANDLE; + } + e = (ENV *) xmalloc(sizeof (ENV)); + if (e == NULL) { + *env = SQL_NULL_HENV; + return SQL_ERROR; + } + e->magic = ENV_MAGIC; + e->ov3 = 0; +#if defined(_WIN32) || defined(_WIN64) + InitializeCriticalSection(&e->cs); + e->owner = 0; +#else + #if defined(ENABLE_NVFS) && ENABLE_NVFS + nvfs_init(); + #endif +#endif + e->dbcs = NULL; + *env = (SQLHENV) e; + return SQL_SUCCESS; +} + +/** + * Allocate HENV. + * @param env pointer to environment handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLAllocEnv(SQLHENV *env) +{ + return drvallocenv(env); +} + +/** + * Internal free HENV. + * @param env environment handle + * @result ODBC error code + */ + +static SQLRETURN +drvfreeenv(SQLHENV env) +{ + ENV *e; + + if (env == SQL_NULL_HENV) { + return SQL_INVALID_HANDLE; + } + e = (ENV *) env; + if (e->magic != ENV_MAGIC) { + return SQL_SUCCESS; + } +#if defined(_WIN32) || defined(_WIN64) + EnterCriticalSection(&e->cs); + e->owner = GetCurrentThreadId(); +#endif + if (e->dbcs) { +#if defined(_WIN32) || defined(_WIN64) + LeaveCriticalSection(&e->cs); + e->owner = 0; +#endif + return SQL_ERROR; + } + e->magic = DEAD_MAGIC; +#if defined(_WIN32) || defined(_WIN64) + e->owner = 0; + LeaveCriticalSection(&e->cs); + DeleteCriticalSection(&e->cs); +#endif + xfree(e); + return SQL_SUCCESS; +} + +/** + * Free HENV. + * @param env environment handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLFreeEnv(SQLHENV env) +{ + return drvfreeenv(env); +} + +/** + * Internal allocate HDBC. 
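+ *
+ * Illustrative sketch (not part of this change): an ODBC 3.x application
+ * normally reaches this routine through the generic allocator, while the
+ * deprecated SQLAllocConnect() below calls it directly; either way the
+ * environment handle must already exist.
+ *
+ *   SQLHDBC dbc;
+ *   SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc);
+ *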
+ * @param env environment handle + * @param dbc pointer to database connection handle + * @result ODBC error code + */ + +static SQLRETURN +drvallocconnect(SQLHENV env, SQLHDBC *dbc) +{ + DBC *d; + ENV *e; + const char *verstr; + int maj = 0, min = 0, lev = 0; + + if (dbc == NULL) { + return SQL_ERROR; + } + d = (DBC *) xmalloc(sizeof (DBC)); + if (d == NULL) { + *dbc = SQL_NULL_HDBC; + return SQL_ERROR; + } + memset(d, 0, sizeof (DBC)); + d->curtype = SQL_CURSOR_FORWARD_ONLY; + d->ov3 = &d->ov3val; + sscanf(HIVE_VERSION, "%d.%d.%d", &maj, &min, &lev); + d->version = verinfo(maj & 0xFF, min & 0xFF, lev & 0xFF); + e = (ENV *) env; +#if defined(_WIN32) || defined(_WIN64) + if (e->magic == ENV_MAGIC) { + EnterCriticalSection(&e->cs); + e->owner = GetCurrentThreadId(); + } +#endif + if (e->magic == ENV_MAGIC) { + DBC *n, *p; + + d->env = e; + d->ov3 = &e->ov3; + p = NULL; + n = e->dbcs; + while (n) { + p = n; + n = n->next; + } + if (p) { + p->next = d; + } else { + e->dbcs = d; + } + } +#if defined(_WIN32) || defined(_WIN64) + if (e->magic == ENV_MAGIC) { + e->owner = 0; + LeaveCriticalSection(&e->cs); + } +#endif + d->autocommit = 0; /* Hive doesn't support transactions */ + d->magic = DBC_MAGIC; + *dbc = (SQLHDBC) d; + drvgetgpps(d); + return SQL_SUCCESS; +} + +/** + * Allocate HDBC. + * @param env environment handle + * @param dbc pointer to database connection handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLAllocConnect(SQLHENV env, SQLHDBC *dbc) +{ + return drvallocconnect(env, dbc); +} + +/** + * Internal free connection (HDBC). + * @param dbc database connection handle + * @result ODBC error code + */ + +static SQLRETURN +drvfreeconnect(SQLHDBC dbc) +{ + DBC *d; + ENV *e; + SQLRETURN ret = SQL_ERROR; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + if (d->magic != DBC_MAGIC) { + return SQL_INVALID_HANDLE; + } + e = d->env; + if (e && e->magic == ENV_MAGIC) { +#if defined(_WIN32) || defined(_WIN64) + EnterCriticalSection(&e->cs); + e->owner = GetCurrentThreadId(); +#endif + } else { + e = NULL; + } + if (NULL != d->hive_conn) { + setstatd(d, -1, "not disconnected", (*d->ov3) ? "HY000" : "S1000"); + goto done; + } + while (NULL != d->stmt) { + freestmt((HSTMT) d->stmt); + } + if (e && e->magic == ENV_MAGIC) { + DBC *n, *p; + + p = NULL; + n = e->dbcs; + while (n) { + if (n == d) { + break; + } + p = n; + n = n->next; + } + if (n) { + if (p) { + p->next = d->next; + } else { + e->dbcs = d->next; + } + } + } + drvrelgpps(d); + d->magic = DEAD_MAGIC; + if (d->trace) { + fclose(d->trace); + } + xfree(d); + ret = SQL_SUCCESS; +done: +#if defined(_WIN32) || defined(_WIN64) + if (e) { + e->owner = 0; + LeaveCriticalSection(&e->cs); + } +#endif + return ret; +} + +/** + * Free connection (HDBC). + * @param dbc database connection handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLFreeConnect(SQLHDBC dbc) +{ + return drvfreeconnect(dbc); +} + +/** + * Internal get connect attribute of HDBC. 
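+ *
+ * Illustrative sketch (not part of this change): SQL_ATTR_CONNECTION_DEAD
+ * is derived from whether a Hive connection object is currently attached
+ * to the DBC, so an application can poll connection liveness cheaply.
+ *
+ *   SQLINTEGER dead = SQL_CD_TRUE;
+ *   SQLGetConnectAttr(dbc, SQL_ATTR_CONNECTION_DEAD, &dead, 0, NULL);
+ *   if (dead == SQL_CD_TRUE) {
+ *       ... reconnect ...
+ *   }
+ *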
+ * @param dbc database connection handle
+ * @param attr option to be retrieved
+ * @param val output buffer
+ * @param bufmax size of output buffer
+ * @param buflen output length
+ * @result ODBC error code
+ */
+
+static SQLRETURN
+drvgetconnectattr(SQLHDBC dbc, SQLINTEGER attr, SQLPOINTER val,
+                  SQLINTEGER bufmax, SQLINTEGER *buflen)
+{
+    DBC *d;
+    SQLINTEGER dummy;
+
+    if (dbc == SQL_NULL_HDBC) {
+        return SQL_INVALID_HANDLE;
+    }
+    d = (DBC *) dbc;
+    if (!val) {
+        val = (SQLPOINTER) &dummy;
+    }
+    if (!buflen) {
+        buflen = &dummy;
+    }
+    switch (attr) {
+    case SQL_ATTR_CONNECTION_DEAD:
+    *((SQLINTEGER *) val) = (NULL != d->hive_conn) ? SQL_CD_FALSE : SQL_CD_TRUE;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_ACCESS_MODE:
+    *((SQLINTEGER *) val) = SQL_MODE_READ_WRITE;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_AUTOCOMMIT:
+    *((SQLINTEGER *) val) =
+        d->autocommit ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_LOGIN_TIMEOUT:
+    *((SQLINTEGER *) val) = d->login_timeout;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_ODBC_CURSORS:
+    *((SQLINTEGER *) val) = SQL_CUR_USE_DRIVER;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_PACKET_SIZE:
+    *((SQLINTEGER *) val) = 16384;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_TXN_ISOLATION:
+    *((SQLINTEGER *) val) = SQL_TXN_READ_UNCOMMITTED;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_TRACE:
+    case SQL_ATTR_QUIET_MODE:
+    case SQL_ATTR_TRANSLATE_OPTION:
+    *((SQLINTEGER *) val) = 0;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_CURRENT_CATALOG:
+    case SQL_ATTR_TRACEFILE:
+    *((SQLCHAR *) val) = 0;
+    *buflen = SQL_NTS;
+    break;
+    case SQL_ATTR_ASYNC_ENABLE:
+#ifdef SQL_ATTR_ASYNC_DBC_FUNCTIONS_ENABLE
+    case SQL_ATTR_ASYNC_DBC_FUNCTIONS_ENABLE:
+#endif
+    *((SQLINTEGER *) val) = SQL_ASYNC_ENABLE_OFF;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_AUTO_IPD:
+    case SQL_ATTR_METADATA_ID:
+    *((SQLINTEGER *) val) = SQL_FALSE;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_NOSCAN:
+    *((SQLINTEGER *) val) = SQL_NOSCAN_ON;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_CONCURRENCY:
+    *((SQLINTEGER *) val) = SQL_CONCUR_LOCK;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+#ifdef SQL_ATTR_CURSOR_SENSITIVITY
+    case SQL_ATTR_CURSOR_SENSITIVITY:
+    *((SQLINTEGER *) val) = SQL_UNSPECIFIED;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+#endif
+    case SQL_ATTR_SIMULATE_CURSOR:
+    *((SQLINTEGER *) val) = SQL_SC_NON_UNIQUE;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_MAX_ROWS:
+    *((SQLINTEGER *) val) = 0;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_MAX_LENGTH:
+    *((SQLINTEGER *) val) = 1000000000;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    case SQL_ATTR_CURSOR_TYPE:
+    *((SQLINTEGER *) val) = d->curtype;
+    *buflen = sizeof (SQLINTEGER);
+    break;
+    default:
+    *((SQLINTEGER *) val) = 0;
+    *buflen = sizeof (SQLINTEGER);
+    setstatd(d, -1, "unsupported connect attribute %d",
+             (*d->ov3) ? "HYC00" : "S1C00", (int) attr);
+    return SQL_ERROR;
+    }
+    return SQL_SUCCESS;
+}
+
+#ifndef WINTERFACE
+/**
+ * Get connect attribute of HDBC.
+ * @param dbc database connection handle + * @param attr option to be retrieved + * @param val output buffer + * @param bufmax size of output buffer + * @param buflen output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetConnectAttr(SQLHDBC dbc, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER bufmax, SQLINTEGER *buflen) +{ + SQLRETURN ret; + + TRACE_FUNC_START(); + + HDBC_LOCK(dbc); + ret = drvgetconnectattr(dbc, attr, val, bufmax, buflen); + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Get connect attribute of HDBC (UNICODE version). + * @param dbc database connection handle + * @param attr option to be retrieved + * @param val output buffer + * @param bufmax size of output buffer + * @param buflen output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetConnectAttrW(SQLHDBC dbc, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER bufmax, SQLINTEGER *buflen) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvgetconnectattr(dbc, attr, val, bufmax, buflen); + if (SQL_SUCCEEDED(ret)) { + switch (attr) { + case SQL_ATTR_TRACEFILE: + case SQL_ATTR_CURRENT_CATALOG: + case SQL_ATTR_TRANSLATE_LIB: + if (val && bufmax >= sizeof (SQLWCHAR)) { + *(SQLWCHAR *) val = 0; + } + break; + } + } + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +/** + * Internal set connect attribute of HDBC. + * @param dbc database connection handle + * @param attr option to be set + * @param val option value + * @param len size of option + * @result ODBC error code + * At the moment, we can't modify any of the connection attribute for the Hive. + * changing any values will result in SQL_SUCCESS_WITH_INFO with + * SQLState set to HYC00 + */ + +static SQLRETURN +drvsetconnectattr(SQLHDBC dbc, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER len) +{ + DBC *d; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + switch (attr) { + case SQL_ATTR_LOGIN_TIMEOUT: + d->login_timeout = 0; /* Thrift socket issues with non-blocking connections. ignore the login_timeout for now */ + return SQL_SUCCESS; + default: + setstatd(d, -1, "not implemented", "HYC00"); + return SQL_SUCCESS_WITH_INFO; + } + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Set connect attribute of HDBC. + * @param dbc database connection handle + * @param attr option to be set + * @param val option value + * @param len size of option + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetConnectAttr(SQLHDBC dbc, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER len) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvsetconnectattr(dbc, attr, val, len); + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Set connect attribute of HDBC (UNICODE version). + * @param dbc database connection handle + * @param attr option to be set + * @param val option value + * @param len size of option + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetConnectAttrW(SQLHDBC dbc, SQLINTEGER attr, SQLPOINTER val, + SQLINTEGER len) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvsetconnectattr(dbc, attr, val, len); + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +/** + * Internal get connect option of HDBC. 
+ * @param dbc database connection handle + * @param opt option to be retrieved + * @param param output buffer + * @result ODBC error code + */ + +static SQLRETURN +drvgetconnectoption(SQLHDBC dbc, SQLUSMALLINT opt, SQLPOINTER param) +{ + DBC *d; + SQLINTEGER dummy; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + if (!param) { + param = (SQLPOINTER) &dummy; + } + switch (opt) { + case SQL_ACCESS_MODE: + *((SQLINTEGER *) param) = SQL_MODE_READ_WRITE; + break; + case SQL_AUTOCOMMIT: + *((SQLINTEGER *) param) = + d->autocommit ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF; + break; + case SQL_TXN_ISOLATION: + *((SQLINTEGER *) param) = SQL_TXN_READ_UNCOMMITTED; + break; + case SQL_LOGIN_TIMEOUT: + *((SQLINTEGER *) param) = d->login_timeout; + break; + case SQL_ODBC_CURSORS: + *((SQLINTEGER *) param) = SQL_CUR_USE_DRIVER; + break; + case SQL_PACKET_SIZE: + *((SQLINTEGER *) param) = 16384; + break; + case SQL_OPT_TRACE: + case SQL_QUIET_MODE: + case SQL_TRANSLATE_OPTION: + *((SQLINTEGER *) param) = 0; + break; + case SQL_OPT_TRACEFILE: + case SQL_TRANSLATE_DLL: + *((SQLCHAR*) param) = 0; + break; + case SQL_ASYNC_ENABLE: + *((SQLINTEGER *) param) = SQL_ASYNC_ENABLE_OFF; + break; + default: + *((SQLINTEGER *) param) = 0; + setstatd(d, -1, "unsupported connect option %d", + (*d->ov3) ? "HYC00" : "S1C00", opt); + return SQL_ERROR; + } + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Get connect option of HDBC. + * @param dbc database connection handle + * @param opt option to be retrieved + * @param param output buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetConnectOption(SQLHDBC dbc, SQLUSMALLINT opt, SQLPOINTER param) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvgetconnectoption(dbc, opt, param); + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Get connect option of HDBC (UNICODE version). + * @param dbc database connection handle + * @param opt option to be retrieved + * @param param output buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetConnectOptionW(SQLHDBC dbc, SQLUSMALLINT opt, SQLPOINTER param) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvgetconnectoption(dbc, opt, param); + if (SQL_SUCCEEDED(ret)) { + switch (opt) { + case SQL_OPT_TRACEFILE: + case SQL_CURRENT_QUALIFIER: + case SQL_TRANSLATE_DLL: + if (param) { + *(SQLWCHAR *) param = 0; + } + break; + } + } + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +/** + * Internal set option on HDBC. + * @param dbc database connection handle + * @param opt option to be set + * @param param option value + * @result ODBC error code + * + * At the moment, we can't modify any of the connection attribute for the Hive. + * changing any values will result in SQL_SUCCESS_WITH_INFO with + * SQLState set to IM001 + */ + +static SQLRETURN +drvsetconnectoption(SQLHDBC dbc, SQLUSMALLINT opt, SQLUINTEGER param) +{ + DBC *d; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + switch (opt) { +// + case SQL_LOGIN_TIMEOUT: + d->login_timeout = 0; /* Thrift socket issues with non-blocking connections. ignore the login_timeout for now */ + return SQL_SUCCESS; + case SQL_QUIET_MODE: + return SQL_SUCCESS; /* we anyway don't generate any additional message boxes */ + default: + setstatd(d, -1, "not implemented", "HYC00"); + return SQL_SUCCESS_WITH_INFO; + } + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Set option on HDBC. 
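+ *
+ * Illustrative sketch (not part of this change): a legacy ODBC 2.x
+ * application may still set options through this entry point, e.g. a
+ * login timeout; drvsetconnectoption() above currently ignores the value
+ * because of the Thrift socket limitation noted there.
+ *
+ *   SQLSetConnectOption(dbc, SQL_LOGIN_TIMEOUT, (SQLULEN) 30);
+ *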
+ * @param dbc database connection handle
+ * @param opt option to be set
+ * @param param option value
+ * @result ODBC error code
+ */
+
+SQLRETURN SQL_API
+SQLSetConnectOption(SQLHDBC dbc, SQLUSMALLINT opt, SQLULEN param)
+{
+    SQLRETURN ret;
+
+    HDBC_LOCK(dbc);
+    ret = drvsetconnectoption(dbc, opt, param);
+    HDBC_UNLOCK(dbc);
+    return ret;
+}
+#endif
+
+#ifdef WINTERFACE
+/**
+ * Set option on HDBC (UNICODE version).
+ * @param dbc database connection handle
+ * @param opt option to be set
+ * @param param option value
+ * @result ODBC error code
+ */
+
+SQLRETURN SQL_API
+SQLSetConnectOptionW(SQLHDBC dbc, SQLUSMALLINT opt, SQLULEN param)
+{
+    SQLRETURN ret;
+
+    HDBC_LOCK(dbc);
+    ret = drvsetconnectoption(dbc, opt, param);
+    HDBC_UNLOCK(dbc);
+    return ret;
+}
+#endif
+
+#if defined(WITHOUT_DRIVERMGR) || (!defined(_WIN32) && !defined(_WIN64))
+// || (!defined(WINGUI) && (defined(_WIN32)||defined(_WIN64)))
+
+/**
+ * Handling of SQLConnect() connection attributes
+ * for standalone operation without driver manager.
+ * @param dsn DSN/driver connection string
+ * @param attr attribute string to be retrieved
+ * @param out output buffer
+ * @param outLen length of output buffer
+ * @result true or false
+ */
+
+static int
+getdsnattr(char *dsn, char *attr, char *out, int outLen)
+{
+    char *str = dsn, *start;
+    int len = strlen(attr);
+
+    while (*str) {
+        while (*str && *str == ';') {
+            ++str;
+        }
+        start = str;
+        if ((str = strchr(str, '=')) == NULL) {
+            return 0;
+        }
+        if (str - start == len && strncasecmp(start, attr, len) == 0) {
+            start = ++str;
+            while (*str && *str != ';') {
+                ++str;
+            }
+            len = min(outLen - 1, str - start);
+            strncpy(out, start, len);
+            out[len] = '\0';
+            return 1;
+        }
+        while (*str && *str != ';') {
+            ++str;
+        }
+    }
+    return 0;
+}
+#endif
+
+/**
+ * Internal connect to Hive database.
+ * @param dbc database connection handle
+ * @param dsn DSN string
+ * @param dsnLen length of DSN string or SQL_NTS
+ * @param isu true/false: DSN string is UTF8 encoded
+ * @result ODBC error code
+ */
+
+static SQLRETURN
+drvconnect(SQLHDBC dbc, SQLCHAR *dsn, SQLSMALLINT dsnLen, int isu)
+{
+    DBC *d;
+    int len;
+    SQLRETURN ret = SQL_SUCCESS;
+    char buf[SQL_MAX_MESSAGE_LENGTH];
+    char dbname[SQL_MAX_MESSAGE_LENGTH / 4];
+    char host[SQL_MAX_MESSAGE_LENGTH / 4];
+    char port[SQL_MAX_MESSAGE_LENGTH / 4];
+    char framed[SQL_MAX_MESSAGE_LENGTH / 4];
+    char tracef[SQL_MAX_MESSAGE_LENGTH];
+    hive_err_info hive_error;
+
+    if (dbc == SQL_NULL_HDBC) {
+        return SQL_INVALID_HANDLE;
+    }
+    d = (DBC *) dbc;
+    if (d->magic != DBC_MAGIC) {
+        return SQL_INVALID_HANDLE;
+    }
+    if (NULL != d->hive_conn) {
+        setstatd(d, -1, "connection already established", "08002");
+        return SQL_ERROR;
+    }
+    buf[0] = '\0';
+    if (dsnLen == SQL_NTS) {
+        len = sizeof (buf) - 1;
+    } else {
+        len = min(sizeof (buf) - 1, dsnLen);
+    }
+    if (dsn != NULL) {
+        strncpy(buf, (char *) dsn, len);
+    }
+    buf[len] = '\0';
+    if (!strlen(buf)) {
+        setstatd(d, -1, "invalid DSN", (*d->ov3) ? "HY090" : "S1090");
+        return SQL_ERROR;
+    }
+#if defined(_WIN32) || defined(_WIN64)
+    /*
+     * When DSN is in UTF it must be converted to ANSI
+     * here for ANSI SQLGetPrivateProfileString()
+     */
+    if (isu) {
+        char *cdsn = utf_to_wmb(buf, len);
+
+        if (NULL == cdsn) {
+            setstatd(d, -1, "out of memory", (*d->ov3) ? 
"HY000" : "S1000"); + return SQL_ERROR; + } + strcpy(buf, cdsn); + uc_free(cdsn); + } +#endif +#ifdef WITHOUT_DRIVERMGR + dbname[0] = '\0'; + getdsnattr(buf, "DATABASE", dbname, sizeof (dbname)); + if (!strlen(dbname)) { + strncpy(dbname, buf, sizeof (dbname)); + dbname[sizeof (dbname) - 1] = '\0'; + } + + host[0] = '\0'; + getdsnattr(buf, "HOST", host, sizeof(host)); + if (!strlen(host)) { + strncpy(host, buf, sizeof(host)); + host[sizeof(host)-1] = '\0'; + } + + port[0] = '\0'; + getdsnattr(buf, "PORT", port, sizeof(port)); + if (!strlen(port)) { + strncpy(port, buf, sizeof(port)); + port[sizeof(port)-1] = '\0'; + } + + framed[0] = '\0'; + getdsnattr(buf, "FRAMED", framed, sizeof(framed)); + if (!strlen(framed)) { + strncpy(framed, buf, sizeof(framed)); + framed[sizeof(framed)-1] = '\0'; + } + + tracef[0] = '\0'; + getdsnattr(buf, "tracefile", tracef, sizeof (tracef)); +#else /* WITHOUT_DRIVERMGR */ + SQLGetPrivateProfileString(buf, "DATABASE", DEFAULT_DATABASE, + dbname, sizeof(dbname), ODBC_INI); + #if defined(_WIN32) || defined(_WIN64) + /* database name read from registry is not UTF8 !!! */ + isu = 0; + #endif + SQLGetPrivateProfileString(buf, "HOST", DEFAULT_HOST, + host, sizeof(host), ODBC_INI); + SQLGetPrivateProfileString(buf, "PORT", DEFAULT_PORT, + port, sizeof(port), ODBC_INI); + SQLGetPrivateProfileString(buf, "FRAMED", DEFAULT_FRAMED, + framed, sizeof(framed), ODBC_INI); + SQLGetPrivateProfileString(buf, "tracefile", DEFAULT_TRACEFILE, + tracef, sizeof (tracef), ODBC_INI); +#endif /* WITHOUT_DRIVERMGR */ + /* + if (strlen(tracef)) { + d->trace = fopen(tracef, "a+"); + } + */ + if (!dbname || !dbname[0]) + d->dbname = strdup(DEFAULT_DATABASE); + else + d->dbname = strdup(dbname); + d->hive_conn = DBOpenConnection(d->dbname, host, atoi(port), HIVECLIENT_BUFFERED_SOCKET, + &hive_error, sizeof(hive_error.err_buf), d->login_timeout); + if (NULL == d->hive_conn) { + setstatd(d, -1, "Communication link failure", "08S01"); + return SQL_ERROR; + } + d->dsn = strdup(buf); + d->dsn_host = strdup(host); + d->dsn_port = strdup(port); + return ret; +} + +#ifndef WINTERFACE +/** + * Connect to SQLite database. + * @param dbc database connection handle + * @param dsn DSN string + * @param dsnLen length of DSN string or SQL_NTS + * @param uid user id string or NULL + * @param uidLen length of user id string or SQL_NTS + * @param pass password string or NULL + * @param passLen length of password string or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLConnect(SQLHDBC dbc, SQLCHAR *dsn, SQLSMALLINT dsnLen, + SQLCHAR *uid, SQLSMALLINT uidLen, + SQLCHAR *pass, SQLSMALLINT passLen) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvconnect(dbc, dsn, dsnLen, 0); + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Connect to SQLite database. + * @param dbc database connection handle + * @param dsn DSN string + * @param dsnLen length of DSN string or SQL_NTS + * @param uid user id string or NULL + * @param uidLen length of user id string or SQL_NTS + * @param pass password string or NULL + * @param passLen length of password string or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLConnectW(SQLHDBC dbc, SQLWCHAR *dsn, SQLSMALLINT dsnLen, + SQLWCHAR *uid, SQLSMALLINT uidLen, + SQLWCHAR *pass, SQLSMALLINT passLen) +{ + char *dsna = NULL; + SQLRETURN ret; + + HDBC_LOCK(dbc); + if (dsn) { + dsna = uc_to_utf_c(dsn, dsnLen); + if (!dsna) { + DBC *d = (DBC *) dbc; + + setstatd(d, -1, "out of memory", (*d->ov3) ? 
"HY000" : "S1000"); + ret = SQL_ERROR; + goto done; + } + } + ret = drvconnect(dbc, (SQLCHAR *) dsna, SQL_NTS, 1); +done: + HDBC_UNLOCK(dbc); + uc_free(dsna); + return ret; +} +#endif + +/** + * Internal disconnect given HDBC. + * @param dbc database connection handle + * @result ODBC error code + */ + +static SQLRETURN +drvdisconnect(SQLHDBC dbc) +{ + DBC *d; + hive_err_info hive_error; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + if (d->magic != DBC_MAGIC) { + return SQL_INVALID_HANDLE; + } + if (NULL != d->hive_conn) { + if (d->trace) { + fprintf(d->trace, "-- Hive Close Connection: '%s'\n", d->dbname); + fflush(d->trace); + } + // FIXME need to check return value of DBCloseConnection + DBCloseConnection(d->hive_conn, &hive_error, sizeof(hive_error.err_buf)); + d->hive_conn = NULL; + } + freep(&d->dbname); + freep(&d->dsn); + freep(&d->dsn_host); + freep(&d->dsn_port); + + return SQL_SUCCESS; +} + +/** + * Disconnect given HDBC. + * @param dbc database connection handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLDisconnect(SQLHDBC dbc) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvdisconnect(dbc); + HDBC_UNLOCK(dbc); + return ret; +} + +#if defined(WITHOUT_DRIVERMGR) || (!defined(_WIN32) && !defined(_WIN64)) +// || (!defined(WINGUI) && (defined(_WIN32)||defined(_WIN64))) + +/** + * Internal standalone (w/o driver manager) database connect. + * @param dbc database connection handle + * @param hwnd dummy window handle or NULL + * @param connIn driver connect input string + * @param connInLen length of driver connect input string or SQL_NTS + * @param connOut driver connect output string + * @param connOutMax length of driver connect output string + * @param connOutLen output length of driver connect output string + * @param drvcompl completion type + * @result ODBC error code + */ + +static SQLRETURN +drvdriverconnect(SQLHDBC dbc, SQLHWND hwnd, + SQLCHAR *connIn, SQLSMALLINT connInLen, + SQLCHAR *connOut, SQLSMALLINT connOutMax, + SQLSMALLINT *connOutLen, SQLUSMALLINT drvcompl) +{ + DBC *d; + int len; + char buf[SQL_MAX_MESSAGE_LENGTH * 2]; + char dsn[SQL_MAX_MESSAGE_LENGTH]; + char dbname[SQL_MAX_MESSAGE_LENGTH]; + char host[SQL_MAX_MESSAGE_LENGTH]; + char port[SQL_MAX_MESSAGE_LENGTH]; + char framed[SQL_MAX_MESSAGE_LENGTH]; + char tracef[SQL_MAX_MESSAGE_LENGTH]; + hive_err_info hive_error; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + if (drvcompl != SQL_DRIVER_COMPLETE && + drvcompl != SQL_DRIVER_COMPLETE_REQUIRED && + drvcompl != SQL_DRIVER_PROMPT && + drvcompl != SQL_DRIVER_NOPROMPT) { + return SQL_NO_DATA; + } + d = (DBC *) dbc; + if (NULL != d->hive_conn) { + setstatd(d, -1, "connection already established", "08002"); + return SQL_ERROR; + } + buf[0] = '\0'; + if (connInLen == SQL_NTS) { + len = sizeof (buf) - 1; + } else { + len = min(connInLen, sizeof (buf) - 1); + } + if (connIn != NULL) { + strncpy(buf, (char *) connIn, len); + } + buf[len] = '\0'; + if (!strlen(buf)) { + setstatd(d, -1, "invalid connect attributes", + (*d->ov3) ? 
"HY090" : "S1090"); + return SQL_ERROR; + } + dsn[0] = '\0'; + getdsnattr(buf, "DSN", dsn, sizeof (dsn)); + + /* special case: connIn is sole DSN value without keywords */ + if (!dsn[0] && !strchr(buf, ';') && !strchr(buf, '=')) { + strncpy(dsn, buf, sizeof (dsn) - 1); + dsn[sizeof (dsn) - 1] = '\0'; + } + + + dbname[0] = '\0'; + getdsnattr(buf, "DATABASE", dbname, sizeof (dbname)); + #ifndef WITHOUT_DRIVERMGR + if (strlen(dsn) && !strlen(dbname)) { + SQLGetPrivateProfileString(dsn, "DATABASE", DEFAULT_DATABASE, + dbname, sizeof (dbname), ODBC_INI); + } + #endif + + host[0] = '\0'; + getdsnattr(buf, "HOST", host, sizeof(host)); + #ifndef WITHOUT_DRIVERMGR + if (strlen(dsn) && !strlen(host)) { + SQLGetPrivateProfileString(dsn, "HOST", DEFAULT_HOST, + host, sizeof(host), ODBC_INI); + } + #endif + + port[0] = '\0'; + getdsnattr(buf, "PORT", port, sizeof(port)); + #ifndef WITHOUT_DRIVERMGR + if (strlen(dsn) && !strlen(port)) { + SQLGetPrivateProfileString(dsn, "PORT", DEFAULT_PORT, + port, sizeof(port), ODBC_INI); + } + #endif + + framed[0] = '\0'; + getdsnattr(buf, "FRAMED", framed, sizeof(framed)); + #ifndef WITHOUT_DRIVERMGR + if (strlen(dsn) && !strlen(port)) { + SQLGetPrivateProfileString(dsn, "FRAMED", DEFAULT_FRAMED, + framed, sizeof(framed), ODBC_INI); + } + #endif + + tracef[0] = '\0'; + getdsnattr(buf, "TRACEFILE", tracef, sizeof (tracef)); + #ifndef WITHOUT_DRIVERMGR + if (strlen(dsn) && !strlen(tracef)) { + SQLGetPrivateProfileString(dsn, "TRACEFILE", DEFAULT_TRACEFILE, + tracef, sizeof (tracef), ODBC_INI); + } + #endif + + if (connOut || connOutLen) { + int count; + + buf[0] = '\0'; + count = snprintf(buf, sizeof (buf), + "DSN=%s;DATABASE=%s;HOST=%s;PORT=%s;" + "FRAMED=%s;TRACEFILE=%s", + dsn, dbname, host, port, framed, tracef); + if (count < 0) { + buf[sizeof(buf) - 1] = '\0'; + } + len = min(connOutMax - 1, strlen(buf)); + if (connOut) { + strncpy((char *) connOut, buf, len); + connOut[len] = '\0'; + } + if (connOutLen) { + *connOutLen = len; + } + } + /* + if (strlen(tracef)) { + d->trace = fopen(tracef, "a+"); + } + */ + d->hive_conn = DBOpenConnection(dbname, host, atoi(port), HIVECLIENT_BUFFERED_SOCKET, + &hive_error, sizeof(hive_error.err_buf), d->login_timeout); + if (NULL == d->hive_conn) { + setstatd(d, -1, "Communication link failure", "08S01"); + return SQL_ERROR; + } + return SQL_SUCCESS; +} +#endif + +/** + * Internal free function for HSTMT. + * @param stmt statement handle + * @result ODBC error code7043 + + */ + +static SQLRETURN +freestmt(SQLHSTMT stmt) +{ + STMT *s; + DBC *d; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + + s = (STMT *) stmt; + freeresult(s, 1); + freep(&s->query); + d = (DBC *) s->dbc; + if (d && d->magic == DBC_MAGIC) { + STMT *p, *n; + + p = NULL; + n = d->stmt; + while (n) { + if (n == s) { + break; + } + p = n; + n = n->next; + } + if (n) { + if (p) { + p->next = s->next; + } else { + d->stmt = s->next; + } + } + } + freeparams(s); + freep(&s->bindparms); + if (s->row_status0 != &s->row_status1) { + freep(&s->row_status0); + s->rowset_size = 1; + s->row_status0 = &s->row_status1; + } + xfree(s); + return SQL_SUCCESS; +} + +/** + * Allocate HSTMT given HDBC (driver internal version). 
+ * @param dbc database connection handle + * @param stmt pointer to statement handle + * @result ODBC error code + */ + +static SQLRETURN +drvallocstmt(SQLHDBC dbc, SQLHSTMT *stmt) +{ + DBC *d; + STMT *s, *sl, *pl; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + if (d->magic != DBC_MAGIC || stmt == NULL) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) xmalloc(sizeof (STMT)); + if (s == NULL) { + *stmt = SQL_NULL_HSTMT; + return SQL_ERROR; + } + *stmt = (SQLHSTMT) s; + memset(s, 0, sizeof (STMT)); + s->dbc = dbc; + s->ov3 = d->ov3; + s->nowchar[0] = d->nowchar; + s->nowchar[1] = 0; + s->curtype = d->curtype; + s->row_status0 = &s->row_status1; + s->row_count = &s->row_count0; + s->rowset_size = 1; + s->longnames = d->longnames; + s->retr_data = SQL_RD_ON; + s->max_rows = 0; + s->bind_type = SQL_BIND_BY_COLUMN; + s->bind_offs = NULL; + s->paramset_size = 1; + s->parm_bind_type = SQL_PARAM_BIND_BY_COLUMN; +#ifdef _WIN64 + sprintf((char *) s->cursorname, "CUR_%I64X", (SQLUBIGINT) *stmt); +#else + sprintf((char *) s->cursorname, "CUR_%08lX", (long) *stmt); +#endif + sl = d->stmt; + pl = NULL; + while (sl) { + pl = sl; + sl = sl->next; + } + if (pl) { + pl->next = s; + } else { + d->stmt = s; + } + return SQL_SUCCESS; +} + +/** + * Allocate HSTMT given HDBC. + * @param dbc database connection handle + * @param stmt pointer to statement handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLAllocStmt(SQLHDBC dbc, SQLHSTMT *stmt) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvallocstmt(dbc, stmt); + HDBC_UNLOCK(dbc); + return ret; +} + +/** + * Internal function to perform certain kinds of free/close on STMT. + * @param stmt statement handle + * @param opt SQL_RESET_PARAMS, SQL_UNBIND, SQL_CLOSE, or SQL_DROP + * @result ODBC error code + */ + +static SQLRETURN +drvfreestmt(SQLHSTMT stmt, SQLUSMALLINT opt) +{ + STMT *s; + SQLRETURN ret = SQL_SUCCESS; + SQLHDBC dbc; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + + HSTMT_LOCK(stmt); + s = (STMT *) stmt; + dbc = s->dbc; + switch (opt) { + case SQL_RESET_PARAMS: + freeparams(s); + break; + case SQL_UNBIND: + unbindcols(s); + break; + case SQL_CLOSE: + freeresult(s, 0); + break; + case SQL_DROP: + ret = freestmt(stmt); + break; + default: + setstat(s, -1, "unsupported option", (*s->ov3) ? "HYC00" : "S1C00"); + ret = SQL_ERROR; + break; + } + HDBC_UNLOCK(dbc); + return ret; +} + +/** + * Free HSTMT. + * @param stmt statement handle + * @param opt SQL_RESET_PARAMS, SQL_UNBIND, SQL_CLOSE, or SQL_DROP + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLFreeStmt(SQLHSTMT stmt, SQLUSMALLINT opt) +{ + return drvfreestmt(stmt, opt); +} + +/** + * Cancel HSTMT closing cursor. + * @param stmt statement handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLCancel(SQLHSTMT stmt) +{ + /* NOT IMPLEMENTED */ + return SQL_ERROR; +} + +/** + * Internal function to get cursor name of STMT. 
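+ *
+ * Illustrative use, a sketch ("hstmt" is an assumed statement handle): in
+ * this driver only the UNICODE entry point SQLGetCursorNameW() is wired to
+ * this helper; it returns the auto-generated "CUR_..." name unless the
+ * application changed it with SQLSetCursorNameW():
+ *
+ *   SQLWCHAR name[64];
+ *   SQLSMALLINT namelen = 0;
+ *   SQLGetCursorNameW(hstmt, name, sizeof (name) / sizeof (name[0]), &namelen);
+ *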
+ * @param stmt statement handle + * @param cursor output buffer + * @param buflen length of output buffer + * @param lenp output length + * @result ODBC error code + */ + +static SQLRETURN +drvgetcursorname(SQLHSTMT stmt, SQLCHAR *cursor, SQLSMALLINT buflen, + SQLSMALLINT *lenp) +{ + STMT *s; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (lenp && !cursor) { + *lenp = strlen((char *) s->cursorname); + return SQL_SUCCESS; + } + if (cursor) { + if (buflen > 0) { + strncpy((char *) cursor, (char *) s->cursorname, buflen - 1); + cursor[buflen - 1] = '\0'; + } + if (lenp) { + *lenp = min(strlen((char *) s->cursorname), buflen - 1); + } + } + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Get cursor name of STMT. + * @param stmt statement handle + * @param cursor output buffer + * @param buflen length of output buffer + * @param lenp output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetCursorName(SQLHSTMT stmt, SQLCHAR *cursor, SQLSMALLINT buflen, + SQLSMALLINT *lenp) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Get cursor name of STMT (UNICODE version). + * @param stmt statement handle + * @param cursor output buffer + * @param buflen length of output buffer + * @param lenp output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetCursorNameW(SQLHSTMT stmt, SQLWCHAR *cursor, SQLSMALLINT buflen, + SQLSMALLINT *lenp) +{ + SQLRETURN ret; + SQLSMALLINT len = 0; + + HSTMT_LOCK(stmt); + ret = drvgetcursorname(stmt, (SQLCHAR *) cursor, buflen, &len); + if (ret == SQL_SUCCESS) { + SQLWCHAR *c = NULL; + + if (cursor) { + c = uc_from_utf((SQLCHAR *) cursor, len); + if (!c) { + ret = nomem((STMT *) stmt); + goto done; + } + c[len] = 0; + len = uc_strlen(c); + if (buflen > 0) { + uc_strncpy(cursor, c, buflen - 1); + cursor[buflen - 1] = 0; + } + uc_free(c); + } + if (lenp) { + *lenp = min(len, buflen - 1); + } + } +done: + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Internal function to set cursor name on STMT. + * @param stmt statement handle + * @param cursor new cursor name + * @param len length of cursor name or SQL_NTS + * @result ODBC error code + */ + +static SQLRETURN +drvsetcursorname(SQLHSTMT stmt, SQLCHAR *cursor, SQLSMALLINT len) +{ + STMT *s; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (!cursor || + !((cursor[0] >= 'A' && cursor[0] <= 'Z') || + (cursor[0] >= 'a' && cursor[0] <= 'z'))) { + setstat(s, -1, "invalid cursor name", (*s->ov3) ? "HYC00" : "S1C00"); + return SQL_ERROR; + } + if (len == SQL_NTS) { + len = sizeof (s->cursorname) - 1; + } else { + len = min(sizeof (s->cursorname) - 1, len); + } + strncpy((char *) s->cursorname, (char *) cursor, len); + s->cursorname[len] = '\0'; + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Set cursor name on STMT. + * @param stmt statement handle + * @param cursor new cursor name + * @param len length of cursor name or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetCursorName(SQLHSTMT stmt, SQLCHAR *cursor, SQLSMALLINT len) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvunimplstmt(stmt); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Set cursor name on STMT (UNICODE version). 
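+ *
+ * Illustrative use, a sketch ("hstmt" and the SQLWCHAR buffer "wname" are
+ * assumed): the name must start with a letter, otherwise the driver reports
+ * "invalid cursor name" (SQLSTATE HYC00/S1C00); the ANSI SQLSetCursorName()
+ * entry point is left unimplemented here:
+ *
+ *   SQLSetCursorNameW(hstmt, wname, SQL_NTS);
+ *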
+ * @param stmt statement handle + * @param cursor new cursor name + * @param len length of cursor name or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLSetCursorNameW(SQLHSTMT stmt, SQLWCHAR *cursor, SQLSMALLINT len) +{ + char *c = NULL; + SQLRETURN ret; + + HSTMT_LOCK(stmt); + if (cursor) { + c = uc_to_utf_c(cursor, len); + if (!c) { + ret = nomem((STMT *) stmt); + goto done; + } + } + ret = drvsetcursorname(stmt, (SQLCHAR *) c, SQL_NTS); +done: + HSTMT_UNLOCK(stmt); + uc_free(c); + return ret; +} +#endif + +/** + * Close open cursor. + * @param stmt statement handle + * @return ODBC error code + */ + +SQLRETURN SQL_API +SQLCloseCursor(SQLHSTMT stmt) +{ + return drvfreestmt(stmt, SQL_CLOSE); +} + +/** + * Allocate a HENV, HDBC, or HSTMT handle. + * @param type handle type + * @param input input handle (HENV, HDBC) + * @param output pointer to output handle (HENV, HDBC, HSTMT) + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLAllocHandle(SQLSMALLINT type, SQLHANDLE input, SQLHANDLE *output) +{ + SQLRETURN ret; + + switch (type) { + case SQL_HANDLE_ENV: + ret = drvallocenv((SQLHENV *) output); + if (ret == SQL_SUCCESS) { + ENV *e = (ENV *) *output; + + if (e && e->magic == ENV_MAGIC) { + e->ov3 = 1; + } + } + return ret; + case SQL_HANDLE_DBC: + return drvallocconnect((SQLHENV) input, (SQLHDBC *) output); + case SQL_HANDLE_STMT: + HDBC_LOCK((SQLHDBC) input); + ret = drvallocstmt((SQLHDBC) input, (SQLHSTMT *) output); + HDBC_UNLOCK((SQLHDBC) input); + return ret; + } + return SQL_ERROR; +} + +/** + * Free a HENV, HDBC, or HSTMT handle. + * @param type handle type + * @param h handle (HENV, HDBC, or HSTMT) + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLFreeHandle(SQLSMALLINT type, SQLHANDLE h) +{ + switch (type) { + case SQL_HANDLE_ENV: + return drvfreeenv((SQLHENV) h); + case SQL_HANDLE_DBC: + return drvfreeconnect((SQLHDBC) h); + case SQL_HANDLE_STMT: + return drvfreestmt((SQLHSTMT) h, SQL_DROP); + } + return SQL_ERROR; +} + +/** + * Free dynamically allocated column descriptions of STMT. + * @param s statement pointer + */ + +static void +freedyncols(STMT *s) +{ + if (s->dyncols) { + int i; + + for (i = 0; i < s->dcols; i++) { + freep(&s->dyncols[i].typename); + } + if (s->cols == s->dyncols) { + s->cols = NULL; + s->ncols = 0; + } + freep(&s->dyncols); + } + s->dcols = 0; +} + +/** + * Free statement's result. + * @param s statement pointer + * @param clrcols flag to clear column information + * + * The result rows are free'd using the rowfree function pointer. + * If clrcols is greater than zero, then column bindings and dynamic column + * descriptions are free'd. + * If clrcols is less than zero, then dynamic column descriptions are free'd. + */ + +static void +freeresult(STMT *s, int clrcols) +{ + if (s->rows) { + if (s->rowfree) { + s->rowfree(s->rows); + s->rowfree = NULL; + } + s->rows = NULL; + } + s->nrows = -1; + if (clrcols > 0) { + freep(&s->bindcols); + s->nbindcols = 0; + } + if (clrcols) { + freedyncols(s); + s->cols = NULL; + s->ncols = 0; + s->nowchar[1] = 0; + } +} + +/** + * Reset bound columns to unbound state. 
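+ *
+ * Illustrative trigger, a sketch ("hstmt" is an assumed statement handle):
+ * applications normally cause this by unbinding all columns at once:
+ *
+ *   SQLFreeStmt(hstmt, SQL_UNBIND);  // drvfreestmt() then calls unbindcols()
+ *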
+ * @param s statement pointer + */ + +static void +unbindcols(STMT *s) +{ + int i; + + s->bkmrkcol.type = -1; + s->bkmrkcol.max = 0; + s->bkmrkcol.lenp = NULL; + s->bkmrkcol.valp = NULL; + s->bkmrkcol.index = 0; + s->bkmrkcol.offs = 0; + + for (i = 0; s->bindcols && i < s->nbindcols; i++) { + s->bindcols[i].type = -1; + s->bindcols[i].max = 0; + s->bindcols[i].lenp = NULL; + s->bindcols[i].valp = NULL; + s->bindcols[i].index = i; + s->bindcols[i].offs = 0; + } +} + +/** + * Reallocate space for bound columns. + * @param s statement pointer + * @param ncols number of columns + * @result ODBC error code + */ + +static SQLRETURN +mkbindcols(STMT *s, int ncols) +{ + if (s->bindcols) { + if (s->nbindcols < ncols) { + int i; + BINDCOL *bindcols = + xrealloc(s->bindcols, ncols * sizeof (BINDCOL)); + + if (!bindcols) { + return nomem(s); + } + for (i = s->nbindcols; i < ncols; i++) { + bindcols[i].type = -1; + bindcols[i].max = 0; + bindcols[i].lenp = NULL; + bindcols[i].valp = NULL; + bindcols[i].index = i; + bindcols[i].offs = 0; + } + s->bindcols = bindcols; + s->nbindcols = ncols; + } + } else if (ncols > 0) { + s->bindcols = (BINDCOL *) xmalloc(ncols * sizeof (BINDCOL)); + if (!s->bindcols) { + return nomem(s); + } + s->nbindcols = ncols; + unbindcols(s); + } + return SQL_SUCCESS; +} + +/** + * Internal function to retrieve row data getrowdata_set() + * @param s statement pointer + * @param colnum column number, 0 based + * @param otype output data type + * @param val output buffer + * @param len length of output buffer + * @param lenp output length + * @param partial flag for partial data retrieval + * @result ODBC error code + */ + +static SQLRETURN +getrowdata(STMT *s, SQLUSMALLINT colnum, SQLSMALLINT otype, + SQLPOINTER val, SQLINTEGER len, SQLLEN *lenp, int partial) +{ + char *data = NULL; + char valdummy[16]; + int valnull = 0; + int type = otype; + int int_buffer; + long long_buffer; + unsigned long ULong_buffer; + int64_t I64_buffer; + uint64_t I64U_buffer; + double double_buffer; + HiveReturn rc; + hive_err_info hive_error; + SQLRETURN ret = SQL_SUCCESS; +#if defined(_WIN32) || defined(_WIN64) + #ifdef SQL_BIGINT + char endc; + #endif +#endif + int offs = 0; + /* Map SQL_C_DEFAULT to proper C type */ + type = mapdeftype(type, s->cols[colnum].type, s->cols[colnum].nosign ? 1 : 0, + s->nowchar[0]); +#if (defined(_WIN32) || defined(_WIN64)) && defined(WINTERFACE) + /* MS Access hack part 3 (map SQL_C_DEFAULT to SQL_C_CHAR) */ + if (type == SQL_C_WCHAR && otype == SQL_C_DEFAULT) { + type = SQL_C_CHAR; + } +#endif + + switch (type) { + case SQL_C_UTINYINT: + case SQL_C_TINYINT: + case SQL_C_STINYINT: + rc = DBGetFieldAsInt(s->hive_resultset, colnum, &int_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SCHAR *) val) = (SCHAR) int_buffer; + *lenp = valnull ? SQL_NULL_DATA : sizeof(SCHAR); + break; + + case SQL_C_USHORT: + rc = DBGetFieldAsInt(s->hive_resultset, colnum, &int_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SQLUSMALLINT *) val) = (SQLUSMALLINT) int_buffer; + *lenp = valnull ? 
SQL_NULL_DATA : sizeof(SQLUSMALLINT); + break; + + case SQL_C_SHORT: + case SQL_C_SSHORT: + rc = DBGetFieldAsInt(s->hive_resultset, colnum, &int_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SQLSMALLINT *) val) = (SQLSMALLINT) int_buffer; + *lenp = valnull ? SQL_NULL_DATA : sizeof(SQLSMALLINT); + break; + + case SQL_C_ULONG: + rc = DBGetFieldAsULong(s->hive_resultset, colnum, &ULong_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SQLUINTEGER *) val) = (SQLUINTEGER) ULong_buffer; + *lenp = valnull ? SQL_NULL_DATA : sizeof(SQLUINTEGER); + break; + + case SQL_C_LONG: + case SQL_C_SLONG: + rc = DBGetFieldAsLong(s->hive_resultset, colnum, &long_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SQLINTEGER *) val) = (SQLINTEGER) long_buffer; + *lenp = valnull ? SQL_NULL_DATA : sizeof(SQLINTEGER); + break; + +#ifdef SQL_BIGINT + case SQL_C_UBIGINT: + rc = DBGetFieldAsI64U(s->hive_resultset, colnum, &I64U_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SQLUBIGINT *) val) = (SQLUBIGINT) I64U_buffer; + *lenp = valnull ? SQL_NULL_DATA : sizeof(SQLUBIGINT); + break; + + case SQL_C_SBIGINT: + rc = DBGetFieldAsI64(s->hive_resultset, colnum, &I64_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SQLBIGINT *) val) = (SQLBIGINT) I64_buffer; + *lenp = valnull ? SQL_NULL_DATA : sizeof(SQLBIGINT); + break; +#endif /* SQL_BIGINT */ + + case SQL_C_FLOAT: + rc = DBGetFieldAsDouble(s->hive_resultset, colnum, &double_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SQLREAL *) val) = (SQLREAL) double_buffer; + *lenp = valnull ? SQL_NULL_DATA : sizeof(SQLREAL); + break; + + case SQL_C_DOUBLE: + rc = DBGetFieldAsDouble(s->hive_resultset, colnum, &double_buffer, + &valnull, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + *((SQLDOUBLE *) val) = (SQLDOUBLE) double_buffer; + *lenp = valnull ? 
SQL_NULL_DATA : sizeof(SQLDOUBLE); + break; + +#ifdef WINTERFACE + case SQL_C_WCHAR: +#endif + case SQL_C_CHAR: { + size_t dlen; + int doz, zlen = len - 1; + + rc = DBGetFieldDataLen(s->hive_resultset, colnum, &dlen, + &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + + /* DBGetFieldDataLen does not count null terminator */ + data = xmalloc(++dlen); + + rc = DBGetFieldAsCString(s->hive_resultset, colnum, (char *)data, + dlen, NULL, &valnull, + &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + /* TODO */ + ret = SQL_ERROR; + goto done; + } + +#ifdef WINTERFACE + SQLWCHAR *ucdata = NULL; +#endif + +#if (defined(_WIN32) || defined(_WIN64)) && defined(WINTERFACE) + /* MS Access hack part 2 (reserved error -7748) */ + if (!valnull && + (s->cols == statSpec2P || s->cols == statSpec3P) && + type == SQL_C_WCHAR) { + if (len > 0 && len <= sizeof (SQLWCHAR)) { + ((char *) val)[0] = data[0]; + memset((char *) val + 1, 0, len - 1); + *lenp = 1; + ret = SQL_SUCCESS; + goto done; + } + } +#endif + +#ifdef WINTERFACE + switch (type) { + case SQL_C_CHAR: + doz = 1; + break; + case SQL_C_WCHAR: + doz = sizeof (SQLWCHAR); + break; + default: + doz = 0; + break; + } + if (type == SQL_C_WCHAR) { + ucdata = uc_from_utf((SQLCHAR *) data, dlen); + if (!ucdata) { + ret = nomem(s); + goto done; + } + dlen = uc_strlen(ucdata) * sizeof (SQLWCHAR); + } +#else + doz = (type == SQL_C_CHAR) ? 1 : 0; +#endif + if (partial && len && s->bindcols) { + if (s->bindcols[colnum].offs >= dlen) { +#ifdef WINTERFACE + uc_free(ucdata); +#endif + *lenp = 0; + if (doz && val) { +#ifdef WINTERFACE + if (type == SQL_C_WCHAR) { + ((SQLWCHAR *) val)[0] = 0; + } else { + ((char *) val)[0] = '\0'; + } +#else + ((char *) val)[0] = '\0'; +#endif + } + if (!dlen && s->bindcols[colnum].offs == dlen) { + s->bindcols[colnum].offs = 1; + ret = SQL_SUCCESS; + goto done; + } + s->bindcols[colnum].offs = 0; + ret = SQL_NO_DATA; + goto done; + } + offs = s->bindcols[colnum].offs; + dlen -= offs; + } + if (val && !valnull && len) { +#ifdef WINTERFACE + if (type == SQL_C_WCHAR) { + uc_strncpy(val, ucdata + offs / sizeof (SQLWCHAR), + (len - doz) / sizeof (SQLWCHAR)); + } else { + strncpy(val, data + offs, len - doz); + } +#else + strncpy(val, data + offs, len - doz); +#endif + } + if (valnull || len < 1) { + /* *lenp = dlen; */ + *lenp = SQL_NULL_DATA; + } else { + *lenp = min(len - doz, dlen); + if (*lenp == len - doz && *lenp != dlen) { + *lenp = SQL_NO_TOTAL; + } else if (*lenp < zlen) { + zlen = *lenp; + } + } + if (len && !valnull && doz) { +#ifdef WINTERFACE + if (type == SQL_C_WCHAR) { + ((SQLWCHAR *) val)[zlen / sizeof (SQLWCHAR)] = 0; + } else { + ((char *) val)[zlen] = '\0'; + } +#else + ((char *) val)[zlen] = '\0'; +#endif + } +#ifdef WINTERFACE + uc_free(ucdata); +#endif + if (partial && len && s->bindcols) { + if (*lenp == SQL_NO_TOTAL) { + *lenp = dlen; + s->bindcols[colnum].offs += len - doz; + setstat(s, -1, "data right truncated", "01004"); + if (s->bindcols[colnum].lenp) { + *s->bindcols[colnum].lenp = dlen; + } + ret = SQL_SUCCESS_WITH_INFO; + goto done; + } + s->bindcols[colnum].offs += *lenp; + } + if (*lenp == SQL_NO_TOTAL) { + *lenp = dlen; + setstat(s, -1, "data right truncated", "01004"); + ret = SQL_SUCCESS_WITH_INFO; + goto done; + } + } + break; /* SQL_C_WCHAR SQL_C_CHAR */ + + default: + /* TODO log unrecognized type message */ + ret = SQL_ERROR; + goto done; + } + + done: + if (NULL != data) { + xfree(data); + } + if (HIVE_ERROR == rc) 
+ setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + return ret; +} + +/** + * Internal function to retrieve row data set, used by SQLFetch() and + * friends and SQLGetData(). + * @param s statement pointer + * @param colnum column number, 0 based + * @param otype output data type + * @param val output buffer + * @param len length of output buffer + * @param lenp output length + * @param partial flag for partial data retrieval + * @result ODBC error code + */ +static SQLRETURN +getrowdata_set(STMT *s, SQLUSMALLINT colnum, SQLSMALLINT otype, + SQLPOINTER val, SQLINTEGER len, SQLLEN *lenp, int partial) +{ + HiveReturn rc; + int has_results; + SQLRETURN ret = SQL_SUCCESS, row_status= SQL_SUCCESS; + SQLLEN dummy; + hive_err_info hive_error; + int cnt = 0; + + if (!lenp) { + lenp = &dummy; + } + if (colnum >= s->ncols) { + setstat(s, -1, "invalid column", (*s->ov3) ? "07009" : "S1002"); + return SQL_ERROR; + } + + rc = DBHasResults(s->hive_resultset, &has_results, + &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + return SQL_ERROR; + } + if (!has_results) { + /* TODO: log warning, no query results */ + return SQL_ERROR; + } + + for (cnt =0; cnt < *s->row_count; cnt++) { + row_status = getrowdata(s, colnum, otype, val, len, lenp, partial); + if (row_status != SQL_SUCCESS) { + ret = row_status; + if (ret == SQL_ERROR) + break; + } + DBSeekNextRow(s->hive_resultset, &hive_error, sizeof(hive_error.err_buf)); + val = (char*)val+len; /* move to next value pointer in the fetch array */ + lenp++; /* move to next length pointer in the fetch array */ + } + return ret; +} + +/** + * Interal bind C variable to column of result set. + * @param stmt statement handle + * @param col column number, starting at 1 + * @param type output type + * @param val output buffer + * @param max length of output buffer + * @param lenp output length pointer + * @result ODBC error code + */ + +static SQLRETURN +drvbindcol(SQLHSTMT stmt, SQLUSMALLINT col, SQLSMALLINT type, + SQLPOINTER val, SQLLEN max, SQLLEN *lenp) +{ + STMT *s; + int sz = 0; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (col < 1) { + if (col == 0 && s->bkmrk && type == SQL_C_BOOKMARK) { + s->bkmrkcol.type = type; + s->bkmrkcol.max = sizeof (SQLINTEGER); + s->bkmrkcol.lenp = lenp; + s->bkmrkcol.valp = val; + s->bkmrkcol.offs = 0; + if (lenp) { + *lenp = 0; + } + return SQL_SUCCESS; + } + setstat(s, -1, "invalid column", (*s->ov3) ? 
"07009" : "S1002"); + return SQL_ERROR; + } + if (mkbindcols(s, col) != SQL_SUCCESS) { + return SQL_ERROR; + } + --col; + if (type == SQL_C_DEFAULT) { + type = mapdeftype(type, s->cols[col].type, 0, + s->nowchar[0] || s->nowchar[1]); + } + switch (type) { + case SQL_C_LONG: + case SQL_C_ULONG: + case SQL_C_SLONG: + sz = sizeof (SQLINTEGER); + break; + case SQL_C_TINYINT: + case SQL_C_UTINYINT: + case SQL_C_STINYINT: + sz = sizeof (SQLCHAR); + break; + case SQL_C_SHORT: + case SQL_C_USHORT: + case SQL_C_SSHORT: + sz = sizeof (short); + break; + case SQL_C_FLOAT: + sz = sizeof (SQLFLOAT); + break; + case SQL_C_DOUBLE: + sz = sizeof (SQLDOUBLE); + break; + case SQL_C_TIMESTAMP: + sz = sizeof (SQL_TIMESTAMP_STRUCT); + break; + case SQL_C_TIME: + sz = sizeof (SQL_TIME_STRUCT); + break; + case SQL_C_DATE: + sz = sizeof (SQL_DATE_STRUCT); + break; + case SQL_C_CHAR: + break; +#ifdef WINTERFACE + case SQL_C_WCHAR: + break; +#endif +#ifdef SQL_C_TYPE_DATE + case SQL_C_TYPE_DATE: + sz = sizeof (SQL_DATE_STRUCT); + break; +#endif +#ifdef SQL_C_TYPE_TIME + case SQL_C_TYPE_TIME: + sz = sizeof (SQL_TIME_STRUCT); + break; +#endif +#ifdef SQL_C_TYPE_TIMESTAMP + case SQL_C_TYPE_TIMESTAMP: + sz = sizeof (SQL_TIMESTAMP_STRUCT); + break; +#endif +#ifdef SQL_BIT + case SQL_C_BIT: + sz = sizeof (SQLCHAR); + break; +#endif + case SQL_C_BINARY: + break; +#ifdef SQL_BIGINT + case SQL_C_SBIGINT: + case SQL_C_UBIGINT: + sz = sizeof (SQLBIGINT); + break; +#endif + default: + if (val == NULL) { + /* fall through, unbinding column */ + break; + } + setstat(s, -1, "invalid type %d", "HY003", type); + return SQL_ERROR; + } + if (val == NULL) { + /* unbind column */ + s->bindcols[col].type = -1; + s->bindcols[col].max = 0; + s->bindcols[col].lenp = NULL; + s->bindcols[col].valp = NULL; + s->bindcols[col].offs = 0; + } else { + if (sz == 0 && max < 0) { + setstat(s, -1, "invalid length", "HY090"); + return SQL_ERROR; + } + s->bindcols[col].type = type; + s->bindcols[col].max = (sz == 0) ? max : sz; + s->bindcols[col].lenp = lenp; + s->bindcols[col].valp = val; + s->bindcols[col].offs = 0; + if (lenp) { + *lenp = 0; + } + } + return SQL_SUCCESS; +} + +/** + * Bind C variable to column of result set. + * @param stmt statement handle + * @param col column number, starting at 1 + * @param type output type + * @param val output buffer + * @param max length of output buffer + * @param lenp output length pointer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLBindCol(SQLHSTMT stmt, SQLUSMALLINT col, SQLSMALLINT type, + SQLPOINTER val, SQLLEN max, SQLLEN *lenp) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvbindcol(stmt, col, type, val, max, lenp); + HSTMT_UNLOCK(stmt); + return ret; +} + + +/** + * Retrieve information on tables and/or views. 
+ * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param type types of tables string or NULL + * @param typeLen length of types of tables string or SQL_NTS + * @result ODBC error code + */ + +static SQLRETURN +drvtables(SQLHSTMT stmt, + SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *_table, SQLSMALLINT tableLen, + SQLCHAR *type, SQLSMALLINT typeLen) +{ + STMT *s; + DBC *d; + SQLRETURN ret; + HiveReturn rc; + hive_err_info hive_error; + SQLCHAR *table; + short freeTableName = 0; + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (SQL_NULL_HDBC == s->dbc) { + return noconn(s); + } + d = (DBC *) s->dbc; + if (NULL == d->hive_conn) { + return noconn(s); + } + + /* If table name/pattern not specified, assume caller wants all tables */ + if (NULL == _table) { + table = "*"; + } + else if (tableLen == SQL_NTS) + table = _table; + else { + /* make a local copy of table name */ + table = (SQLCHAR*) xmalloc(tableLen+1); + if (table == NULL) + return nomem(s); + memcpy(table, _table, tableLen); + table[tableLen] = '\0'; + freeTableName = 1; + } + + + if (NULL != type + && strcmp(type, "") + && !strstr((char *)type, "TABLE")) { + /* force zero results if the type list does not contain "TABLE" */ + if (freeTableName) + xfree(table); + table = ""; + } + + + /* + * TODO: If type is SQL_ALL_TABLE_TYPES and cat, schema, and table + * are empty strings, the result set should contain a list of valid + * table types for the data source (which is just "TABLE" for Hive right now). + * (All columns except the TABLE_TYPE column contain NULLs.) + */ + + rc = DBTables(d->hive_conn, (char *)table, &(s->hive_resultset), + &hive_error, sizeof(hive_error.err_buf)); + if (freeTableName) + xfree(table); + + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + freeresult(s, 0); + return SQL_ERROR; + } + + ret = setupdyncols(s); + if (SQL_SUCCESS != ret) { + /* TODO: log error */ + freeresult(s, 1); + return ret; + } + + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Retrieve information on tables and/or views. + * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param type types of tables string or NULL + * @param typeLen length of types of tables string or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLTables(SQLHSTMT stmt, + SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen, + SQLCHAR *type, SQLSMALLINT typeLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvtables(stmt, cat, catLen, schema, schemaLen, + table, tableLen, type, typeLen); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve information on tables and/or views. 
+ * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param type types of tables string or NULL + * @param typeLen length of types of tables string or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLTablesW(SQLHSTMT stmt, + SQLWCHAR *cat, SQLSMALLINT catLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *table, SQLSMALLINT tableLen, + SQLWCHAR *type, SQLSMALLINT typeLen) +{ + char *c = NULL, *s = NULL, *t = NULL, *y = NULL; + SQLRETURN ret; + + HSTMT_LOCK(stmt); + if (cat) { + c = uc_to_utf_c(cat, catLen); + if (!c) { + ret = nomem((STMT *) stmt); + goto done; + } + } + if (schema) { + s = uc_to_utf_c(schema, schemaLen); + if (!s) { + ret = nomem((STMT *) stmt); + goto done; + } + } + if (table) { + t = uc_to_utf_c(table, tableLen); + if (!t) { + ret = nomem((STMT *) stmt); + goto done; + } + } + if (type) { + y = uc_to_utf_c(type, typeLen); + if (!y) { + ret = nomem((STMT *) stmt); + goto done; + } + } + ret = drvtables(stmt, (SQLCHAR *) c, SQL_NTS, (SQLCHAR *) s, SQL_NTS, + (SQLCHAR *) t, SQL_NTS, (SQLCHAR *) y, SQL_NTS); +done: + HSTMT_UNLOCK(stmt); + uc_free(y); + uc_free(t); + uc_free(s); + uc_free(c); + return ret; +} +#endif + +/** + * Internal function to map Hive type to SQL type + * @param hive_type Hive data type code + * @result SQL data type code + */ +static SQLSMALLINT +hive_to_sql_type(HiveType hive_type) +{ + switch (hive_type) { + case HIVE_BOOLEAN_TYPE: + return SQL_BIT; + case HIVE_TINYINT_TYPE: + return SQL_TINYINT; + case HIVE_SMALLINT_TYPE: + return SQL_SMALLINT; + case HIVE_INT_TYPE: + return SQL_INTEGER; + case HIVE_BIGINT_TYPE: + return SQL_BIGINT; + case HIVE_FLOAT_TYPE: + return SQL_FLOAT; + case HIVE_DOUBLE_TYPE: + return SQL_DOUBLE; + case HIVE_STRING_TYPE: + return SQL_VARCHAR; + case HIVE_DATE_TYPE: + return SQL_TYPE_DATE; + case HIVE_DATETIME_TYPE: + return SQL_TYPE_TIMESTAMP; + case HIVE_TIMESTAMP_TYPE: + return SQL_TYPE_TIMESTAMP; + case HIVE_LIST_TYPE: + case HIVE_MAP_TYPE: + case HIVE_STRUCT_TYPE: + case HIVE_VOID_TYPE: + case HIVE_UNKNOWN_TYPE: + default: + return SQL_UNKNOWN_TYPE; + } +} + +/** + * Internal retrieve column information on table. 
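+ *
+ * Illustrative use, a sketch ("hstmt" and the table name "my_table" are
+ * assumed): unlike drvtables(), a non-empty table name is required here,
+ * while a NULL column pattern is treated as "*" (all columns):
+ *
+ *   SQLColumns(hstmt, NULL, 0, NULL, 0,
+ *              (SQLCHAR *) "my_table", SQL_NTS, NULL, 0);
+ *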
+ * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param col column name/pattern or NULL + * @param colLen length of column name/pattern or SQL_NTS + * @result ODBC error code + */ + +static SQLRETURN +drvcolumns(SQLHSTMT stmt, + SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *_table, SQLSMALLINT tableLen, + SQLCHAR *_col, SQLSMALLINT colLen) +{ + STMT *s; + DBC *d; + hive_err_info hive_error; + SQLRETURN ret; + HiveReturn rc; + SQLCHAR *table, *col; + short freeTableName=0, freeColName=0; + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (SQL_NULL_HDBC == s->dbc) { + return noconn(s); + } + d = (DBC *) s->dbc; + if (NULL == d->hive_conn) { + return noconn(s); + } + + if (NULL == _table || !strlen(_table)) { + /* TODO: log error */ + return SQL_ERROR; + } + + if (tableLen == SQL_NTS) + table = _table; + else { + /* make a local copy of table name */ + table = (SQLCHAR*) xmalloc(tableLen+1); + if (table == NULL) + return nomem(s); + memcpy(table, _table, tableLen); + table[tableLen] = '\0'; + freeTableName = 1; + } + + + freeresult(s, 1); + + /* if column name is unspecified, assume caller wants to retrieve all columns */ + if (NULL == _col) { + col = "*"; + } + else if (colLen == SQL_NTS) + col = _col; + else { + /* make a local copy of table name */ + col = (SQLCHAR*) xmalloc(colLen+1); + if (col == NULL) + return nomem(s); + memcpy(col, _col, colLen); + col[colLen] = '\0'; + freeColName = 1; + } + + + rc = DBColumns(d->hive_conn, &hive_to_sql_type, (char *) table, (char *) col, + &(s->hive_resultset), &hive_error, sizeof(hive_error.err_buf)); + if (freeTableName) + xfree(table); + if (freeColName) + xfree(col); + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + freeresult(s, 0); + return SQL_ERROR; + } + + ret = setupdyncols(s); + if (SQL_SUCCESS != ret) { + freeresult(s, 1); + return ret; + } + + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Retrieve column information on table. + * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param col column name/pattern or NULL + * @param colLen length of column name/pattern or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLColumns(SQLHSTMT stmt, + SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen, + SQLCHAR *col, SQLSMALLINT colLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvcolumns(stmt, cat, catLen, schema, schemaLen, + table, tableLen, col, colLen); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve column information on table (UNICODE version). 
+ * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param col column name/pattern or NULL + * @param colLen length of column name/pattern or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLColumnsW(SQLHSTMT stmt, + SQLWCHAR *cat, SQLSMALLINT catLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *table, SQLSMALLINT tableLen, + SQLWCHAR *col, SQLSMALLINT colLen) +{ + char *c = NULL, *s = NULL, *t = NULL, *k = NULL; + SQLRETURN ret; + + HSTMT_LOCK(stmt); + if (cat) { + c = uc_to_utf_c(cat, catLen); + if (!c) { + ret = nomem((STMT *) stmt); + goto done; + } + } + if (schema) { + s = uc_to_utf_c(schema, schemaLen); + if (!s) { + ret = nomem((STMT *) stmt); + goto done; + } + } + if (table) { + t = uc_to_utf_c(table, tableLen); + if (!t) { + ret = nomem((STMT *) stmt); + goto done; + } + } + if (col) { + k = uc_to_utf_c(col, colLen); + if (!k) { + ret = nomem((STMT *) stmt); + goto done; + } + } + ret = drvcolumns(stmt, (SQLCHAR *) c, SQL_NTS, (SQLCHAR *) s, SQL_NTS, + (SQLCHAR *) t, SQL_NTS, (SQLCHAR *) k, SQL_NTS); +done: + HSTMT_UNLOCK(stmt); + uc_free(k); + uc_free(t); + uc_free(s); + uc_free(c); + return ret; + +} +#endif + +/** + * Columns for result set of SQLGetTypeInfo(). + * Note that the odbc driver module has the ODBC include files. The ODBC constants like SQL_, SQL_NULLABLE etc are available here. + * The client module doesn't have these definitions. Hence the schema structure is defined here and passed to DBTypeInfo(). 
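+ *
+ * Each string in the tables below is one pre-serialized SQLGetTypeInfo() row:
+ * the column values are joined with DEFAULT_FIELD_DELIM and columns that have
+ * no value carry DEFAULT_SERIALIZATION_NULL_FORMAT, i.e. the same delimited
+ * row format the client result-set code parses. g_typeinfo_resultset_2 holds
+ * the 15 columns of an ODBC 2.x SQLGetTypeInfo row; g_typeinfo_resultset_3
+ * appends the four ODBC 3.x columns (SQL_DATA_TYPE, SQL_DATETIME_SUB,
+ * NUM_PREC_RADIX, INTERVAL_PRECISION). TOSTR() stringifies the numeric SQL_*
+ * constants at compile time so they can be embedded in these strings.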
+ */ +#define str(s) #s +#define TOSTR(s) str(s) +static const int g_typeinfo_types [] = { + SQL_TINYINT, + SQL_SMALLINT, + SQL_INTEGER, + SQL_BIGINT, + SQL_BIT, + SQL_FLOAT, + SQL_DOUBLE, + SQL_VARCHAR +}; + + +static const char* g_typeinfo_resultset_2[] = { + "TINYINT" DEFAULT_FIELD_DELIM TOSTR(SQL_TINYINT) DEFAULT_FIELD_DELIM TOSTR(3) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT, + + "SMALLINT" DEFAULT_FIELD_DELIM TOSTR(SQL_SMALLINT) DEFAULT_FIELD_DELIM TOSTR(5) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT, + + "INT" DEFAULT_FIELD_DELIM TOSTR(SQL_INTEGER) DEFAULT_FIELD_DELIM TOSTR(10) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT, + + "BIGINT" DEFAULT_FIELD_DELIM TOSTR(SQL_BIGINT) DEFAULT_FIELD_DELIM TOSTR(16) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT, + + "BOOLEAN" DEFAULT_FIELD_DELIM TOSTR(SQL_BIT) DEFAULT_FIELD_DELIM TOSTR(1) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT, + + "FLOAT" 
DEFAULT_FIELD_DELIM TOSTR(SQL_FLOAT) DEFAULT_FIELD_DELIM TOSTR(15) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT, + + "DOUBLE" DEFAULT_FIELD_DELIM TOSTR(SQL_DOUBLE) DEFAULT_FIELD_DELIM TOSTR(15) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT, + + "STRING" DEFAULT_FIELD_DELIM TOSTR(SQL_VARCHAR) DEFAULT_FIELD_DELIM TOSTR(255) DEFAULT_FIELD_DELIM\ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT +}; + +static const char* g_typeinfo_resultset_3[] = { + "TINYINT" DEFAULT_FIELD_DELIM TOSTR(SQL_TINYINT) DEFAULT_FIELD_DELIM TOSTR(3) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_TINYINT) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT , + + "SMALLINT" DEFAULT_FIELD_DELIM TOSTR(SQL_SMALLINT) DEFAULT_FIELD_DELIM TOSTR(5) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM 
TOSTR(SQL_SMALLINT) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT, + + "INT" DEFAULT_FIELD_DELIM TOSTR(SQL_INTEGER) DEFAULT_FIELD_DELIM TOSTR(10) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_INTEGER) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT, + + "BIGINT" DEFAULT_FIELD_DELIM TOSTR(SQL_BIGINT) DEFAULT_FIELD_DELIM TOSTR(16) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_INTEGER) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT, + + "BOOLEAN" DEFAULT_FIELD_DELIM TOSTR(SQL_BIT) DEFAULT_FIELD_DELIM TOSTR(1) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_BIT) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT, + + "FLOAT" DEFAULT_FIELD_DELIM TOSTR(SQL_FLOAT) DEFAULT_FIELD_DELIM TOSTR(15) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FLOAT) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM 
DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT, + + "DOUBLE" DEFAULT_FIELD_DELIM TOSTR(SQL_DOUBLE) DEFAULT_FIELD_DELIM TOSTR(15) DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_DOUBLE) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT, + + "STRING" DEFAULT_FIELD_DELIM TOSTR(SQL_VARCHAR) DEFAULT_FIELD_DELIM TOSTR(255) DEFAULT_FIELD_DELIM\ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM TOSTR(SQL_NULLABLE) \ + DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM \ + TOSTR(SQL_TRUE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) DEFAULT_FIELD_DELIM TOSTR(SQL_FALSE) \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM TOSTR(SQL_CHAR) DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT \ + DEFAULT_FIELD_DELIM DEFAULT_SERIALIZATION_NULL_FORMAT DEFAULT_FIELD_DELIM \ + DEFAULT_SERIALIZATION_NULL_FORMAT +}; + +static SQLRETURN +drvgettypeinfo(SQLHSTMT stmt, SQLSMALLINT sqltype) +{ + STMT *s; + DBC *d; + SQLRETURN ret; + HiveReturn rc; + hive_err_info hive_error; + int resultSetSize; + int cnt=0; + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (SQL_NULL_HDBC == s->dbc) { + return noconn(s); + } + d = (DBC *) s->dbc; + if (NULL == d->hive_conn) { + return noconn(s); + } + + if (sqltype == SQL_ALL_TYPES) { + resultSetSize = TYPEINFO_RESULT_SIZE; + } else { + /* search for specific type in the resultset array */ + if (s->ov3) { + for (cnt=0; cnt < TYPEINFO_RESULT_SIZE; cnt++) { + if (sqltype == g_typeinfo_types[cnt]) + break; + } + } + else { + for (cnt=0; cnt < TYPEINFO_RESULT_SIZE; cnt++) { + if (sqltype == g_typeinfo_types[cnt]) + break; + } + } + + if (cnt == TYPEINFO_RESULT_SIZE) + resultSetSize = 0; + else + resultSetSize = 1; + } + + if (!(s->ov3)) { + rc = DBGetTypeInfo(d->hive_conn, !(s->ov3), g_typeinfo_resultset_2+cnt, resultSetSize, + &(s->hive_resultset), &hive_error, sizeof(hive_error.err_buf)); + } else { + rc = DBGetTypeInfo(d->hive_conn, !(s->ov3), g_typeinfo_resultset_3+cnt, resultSetSize, + &(s->hive_resultset), &hive_error, sizeof(hive_error.err_buf)); + } + + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + freeresult(s, 0); + return SQL_ERROR; + } + + ret = setupdyncols(s); + if (SQL_SUCCESS != ret) { + /* TODO: log error */ + freeresult(s, 1); + return ret; + } + + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Return data type information. 
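+ *
+ * The rows come from the static g_typeinfo_resultset_2/_3 arrays above,
+ * selected by the statement's ODBC behaviour version (s->ov3). Minimal
+ * usage sketch (assumes an already-allocated statement handle "hstmt";
+ * names are illustrative, not part of this change):
+ *
+ *   SQLGetTypeInfo(hstmt, SQL_ALL_TYPES);
+ *   while (SQL_SUCCEEDED(SQLFetch(hstmt))) {
+ *       ... one row per supported Hive type ...
+ *   }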
+ * @param stmt statement handle + * @param sqltype which type to retrieve + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetTypeInfo(SQLHSTMT stmt, SQLSMALLINT sqltype) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvgettypeinfo(stmt, sqltype); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Return data type information (UNICODE version). + * @param stmt statement handle + * @param sqltype which type to retrieve + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetTypeInfoW(SQLHSTMT stmt, SQLSMALLINT sqltype) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvgettypeinfo(stmt, sqltype); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + + +/** + * Internal return statistic information on table indices. + * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param itype type of index information + * @param resv reserved + * @result ODBC error code + */ + +static SQLRETURN +drvstatistics(SQLHSTMT stmt, SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *_table, SQLSMALLINT tableLen, + SQLUSMALLINT itype, SQLUSMALLINT resv) +{ + STMT *s; + DBC *d; + SQLRETURN ret; + HiveReturn rc; + hive_err_info hive_error; + SQLCHAR *table; + short freeTableName = 0; + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (SQL_NULL_HDBC == s->dbc) { + return noconn(s); + } + d = (DBC *) s->dbc; + if (NULL == d->hive_conn) { + return noconn(s); + } + + if (tableLen == SQL_NTS) + table = _table; + else { + /* make a local copy of table name */ + table = (SQLCHAR*) xmalloc(tableLen+1); + if (table == NULL) + return nomem(s); + memcpy(table, _table, tableLen); + table[tableLen] = '\0'; + freeTableName = 1; + } + + + rc = DBStatistics(d->hive_conn, &(s->hive_resultset), table, + (itype == SQL_INDEX_UNIQUE), + &hive_error, sizeof(hive_error.err_buf)); + if (freeTableName) + xfree(table); + + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + freeresult(s, 0); + return SQL_ERROR; + } + + ret = setupdyncols(s); + if (SQL_SUCCESS != ret) { + /* TODO: log error */ + freeresult(s, 1); + return ret; + } + + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Return statistic information on table indices. + * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param itype type of index information + * @param resv reserved + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLStatistics(SQLHSTMT stmt, SQLCHAR *cat, SQLSMALLINT catLen, + SQLCHAR *schema, SQLSMALLINT schemaLen, + SQLCHAR *table, SQLSMALLINT tableLen, + SQLUSMALLINT itype, SQLUSMALLINT resv) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvstatistics(stmt, cat, catLen, schema, schemaLen, + table, tableLen, itype, resv); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Return statistic information on table indices (UNICODE version). 
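+ *
+ * Wide-character arguments are converted to UTF-8 and forwarded to
+ * drvstatistics() with SQL_NTS lengths; only the table name is ultimately
+ * used by the underlying DBStatistics() call.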
+ * @param stmt statement handle + * @param cat catalog name/pattern or NULL + * @param catLen length of catalog name/pattern or SQL_NTS + * @param schema schema name/pattern or NULL + * @param schemaLen length of schema name/pattern or SQL_NTS + * @param table table name/pattern or NULL + * @param tableLen length of table name/pattern or SQL_NTS + * @param itype type of index information + * @param resv reserved + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLStatisticsW(SQLHSTMT stmt, SQLWCHAR *cat, SQLSMALLINT catLen, + SQLWCHAR *schema, SQLSMALLINT schemaLen, + SQLWCHAR *table, SQLSMALLINT tableLen, + SQLUSMALLINT itype, SQLUSMALLINT resv) +{ + char *c = NULL, *s = NULL, *t = NULL; + SQLRETURN ret; + + HSTMT_LOCK(stmt); + if (cat) { + c = uc_to_utf_c(cat, catLen); + if (!c) { + ret = nomem((STMT *) stmt); + goto done; + } + } + if (schema) { + s = uc_to_utf_c(schema, schemaLen); + if (!s) { + ret = nomem((STMT *) stmt); + goto done; + } + } + if (table) { + t = uc_to_utf_c(table, tableLen); + if (!t) { + ret = nomem((STMT *) stmt); + goto done; + } + } + ret = drvstatistics(stmt, (SQLCHAR *) c, SQL_NTS, (SQLCHAR *) s, SQL_NTS, + (SQLCHAR *) t, SQL_NTS, itype, resv); +done: + HSTMT_UNLOCK(stmt); + uc_free(t); + uc_free(s); + uc_free(c); + return ret; +} +#endif + +/** + * Retrieve row data after fetch. + * @param stmt statement handle + * @param colnum column number, starting at 1 + * @param type output type + * @param val output buffer + * @param len length of output buffer + * @param lenp output length + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLGetData(SQLHSTMT stmt, SQLUSMALLINT colnum, SQLSMALLINT type, + SQLPOINTER val, SQLLEN len, SQLLEN *lenp) +{ + STMT *s; + SQLRETURN ret = SQL_ERROR; + + HSTMT_LOCK(stmt); + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (colnum == 0 && s->bkmrk && type == SQL_C_BOOKMARK) { + *((long *) val) = s->rowp; + if (lenp) { + *lenp = sizeof (long); + } + ret = SQL_SUCCESS; + goto done; + } + if (colnum < 1 || colnum > s->ncols) { + setstat(s, -1, "invalid column", (*s->ov3) ? 
"07009" : "S1002"); + goto done; + } + ret = getrowdata_set(s, colnum-1, type, val, len, lenp, 1); +done: + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Internal: fetch and bind from statement's current row + * @param s statement pointer + * @param rsi rowset index + * @result ODBC error code + */ + +static SQLRETURN +dofetchbind(STMT *s, int rsi) +{ + int ret, i, withinfo = 0; + + s->row_status0[rsi] = SQL_ROW_SUCCESS; + if (s->bkmrk && s->bkmrkcol.valp) { + long *val; + + if (s->bind_type != SQL_BIND_BY_COLUMN) { + val = (long *) ((char *) s->bkmrkcol.valp + s->bind_type * rsi); + } else { + val = (long *) s->bkmrkcol.valp + rsi; + } + if (s->bind_offs) { + val = (long *) ((char *) val + *s->bind_offs); + } + *val = s->rowp; + if (s->bkmrkcol.lenp) { + SQLLEN *ival; + + if (s->bind_type != SQL_BIND_BY_COLUMN) { + ival = (SQLLEN *) + ((char *) s->bkmrkcol.lenp + s->bind_type * rsi); + } else { + ival = &s->bkmrkcol.lenp[rsi]; + } + if (s->bind_offs) { + ival = (SQLLEN *) ((char *) ival + *s->bind_offs); + } + *ival = sizeof (long); + } + } + ret = SQL_SUCCESS; + for (i = 0; s->bindcols && i < s->ncols; i++) { + BINDCOL *b = &s->bindcols[i]; + SQLPOINTER dp = 0; + SQLLEN *lp = 0; + + b->offs = 0; + if (b->valp) { + if (s->bind_type != SQL_BIND_BY_COLUMN) { + dp = (SQLPOINTER) ((char *) b->valp + s->bind_type * rsi); + } else { + dp = (SQLPOINTER) ((char *) b->valp + b->max * rsi); + } + if (s->bind_offs) { + dp = (SQLPOINTER) ((char *) dp + *s->bind_offs); + } + } + if (b->lenp) { + if (s->bind_type != SQL_BIND_BY_COLUMN) { + lp = (SQLLEN *) ((char *) b->lenp + s->bind_type * rsi); + } else { + lp = b->lenp + rsi; + } + if (s->bind_offs) { + lp = (SQLLEN *) ((char *) lp + *s->bind_offs); + } + } + if (dp || lp) { + ret = getrowdata_set(s, (SQLUSMALLINT) i, b->type, dp, b->max, lp, 0); + if (!SQL_SUCCEEDED(ret)) { + s->row_status0[rsi] = SQL_ROW_ERROR; + break; + } + if (ret != SQL_SUCCESS) { + withinfo = 1; +#ifdef SQL_ROW_SUCCESS_WITH_INFO + s->row_status0[rsi] = SQL_ROW_SUCCESS_WITH_INFO; +#endif + } + } + } + if (SQL_SUCCEEDED(ret)) { + ret = withinfo ? SQL_SUCCESS_WITH_INFO : SQL_SUCCESS; + } + return ret; +} + + +/** + * Internal fetch function for SQLFetch(). + * @param stmt statment handle + * @result ODBC error code + */ + +static SQLRETURN +drvfetch(SQLHSTMT stmt) +{ + STMT *s; + HiveReturn rc; + hive_err_info hive_error; + SQLSMALLINT i; + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + + rc = DBFetch(s->hive_resultset, &hive_error, sizeof(hive_error.err_buf), s->row_count); + switch (rc) { + case HIVE_ERROR: + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + return SQL_ERROR; + case HIVE_NO_MORE_DATA: + /* TODO: log info message */ + return SQL_NO_DATA; + } + + /* Copy data out of result set into bound column storage */ + for (i = 0; i < s->nbindcols; i++) { + BINDCOL *bcol = &(s->bindcols[i]); + SQLRETURN ret = getrowdata_set(s, bcol->index, bcol->type, + bcol->valp, bcol->max, + bcol->lenp, 0); + if (SQL_SUCCESS != ret) { + /* Log error */ + return ret; + } + } + + return SQL_SUCCESS; +} + + +/** + * Internal fetch function for SQLFetch(). 
+ * @param stmt statment handle + * @param fetch_orientation fetch type + * @param fetch_offset offset for fetch orientation + * @result ODBC error code + */ + +static SQLRETURN +drvfetchscroll(SQLHSTMT stmt, SQLSMALLINT fetch_orientation, + SQLLEN fetch_offset, SQLULEN *row_count_ptr, + SQLUSMALLINT *row_status_array) +{ + STMT *s; + HiveReturn rc = HIVE_SUCCESS; + hive_err_info hive_error; + SQLSMALLINT i; + SQLRETURN ret = SQL_SUCCESS; + SQLRETURN bind_status = SQL_SUCCESS; + + + if (SQL_NULL_HSTMT == stmt) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + + /* Currently we only support fetch next */ + if ((fetch_orientation != SQL_FETCH_NEXT) && + ((fetch_orientation != SQL_FETCH_RELATIVE) || (fetch_offset != 0))) + { + setstat(s, -1, "Fetch type out of range", "HY106"); + return SQL_ERROR; + } + + if (!row_status_array) + row_status_array = s->row_status; + if (!row_count_ptr) + row_count_ptr = s->row_count; + + /* if we are re-fetching with relative offset, then reposition the cursor */ + if (fetch_orientation == SQL_FETCH_RELATIVE) + rc =DBSeekPrior(s->hive_resultset, &hive_error, sizeof(hive_error.err_buf)); + + if (rc == HIVE_SUCCESS) + rc = DBFetch(s->hive_resultset, &hive_error, sizeof(hive_error.err_buf), + row_count_ptr); + + switch (rc) { + case HIVE_ERROR: + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + return SQL_ERROR; + case HIVE_NO_MORE_DATA: + /* TODO: log info message */ + return SQL_NO_DATA; + } + + /* set the row status for fetched rows and any last empty slots */ + if (row_status_array && row_count_ptr) { + for (i =0 ; i < *row_count_ptr; i++) + row_status_array[i] = SQL_ROW_SUCCESS; + for (i = *row_count_ptr ; i < s->rowset_size; i++) + row_status_array[i] = SQL_ROW_NOROW; + } + + /* Copy data out of result set into bound column storage */ + for (i = 0; i < s->nbindcols; i++) { + BINDCOL *bcol = &(s->bindcols[i]); + if (bcol->type == -1) + continue; + bind_status = getrowdata_set(s, bcol->index, bcol->type, + bcol->valp, bcol->max, + bcol->lenp, 0); + if (bind_status == SQL_SUCCESS_WITH_INFO) { + ret = SQL_SUCCESS_WITH_INFO; + } + else { + if (bind_status != SQL_SUCCESS) { + /* Log error */ + return bind_status; + } + } + } + return ret ; +} + + +/** + * Fetch next result row. + * @param stmt statement handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLFetch(SQLHSTMT stmt) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvfetchscroll(stmt, SQL_FETCH_NEXT, 0, NULL, NULL); + + HSTMT_UNLOCK(stmt); + return ret; +} + + +/** + * Array Fetch + * @param stmt statement handle + * @param fetch_orientation fetch type + * @param fetch_offset offset for fetch orientation + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLFetchScroll(SQLHSTMT stmt, SQLSMALLINT fetch_orientation, + SQLLEN fetch_offset) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvfetchscroll(stmt, fetch_orientation, fetch_offset, NULL, NULL); + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Array Fetch. ODBC 2.0 API + * @param stmt statement handle + * @param fetch_orientation fetch type + * @param fetch_offset offset for fetch orientation + * @param row_count_ptr pointer to number of rows fetched + * @param row_status_array array to return the status of each row. 
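+ *
+ * Rowset usage sketch (buffer sizes and the "hstmt" handle are illustrative,
+ * not part of this change):
+ *
+ *   SQLUSMALLINT status[32];
+ *   SQLULEN      fetched = 0;
+ *   SQLRETURN    rc = SQLExtendedFetch(hstmt, SQL_FETCH_NEXT, 0,
+ *                                      &fetched, status);
+ *   ... on success, "fetched" rows sit in the bound column buffers and
+ *   status[i] is SQL_ROW_SUCCESS or SQL_ROW_NOROW ...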
+ * @result ODBC error code + */ +SQLRETURN SQL_API +SQLExtendedFetch(SQLHSTMT stmt, SQLUSMALLINT fetch_orientation, + SQLLEN fetch_offset, SQLULEN *row_count_ptr, + SQLUSMALLINT *row_status_array) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvfetchscroll(stmt, fetch_orientation, fetch_offset, row_count_ptr, row_status_array); + HSTMT_UNLOCK(stmt); + return ret; +} + +/** + * Return number of affected rows of HSTMT. + * + * This function always returns -1, which is acceptable according to the + * ODBC standard since Hive does not support UPDATE, INSERT, or DELETE + * statements. + * + * @param stmt statement handle + * @param nrows output number of rows + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLRowCount(SQLHSTMT stmt, SQLLEN *nrows) +{ + STMT *s; + + HSTMT_LOCK(stmt); + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (nrows) { + *nrows = -1; + } + HSTMT_UNLOCK(stmt); + return SQL_SUCCESS; +} + +/** + * Return number of columns of result set given HSTMT. + * @param stmt statement handle + * @param ncols output number of columns + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLNumResultCols(SQLHSTMT stmt, SQLSMALLINT *ncols) +{ + STMT *s; + + HSTMT_LOCK(stmt); + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (ncols) { + *ncols = s->ncols; + } + HSTMT_UNLOCK(stmt); + return SQL_SUCCESS; +} + +/** + * Internal describe column information. + * @param stmt statement handle + * @param col column number, starting at 1 + * @param name buffer for column name + * @param nameMax length of name buffer + * @param nameLen output length of column name + * @param type output SQL type + * @param size output column size + * @param digits output number of digits + * @param nullable output NULL allowed indicator + * @result ODBC error code + */ + +static SQLRETURN +drvdescribecol(SQLHSTMT stmt, SQLUSMALLINT col, SQLCHAR *name, + SQLSMALLINT nameMax, SQLSMALLINT *nameLen, + SQLSMALLINT *type, SQLULEN *size, + SQLSMALLINT *digits, SQLSMALLINT *nullable) +{ + STMT *s; + COL *c; + int didname = 0; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (!s->cols) { + setstat(s, -1, "no columns", (*s->ov3) ? "07009" : "S1002"); + return SQL_ERROR; + } + if (col < 1 || col > s->ncols) { + setstat(s, -1, "invalid column", (*s->ov3) ? "07009" : "S1002"); + return SQL_ERROR; + } + c = s->cols + col - 1; + if (name && nameMax > 0) { + strncpy((char *) name, c->column, nameMax); + name[nameMax - 1] = '\0'; + didname = 1; + } + if (nameLen) { + if (didname) { + *nameLen = strlen((char *) name); + } else { + *nameLen = strlen(c->column); + } + } + if (type) { + *type = c->type; +#ifdef WINTERFACE + if (s->nowchar[0] || s->nowchar[1]) { + switch (c->type) { + case SQL_WCHAR: + *type = SQL_CHAR; + break; + case SQL_WVARCHAR: + *type = SQL_VARCHAR; + break; + #ifdef SQL_LONGVARCHAR + case SQL_WLONGVARCHAR: + *type = SQL_LONGVARCHAR; + break; + #endif + } + } +#endif + } + if (size) { + *size = c->size; + } + if (digits) { + /* + * If the number of decimal digits cannot be determined or + * is not applicable, the driver returns 0. + */ + *digits = 0; + } + if (nullable) { + *nullable = c->notnull; + } + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Describe column information. 
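+ *
+ * Typical call once a result set is available (sketch; buffer size and
+ * handle name are illustrative):
+ *
+ *   SQLCHAR     name[64];
+ *   SQLSMALLINT nlen, type, digits, nullable;
+ *   SQLULEN     size;
+ *   SQLDescribeCol(hstmt, 1, name, sizeof (name), &nlen,
+ *                  &type, &size, &digits, &nullable);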
+ * @param stmt statement handle + * @param col column number, starting at 1 + * @param name buffer for column name + * @param nameMax length of name buffer + * @param nameLen output length of column name + * @param type output SQL type + * @param size output column size + * @param digits output number of digits + * @param nullable output NULL allowed indicator + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLDescribeCol(SQLHSTMT stmt, SQLUSMALLINT col, SQLCHAR *name, + SQLSMALLINT nameMax, SQLSMALLINT *nameLen, + SQLSMALLINT *type, SQLULEN *size, + SQLSMALLINT *digits, SQLSMALLINT *nullable) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvdescribecol(stmt, col, name, nameMax, nameLen, + type, size, digits, nullable); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Describe column information (UNICODE version). + * @param stmt statement handle + * @param col column number, starting at 1 + * @param name buffer for column name + * @param nameMax length of name buffer + * @param nameLen output length of column name + * @param type output SQL type + * @param size output column size + * @param digits output number of digits + * @param nullable output NULL allowed indicator + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLDescribeColW(SQLHSTMT stmt, SQLUSMALLINT col, SQLWCHAR *name, + SQLSMALLINT nameMax, SQLSMALLINT *nameLen, + SQLSMALLINT *type, SQLULEN *size, + SQLSMALLINT *digits, SQLSMALLINT *nullable) +{ + SQLRETURN ret; + SQLSMALLINT len = 0; + + HSTMT_LOCK(stmt); + ret = drvdescribecol(stmt, col, (SQLCHAR *) name, + (SQLSMALLINT) (nameMax * sizeof (SQLWCHAR)), + &len, type, size, digits, nullable); + if (ret == SQL_SUCCESS) { + if (name) { + if (len > 0) { + SQLWCHAR *n = NULL; + + n = uc_from_utf((SQLCHAR *) name, len); + if (n) { + uc_strncpy(name, n, nameMax); + n[len] = 0; + len = min(nameMax, uc_strlen(n)); + uc_free(n); + } else { + len = 0; + } + } + if (len <= 0) { + len = 0; + if (nameMax > 0) { + name[0] = 0; + } + } + } else { + STMT *s = (STMT *) stmt; + COL *c = s->cols + col - 1; + + len = 0; + if (c->column) { + len = strlen(c->column); + } + } + if (nameLen) { + *nameLen = len; + } + } + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Internal retrieve column attributes. + * @param stmt statement handle + * @param col column number, starting at 1 + * @param id attribute id + * @param val output buffer + * @param valMax length of output buffer + * @param valLen output length + * @param val2 integer output buffer + * @result ODBC error code + */ + +static SQLRETURN +drvcolattributes(SQLHSTMT stmt, SQLUSMALLINT col, SQLUSMALLINT id, + SQLPOINTER val, SQLSMALLINT valMax, SQLSMALLINT *valLen, + SQLLEN *val2) +{ + STMT *s; + COL *c; + SQLSMALLINT dummy; + char *valc = (char *) val; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (!s->cols) { + return SQL_ERROR; + } + if (!valLen) { + valLen = &dummy; + } + if (id == SQL_COLUMN_COUNT) { + if (val2) { + *val2 = s->ncols; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + } + if (id == SQL_COLUMN_TYPE && col == 0) { + if (val2) { + *val2 = SQL_INTEGER; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + } +#ifdef SQL_DESC_OCTET_LENGTH + if (id == SQL_DESC_OCTET_LENGTH && col == 0) { + if (val2) { + *val2 = 4; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + } +#endif + if (col < 1 || col > s->ncols) { + setstat(s, -1, "invalid column", (*s->ov3) ? 
"07009": "S1002"); + return SQL_ERROR; + } + c = s->cols + col - 1; + + switch (id) { + case SQL_COLUMN_LABEL: + if (c->label) { + if (valc && valMax > 0) { + strncpy(valc, c->label, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(c->label); + goto checkLen; + } + /* fall through */ + case SQL_COLUMN_NAME: + case SQL_DESC_NAME: + if (valc && valMax > 0) { + strncpy(valc, c->column, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(c->column); +checkLen: + if (*valLen >= valMax) { + setstat(s, -1, "data right truncated", "01004"); + return SQL_SUCCESS_WITH_INFO; + } + return SQL_SUCCESS; +#ifdef SQL_DESC_BASE_COLUMN_NAME + if (strchr(c->column, '(') || strchr(c->column, ')')) { + valc[0] = '\0'; + *valLen = 0; + } else if (valc && valMax > 0) { + strncpy(valc, c->column, valMax); + valc[valMax - 1] = '\0'; + *valLen = strlen(c->column); + } + goto checkLen; +#endif + case SQL_COLUMN_TYPE: + case SQL_DESC_TYPE: +#ifdef WINTERFACE + { + int type = c->type; + + if (s->nowchar[0] || s->nowchar[1]) { + switch (type) { + case SQL_WCHAR: + type = SQL_CHAR; + break; + case SQL_WVARCHAR: + type = SQL_VARCHAR; + break; + #ifdef SQL_LONGVARCHAR + case SQL_WLONGVARCHAR: + type = SQL_LONGVARCHAR; + break; + } + } + if (val2) { + *val2 = type; + } + #endif + } +#else + if (val2) { + *val2 = c->type; + } +#endif + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_DISPLAY_SIZE: + if (val2) { + *val2 = c->size; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_UNSIGNED: + if (val2) { + *val2 = c->nosign ? SQL_TRUE : SQL_FALSE; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_SCALE: + case SQL_DESC_SCALE: + if (c->type == SQL_TIMESTAMP) { + if (val2) { + *val2 = 3; + } + } +#ifdef SQL_TYPE_TIMESTAMP + else if (c->type == SQL_TYPE_TIMESTAMP) { + if (val2) { + *val2 = 3; + } + } +#endif + else if (val2) { + *val2 = c->scale; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_PRECISION: + case SQL_DESC_PRECISION: + if (val2) { + switch (c->type) { + case SQL_SMALLINT: + *val2 = 5; + break; + case SQL_INTEGER: + *val2 = 10; + break; + case SQL_FLOAT: + case SQL_REAL: + case SQL_DOUBLE: + *val2 = 15; + break; + case SQL_DATE: + *val2 = 10; + break; + case SQL_TIME: + *val2 = 8; + break; +#ifdef SQL_TYPE_TIMESTAMP + case SQL_TYPE_TIMESTAMP: +#endif + case SQL_TIMESTAMP: + *val2 = 23; + break; + default: + *val2 = c->prec; + break; + } + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_MONEY: + if (val2) { + *val2 = SQL_FALSE; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_AUTO_INCREMENT: + if (val2) { + *val2 = c->autoinc; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_LENGTH: + case SQL_DESC_LENGTH: + if (val2) { + *val2 = c->size; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_NULLABLE: + case SQL_DESC_NULLABLE: + if (val2) { + *val2 = c->notnull; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_SEARCHABLE: + if (val2) { + *val2 = SQL_SEARCHABLE; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_CASE_SENSITIVE: + if (val2) { + *val2 = SQL_TRUE; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_UPDATABLE: + if (val2) { + *val2 = SQL_TRUE; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_DESC_COUNT: + if (val2) { + *val2 = s->ncols; + } + *valLen = sizeof (int); + return SQL_SUCCESS; + case SQL_COLUMN_TYPE_NAME: { + char *p = NULL, *tn = c->typename ? 
c->typename : "varchar"; + +#ifdef WINTERFACE + if (c->type == SQL_WCHAR || + c->type == SQL_WVARCHAR || + c->type == SQL_WLONGVARCHAR) { + if (!(s->nowchar[0] || s->nowchar[1])) { + if (strcasecmp(tn, "varchar") == 0) { + tn = "wvarchar"; + } + } + } +#endif + if (valc && valMax > 0) { + strncpy(valc, tn, valMax); + valc[valMax - 1] = '\0'; + p = strchr(valc, '('); + if (p) { + *p = '\0'; + while (p > valc && ISSPACE(p[-1])) { + --p; + *p = '\0'; + } + } + *valLen = strlen(valc); + } else { + *valLen = strlen(tn); + p = strchr(tn, '('); + if (p) { + *valLen = p - tn; + while (p > tn && ISSPACE(p[-1])) { + --p; + *valLen -= 1; + } + } + } + goto checkLen; + } + case SQL_COLUMN_OWNER_NAME: + case SQL_COLUMN_QUALIFIER_NAME: { + char *z = ""; + + if (valc && valMax > 0) { + strncpy(valc, z, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(z); + goto checkLen; + } + case SQL_COLUMN_TABLE_NAME: +#if (SQL_COLUMN_TABLE_NAME != SQL_DESC_TABLE_NAME) + case SQL_DESC_TABLE_NAME: +#endif +#ifdef SQL_DESC_BASE_TABLE_NAME + case SQL_DESC_BASE_TABLE_NAME: +#endif + if (valc && valMax > 0) { + strncpy(valc, c->table, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(c->table); + goto checkLen; +#ifdef SQL_DESC_NUM_PREC_RADIX + case SQL_DESC_NUM_PREC_RADIX: + if (val2) { + switch (c->type) { + #ifdef WINTERFACE + case SQL_WCHAR: + case SQL_WVARCHAR: + #ifdef SQL_LONGVARCHAR + case SQL_WLONGVARCHAR: + #endif + #endif + case SQL_CHAR: + case SQL_VARCHAR: + #ifdef SQL_LONGVARCHAR + case SQL_LONGVARCHAR: + #endif + case SQL_BINARY: + case SQL_VARBINARY: + case SQL_LONGVARBINARY: + *val2 = 0; + break; + default: + *val2 = 2; + } + } + *valLen = sizeof (int); + return SQL_SUCCESS; +#endif + } + setstat(s, -1, "unsupported column attributes %d", "HY091", id); + return SQL_ERROR; +} + +#ifndef WINTERFACE +/** + * Retrieve column attributes. + * @param stmt statement handle + * @param col column number, starting at 1 + * @param id attribute id + * @param val output buffer + * @param valMax length of output buffer + * @param valLen output length + * @param val2 integer output buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLColAttributes(SQLHSTMT stmt, SQLUSMALLINT col, SQLUSMALLINT id, + SQLPOINTER val, SQLSMALLINT valMax, SQLSMALLINT *valLen, + SQLLEN *val2) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvcolattributes(stmt, col, id, val, valMax, valLen, val2); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve column attributes (UNICODE version). 
+ * @param stmt statement handle + * @param col column number, starting at 1 + * @param id attribute id + * @param val output buffer + * @param valMax length of output buffer + * @param valLen output length + * @param val2 integer output buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLColAttributesW(SQLHSTMT stmt, SQLUSMALLINT col, SQLUSMALLINT id, + SQLPOINTER val, SQLSMALLINT valMax, SQLSMALLINT *valLen, + SQLLEN *val2) +{ + SQLRETURN ret; + SQLSMALLINT len = 0; + + HSTMT_LOCK(stmt); + ret = drvcolattributes(stmt, col, id, val, valMax, &len, val2); + if (SQL_SUCCEEDED(ret)) { + SQLWCHAR *v = NULL; + + switch (id) { + case SQL_COLUMN_LABEL: + case SQL_COLUMN_NAME: + case SQL_DESC_NAME: + case SQL_COLUMN_TYPE_NAME: + case SQL_COLUMN_OWNER_NAME: + case SQL_COLUMN_QUALIFIER_NAME: + case SQL_COLUMN_TABLE_NAME: +#if (SQL_COLUMN_TABLE_NAME != SQL_DESC_TABLE_NAME) + case SQL_DESC_TABLE_NAME: +#endif +#ifdef SQL_DESC_BASE_COLUMN_NAME + case SQL_DESC_BASE_COLUMN_NAME: +#endif +#ifdef SQL_DESC_BASE_TABLE_NAME + case SQL_DESC_BASE_TABLE_NAME: +#endif + if (val && valMax > 0) { + int vmax = valMax / sizeof (SQLWCHAR); + + v = uc_from_utf((SQLCHAR *) val, SQL_NTS); + if (v) { + uc_strncpy(val, v, vmax); + len = min(vmax, uc_strlen(v)); + uc_free(v); + len *= sizeof (SQLWCHAR); + } + if (vmax > 0) { + v = (SQLWCHAR *) val; + v[vmax - 1] = '\0'; + } + } + if (len <= 0) { + len = 0; + } + break; + } + if (valLen) { + *valLen = len; + } + } + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Internal retrieve column attributes. + * @param stmt statement handle + * @param col column number, starting at 1 + * @param id attribute id + * @param val output buffer + * @param valMax length of output buffer + * @param valLen output length + * @param val2 integer output buffer + * @result ODBC error code + */ + +static SQLRETURN +drvcolattribute(SQLHSTMT stmt, SQLUSMALLINT colnum, SQLUSMALLINT id, + SQLPOINTER val, SQLSMALLINT valMax, SQLSMALLINT *valLen, + SQLPOINTER val2) +{ + STMT *s; + COL *col; + int v = 0; + char *valc = (char *) val; + SQLSMALLINT dummy; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (!s->cols) { + return SQL_ERROR; + } + if (colnum < 1 || colnum > s->ncols) { + setstat(s, -1, "invalid column", (*s->ov3) ? 
"07009" : "S1002"); + return SQL_ERROR; + } + if (!valLen) { + valLen = &dummy; + } + col = s->cols + colnum - 1; + switch (id) { + case SQL_DESC_COUNT: + v = s->ncols; + break; + case SQL_DESC_CATALOG_NAME: + if (valc && valMax > 0) { + strncpy(valc, col->db, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(col->db); +checkLen: + if (*valLen >= valMax) { + setstat(s, -1, "data right truncated", "01004"); + return SQL_SUCCESS_WITH_INFO; + } + break; + case SQL_COLUMN_LENGTH: + case SQL_DESC_LENGTH: + v = col->size; + break; + case SQL_COLUMN_LABEL: + if (col->label) { + if (valc && valMax > 0) { + strncpy(valc, col->label, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(col->label); + goto checkLen; + } + /* fall through */ + case SQL_COLUMN_NAME: + case SQL_DESC_NAME: + if (valc && valMax > 0) { + strncpy(valc, col->column, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(col->column); + goto checkLen; + case SQL_DESC_SCHEMA_NAME: { + char *z = ""; + + if (valc && valMax > 0) { + strncpy(valc, z, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(z); + goto checkLen; + } +#ifdef SQL_DESC_BASE_COLUMN_NAME + case SQL_DESC_BASE_COLUMN_NAME: + if (strchr(col->column, '(') || strchr(col->column, ')')) { + valc[0] = '\0'; + *valLen = 0; + } else if (valc && valMax > 0) { + strncpy(valc, col->column, valMax); + valc[valMax - 1] = '\0'; + *valLen = strlen(col->column); + } + goto checkLen; +#endif + case SQL_DESC_TYPE_NAME: { + char *p = NULL, *tn = col->typename ? col->typename : "varchar"; + +#ifdef WINTERFACE + if (col->type == SQL_WCHAR || + col->type == SQL_WVARCHAR || + col->type == SQL_WLONGVARCHAR) { + if (!(s->nowchar[0] || s->nowchar[1])) { + if (strcasecmp(tn, "varchar") == 0) { + tn = "wvarchar"; + } + } + } +#endif + if (valc && valMax > 0) { + strncpy(valc, tn, valMax); + valc[valMax - 1] = '\0'; + p = strchr(valc, '('); + if (p) { + *p = '\0'; + while (p > valc && ISSPACE(p[-1])) { + --p; + *p = '\0'; + } + } + *valLen = strlen(valc); + } else { + *valLen = strlen(tn); + p = strchr(tn, '('); + if (p) { + *valLen = p - tn; + while (p > tn && ISSPACE(p[-1])) { + --p; + *valLen -= 1; + } + } + } + goto checkLen; + } + case SQL_DESC_OCTET_LENGTH: + v = col->size; +#ifdef WINTERFACE + if (col->type == SQL_WCHAR || + col->type == SQL_WVARCHAR || + col->type == SQL_WLONGVARCHAR) { + if (!(s->nowchar[0] || s->nowchar[1])) { + v *= sizeof (SQLWCHAR); + } + } +#endif + break; +#if (SQL_COLUMN_TABLE_NAME != SQL_DESC_TABLE_NAME) + case SQL_COLUMN_TABLE_NAME: +#endif +#ifdef SQL_DESC_BASE_TABLE_NAME + case SQL_DESC_BASE_TABLE_NAME: +#endif + case SQL_DESC_TABLE_NAME: + if (valc && valMax > 0) { + strncpy(valc, col->table, valMax); + valc[valMax - 1] = '\0'; + } + *valLen = strlen(col->table); + goto checkLen; + case SQL_DESC_TYPE: + v = col->type; +#ifdef WINTERFACE + if (s->nowchar[0] || s->nowchar[1]) { + switch (v) { + case SQL_WCHAR: + v = SQL_CHAR; + break; + case SQL_WVARCHAR: + v = SQL_VARCHAR; + break; + #ifdef SQL_LONGVARCHAR + case SQL_WLONGVARCHAR: + v = SQL_LONGVARCHAR; + break; + #endif + } + } +#endif + break; + case SQL_DESC_CONCISE_TYPE: + switch (col->type) { + case SQL_INTEGER: + v = SQL_C_LONG; + break; + case SQL_TINYINT: + v = SQL_C_TINYINT; + break; + case SQL_SMALLINT: + v = SQL_C_SHORT; + break; + case SQL_FLOAT: + v = SQL_C_FLOAT; + break; + case SQL_DOUBLE: + v = SQL_C_DOUBLE; + break; + case SQL_TIMESTAMP: + v = SQL_C_TIMESTAMP; + break; + case SQL_TIME: + v = SQL_C_TIME; + break; + case SQL_DATE: + v = SQL_C_DATE; + break; 
+#ifdef SQL_C_TYPE_TIMESTAMP + case SQL_TYPE_TIMESTAMP: + v = SQL_C_TYPE_TIMESTAMP; + break; +#endif +#ifdef SQL_C_TYPE_TIME + case SQL_TYPE_TIME: + v = SQL_C_TYPE_TIME; + break; +#endif +#ifdef SQL_C_TYPE_DATE + case SQL_TYPE_DATE: + v = SQL_C_TYPE_DATE; + break; +#endif +#ifdef SQL_BIT + case SQL_BIT: + v = SQL_C_BIT; + break; +#endif +#ifdef SQL_BIGINT + case SQL_BIGINT: + v = SQL_C_SBIGINT; + break; +#endif + default: +#ifdef WINTERFACE + v = (s->nowchar[0] || s->nowchar[1]) ? SQL_C_CHAR : SQL_C_WCHAR; +#else + v = SQL_C_CHAR; +#endif + break; + } + break; + case SQL_DESC_UPDATABLE: + v = SQL_TRUE; + break; + case SQL_COLUMN_DISPLAY_SIZE: + v = col->size; + break; + case SQL_COLUMN_UNSIGNED: + v = col->nosign ? SQL_TRUE : SQL_FALSE; + break; + case SQL_COLUMN_SEARCHABLE: + v = SQL_SEARCHABLE; + break; + case SQL_COLUMN_SCALE: + case SQL_DESC_SCALE: + if (col->type == SQL_TIMESTAMP) { + v = 3; + } +#ifdef SQL_TYPE_TIMESTAMP + else if (col->type == SQL_TYPE_TIMESTAMP) { + v = 3; + } +#endif + else { + v = col->scale; + } + break; + case SQL_COLUMN_PRECISION: + case SQL_DESC_PRECISION: + switch (col->type) { + case SQL_SMALLINT: + v = 5; + break; + case SQL_INTEGER: + v = 10; + break; + case SQL_FLOAT: + case SQL_REAL: + case SQL_DOUBLE: + v = 15; + break; + case SQL_DATE: + v = 10; + break; + case SQL_TIME: + v = 8; + break; +#ifdef SQL_TYPE_TIMESTAMP + case SQL_TYPE_TIMESTAMP: +#endif + case SQL_TIMESTAMP: + v = 23; + break; + default: + v = col->prec; + break; + } + break; + case SQL_COLUMN_MONEY: + v = SQL_FALSE; + break; + case SQL_COLUMN_AUTO_INCREMENT: + v = col->autoinc; + break; + case SQL_DESC_NULLABLE: + v = col->notnull; + break; +#ifdef SQL_DESC_NUM_PREC_RADIX + case SQL_DESC_NUM_PREC_RADIX: + switch (col->type) { + #ifdef WINTERFACE + case SQL_WCHAR: + case SQL_WVARCHAR: + #ifdef SQL_LONGVARCHAR + case SQL_WLONGVARCHAR: + #endif + #endif + case SQL_CHAR: + case SQL_VARCHAR: + #ifdef SQL_LONGVARCHAR + case SQL_LONGVARCHAR: + #endif + case SQL_BINARY: + case SQL_VARBINARY: + case SQL_LONGVARBINARY: + v = 0; + break; + default: + v = 2; + } + break; +#endif + default: + setstat(s, -1, "unsupported column attribute %d", "HY091", id); + return SQL_ERROR; + } + if (val2) { + *(int *) val2 = v; + } + return SQL_SUCCESS; +} + +#ifndef WINTERFACE +/** + * Retrieve column attributes. + * @param stmt statement handle + * @param col column number, starting at 1 + * @param id attribute id + * @param val output buffer + * @param valMax length of output buffer + * @param valLen output length + * @param val2 integer output buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLColAttribute(SQLHSTMT stmt, SQLUSMALLINT col, SQLUSMALLINT id, + SQLPOINTER val, SQLSMALLINT valMax, SQLSMALLINT *valLen, + COLATTRIBUTE_LAST_ARG_TYPE val2) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvcolattribute(stmt, col, id, val, valMax, valLen, + (SQLPOINTER) val2); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Retrieve column attributes (UNICODE version). 
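+ *
+ * Numeric attributes need no UNICODE conversion. Minimal sketch (assumes an
+ * executed statement handle "hstmt"):
+ *
+ *   SQLLEN nullable = 0;
+ *   SQLColAttributeW(hstmt, 1, SQL_DESC_NULLABLE, NULL, 0, NULL, &nullable);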
+ * @param stmt statement handle + * @param col column number, starting at 1 + * @param id attribute id + * @param val output buffer + * @param valMax length of output buffer + * @param valLen output length + * @param val2 integer output buffer + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLColAttributeW(SQLHSTMT stmt, SQLUSMALLINT col, SQLUSMALLINT id, + SQLPOINTER val, SQLSMALLINT valMax, SQLSMALLINT *valLen, + COLATTRIBUTE_LAST_ARG_TYPE val2) +{ + SQLRETURN ret; + SQLSMALLINT len = 0; + + HSTMT_LOCK(stmt); + ret = drvcolattribute(stmt, col, id, val, valMax, &len, + (SQLPOINTER) val2); + if (SQL_SUCCEEDED(ret)) { + SQLWCHAR *v = NULL; + + switch (id) { + case SQL_DESC_SCHEMA_NAME: + case SQL_DESC_CATALOG_NAME: + case SQL_COLUMN_LABEL: + case SQL_DESC_NAME: + case SQL_DESC_TABLE_NAME: + #ifdef SQL_DESC_BASE_TABLE_NAME + case SQL_DESC_BASE_TABLE_NAME: + #endif + #ifdef SQL_DESC_BASE_COLUMN_NAME + case SQL_DESC_BASE_COLUMN_NAME: + #endif + case SQL_DESC_TYPE_NAME: + if (val && valMax > 0) { + int vmax = valMax / sizeof (SQLWCHAR); + + v = uc_from_utf((SQLCHAR *) val, SQL_NTS); + if (v) { + uc_strncpy(val, v, vmax); + len = min(vmax, uc_strlen(v)); + uc_free(v); + len *= sizeof (SQLWCHAR); + } + if (vmax > 0) { + v = (SQLWCHAR *) val; + v[vmax - 1] = '\0'; + } + } + if (len <= 0) { + len = 0; + } + break; + } + if (valLen) { + *valLen = len; + } + } + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Internal return last HDBC or HSTMT error message. + * @param env environment handle or NULL + * @param dbc database connection handle or NULL + * @param stmt statement handle or NULL + * @param sqlState output buffer for SQL state + * @param nativeErr output buffer for native error code + * @param errmsg output buffer for error message + * @param errmax length of output buffer for error message + * @param errlen output length of error message + * @result ODBC error code + */ + +static SQLRETURN +drverror(SQLHENV env, SQLHDBC dbc, SQLHSTMT stmt, + SQLCHAR *sqlState, SQLINTEGER *nativeErr, + SQLCHAR *errmsg, SQLSMALLINT errmax, SQLSMALLINT *errlen) +{ + SQLCHAR dummy0[6]; + SQLINTEGER dummy1; + SQLSMALLINT dummy2; + + if (env == SQL_NULL_HENV && + dbc == SQL_NULL_HDBC && + stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + if (sqlState) { + sqlState[0] = '\0'; + } else { + sqlState = dummy0; + } + if (!nativeErr) { + nativeErr = &dummy1; + } + *nativeErr = 0; + if (!errlen) { + errlen = &dummy2; + } + *errlen = 0; + if (errmsg) { + if (errmax > 0) { + errmsg[0] = '\0'; + } + } else { + errmsg = dummy0; + errmax = 0; + } + if (stmt) { + STMT *s = (STMT *) stmt; + + HSTMT_LOCK(stmt); + if (s->logmsg[0] == '\0') { + HSTMT_UNLOCK(stmt); + goto noerr; + } + *nativeErr = s->naterr; + strcpy((char *) sqlState, s->sqlstate); + if (errmax == SQL_NTS) { + strcpy((char *) errmsg, "[SQLite]"); + strcat((char *) errmsg, (char *) s->logmsg); + *errlen = strlen((char *) errmsg); + } else { + strncpy((char *) errmsg, "[SQLite]", errmax); + if (errmax - 8 > 0) { + strncpy((char *) errmsg + 8, (char *) s->logmsg, errmax - 8); + } + *errlen = min(strlen((char *) s->logmsg) + 8, errmax); + } + s->logmsg[0] = '\0'; + HSTMT_UNLOCK(stmt); + return SQL_SUCCESS; + } + if (dbc) { + DBC *d = (DBC *) dbc; + + HDBC_LOCK(dbc); + if (d->magic != DBC_MAGIC || d->logmsg[0] == '\0') { + HDBC_UNLOCK(dbc); + goto noerr; + } + *nativeErr = d->naterr; + strcpy((char *) sqlState, d->sqlstate); + if (errmax == SQL_NTS) { + strcpy((char *) errmsg, "[SQLite]"); + strcat((char *) errmsg, (char *) d->logmsg); + 
*errlen = strlen((char *) errmsg); + } else { + strncpy((char *) errmsg, "[SQLite]", errmax); + if (errmax - 8 > 0) { + strncpy((char *) errmsg + 8, (char *) d->logmsg, errmax - 8); + } + *errlen = min(strlen((char *) d->logmsg) + 8, errmax); + } + d->logmsg[0] = '\0'; + HDBC_UNLOCK(dbc); + return SQL_SUCCESS; + } +noerr: + sqlState[0] = '\0'; + errmsg[0] = '\0'; + *nativeErr = 0; + *errlen = 0; + return SQL_NO_DATA; +} + +#ifndef WINTERFACE +/** + * Return last HDBC or HSTMT error message. + * @param env environment handle or NULL + * @param dbc database connection handle or NULL + * @param stmt statement handle or NULL + * @param sqlState output buffer for SQL state + * @param nativeErr output buffer for native error code + * @param errmsg output buffer for error message + * @param errmax length of output buffer for error message + * @param errlen output length of error message + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLError(SQLHENV env, SQLHDBC dbc, SQLHSTMT stmt, + SQLCHAR *sqlState, SQLINTEGER *nativeErr, + SQLCHAR *errmsg, SQLSMALLINT errmax, SQLSMALLINT *errlen) +{ + return drverror(env, dbc, stmt, sqlState, nativeErr, + errmsg, errmax, errlen); +} +#endif + +#ifdef WINTERFACE +/** + * Return last HDBC or HSTMT error message (UNICODE version). + * @param env environment handle or NULL + * @param dbc database connection handle or NULL + * @param stmt statement handle or NULL + * @param sqlState output buffer for SQL state + * @param nativeErr output buffer for native error code + * @param errmsg output buffer for error message + * @param errmax length of output buffer for error message + * @param errlen output length of error message + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLErrorW(SQLHENV env, SQLHDBC dbc, SQLHSTMT stmt, + SQLWCHAR *sqlState, SQLINTEGER *nativeErr, + SQLWCHAR *errmsg, SQLSMALLINT errmax, SQLSMALLINT *errlen) +{ + char state[16]; + SQLSMALLINT len = 0; + SQLRETURN ret; + + ret = drverror(env, dbc, stmt, (SQLCHAR *) state, nativeErr, + (SQLCHAR *) errmsg, errmax, &len); + if (ret == SQL_SUCCESS) { + if (sqlState) { + uc_from_utf_buf((SQLCHAR *) state, -1, sqlState, + 6 * sizeof (SQLWCHAR)); + } + if (errmsg) { + if (len > 0) { + SQLWCHAR *e = NULL; + + e = uc_from_utf((SQLCHAR *) errmsg, len); + if (e) { + if (errmax > 0) { + uc_strncpy(errmsg, e, errmax); + e[len] = 0; + len = min(errmax, uc_strlen(e)); + } else { + len = uc_strlen(e); + } + uc_free(e); + } else { + len = 0; + } + } + if (len <= 0) { + len = 0; + if (errmax > 0) { + errmsg[0] = 0; + } + } + } else { + len = 0; + } + if (errlen) { + *errlen = len; + } + } else if (ret == SQL_NO_DATA) { + if (sqlState) { + sqlState[0] = 0; + } + if (errmsg) { + if (errmax > 0) { + errmsg[0] = 0; + } + } + if (errlen) { + *errlen = 0; + } + } + return ret; +} +#endif + +/** + * Return information for more result sets. 
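+ *
+ * The driver exposes a single result set per statement, so after the handle
+ * check this always reports SQL_NO_DATA.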
+ * @param stmt statement handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLMoreResults(SQLHSTMT stmt) +{ + HSTMT_LOCK(stmt); + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + HSTMT_UNLOCK(stmt); + return SQL_NO_DATA; +} + +/** + * Internal function to setup column name/type information + * @param s statement poiner + * @result ODBC error code + */ + +static SQLRETURN +setupdyncols(STMT *s) +{ + size_t ncols; + SQLRETURN ret = SQL_SUCCESS; + hive_err_info hive_error; + HiveReturn rc; + int i; + PTRDIFF_T size; + char *p; + COL *dyncols = NULL; + DBC *d = (DBC *) s->dbc; + HiveColumnDesc *column_desc = NULL; + char colname[MAX_COLUMN_NAME_LEN]; + char typename[MAX_COLUMN_TYPE_LEN]; + + rc = DBGetColumnCount(s->hive_resultset, &ncols, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + return SQL_ERROR; + } + + if (ncols == 0) { + /* TODO: log INFO message zero columns found in resultset */ + return SQL_SUCCESS; + } + + for (i = size = 0; i < ncols; i++) { + rc = DBCreateColumnDesc(s->hive_resultset, i, &column_desc, + &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + DBCloseColumnDesc(column_desc, &hive_error, sizeof(hive_error.err_buf)); + return SQL_ERROR; + } + + DBGetColumnName(column_desc, colname, sizeof(colname)); + size += 3 + 3 * strlen(colname); + DBCloseColumnDesc(column_desc, &hive_error, sizeof(hive_error.err_buf)); + } + + dyncols = xmalloc(ncols * sizeof (COL) + size); + if (NULL == dyncols) { + freedyncols(s); + /* FIXME? *ncolsp = 0; */ + ret = SQL_ERROR; + } + + p = (char *) (dyncols + ncols); + + for (i = 0; i < ncols; i++) { + char *q; + + rc = DBCreateColumnDesc(s->hive_resultset, i, &column_desc, + &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + DBCloseColumnDesc(column_desc, &hive_error, sizeof(hive_error.err_buf)); + return SQL_ERROR; + } + DBGetColumnName(column_desc, colname, sizeof(colname)); + DBGetColumnType(column_desc, typename, sizeof(typename)); + + if (d->trace) { + fprintf(d->trace, "-- column %d name: '%s'\n", + i + 1, colname); + fflush(d->trace); + } + dyncols[i].db = ((DBC *) (s->dbc))->dbname; + strcpy(p, colname); + dyncols[i].label = p; + p += strlen(p) + 1; + q = strchr(colname, '.'); + if (q) { + char *q2 = strchr(q + 1, '.'); + + /* SQLite 3.3.4 produces view.table.column sometimes */ + if (q2) { + q = q2; + } + } + if (q) { + strncpy(p, colname, q - colname); + p[q - colname] = '\0'; + p += strlen(p) + 1; + strcpy(p, q + 1); + dyncols[i].column = p; + p += strlen(p) + 1; + } else { + strcpy(p, colname); + dyncols[i].column = p; + p += strlen(p) + 1; + } + if (s->longnames) { + dyncols[i].column = dyncols[i].label; + } + dyncols[i].type = hive_to_sql_type(DBGetHiveType(column_desc)); + dyncols[i].size = (int) DBGetFieldByteSize(column_desc); + dyncols[i].index = i; + dyncols[i].scale = 0; + dyncols[i].prec = 0; + dyncols[i].nosign = 1; + dyncols[i].autoinc = SQL_FALSE; + dyncols[i].notnull = DBGetIsNullable(column_desc) ? 
SQL_NULLABLE : SQL_NO_NULLS; + dyncols[i].typename = xstrdup(typename); + + DBCloseColumnDesc(column_desc, &hive_error, sizeof(hive_error.err_buf)); + } + + freedyncols(s); + s->dyncols = s->cols = dyncols; + s->dcols = s->ncols = ncols; + return ret; +} + +/** + * Internal query preparation used by SQLPrepare() and SQLExecDirect(). + * @param stmt statement handle + * @param query query string + * @param queryLen length of query string or SQL_NTS + * @result ODBC error code + */ + +static SQLRETURN +drvprepare(SQLHSTMT stmt, SQLCHAR *query, SQLINTEGER queryLen) +{ + STMT *s; + DBC *d; + char *errp = NULL; + SQLRETURN sret; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (s->dbc == SQL_NULL_HDBC) { +noconn: + return noconn(s); + } + d = s->dbc; + if (NULL == d->hive_conn) { + goto noconn; + } + freep(&s->query); + s->query = (SQLCHAR *) fixupsql((char *) query, queryLen, + &s->nparams, &s->isselect, &errp); + if (!s->query) { + if (errp) { + setstat(s, -1, errp, (*s->ov3) ? "HY000" : "S1000"); + return SQL_ERROR; + } + return nomem(s); + } + errp = NULL; + freeresult(s, -1); + mkbindcols(s, s->ncols); + if (s->nparams) { + s->paramset_count = 0; + } + return SQL_SUCCESS; +} + +/** + * Internal query execution used by SQLExecute() and SQLExecDirect(). + * @param stmt statement handle + * @param initial false when called from SQLPutData() + * @result ODBC error code + */ + +static SQLRETURN +drvexecute(SQLHSTMT stmt, int initial) +{ + STMT *s; + DBC *d; + char *errp = NULL; + int i, busy_count; + size_t ncols = 0; + hive_err_info hive_error; + SQLRETURN ret; + HiveReturn rc; + int fetch_row_size; + + if (stmt == SQL_NULL_HSTMT) { + return SQL_INVALID_HANDLE; + } + s = (STMT *) stmt; + if (s->dbc == SQL_NULL_HDBC) { + return noconn(s); + } + d = (DBC *) s->dbc; + if (NULL == d->hive_conn) { + return noconn(s); + } + if (NULL == s->query) { + setstat(s, -1, "no query prepared", (*s->ov3) ? "HY000" : "S1000"); + return SQL_ERROR; + } + + if (NULL != s->hive_resultset) { + freeresult(s, 1); + } + + SET_FETCH_ROW_SIZE(s->rowset_size, fetch_row_size); + rc = DBExecute(d->hive_conn, s->query, &(s->hive_resultset), s->rowset_size, + fetch_row_size, &hive_error, sizeof(hive_error.err_buf), + s->query_timeout*1000); + switch(rc) { + case HIVE_TIMEOUT: + setstat(s, -1, "Timeout expired", "HYT00"); + return SQL_ERROR; + case HIVE_NETWORK_ERROR: + setstat(s, -1, "Communication link failure", "08S01"); + return SQL_ERROR; + case HIVE_ERROR: + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + return SQL_ERROR; + default: + break; + } + + /* prepare the results */ + /* TODO: factor this out */ + rc = DBGetColumnCount(s->hive_resultset, &ncols, &hive_error, sizeof(hive_error.err_buf)); + if (HIVE_ERROR == rc) { + setstat(s, hive_error.native_err, hive_error.err_buf, hive_error.sql_state); + return SQL_ERROR; + } + + if (0 == ncols) { + /* TODO: Log INFO */ + return SQL_SUCCESS; + } + + ret = setupdyncols(s); + return ret; +} + +#ifndef WINTERFACE +/** + * Prepare HSTMT. + * @param stmt statement handle + * @param query query string + * @param queryLen length of query string or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLPrepare(SQLHSTMT stmt, SQLCHAR *query, SQLINTEGER queryLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvprepare(stmt, query, queryLen); + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Prepare HSTMT (UNICODE version). 
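+ *
+ * The query text is converted to UTF-8 and passed to drvprepare(); execution
+ * is deferred until SQLExecute(). Sketch ("query_w" stands for caller-owned
+ * SQLWCHAR query text):
+ *
+ *   SQLPrepareW(hstmt, query_w, SQL_NTS);
+ *   SQLExecute(hstmt);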
+ * @param stmt statement handle + * @param query query string + * @param queryLen length of query string or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLPrepareW(SQLHSTMT stmt, SQLWCHAR *query, SQLINTEGER queryLen) +{ + SQLRETURN ret; + char *q = uc_to_utf_c(query, queryLen); + + HSTMT_LOCK(stmt); + if (!q) { + ret = nomem((STMT *) stmt); + goto done; + } + ret = drvprepare(stmt, (SQLCHAR *) q, SQL_NTS); + uc_free(q); +done: + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +/** + * Execute query. + * @param stmt statement handle + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLExecute(SQLHSTMT stmt) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvexecute(stmt, 1); + HSTMT_UNLOCK(stmt); + return ret; +} + +#ifndef WINTERFACE +/** + * Execute query directly. + * @param stmt statement handle + * @param query query string + * @param queryLen length of query string or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLExecDirect(SQLHSTMT stmt, SQLCHAR *query, SQLINTEGER queryLen) +{ + SQLRETURN ret; + + HSTMT_LOCK(stmt); + ret = drvprepare(stmt, query, queryLen); + if (ret == SQL_SUCCESS) { + ret = drvexecute(stmt, 1); + } + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Execute query directly (UNICODE version). + * @param stmt statement handle + * @param query query string + * @param queryLen length of query string or SQL_NTS + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLExecDirectW(SQLHSTMT stmt, SQLWCHAR *query, SQLINTEGER queryLen) +{ + SQLRETURN ret; + char *q = uc_to_utf_c(query, queryLen); + + HSTMT_LOCK(stmt); + if (!q) { + ret = nomem((STMT *) stmt); + goto done; + } + ret = drvprepare(stmt, (SQLCHAR *) q, SQL_NTS); + uc_free(q); + if (ret == SQL_SUCCESS) { + ret = drvexecute(stmt, 1); + } +done: + HSTMT_UNLOCK(stmt); + return ret; +} +#endif + + +#if (defined(_WIN32) || defined(_WIN64)) +//&& defined(WINGUI) + #ifndef WITHOUT_DRIVERMGR + +/* + * Windows configuration dialog stuff. + */ + + #include + #include + + #define MAXPATHLEN (255+1) /* Max path length */ + #define MAXKEYLEN (15+1) /* Max keyword length */ + #define MAXDSNAME (32+1) /* Max data source name length */ + #define MAXDBNAME (255+1) + #define MAXHOSTNAME (255+1) + #define MAXPORTNAME (5+1) + #define MAXFRAMEDNAME (1+1) + #define MAXTRACEFILENAME (255+1) + +/* Attribute key indexes into an array of Attr structs, see below */ + + #define KEY_DSN 0 + #define KEY_DBNAME 1 + #define KEY_DRIVER 2 + #define KEY_HOST 3 + #define KEY_PORT 4 + #define KEY_FRAMED 5 + #define KEY_TRACEFILE 6 + #define NUMOFKEYS 7 + +typedef struct { + BOOL supplied; + char attr[MAXPATHLEN*4]; +} ATTR; + +typedef struct { + SQLHWND parent; + LPCSTR driver; + ATTR attr[NUMOFKEYS]; + char DSN[MAXDSNAME]; + BOOL newDSN; + BOOL defDSN; +} SETUPDLG; + +static struct { + char *key; + int ikey; +} attrLookup[] = { + { "DSN", KEY_DSN }, + { "Database", KEY_DBNAME }, + { "Driver", KEY_DRIVER }, + { "Host", KEY_HOST }, + { "Port", KEY_PORT }, + { "Framed", KEY_FRAMED }, + { "Tracefile", KEY_TRACEFILE }, + { NULL, 0 } +}; + +/** + * Setup dialog data from datasource attributes. 
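+ *
+ * The attribute string is a ';'-separated list of key=value pairs using the
+ * keys from attrLookup[], for example (values purely illustrative):
+ *
+ *   DSN=MyHiveDSN;Host=localhost;Port=10000;Database=default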
+ * @param attribs attribute string + * @param setupdlg pointer to dialog data + */ + +static void +ParseAttributes(LPCSTR attribs, SETUPDLG *setupdlg) +{ + char *str = (char *) attribs, *start, key[MAXKEYLEN]; + int elem, nkey; + + while (*str) { + start = str; + if ((str = strchr(str, '=')) == NULL) { + return; + } + elem = -1; + nkey = str - start; + if (nkey < sizeof (key)) { + int i; + + memcpy(key, start, nkey); + key[nkey] = '\0'; + for (i = 0; attrLookup[i].key; i++) { + if (strcasecmp(attrLookup[i].key, key) == 0) { + elem = attrLookup[i].ikey; + break; + } + } + } + start = ++str; + while (*str && *str != ';') { + ++str; + } + if (elem >= 0) { + int end = min(str - start, sizeof (setupdlg->attr[elem].attr) - 1); + + setupdlg->attr[elem].supplied = TRUE; + memcpy(setupdlg->attr[elem].attr, start, end); + setupdlg->attr[elem].attr[end] = '\0'; + } + ++str; + } +} + +/** + * Set datasource attributes in registry. + * @param parent handle of parent window + * @param setupdlg pointer to dialog data + * @result true or false + */ + +static BOOL +SetDSNAttributes(HWND parent, SETUPDLG *setupdlg) +{ + char *dsn = setupdlg->attr[KEY_DSN].attr; + + if (setupdlg->newDSN && strlen(dsn) == 0) { + return FALSE; + } + if (!SQLWriteDSNToIni(dsn, setupdlg->driver)) { + if (parent) { + char buf[MAXPATHLEN], msg[MAXPATHLEN]; + + LoadString(hModule, IDS_BADDSN, buf, sizeof (buf)); + wsprintf(msg, buf, dsn); + LoadString(hModule, IDS_MSGTITLE, buf, sizeof (buf)); + MessageBox(parent, msg, buf, + MB_ICONEXCLAMATION | MB_OK | MB_TASKMODAL | + MB_SETFOREGROUND); + } + return FALSE; + } + if (parent || setupdlg->attr[KEY_DBNAME].supplied) { + SQLWritePrivateProfileString(dsn, "Database", + setupdlg->attr[KEY_DBNAME].attr, + ODBC_INI); + } + if (parent || setupdlg->attr[KEY_HOST].supplied) { + SQLWritePrivateProfileString(dsn, "Host", + setupdlg->attr[KEY_HOST].attr, + ODBC_INI); + } + if (parent || setupdlg->attr[KEY_PORT].supplied) { + SQLWritePrivateProfileString(dsn, "Port", + setupdlg->attr[KEY_PORT].attr, + ODBC_INI); + } + if (parent || setupdlg->attr[KEY_FRAMED].supplied) { + SQLWritePrivateProfileString(dsn, "Framed", + setupdlg->attr[KEY_FRAMED].attr, + ODBC_INI); + } + if (parent || setupdlg->attr[KEY_TRACEFILE].supplied) { + SQLWritePrivateProfileString(dsn, "Tracefile", + setupdlg->attr[KEY_TRACEFILE].attr, + ODBC_INI); + } + if (setupdlg->attr[KEY_DSN].supplied && + strcasecmp(setupdlg->DSN, setupdlg->attr[KEY_DSN].attr)) { + SQLRemoveDSNFromIni(setupdlg->DSN); + } + return TRUE; +} + +/** + * Get datasource attributes from registry. 
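+ *
+ * Keys not already supplied in the attribute string are read from ODBC.INI,
+ * falling back to the DEFAULT_DATABASE / DEFAULT_HOST / DEFAULT_PORT /
+ * DEFAULT_FRAMED compile-time defaults (Tracefile defaults to empty).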
+ * @param setupdlg pointer to dialog data + */ + +static void +GetAttributes(SETUPDLG *setupdlg) +{ + char *dsn = setupdlg->attr[KEY_DSN].attr; + + if (!setupdlg->attr[KEY_DBNAME].supplied) { + SQLGetPrivateProfileString(dsn, "Database", DEFAULT_DATABASE, + setupdlg->attr[KEY_DBNAME].attr, + sizeof (setupdlg->attr[KEY_DBNAME].attr), + ODBC_INI); + } + if (!setupdlg->attr[KEY_HOST].supplied) { + SQLGetPrivateProfileString(dsn, "Host", DEFAULT_HOST, + setupdlg->attr[KEY_HOST].attr, + sizeof (setupdlg->attr[KEY_HOST].attr), + ODBC_INI); + } + if (!setupdlg->attr[KEY_PORT].supplied) { + SQLGetPrivateProfileString(dsn, "Port", DEFAULT_PORT, + setupdlg->attr[KEY_PORT].attr, + sizeof (setupdlg->attr[KEY_PORT].attr), + ODBC_INI); + } + if (!setupdlg->attr[KEY_FRAMED].supplied) { + SQLGetPrivateProfileString(dsn, "Framed", DEFAULT_FRAMED, + setupdlg->attr[KEY_FRAMED].attr, + sizeof (setupdlg->attr[KEY_FRAMED].attr), + ODBC_INI); + } + if (!setupdlg->attr[KEY_TRACEFILE].supplied) { + SQLGetPrivateProfileString(dsn, "Tracefile", "", + setupdlg->attr[KEY_TRACEFILE].attr, + sizeof (setupdlg->attr[KEY_TRACEFILE].attr), + ODBC_INI); + } +} + + +/** + * Dialog procedure for ConfigDSN(). + * @param hdlg handle of dialog window + * @param wmsg type of message + * @param wparam wparam of message + * @param lparam lparam of message + * @result true or false + */ + +static BOOL CALLBACK +ConfigDlgProc(HWND hdlg, WORD wmsg, WPARAM wparam, LPARAM lparam) +{ + SETUPDLG *setupdlg = NULL; + WORD index; + + switch (wmsg) { + case WM_INITDIALOG: + #ifdef _WIN64 + SetWindowLong(hdlg, DWLP_USER, lparam); + #else + SetWindowLong(hdlg, DWL_USER, lparam); + #endif + setupdlg = (SETUPDLG *) lparam; + GetAttributes(setupdlg); + + SetDlgItemText(hdlg, IDC_DSNAME, setupdlg->attr[KEY_DSN].attr); + SendDlgItemMessage(hdlg, IDC_DSNAME, EM_LIMITTEXT, + (WPARAM) (MAXDSNAME - 1), (LPARAM) 0); + + SetDlgItemText(hdlg, IDC_DBNAME, setupdlg->attr[KEY_DBNAME].attr); + SendDlgItemMessage(hdlg, IDC_DBNAME, EM_LIMITTEXT, + (WPARAM) (MAXDBNAME - 1), (LPARAM) 0); + + SetDlgItemText(hdlg, IDC_HOSTNAME, setupdlg->attr[KEY_HOST].attr); + SendDlgItemMessage(hdlg, IDC_HOSTNAME, EM_LIMITTEXT, + (WPARAM) (MAXHOSTNAME - 1), (LPARAM) 0); + + SetDlgItemText(hdlg, IDC_PORTNAME, setupdlg->attr[KEY_PORT].attr); + SendDlgItemMessage(hdlg, IDC_PORTNAME, EM_LIMITTEXT, + (WPARAM) (MAXPORTNAME - 1), (LPARAM) 0); + +#ifdef NEVER + SetDlgItemText(hdlg, IDC_FRAMEDNAME, setupdlg->attr[KEY_FRAMED].attr); + SendDlgItemMessage(hdlg, IDC_FRAMEDNAME, EM_LIMITTEXT, + (WPARAM) (MAXFRAMEDNAME - 1), (LPARAM) 0); + + SetDlgItemText(hdlg, IDC_TRACEFILENAME, setupdlg->attr[KEY_TRACEFILE].attr); + SendDlgItemMessage(hdlg, IDC_TRACEFILENAME, EM_LIMITTEXT, + (WPARAM) (MAXTRACEFILENAME - 1), (LPARAM) 0); +#endif + + if (setupdlg->defDSN) { + EnableWindow(GetDlgItem(hdlg, IDC_DSNAME), FALSE); + EnableWindow(GetDlgItem(hdlg, IDC_DSNAMETEXT), FALSE); + } + return TRUE; + case WM_COMMAND: + switch (GET_WM_COMMAND_ID(wparam, lparam)) { + case IDC_DSNAME: + if (GET_WM_COMMAND_CMD(wparam, lparam) == EN_CHANGE) { + char item[MAXDSNAME]; + + EnableWindow(GetDlgItem(hdlg, IDOK), + GetDlgItemText(hdlg, IDC_DSNAME, + item, sizeof (item))); + return TRUE; + } + break; + case IDOK: + #ifdef _WIN64 + setupdlg = (SETUPDLG *) GetWindowLongPtr(hdlg, DWLP_USER); + #else + setupdlg = (SETUPDLG *) GetWindowLong(hdlg, DWL_USER); + #endif + if (!setupdlg->defDSN) { + GetDlgItemText(hdlg, IDC_DSNAME, + setupdlg->attr[KEY_DSN].attr, + sizeof (setupdlg->attr[KEY_DSN].attr)); + } + + 
GetDlgItemText(hdlg, IDC_DBNAME, + setupdlg->attr[KEY_DBNAME].attr, + sizeof (setupdlg->attr[KEY_DBNAME].attr)); + GetDlgItemText(hdlg, IDC_HOSTNAME, + setupdlg->attr[KEY_HOST].attr, + sizeof (setupdlg->attr[KEY_HOST].attr)); + GetDlgItemText(hdlg, IDC_PORTNAME, + setupdlg->attr[KEY_PORT].attr, + sizeof (setupdlg->attr[KEY_PORT].attr)); + +#ifdef NEVER + GetDlgItemText(hdlg, IDC_FRAMEDNAME, + setupdlg->attr[KEY_FRAMED].attr, + sizeof (setupdlg->attr[KEY_FRAMED].attr)); + GetDlgItemText(hdlg, IDC_TRACEFILENAME, + setupdlg->attr[KEY_TRACEFILE].attr, + sizeof (setupdlg->attr[KEY_TRACEFILE].attr)); +#endif + SetDSNAttributes(hdlg, setupdlg); + /* FALL THROUGH */ + case IDCANCEL: + EndDialog(hdlg, wparam); + return TRUE; + } + break; + } + return FALSE; +} + +/** + * ODBC INSTAPI procedure for DSN configuration. + * @param hwnd parent window handle + * @param request type of request + * @param driver driver name + * @param attribs attribute string of DSN + * @result true or false + */ + +BOOL INSTAPI +ConfigDSN(HWND hwnd, WORD request, LPCSTR driver, LPCSTR attribs) +{ + BOOL success; + SETUPDLG *setupdlg; + + setupdlg = (SETUPDLG *) xmalloc(sizeof (SETUPDLG)); + if (setupdlg == NULL) { + return FALSE; + } + memset(setupdlg, 0, sizeof (SETUPDLG)); + if (attribs) { + ParseAttributes(attribs, setupdlg); + } + if (setupdlg->attr[KEY_DSN].supplied) { + strcpy(setupdlg->DSN, setupdlg->attr[KEY_DSN].attr); + } else { + setupdlg->DSN[0] = '\0'; + } + if (request == ODBC_REMOVE_DSN) { + if (!setupdlg->attr[KEY_DSN].supplied) { + success = FALSE; + } else { + success = SQLRemoveDSNFromIni(setupdlg->attr[KEY_DSN].attr); + } + } else { + setupdlg->parent = hwnd; + setupdlg->driver = driver; + setupdlg->newDSN = request == ODBC_ADD_DSN; + setupdlg->defDSN = strcasecmp(setupdlg->attr[KEY_DSN].attr, + "Default") == 0; + if (hwnd) { + success = DialogBoxParam(hModule, MAKEINTRESOURCE(CONFIGDSN), + hwnd, (DLGPROC) ConfigDlgProc, + (LPARAM) setupdlg) == IDOK; + } else if (setupdlg->attr[KEY_DSN].supplied) { + success = SetDSNAttributes(hwnd, setupdlg); + } else { + success = FALSE; + } + } + xfree(setupdlg); + return success; +} + +/** + * Dialog procedure for SQLDriverConnect(). 
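+ * Prompts for DSN, host, port and database and copies the edited values
+ * back into the SETUPDLG attribute array when OK is pressed.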
+ * @param hdlg handle of dialog window + * @param wmsg type of message + * @param wparam wparam of message + * @param lparam lparam of message + * @result true or false + */ + +static BOOL CALLBACK +DriverConnectProc(HWND hdlg, WORD wmsg, WPARAM wparam, LPARAM lparam) +{ + SETUPDLG *setupdlg; + WORD index; + + switch (wmsg) { + case WM_INITDIALOG: + #ifdef _WIN64 + SetWindowLong(hdlg, DWLP_USER, lparam); + #else + SetWindowLong(hdlg, DWL_USER, lparam); + #endif + setupdlg = (SETUPDLG *) lparam; + + SetDlgItemText(hdlg, IDC_DSNAME, setupdlg->attr[KEY_DSN].attr); + SendDlgItemMessage(hdlg, IDC_DSNAME, EM_LIMITTEXT, + (WPARAM) (MAXDSNAME - 1), (LPARAM) 0); + + SetDlgItemText(hdlg, IDC_DBNAME, setupdlg->attr[KEY_DBNAME].attr); + SendDlgItemMessage(hdlg, IDC_DBNAME, EM_LIMITTEXT, + (WPARAM) (MAXDBNAME - 1), (LPARAM) 0); + + SetDlgItemText(hdlg, IDC_HOSTNAME, setupdlg->attr[KEY_HOST].attr); + SendDlgItemMessage(hdlg, IDC_HOSTNAME, EM_LIMITTEXT, + (WPARAM) (MAXHOSTNAME - 1), (LPARAM) 0); + + SetDlgItemText(hdlg, IDC_PORTNAME, setupdlg->attr[KEY_PORT].attr); + SendDlgItemMessage(hdlg, IDC_PORTNAME, EM_LIMITTEXT, + (WPARAM) (MAXPORTNAME - 1), (LPARAM) 0); + +#ifdef NEVER + SetDlgItemText(hdlg, IDC_FRAMEDNAME, setupdlg->attr[KEY_FRAMED].attr); + SendDlgItemMessage(hdlg, IDC_FRAMEDNAME, EM_LIMITTEXT, + (WPARAM) (MAXFRAMEDNAME - 1), (LPARAM) 0); + + SetDlgItemText(hdlg, IDC_TRACEFILENAME, setupdlg->attr[KEY_TRACEFILE].attr); + SendDlgItemMessage(hdlg, IDC_TRACEFILENAME, EM_LIMITTEXT, + (WPARAM) (MAXTRACEFILENAME - 1), (LPARAM) 0); +#endif + if (setupdlg->defDSN) { + EnableWindow(GetDlgItem(hdlg, IDC_DSNAME), FALSE); + EnableWindow(GetDlgItem(hdlg, IDC_DSNAMETEXT), FALSE); + } + return TRUE; + case WM_COMMAND: + switch (GET_WM_COMMAND_ID(wparam, lparam)) { + case IDOK: + #ifdef _WIN64 + setupdlg = (SETUPDLG *) GetWindowLongPtr(hdlg, DWLP_USER); + #else + setupdlg = (SETUPDLG *) GetWindowLong(hdlg, DWL_USER); + #endif + GetDlgItemText(hdlg, IDC_DSNAME, + setupdlg->attr[KEY_DSN].attr, + sizeof (setupdlg->attr[KEY_DSN].attr)); + + GetDlgItemText(hdlg, IDC_DBNAME, + setupdlg->attr[KEY_DBNAME].attr, + sizeof (setupdlg->attr[KEY_DBNAME].attr)); + GetDlgItemText(hdlg, IDC_HOSTNAME, + setupdlg->attr[KEY_HOST].attr, + sizeof (setupdlg->attr[KEY_HOST].attr)); + GetDlgItemText(hdlg, IDC_PORTNAME, + setupdlg->attr[KEY_PORT].attr, + sizeof (setupdlg->attr[KEY_PORT].attr)); +#ifdef NEVER + GetDlgItemText(hdlg, IDC_FRAMEDNAME, + setupdlg->attr[KEY_FRAMED].attr, + sizeof (setupdlg->attr[KEY_FRAMED].attr)); + GetDlgItemText(hdlg, IDC_TRACEFILENAME, + setupdlg->attr[KEY_TRACEFILE].attr, + sizeof (setupdlg->attr[KEY_TRACEFILE].attr)); +#endif + /* FALL THROUGH */ + case IDCANCEL: + EndDialog(hdlg, GET_WM_COMMAND_ID(wparam, lparam) == IDOK); + return TRUE; + } + } + return FALSE; +} + +/** + * Internal connect using a driver connection string. 
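+ * Parses the incoming connection string, prompts the user when the
+ * completion type requires it, builds the output connection string and
+ * finally opens the Hive server connection.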
+ * @param dbc database connection handle + * @param hwnd parent window handle + * @param connIn driver connect input string + * @param connInLen length of driver connect input string or SQL_NTS + * @param connOut driver connect output string + * @param connOutMax length of driver connect output string + * @param connOutLen output length of driver connect output string + * @param drvcompl completion type + * @result ODBC error code + */ + +static SQLRETURN +drvdriverconnect(SQLHDBC dbc, SQLHWND hwnd, + SQLCHAR *connIn, SQLSMALLINT connInLen, + SQLCHAR *connOut, SQLSMALLINT connOutMax, + SQLSMALLINT *connOutLen, SQLUSMALLINT drvcompl) +{ + BOOL maybeprompt, prompt = FALSE; + DBC *d; + SETUPDLG *setupdlg; + SQLRETURN ret = SQL_SUCCESS; + char *dsn = NULL, *driver = NULL, *dbname = NULL; + hive_err_info hive_error; + + if (dbc == SQL_NULL_HDBC) { + return SQL_INVALID_HANDLE; + } + d = (DBC *) dbc; + if (d->hive_conn) { + setstatd(d, -1, "connection already established", "08002"); + return SQL_ERROR; + } + setupdlg = (SETUPDLG *) xmalloc(sizeof (SETUPDLG)); + if (setupdlg == NULL) { + return SQL_ERROR; + } + memset(setupdlg, 0, sizeof (SETUPDLG)); + maybeprompt = drvcompl == SQL_DRIVER_COMPLETE || + drvcompl == SQL_DRIVER_COMPLETE_REQUIRED; + if (connIn == NULL || !connInLen || + (connInLen == SQL_NTS && !connIn[0])) { + prompt = TRUE; + } else { + ParseAttributes((LPCSTR) connIn, setupdlg); + if (!setupdlg->attr[KEY_DSN].attr[0] && + drvcompl == SQL_DRIVER_COMPLETE_REQUIRED) { + strcpy(setupdlg->attr[KEY_DSN].attr, "DEFAULT"); + } + GetAttributes(setupdlg); + if (drvcompl == SQL_DRIVER_PROMPT || + (maybeprompt && + !setupdlg->attr[KEY_DBNAME].attr[0])) { + prompt = TRUE; + } + } +retry: + if (prompt) { + short dlgret; + + setupdlg->defDSN = setupdlg->attr[KEY_DRIVER].attr[0] != '\0'; + dlgret = DialogBoxParam(hModule, MAKEINTRESOURCE(DRIVERCONNECT), + hwnd, (DLGPROC) DriverConnectProc, + (LPARAM) setupdlg); + + if (!dlgret || dlgret == -1) { + xfree(setupdlg); + return SQL_NO_DATA; + } + } + dsn = setupdlg->attr[KEY_DSN].attr; + driver = setupdlg->attr[KEY_DRIVER].attr; + dbname = setupdlg->attr[KEY_DBNAME].attr; + if (connOut || connOutLen) { + char buf[2048]; + int len, count; + char dsn_0 = dsn ? dsn[0] : '\0'; + char drv_0 = driver ? driver[0] : '\0'; + + buf[0] = '\0'; + count = snprintf(buf, sizeof (buf), + "%s%s%s%s%s%sDatabase=%s;" + "Host=%s;Port=%s;" + "Framed=%s;Tracefile=%s;", + dsn_0 ? "DSN=" : "", + dsn_0 ? dsn : "", + dsn_0 ? ";" : "", + drv_0 ? "Driver=" : "", + drv_0 ? driver : "", + drv_0 ? ";" : "", + dbname ? 
dbname : "", + setupdlg->attr[KEY_HOST].attr, + setupdlg->attr[KEY_PORT].attr, + setupdlg->attr[KEY_FRAMED].attr, + setupdlg->attr[KEY_TRACEFILE].attr); + if (count < 0) { + buf[sizeof (buf) - 1] = '\0'; + } + len = min(connOutMax - 1, strlen(buf)); + if (connOut) { + strncpy((char *) connOut, buf, len); + connOut[len] = '\0'; + } + if (connOutLen) { + *connOutLen = len; + } + } + if (dsn[0]) { + char tracef[SQL_MAX_MESSAGE_LENGTH]; + + tracef[0] = '\0'; + SQLGetPrivateProfileString(setupdlg->attr[KEY_DSN].attr, + "TRACEFILE", "", tracef, + sizeof (tracef), ODBC_INI); + } + d->hive_conn = DBOpenConnection(DEFAULT_DATABASE, + setupdlg->attr[KEY_HOST].attr, + atoi(setupdlg->attr[KEY_PORT].attr), + HIVECLIENT_BUFFERED_SOCKET, + &hive_error, sizeof(hive_error.err_buf), d->login_timeout); + if (NULL == d->hive_conn) { + if (maybeprompt && !prompt) { + prompt = TRUE; + goto retry; + } else { + setstatd(d, -1, "Communication link failure", "08S01"); + return SQL_ERROR; + } + } + xfree(setupdlg); + return ret; +} + + #endif /* WITHOUT_DRIVERMGR */ +#endif /* (_WIN32 || _WIN64) */ + +#ifndef WINTERFACE +/** + * Connect using a driver connection string. + * @param dbc database connection handle + * @param hwnd parent window handle + * @param connIn driver connect input string + * @param connInLen length of driver connect input string or SQL_NTS + * @param connOut driver connect output string + * @param connOutMax length of driver connect output string + * @param connOutLen output length of driver connect output string + * @param drvcompl completion type + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLDriverConnect(SQLHDBC dbc, SQLHWND hwnd, + SQLCHAR *connIn, SQLSMALLINT connInLen, + SQLCHAR *connOut, SQLSMALLINT connOutMax, + SQLSMALLINT *connOutLen, SQLUSMALLINT drvcompl) +{ + SQLRETURN ret; + + HDBC_LOCK(dbc); + ret = drvdriverconnect(dbc, hwnd, connIn, connInLen, + connOut, connOutMax, connOutLen, drvcompl); + HDBC_UNLOCK(dbc); + return ret; +} +#endif + +#ifdef WINTERFACE +/** + * Connect using a driver connection string (UNICODE version). + * @param dbc database connection handle + * @param hwnd parent window handle + * @param connIn driver connect input string + * @param connInLen length of driver connect input string or SQL_NTS + * @param connOut driver connect output string + * @param connOutMax length of driver connect output string + * @param connOutLen output length of driver connect output string + * @param drvcompl completion type + * @result ODBC error code + */ + +SQLRETURN SQL_API +SQLDriverConnectW(SQLHDBC dbc, SQLHWND hwnd, + SQLWCHAR *connIn, SQLSMALLINT connInLen, + SQLWCHAR *connOut, SQLSMALLINT connOutMax, + SQLSMALLINT *connOutLen, SQLUSMALLINT drvcompl) +{ + SQLRETURN ret; + char *ci = NULL; + SQLSMALLINT len = 0; + + HDBC_LOCK(dbc); + if (connIn) { + #if defined(_WIN32) || defined(_WIN64) + ci = uc_to_wmb(connIn, connInLen); + #else + ci = uc_to_utf(connIn, connInLen); + #endif + if (!ci) { + DBC *d = (DBC *) dbc; + + setstatd(d, -1, "out of memory", (*d->ov3) ? 
"HY000" : "S1000"); + HDBC_UNLOCK(dbc); + return SQL_ERROR; + } + } + ret = drvdriverconnect(dbc, hwnd, (SQLCHAR *) ci, SQL_NTS, + (SQLCHAR *) connOut, connOutMax, &len, drvcompl); + HDBC_UNLOCK(dbc); + uc_free(ci); + if (ret == SQL_SUCCESS) { + SQLWCHAR *co = NULL; + + if (connOut) { + if (len > 0) { + #if defined(_WIN32) || defined(_WIN64) + co = wmb_to_uc((char *) connOut, len); + #else + co = uc_from_utf((SQLCHAR *) connOut, len); + #endif + if (co) { + uc_strncpy(connOut, co, connOutMax); + co[len] = 0; + len = min(connOutMax, uc_strlen(co)); + uc_free(co); + } else { + len = 0; + } + } + if (len <= 0) { + len = 0; + connOut[0] = 0; + } + } else { + len = 0; + } + if (connOutLen) { + *connOutLen = len; + } + } + return ret; +} +#endif + +#if defined(_WIN32) || defined(_WIN64) + +/** + * DLL initializer for WIN32. + * @param hinst instance handle + * @param reason reason code for entry point + * @param reserved + * @result always true + */ + +BOOL APIENTRY +LibMain(HANDLE hinst, DWORD reason, LPVOID reserved) +{ + static int initialized = 0; + + switch (reason) { + case DLL_PROCESS_ATTACH: + if (!initialized++) { + hModule = hinst; + #ifdef WINTERFACE + /* MS Access hack part 1 (reserved error -7748) */ + statSpec2P = statSpec2; + statSpec3P = statSpec3; + #endif + } + #if defined(ENABLE_NVFS) && ENABLE_NVFS + nvfs_init(); + #endif + break; + case DLL_THREAD_ATTACH: + break; + case DLL_PROCESS_DETACH: + --initialized; + break; + case DLL_THREAD_DETACH: + break; + default: + break; + } + return TRUE; +} + +/** + * DLL entry point for WIN32. + * @param hinst instance handle + * @param reason reason code for entry point + * @param reserved + * @result always true + */ + +int __stdcall +DllMain(HANDLE hinst, DWORD reason, LPVOID reserved) +{ + return LibMain(hinst, reason, reserved); +} + + #ifndef WITHOUT_INSTALLER + +/** + * Handler for driver installer/uninstaller error messages. + * @param name name of API function for which to show error messages + * @result true when error message retrieved + */ + +static BOOL +InUnError(char *name) +{ + WORD err = 1; + DWORD code; + char errmsg[301]; + WORD errlen, errmax = sizeof (errmsg) - 1; + int sqlret; + BOOL ret = FALSE; + + do { + errmsg[0] = '\0'; + sqlret = SQLInstallerError(err, &code, errmsg, errmax, &errlen); + if (SQL_SUCCEEDED(sqlret)) { + MessageBox(NULL, errmsg, name, + MB_ICONSTOP|MB_OK|MB_TASKMODAL|MB_SETFOREGROUND); + ret = TRUE; + } + err++; + } while (sqlret != SQL_NO_DATA); + return ret; +} + +/** + * Built in driver installer/uninstaller. + * @param remove true for uninstall + * @param cmdline command line string of rundll32 + */ + +static BOOL +InUn(int remove, char *cmdline) +{ + static char *drivername = "Apache Hive ODBC Driver"; + static char *dsname = "Apache Hive Datasource"; + char *dllname, *p; + char dllbuf[301], path[301], driver[300], attr[300], inst[400]; + WORD pathmax = sizeof (path) - 1, pathlen; + DWORD usecnt, mincnt; + int quiet = 0; + + dllbuf[0] = '\0'; + GetModuleFileName(hModule, dllbuf, sizeof (dllbuf)); + p = strrchr(dllbuf, '\\'); + dllname = p ? 
(p + 1) : dllbuf; + quiet = cmdline && strstr(cmdline, "quiet"); + if (SQLInstallDriverManager(path, pathmax, &pathlen)) { + sprintf(driver, "%s;Driver=%s;Setup=%s;", + drivername, dllname, dllname); + p = driver; + while (*p) { + if (*p == ';') { + *p = '\0'; + } + ++p; + } + usecnt = 0; + path[0] = '\0'; + SQLInstallDriverEx(driver, NULL, path, pathmax, NULL, + ODBC_INSTALL_INQUIRY, &usecnt); + pathlen = strlen(path); + while (pathlen > 0 && path[pathlen - 1] == '\\') { + --pathlen; + path[pathlen] = '\0'; + } + sprintf(driver, "%s;Driver=%s\\%s;Setup=%s\\%s;", + drivername, path, dllname, path, dllname); + p = driver; + while (*p) { + if (*p == ';') { + *p = '\0'; + } + ++p; + } + sprintf(inst, "%s\\%s", path, dllname); + if (!remove && usecnt > 0) { + /* first install try: copy over driver dll, keeping DSNs */ + if (GetFileAttributes(dllbuf) != 0xFFFFFFFF && + CopyFile(dllbuf, inst, 0)) { + if (!quiet) { + char buf[512]; + + sprintf(buf, "%s replaced.", drivername); + MessageBox(NULL, buf, "Info", + MB_ICONINFORMATION|MB_OK|MB_TASKMODAL| + MB_SETFOREGROUND); + } + return TRUE; + } + } + mincnt = remove ? 1 : 0; + while (usecnt != mincnt) { + if (!SQLRemoveDriver(driver, TRUE, &usecnt)) { + break; + } + } + if (remove) { + if (usecnt && !SQLRemoveDriver(driver, TRUE, &usecnt)) { + InUnError("SQLRemoveDriver"); + return FALSE; + } + if (!usecnt) { + char buf[512]; + + DeleteFile(inst); + if (!quiet) { + sprintf(buf, "%s uninstalled.", drivername); + MessageBox(NULL, buf, "Info", + MB_ICONINFORMATION|MB_OK|MB_TASKMODAL| + MB_SETFOREGROUND); + } + } + sprintf(attr, "DSN=%s;Database=default;", dsname); + p = attr; + while (*p) { + if (*p == ';') { + *p = '\0'; + } + ++p; + } + SQLConfigDataSource(NULL, ODBC_REMOVE_SYS_DSN, drivername, attr); + return TRUE; + } + if (GetFileAttributes(dllbuf) == 0xFFFFFFFF) { + return FALSE; + } + if (strcmp(dllbuf, inst) != 0 && !CopyFile(dllbuf, inst, 0)) { + char buf[512]; + + sprintf(buf, "Copy %s to %s failed.", dllbuf, inst); + MessageBox(NULL, buf, "CopyFile", + MB_ICONSTOP|MB_OK|MB_TASKMODAL|MB_SETFOREGROUND); + return FALSE; + } + if (!SQLInstallDriverEx(driver, path, path, pathmax, &pathlen, + ODBC_INSTALL_COMPLETE, &usecnt)) { + InUnError("SQLInstallDriverEx"); + return FALSE; + } + sprintf(attr, "DSN=%s;Database=sqlite.db;", dsname); + p = attr; + while (*p) { + if (*p == ';') { + *p = '\0'; + } + ++p; + } + SQLConfigDataSource(NULL, ODBC_REMOVE_SYS_DSN, drivername, attr); + if (!SQLConfigDataSource(NULL, ODBC_ADD_SYS_DSN, drivername, attr)) { + InUnError("SQLConfigDataSource"); + return FALSE; + } + if (!quiet) { + char buf[512]; + + sprintf(buf, "%s installed.", drivername); + MessageBox(NULL, buf, "Info", + MB_ICONINFORMATION|MB_OK|MB_TASKMODAL| + MB_SETFOREGROUND); + } + } else { + InUnError("SQLInstallDriverManager"); + return FALSE; + } + return TRUE; +} + +/** + * RunDLL32 entry point for driver installation. + * @param hwnd window handle of caller + * @param hinst of this DLL + * @param lpszCmdLine rundll32 command line tail + * @param nCmdShow ignored + */ + +void CALLBACK +install(HWND hwnd, HINSTANCE hinst, LPSTR lpszCmdLine, int nCmdShow) +{ + InUn(0, lpszCmdLine); +} + +/** + * RunDLL32 entry point for driver uninstallation. 
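+ * Delegates to InUn() with remove set; a "quiet" token in the command
+ * line tail suppresses the informational message boxes.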
+ * @param hwnd window handle of caller
+ * @param hinst of this DLL
+ * @param lpszCmdLine rundll32 command line tail
+ * @param nCmdShow ignored
+ */
+
+void CALLBACK
+uninstall(HWND hwnd, HINSTANCE hinst, LPSTR lpszCmdLine, int nCmdShow)
+{
+    InUn(1, lpszCmdLine);
+}
+
+ #endif /* WITHOUT_INSTALLER */
+#endif /* _WIN32 || _WIN64 */
+
+#if defined(HAVE_ODBCINSTEXT_H) && HAVE_ODBCINSTEXT_H
+
+/*
+ * unixODBC property page for this driver,
+ * may or may not work depending on unixODBC version.
+ */
+
+#include <odbcinstext.h>
+
+int
+ODBCINSTGetProperties(HODBCINSTPROPERTY prop)
+{
+    static const char *instYN[] = { "No", "Yes", NULL };
+    static const char *syncPragma[] = { "NORMAL", "OFF", "FULL", NULL };
+
+    prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY));
+    prop = prop->pNext;
+    memset(prop, 0, sizeof (ODBCINSTPROPERTY));
+    prop->nPromptType = ODBCINST_PROMPTTYPE_FILENAME;
+    strncpy(prop->szName, "Database", INI_MAX_PROPERTY_NAME);
+    strncpy(prop->szValue, "", INI_MAX_PROPERTY_VALUE);
+    prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY));
+    prop = prop->pNext;
+    memset(prop, 0, sizeof (ODBCINSTPROPERTY));
+    prop->nPromptType = ODBCINST_PROMPTTYPE_TEXTEDIT;
+    strncpy(prop->szName, "Timeout", INI_MAX_PROPERTY_NAME);
+    strncpy(prop->szValue, "100000", INI_MAX_PROPERTY_VALUE);
+    prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY));
+    prop = prop->pNext;
+    memset(prop, 0, sizeof (ODBCINSTPROPERTY));
+    prop->nPromptType = ODBCINST_PROMPTTYPE_COMBOBOX;
+    prop->aPromptData = malloc(sizeof (instYN));
+    memcpy(prop->aPromptData, instYN, sizeof (instYN));
+    strncpy(prop->szName, "StepAPI", INI_MAX_PROPERTY_NAME);
+    strncpy(prop->szValue, "No", INI_MAX_PROPERTY_VALUE);
+    prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY));
+    prop = prop->pNext;
+    memset(prop, 0, sizeof (ODBCINSTPROPERTY));
+    prop->nPromptType = ODBCINST_PROMPTTYPE_COMBOBOX;
+    prop->aPromptData = malloc(sizeof (instYN));
+    memcpy(prop->aPromptData, instYN, sizeof (instYN));
+    strncpy(prop->szName, "ShortNames", INI_MAX_PROPERTY_NAME);
+    strncpy(prop->szValue, "No", INI_MAX_PROPERTY_VALUE);
+    prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY));
+    prop = prop->pNext;
+    memset(prop, 0, sizeof (ODBCINSTPROPERTY));
+    prop->nPromptType = ODBCINST_PROMPTTYPE_COMBOBOX;
+    prop->aPromptData = malloc(sizeof (instYN));
+    memcpy(prop->aPromptData, instYN, sizeof (instYN));
+    strncpy(prop->szName, "LongNames", INI_MAX_PROPERTY_NAME);
+    strncpy(prop->szValue, "No", INI_MAX_PROPERTY_VALUE);
+    prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY));
+    /* advance to the freshly allocated node before filling it in */
+    prop = prop->pNext;
+    memset(prop, 0, sizeof (ODBCINSTPROPERTY));
+    prop->nPromptType = ODBCINST_PROMPTTYPE_COMBOBOX;
+    prop->aPromptData = malloc(sizeof (instYN));
+    memcpy(prop->aPromptData, instYN, sizeof (instYN));
+    strncpy(prop->szName, "NoCreat", INI_MAX_PROPERTY_NAME);
+    strncpy(prop->szValue, "No", INI_MAX_PROPERTY_VALUE);
+ #ifdef WINTERFACE
+    prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY));
+    prop = prop->pNext;
+    memset(prop, 0, sizeof (ODBCINSTPROPERTY));
+    prop->nPromptType = ODBCINST_PROMPTTYPE_COMBOBOX;
+    prop->aPromptData = malloc(sizeof (instYN));
+    memcpy(prop->aPromptData, instYN, sizeof (instYN));
+    strncpy(prop->szName, "NoWCHAR", INI_MAX_PROPERTY_NAME);
+    strncpy(prop->szValue, "No", INI_MAX_PROPERTY_VALUE);
+ #endif
+    prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY));
+    prop = prop->pNext;
+    memset(prop, 0, sizeof (ODBCINSTPROPERTY));
+    prop->nPromptType = ODBCINST_PROMPTTYPE_COMBOBOX;
+    prop->aPromptData = malloc(sizeof 
(instYN)); + memcpy(prop->aPromptData, instYN, sizeof (instYN)); + strncpy(prop->szName, "FKSupport", INI_MAX_PROPERTY_NAME); + strncpy(prop->szValue, "No", INI_MAX_PROPERTY_VALUE); + prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY)); + prop = prop->pNext; + memset(prop, 0, sizeof (ODBCINSTPROPERTY)); + prop->nPromptType = ODBCINST_PROMPTTYPE_COMBOBOX; + prop->aPromptData = malloc(sizeof (syncPragma)); + memcpy(prop->aPromptData, syncPragma, sizeof (syncPragma)); + strncpy(prop->szName, "SyncPragma", INI_MAX_PROPERTY_NAME); + strncpy(prop->szValue, "NORMAL", INI_MAX_PROPERTY_VALUE); + prop->pNext = (HODBCINSTPROPERTY) malloc(sizeof (ODBCINSTPROPERTY)); + prop = prop->pNext; + memset(prop, 0, sizeof (ODBCINSTPROPERTY)); + prop->nPromptType = ODBCINST_PROMPTTYPE_TEXTEDIT; + strncpy(prop->szName, "LoadExt", INI_MAX_PROPERTY_NAME); + strncpy(prop->szValue, "", INI_MAX_PROPERTY_VALUE); + return 1; +} + +#endif /* HAVE_ODBCINSTEXT_H */ diff --git odbc/src/driver/hiveodbc.h odbc/src/driver/hiveodbc.h new file mode 100644 index 0000000..f850dd4 --- /dev/null +++ odbc/src/driver/hiveodbc.h @@ -0,0 +1,313 @@ +/**************************************************************************** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ****************************************************************************/ + +/* + Original sqliteodbc license: + + This software is copyrighted by Christian Werner + and other authors. The following terms apply to all files associated + with the software unless explicitly disclaimed in individual files. + + The authors hereby grant permission to use, copy, modify, distribute, + and license this software and its documentation for any purpose, provided + that existing copyright notices are retained in all copies and that this + notice is included verbatim in any distributions. No written agreement, + license, or royalty fee is required for any of the authorized uses. + Modifications to this software may be copyrighted by their authors + and need not follow the licensing terms described here, provided that + the new terms are clearly indicated on the first page of each file where + they apply. + + IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY + FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY + DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. 
THIS SOFTWARE + IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE + NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR + MODIFICATIONS. +*/ + +#ifndef _HIVEODBC_H +#define _HIVEODBC_H + +/** + * @mainpage + * @section readme README + * @verbinclude README + * @section changelog ChangeLog + * @verbinclude ChangeLog + * @section copying License Terms + * @verbinclude license.terms + */ + + +#if defined(_WIN32) || defined(_WIN64) +#include +#include +#include +#else +#include +#include +#include +#include +#endif +#include +#if defined(HAVE_LOCALECONV) || defined(_WIN32) || defined(_WIN64) +#include +#endif +#include +#include +#include +#include +#include +#include + +#include "hiveclient.h" +#ifdef HAVE_IODBC +#include +#endif +#if defined(HAVE_UNIXODBC) || defined(_WIN32) || defined(_WIN64) +#include +#endif + +#ifndef SQL_API +#define SQL_API +#endif + +#ifndef HAVE_SQLLEN +#define SQLLEN SQLINTEGER +#endif + +#define SQLLEN_PTR SQLLEN * + +#ifndef HAVE_SQLULEN +#define SQLULEN SQLUINTEGER +#endif + +#ifndef HAVE_SQLROWCOUNT +#define SQLROWCOUNT SQLUINTEGER +#endif + +#ifndef HAVE_SQLSETPOSIROW +#define SQLSETPOSIROW SQLUSMALLINT +#endif + +#ifndef HAVE_SQLROWOFFSET +#define SQLROWOFFSET SQLLEN +#endif + +#ifndef HAVE_SQLROWSETSIZE +#define SQLROWSETSIZE SQLULEN +#endif + +static const char *DEFAULT_DRIVER_NAME = "hiveodbc.so"; +static const char *DEFAULT_DSN = "Hive"; +static const char *DEFAULT_TRACEFILE = "/tmp/hiveodbc.trc"; + +/* TODO: move this into the ODBC config or calculate dynamically */ +#define MAX_BUFFERED_RESULT_ROWS 8192 + +/* Currently the Hive server doesn't return the version number. Keep it at the same as the current Hive release */ +#define HIVE_VERSION "0.10.0" + +struct dbc; +struct stmt; + +/** + * @typedef ENV + * @struct ENV + * Driver internal structure for environment (HENV). + */ + +typedef struct { + int magic; /**< Magic cookie */ + int ov3; /**< True for SQL_OV_ODBC3 */ +#if defined(_WIN32) || defined(_WIN64) + CRITICAL_SECTION cs; /**< For serializing most APIs */ + DWORD owner; /**< Current owner of CS or 0 */ +#endif + struct dbc *dbcs; /**< Pointer to first DBC */ +} ENV; + + +/** + * @typedef DBC + * @struct dbc + * Driver internal structure for database connection (HDBC). 
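+ * Holds at most one open HiveConnection (hive_conn); a second connect
+ * attempt on the same DBC fails with SQLSTATE 08002.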
+ */ + +typedef struct dbc { + int magic; /**< Magic cookie */ + ENV *env; /**< Pointer to environment */ + struct dbc *next; /**< Pointer to next DBC */ + HiveConnection* hive_conn; /**< Hive Server Connection Handle */ + int version; /**< SQLITE version number */ + char *dbname; /**< SQLITE database name */ + char *dsn; /**< ODBC data source name */ + char *dsn_host; /**< host name in the DSN */ + char *dsn_port; /**< port number in the DSN */ + int *ov3; /**< True for SQL_OV_ODBC3 */ + int ov3val; /**< True for SQL_OV_ODBC3 */ + int autocommit; /**< Auto commit state */ + struct stmt *stmt; /**< STMT list of this DBC */ + struct stmt *cur_stmt; /**< Current STMT executing Hive statement */ + int naterr; /**< Native error code */ + char sqlstate[6]; /**< SQL state for SQLError() */ + SQLCHAR logmsg[1024]; /**< Message for SQLError() */ + int nowchar; /**< Don't try to use WCHAR */ + int shortnames; /**< Always use short column names */ + int longnames; /**< Don't shorten column names */ + int nocreat; /**< Don't auto create database file */ + int fksupport; /**< Foreign keys on or off */ + int curtype; /**< Default cursor type */ + int step_enable; /**< True for sqlite_compile/step/finalize */ + int trans_disable; /**< True for no transaction support */ + FILE *trace; /**< sqlite3_trace() file pointer or NULL */ +#ifdef USE_DLOPEN_FOR_GPPS + void *instlib; + int (*gpps)(); +#endif +#if defined(_WIN32) || defined(_WIN64) + int xcelqrx; +#endif + SQLUINTEGER login_timeout; +} DBC; + +/** + * @typedef COL + * @struct COL + * Internal structure to describe a column in a result set. + */ + +typedef struct { + char *db; /**< Database name */ + char *table; /**< Table name */ + char *column; /**< Column name */ + int type; /**< Data type of column */ + int size; /**< Size of column */ + int index; /**< Index of column in result */ + int nosign; /**< Unsigned type */ + int scale; /**< Scale of column */ + int prec; /**< Precision of column */ + int autoinc; /**< AUTO_INCREMENT column */ + int notnull; /**< NOT NULL constraint on column */ + char *typename; /**< Column type name or NULL */ + char *label; /**< Column label or NULL */ +} COL; + +/** + * @typedef BINDCOL + * @struct BINDCOL + * Internal structure for bound column (SQLBindCol). + */ + +typedef struct { + SQLSMALLINT type; /**< ODBC type */ + SQLINTEGER max; /**< Max. size of value buffer */ + SQLLEN *lenp; /**< Value return, actual size of value buffer */ + SQLPOINTER valp; /**< Value buffer */ + int index; /**< Index of column in result */ + int offs; /**< Byte offset for SQLGetData() */ +} BINDCOL; + +/** + * @typedef BINDPARM + * @struct BINDPARM + * Internal structure for bound parameter (SQLBindParameter). + */ + +typedef struct { + int type, stype; /**< ODBC and SQL types */ + int coldef, scale; /**< from SQLBindParameter() */ + SQLLEN max; /**< Max. size size of parameter buffer */ + SQLLEN *lenp; /**< Actual size of parameter buffer */ + SQLLEN *lenp0; /**< Actual size of parameter buffer, initial value */ + void *param; /**< Parameter buffer */ + void *param0; /**< Parameter buffer, initial value */ + int inc; /**< Increment for paramset size > 1 */ + int need; /**< True when SQL_LEN_DATA_AT_EXEC */ + int bound; /**< True when SQLBindParameter() called */ + int offs, len; /**< Offset/length for SQLParamData()/SQLPutData() */ + void *parbuf; /**< Buffer for SQL_LEN_DATA_AT_EXEC etc. 
*/ + char strbuf[64]; /**< String buffer for scalar data */ +} BINDPARM; + +/** + * @typedef STMT + * @struct stmt + * Driver internal structure representing SQL statement (HSTMT). + */ + +typedef struct stmt { + struct stmt *next; /**< Linkage for STMT list in DBC */ + HDBC dbc; /**< Pointer to DBC */ + SQLCHAR cursorname[32]; /**< Cursor name */ + SQLCHAR *query; /**< Current query, raw string */ + int *ov3; /**< True for SQL_OV_ODBC3 */ + int isselect; /**< > 0 if query is a SELECT statement */ + int ncols; /**< Number of result columns */ + COL *cols; /**< Result column array */ + COL *dyncols; /**< Column array, but malloc()ed */ + int dcols; /**< Number of entries in dyncols */ + int bkmrk; /**< True when bookmarks used */ + BINDCOL bkmrkcol; /**< Bookmark bound column */ + BINDCOL *bindcols; /**< Array of bound columns */ + int nbindcols; /**< Number of entries in bindcols */ + int nbindparms; /**< Number bound parameters */ + BINDPARM *bindparms; /**< Array of bound parameters */ + int nparams; /**< Number of parameters in query */ + int nrows; /**< Number of result rows */ + int rowp; /**< Current result row */ + char **rows; /**< 2-dim array, result set */ + void (*rowfree)(); /**< Free function for rows */ + int naterr; /**< Native error code */ + char sqlstate[6]; /**< SQL state for SQLError() */ + SQLCHAR logmsg[1024]; /**< Message for SQLError() */ + int nowchar[2]; /**< Don't try to use WCHAR */ + int longnames; /**< Don't shorten column names */ + int retr_data; /**< SQL_ATTR_RETRIEVE_DATA */ + SQLUINTEGER rowset_size; /**< Size of rowset */ + SQLUSMALLINT *row_status; /**< Row status pointer */ + SQLUSMALLINT *row_status0; /**< Internal status array */ + SQLUSMALLINT row_status1; /**< Internal status array for 1 row rowsets */ + SQLUINTEGER *row_count; /**< Row count pointer */ + SQLUINTEGER row_count0; /**< Row count */ + SQLUINTEGER paramset_size; /**< SQL_ATTR_PARAMSET_SIZE */ + SQLUINTEGER paramset_count; /**< Internal for paramset */ + SQLUINTEGER paramset_nrows; /**< Row count for paramset handling */ + SQLUINTEGER max_rows; /**< SQL_ATTR_MAX_ROWS */ + SQLUINTEGER bind_type; /**< SQL_ATTR_ROW_BIND_TYPE */ + SQLUINTEGER *bind_offs; /**< SQL_ATTR_ROW_BIND_OFFSET_PTR */ + /* Dummies to make ADO happy */ + SQLUINTEGER *parm_bind_offs;/**< SQL_ATTR_PARAM_BIND_OFFSET_PTR */ + SQLUSMALLINT *parm_oper; /**< SQL_ATTR_PARAM_OPERATION_PTR */ + SQLUSMALLINT *parm_status; /**< SQL_ATTR_PARAMS_STATUS_PTR */ + SQLUINTEGER *parm_proc; /**< SQL_ATTR_PARAMS_PROCESSED_PTR */ + SQLUINTEGER parm_bind_type; /**< SQL_ATTR_PARAM_BIND_TYPE */ + int curtype; /**< Cursor type */ + SQLUINTEGER query_timeout; /**< SQL_ATTR_QUERY_TIMEOUT */ + HiveResultSet *hive_resultset; /**< Handle to the resultset generated by a query execution */ +} STMT; + +#endif /* _HIVEODBC_H */ diff --git odbc/src/driver/hiveodbc_win32_rc.h odbc/src/driver/hiveodbc_win32_rc.h new file mode 100644 index 0000000..3437a2d --- /dev/null +++ odbc/src/driver/hiveodbc_win32_rc.h @@ -0,0 +1,36 @@ +/**************************************************************************** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ****************************************************************************/ + +#ifndef IDC_STATIC +#define IDC_STATIC (-1) +#endif + +#define DRIVERCONNECT 104 +#define CONFIGDSN 105 +#define IDC_DSNAME 400 +#define IDC_DSNAMETEXT 401 +#define IDS_MSGTITLE 40000 +#define IDS_BADDSN 40001 +#define IDS_EXTTITLE 40002 +#define IDS_EXTERR 40003 +#define IDC_HOSTNAMETEXT 40004 +#define IDC_PORTNAMETEXT 40005 +#define IDC_HOSTNAME 40009 +#define IDC_PORTNAME 40013 +#define IDC_DBNAME 40015 diff --git odbc/src/driver/hiveodbc_win32_rc.rc odbc/src/driver/hiveodbc_win32_rc.rc new file mode 100644 index 0000000..2b1057b --- /dev/null +++ odbc/src/driver/hiveodbc_win32_rc.rc @@ -0,0 +1,131 @@ +//##################################################################### +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+//##################################################################### +// Generated by ResEdit 1.5.7 +// Copyright (C) 2006-2010 +// http://www.resedit.net + +#include +#include +#include +#include "hiveodbc_win32_rc.h" + + + +// +// Dialog resources +// +LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL +CONFIGDSN DIALOG 65, 43, 277, 127 +STYLE DS_CENTER | DS_MODALFRAME | DS_SETFONT | WS_CAPTION | WS_VISIBLE | WS_POPUP | WS_SYSMENU +CAPTION "Hive ODBC DSN Configuration" +FONT 8, "MS Sans Serif" +{ + EDITTEXT IDC_DSNAME, 82, 20, 140, 12, WS_GROUP | ES_AUTOHSCROLL + EDITTEXT IDC_HOSTNAME, 82, 37, 139, 14, ES_AUTOHSCROLL + DEFPUSHBUTTON "OK", IDOK, 178, 102, 40, 14 + PUSHBUTTON "Cancel", IDCANCEL, 225, 102, 40, 14 + CTEXT "Enter options for connect", 1003, 95, 6, 80, 8, SS_CENTER + RTEXT "Data Source Name:", IDC_DSNAMETEXT, 6, 22, 73, 9, NOT WS_GROUP | SS_RIGHT + LTEXT "Host:", IDC_HOSTNAMETEXT, 61, 40, 18, 8, SS_LEFT + EDITTEXT IDC_PORTNAME, 81, 55, 139, 14, ES_AUTOHSCROLL + LTEXT "Port:", IDC_STATIC, 63, 58, 16, 8, SS_LEFT + EDITTEXT IDC_DBNAME, 81, 74, 139, 14, ES_AUTOHSCROLL + LTEXT "Database:", IDC_STATIC, 45, 76, 34, 8, SS_LEFT +} + + + +LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL +DRIVERCONNECT DIALOG 65, 43, 277, 122 +STYLE DS_CENTER | DS_MODALFRAME | DS_SETFONT | WS_CAPTION | WS_VISIBLE | WS_POPUP | WS_SYSMENU +CAPTION "Hive ODBC Driver Connect" +FONT 8, "MS Sans Serif" +{ + EDITTEXT IDC_DSNAME, 82, 20, 140, 12, WS_GROUP | ES_AUTOHSCROLL + DEFPUSHBUTTON "OK", IDOK, 174, 98, 40, 14 + PUSHBUTTON "Cancel", IDCANCEL, 223, 98, 40, 14 + CTEXT "Enter options for connect", 1003, 95, 6, 80, 8, SS_CENTER + RTEXT "Data Source Name:", IDC_DSNAMETEXT, 6, 22, 73, 9, NOT WS_GROUP | SS_RIGHT + LTEXT "Host:", IDC_HOSTNAMETEXT, 61, 40, 18, 8, SS_LEFT + LTEXT "Port:", IDC_PORTNAMETEXT, 63, 57, 16, 8, SS_LEFT + EDITTEXT IDC_HOSTNAME, 82, 38, 140, 12, ES_AUTOHSCROLL + EDITTEXT IDC_PORTNAME, 82, 55, 140, 13, ES_AUTOHSCROLL + LTEXT "Database:", IDC_STATIC, 47, 74, 34, 8, SS_LEFT + EDITTEXT IDC_DBNAME, 82, 73, 139, 14, ES_AUTOHSCROLL +} + + + +// +// String Table resources +// +LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL +STRINGTABLE +{ + IDS_MSGTITLE "Hive ODBC Setup" + IDS_BADDSN "%s cannot be used as a data source name." 
+ IDS_EXTTITLE "Hive ODBC Extension" + IDS_EXTERR "Extension '%s' did not load:\n%s" +} + + + +// +// Icon resources +// +LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL +ico1 ICON "hiveodbc_logo.ico" + + + +// +// Version Information resources +// +LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL +VS_VERSION_INFO VERSIONINFO + FILEVERSION 0,10,0,0 + PRODUCTVERSION 0,10,0,0 + FILEOS VOS__WINDOWS32 + FILETYPE VFT_DLL + FILESUBTYPE VFT2_UNKNOWN + FILEFLAGSMASK 0x00000003 + FILEFLAGS 0x00000000 +{ + BLOCK "StringFileInfo" + { + BLOCK "040904e4" + { + VALUE "CompanyName", "Apache\0" + VALUE "FileDescription", "Apache HIVE ODBC Driver\0" + VALUE "FileVersion", "0.10\0" + VALUE "InternalName", "HIVEODBC\0" + VALUE "LegalCopyright", "\0" + VALUE "LegalTrademarks", "\0" + VALUE "OriginalFilename", "LIBHIVEODBC-0.DLL\0" + VALUE "PrivateBuild", "\0" + VALUE "ProductName", "Apache ODBC Driver for Apache Hive\0" + VALUE "ProductVersion", "0.10\0" + VALUE "SpecialBuild", "\0" + } + } + BLOCK "VarFileInfo" + { + VALUE "Translation", 0x0409, 0x04E4 + } +} diff --git odbc/src/driver/libhiveodbc.def odbc/src/driver/libhiveodbc.def new file mode 100644 index 0000000..2ad3bc4 --- /dev/null +++ odbc/src/driver/libhiveodbc.def @@ -0,0 +1,100 @@ +:################################################################### +: Licensed to the Apache Software Foundation (ASF) under one +: or more contributor license agreements. See the NOTICE file +: distributed with this work for additional information +: regarding copyright ownership. The ASF licenses this file +: to you under the Apache License, Version 2.0 (the +: "License"); you may not use this file except in compliance +: with the License. You may obtain a copy of the License at +: +: http://www.apache.org/licenses/LICENSE-2.0 +: +: Unless required by applicable law or agreed to in writing, +: software distributed under the License is distributed on an +: "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +: KIND, either express or implied. See the License for the +: specific language governing permissions and limitations +: under the License. 
+:################################################################### +EXPORTS +ConfigDSN +LibMain +SQLAllocConnect +SQLAllocEnv +SQLAllocHandle +SQLAllocStmt +SQLBindCol +SQLBindParam +SQLBindParameter +SQLBrowseConnect +SQLBulkOperations +SQLCancel +SQLCloseCursor +SQLColAttribute +SQLColAttributes +SQLColumnPrivileges +SQLColumns +SQLConnect +SQLCopyDesc +SQLDataSources +SQLDescribeCol +SQLDescribeParam +SQLDisconnect +SQLDriverConnect +SQLDrivers +SQLEndTran +SQLError +SQLExecDirect +SQLExecute +SQLExtendedFetch +SQLFetch +SQLFetchScroll +SQLForeignKeys +SQLFreeConnect +SQLFreeEnv +SQLFreeHandle +SQLFreeStmt +SQLGetConnectAttr +SQLGetConnectOption +SQLGetCursorName +SQLGetData +SQLGetDescField +SQLGetDescRec +SQLGetDiagField +SQLGetDiagRec +SQLGetEnvAttr +SQLGetFunctions +SQLGetInfo +SQLGetStmtAttr +SQLGetStmtOption +SQLGetTypeInfo +SQLMoreResults +SQLNativeSql +SQLNumParams +SQLNumResultCols +SQLParamData +SQLParamOptions +SQLPrepare +SQLPrimaryKeys +SQLProcedureColumns +SQLProcedures +SQLPutData +SQLRowCount +SQLSetConnectAttr +SQLSetConnectOption +SQLSetCursorName +SQLSetDescField +SQLSetDescRec +SQLSetEnvAttr +SQLSetParam +SQLSetPos +SQLSetScrollOptions +SQLSetStmtAttr +SQLSetStmtOption +SQLSpecialColumns +SQLStatistics +SQLTablePrivileges +SQLTables +SQLTransact +install +uninstall diff --git odbc/src/test/Makefile.am odbc/src/test/Makefile.am new file mode 100644 index 0000000..5cb6ae2 --- /dev/null +++ odbc/src/test/Makefile.am @@ -0,0 +1,28 @@ +# +# Copyright 2005 The Apache Software Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +bin_PROGRAMS = hiveclienttest hiveodbctest + +AM_CXXFLAGS = -Wall +AM_CPPFLAGS = -I. -I../cpp $(ODBC_CPPFLAGS) $(APR_CPPFLAGS) $(APR_INCLUDES) + +hiveclienttest_SOURCES = hiveclienttest.c hivetest.h +hiveclienttest_LDADD = ../cpp/libhiveclient.la $(APR_LIB) $(THRIFT_LIBDIR)/libthrift.la + +hiveodbctest_SOURCES = hiveodbctest.c hivetest.h +hiveodbctest_LDADD = ../cpp/libhiveclient.la \ + ../driver/libhiveodbc.la \ + $(ODBC_LIB) $(APR_LIB) $(THRIFT_LIBDIR)/libthrift.la diff --git odbc/src/test/Makefile.in odbc/src/test/Makefile.in new file mode 100644 index 0000000..7317809 --- /dev/null +++ odbc/src/test/Makefile.in @@ -0,0 +1,562 @@ +# Makefile.in generated by automake 1.11.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, +# Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +# +# Copyright 2005 The Apache Software Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +bin_PROGRAMS = hiveclienttest$(EXEEXT) hiveodbctest$(EXEEXT) +subdir = src/test +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/ax_boost_base.m4 \ + $(top_srcdir)/m4/find_apr.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +PROGRAMS = $(bin_PROGRAMS) +am_hiveclienttest_OBJECTS = hiveclienttest.$(OBJEXT) +hiveclienttest_OBJECTS = $(am_hiveclienttest_OBJECTS) +am__DEPENDENCIES_1 = +hiveclienttest_DEPENDENCIES = ../cpp/libhiveclient.la \ + $(am__DEPENDENCIES_1) $(THRIFT_LIBDIR)/libthrift.la +am_hiveodbctest_OBJECTS = hiveodbctest.$(OBJEXT) +hiveodbctest_OBJECTS = $(am_hiveodbctest_OBJECTS) +hiveodbctest_DEPENDENCIES = ../cpp/libhiveclient.la \ + ../driver/libhiveodbc.la $(am__DEPENDENCIES_1) \ + $(am__DEPENDENCIES_1) $(THRIFT_LIBDIR)/libthrift.la +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/config/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(hiveclienttest_SOURCES) $(hiveodbctest_SOURCES) +DIST_SOURCES = $(hiveclienttest_SOURCES) $(hiveodbctest_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +APR_CFLAGS = @APR_CFLAGS@ +APR_CPPFLAGS = @APR_CPPFLAGS@ +APR_INCLUDES = @APR_INCLUDES@ +APR_LDFLAGS = @APR_LDFLAGS@ +APR_LIB = @APR_LIB@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = 
@AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +ODBC_CPPFLAGS = @ODBC_CPPFLAGS@ +ODBC_LIB = @ODBC_LIB@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +THRIFT_COMPILER = @THRIFT_COMPILER@ +THRIFT_CPPFLAGS = @THRIFT_CPPFLAGS@ +THRIFT_INCLUDE = @THRIFT_INCLUDE@ +THRIFT_LDFLAGS = @THRIFT_LDFLAGS@ +THRIFT_LIBDIR = @THRIFT_LIBDIR@ +VERSION = @VERSION@ +VER_INFO = @VER_INFO@ +WINDRES = @WINDRES@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +lt_ECHO = @lt_ECHO@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AM_CXXFLAGS = -Wall +AM_CPPFLAGS = -I. 
-I../cpp $(ODBC_CPPFLAGS) $(APR_CPPFLAGS) $(APR_INCLUDES) +hiveclienttest_SOURCES = hiveclienttest.c hivetest.h +hiveclienttest_LDADD = ../cpp/libhiveclient.la $(APR_LIB) $(THRIFT_LIBDIR)/libthrift.la +hiveodbctest_SOURCES = hiveodbctest.c hivetest.h +hiveodbctest_LDADD = ../cpp/libhiveclient.la \ + ../driver/libhiveodbc.la \ + $(ODBC_LIB) $(APR_LIB) $(THRIFT_LIBDIR)/libthrift.la + +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/test/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/test/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + for p in $$list; do echo "$$p $$p"; done | \ + sed 's/$(EXEEXT)$$//' | \ + while read p p1; do if test -f $$p || test -f $$p1; \ + then echo "$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ + -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ + sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) files[d] = files[d] " " $$1; \ + else { print "f", $$3 "/" $$4, $$1; } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-binPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ + -e 's/$$/$(EXEEXT)/' `; \ + test -n "$$list" || exit 0; \ + echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(bindir)" && rm -f $$files + +clean-binPROGRAMS: + @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list +hiveclienttest$(EXEEXT): 
$(hiveclienttest_OBJECTS) $(hiveclienttest_DEPENDENCIES) + @rm -f hiveclienttest$(EXEEXT) + $(LINK) $(hiveclienttest_OBJECTS) $(hiveclienttest_LDADD) $(LIBS) +hiveodbctest$(EXEEXT): $(hiveodbctest_OBJECTS) $(hiveodbctest_DEPENDENCIES) + @rm -f hiveodbctest$(EXEEXT) + $(LINK) $(hiveodbctest_OBJECTS) $(hiveodbctest_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hiveclienttest.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hiveodbctest.Po@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + 
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-binPROGRAMS + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic clean-libtool ctags distclean distclean-compile \ + distclean-generic distclean-libtool distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-binPROGRAMS install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am \ + uninstall-binPROGRAMS + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git odbc/src/test/hiveclienttest.c odbc/src/test/hiveclienttest.c index fbb4e24..e921ae9 100644 --- odbc/src/test/hiveclienttest.c +++ odbc/src/test/hiveclienttest.c @@ -21,7 +21,13 @@ #include #include #include + +#include +#include +#include + #include "hiveclient.h" +#include "hivetest.h" /* INSTRUCTIONS: * This test suite should have been compiled when 'make' was executed from the @@ -48,68 +54,173 @@ #define BIGGEST_POS_FLOAT 3.4028235E+38 #define BIGGEST_POS_DOUBLE 1.797693134862315E+308 -// Convert a macro value to a string -#define STRINGIFY(x) XSTRINGIFY(x) -#define XSTRINGIFY(x) #x -// Path to test data (should be supplied at compile time) -#ifdef TEST_DATA_DIR -#define TEST_DATA_DIR_STR STRINGIFY(TEST_DATA_DIR) -#else -#define TEST_DATA_DIR_STR "/tmp/testdata" // Provide a default if not defined -#endif -/** - * Checks an error condition, and if true: - * 1. prints the error to stderr - * 2. returns the specified ret_val - */ -#define RETURN_ON_ASSERT_ONE_ARG(condition, err_format, arg, ret_val) { \ - if (condition) { \ - fprintf(stderr, "----LINE %i: ", __LINE__); \ - fprintf(stderr, err_format, arg); \ - return ret_val; \ - } \ -} +static const char *database; +static const char *host; +int port; +int framed; +static const char *datadir; +static int row_count; -/** - * Checks an error condition, and if true: - * 1. prints the error to stderr - * 2. closes the DB connection on db_conn - * 3. 
returns the specified ret_val - */ -#define RETURN_ON_ASSERT_NO_ARG_CLOSE(condition, err_format, db_conn, ret_val) { \ - if (condition) { \ - char error_buffer_[MAX_HIVE_ERR_MSG_LEN]; \ - fprintf(stderr, "----LINE %i: ", __LINE__); \ - fprintf(stderr, err_format); \ - DBCloseConnection(db_conn, error_buffer_, sizeof(error_buffer_)); \ - return ret_val; \ - } \ -} -#define RETURN_ON_ASSERT_ONE_ARG_CLOSE(condition, err_format, arg, db_conn, ret_val) { \ - if (condition) { \ - char error_buffer_[MAX_HIVE_ERR_MSG_LEN]; \ - fprintf(stderr, "----LINE %i: ", __LINE__); \ - fprintf(stderr, err_format, arg); \ - DBCloseConnection(db_conn, error_buffer_, sizeof(error_buffer_)); \ - return ret_val; \ - } \ -} -#define RETURN_ON_ASSERT_TWO_ARG_CLOSE(condition, err_format, arg1, arg2, db_conn, ret_val) { \ - if (condition) { \ - char error_buffer_[MAX_HIVE_ERR_MSG_LEN]; \ - fprintf(stderr, "----LINE %i: ", __LINE__); \ - fprintf(stderr, err_format, arg1, arg2); \ - DBCloseConnection(db_conn, error_buffer_, sizeof(error_buffer_)); \ - return ret_val; \ - } \ +/* Default memory pool for APR lib */ +apr_pool_t *mp; + +#define HELP_SHORT_OPT 255+1 + +/* getopt command line options */ +static const apr_getopt_option_t opt_option[] = { + /* long-option, short-option, has-arg flag, description */ + { "database", 'd', TRUE, "database name" }, + { "host", 'h', TRUE, "host name" }, + { "port", 'p', TRUE, "port number" }, + { "framed", 'f', TRUE, "framed" }, + { "test-data-dir", 't', TRUE, "test data directory" }, + { "help", HELP_SHORT_OPT, FALSE, "display this help and exit" }, + { NULL, 0, 0, NULL }, /* end (a.k.a. sentinel) */ +}; + +void process_args(int argc, const char *argv[]); +void print_usage(const apr_getopt_option_t *opts); + +/************************************************************************************************** + * MAIN FUNCTION + **************************************************************************************************/ + +int main(int argc, const char *argv[]) { + int failed = 0; + + /* apr_app_initialize(&argc, &argv, NULL); */ + apr_initialize(); + apr_pool_create(&mp, NULL); + + process_args(argc, argv); + + fprintf(stderr, "\nStarting Hive Client Library tests...\n\n"); + + RUN_TEST(basic_connect_disconnect_test); + RUN_TEST(basic_query_exec_test); + RUN_TEST(basic_fetch_test); + RUN_TEST(show_tables_test); + RUN_TEST(query_fetch_test); + RUN_TEST(numeric_range_test); + RUN_TEST(field_multifetch_test); + RUN_TEST(meta_data_function_test); + + /* cleanup */ + apr_pool_destroy(mp); + apr_terminate(); + + if (failed == 0) { + fprintf(stderr, "\nALL HIVE CLIENT TESTS PASSED!\n\n"); + return 0; + } else { + fprintf(stderr, "\nHIVE CLIENT TEST FAILURE: %i test(s) failed.\n\n", failed); + return 1; + } } + + /************************************************************************************************** * HELPER FUNCTIONS **************************************************************************************************/ +void process_args(int argc, const char *argv[]) { + apr_status_t rv; + apr_getopt_t *opt; + int optch; + const char *optarg; + + int database_set = FALSE; + int host_set = FALSE; + int port_set = FALSE; + int framed_set = FALSE; + int datadir_set = FALSE; + + /* initialize apr_getopt_t */ + apr_getopt_init(&opt, mp, argc, argv); + + while ((rv = apr_getopt_long(opt, opt_option, &optch, &optarg)) != APR_EOF) { + switch (rv) { + case APR_BADCH: + /* Found a bad option character */ + print_usage(opt_option); + exit(EXIT_FAILURE); + break; + case APR_BADARG: + /* 
No argument followed the option flag */ + print_usage(opt_option); + exit(EXIT_FAILURE); + break; + case APR_SUCCESS: + /* The next option was found */ + switch (optch) { + case 'd': /* --database */ + database = apr_pstrndup(mp, optarg, strlen(optarg)); + database_set = TRUE; + break; + case 'h': /* --host */ + host = apr_pstrndup(mp, optarg, strlen(optarg)); + host_set = TRUE; + break; + case 'p': /* --port */ + port = atoi(optarg); + port_set = TRUE; + break; + case 'f': /* --framed */ + framed = atoi(optarg); + framed_set = TRUE; + break; + case 't': /* --test-data-dir */ + datadir = apr_pstrndup(mp, optarg, strlen(optarg)); + datadir_set = TRUE; + break; + case HELP_SHORT_OPT: + print_usage(opt_option); + exit(EXIT_SUCCESS); + } + break; + } + } + + if (!database_set) { + database = DEFAULT_DATABASE; + } + if (!host_set) { + host = DEFAULT_HOST; + } + if (!port_set) { + port = atoi(DEFAULT_PORT); + } + if (!framed_set) { + framed = atoi(DEFAULT_FRAMED); + } + if (!datadir_set) { + datadir = apr_pstrndup(mp, TEST_DATA_DIR_STR, strlen(TEST_DATA_DIR_STR)); + } + + printf("Running Hive Client Tests with the following configuration:\n\n"); + printf("database = %s\n", database); + printf("host = %s\n", host); + printf("port = %d\n", port); + printf("framed = %d\n", framed); + printf("test-data-dir = %s\n", datadir); +} + +void print_usage(const apr_getopt_option_t *opts) { + printf("Usage:\n"); + for (;opts->name != NULL; opts++) { + if (opts->optch < 255) { + /* Unfortunately apr_getopt.h does not define a symbolic value to check. */ + printf("-%c, ", opts->optch); + } else { + printf(" "); + } + printf("--%s\t%s\n", opts->name, opts->description); + } +} + int dummyHiveTypeConverter(HiveType type) { return 1; // For testing purposes, just return an arbitrary value } @@ -122,7 +233,7 @@ HiveReturn dropTable(HiveConnection* connection, const char* table_name) { HiveResultSet* resultset; sprintf(query, "DROP TABLE %s", table_name); - retval = DBExecute(connection, query, &resultset, 10, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, &resultset, 10, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, HIVE_ERROR); @@ -151,16 +262,16 @@ int basic_connect_disconnect_test() { fprintf(stderr, "Running %s...\n", __FUNCTION__); char err_buf[MAX_HIVE_ERR_MSG_LEN]; HiveReturn retval; - HiveConnection* connection = DBOpenConnection(DEFAULT_DATABASE, DEFAULT_HOST, atoi(DEFAULT_PORT), - atoi(DEFAULT_FRAMED), err_buf, sizeof(err_buf)); + HiveConnection* connection = DBOpenConnection(database, host, port, + framed, err_buf, sizeof(err_buf), 0); if (connection == NULL) { /* If this fails, make sure that Hive server is running with the connect parameter arguments */ fprintf(stderr, "Connect failed: %s\n", err_buf); fprintf(stderr, "\n\n\nMAKE SURE YOU HAVE THE STANDALONE HIVESERVER RUNNING!\n"); fprintf(stderr, "Expected Connection Parameters:\n"); - fprintf(stderr, "HOST: %s\n", DEFAULT_HOST); - fprintf(stderr, "PORT: %s\n", DEFAULT_PORT); - fprintf(stderr, "DATABASE: %s\n\n\n", DEFAULT_DATABASE); + fprintf(stderr, "HOST: %s\n", host); + fprintf(stderr, "PORT: %d\n", port); + fprintf(stderr, "DATABASE: %s\n\n\n", database); assert(connection != NULL); } retval = DBCloseConnection(connection, err_buf, sizeof(err_buf)); @@ -176,12 +287,12 @@ int basic_query_exec_test() { HiveResultSet* resultset; HiveReturn retval; - HiveConnection* connection = DBOpenConnection(DEFAULT_DATABASE, DEFAULT_HOST, 
atoi(DEFAULT_PORT), - atoi(DEFAULT_FRAMED), err_buf, sizeof(err_buf)); + HiveConnection* connection = DBOpenConnection(database, host, port, + framed, err_buf, sizeof(err_buf), 0); /* If this fails, make sure that Hive server is running with the connect parameter arguments */ assert(connection != NULL); - retval = DBExecute(connection, "SHOW TABLES", &resultset, 10, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, "SHOW TABLES", &resultset, 10, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -205,12 +316,12 @@ int basic_fetch_test() { HiveResultSet* resultset; HiveReturn retval; - HiveConnection* connection = DBOpenConnection(DEFAULT_DATABASE, DEFAULT_HOST, atoi(DEFAULT_PORT), - atoi(DEFAULT_FRAMED), err_buf, sizeof(err_buf)); + HiveConnection* connection = DBOpenConnection(database, host, port, + framed, err_buf, sizeof(err_buf), 0); /* If this fails, make sure that Hive server is running with the connect parameter arguments */ assert(connection != NULL); - retval = DBExecute(connection, "SHOW TABLES", &resultset, 10, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, "SHOW TABLES", &resultset, 10, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -220,7 +331,7 @@ int basic_fetch_test() { "DBHasResults failed: %s\n", err_buf, connection, 0); - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -257,8 +368,8 @@ int show_tables_test() { int is_null_value; HiveResultSet* resultset; - HiveConnection* connection = DBOpenConnection(DEFAULT_DATABASE, DEFAULT_HOST, atoi(DEFAULT_PORT), - atoi(DEFAULT_FRAMED), err_buf, sizeof(err_buf)); + HiveConnection* connection = DBOpenConnection(database, host, port, + framed, err_buf, sizeof(err_buf), 0); assert(connection != NULL); /* If this fails, make sure that Hive server is running with the connect parameter arguments */ // Drop pre-existing tables of the same name @@ -269,7 +380,7 @@ int show_tables_test() { // Create the table sprintf(query, "CREATE TABLE %s (key int, value string)", table_name); - retval = DBExecute(connection, query, &resultset, 10, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, &resultset, 10, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -289,7 +400,7 @@ int show_tables_test() { // Test 'show tables' query sprintf(query, "SHOW TABLES '%s'", table_name); - retval = DBExecute(connection, query, &resultset, 10, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, &resultset, 10, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -311,7 +422,7 @@ int show_tables_test() { col_count, connection, 0); // Fetch row - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -338,7 +449,7 @@ int show_tables_test() { col_len, strlen(field), connection, 0); // Fetch row (check that there is nothing else to fetch) - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = 
DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -369,7 +480,7 @@ int query_fetch_test() { fprintf(stderr, "Running %s...\n", __FUNCTION__); char err_buf[MAX_HIVE_ERR_MSG_LEN]; const char* table_name = "ehwang_tmp_test"; - const char* test_data_path = TEST_DATA_DIR_STR "/dataset1.input"; + const char* test_data_path = apr_pstrcat(mp, datadir, "/dataset1.input", NULL); HiveReturn retval; char query[MAX_QUERY_LEN]; char string_field[MAX_FIELD_LEN]; @@ -384,8 +495,8 @@ int query_fetch_test() { HiveType hive_type; HiveType expected_hive_type; - HiveConnection* connection = DBOpenConnection(DEFAULT_DATABASE, DEFAULT_HOST, atoi(DEFAULT_PORT), - atoi(DEFAULT_FRAMED), err_buf, sizeof(err_buf)); + HiveConnection* connection = DBOpenConnection(database, host, port, + framed, err_buf, sizeof(err_buf), 0); /* If this fails, make sure that Hive server is running with the connect parameter arguments */ assert(connection != NULL); @@ -397,7 +508,7 @@ int query_fetch_test() { // Create the table sprintf(query, "CREATE TABLE %s (key int, value string) STORED AS TEXTFILE", table_name); - retval = DBExecute(connection, query, NULL, 0, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, NULL, 0, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -406,7 +517,7 @@ int query_fetch_test() { // NOTE: test_data_path has to be local to the hive server // NOTE: test_data_path is a ctrl-A separated file with two fields per line sprintf(query, "LOAD DATA LOCAL INPATH '%s' INTO TABLE %s", test_data_path, table_name); - retval = DBExecute(connection, query, NULL, 0, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, NULL, 0, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -414,7 +525,7 @@ int query_fetch_test() { // Run Select * query sprintf(query, "SELECT * FROM %s", table_name); // max_buf_len value of 1 to test client side result buffer fetching - retval = DBExecute(connection, query, &resultset, 1, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, &resultset, 1, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -468,7 +579,7 @@ int query_fetch_test() { err_buf, connection, 0); // Fetch row - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -512,7 +623,7 @@ int query_fetch_test() { data_byte_size, strlen(string_field) + 1, connection, 0); // Fetch second row - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -556,7 +667,7 @@ int query_fetch_test() { data_byte_size, strlen(string_field) + 1, connection, 0); // Fetch non-existant row - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -587,7 +698,7 @@ int numeric_range_test() { fprintf(stderr, "Running %s...\n", 
__FUNCTION__); char err_buf[MAX_HIVE_ERR_MSG_LEN]; const char* table_name = "ehwang_tmp_test"; - const char* test_data_path = TEST_DATA_DIR_STR "/dataset_types.input"; + const char* test_data_path = apr_pstrcat(mp, datadir, "/dataset_types.input", NULL); HiveReturn retval; char query[MAX_QUERY_LEN]; int int_field; @@ -596,8 +707,8 @@ int numeric_range_test() { int is_null_value; HiveResultSet* resultset; - HiveConnection* connection = DBOpenConnection(DEFAULT_DATABASE, DEFAULT_HOST, atoi(DEFAULT_PORT), - atoi(DEFAULT_FRAMED), err_buf, sizeof(err_buf)); + HiveConnection *connection = DBOpenConnection(database, host, port, + framed, err_buf, sizeof(err_buf), 0); /* If this fails, make sure that Hive server is running with the connect parameter arguments */ assert(connection != NULL); @@ -612,7 +723,7 @@ int numeric_range_test() { query, "CREATE TABLE %s (tinyint_type tinyint, smallint_type smallint, int_type int, bigint_type bigint, float_type float, double_type double, null_test int) STORED AS TEXTFILE", table_name); - retval = DBExecute(connection, query, NULL, 0, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, NULL, 0, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -621,20 +732,20 @@ int numeric_range_test() { // NOTE: test_data_path has to be local to the hive server // NOTE: test_data_path is a ctrl-A separated file with seven fields per line sprintf(query, "LOAD DATA LOCAL INPATH '%s' INTO TABLE %s", test_data_path, table_name); - retval = DBExecute(connection, query, NULL, 0, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, NULL, 0, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); // Run Select * query sprintf(query, "SELECT * FROM %s", table_name); - retval = DBExecute(connection, query, &resultset, 1, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, &resultset, 1, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); // Fetch row of minimum numeric values - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -724,7 +835,7 @@ int numeric_range_test() { "Field should be NULL\n", connection, 0); // Fetch row of maximum numeric values - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); RETURN_ON_ASSERT_NO_ARG_CLOSE(retval == HIVE_NO_MORE_DATA, "DBFetch failed: Could not fetch the second row\n", @@ -830,7 +941,7 @@ int field_multifetch_test() { fprintf(stderr, "Running %s...\n", __FUNCTION__); char err_buf[MAX_HIVE_ERR_MSG_LEN]; const char* table_name = "ehwang_tmp_test"; - const char* test_data_path = TEST_DATA_DIR_STR "/dataset2.input"; + const char* test_data_path = apr_pstrcat(mp, datadir, "/dataset2.input", NULL); HiveReturn retval; char query[MAX_QUERY_LEN]; char string_field[MAX_FIELD_LEN]; @@ -839,8 +950,8 @@ int field_multifetch_test() { int is_null_value; HiveResultSet* resultset; - HiveConnection* connection = DBOpenConnection(DEFAULT_DATABASE, DEFAULT_HOST, atoi(DEFAULT_PORT), - atoi(DEFAULT_FRAMED), err_buf, 
sizeof(err_buf)); + HiveConnection* connection = DBOpenConnection(database, host, port, + framed, err_buf, sizeof(err_buf), 0); /* If this fails, make sure that Hive server is running with the connect parameter arguments */ assert(connection != NULL); @@ -853,7 +964,7 @@ int field_multifetch_test() { // Create the table sprintf(query, "CREATE TABLE %s (fixed_len_field int, var_len_field string) STORED AS TEXTFILE", table_name); - retval = DBExecute(connection, query, NULL, 0, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, NULL, 0, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); @@ -862,20 +973,20 @@ int field_multifetch_test() { // NOTE: test_data_path has to be local to the hive server // NOTE: test_data_path is a ctrl-A separated file with two fields per line sprintf(query, "LOAD DATA LOCAL INPATH '%s' INTO TABLE %s", test_data_path, table_name); - retval = DBExecute(connection, query, NULL, 0, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, NULL, 0, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); // Run Select * query sprintf(query, "SELECT * FROM %s", table_name); - retval = DBExecute(connection, query, &resultset, 1, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, &resultset, 1, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); // Fetch row - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -1061,8 +1172,8 @@ int meta_data_function_test() { int is_null_value; HiveResultSet* resultset; - HiveConnection* connection = DBOpenConnection(DEFAULT_DATABASE, DEFAULT_HOST, atoi(DEFAULT_PORT), - atoi(DEFAULT_FRAMED), err_buf, sizeof(err_buf)); + HiveConnection* connection = DBOpenConnection(database, host, port, + framed, err_buf, sizeof(err_buf), 0); assert(connection != NULL); /* If this fails, make sure that Hive server is running with the connect parameter arguments */ // Drop pre-existing tables of the same name @@ -1073,13 +1184,14 @@ int meta_data_function_test() { // Create the table sprintf(query, "CREATE TABLE %s (key int, value string)", table_name); - retval = DBExecute(connection, query, NULL, 10, err_buf, sizeof(err_buf)); + retval = DBExecute(connection, query, NULL, 10, 1, err_buf, sizeof(err_buf), 0); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n", err_buf, connection, 0); // Test DBTables - retval = DBTables(connection, table_name, &resultset, err_buf, sizeof(err_buf)); + //retval = DBTables(connection, table_name, &resultset, err_buf, sizeof(err_buf)); + retval = DBTables(connection, "*", &resultset, err_buf, sizeof(err_buf)); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBTables failed: %s\n", err_buf, connection, 0); @@ -1101,7 +1213,7 @@ int meta_data_function_test() { col_count, connection, 0); // Fetch row - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -1129,7 +1241,7 @@ int meta_data_function_test() { col_len, strlen(field), connection, 0); // Fetch row 
(check that there is nothing else to fetch) - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -1170,7 +1282,7 @@ int meta_data_function_test() { col_count, connection, 0); // Fetch first row - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -1242,7 +1354,7 @@ int meta_data_function_test() { 1, int_buffer, connection, 0); // Fetch second row - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -1315,7 +1427,7 @@ int meta_data_function_test() { 2, int_buffer, connection, 0); // Fetch row (check that there is nothing else to fetch) - retval = DBFetch(resultset, err_buf, sizeof(err_buf)); + retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count); RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n", err_buf, connection, 0); @@ -1343,53 +1455,3 @@ int meta_data_function_test() { return 1; } -/************************************************************************************************** - * MAIN FUNCTION - **************************************************************************************************/ - -int main() { - int failed = 0; - fprintf(stderr, "\nStarting Hive Client C tests...\n\n"); - - if (basic_connect_disconnect_test() == 0) { - failed++; - fprintf(stderr, "----FAILED basic_connect_disconnect_test!\n"); - } - if (basic_query_exec_test() == 0) { - failed++; - fprintf(stderr, "----FAILED basic_query_exec_test!\n"); - } - if (basic_fetch_test() == 0) { - failed++; - fprintf(stderr, "----FAILED basic_fetch_test!\n"); - } - if (show_tables_test() == 0) { - failed++; - fprintf(stderr, "----FAILED show_tables_test!\n"); - } - if (query_fetch_test() == 0) { - failed++; - fprintf(stderr, "----FAILED query_fetch_test!\n"); - } - if (numeric_range_test() == 0) { - failed++; - fprintf(stderr, "----FAILED numeric_range_test!\n"); - } - if (field_multifetch_test() == 0) { - failed++; - fprintf(stderr, "----FAILED field_multifetch_test!\n"); - } - if (meta_data_function_test() == 0) { - failed++; - fprintf(stderr, "----FAILED meta_data_function_test!\n"); - } - - if (failed == 0) { - fprintf(stderr, "\nALL HIVE CLIENT TESTS PASSED!\n\n"); - return 0; - } else { - fprintf(stderr, "\nHIVE CLIENT TEST FAILURE: %i test(s) failed.\n\n", failed); - return 1; - } -} - diff --git odbc/src/test/hiveodbctest.c odbc/src/test/hiveodbctest.c new file mode 100644 index 0000000..42d480d --- /dev/null +++ odbc/src/test/hiveodbctest.c @@ -0,0 +1,524 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifdef HAVE_CONFIG_H + #include "config.h" +#endif +#if defined(_WIN32) || defined(_WIN64) + #include +#endif +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "hiveclient.h" +#include "hivetest.h" + +#define MAX_QUERY_LEN 1024 +#define MAX_FIELD_LEN 255 + +static const char *DEFAULT_DSN = "Hive DataSource"; + +static const char *connect_str; +static const char *dsn; +static const char *database; +static const char *host; +int port; +static const char *datadir; + +/* Default memory pool for APR lib */ +apr_pool_t *mp; + +/* + apr_getopt ignores short option characters with + valus > 255. Unfortunately the library does not + define a symbolic value for this constant. +*/ +#define HELP_SHORT_OPT 255+1 + +/* getopt command line options */ +static const apr_getopt_option_t opt_option[] = { + /* long-option, short-option, has-arg flag, description */ + { "dsn", 's', TRUE, "datasource name [Hive]" }, + { "database", 'd', TRUE, "database name [default]" }, + { "host", 'h', TRUE, "host name [localhost]" }, + { "port", 'p', TRUE, "port number [10000]" }, + { "test-data-dir", 't', TRUE, "test data directory" }, + { "help", HELP_SHORT_OPT, FALSE, "display this help and exit" }, + { NULL, 0, 0, NULL }, /* end (a.k.a. sentinel) */ +}; + + +/* Test Functions */ +int basic_connect_disconnect_test(); +int SQLDriverConnect_test(); +int SQLGetInfo_test(); +int SQLTables_test(); +int SQLTables2_test(); + +/* Utility Functions */ +void process_args(int argc, const char *argv[]); +void print_usage(const apr_getopt_option_t *opts); +int dummyHiveTypeConverter(HiveType type); +HiveReturn dropTable(HiveConnection* connection, const char* table_name); +void print_error(char *fn, SQLHANDLE handle, SQLSMALLINT type); + + +int main(int argc, const char *argv[]) { + int failed = 0; + + apr_app_initialize(&argc, (char const *const **)&argv, NULL); + apr_pool_create(&mp, NULL); + + process_args(argc, argv); + + connect_str = apr_psprintf(mp, "DSN=%s;DATABASE=%s;HOST=%s;PORT=%d", + dsn, database, host, port); + + fprintf(stderr, "Using the following ODBC connect string: %s\n", connect_str); + fprintf(stderr, "\n\nStarting Hive ODBC tests...\n\n"); + + + RUN_TEST(basic_connect_disconnect_test); + RUN_TEST(SQLDriverConnect_test); + RUN_TEST(SQLGetInfo_test); + RUN_TEST(SQLTables_test); + RUN_TEST(SQLTables2_test); + + /* cleanup */ + apr_pool_destroy(mp); + apr_terminate(); + + + if (failed == 0) { + fprintf(stderr, "\nALL HIVE ODBC TESTS PASSED!\n\n"); + return 0; + } else { + fprintf(stderr, "\nHIVE ODBC TEST FAILURE(S): %i test(s) failed.\n\n", failed); + return 1; + } +} + + +void process_args(int argc, const char *argv[]) { + apr_status_t rv; + apr_getopt_t *opt; + int optch; + const char *optarg; + + int dsn_set = FALSE; + int database_set = FALSE; + int host_set = FALSE; + int port_set = FALSE; + int datadir_set = FALSE; + + /* initialize apr_getopt_t */ + apr_getopt_init(&opt, mp, argc, argv); + + while ((rv = apr_getopt_long(opt, opt_option, &optch, &optarg)) != APR_EOF) { + switch (rv) { + case APR_BADCH: + /* Found a bad 
option character */ + print_usage(opt_option); + exit(EXIT_FAILURE); + break; + case APR_BADARG: + /* No argument followed the option flag */ + print_usage(opt_option); + exit(EXIT_FAILURE); + break; + case APR_SUCCESS: + /* The next option was found */ + switch (optch) { + case 's': /* --dsn */ + dsn = apr_pstrndup(mp, optarg, strlen(optarg)); + dsn_set = TRUE; + break; + case 'd': /* --database */ + database = apr_pstrndup(mp, optarg, strlen(optarg)); + database_set = TRUE; + break; + case 'h': /* --host */ + host = apr_pstrndup(mp, optarg, strlen(optarg)); + host_set = TRUE; + break; + case 'p': /* --port */ + port = atoi(optarg); + port_set = TRUE; + break; + case 't': /* --test-data-dir */ + datadir = apr_pstrndup(mp, optarg, strlen(optarg)); + datadir_set = TRUE; + break; + case HELP_SHORT_OPT: + print_usage(opt_option); + exit(EXIT_SUCCESS); + } + break; + } + } + + if (!dsn_set) { + dsn = DEFAULT_DSN; + } + if (!database_set) { + database = DEFAULT_DATABASE; + } + if (!host_set) { + host = DEFAULT_HOST; + } + if (!port_set) { + port = atoi(DEFAULT_PORT); + } + if (!datadir_set) { + datadir = apr_pstrndup(mp, TEST_DATA_DIR_STR, strlen(TEST_DATA_DIR_STR)); + } + + printf("Running Hive Client Tests with the following configuration:\n\n"); + printf("dsn = %s\n", dsn); + printf("database = %s\n", database); + printf("host = %s\n", host); + printf("port = %d\n", port); + printf("test-data-dir = %s\n", datadir); +} + +void print_usage(const apr_getopt_option_t *opts) { + printf("Usage:\n"); + for (;opts->name != NULL; opts++) { + if (opts->optch < 255) { + /* Unfortunately apr_getopt.h does not define a symbolic + value to check. + */ + printf("-%c, ", opts->optch); + } else { + printf(" "); + } + printf("--%s\t%s\n", opts->name, opts->description); + } +} + +/************************************************************************************************** + * TEST FUNCTIONS + **************************************************************************************************/ + +int basic_connect_disconnect_test() { + fprintf(stderr, "Running %s...\n", __FUNCTION__); + char err_buf[MAX_HIVE_ERR_MSG_LEN]; + HiveReturn retval; + HiveConnection *connection = DBOpenConnection(database, host, port, + FALSE, err_buf, sizeof(err_buf), 0); + if (connection == NULL) { + /* If this fails, make sure that Hive server is running with the connect parameter arguments */ + fprintf(stderr, "Connect failed: %s\n", err_buf); + fprintf(stderr, "\n\n\nMAKE SURE YOU HAVE THE STANDALONE HIVESERVER RUNNING!\n"); + fprintf(stderr, "Expected Connection Parameters:\n"); + fprintf(stderr, "HOST: %s\n", host); + fprintf(stderr, "PORT: %s\n", port); + fprintf(stderr, "DATABASE: %s\n\n\n", database); + assert(connection != NULL); + } + retval = DBCloseConnection(connection, err_buf, sizeof(err_buf)); + RETURN_ON_ASSERT_ONE_ARG(retval == HIVE_ERROR, + "Disconnect failed: %s\n", + err_buf, 0) + return 1; +} + +int SQLDriverConnect_test() { + SQLHENV env; + SQLHDBC dbc; + SQLHSTMT stmt; + SQLRETURN ret; + SQLCHAR outstr[1024]; + SQLSMALLINT outstrlen; + + fprintf(stderr, "Running %s...\n", __FUNCTION__); + + SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env); + SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION, (void *)SQL_OV_ODBC3, 0); + SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc); + ret = SQLDriverConnect(dbc, NULL, (SQLCHAR *)connect_str, SQL_NTS, + outstr, sizeof(outstr), &outstrlen, SQL_DRIVER_COMPLETE); + if (SQL_SUCCEEDED(ret)) { + fprintf(stderr, "Connected\n"); + fprintf(stderr, "Returned connection string: \n\t%s\n", 
outstr); + if (SQL_SUCCESS_WITH_INFO == ret) { + fprintf(stderr, "Driver reported the following diagnostic info:\n"); + print_error("SQLDriverConnect", dbc, SQL_HANDLE_DBC); + } + SQLDisconnect(dbc); + } else { + fprintf(stderr, "Failed to connect\n"); + print_error("SQLDriverConnect", dbc, SQL_HANDLE_DBC); + } + SQLFreeHandle(SQL_HANDLE_DBC, dbc); + SQLFreeHandle(SQL_HANDLE_ENV, env); + return 1; +} + + +int SQLGetInfo_test() { + SQLHENV env; + SQLHDBC dbc; + SQLRETURN ret; + SQLCHAR buf[1024]; + SQLSMALLINT len; + + SQLCHAR expected[][128] = { + [SQL_ODBC_VER] "03.00", + [SQL_DBMS_NAME] "Hive", + [SQL_DBMS_VER] "0.6.0", + [SQL_DRIVER_VER] VERSION, + }; + + + fprintf(stderr, "Running %s...\n", __FUNCTION__); + + SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env); + SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION, (void *)SQL_OV_ODBC3, 0); + SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc); + ret = SQLDriverConnect(dbc, NULL, (SQLCHAR *)connect_str, SQL_NTS, + buf, sizeof(buf), &len, SQL_DRIVER_COMPLETE); + if (!SQL_SUCCEEDED(ret)) { + fprintf(stderr, "Failed to connect\n"); + print_error("SQLDriverConnect", dbc, SQL_HANDLE_DBC); + return 0; + } + + ret = SQLGetInfo(dbc, + SQL_ODBC_VER, + buf, + sizeof(buf), + &len); + if (SQL_SUCCEEDED(ret)) { + printf("SQL_ODBC_VER: %s\n", buf); + if (strcmp(expected[SQL_ODBC_VER], buf)) { + return 1; + } + } else { + return 0; + } + + ret = SQLGetInfo(dbc, + SQL_DBMS_NAME, + buf, + sizeof(buf), + &len); + if (SQL_SUCCEEDED(ret)) { + printf("SQL_DBMS_NAME: %s\n", buf); + if (strcmp(expected[SQL_DBMS_NAME], buf)) { + return 1; + } + } else { + return 0; + } + + ret = SQLGetInfo(dbc, + SQL_DBMS_VER, + buf, + sizeof(buf), + &len); + if (SQL_SUCCEEDED(ret)) { + printf("SQL_DBMS_VER: %s\n", buf); + if (strcmp(expected[SQL_DBMS_VER], buf)) { + return 1; + } + } else { + return 0; + } + + ret = SQLGetInfo(dbc, + SQL_DRIVER_VER, + buf, + sizeof(buf), + &len); + if (SQL_SUCCEEDED(ret)) { + printf("SQL_DRIVER_VER: %s\n", buf); + if (strcmp(expected[SQL_DRIVER_VER], buf)) { + return 1; + } + } else { + return 0; + } + + SQLFreeHandle(SQL_HANDLE_DBC, dbc); + SQLFreeHandle(SQL_HANDLE_ENV, env); + + return 1; +} + + +int SQLTables_test() { + SQLHENV env; + SQLHDBC dbc; + SQLHSTMT stmt; + SQLRETURN ret; + SQLSMALLINT columns; + int row = 0; + + fprintf(stderr, "Running %s...\n", __FUNCTION__); + + SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env); + + /* Request ODBC V3 support */ + SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION, (void *) SQL_OV_ODBC3, 0); + + SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc); + + SQLDriverConnect(dbc, NULL, (SQLCHAR *)connect_str, SQL_NTS, + NULL, 0, NULL, SQL_DRIVER_COMPLETE); + + SQLAllocHandle(SQL_HANDLE_STMT, dbc, &stmt); + + /* SQLTables(stmt, NULL, 0, NULL, 0, NULL, 0, "TABLE", SQL_NTS); */ + SQLTables(stmt, NULL, 0, NULL, 0, NULL, 0, NULL, 0); + + SQLNumResultCols(stmt, &columns); + fprintf(stderr, "SQLNumResultColumns = %u\n", columns); + + while (SQL_SUCCEEDED(ret = SQLFetch(stmt))) { + SQLUSMALLINT i; + printf("Row %d\n", row++); + for (i = 1; i <= columns; i++) { + SQLLEN indicator; + char buf[512]; + ret = SQLGetData(stmt, i, SQL_C_CHAR, + buf, sizeof(buf), &indicator); + if (SQL_SUCCEEDED(ret)) { + if (indicator == SQL_NULL_DATA) strcpy(buf, "NULL"); + printf(" Column %u : %s\n", i, buf); + } + } + } + return 1; +} + +int SQLTables2_test() { + SQLHENV env; + SQLHDBC dbc; + SQLHSTMT stmt; + SQLRETURN ret; + SQLSMALLINT columns; + SQLCHAR buf[5][64]; + SQLLEN indicator[5]; + int row = 0; + int i; + + fprintf(stderr, "Running %s...\n", 
__FUNCTION__); + + SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env); + + /* Request ODBC V3 support */ + SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION, (void *) SQL_OV_ODBC3, 0); + + SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc); + + SQLDriverConnect(dbc, NULL, (SQLCHAR *)connect_str, SQL_NTS, + NULL, 0, NULL, SQL_DRIVER_COMPLETE); + + SQLAllocHandle(SQL_HANDLE_STMT, dbc, &stmt); + + /* SQLTables(stmt, NULL, 0, NULL, 0, NULL, 0, "TABLE", SQL_NTS); */ + SQLTables(stmt, NULL, 0, NULL, 0, NULL, 0, NULL, 0); + + SQLNumResultCols(stmt, &columns); + fprintf(stderr, "SQLNumResultColumns = %u\n", columns); + + for (i = 0; i < columns; i++) { + SQLBindCol(stmt, i + 1, SQL_C_CHAR, + buf[i], sizeof(buf[i]), &indicator[i]); + } + + while (SQL_SUCCEEDED(SQLFetch(stmt))) { + for (i = 0; i < columns; i++) { + if (indicator[i] == SQL_NULL_DATA) { + printf(" Column %u : NULL\n", i); + } else { + printf(" Column %u : %s\n", i, buf[i]); + } + } + } + + return 1; +} + + +/************************************************************************************************** + * HELPER FUNCTIONS + **************************************************************************************************/ + +int dummyHiveTypeConverter(HiveType type) { + return 1; // For testing purposes, just return an arbitrary value +} + +HiveReturn dropTable(HiveConnection* connection, const char* table_name) { + char err_buf[MAX_HIVE_ERR_MSG_LEN]; + HiveReturn retval; + char query[MAX_QUERY_LEN]; + int has_results; + HiveResultSet* resultset; + + sprintf(query, "DROP TABLE %s", table_name); + retval = DBExecute(connection, query, &resultset, 10, 1, err_buf, sizeof(err_buf), 0); + RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, + "DBExecute failed: %s\n", + err_buf, connection, HIVE_ERROR); + + retval = DBHasResults(resultset, &has_results, err_buf, sizeof(err_buf)); + RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, + "DBHasResults failed: %s\n", + err_buf, connection, HIVE_ERROR); + RETURN_ON_ASSERT_ONE_ARG_CLOSE(has_results, + "Query '%s' generated results\n", + query, connection, HIVE_ERROR); + + retval = DBCloseResultSet(resultset, err_buf, sizeof(err_buf)); + RETURN_ON_ASSERT_ONE_ARG(retval == HIVE_ERROR, + "DBCloseResultSet failed: %s\n", + err_buf, HIVE_ERROR) + + return HIVE_SUCCESS; +} + +void print_error(char *fn, SQLHANDLE handle, SQLSMALLINT type) { + SQLINTEGER i = 0; + SQLINTEGER native; + SQLCHAR state[7]; + SQLCHAR text[256]; + SQLSMALLINT len; + SQLRETURN ret; + + fprintf(stderr,"\nDiagnostic info %s\n\n", fn); + + do { + ret = SQLGetDiagRec(type, handle, ++i, state, + &native, text, sizeof(text), &len); + if (SQL_SUCCEEDED(ret)) { + fprintf(stderr, "%s:%d:%d:%s\n", state, i, native, text); + } + } while (SQL_SUCCESS != ret); +} diff --git odbc/src/test/hivetest.h odbc/src/test/hivetest.h new file mode 100644 index 0000000..783dd37 --- /dev/null +++ odbc/src/test/hivetest.h @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _HIVETEST_H_ +#define _HIVETEST_H_ + + +// Convert a macro value to a string +#define STRINGIFY(x) XSTRINGIFY(x) +#define XSTRINGIFY(x) #x + +// Path to test data (should be supplied at compile time) +#ifdef TEST_DATA_DIR + #define TEST_DATA_DIR_STR STRINGIFY(TEST_DATA_DIR) +#else + #define TEST_DATA_DIR_STR "/tmp/testdata" +#endif + +/** + * Checks an error condition, and if true: + * 1. prints the error to stderr + * 2. returns the specified ret_val + */ +#define RETURN_ON_ASSERT_ONE_ARG(condition, err_format, arg, ret_val) { \ + if (condition) { \ + fprintf(stderr, "----LINE %i: ", __LINE__); \ + fprintf(stderr, err_format, arg); \ + return ret_val; \ + } \ +} + +/** + * Checks an error condition, and if true: + * 1. prints the error to stderr + * 2. closes the DB connection on db_conn + * 3. returns the specified ret_val + */ +#define RETURN_ON_ASSERT_NO_ARG_CLOSE(condition, err_format, db_conn, ret_val) { \ + if (condition) { \ + char error_buffer_[MAX_HIVE_ERR_MSG_LEN]; \ + fprintf(stderr, "----LINE %i: ", __LINE__); \ + fprintf(stderr, err_format); \ + DBCloseConnection(db_conn, error_buffer_, sizeof(error_buffer_)); \ + return ret_val; \ + } \ +} +#define RETURN_ON_ASSERT_ONE_ARG_CLOSE(condition, err_format, arg, db_conn, ret_val) { \ + if (condition) { \ + char error_buffer_[MAX_HIVE_ERR_MSG_LEN]; \ + fprintf(stderr, "----LINE %i: ", __LINE__); \ + fprintf(stderr, err_format, arg); \ + DBCloseConnection(db_conn, error_buffer_, sizeof(error_buffer_)); \ + return ret_val; \ + } \ +} +#define RETURN_ON_ASSERT_TWO_ARG_CLOSE(condition, err_format, arg1, arg2, db_conn, ret_val) { \ + if (condition) { \ + char error_buffer_[MAX_HIVE_ERR_MSG_LEN]; \ + fprintf(stderr, "----LINE %i: ", __LINE__); \ + fprintf(stderr, err_format, arg1, arg2); \ + DBCloseConnection(db_conn, error_buffer_, sizeof(error_buffer_)); \ + return ret_val; \ + } \ +} + + +#define RUN_TEST(testfunc) \ + do { \ + if (testfunc() == 0) { \ + failed++; \ + fprintf(stderr, " ----FAILED!\n"); \ + } else { \ + fprintf(stderr, " ----SUCCEEDED!\n"); \ + } \ + } while (0) + +#endif /* _HIVETEST_H_ */
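The new hivetest.h collects the RETURN_ON_ASSERT_* macros and the RUN_TEST wrapper so both test programs share one harness. Below is a minimal, hypothetical sketch of a test written against that harness and the revised client API used throughout this patch (DBExecute now takes a fetch-row-size argument and DBFetch reports the number of rows fetched). The function name my_smoke_test, the literal connection parameters, and the fetch loop are illustrative assumptions, not part of the patch; the call signatures mirror those appearing in the diffs above.

/* Hypothetical smoke test built on the shared hivetest.h harness.
 * Assumes the signatures used in this patch:
 *   DBOpenConnection(database, host, port, framed, err_buf, len, 0)
 *   DBExecute(conn, query, &rs, max_buf_rows, fetch_row_size, err_buf, len, 0)
 *   DBFetch(rs, err_buf, len, &row_count)
 */
#include <stdio.h>
#include "hiveclient.h"
#include "hivetest.h"

static int my_smoke_test(void) {
  char err_buf[MAX_HIVE_ERR_MSG_LEN];
  int row_count = 0;
  HiveResultSet* resultset;
  HiveReturn retval;

  /* Placeholder connection parameters; the real tests take these from process_args() */
  HiveConnection* connection = DBOpenConnection("default", "localhost", 10000,
                                                0, err_buf, sizeof(err_buf), 0);
  RETURN_ON_ASSERT_ONE_ARG(connection == NULL, "Connect failed: %s\n", err_buf, 0);

  retval = DBExecute(connection, "SHOW TABLES", &resultset, 10, 1,
                     err_buf, sizeof(err_buf), 0);
  RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBExecute failed: %s\n",
                                 err_buf, connection, 0);

  /* Fetch until the result set is exhausted; row_count reports rows per fetch */
  while ((retval = DBFetch(resultset, err_buf, sizeof(err_buf), &row_count)) == HIVE_SUCCESS) {
    fprintf(stderr, "fetched %d row(s)\n", row_count);
  }
  RETURN_ON_ASSERT_ONE_ARG_CLOSE(retval == HIVE_ERROR, "DBFetch failed: %s\n",
                                 err_buf, connection, 0);

  DBCloseResultSet(resultset, err_buf, sizeof(err_buf));
  DBCloseConnection(connection, err_buf, sizeof(err_buf));
  return 1; /* non-zero means success under RUN_TEST */
}

/* In main(): RUN_TEST(my_smoke_test); the macro increments a local `failed`
 * counter on a zero return, matching the pattern used by hiveclienttest.c
 * and hiveodbctest.c above. */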