@@ -10995,6 +10995,9 @@ int ha_rocksdb::check_and_lock_sk(
     lock for this key.
   */
   if (!(key_info->flags & HA_NOSAME)) {
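+    /* Range locking: a non-unique sk needs no uniqueness check, but its
+       index tuples still must be locked */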
+    if (rocksdb_use_range_locking)
+      return check_and_lock_non_unique_sk(key_id, row_info);
     return HA_EXIT_SUCCESS;
   }
 
@@ -11111,6 +11113,60 @@ int ha_rocksdb::check_and_lock_sk(
   return rc;
 }
 
+
+/**
+  @brief
+  Lock the old and new index tuples of a non-unique sk for range locking
+*/
+int ha_rocksdb::check_and_lock_non_unique_sk(
+    const uint key_id, const struct update_row_info &row_info) {
+
+  DBUG_ASSERT(rocksdb_use_range_locking);
+  const Rdb_key_def &kd = *m_key_descr_arr[key_id];
+  bool store_row_debug_checksums = should_store_row_debug_checksums();
+
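+  /* Range locking: lock the index tuple being deleted (UPDATE case only) */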
+  if (row_info.old_data != nullptr) {
+    rocksdb::Slice old_key_slice;
+    int old_packed_size;
+
+    old_packed_size = kd.pack_record(
+        table, m_pack_buffer, row_info.old_data, m_sk_packed_tuple_old,
+        &m_sk_tails_old, store_row_debug_checksums, row_info.hidden_pk_id, 0,
+        nullptr, m_ttl_bytes);
+
+    old_key_slice = rocksdb::Slice(
+        reinterpret_cast<const char *>(m_sk_packed_tuple_old), old_packed_size);
+
+    auto s= row_info.tx->lock_singlepoint_range(kd.get_cf(), old_key_slice);
+    if (!s.ok()) {
+      return (row_info.tx->set_status_error(table->in_use, s, kd,
+                                            m_tbl_def, m_table_handler));
+    }
+  }
+
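+  /* Range locking: lock the index tuple being inserted */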
+  int new_packed_size;
+  rocksdb::Slice new_key_slice;
+  rocksdb::Slice new_value_slice;
+  new_packed_size =
+      kd.pack_record(table, m_pack_buffer, row_info.new_data,
+                     m_sk_packed_tuple, &m_sk_tails, 0,
+                     row_info.hidden_pk_id, 0, nullptr, m_ttl_bytes);
+  new_key_slice = rocksdb::Slice(
+      reinterpret_cast<const char *>(m_sk_packed_tuple), new_packed_size);
+
+  auto s= row_info.tx->lock_singlepoint_range(kd.get_cf(), new_key_slice);
+  if (!s.ok()) {
+    return (row_info.tx->set_status_error(table->in_use, s, kd,
+                                          m_tbl_def, m_table_handler));
+  }
+
+  return HA_EXIT_SUCCESS;
+}
+
+
 /**
   Enumerate all keys to check their uniqueness and also lock them
 
@@ -11428,15 +11481,6 @@ int ha_rocksdb::update_write_sk(const TABLE *const table_arg,
   old_key_slice = rocksdb::Slice(
       reinterpret_cast<const char *>(m_sk_packed_tuple_old), old_packed_size);
 
-  /* Range locking: lock the index tuple being deleted */
-  if (rocksdb_use_range_locking) {
-    auto s= row_info.tx->lock_singlepoint_range(kd.get_cf(), old_key_slice);
-    if (!s.ok()) {
-      return (row_info.tx->set_status_error(table->in_use, s, kd,
-                                            m_tbl_def, m_table_handler));
-    }
-  }
-
   // TODO(mung) - If the new_data and old_data belong to the same partial index
   // group (ie. have the same prefix), we can make use of the read below to
   // determine whether to issue SingleDelete or not.
@@ -11482,14 +11526,6 @@ int ha_rocksdb::update_write_sk(const TABLE *const table_arg,
   if (bulk_load_sk && row_info.old_data == nullptr) {
     rc = bulk_load_key(row_info.tx, kd, new_key_slice, new_value_slice, true);
   } else {
-    /* Range locking: lock the index tuple being inserted */
-    if (rocksdb_use_range_locking) {
-      auto s= row_info.tx->lock_singlepoint_range(kd.get_cf(), new_key_slice);
-      if (!s.ok()) {
-        return (row_info.tx->set_status_error(table->in_use, s, kd,
-                                              m_tbl_def, m_table_handler));
-      }
-    }
     row_info.tx->get_indexed_write_batch()->Put(kd.get_cf(), new_key_slice,
                                                 new_value_slice);
   }