+ uint8_t rseg_cmd[] = {TOPAZ_RSEG, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint8_t rseg_response[131];
+
+ rseg_cmd[1] = segno << 4;
+ memcpy(&rseg_cmd[10], uid, 4);
+ if (!topaz_send_cmd(rseg_cmd, sizeof(rseg_cmd), rseg_response)) {
+ topaz_switch_off_field();
+ return -1; // RSEG failed
+ }
+
+ memcpy(segment_data, &rseg_response[1], 128);
+
+ return 0;
+}
+
+
+ // Find the dynamic lock area descriptor covering the lockable region that
+ // contains byteno, or NULL if no descriptor matches.
+ // NOTE(review): relies on the list ordering of topaz_tag.dynamic_lock_areas;
+ // only the lower bound (first_locked_byte) is checked here — confirm the list
+ // is sorted so the first match is the correct area.
+ static dynamic_lock_area_t *get_dynamic_lock_area(uint16_t byteno)
+ {
+     for (dynamic_lock_area_t *area = topaz_tag.dynamic_lock_areas;
+          area != NULL;
+          area = area->next) {
+         if (byteno >= area->first_locked_byte) {
+             return area;
+         }
+     }
+
+     return NULL;
+ }
+
+
+// check if a memory byte is locked.
+static bool topaz_byte_is_locked(uint16_t byteno)
+{
+ uint8_t *lockbits;
+ uint16_t locked_bytes_per_bit;
+ dynamic_lock_area_t *lock_area;
+
+ if (byteno < TOPAZ_STATIC_MEMORY) {
+ lockbits = &topaz_tag.data_blocks[0x0e][0];
+ locked_bytes_per_bit = 8;
+ } else {
+ lock_area = get_dynamic_lock_area(byteno);
+ if (lock_area == NULL) {
+ return false;
+ } else {
+ lockbits = &topaz_tag.dynamic_memory[lock_area->byte_offset - TOPAZ_STATIC_MEMORY];
+ locked_bytes_per_bit = lock_area->bytes_locked_per_bit;
+ byteno = byteno - lock_area->first_locked_byte;
+ }
+ }
+
+ uint16_t blockno = byteno / locked_bytes_per_bit;
+ if(lockbits[blockno/8] & (0x01 << (blockno % 8))) {