ascii-chat 0.8.38
Real-time terminal-based video chat with ASCII art conversion
Loading...
Searching...
No Matches
lock.c
Go to the documentation of this file.
1
8// Header must be included even in release builds to get inline no-op stubs
9#include <ascii-chat/debug/lock.h>
10
11#ifdef DEBUG_LOCKS
12// Only compile lock_debug implementation when DEBUG_LOCKS is defined
13// Without DEBUG_LOCKS, lock_debug.h provides inline no-op stubs
14
15#include <ascii-chat/common.h>
16#include <ascii-chat/common/buffer_sizes.h>
17#include <ascii-chat/asciichat_errno.h>
18#include <ascii-chat/platform/abstraction.h>
19#include <ascii-chat/util/fnv1a.h>
20#include <ascii-chat/util/time.h>
21#include <ascii-chat/util/path.h>
22#include <ascii-chat/uthash/uthash.h>
23#include <stdlib.h>
24#include <time.h>
25#include <stdatomic.h>
26#include <string.h>
27
28// Enable lock debug tracing to stderr (bypasses logging system)
29// Set to 1 to trace all lock operations for debugging deadlocks
30#define TRACE_LOCK_DEBUG 0
31
32#if TRACE_LOCK_DEBUG
33#include <stdio.h>
34// Simple trace for internal lock debug operations
35#define LOCK_TRACE(fmt, ...) fprintf(stderr, "[LOCK_TRACE] %s:%d " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
36// Trace every lock/unlock with caller info
37#define LOCK_OP_TRACE(op, lock_type, file, line, func) \
38 fprintf(stderr, "[LOCK_OP] %s %s @ %s:%d in %s()\n", op, lock_type, file, line, func)
39#else
40#define LOCK_TRACE(fmt, ...) ((void)0)
41#define LOCK_OP_TRACE(op, lock_type, file, line, func) ((void)0)
42#endif
43
44#ifdef _WIN32
45#include <io.h>
46#include <conio.h>
47#else
48#include <sys/select.h>
49#include <unistd.h>
50// termios.h no longer needed - lock debug thread no longer reads stdin
51#include <fcntl.h>
52#endif
53
54// ============================================================================
55// Constants
56// ============================================================================
57
65#define LOCK_HOLD_TIME_WARNING_MS 500
66
73#define SAFE_BUFFER_SIZE(buffer_size, offset) \
74 ((offset) < 0 || (size_t)(offset) >= (buffer_size) ? 0 : (buffer_size) - (size_t)(offset))
75
76// ============================================================================
77// Global State
78// ============================================================================
79
80lock_debug_manager_t g_lock_debug_manager = {0};
81atomic_bool g_initializing = false; // Flag to prevent tracking during initialization
82
83// Terminal state no longer needed - lock debug thread no longer reads stdin
84
94static lock_record_t *create_lock_record(void *lock_address, lock_type_t lock_type, const char *file_name,
95 int line_number, const char *function_name) {
96 lock_record_t *record = SAFE_CALLOC(1, sizeof(lock_record_t), lock_record_t *);
97
98 // Fill in basic information
99 record->key = lock_record_key(lock_address, lock_type);
100 record->lock_address = lock_address;
101 record->lock_type = lock_type;
102 record->thread_id = asciichat_thread_current_id();
103 record->file_name = file_name;
104 record->line_number = line_number;
105 record->function_name = function_name;
106
107 // Get current time in nanoseconds
108 record->acquisition_time_ns = time_get_ns();
109
110 // Capture backtrace
111 // NOTE: platform_backtrace_symbols_destroy() safely handles the case where
112 // backtrace_symbols() uses system malloc() but mimalloc overrides free()
113 // by detecting our enhanced format and not freeing system-allocated memory
114 record->backtrace_size = platform_backtrace(record->backtrace_buffer, MAX_BACKTRACE_FRAMES);
115 if (record->backtrace_size > 0) {
116 record->backtrace_symbols = platform_backtrace_symbols(record->backtrace_buffer, record->backtrace_size);
117 if (!record->backtrace_symbols) {
118 static bool symbolize_error_logged = false;
119 if (!symbolize_error_logged) {
120 log_warn("Failed to symbolize backtrace for lock record (backtrace support may be unavailable)");
121 symbolize_error_logged = true;
122 }
123 }
124 } else {
125 // Backtrace unavailable (e.g., no libexecinfo)
126 // Only log this once to avoid spam
127 static bool backtrace_error_logged = false;
128 if (!backtrace_error_logged) {
129 log_debug("Backtrace not available for lock debugging");
130 backtrace_error_logged = true;
131 }
132 }
133
134 return record;
135}
136
141static void free_lock_record(lock_record_t *record) {
142 if (!record) {
143 SET_ERRNO(ERROR_INVALID_STATE, "Freeing NULL lock record");
144 return;
145 }
146
147 // Free symbolized backtrace
148 if (record->backtrace_symbols) {
149 platform_backtrace_symbols_destroy(record->backtrace_symbols);
150 }
151
152 SAFE_FREE(record);
153}
154
155// ============================================================================
156// Callback Functions
157// ============================================================================
158
164static void collect_lock_record_callback(lock_record_t *record, void *user_data) {
165 struct lock_collector {
166 uint32_t count;
167 char *buffer;
168 int *offset;
169 } *collector = (struct lock_collector *)user_data;
170
171 collector->count++;
172 int *offset = collector->offset;
173 char *buffer = collector->buffer;
174 size_t buffer_size = 32768; // Match the buffer size in lock_debug_print_state
175
176 // Print lock information
177 const char *lock_type_str;
178 switch (record->lock_type) {
179 case LOCK_TYPE_MUTEX:
180 lock_type_str = "MUTEX";
181 break;
182 case LOCK_TYPE_RWLOCK_READ:
183 lock_type_str = "RWLOCK_READ";
184 break;
185 case LOCK_TYPE_RWLOCK_WRITE:
186 lock_type_str = "RWLOCK_WRITE";
187 break;
188 default:
189 lock_type_str = "UNKNOWN";
190 break;
191 }
192
193 *offset += safe_snprintf(buffer + *offset, SAFE_BUFFER_SIZE(buffer_size, *offset), "Lock #%u: %s at %p\n",
194 collector->count, lock_type_str, record->lock_address);
195 *offset += safe_snprintf(buffer + *offset, SAFE_BUFFER_SIZE(buffer_size, *offset), " Thread ID: %llu\n",
196 (unsigned long long)record->thread_id);
197 *offset +=
198 safe_snprintf(buffer + *offset, SAFE_BUFFER_SIZE(buffer_size, *offset), " Acquired: %s:%d in %s()\n",
199 extract_project_relative_path(record->file_name), record->line_number, record->function_name);
200
201 // Calculate how long the lock has been held
202 uint64_t current_time_ns = time_get_ns();
203 uint64_t held_ns = time_elapsed_ns(record->acquisition_time_ns, current_time_ns);
204 char duration_str[32];
205 format_duration_ns((double)held_ns, duration_str, sizeof(duration_str));
206 *offset += safe_snprintf(buffer + *offset, SAFE_BUFFER_SIZE(buffer_size, *offset), " Held for: %s\n", duration_str);
207
208 // Print backtrace using platform symbol resolution with colored format
209 if (record->backtrace_size > 0) {
210 char **symbols = platform_backtrace_symbols(record->backtrace_buffer, record->backtrace_size);
211 *offset += platform_format_backtrace_symbols(buffer + *offset, SAFE_BUFFER_SIZE(buffer_size, *offset), "Call stack",
212 symbols, record->backtrace_size, 0, 0, NULL);
213 if (symbols) {
215 }
216 } else {
217 *offset +=
218 safe_snprintf(buffer + *offset, SAFE_BUFFER_SIZE(buffer_size, *offset), " Call stack: <capture failed>\n");
219 }
220}
221
227static void cleanup_lock_record_callback(lock_record_t *record, void *user_data) {
228 uint32_t *count = (uint32_t *)user_data;
229
230 (*count)++;
231 free_lock_record(record);
232}
233
239static void print_usage_stats_callback(lock_usage_stats_t *stats, void *user_data) {
240 uint32_t *count = (uint32_t *)user_data;
241 (*count)++;
242
243 // Get lock type string
244 const char *lock_type_str;
245 switch (stats->lock_type) {
246 case LOCK_TYPE_MUTEX:
247 lock_type_str = "MUTEX";
248 break;
249 case LOCK_TYPE_RWLOCK_READ:
250 lock_type_str = "RWLOCK_READ";
251 break;
252 case LOCK_TYPE_RWLOCK_WRITE:
253 lock_type_str = "RWLOCK_WRITE";
254 break;
255 default:
256 lock_type_str = "UNKNOWN";
257 break;
258 }
259
260 // Calculate average hold time
261 uint64_t avg_hold_time_ns = stats->total_hold_time_ns / stats->total_acquisitions;
262
263 // Format all information into a single log message with newlines
264 char log_message[BUFFER_SIZE_LARGE];
265 int offset = 0;
266
267 offset += safe_snprintf(log_message + offset, SAFE_BUFFER_SIZE(sizeof(log_message), offset),
268 "Usage #%u: %s at %s:%d in %s()\n", *count, lock_type_str,
269 extract_project_relative_path(stats->file_name), stats->line_number, stats->function_name);
270 offset += safe_snprintf(log_message + offset, SAFE_BUFFER_SIZE(sizeof(log_message), offset),
271 " Total acquisitions: %llu\n", (unsigned long long)stats->total_acquisitions);
272 offset += safe_snprintf(log_message + offset, SAFE_BUFFER_SIZE(sizeof(log_message), offset),
273 " Total hold time: %llu.%03llu ms\n",
274 (unsigned long long)(stats->total_hold_time_ns / NS_PER_MS_INT),
275 (unsigned long long)((stats->total_hold_time_ns % NS_PER_MS_INT) / 1000));
276 offset +=
277 safe_snprintf(log_message + offset, SAFE_BUFFER_SIZE(sizeof(log_message), offset),
278 " Average hold time: %llu.%03llu ms\n", (unsigned long long)(avg_hold_time_ns / NS_PER_MS_INT),
279 (unsigned long long)((avg_hold_time_ns % NS_PER_MS_INT) / 1000));
280 offset +=
281 safe_snprintf(log_message + offset, SAFE_BUFFER_SIZE(sizeof(log_message), offset),
282 " Max hold time: %llu.%03llu ms\n", (unsigned long long)(stats->max_hold_time_ns / NS_PER_MS_INT),
283 (unsigned long long)((stats->max_hold_time_ns % NS_PER_MS_INT) / 1000));
284 offset +=
285 safe_snprintf(log_message + offset, SAFE_BUFFER_SIZE(sizeof(log_message), offset),
286 " Min hold time: %llu.%03llu ms\n", (unsigned long long)(stats->min_hold_time_ns / NS_PER_MS_INT),
287 (unsigned long long)((stats->min_hold_time_ns % NS_PER_MS_INT) / 1000));
288 char first_acq_str[32];
289 format_duration_ns((double)stats->first_acquisition_ns, first_acq_str, sizeof(first_acq_str));
290 offset += safe_snprintf(log_message + offset, SAFE_BUFFER_SIZE(sizeof(log_message), offset),
291 " First acquisition: %s\n", first_acq_str);
292
293 char last_acq_str[32];
294 format_duration_ns((double)stats->last_acquisition_ns, last_acq_str, sizeof(last_acq_str));
295 offset += safe_snprintf(log_message + offset, SAFE_BUFFER_SIZE(sizeof(log_message), offset), " Last acquisition: %s",
296 last_acq_str);
297
298 log_info("%s", log_message);
299}
300
/**
 * HASH_ITER cleanup callback: frees one usage-stats entry.
 * user_data is unused (kept for callback signature compatibility).
 */
static void cleanup_usage_stats_callback(lock_usage_stats_t *stats, void *user_data) {
  UNUSED(user_data);
  SAFE_FREE(stats);
}
310
316void print_orphaned_release_callback(lock_record_t *record, void *user_data) {
317 uint32_t *count = (uint32_t *)user_data;
318 (*count)++;
319
320 // Print lock information
321 const char *lock_type_str;
322 switch (record->lock_type) {
323 case LOCK_TYPE_MUTEX:
324 lock_type_str = "MUTEX";
325 break;
326 case LOCK_TYPE_RWLOCK_READ:
327 lock_type_str = "RWLOCK_READ";
328 break;
329 case LOCK_TYPE_RWLOCK_WRITE:
330 lock_type_str = "RWLOCK_WRITE";
331 break;
332 default:
333 lock_type_str = "UNKNOWN";
334 break;
335 }
336
337 log_info("Orphaned Release #%u: %s at %p", *count, lock_type_str, record->lock_address);
338 log_info(" Thread ID: %llu", (unsigned long long)record->thread_id);
339 log_info(" Released: %s:%d in %s()", extract_project_relative_path(record->file_name), record->line_number,
340 record->function_name);
341 char release_time_str[32];
342 format_duration_ns((double)record->acquisition_time_ns, release_time_str, sizeof(release_time_str));
343 log_info(" Released at: %s (nanosecond %llu)", release_time_str, (unsigned long long)record->acquisition_time_ns);
344
345 // Print backtrace for the orphaned release
346 if (record->backtrace_size > 0) {
347 char **symbols = platform_backtrace_symbols(record->backtrace_buffer, record->backtrace_size);
348 platform_print_backtrace_symbols(" Release call stack", symbols, record->backtrace_size, 0, 0, NULL);
349 if (symbols) {
351 }
352 } else {
353 log_info(" Release call stack: <capture failed>");
354 }
355}
356
357// ============================================================================
358// Debug Thread Functions
359// ============================================================================
360
// Structure to hold info about a long-held lock (for deferred logging).
// All fields are COPIES taken while lock_records_lock is held, so that the
// actual logging can happen after the lock is released (see
// check_long_held_locks for why logging under that lock deadlocks).
typedef struct {
  char duration_str[32];          // pre-formatted hold duration
  const char *lock_type_str;      // static string, safe to keep by pointer
  void *lock_address;
  char file_name[BUFFER_SIZE_SMALL];   // copied: record may be freed after we release the lock
  int line_number;
  char function_name[128];             // copied for the same reason
  uint64_t thread_id;
  void *backtrace_buffer[MAX_BACKTRACE_FRAMES]; // copied acquisition backtrace
  int backtrace_size;
} long_held_lock_info_t;

// Cap on how many long-held locks are reported per scan (stack-allocated array)
#define MAX_LONG_HELD_LOCKS 32
384
385static void check_long_held_locks(void) {
386 if (!atomic_load(&g_lock_debug_manager.initialized)) {
387 return;
388 }
389 LOCK_TRACE("acquiring lock_records_lock (read)");
390
391 // Collect long-held lock info while holding the read lock
392 // We MUST NOT call log functions while holding lock_records_lock because:
393 // - log functions may call mutex_lock() for rotation
394 // - mutex_lock() macro calls debug_process_tracked_lock()
395 // - debug_process_tracked_lock() tries to acquire lock_records_lock as WRITE
396 // - DEADLOCK: we already hold it as READ, can't upgrade to WRITE
397 long_held_lock_info_t long_held_locks[MAX_LONG_HELD_LOCKS];
398 int num_long_held = 0;
399
400 // Acquire read lock for lock_records
401 rwlock_rdlock_impl(&g_lock_debug_manager.lock_records_lock);
402
403 uint64_t current_time_ns = time_get_ns();
404
405 // Iterate through all currently held locks and collect info
406 lock_record_t *entry, *tmp;
407 HASH_ITER(hash_handle, g_lock_debug_manager.lock_records, entry, tmp) {
408 if (num_long_held >= MAX_LONG_HELD_LOCKS) {
409 break; // Limit how many we report
410 }
411
412 // Calculate how long the lock has been held in nanoseconds
413 uint64_t held_ns = time_elapsed_ns(entry->acquisition_time_ns, current_time_ns);
414
415 if (held_ns > 100 * NS_PER_MS_INT) {
416 long_held_lock_info_t *info = &long_held_locks[num_long_held];
417
418 // Get lock type string
419 switch (entry->lock_type) {
420 case LOCK_TYPE_MUTEX:
421 info->lock_type_str = "MUTEX";
422 break;
423 case LOCK_TYPE_RWLOCK_READ:
424 info->lock_type_str = "RWLOCK_READ";
425 break;
426 case LOCK_TYPE_RWLOCK_WRITE:
427 info->lock_type_str = "RWLOCK_WRITE";
428 break;
429 default:
430 info->lock_type_str = "UNKNOWN";
431 break;
432 }
433
434 // Copy all info we need for logging later
435 format_duration_ns((double)held_ns, info->duration_str, sizeof(info->duration_str));
436 info->lock_address = entry->lock_address;
437 SAFE_STRNCPY(info->file_name, entry->file_name, sizeof(info->file_name) - 1);
438 info->file_name[sizeof(info->file_name) - 1] = '\0';
439 info->line_number = entry->line_number;
440 SAFE_STRNCPY(info->function_name, entry->function_name, sizeof(info->function_name) - 1);
441 info->function_name[sizeof(info->function_name) - 1] = '\0';
442 info->thread_id = entry->thread_id;
443
444 // Copy acquisition backtrace for logging later
445 info->backtrace_size = entry->backtrace_size;
446 if (entry->backtrace_size > 0) {
447 memcpy(info->backtrace_buffer, entry->backtrace_buffer, (size_t)entry->backtrace_size * sizeof(void *));
448 }
449
450 num_long_held++;
451 }
452 }
453
454 // Release the lock BEFORE logging to prevent deadlock
455 rwlock_rdunlock_impl(&g_lock_debug_manager.lock_records_lock);
456 LOCK_TRACE("released lock_records_lock (read), found %d long-held locks", num_long_held);
457
458 // Now safe to log - we no longer hold lock_records_lock
459 for (int i = 0; i < num_long_held; i++) {
460 long_held_lock_info_t *info = &long_held_locks[i];
461 log_warn_every(LOG_RATE_FAST,
462 "Lock held for %s (threshold: 100ms) - %s at %p\n"
463 " Acquired: %s:%d in %s()\n"
464 " Thread ID: %llu",
465 info->duration_str, info->lock_type_str, info->lock_address,
466 extract_project_relative_path(info->file_name), info->line_number, info->function_name,
467 (unsigned long long)info->thread_id);
468 }
469
470 // Print per-lock acquisition backtraces for each long-held lock
471 for (int i = 0; i < num_long_held; i++) {
472 long_held_lock_info_t *info = &long_held_locks[i];
473 if (info->backtrace_size > 0) {
474 char **symbols = platform_backtrace_symbols(info->backtrace_buffer, info->backtrace_size);
475 if (symbols) {
476 platform_print_backtrace_symbols("Lock acquisition call stack", symbols, info->backtrace_size, 1, 0, NULL);
478 }
479 }
480 }
481}
482
/**
 * Lock-debug monitor thread entry point.
 *
 * Loops while debug_thread_running is set: each iteration scans for locks
 * held longer than 100ms (check_long_held_locks) and consumes the
 * should_print_locks flag set by the SIGUSR1 handler. Never reads stdin -
 * the keyboard thread is the sole stdin reader.
 *
 * @param arg unused
 * @return NULL on exit
 */
static void *debug_thread_func(void *arg) {
  UNUSED(arg);

  log_debug("Lock debug thread started (use SIGUSR1 to print lock state)");
  LOCK_TRACE("debug thread loop starting");

  while (atomic_load(&g_lock_debug_manager.debug_thread_running)) {
    // Check for locks held > 100ms and log warnings
    check_long_held_locks();

    // Allow external trigger via flag (non-blocking, set by SIGUSR1 handler)
    if (atomic_exchange(&g_lock_debug_manager.should_print_locks, false)) {
      // NOTE(review): the print-state call appears elided in this view -
      // confirm against the full source
    }

    // Do not read from stdin. The keyboard thread is the sole reader.
    // Use SIGUSR1 to trigger lock state printing: kill -USR1 <pid>
#ifdef _WIN32
#else
#endif
    // NOTE(review): per-iteration sleep/backoff appears elided in this view -
    // without it this loop would spin; confirm against the full source

  }

  // Thread exiting
  return NULL;
}
517
518// ============================================================================
519// Public API Implementation
520// ============================================================================
521
522int lock_debug_init(void) {
523 log_debug("Starting lock debug system initialization...");
524
525 if (atomic_load(&g_lock_debug_manager.initialized)) {
526 log_info("Lock debug system already initialized");
527 return 0; // Already initialized
528 }
529
530 // Set initialization flag to prevent tracking during init
531 atomic_store(&g_initializing, true);
532
533 // Initialize uthash hash tables to NULL (required)
534 g_lock_debug_manager.lock_records = NULL;
535 g_lock_debug_manager.usage_stats = NULL;
536 g_lock_debug_manager.orphaned_releases = NULL;
537
538 // Initialize rwlocks for thread safety (uthash requires external locking)
539 if (rwlock_init(&g_lock_debug_manager.lock_records_lock) != 0) {
540 atomic_store(&g_initializing, false);
541 SET_ERRNO(ERROR_THREAD, "Failed to initialize lock_records rwlock");
542 return -1;
543 }
544
545 if (rwlock_init(&g_lock_debug_manager.usage_stats_lock) != 0) {
546 rwlock_destroy(&g_lock_debug_manager.lock_records_lock);
547 atomic_store(&g_initializing, false);
548 SET_ERRNO(ERROR_THREAD, "Failed to initialize usage_stats rwlock");
549 return -1;
550 }
551
552 if (rwlock_init(&g_lock_debug_manager.orphaned_releases_lock) != 0) {
553 rwlock_destroy(&g_lock_debug_manager.lock_records_lock);
554 rwlock_destroy(&g_lock_debug_manager.usage_stats_lock);
555 atomic_store(&g_initializing, false);
556 SET_ERRNO(ERROR_THREAD, "Failed to initialize orphaned_releases rwlock");
557 return -1;
558 }
559
560 // Initialize atomic variables
561 atomic_store(&g_lock_debug_manager.total_locks_acquired, 0);
562 atomic_store(&g_lock_debug_manager.total_locks_released, 0);
563 atomic_store(&g_lock_debug_manager.current_locks_held, 0);
564 atomic_store(&g_lock_debug_manager.debug_thread_running, false);
565 atomic_store(&g_lock_debug_manager.debug_thread_created, false);
566 atomic_store(&g_lock_debug_manager.should_print_locks, false);
567
568 // Initialize thread handle to invalid value
569#ifdef _WIN32
570 g_lock_debug_manager.debug_thread = NULL;
571#else
572 // On POSIX, pthread_t doesn't have a standard "invalid" value
573 // but we'll rely on the debug_thread_running flag
574#endif
575
576 // Clear initialization flag FIRST, then mark as initialized
577 // This prevents race condition where initialized=true but initializing=true
578 atomic_store(&g_initializing, false);
579 atomic_store(&g_lock_debug_manager.initialized, true);
580
581 // Note: lock_debug_destroy() will be called during normal shutdown sequence
582 // and lock_debug_cleanup_thread() will be called as one of the last things before exit
583
584 // log_info("Lock debug system initialized with uthash");
585 return 0;
586}
587
588int lock_debug_start_thread(void) {
589 if (!atomic_load(&g_lock_debug_manager.initialized)) {
590 return -1;
591 }
592
593 if (atomic_load(&g_lock_debug_manager.debug_thread_running)) {
594 return 0; // Already running
595 }
596
597 atomic_store(&g_lock_debug_manager.debug_thread_running, true);
598
599 int thread_result = asciichat_thread_create(&g_lock_debug_manager.debug_thread, debug_thread_func, NULL);
600
601 if (thread_result != 0) {
602 SET_ERRNO(ERROR_THREAD, "Failed to create lock debug thread: %d", thread_result);
603 atomic_store(&g_lock_debug_manager.debug_thread_running, false);
604 atomic_store(&g_lock_debug_manager.debug_thread_created, false);
605 return -1;
606 }
607
608 // Thread was successfully created
609 atomic_store(&g_lock_debug_manager.debug_thread_created, true);
610 return 0;
611}
612
613void lock_debug_trigger_print(void) {
614 if (atomic_load(&g_lock_debug_manager.initialized)) {
615 atomic_store(&g_lock_debug_manager.should_print_locks, true);
616 }
617}
618
/**
 * Tear down the lock debug system: signal the monitor thread to stop
 * (without joining - see lock_debug_cleanup_thread), then free all three
 * hash tables and destroy their rwlocks in a fixed order.
 *
 * Safe to call multiple times; only the first call does work.
 * The monitor thread is joined later so teardown can run from contexts
 * where joining would deadlock.
 */
void lock_debug_destroy(void) {
  // Use atomic exchange to ensure cleanup only runs once
  // This prevents double-cleanup from both atexit() and manual calls
  bool was_initialized = atomic_exchange(&g_lock_debug_manager.initialized, false);
  if (!was_initialized) {
    return;
  }

  // Signal debug thread to stop but don't join it yet
  // Thread joining will happen later in lock_debug_cleanup_thread()
  if (atomic_load(&g_lock_debug_manager.debug_thread_running)) {
    atomic_store(&g_lock_debug_manager.debug_thread_running, false);
  }

  // Clean up all remaining lock records
  // (initialized is already false, so concurrent trackers bail out early)
  rwlock_wrlock_impl(&g_lock_debug_manager.lock_records_lock);
  // Free all lock records using HASH_ITER
  uint32_t lock_records_cleaned = 0;
  lock_record_t *entry, *tmp;
  HASH_ITER(hash_handle, g_lock_debug_manager.lock_records, entry, tmp) {
    HASH_DELETE(hash_handle, g_lock_debug_manager.lock_records, entry);
    cleanup_lock_record_callback(entry, &lock_records_cleaned);
  }
  if (lock_records_cleaned > 0) {
    log_info("Cleaned up %u lock records", lock_records_cleaned);
  }

  rwlock_wrunlock_impl(&g_lock_debug_manager.lock_records_lock);
  rwlock_destroy(&g_lock_debug_manager.lock_records_lock);
  g_lock_debug_manager.lock_records = NULL;

  // Clean up usage statistics
  rwlock_wrlock_impl(&g_lock_debug_manager.usage_stats_lock);
  // Free all usage statistics using HASH_ITER
  lock_usage_stats_t *stats_entry, *stats_tmp;
  HASH_ITER(hash_handle, g_lock_debug_manager.usage_stats, stats_entry, stats_tmp) {
    HASH_DELETE(hash_handle, g_lock_debug_manager.usage_stats, stats_entry);
    cleanup_usage_stats_callback(stats_entry, NULL);
  }

  rwlock_wrunlock_impl(&g_lock_debug_manager.usage_stats_lock);
  rwlock_destroy(&g_lock_debug_manager.usage_stats_lock);
  g_lock_debug_manager.usage_stats = NULL;

  // Clean up orphaned releases
  rwlock_wrlock_impl(&g_lock_debug_manager.orphaned_releases_lock);

  // Free all orphaned release records using HASH_ITER
  uint32_t orphaned_releases_cleaned = 0;
  lock_record_t *orphan_entry, *orphan_tmp;
  HASH_ITER(hash_handle, g_lock_debug_manager.orphaned_releases, orphan_entry, orphan_tmp) {
    HASH_DELETE(hash_handle, g_lock_debug_manager.orphaned_releases, orphan_entry);
    cleanup_lock_record_callback(orphan_entry, &orphaned_releases_cleaned);
  }
  if (orphaned_releases_cleaned > 0) {
    log_info("Cleaned up %u orphaned release records", orphaned_releases_cleaned);
  }

  rwlock_wrunlock_impl(&g_lock_debug_manager.orphaned_releases_lock);
  rwlock_destroy(&g_lock_debug_manager.orphaned_releases_lock);
  g_lock_debug_manager.orphaned_releases = NULL;

  // initialized flag already set to false at the beginning via atomic_exchange
  log_debug("Lock debug system cleaned up");
}
684
685void lock_debug_cleanup_thread(void) {
686 // Check if thread is/was running and join it
687 if (atomic_load(&g_lock_debug_manager.debug_thread_running)) {
688 atomic_store(&g_lock_debug_manager.debug_thread_running, false);
689 }
690
691#ifdef _WIN32
692 // On Windows, check if thread handle is valid before joining
693 if (g_lock_debug_manager.debug_thread != NULL) {
694 int join_result = asciichat_thread_join(&g_lock_debug_manager.debug_thread, NULL);
695 if (join_result == 0) {
696 // Thread handle is now NULL due to cleanup in asciichat_thread_join
697 } else {
698 // Force cleanup if join failed
699 g_lock_debug_manager.debug_thread = NULL;
700 }
701 }
702#else
703 // On POSIX, only attempt join if thread was actually created
704 // Use the debug_thread_created flag to reliably track if the thread exists
705 if (atomic_load(&g_lock_debug_manager.debug_thread_created)) {
706 asciichat_thread_join(&g_lock_debug_manager.debug_thread, NULL);
707 // Clear the flag after joining
708 atomic_store(&g_lock_debug_manager.debug_thread_created, false);
709 }
710
711 // Terminal restore no longer needed - lock debug thread no longer touches stdin
712#endif
713}
714
715// ============================================================================
716// Common Helper Functions
717// ============================================================================
718
726static bool debug_should_skip_lock_tracking(void *lock_ptr, const char *file_name, const char *function_name) {
727 if (!lock_ptr || !file_name || !function_name) {
728 return true;
729 }
730
731 // Skip tracking if system is not fully initialized or during initialization
732 bool initialized = atomic_load(&g_lock_debug_manager.initialized);
733 bool initializing = atomic_load(&g_initializing);
734
735 if (!initialized || initializing) {
736 return true;
737 }
738
739 // Filter out ALL functions that our lock debug system uses internally
740 // to prevent infinite recursion and deadlock
741 // Note: uthash uses macros (HASH_FIND_INT, etc.) so there are no function names to filter
742 if (strstr(function_name, "log_") != NULL || strstr(function_name, "platform_") != NULL ||
743 strstr(function_name, "create_lock_record") != NULL || strstr(function_name, "update_usage_stats") != NULL ||
744 strstr(function_name, "print_") != NULL || strstr(function_name, "debug_") != NULL ||
745 strstr(function_name, "lock_debug") != NULL || strstr(file_name, "symbols.c") != NULL ||
746 strstr(function_name, "ascii_thread") != NULL ||
747 strstr(function_name, "maybe_rotate_log") != NULL || // Logging mutex - prevent deadlock during backtrace
748 strstr(function_name, "rotate_log") != NULL) { // Also filter other rotation functions
749 return true;
750 }
751
752 return false;
753}
754
759static uint32_t debug_decrement_lock_counter(void) {
760 // Decrement using atomic fetch_sub which avoids type size issues
761 uint_fast32_t current = atomic_load(&g_lock_debug_manager.current_locks_held);
762 if (current > 0) {
763 uint_fast32_t prev = atomic_fetch_sub(&g_lock_debug_manager.current_locks_held, 1);
764 // If prev was already 0, we underflowed - add it back
765 if (prev == 0) {
766 atomic_fetch_add(&g_lock_debug_manager.current_locks_held, 1);
767 return 0;
768 }
769 return (uint32_t)(prev - 1);
770 }
771 return 0;
772}
773
783static bool debug_create_and_insert_lock_record(void *lock_address, lock_type_t lock_type, const char *file_name,
784 int line_number, const char *function_name) {
785#ifndef DEBUG_LOCKS
786 UNUSED(lock_address);
787 UNUSED(lock_type);
788 UNUSED(file_name);
789 UNUSED(line_number);
790 UNUSED(function_name);
791#endif
792
793 lock_record_t *record = create_lock_record(lock_address, lock_type, file_name, line_number, function_name);
794 if (record) {
795 // Acquire write lock to make the entire operation atomic
796 LOCK_TRACE("acquiring lock_records_lock (write) for %s:%d %s", file_name, line_number, function_name);
797 rwlock_wrlock_impl(&g_lock_debug_manager.lock_records_lock);
798 LOCK_TRACE("acquired lock_records_lock (write)");
799
800 // Double-check cache is still initialized after acquiring lock
801 if (!atomic_load(&g_lock_debug_manager.initialized)) {
802 rwlock_wrunlock_impl(&g_lock_debug_manager.lock_records_lock);
803 free_lock_record(record);
804 return false;
805 }
806
807 // Check if entry already exists
808 lock_record_t *existing = NULL;
809 HASH_FIND(hash_handle, g_lock_debug_manager.lock_records, &record->key, sizeof(record->key), existing);
810
811 if (existing) {
812 // Entry exists - this shouldn't happen, but handle it gracefully
813 rwlock_wrunlock_impl(&g_lock_debug_manager.lock_records_lock);
814 free_lock_record(record);
815 return false;
816 }
817
818 // Add to hash table
819 HASH_ADD(hash_handle, g_lock_debug_manager.lock_records, key, sizeof(record->key), record);
820
821 // Release lock
822 rwlock_wrunlock_impl(&g_lock_debug_manager.lock_records_lock);
823 LOCK_TRACE("released lock_records_lock (write) - record added");
824
825 atomic_fetch_add(&g_lock_debug_manager.total_locks_acquired, 1);
826 atomic_fetch_add(&g_lock_debug_manager.current_locks_held, 1);
827 return true;
828 }
829 // Record allocation failed
830 LOCK_TRACE("record allocation failed");
831 return false;
832}
833
844static bool debug_process_tracked_unlock(void *lock_ptr, uint32_t key, const char *lock_type_str, const char *file_name,
845 int line_number, const char *function_name) {
846#ifndef DEBUG_LOCKS
847 UNUSED(lock_ptr);
848 UNUSED(lock_type_str);
849 UNUSED(file_name);
850 UNUSED(line_number);
851 UNUSED(function_name);
852#endif
853
854 // Variables to hold info for deferred logging (MUST log AFTER releasing lock_records_lock)
855 // Logging while holding lock_records_lock causes deadlock:
856 // - log_warn() may call mutex_lock() for rotation
857 // - mutex_lock macro calls debug_process_tracked_lock()
858 // - debug_process_tracked_lock() tries to acquire lock_records_lock (write)
859 // - DEADLOCK: we already hold it, rwlocks don't support recursive write locking
860 bool should_log_warning = false;
861 char deferred_duration_str[32] = {0};
862 char deferred_file_name[BUFFER_SIZE_SMALL] = {0};
863 int deferred_line_number = 0;
864 char deferred_function_name[128] = {0};
865 void *deferred_lock_ptr = NULL;
866 const char *deferred_lock_type_str = NULL;
867 char **deferred_backtrace_symbols = NULL;
868 int deferred_backtrace_size = 0;
869
870 // Acquire write lock for removal
871 LOCK_TRACE("acquiring lock_records_lock (write) for unlock %s %s:%d", lock_type_str, file_name, line_number);
872 rwlock_wrlock_impl(&g_lock_debug_manager.lock_records_lock);
873 LOCK_TRACE("acquired lock_records_lock (write) for unlock");
874
875 lock_record_t *record = NULL;
876 HASH_FIND(hash_handle, g_lock_debug_manager.lock_records, &key, sizeof(key), record);
877 if (record) {
878 // Calculate lock hold time BEFORE removing record
879 uint64_t current_time_ns = time_get_ns();
880 uint64_t held_ns = time_elapsed_ns(record->acquisition_time_ns, current_time_ns);
881 uint64_t held_ms = time_ns_to_ms(held_ns);
882
883 // Check if lock was held too long - collect info for deferred logging
884 if (held_ms > LOCK_HOLD_TIME_WARNING_MS) {
885 should_log_warning = true;
886 format_duration_ms((double)held_ms, deferred_duration_str, sizeof(deferred_duration_str));
887 SAFE_STRNCPY(deferred_file_name, file_name, sizeof(deferred_file_name) - 1);
888 deferred_line_number = line_number;
889 SAFE_STRNCPY(deferred_function_name, function_name, sizeof(deferred_function_name) - 1);
890 deferred_lock_ptr = lock_ptr;
891 deferred_lock_type_str = lock_type_str;
892
893 // Copy backtrace symbols if available (we need to copy, not just reference)
894 if (record->backtrace_size > 0 && record->backtrace_symbols) {
895 deferred_backtrace_size = record->backtrace_size;
896 deferred_backtrace_symbols = record->backtrace_symbols;
897 record->backtrace_symbols = NULL; // Transfer ownership to avoid double-free
898 }
899 }
900
901 HASH_DELETE(hash_handle, g_lock_debug_manager.lock_records, record);
902 rwlock_wrunlock_impl(&g_lock_debug_manager.lock_records_lock);
903 LOCK_TRACE("released lock_records_lock (write) - record removed");
904
905 free_lock_record(record);
906 atomic_fetch_add(&g_lock_debug_manager.total_locks_released, 1);
907 debug_decrement_lock_counter();
908
909 // NOW safe to log - we no longer hold lock_records_lock
910 if (should_log_warning) {
911 log_warn("Lock held for %s (threshold: %d ms) at %s:%d in %s()\n"
912 " Lock type: %s, address: %p",
913 deferred_duration_str, LOCK_HOLD_TIME_WARNING_MS, extract_project_relative_path(deferred_file_name),
914 deferred_line_number, deferred_function_name, deferred_lock_type_str, deferred_lock_ptr);
915
916 // Print backtrace from when lock was acquired
917 if (deferred_backtrace_size > 0 && deferred_backtrace_symbols) {
918 platform_print_backtrace_symbols("Backtrace from lock acquisition", deferred_backtrace_symbols,
919 deferred_backtrace_size, 0, 10, NULL);
920 free(deferred_backtrace_symbols); // Free the transferred ownership
921 } else {
922 // No backtrace available, print current backtrace
923 log_warn("No backtrace available. Current backtrace:");
924 platform_print_backtrace(2); // Skip 2 frames (this function and debug_process_tracked_unlock)
925 }
926 }
927
928 return true;
929 }
930
931 rwlock_wrunlock_impl(&g_lock_debug_manager.lock_records_lock);
932 LOCK_TRACE("released lock_records_lock (write) - no record found for %s", lock_type_str);
933 return false;
934}
935
945static void debug_process_untracked_unlock(void *lock_ptr, uint32_t key, const char *lock_type_str,
946 const char *file_name, int line_number, const char *function_name) {
947 atomic_fetch_add(&g_lock_debug_manager.total_locks_released, 1);
948 uint32_t current_held = atomic_load(&g_lock_debug_manager.current_locks_held);
949 if (current_held > 0) {
950 debug_decrement_lock_counter();
951 } else {
952 // FIXME: this SHOULD be an error. asciichat_errno is broken somehow. it's too noisy for now.
953 // SET_ERRNO(ERROR_INVALID_STATE, "Attempting to release %s lock when no locks held!", lock_type_str);
954 }
955#ifdef DEBUG_LOCKS
956 log_warn_every(10 * NS_PER_SEC_INT, "Attempting to release %s lock when no locks held!", lock_type_str);
957 // SET_ERRNO(ERROR_INVALID_STATE,
958 // "%s UNTRACKED RELEASED: %p (key=%u) at %s:%d in %s() - total=%llu, held=%u (lock was tracked but record "
959 // "was lost)",
960 // lock_type_str, lock_ptr, key, extract_project_relative_path(file_name), line_number, function_name,
961 // (unsigned long long)released, held);
962#endif
963
964 // Create an orphaned release record to track this problematic unlock
965 lock_record_t *orphan_record = SAFE_CALLOC(1, sizeof(lock_record_t), lock_record_t *);
966 if (orphan_record) {
967 orphan_record->key = key;
968 orphan_record->lock_address = lock_ptr;
969 if (strcmp(lock_type_str, "MUTEX") == 0) {
970 orphan_record->lock_type = LOCK_TYPE_MUTEX;
971 } else if (strcmp(lock_type_str, "READ") == 0) {
972 orphan_record->lock_type = LOCK_TYPE_RWLOCK_READ;
973 } else if (strcmp(lock_type_str, "WRITE") == 0) {
974 orphan_record->lock_type = LOCK_TYPE_RWLOCK_WRITE;
975 }
976 orphan_record->thread_id = asciichat_thread_current_id();
977 orphan_record->file_name = file_name;
978 orphan_record->line_number = line_number;
979 orphan_record->function_name = function_name;
980 orphan_record->acquisition_time_ns = time_get_ns(); // Use release time
981
982 // Capture backtrace for this orphaned release
983#ifdef _WIN32
984 orphan_record->backtrace_size =
985 CaptureStackBackTrace(1, MAX_BACKTRACE_FRAMES, orphan_record->backtrace_buffer, NULL);
986#else
987 orphan_record->backtrace_size = platform_backtrace(orphan_record->backtrace_buffer, MAX_BACKTRACE_FRAMES);
988#endif
989
990 if (orphan_record->backtrace_size > 0) {
991 orphan_record->backtrace_symbols =
992 platform_backtrace_symbols(orphan_record->backtrace_buffer, orphan_record->backtrace_size);
993 }
994
995 // Store in orphaned releases hash table for later analysis
996 rwlock_wrlock_impl(&g_lock_debug_manager.orphaned_releases_lock);
997 HASH_ADD(hash_handle, g_lock_debug_manager.orphaned_releases, key, sizeof(orphan_record->key), orphan_record);
998 rwlock_wrunlock_impl(&g_lock_debug_manager.orphaned_releases_lock);
999 }
1000}
1001
1002// ============================================================================
1003// Tracked Lock Functions Implementation
1004// ============================================================================
1005
1006int debug_mutex_lock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name) {
1007 LOCK_OP_TRACE("LOCK", "MUTEX", file_name, line_number, function_name);
1008 if (debug_should_skip_lock_tracking(mutex, file_name, function_name)) {
1009 return mutex_lock_impl(mutex);
1010 }
1011
1012 // Acquire the actual lock first (call implementation to avoid recursion)
1013 int result = mutex_lock_impl(mutex);
1014 if (result != 0) {
1015 return result;
1016 }
1017
1018 // Create and add lock record
1019 debug_create_and_insert_lock_record(mutex, LOCK_TYPE_MUTEX, file_name, line_number, function_name);
1020
1021 return 0;
1022}
1023
1024int debug_mutex_trylock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name) {
1025 LOCK_OP_TRACE("TRYLOCK", "MUTEX", file_name, line_number, function_name);
1026 if (debug_should_skip_lock_tracking(mutex, file_name, function_name)) {
1027 return mutex_trylock_impl(mutex);
1028 }
1029
1030 // Try to acquire the lock (call implementation to avoid recursion)
1031 int result = mutex_trylock_impl(mutex);
1032 if (result != 0) {
1033 // Lock not acquired - no tracking needed
1034 return result;
1035 }
1036
1037 // Lock acquired - create and add lock record
1038 debug_create_and_insert_lock_record(mutex, LOCK_TYPE_MUTEX, file_name, line_number, function_name);
1039
1040 return 0;
1041}
1042
1043int debug_mutex_unlock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name) {
1044 LOCK_OP_TRACE("UNLOCK", "MUTEX", file_name, line_number, function_name);
1045 if (debug_should_skip_lock_tracking(mutex, file_name, function_name)) {
1046 return mutex_unlock_impl(mutex);
1047 }
1048
1049 // Look for mutex lock record specifically
1050 uint32_t key = lock_record_key(mutex, LOCK_TYPE_MUTEX);
1051 if (!debug_process_tracked_unlock(mutex, key, "MUTEX", file_name, line_number, function_name)) {
1052 // No record found - this is a genuine tracking error
1053 debug_process_untracked_unlock(mutex, key, "MUTEX", file_name, line_number, function_name);
1054 }
1055
1056 // Unlock the actual mutex (call implementation to avoid recursion)
1057 return mutex_unlock_impl(mutex);
1058}
1059
1060int debug_rwlock_rdlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name) {
1061 LOCK_OP_TRACE("LOCK", "RWLOCK_RD", file_name, line_number, function_name);
1062 if (debug_should_skip_lock_tracking(rwlock, file_name, function_name)) {
1063 return rwlock_rdlock_impl(rwlock);
1064 }
1065
1066 // Acquire the actual lock first (call implementation to avoid recursion)
1067 int result = rwlock_rdlock_impl(rwlock);
1068 if (result != 0) {
1069 return result;
1070 }
1071
1072 // Create and add lock record
1073 debug_create_and_insert_lock_record(rwlock, LOCK_TYPE_RWLOCK_READ, file_name, line_number, function_name);
1074
1075 return 0;
1076}
1077
1078int debug_rwlock_wrlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name) {
1079 LOCK_OP_TRACE("LOCK", "RWLOCK_WR", file_name, line_number, function_name);
1080 if (debug_should_skip_lock_tracking(rwlock, file_name, function_name)) {
1081 return rwlock_wrlock_impl(rwlock);
1082 }
1083
1084 // Acquire the actual lock first (call implementation to avoid recursion)
1085 int result = rwlock_wrlock_impl(rwlock);
1086 if (result != 0) {
1087 return result;
1088 }
1089
1090 // Create and add lock record
1091 debug_create_and_insert_lock_record(rwlock, LOCK_TYPE_RWLOCK_WRITE, file_name, line_number, function_name);
1092
1093 return 0;
1094}
1095
1096int debug_rwlock_rdunlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name) {
1097 LOCK_OP_TRACE("UNLOCK", "RWLOCK_RD", file_name, line_number, function_name);
1098 if (debug_should_skip_lock_tracking(rwlock, file_name, function_name)) {
1100 }
1101
1102 // Look for read lock record specifically
1103 uint32_t read_key = lock_record_key(rwlock, LOCK_TYPE_RWLOCK_READ);
1104
1105 if (!debug_process_tracked_unlock(rwlock, read_key, "READ", file_name, line_number, function_name)) {
1106 debug_process_untracked_unlock(rwlock, read_key, "READ", file_name, line_number, function_name);
1107 }
1108
1110}
1111
1112int debug_rwlock_wrunlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name) {
1113 LOCK_OP_TRACE("UNLOCK", "RWLOCK_WR", file_name, line_number, function_name);
1114 if (debug_should_skip_lock_tracking(rwlock, file_name, function_name)) {
1116 }
1117
1118 // Look for write lock record specifically
1119 uint32_t write_key = lock_record_key(rwlock, LOCK_TYPE_RWLOCK_WRITE);
1120 if (!debug_process_tracked_unlock(rwlock, write_key, "WRITE", file_name, line_number, function_name)) {
1121 debug_process_untracked_unlock(rwlock, write_key, "WRITE", file_name, line_number, function_name);
1122 }
1123
1125}
1126
1127// Removed debug_rwlock_unlock - generic unlock is ambiguous and problematic
1128// Use debug_rwlock_rdunlock or debug_rwlock_wrunlock instead
1129
1130// ============================================================================
1131// Statistics Functions
1132// ============================================================================
1133
1134void lock_debug_get_stats(uint64_t *total_acquired, uint64_t *total_released, uint32_t *currently_held) {
1135 if (total_acquired) {
1136 *total_acquired = atomic_load(&g_lock_debug_manager.total_locks_acquired);
1137 }
1138 if (total_released) {
1139 *total_released = atomic_load(&g_lock_debug_manager.total_locks_released);
1140 }
1141 if (currently_held) {
1142 *currently_held = atomic_load(&g_lock_debug_manager.current_locks_held);
1143 }
1144}
1145
1146bool lock_debug_is_initialized(void) {
1147 bool initialized = atomic_load(&g_lock_debug_manager.initialized);
1148 bool initializing = atomic_load(&g_initializing);
1149 bool result = initialized && !initializing;
1150
1151 return result;
1152}
1153
1154void lock_debug_print_state(void) {
1155 if (!atomic_load(&g_lock_debug_manager.initialized)) {
1156 log_warn("Lock debug system not initialized.");
1157 return;
1158 }
1159
1160 // Acquire read lock for lock_records
1161 rwlock_rdlock_impl(&g_lock_debug_manager.lock_records_lock);
1162
1163 // Read counters atomically while holding the lock to ensure consistency with lock records
1164 uint64_t total_acquired = atomic_load(&g_lock_debug_manager.total_locks_acquired);
1165 uint64_t total_released = atomic_load(&g_lock_debug_manager.total_locks_released);
1166 uint32_t currently_held = atomic_load(&g_lock_debug_manager.current_locks_held);
1167
1168 // Build comprehensive log message with all information
1169 char log_buffer[32768]; // Large buffer for lock details
1170 int offset = 0;
1171
1172 // Collect lock information during iteration
1173 struct lock_collector {
1174 uint32_t count;
1175 char *buffer;
1176 int *offset;
1177 } lock_collector = {0, log_buffer, &offset};
1178
1179 lock_record_t *entry, *tmp;
1180 HASH_ITER(hash_handle, g_lock_debug_manager.lock_records, entry, tmp) {
1181 collect_lock_record_callback(entry, &lock_collector);
1182 }
1183 uint32_t active_locks = lock_collector.count;
1184
1185 rwlock_rdunlock_impl(&g_lock_debug_manager.lock_records_lock);
1186
1187 // Header
1188 offset +=
1189 safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset), "=== LOCK DEBUG STATE ===\n");
1190
1191 // Historical Statistics
1192 offset +=
1193 safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset), "Historical Statistics:\n");
1194 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1195 " Total locks acquired: %llu\n", (unsigned long long)total_acquired);
1196 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1197 " Total locks released: %llu\n", (unsigned long long)total_released);
1198 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset), " Currently held: %u\n",
1199 currently_held);
1200
1201 // Check for underflow before subtraction to avoid UB
1202 if (total_acquired >= total_released) {
1203 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1204 " Net locks (acquired - released): %lld\n", (long long)(total_acquired - total_released));
1205 } else {
1206 // This shouldn't happen - means more releases than acquires
1207 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1208 " *** ERROR: More releases (%llu) than acquires (%llu)! Difference: -%lld ***\n",
1209 (unsigned long long)total_released, (unsigned long long)total_acquired,
1210 (long long)(total_released - total_acquired));
1211 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1212 " *** This indicates lock tracking was not enabled for some acquires ***\n");
1213 }
1214
1215 // Currently Active Locks
1216 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1217 "\n=== Currently Active Locks ===\n");
1218 if (active_locks == 0) {
1219 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1220 " No locks currently held.\n");
1221 // Check for consistency issues
1222 if (currently_held > 0) {
1223 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1224 " *** CONSISTENCY WARNING: Counter shows %u held locks but no records found! ***\n",
1225 currently_held);
1226 offset +=
1227 safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1228 " *** This may indicate a crash during lock acquisition or hash table corruption. ***\n");
1229
1230 // Additional debug: Check hash table statistics
1231 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1232 " *** DEBUG: Hash table stats for lock_records: ***\n");
1233 rwlock_rdlock_impl(&g_lock_debug_manager.lock_records_lock);
1234 size_t count = HASH_CNT(hash_handle, g_lock_debug_manager.lock_records);
1235 rwlock_rdunlock_impl(&g_lock_debug_manager.lock_records_lock);
1236 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1237 " *** Hash table size: %zu ***\n", count);
1238 if (count > 0) {
1239 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1240 " *** Hash table has entries but iteration didn't find them! ***\n");
1241 }
1242 }
1243 } else {
1244 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset), " Active locks: %u\n",
1245 active_locks);
1246 // Verify consistency the other way
1247 if (active_locks != currently_held) {
1248 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1249 " *** CONSISTENCY WARNING: Found %u active locks but counter shows %u! ***\n",
1250 active_locks, currently_held);
1251 }
1252
1253 // The lock details are already in the buffer from collect_lock_record_callback
1254 // No need to add them again here
1255 }
1256
1257 // Print usage statistics by code location
1258 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1259 "\n=== Lock Usage Statistics by Code Location ===\n");
1260 rwlock_rdlock_impl(&g_lock_debug_manager.usage_stats_lock);
1261
1262 uint32_t total_usage_locations = 0;
1263 lock_usage_stats_t *stats_entry, *stats_tmp;
1264 HASH_ITER(hash_handle, g_lock_debug_manager.usage_stats, stats_entry, stats_tmp) {
1265 print_usage_stats_callback(stats_entry, &total_usage_locations);
1266 }
1267
1268 rwlock_rdunlock_impl(&g_lock_debug_manager.usage_stats_lock);
1269
1270 if (total_usage_locations == 0) {
1271 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1272 " No lock usage statistics available.\n");
1273 } else {
1274 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1275 " Total code locations with lock usage: %u\n", total_usage_locations);
1276 }
1277
1278 // Print orphaned releases (unlocks without corresponding locks)
1279 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1280 "\n=== Orphaned Releases (unlocks without corresponding locks) ===\n");
1281 rwlock_rdlock_impl(&g_lock_debug_manager.orphaned_releases_lock);
1282
1283 uint32_t orphan_count = 0;
1284 lock_record_t *orphan_entry, *orphan_tmp;
1285 HASH_ITER(hash_handle, g_lock_debug_manager.orphaned_releases, orphan_entry, orphan_tmp) {
1286 orphan_count++;
1287
1288 // Get lock type string
1289 const char *lock_type_str;
1290 switch (orphan_entry->lock_type) {
1291 case LOCK_TYPE_MUTEX:
1292 lock_type_str = "MUTEX";
1293 break;
1294 case LOCK_TYPE_RWLOCK_READ:
1295 lock_type_str = "RWLOCK_READ";
1296 break;
1297 case LOCK_TYPE_RWLOCK_WRITE:
1298 lock_type_str = "RWLOCK_WRITE";
1299 break;
1300 default:
1301 lock_type_str = "UNKNOWN";
1302 break;
1303 }
1304
1305 offset +=
1306 safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1307 "Orphaned Release #%u: %s at %p\n", orphan_count, lock_type_str, orphan_entry->lock_address);
1308 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset), " Thread ID: %llu\n",
1309 (unsigned long long)orphan_entry->thread_id);
1310 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1311 " Released: %s:%d in %s()\n", extract_project_relative_path(orphan_entry->file_name),
1312 orphan_entry->line_number, orphan_entry->function_name);
1313 char release_time_str[32];
1314 format_duration_ns((double)orphan_entry->acquisition_time_ns, release_time_str, sizeof(release_time_str));
1315 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1316 " Released at: %s (nanosecond %llu)\n", release_time_str,
1317 (unsigned long long)orphan_entry->acquisition_time_ns);
1318
1319 // Print backtrace for orphaned release
1320 if (orphan_entry->backtrace_size > 0) {
1321 char **symbols = platform_backtrace_symbols(orphan_entry->backtrace_buffer, orphan_entry->backtrace_size);
1322 offset +=
1323 platform_format_backtrace_symbols(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1324 "Release call stack", symbols, orphan_entry->backtrace_size, 0, 0, NULL);
1325 if (symbols) {
1327 }
1328 } else {
1329 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1330 " Release call stack: <capture failed>\n");
1331 }
1332 }
1333
1334 rwlock_rdunlock_impl(&g_lock_debug_manager.orphaned_releases_lock);
1335
1336 if (orphan_count == 0) {
1337 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1338 " No orphaned releases found.\n");
1339 } else {
1340 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1341 " Total orphaned releases: %u\n", orphan_count);
1342 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1343 " *** WARNING: %u releases without corresponding locks detected! ***\n", orphan_count);
1344 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1345 " *** This indicates double unlocks or missing lock acquisitions! ***\n");
1346 }
1347
1348 // End marker
1349 offset += safe_snprintf(log_buffer + offset, SAFE_BUFFER_SIZE(sizeof(log_buffer), offset),
1350 "\n=== End Lock Debug State ===\n");
1351
1352 // Print all at once
1353 log_info("%s", log_buffer);
1354}
1355
1356#else // !DEBUG_LOCKS - Provide stub implementations when lock debugging is disabled
1357
1358#include <stdbool.h>
1359#include <stdint.h>
1360#include <ascii-chat/platform/mutex.h>
1361#include <ascii-chat/platform/rwlock.h>
1362
1363// Stub implementations - no-ops when DEBUG_LOCKS is not defined
// BUGFIX: restored the stub function signatures and the one-line no-op stubs
// that were missing here - without them this translation unit does not
// compile when DEBUG_LOCKS is disabled.

/** @brief No-op init when lock debugging is disabled. @return Always 0. */
int lock_debug_init(void) {
  return 0;
}

/** @brief No-op thread start when lock debugging is disabled. @return Always 0. */
int lock_debug_start_thread(void) {
  return 0;
}

/** @brief No-op teardown when lock debugging is disabled. */
void lock_debug_destroy(void) {}

/** @brief No-op thread cleanup when lock debugging is disabled. */
void lock_debug_cleanup_thread(void) {}

/** @brief Reports all-zero statistics when lock debugging is disabled. */
void lock_debug_get_stats(uint64_t *total_acquired, uint64_t *total_released, uint32_t *currently_held) {
  if (total_acquired)
    *total_acquired = 0;
  if (total_released)
    *total_released = 0;
  if (currently_held)
    *currently_held = 0;
}

/** @brief Always false when lock debugging is disabled. */
bool lock_debug_is_initialized(void) {
  return false;
}

/** @brief No-op state dump when lock debugging is disabled. */
void lock_debug_print_state(void) {}

/** @brief No-op print trigger when lock debugging is disabled. */
void lock_debug_trigger_print(void) {}
1386// Stub implementations for debug_* functions - just pass through to _impl versions
1387// These are needed because the macros in mutex.h/rwlock.h reference them even when
1388// lock_debug_is_initialized() returns false (linker still needs symbols to exist)
1389int debug_mutex_lock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name) {
1390 (void)file_name;
1391 (void)line_number;
1392 (void)function_name;
1393 return mutex_lock_impl(mutex);
1394}
1395
1396int debug_mutex_trylock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name) {
1397 (void)file_name;
1398 (void)line_number;
1399 (void)function_name;
1400 return mutex_trylock_impl(mutex);
1401}
1402
1403int debug_mutex_unlock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name) {
1404 (void)file_name;
1405 (void)line_number;
1406 (void)function_name;
1407 return mutex_unlock_impl(mutex);
1408}
1409
1410int debug_rwlock_rdlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name) {
1411 (void)file_name;
1412 (void)line_number;
1413 (void)function_name;
1414 return rwlock_rdlock_impl(rwlock);
1415}
1416
1417int debug_rwlock_wrlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name) {
1418 (void)file_name;
1419 (void)line_number;
1420 (void)function_name;
1421 return rwlock_wrlock_impl(rwlock);
1422}
1423
1424int debug_rwlock_rdunlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name) {
1425 (void)file_name;
1426 (void)line_number;
1427 (void)function_name;
1429}
1430
1431int debug_rwlock_wrunlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name) {
1432 (void)file_name;
1433 (void)line_number;
1434 (void)function_name;
1436}
1437
1438#endif // DEBUG_LOCKS
int thread_id
int buffer_size
Size of circular buffer.
Definition grep.c:84
int lock_debug_init(void)
Definition lock.c:1364
void lock_debug_trigger_print(void)
Definition lock.c:1384
int debug_rwlock_rdlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name)
Definition lock.c:1410
int debug_rwlock_wrlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name)
Definition lock.c:1417
void lock_debug_cleanup_thread(void)
Definition lock.c:1371
void lock_debug_get_stats(uint64_t *total_acquired, uint64_t *total_released, uint32_t *currently_held)
Definition lock.c:1372
void lock_debug_destroy(void)
Definition lock.c:1370
int debug_mutex_unlock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name)
Definition lock.c:1403
int debug_mutex_lock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name)
Definition lock.c:1389
int lock_debug_start_thread(void)
Definition lock.c:1367
void lock_debug_print_state(void)
Definition lock.c:1383
int debug_rwlock_wrunlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name)
Definition lock.c:1431
bool lock_debug_is_initialized(void)
Definition lock.c:1380
int debug_mutex_trylock(mutex_t *mutex, const char *file_name, int line_number, const char *function_name)
Definition lock.c:1396
int debug_rwlock_rdunlock(rwlock_t *rwlock, const char *file_name, int line_number, const char *function_name)
Definition lock.c:1424
bool initialized
Definition mmap.c:38
const char * extract_project_relative_path(const char *file)
Definition path.c:410
void platform_sleep_ms(unsigned int ms)
int safe_snprintf(char *buffer, size_t buffer_size, const char *format,...)
Safe formatted string printing to buffer.
Definition system.c:456
void platform_print_backtrace_symbols(const char *label, char **symbols, int count, int skip_frames, int max_frames, backtrace_frame_filter_t filter)
Print pre-resolved backtrace symbols with colored terminal output and plain log file output.
Definition system.c:535
int mutex_lock_impl(mutex_t *mutex)
Definition threading.c:26
int rwlock_wrunlock_impl(rwlock_t *rwlock)
Definition threading.c:79
int asciichat_thread_create(asciichat_thread_t *thread, void *(*start_routine)(void *), void *arg)
Definition threading.c:42
int rwlock_wrlock_impl(rwlock_t *rwlock)
Definition threading.c:71
int mutex_trylock_impl(mutex_t *mutex)
Definition threading.c:31
int rwlock_rdunlock_impl(rwlock_t *rwlock)
Definition threading.c:75
int rwlock_init(rwlock_t *rwlock)
Definition threading.c:63
int asciichat_thread_join(asciichat_thread_t *thread, void **retval)
Definition threading.c:46
int rwlock_rdlock_impl(rwlock_t *rwlock)
Definition threading.c:67
uint64_t asciichat_thread_current_id(void)
Definition threading.c:84
int mutex_unlock_impl(mutex_t *mutex)
Definition threading.c:36
rwlock_t rwlock
Read-write lock for thread-safe access (uthash requires external locking)
Definition util/time.c:32
uint64_t time_get_ns(void)
Definition util/time.c:48
int format_duration_ns(double nanoseconds, char *buffer, size_t buffer_size)
Definition util/time.c:275
uint64_t time_elapsed_ns(uint64_t start_ns, uint64_t end_ns)
Definition util/time.c:90
int format_duration_ms(double milliseconds, char *buffer, size_t buffer_size)
Definition util/time.c:357
int platform_backtrace(void **buffer, int size)
Definition util.c:14
void platform_backtrace_symbols_destroy(char **symbols)
Definition util.c:26
void platform_print_backtrace(int skip_frames)
Definition util.c:42
char ** platform_backtrace_symbols(void *const *buffer, int size)
Definition util.c:20