ascii-chat 0.8.38
Real-time terminal-based video chat with ASCII art conversion
Loading...
Searching...
No Matches
stream.c
Go to the documentation of this file.
1
131#include <stdatomic.h>
132#include <stdio.h>
133#include <string.h>
134#include <time.h>
135#include <math.h>
136#include <float.h>
137
138#include "main.h"
139#include "stream.h"
140#include "client.h"
141#include <ascii-chat/common.h>
142#include <ascii-chat/util/endian.h>
143#include <ascii-chat/buffer_pool.h>
144#include <ascii-chat/network/packet_queue.h>
145#include <ascii-chat/ringbuffer.h>
146#include <ascii-chat/video/video_frame.h>
147#include <ascii-chat/video/image.h>
148#include <ascii-chat/video/ascii.h>
149#include <ascii-chat/video/color_filter.h>
150#include <ascii-chat/util/aspect_ratio.h>
151#include <ascii-chat/util/endian.h>
152#include <ascii-chat/util/time.h>
153#include <ascii-chat/util/image.h>
154
// Last observed number of clients actively sending video. Compared (and
// updated via CAS) in create_mixed_ascii_frame_for_client() to detect grid
// layout changes so the caller can broadcast a console clear.
static atomic_int g_previous_active_video_count = 0;
165
204typedef struct {
206 image_t *image;
208 uint32_t client_id;
212
220static int collect_video_sources(image_source_t *sources, int max_sources) {
221 int source_count = 0;
222
223 // Check for shutdown before acquiring locks to prevent lock corruption
224 if (atomic_load(&g_server_should_exit)) {
225 return 0;
226 }
227
228 // Collect client info snapshots WITHOUT holding rwlock
229 typedef struct {
230 uint32_t client_id;
231 bool is_active;
232 bool is_sending_video;
233 video_frame_buffer_t *video_buffer;
234 } client_snapshot_t;
235
236 client_snapshot_t client_snapshots[MAX_CLIENTS];
237 int snapshot_count = 0;
238
239 // NO LOCK: All fields are atomic or stable pointers
240 for (int i = 0; i < MAX_CLIENTS; i++) {
241 client_info_t *client = &g_client_manager.clients[i];
242
243 if (atomic_load(&client->client_id) == 0) {
244 continue; // Skip uninitialized clients
245 }
246
247 // Snapshot all needed client state (all atomic reads or stable pointers)
248 client_snapshots[snapshot_count].client_id = atomic_load(&client->client_id);
249 client_snapshots[snapshot_count].is_active = atomic_load(&client->active);
250 client_snapshots[snapshot_count].is_sending_video = atomic_load(&client->is_sending_video);
251 client_snapshots[snapshot_count].video_buffer = client->incoming_video_buffer; // Stable pointer
252 snapshot_count++;
253 }
254
255 // Process frames (expensive operations)
256 log_dev_every(5 * NS_PER_MS_INT, "collect_video_sources: Processing %d snapshots", snapshot_count);
257 for (int i = 0; i < snapshot_count && source_count < max_sources; i++) {
258 client_snapshot_t *snap = &client_snapshots[i];
259
260 if (!snap->is_active) {
261 log_dev_every(5 * NS_PER_MS_INT, "collect_video_sources: Skipping inactive client %u", snap->client_id);
262 continue;
263 }
264
265 log_dev_every(5 * NS_PER_MS_INT, "collect_video_sources: Client %u: is_sending_video=%d", snap->client_id,
266 snap->is_sending_video);
267
268 sources[source_count].client_id = snap->client_id;
269 sources[source_count].image = NULL; // Will be set if video is available
270 sources[source_count].has_video = false;
271
272 // Declare these outside the if block so they're accessible later
273 multi_source_frame_t current_frame = {0};
274 bool got_new_frame = false;
275
276 // Always try to get the last available video frame for consistent ASCII generation
277 // The double buffer ensures we always have the last valid frame
278 if (snap->is_sending_video && snap->video_buffer) {
279 // Get the latest frame (always available from double buffer)
280 const video_frame_t *frame = video_frame_get_latest(snap->video_buffer);
281
282 if (!frame) {
283 continue; // Skip to next snapshot
284 }
285
286 // Try to access frame fields ONE AT A TIME to pinpoint the hang
287 void *frame_data_ptr = frame->data;
288
289 size_t frame_size_val = frame->size;
290
291 // Compute hash of incoming frame to verify it's changing
292 uint32_t incoming_hash = 0;
293 if (frame_data_ptr && frame_size_val > 0) {
294 for (size_t i = 0; i < frame_size_val && i < 1000; i++) {
295 uint8_t byte = ((unsigned char *)frame_data_ptr)[i];
296 incoming_hash = (uint32_t)((uint64_t)incoming_hash * 31 + byte);
297 }
298 }
299
300 // DIAGNOSTIC: Track incoming frame changes from buffer
301 static uint32_t last_buffer_hash = 0;
302 if (incoming_hash != last_buffer_hash) {
303 log_info("BUFFER_FRAME CHANGE: Client %u got NEW frame from buffer: hash=0x%08x (prev=0x%08x) size=%zu",
304 snap->client_id, incoming_hash, last_buffer_hash, frame_size_val);
305 last_buffer_hash = incoming_hash;
306 } else {
307 log_dev_every(25000, "BUFFER_FRAME DUPLICATE: Client %u frame hash=0x%08x size=%zu (no change)",
308 snap->client_id, incoming_hash, frame_size_val);
309 }
310
311 // DETAILED BUFFER INSPECTION: Extract and log frame dimensions + first pixels
312 if (frame_data_ptr && frame_size_val >= 8) {
313 uint32_t width_net, height_net;
314 memcpy(&width_net, frame_data_ptr, sizeof(uint32_t));
315 memcpy(&height_net, (char *)frame_data_ptr + sizeof(uint32_t), sizeof(uint32_t));
316 uint32_t width = NET_TO_HOST_U32(width_net);
317 uint32_t height = NET_TO_HOST_U32(height_net);
318
319 // Extract first 3 RGB pixels to inspect actual pixel data
320 uint8_t *pixel_ptr = (uint8_t *)frame_data_ptr + 8;
321 uint32_t first_pixel_rgb = 0;
322 if (frame_size_val >= 11) {
323 first_pixel_rgb = ((uint32_t)pixel_ptr[0] << 16) | ((uint32_t)pixel_ptr[1] << 8) | (uint32_t)pixel_ptr[2];
324 }
325
326 log_info("BUFFER_INSPECT: Client %u dims=%ux%u pixel_data_size=%zu first_pixel_rgb=0x%06x data_hash=0x%08x",
327 snap->client_id, width, height, frame_size_val - 8, first_pixel_rgb, incoming_hash);
328 }
329
330 log_debug_every(5 * NS_PER_MS_INT, "Video mixer: client %u incoming frame hash=0x%08x size=%zu", snap->client_id,
331 incoming_hash, frame_size_val);
332
333 if (frame_data_ptr && frame_size_val > 0 && frame_size_val >= (sizeof(uint32_t) * 2 + 3)) {
334 // PARSE AND VALIDATE DIMENSIONS BEFORE COPYING
335 // Don't trust frame->size - calculate correct size from dimensions
336 uint32_t peek_width = NET_TO_HOST_U32(read_u32_unaligned(frame_data_ptr));
337 uint32_t peek_height = NET_TO_HOST_U32(read_u32_unaligned(frame_data_ptr + sizeof(uint32_t)));
338
339 // Reject obviously corrupted dimensions
340 if (peek_width == 0 || peek_height == 0 || peek_width > 4096 || peek_height > 2160) {
341 log_debug("Per-client %u: rejected dimensions %ux%u as corrupted", snap->client_id, peek_width, peek_height);
342 continue;
343 }
344
345 // Validate dimensions
346 if (image_validate_dimensions((size_t)peek_width, (size_t)peek_height) != ASCIICHAT_OK) {
347 continue;
348 }
349
350 // Calculate CORRECT frame size based on dimensions (don't trust frame->size)
351 size_t correct_frame_size = sizeof(uint32_t) * 2;
352 {
353 size_t rgb_size = 0;
354 if (image_calc_rgb_size((size_t)peek_width, (size_t)peek_height, &rgb_size) != ASCIICHAT_OK) {
355 log_debug("Per-client: rgb_size calc failed for %ux%u", peek_width, peek_height);
356 continue;
357 }
358 correct_frame_size += rgb_size;
359 }
360
361 log_debug_every(NS_PER_MS_INT, "Per-client: frame dimensions=%ux%u, frame_size=%zu, correct_size=%zu",
362 peek_width, peek_height, frame_size_val, correct_frame_size);
363
364 // Verify frame is at least large enough for the correct size
365 if (frame_size_val < correct_frame_size) {
366 log_debug("Per-client: frame too small: got %zu, need %zu", frame_size_val, correct_frame_size);
367 continue;
368 }
369
370 // We have frame data - copy ONLY the correct amount based on dimensions
371 // Use SAFE_MALLOC (not buffer pool - image_new_from_pool uses pool and causes overlap)
372 current_frame.data = SAFE_MALLOC(correct_frame_size, void *);
373
374 if (current_frame.data) {
375 memcpy(current_frame.data, frame->data, correct_frame_size);
376 current_frame.size = correct_frame_size;
377 current_frame.source_client_id = snap->client_id;
378 current_frame.timestamp = (uint32_t)(frame->capture_timestamp_ns / NS_PER_SEC_INT);
379 got_new_frame = true;
380 }
381 } else {
382 }
383 }
384
385 multi_source_frame_t *frame_to_use = got_new_frame ? &current_frame : NULL;
386
387 if (frame_to_use && frame_to_use->data && frame_to_use->size > sizeof(uint32_t) * 2) {
388 // Parse the image data
389 // Format: [width:4][height:4][rgb_data:w*h*3]
390 // Use unaligned read helpers - frame data may not be aligned
391 uint32_t img_width = NET_TO_HOST_U32(read_u32_unaligned(frame_to_use->data));
392 uint32_t img_height = NET_TO_HOST_U32(read_u32_unaligned(frame_to_use->data + sizeof(uint32_t)));
393
394 // Debug logging to understand the data
395 if (img_width == 0xBEBEBEBE || img_height == 0xBEBEBEBE) {
396 SET_ERRNO(ERROR_INVALID_STATE, "UNINITIALIZED MEMORY DETECTED! First 16 bytes of frame data:");
397 uint8_t *bytes = (uint8_t *)frame_to_use->data;
398 SET_ERRNO(ERROR_INVALID_STATE,
399 " %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X", bytes[0],
400 bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], bytes[8], bytes[9], bytes[10],
401 bytes[11], bytes[12], bytes[13], bytes[14], bytes[15]);
402 }
403
404 // Validate dimensions using image utility function
405 if (image_validate_dimensions((size_t)img_width, (size_t)img_height) != ASCIICHAT_OK) {
406 SET_ERRNO(ERROR_INVALID_STATE,
407 "Per-client: Invalid image dimensions from client %u: %ux%u (data may be corrupted)", snap->client_id,
408 img_width, img_height);
409 source_count++;
410 continue;
411 }
412
413 // Calculate expected frame size with overflow checking
414 size_t expected_size = sizeof(uint32_t) * 2;
415 {
416 size_t rgb_size = 0;
417 if (image_calc_rgb_size((size_t)img_width, (size_t)img_height, &rgb_size) != ASCIICHAT_OK) {
418 SET_ERRNO(ERROR_INVALID_STATE, "Per-client: RGB size calculation failed for client %u: %ux%u",
419 snap->client_id, img_width, img_height);
420 source_count++;
421 continue;
422 }
423 expected_size += rgb_size;
424 }
425 if (frame_to_use->size != expected_size) {
426 SET_ERRNO(ERROR_INVALID_STATE,
427 "Per-client: Frame size mismatch from client %u: got %zu, expected %zu for %ux%u image",
428 snap->client_id, frame_to_use->size, expected_size, img_width, img_height);
429 source_count++;
430 continue;
431 }
432
433 // Extract pixel data pointer
434 rgb_pixel_t *pixels = (rgb_pixel_t *)(frame_to_use->data + (sizeof(uint32_t) * 2));
435
436 // Create image from buffer pool for consistent video pipeline management
437 image_t *img = image_new_from_pool(img_width, img_height);
438 if (!img) {
439 log_error("Per-client: image_new_from_pool failed for %ux%u", img_width, img_height);
440 continue;
441 }
442 memcpy(img->pixels, pixels, (size_t)img_width * (size_t)img_height * sizeof(rgb_pixel_t));
443 sources[source_count].image = img;
444 sources[source_count].has_video = true;
445
446 // Free temporary frame buffer - image has its own pixel data now
447 if (got_new_frame) {
448 SAFE_FREE(current_frame.data);
449 }
450 } else {
451 // frame_to_use check failed - clean up allocated frame data
452 if (got_new_frame && current_frame.data) {
453 SAFE_FREE(current_frame.data);
454 }
455 }
456
457 // Increment source count for this active client (with or without video)
458 source_count++;
459 }
460
461 return source_count;
462}
463
474static image_t *create_single_source_composite(image_source_t *sources, int source_count,
475 uint32_t target_client_id __attribute__((unused)),
476 unsigned short width __attribute__((unused)),
477 unsigned short height __attribute__((unused))) {
478 // Find the single source with video
479 image_t *single_source = NULL;
480 for (int i = 0; i < source_count; i++) {
481 if (sources[i].has_video && sources[i].image) {
482 single_source = sources[i].image;
483 break;
484 }
485 }
486
487 if (!single_source) {
488 SET_ERRNO(ERROR_INVALID_STATE, "Logic error: sources_with_video=1 but no source found");
489 return NULL;
490 }
491
492 // For single source, don't pre-fit the image. Let ascii_convert_with_capabilities handle
493 // all aspect ratio fitting with proper CHAR_ASPECT correction. This avoids double-correction
494 // that was happening when the image was pre-fitted in pixel space and then aspect_ratio()
495 // was called again with CHAR_ASPECT=2.0.
496 // Just return the source image as-is; ascii_convert_with_capabilities will fit it properly.
497 return single_source;
498}
499
/**
 * Choose the grid (cols x rows) that maximizes the on-screen area covered by
 * video, given the terminal size in characters and the sources' aspect ratios.
 *
 * Accounts for terminal characters being ~2x taller than wide (CHAR_ASPECT):
 * all "visual" aspect math divides the character height by CHAR_ASPECT.
 * Outputs 0x0 for no video, 1x1 for a single source; otherwise tries every
 * column count and keeps the configuration with the best area utilization.
 */
static void calculate_optimal_grid_layout(image_source_t *sources, int source_count, int sources_with_video,
                                          int terminal_width, int terminal_height, int *out_cols, int *out_rows) {
  // Special cases
  if (sources_with_video == 0) {
    *out_cols = 0;
    *out_rows = 0;
    return;
  }

  if (sources_with_video == 1) {
    *out_cols = 1;
    *out_rows = 1;
    return;
  }

  // ASCII character aspect ratio: characters are ~2x taller than wide
  // So we need to adjust terminal dimensions to visual space
  const float CHAR_ASPECT = 2.0f; // Character height / width ratio

  // Calculate average aspect ratio of all video sources
  float avg_aspect = 0.0f;
  int aspect_count = 0;
  for (int i = 0; i < source_count; i++) {
    if (sources[i].has_video && sources[i].image) {
      float aspect = (float)sources[i].image->w / (float)sources[i].image->h;
      avg_aspect += aspect;
      aspect_count++;
    }
  }
  if (aspect_count > 0) {
    avg_aspect /= aspect_count;
  } else {
    avg_aspect = 1.6f; // Default aspect ratio
  }

  // Try all reasonable grid configurations.
  // Fallback: if every configuration is filtered out below (cells too small),
  // the initial 1 column x N rows layout is returned.
  int best_cols = 1;
  int best_rows = sources_with_video;
  float best_utilization = 0.0f;

  // Try grid configurations from 1x1 up to reasonable limits
  for (int cols = 1; cols <= sources_with_video; cols++) {
    int rows = (sources_with_video + cols - 1) / cols; // Ceiling division

    // Skip configurations with too many empty cells (more than one row worth)
    // NOTE(review): with rows = ceil(n/cols), empty_cells is always < cols,
    // so this filter can never fire - kept for safety/documentation.
    int total_cells = cols * rows;
    int empty_cells = total_cells - sources_with_video;
    if (empty_cells > cols) {
      continue;
    }

    // Calculate cell dimensions for this configuration
    int cell_width = terminal_width / cols;
    int cell_height = terminal_height / rows;

    // Skip if cells would be too small
    if (cell_width < 20 || cell_height < 10) {
      continue;
    }

    // Calculate total area utilized by all videos in this configuration
    // For each video, calculate how much space it would use in a cell
    // IMPORTANT: Account for character aspect ratio (chars are 2x taller than wide)
    float total_area_used = 0.0f;
    int cell_area = cell_width * cell_height;

    // NOTE(review): every iteration uses avg_aspect, so each computes the
    // same fitted size; the loop effectively multiplies by sources_with_video.
    for (int i = 0; i < sources_with_video; i++) {
      // Use average aspect ratio for calculation
      float video_aspect = avg_aspect;

      // Calculate VISUAL cell aspect ratio (accounting for character shape)
      // A cell that is cell_width chars wide and cell_height chars tall
      // has visual aspect = cell_width / (cell_height * CHAR_ASPECT)
      float cell_visual_aspect = (float)cell_width / ((float)cell_height * CHAR_ASPECT);

      // Calculate fitted dimensions while preserving aspect ratio
      int fitted_width, fitted_height;

      if (video_aspect > cell_visual_aspect) {
        // Video is wider than cell - fit to width
        fitted_width = cell_width;
        // Visual height needed: cell_width / video_aspect
        // Character height: visual_height / CHAR_ASPECT
        fitted_height = (int)((cell_width / video_aspect) / CHAR_ASPECT);
      } else {
        // Video is taller than cell - fit to height
        fitted_height = cell_height;
        // Visual width needed: cell_height * CHAR_ASPECT * video_aspect
        fitted_width = (int)(cell_height * CHAR_ASPECT * video_aspect);
      }

      // Clamp to cell bounds
      if (fitted_width > cell_width) {
        fitted_width = cell_width;
      }
      if (fitted_height > cell_height) {
        fitted_height = cell_height;
      }

      // Add area used by this video
      total_area_used += fitted_width * fitted_height;
    }

    // Calculate utilization percentage
    float total_available_area = (float)(cell_area * sources_with_video);
    float utilization = total_area_used / total_available_area;

    float test_cell_visual_aspect = (float)cell_width / ((float)cell_height * CHAR_ASPECT);
    log_dev_every(LOG_RATE_NORMAL, "  Testing %dx%d: cell=%dx%d (visual aspect %.2f), utilization=%.1f%%", cols, rows,
                  cell_width, cell_height, test_cell_visual_aspect, utilization * 100.0f);

    // Prefer configurations with better utilization
    if (utilization > best_utilization) {
      best_utilization = utilization;
      best_cols = cols;
      best_rows = rows;
    }
  }

  *out_cols = best_cols;
  *out_rows = best_rows;

  float terminal_visual_aspect = (float)terminal_width / ((float)terminal_height * CHAR_ASPECT);
  log_dev_every(LOG_RATE_NORMAL,
                "Grid layout: %d clients -> %dx%d grid (%.1f%% utilization) | terminal=%dx%d (char aspect %.2f, VISUAL "
                "aspect %.2f), video aspect: %.2f",
                sources_with_video, best_cols, best_rows, best_utilization * 100.0f, terminal_width, terminal_height,
                (float)terminal_width / (float)terminal_height, terminal_visual_aspect, avg_aspect);
}
650
662static image_t *create_multi_source_composite(image_source_t *sources, int source_count, int sources_with_video,
663 uint32_t target_client_id, unsigned short width, unsigned short height) {
664 (void)target_client_id; // Unused - composite is same for all clients now
665
666 // Calculate optimal grid layout using space-maximizing algorithm
667 int grid_cols, grid_rows;
668 calculate_optimal_grid_layout(sources, source_count, sources_with_video, width, height, &grid_cols, &grid_rows);
669
670 // Calculate composite dimensions in PIXELS for half-block mode
671 // Terminal dimensions are in CHARACTERS, need to convert to pixels:
672 // - Width: 1 char = 1 horizontal pixel
673 // - Height: 1 char = 2 vertical pixels (half-block = 2 pixels per char)
674 const int PIXELS_PER_CHAR_HEIGHT = 2;
675 int composite_width_px = width; // chars = pixels horizontally
676 int composite_height_px = height * PIXELS_PER_CHAR_HEIGHT; // chars * 2 = pixels vertically
677
678 // Create composite with final dimensions - no recreation needed
679 image_t *composite = image_new_from_pool(composite_width_px, composite_height_px);
680 image_clear(composite);
681
682 // Place each source in the grid
683 int video_source_index = 0; // Track only sources with video
684 for (int i = 0; i < source_count && video_source_index < 9; i++) { // Max 9 sources in 3x3 grid
685 if (!sources[i].image)
686 continue;
687
688 int row = video_source_index / grid_cols;
689 int col = video_source_index % grid_cols;
690 video_source_index++;
691
692 // Use actual composite dimensions for cell calculations
693 // Composite is now in PIXELS (already converted from terminal characters)
694 int cell_width_px = composite->w / grid_cols;
695 int cell_height_px = composite->h / grid_rows;
696
697 // Calculate aspect ratios
698 float src_aspect = (float)sources[i].image->w / (float)sources[i].image->h;
699 float cell_visual_aspect = (float)cell_width_px / (float)cell_height_px;
700
701 int target_width_px, target_height_px;
702
703 // CONTAIN strategy: Fill one dimension completely, let other scale down (never overflow)
704 // Compare aspects to decide which dimension to fill
705 if (src_aspect > cell_visual_aspect) {
706 // Video is WIDER than cell → fill WIDTH (height will be smaller)
707 target_width_px = cell_width_px;
708 target_height_px = (int)((cell_width_px / src_aspect) + 0.5f);
709 } else {
710 // Video is TALLER than cell → fill HEIGHT (width will be smaller)
711 target_height_px = cell_height_px;
712 target_width_px = (int)((cell_height_px * src_aspect) + 0.5f);
713 }
714
715 log_dev_every(LOG_RATE_NORMAL, "Cell %d: %dx%d px, video %.1f, cell %.2f → target %dx%d px (fill %s)",
716 video_source_index - 1, cell_width_px, cell_height_px, src_aspect, cell_visual_aspect,
717 target_width_px, target_height_px, (src_aspect > cell_visual_aspect) ? "WIDTH" : "HEIGHT");
718
719 // Create resized image with standard allocation
720 image_t *resized = image_new_from_pool(target_width_px, target_height_px);
721 image_resize(sources[i].image, resized);
722
723 // Calculate cell position in pixels (after any composite recreation)
724 int cell_x_offset_px = col * cell_width_px;
725 int cell_y_offset_px = row * cell_height_px;
726
727 // Grid centering strategy:
728 // - Multi-client: Apply padding ONLY to edge cells to center the grid as a whole
729 // (left column gets left padding, right column gets right padding)
730 // (top row gets top padding, bottom row gets bottom padding)
731 // This keeps cells edge-to-edge while centering the entire grid
732 // - Single client: Center the image within the cell
733 int x_padding_px, y_padding_px;
734
735 // Center images within their cells for all layouts
736 // This prevents gaps/stripes between clients
737 x_padding_px = (cell_width_px - target_width_px) / 2;
738 y_padding_px = (cell_height_px - target_height_px) / 2;
739
740 // Define cell boundaries for clipping (prevents bleeding into adjacent cells)
741 int cell_x_min = cell_x_offset_px;
742 int cell_x_max = cell_x_offset_px + cell_width_px - 1;
743 int cell_y_min = cell_y_offset_px;
744 int cell_y_max = cell_y_offset_px + cell_height_px - 1;
745
746 // Copy resized image to composite with cell boundary clipping
747 // This allows images to fill cells completely while preventing overlap
748
749 for (int y = 0; y < resized->h; y++) {
750 for (int x = 0; x < resized->w; x++) {
751 // Calculate destination position
752 int dst_x = cell_x_offset_px + x_padding_px + x;
753 int dst_y = cell_y_offset_px + y_padding_px + y;
754
755 // Clip to cell boundaries (prevents bleeding into adjacent cells)
756 if (dst_x < cell_x_min || dst_x > cell_x_max || dst_y < cell_y_min || dst_y > cell_y_max) {
757 continue; // Skip pixels outside cell boundaries
758 }
759
760 // Additional composite boundary check
761 if (dst_x < 0 || dst_x >= composite->w || dst_y < 0 || dst_y >= composite->h) {
762 continue; // Skip pixels outside composite
763 }
764
765 // Copy pixel
766 int src_idx = (y * resized->w) + x;
767 int dst_idx = (dst_y * composite->w) + dst_x;
768 composite->pixels[dst_idx] = resized->pixels[src_idx];
769 }
770 }
771
772 image_destroy_to_pool(resized);
773 }
774
775 return composite;
776}
777
787static char *convert_composite_to_ascii(image_t *composite, uint32_t target_client_id, unsigned short width,
788 unsigned short height) {
789 // LOCK OPTIMIZATION: Don't call find_client_by_id() - it would acquire rwlock unnecessarily
790 // Instead, the render thread already has snapshot of client state, so we just need palette data
791 // which is stable after initialization
792
793 // We need to find the client to access palette data, but we can do this without locking
794 // since palette is initialized once and never changes
795 client_info_t *render_client = NULL;
796
797 // Find client without locking - client_id is atomic and stable once set
798 for (int i = 0; i < MAX_CLIENTS; i++) {
799 client_info_t *client = &g_client_manager.clients[i];
800 if (atomic_load(&client->client_id) == target_client_id) {
801 render_client = client;
802 break;
803 }
804 }
805
806 if (!render_client) {
807 SET_ERRNO(ERROR_INVALID_STATE, "Per-client %u: Target client not found", target_client_id);
808 return NULL;
809 }
810
811 // Snapshot terminal capabilities WITHOUT holding rwlock
812 // Terminal caps are set once during handshake and never change, so this is safe
813 bool has_terminal_caps_snapshot = render_client->has_terminal_caps;
814 if (!has_terminal_caps_snapshot) {
815 SET_ERRNO(ERROR_INVALID_STATE, "Per-client %u: Terminal capabilities not received", target_client_id);
816 return NULL;
817 }
818
819 terminal_capabilities_t caps_snapshot = render_client->terminal_caps;
820
821 if (!render_client->client_palette_initialized) {
822 SET_ERRNO(ERROR_TERMINAL, "Client %u palette not initialized - cannot render frame", target_client_id);
823 return NULL;
824 }
825
826 // Render with client's custom palette using enhanced capabilities
827 // Palette data is stable after initialization, so no locking needed
828 const int h = caps_snapshot.render_mode == RENDER_MODE_HALF_BLOCK ? height * 2 : height;
829
830 // DEBUG: Log dimensions being used for ASCII conversion
831 log_dev_every(LOG_RATE_SLOW, "convert_composite_to_ascii: composite=%dx%d, terminal=%dx%d, h=%d (mode=%d)",
832 composite->w, composite->h, width, height, h, caps_snapshot.render_mode);
833
834 // Pass full terminal dimensions so ascii_convert_with_capabilities can fit the image correctly
835 // with proper CHAR_ASPECT correction. The composite may have been pre-fitted in pixel space,
836 // but ascii_convert will handle terminal character aspect ratio properly when aspect_ratio=true.
837 uint64_t convert_start_ns = time_get_ns();
838 char *ascii_frame = ascii_convert_with_capabilities(composite, width, h, &caps_snapshot, true, false,
839 render_client->client_palette_chars);
840 uint64_t convert_end_ns = time_get_ns();
841 uint64_t convert_duration_ns = convert_end_ns - convert_start_ns;
842
843 if (convert_duration_ns > 5 * NS_PER_MS_INT) { // Log if > 5ms
844 char duration_str[32];
845 format_duration_ns((double)convert_duration_ns, duration_str, sizeof(duration_str));
846 log_warn("SLOW_ASCII_CONVERT: Client %u took %s to convert %dx%d image to ASCII", target_client_id, duration_str,
847 composite->w, composite->h);
848 }
849
850 return ascii_frame;
851}
852
853/* ============================================================================
854 * Per-Client Video Mixing and Frame Generation
855 * ============================================================================
856 */
857
953// Compute hash of all active video sources for cache invalidation
954// Uses hardware-accelerated CRC32 for ultra-fast hashing
955char *create_mixed_ascii_frame_for_client(uint32_t target_client_id, unsigned short width, unsigned short height,
956 bool wants_stretch, size_t *out_size, bool *out_grid_changed,
957 int *out_sources_count) {
958 (void)wants_stretch; // Unused - we always handle aspect ratio ourselves
959
960 uint64_t frame_gen_start_ns = time_get_ns();
961
962 // Initialize output parameters
963 if (out_grid_changed) {
964 *out_grid_changed = false;
965 }
966 if (out_sources_count) {
967 *out_sources_count = 0;
968 }
969
970 if (!out_size || width == 0 || height == 0) {
971 SET_ERRNO(ERROR_INVALID_PARAM,
972 "Invalid parameters for create_mixed_ascii_frame_for_client: width=%u, height=%u, out_size=%p", width,
973 height, out_size);
974 return NULL;
975 }
976
977 // Collect all active clients and their image sources
978 image_source_t sources[MAX_CLIENTS];
979 uint64_t collect_start_ns = time_get_ns();
980 int source_count = collect_video_sources(sources, MAX_CLIENTS);
981 uint64_t collect_end_ns = time_get_ns();
982
983 // Count sources that actually have video data
984 int sources_with_video = 0;
985 for (int i = 0; i < source_count; i++) {
986 if (sources[i].has_video && sources[i].image) {
987 sources_with_video++;
988 }
989 }
990
991 static uint64_t last_detailed_log = 0;
992 uint64_t now_ns = collect_end_ns;
993 if (now_ns - last_detailed_log > 333 * NS_PER_MS_INT) { // Log every 333ms (3x per second)
994 last_detailed_log = now_ns;
995 log_info("FRAME_GEN_START: target_client=%u sources=%d collect=%.1fms", target_client_id, sources_with_video,
996 (collect_end_ns - collect_start_ns) / NS_PER_MS);
997 }
998
999 // Return the source count for debugging/tracking
1000 if (out_sources_count) {
1001 *out_sources_count = sources_with_video;
1002 }
1003
1004 // GRID LAYOUT CHANGE DETECTION:
1005 // Check if the number of active video sources has changed
1006 // NOTE: We only UPDATE the count and SIGNAL the change via out parameter
1007 // Broadcasting CLEAR_CONSOLE must happen AFTER the new frames are written to buffers
1008 // to prevent race condition where CLEAR arrives before new frame is ready
1009 int previous_count = atomic_load(&g_previous_active_video_count);
1010 if (sources_with_video != previous_count) {
1011 // Use compare-and-swap to ensure only ONE thread detects the change
1012 if (atomic_compare_exchange_strong(&g_previous_active_video_count, &previous_count, sources_with_video)) {
1013 log_dev_every(
1014 LOG_RATE_DEFAULT,
1015 "Grid layout changing: %d -> %d active video sources - caller will broadcast clear AFTER buffering frame",
1016 previous_count, sources_with_video);
1017 if (out_grid_changed) {
1018 *out_grid_changed = true; // Signal to caller
1019 }
1020 }
1021 }
1022
1023 // No active video sources - don't generate placeholder frames
1024 image_t *composite = NULL;
1025
1026 if (sources_with_video == 0) {
1027 *out_size = 0;
1028 // No active video sources for client - this isn't an error.
1029 // Return NULL to indicate no frame should be sent
1030 return NULL;
1031 }
1032
1033 if (sources_with_video == 1) {
1034 // Single source handling - create composite and convert to ASCII
1035 // Note: create_single_source_composite returns a reference to sources[i].image
1036 // which could be modified by other threads. Make a copy to prevent concurrent
1037 // modification during ascii_convert_with_capabilities.
1038 image_t *single_source = create_single_source_composite(sources, source_count, target_client_id, width, height);
1039 if (single_source) {
1040 composite = image_new_copy(single_source);
1041 if (!composite) {
1042 SET_ERRNO(ERROR_MEMORY, "Failed to copy single source composite");
1043 *out_size = 0;
1044 return NULL;
1045 }
1046 }
1047 } else {
1048 // Multiple sources - create grid layout
1049 composite =
1050 create_multi_source_composite(sources, source_count, sources_with_video, target_client_id, width, height);
1051 }
1052
1053 char *out = NULL;
1054
1055 if (!composite) {
1056 SET_ERRNO(ERROR_INVALID_STATE, "Per-client %u: Failed to create composite image", target_client_id);
1057 *out_size = 0;
1058 out = NULL;
1059 }
1060
1061 // Convert composite to ASCII using client capabilities
1062 // Pass terminal dimensions so the frame can be padded to full width
1063 char *ascii_frame = convert_composite_to_ascii(composite, target_client_id, width, height);
1064
1065 if (ascii_frame) {
1066 // The frame should have been null-terminated by the padding functions.
1067 // Use strlen() which is optimized and reliable
1068 size_t ascii_len = strlen(ascii_frame);
1069
1070 // Safety check: don't accept unreasonably large frames (10MB limit)
1071 if (ascii_len > 10 * 1024 * 1024) {
1072 log_error("Frame size exceeds 10MB safety limit (possible buffer overflow)");
1073 SET_ERRNO(ERROR_INVALID_PARAM, "Frame size exceeds 10MB");
1074 return NULL;
1075 }
1076
1077 // Ensure frame ends with a reset sequence to avoid garbage at terminal
1078 // This prevents color codes from leaking into uninitialized memory
1079 const char reset_seq[] = "\033[0m";
1080 const size_t reset_len = 4;
1081
1082 if (ascii_len >= reset_len) {
1083 // Check if frame already ends with reset
1084 const char *frame_end = ascii_frame + ascii_len - reset_len;
1085 if (strncmp(frame_end, reset_seq, reset_len) == 0) {
1086 // Frame properly ends with reset, use full length
1087 *out_size = ascii_len;
1088 } else {
1089 // Frame doesn't end with reset - this is the REAL bug!
1090 // Truncate to the last occurrence of reset sequence
1091 const char *last_reset = NULL;
1092 for (const char *p = ascii_frame + ascii_len - reset_len; p >= ascii_frame; p--) {
1093 if (strncmp(p, reset_seq, reset_len) == 0) {
1094 last_reset = p;
1095 break;
1096 }
1097 }
1098
1099 if (last_reset) {
1100 // Include the reset sequence and truncate after it
1101 *out_size = (size_t)(last_reset - ascii_frame) + reset_len;
1102 ascii_frame[*out_size] = '\0'; // Ensure null termination
1103 log_warn("Frame was missing reset at end (had garbage), truncated from %zu to %zu bytes", ascii_len,
1104 *out_size);
1105 } else {
1106 // No reset found, use full length as fallback
1107 *out_size = ascii_len;
1108 log_warn("Frame has no reset sequences, sending full %zu bytes", ascii_len);
1109 }
1110 }
1111 } else {
1112 // Frame too short to have a reset, use as-is
1113 *out_size = ascii_len;
1114 }
1115
1116 log_dev_every(LOG_RATE_SLOW, "create_mixed_ascii_frame_for_client: Final frame size=%zu bytes for client %u",
1117 *out_size, target_client_id);
1118
1119 // Debug: Log the last 50 bytes of the frame to see what's really there
1120 if (*out_size >= 50) {
1121 char hex_buf[300] = {0};
1122 size_t hex_len = 0;
1123 const uint8_t *last_bytes = (const uint8_t *)ascii_frame + (*out_size - 50);
1124 for (int i = 0; i < 50 && hex_len < sizeof(hex_buf) - 5; i++) {
1125 hex_len += snprintf(hex_buf + hex_len, sizeof(hex_buf) - hex_len, "%02X ", last_bytes[i]);
1126 }
1127 log_dev_every(4500 * US_PER_MS_INT, "FRAME_LAST_50_BYTES (hex): %s", hex_buf);
1128
1129 // Also log as ASCII for readability
1130 char ascii_buf[100] = {0};
1131 for (int i = 0; i < 50 && i < (int)sizeof(ascii_buf) - 1; i++) {
1132 if (last_bytes[i] >= 32 && last_bytes[i] < 127) {
1133 ascii_buf[i] = (char)last_bytes[i];
1134 } else if (last_bytes[i] == '\n') {
1135 ascii_buf[i] = 'N';
1136 } else if (last_bytes[i] == '\0') {
1137 ascii_buf[i] = '0';
1138 } else {
1139 ascii_buf[i] = '.';
1140 }
1141 }
1142 log_dev_every(4500 * US_PER_MS_INT, "FRAME_LAST_50_ASCII: %s", ascii_buf);
1143 }
1144
1145 out = ascii_frame;
1146 } else {
1147 SET_ERRNO(ERROR_TERMINAL, "Per-client %u: Failed to convert image to ASCII", target_client_id);
1148 *out_size = 0;
1149 }
1150
1151 if (composite) {
1152 // For single source, composite is a malloc-allocated copy, not from pool
1153 // Check alloc method to determine correct destroy function
1154 if (composite->alloc_method == IMAGE_ALLOC_POOL) {
1155 image_destroy_to_pool(composite);
1156 } else {
1157 image_destroy(composite);
1158 }
1159 }
1160 for (int i = 0; i < source_count; i++) {
1161 if (sources[i].image) {
1162 image_destroy_to_pool(sources[i].image);
1163 }
1164 }
1165
1166 uint64_t frame_gen_end_ns = time_get_ns();
1167 uint64_t frame_gen_duration_ns = frame_gen_end_ns - frame_gen_start_ns;
1168 if (frame_gen_duration_ns > 10 * NS_PER_MS_INT) { // Log if > 10ms
1169 char duration_str[32];
1170 format_duration_ns((double)frame_gen_duration_ns, duration_str, sizeof(duration_str));
1171 log_warn("SLOW_FRAME_GENERATION: Client %u full frame gen took %s", target_client_id, duration_str);
1172 }
1173
1174 return out;
1175}
1176
1177/* ============================================================================
1178 * Frame Queuing and Delivery Functions
1179 * ============================================================================
1180 */
1181
1240// REMOVED: queue_ascii_frame_for_client - video now uses double buffer directly in client->outgoing_video_buffer
1241
1285int queue_audio_for_client(client_info_t *client, const void *audio_data, size_t data_size) {
1286 if (!client || !client->audio_queue || !audio_data || data_size == 0) {
1287 return -1;
1288 }
1289
1290 return packet_queue_enqueue(client->audio_queue, PACKET_TYPE_AUDIO_BATCH, audio_data, data_size, 0, true);
1291}
1292
1306 // LOCK OPTIMIZATION: Don't acquire rwlock - all fields we access are atomic
1307 // client_id, active, is_sending_video are all atomic variables
1308
1309 // Iterate through all client slots
1310 for (int i = 0; i < MAX_CLIENTS; i++) {
1311 client_info_t *client = &g_client_manager.clients[i];
1312
1313 // Skip uninitialized clients (atomic read)
1314 if (atomic_load(&client->client_id) == 0) {
1315 continue;
1316 }
1317
1318 // Check if client is active and sending video (both atomic reads)
1319 bool is_active = atomic_load(&client->active);
1320 bool is_sending = atomic_load(&client->is_sending_video);
1321
1322 if (is_active && is_sending) {
1323 return true;
1324 }
1325 }
1326
1327 return false;
1328}
char * ascii_convert_with_capabilities(image_t *original, const ssize_t width, const ssize_t height, const terminal_capabilities_t *caps, const bool use_aspect_ratio, const bool stretch, const char *palette_chars)
Definition ascii.c:190
#define CHAR_ASPECT
Per-client state management and lifecycle orchestration.
__attribute__((constructor))
Register fork handlers for common module.
Definition common.c:104
int packet_queue_enqueue(packet_queue_t *queue, packet_type_t type, const void *data, size_t data_len, uint32_t client_id, bool copy_data)
atomic_bool g_server_should_exit
Global atomic shutdown flag shared across all threads.
ascii-chat Server Mode Entry Point Header
client_manager_t g_client_manager
Global client manager singleton - central coordination point.
bool any_clients_sending_video(void)
Check if any connected clients are currently sending video.
Definition stream.c:1305
char * create_mixed_ascii_frame_for_client(uint32_t target_client_id, unsigned short width, unsigned short height, bool wants_stretch, size_t *out_size, bool *out_grid_changed, int *out_sources_count)
Generate personalized ASCII frame for a specific client.
Definition stream.c:955
int queue_audio_for_client(client_info_t *client, const void *audio_data, size_t data_size)
Queue audio data for delivery to specific client.
Definition stream.c:1285
Multi-client video mixing and ASCII frame generation.
client_info_t clients[MAX_CLIENTS]
Array of client_info_t structures (backing storage)
Definition client.h:65
Image source structure for multi-client video mixing.
Definition stream.c:204
uint32_t client_id
Unique client identifier for this source.
Definition stream.c:208
bool has_video
Whether this client has active video stream.
Definition stream.c:210
image_t * image
Pointer to client's current video frame (owned by buffer system)
Definition stream.c:206
asciichat_error_t image_calc_rgb_size(size_t width, size_t height, size_t *out_size)
Definition util/image.c:54
uint64_t time_get_ns(void)
Definition util/time.c:48
int format_duration_ns(double nanoseconds, char *buffer, size_t buffer_size)
Definition util/time.c:275
image_t * image_new_copy(const image_t *source)
void image_destroy_to_pool(image_t *image)
void image_resize(const image_t *s, image_t *d)
void image_clear(image_t *p)
image_t * image_new_from_pool(size_t width, size_t height)
void image_destroy(image_t *p)
Definition video/image.c:85
asciichat_error_t image_validate_dimensions(size_t width, size_t height)
Definition video.c:12
const video_frame_t * video_frame_get_latest(video_frame_buffer_t *vfb)