ascii-chat 0.6.0
Real-time terminal-based video chat with ASCII art conversion
Loading...
Searching...
No Matches
stream.c
Go to the documentation of this file.
1
131#include <stdatomic.h>
132#include <stdio.h>
133#include <string.h>
134#include <time.h>
135#include <math.h>
136#include <float.h>
137
138#include "stream.h"
139#include "client.h"
140#include "common.h"
141#include "util/endian.h"
142#include "buffer_pool.h"
143#include "network/packet_queue.h"
144#include "ringbuffer.h"
145#include "video/video_frame.h"
146#include "video/image.h"
147#include "video/ascii.h"
148#include "util/aspect_ratio.h"
149#include "util/endian.h"
150#include "util/image.h"
151
152// Global client manager from client.c - needed for any_clients_sending_video()
155
// Active-video-source count observed on the previous mixing pass.
// Updated via compare-and-swap in create_mixed_ascii_frame_for_client() so
// that exactly one render thread detects each grid-layout change.
static atomic_int g_previous_active_video_count = 0;
167/* ============================================================================
168 * Helper Functions
169 * ============================================================================
170 */
171
177static void cleanup_current_frame_data(multi_source_frame_t *frame) {
178 if (frame && frame->data) {
180 if (pool) {
181 buffer_pool_free(pool, frame->data, frame->size);
182 } else {
183 SAFE_FREE(frame->data);
184 }
185 frame->data = NULL;
186 }
187}
188
237
245static int collect_video_sources(image_source_t *sources, int max_sources) {
246 int source_count = 0;
247
248 // Check for shutdown before acquiring locks to prevent lock corruption
249 if (atomic_load(&g_server_should_exit)) {
250 return 0;
251 }
252
253 // LOCK OPTIMIZATION: No locks needed! All client fields are atomic or stable pointers
254 // client_id, active, is_sending_video are all atomic variables
255 // incoming_video_buffer is set once during client creation and never changes
256
257 // Collect client info snapshots WITHOUT holding rwlock
258 typedef struct {
259 uint32_t client_id;
260 bool is_active;
261 bool is_sending_video;
262 video_frame_buffer_t *video_buffer;
263 } client_snapshot_t;
264
265 client_snapshot_t client_snapshots[MAX_CLIENTS];
266 int snapshot_count = 0;
267
268 // NO LOCK: All fields are atomic or stable pointers
269 for (int i = 0; i < MAX_CLIENTS; i++) {
271
272 if (atomic_load(&client->client_id) == 0) {
273 continue; // Skip uninitialized clients
274 }
275
276 // Snapshot all needed client state (all atomic reads or stable pointers)
277 client_snapshots[snapshot_count].client_id = atomic_load(&client->client_id);
278 client_snapshots[snapshot_count].is_active = atomic_load(&client->active);
279 client_snapshots[snapshot_count].is_sending_video = atomic_load(&client->is_sending_video);
280 client_snapshots[snapshot_count].video_buffer = client->incoming_video_buffer; // Stable pointer
281 snapshot_count++;
282 }
283
284 // Process frames (expensive operations)
285 for (int i = 0; i < snapshot_count && source_count < max_sources; i++) {
286 client_snapshot_t *snap = &client_snapshots[i];
287
288 if (!snap->is_active) {
289 continue;
290 }
291
292 sources[source_count].client_id = snap->client_id;
293 sources[source_count].image = NULL; // Will be set if video is available
294 sources[source_count].has_video = false;
295
296 // Declare these outside the if block so they're accessible later
297 multi_source_frame_t current_frame = {0};
298 bool got_new_frame = false;
299
300 // Always try to get the last available video frame for consistent ASCII generation
301 // The double buffer ensures we always have the last valid frame
302 if (snap->is_sending_video && snap->video_buffer) {
303 // Get the latest frame (always available from double buffer)
304 const video_frame_t *frame = video_frame_get_latest(snap->video_buffer);
305
306 if (!frame) {
307 continue; // Skip to next snapshot
308 }
309
310 // Try to access frame fields ONE AT A TIME to pinpoint the hang
311 void *frame_data_ptr = frame->data;
312
313 size_t frame_size_val = frame->size;
314
315 if (frame_data_ptr && frame_size_val > 0) {
316 // We have frame data - copy it to our working structure
318 if (pool) {
319 current_frame.data = buffer_pool_alloc(pool, frame->size);
320 }
321 if (!current_frame.data) {
322 // 64-byte cache-line alignment improves performance for large video frames
323 current_frame.data = SAFE_MALLOC_ALIGNED(frame->size, 64, void *);
324 }
325
326 if (current_frame.data) {
327 memcpy(current_frame.data, frame->data, frame->size);
328 current_frame.size = frame->size;
329 current_frame.source_client_id = snap->client_id;
330 current_frame.timestamp = (uint32_t)(frame->capture_timestamp_us / 1000000);
331 got_new_frame = true;
332 }
333 } else {
334 }
335 } else {
336 }
337
338 multi_source_frame_t *frame_to_use = got_new_frame ? &current_frame : NULL;
339
340 if (frame_to_use && frame_to_use->data && frame_to_use->size > sizeof(uint32_t) * 2) {
341 // Parse the image data
342 // Format: [width:4][height:4][rgb_data:w*h*3]
343 // Use unaligned read helpers - frame data may not be aligned
344 uint32_t img_width = NET_TO_HOST_U32(read_u32_unaligned(frame_to_use->data));
345 uint32_t img_height = NET_TO_HOST_U32(read_u32_unaligned(frame_to_use->data + sizeof(uint32_t)));
346
347 // Debug logging to understand the data
348 if (img_width == 0xBEBEBEBE || img_height == 0xBEBEBEBE) {
349 SET_ERRNO(ERROR_INVALID_STATE, "UNINITIALIZED MEMORY DETECTED! First 16 bytes of frame data:");
350 uint8_t *bytes = (uint8_t *)frame_to_use->data;
352 " %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X", bytes[0],
353 bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], bytes[8], bytes[9], bytes[10],
354 bytes[11], bytes[12], bytes[13], bytes[14], bytes[15]);
355 }
356
357 // Validate dimensions using image utility function
358 if (image_validate_dimensions((size_t)img_width, (size_t)img_height) != ASCIICHAT_OK) {
360 "Per-client: Invalid image dimensions from client %u: %ux%u (data may be corrupted)", snap->client_id,
361 img_width, img_height);
362 // Clean up the current frame if we got a new one
363 if (got_new_frame) {
364 cleanup_current_frame_data(&current_frame);
365 }
366 source_count++;
367 continue;
368 }
369
370 // Calculate expected frame size with overflow checking
371 size_t expected_size = sizeof(uint32_t) * 2;
372 {
373 size_t rgb_size = 0;
374 if (image_calc_rgb_size((size_t)img_width, (size_t)img_height, &rgb_size) != ASCIICHAT_OK) {
375 SET_ERRNO(ERROR_INVALID_STATE, "Per-client: RGB size calculation failed for client %u: %ux%u",
376 snap->client_id, img_width, img_height);
377 if (got_new_frame) {
378 cleanup_current_frame_data(&current_frame);
379 }
380 source_count++;
381 continue;
382 }
383 expected_size += rgb_size;
384 }
385 if (frame_to_use->size != expected_size) {
387 "Per-client: Frame size mismatch from client %u: got %zu, expected %zu for %ux%u image",
388 snap->client_id, frame_to_use->size, expected_size, img_width, img_height);
389 // Clean up the current frame if we got a new one
390 if (got_new_frame) {
391 cleanup_current_frame_data(&current_frame);
392 }
393 source_count++;
394 continue;
395 }
396
397 // Extract pixel data
398 rgb_pixel_t *pixels = (rgb_pixel_t *)(frame_to_use->data + (sizeof(uint32_t) * 2));
399
400 // Create image from buffer pool for consistent video pipeline management
401 image_t *img = image_new_from_pool(img_width, img_height);
402 memcpy(img->pixels, pixels, (size_t)img_width * (size_t)img_height * sizeof(rgb_pixel_t));
403 sources[source_count].image = img;
404 sources[source_count].has_video = true;
405 }
406
407 // Clean up current_frame.data if we allocated it but frame_to_use check failed
408 // This handles cases where: frame too small, no data, etc.
409 if (got_new_frame && current_frame.data) {
410 cleanup_current_frame_data(&current_frame);
411 }
412
413 // Increment source count for this active client (with or without video)
414 source_count++;
415 }
416
417 return source_count;
418}
419
430static image_t *create_single_source_composite(image_source_t *sources, int source_count, uint32_t target_client_id,
431 unsigned short width, unsigned short height) {
432 // Find the single source with video
433 image_t *single_source = NULL;
434 for (int i = 0; i < source_count; i++) {
435 if (sources[i].has_video && sources[i].image) {
436 single_source = sources[i].image;
437 break;
438 }
439 }
440
441 if (!single_source) {
442 SET_ERRNO(ERROR_INVALID_STATE, "Logic error: sources_with_video=1 but no source found");
443 return NULL;
444 }
445
446 // Single source - check if target client wants half-block mode for 2x resolution
447 // LOCK OPTIMIZATION: Find client without calling find_client_by_id() to avoid rwlock
448 client_info_t *target_client = NULL;
449 for (int i = 0; i < MAX_CLIENTS; i++) {
451 if (atomic_load(&client->client_id) == target_client_id) {
452 target_client = client;
453 break;
454 }
455 }
456 bool use_half_block = target_client && target_client->has_terminal_caps &&
458
459 int composite_width_px, composite_height_px;
460
461 if (use_half_block) {
462 // Half-block mode: use full terminal dimensions for 2x resolution
463 composite_width_px = width;
464 composite_height_px = height * 2;
465 } else {
466 // Normal modes: use aspect-ratio fitted dimensions
467 calculate_fit_dimensions_pixel(single_source->w, single_source->h, width, height, &composite_width_px,
468 &composite_height_px);
469 }
470
471 // Create composite from buffer pool for consistent memory management
472 image_t *composite = image_new_from_pool(composite_width_px, composite_height_px);
473 image_clear(composite);
474
475 if (use_half_block) {
476 // Half-block mode: manual aspect ratio and centering to preserve 2x resolution
477 float src_aspect = (float)single_source->w / (float)single_source->h;
478 float target_aspect = (float)composite_width_px / (float)composite_height_px;
479
480 int fitted_width, fitted_height;
481 if (src_aspect > target_aspect) {
482 // Source is wider - fit to width
483 fitted_width = composite_width_px;
484 fitted_height = (int)(composite_width_px / src_aspect);
485 } else {
486 // Source is taller - fit to height
487 fitted_height = composite_height_px;
488 fitted_width = (int)(composite_height_px * src_aspect);
489 }
490
491 // Calculate centering offsets
492 int x_offset = (composite_width_px - fitted_width) / 2;
493 int y_offset = (composite_height_px - fitted_height) / 2;
494
495 // Create fitted image from buffer pool
496 image_t *fitted = image_new_from_pool(fitted_width, fitted_height);
497 image_resize(single_source, fitted);
498
499 // Copy fitted image to center of composite
500 for (int y = 0; y < fitted_height; y++) {
501 for (int x = 0; x < fitted_width; x++) {
502 int src_idx = (y * fitted_width) + x;
503 int dst_x = x_offset + x;
504 int dst_y = y_offset + y;
505 int dst_idx = (dst_y * composite->w) + dst_x;
506
507 if (dst_x >= 0 && dst_x < composite->w && dst_y >= 0 && dst_y < composite->h) {
508 composite->pixels[dst_idx] = fitted->pixels[src_idx];
509 }
510 }
511 }
512
513 image_destroy_to_pool(fitted);
514 } else {
515 // Normal modes: Simple resize to fitted dimensions
516 image_resize(single_source, composite);
517 }
518
519 return composite;
520}
521
543static void calculate_optimal_grid_layout(image_source_t *sources, int source_count, int sources_with_video,
544 int terminal_width, int terminal_height, int *out_cols, int *out_rows) {
545 // Special cases
546 if (sources_with_video == 0) {
547 *out_cols = 0;
548 *out_rows = 0;
549 return;
550 }
551
552 if (sources_with_video == 1) {
553 *out_cols = 1;
554 *out_rows = 1;
555 return;
556 }
557
558 // ASCII character aspect ratio: characters are ~2x taller than wide
559 // So we need to adjust terminal dimensions to visual space
560 const float CHAR_ASPECT = 2.0f; // Character height / width ratio
561
562 // Calculate average aspect ratio of all video sources
563 float avg_aspect = 0.0f;
564 int aspect_count = 0;
565 for (int i = 0; i < source_count; i++) {
566 if (sources[i].has_video && sources[i].image) {
567 float aspect = (float)sources[i].image->w / (float)sources[i].image->h;
568 avg_aspect += aspect;
569 aspect_count++;
570 }
571 }
572 if (aspect_count > 0) {
573 avg_aspect /= aspect_count;
574 } else {
575 avg_aspect = 1.6f; // Default aspect ratio
576 }
577
578 // Try all reasonable grid configurations
579 int best_cols = 1;
580 int best_rows = sources_with_video;
581 float best_utilization = 0.0f;
582
583 // Try grid configurations from 1x1 up to reasonable limits
584 for (int cols = 1; cols <= sources_with_video; cols++) {
585 int rows = (sources_with_video + cols - 1) / cols; // Ceiling division
586
587 // Skip configurations with too many empty cells (more than one row worth)
588 int total_cells = cols * rows;
589 int empty_cells = total_cells - sources_with_video;
590 if (empty_cells > cols) {
591 continue;
592 }
593
594 // Calculate cell dimensions for this configuration
595 int cell_width = terminal_width / cols;
596 int cell_height = terminal_height / rows;
597
598 // Skip if cells would be too small
599 if (cell_width < 20 || cell_height < 10) {
600 continue;
601 }
602
603 // Calculate total area utilized by all videos in this configuration
604 // For each video, calculate how much space it would use in a cell
605 // IMPORTANT: Account for character aspect ratio (chars are 2x taller than wide)
606 float total_area_used = 0.0f;
607 int cell_area = cell_width * cell_height;
608
609 for (int i = 0; i < sources_with_video; i++) {
610 // Use average aspect ratio for calculation
611 float video_aspect = avg_aspect;
612
613 // Calculate VISUAL cell aspect ratio (accounting for character shape)
614 // A cell that is cell_width chars wide and cell_height chars tall
615 // has visual aspect = cell_width / (cell_height * CHAR_ASPECT)
616 float cell_visual_aspect = (float)cell_width / ((float)cell_height * CHAR_ASPECT);
617
618 // Calculate fitted dimensions while preserving aspect ratio
619 int fitted_width, fitted_height;
620
621 if (video_aspect > cell_visual_aspect) {
622 // Video is wider than cell - fit to width
623 fitted_width = cell_width;
624 // Visual height needed: cell_width / video_aspect
625 // Character height: visual_height / CHAR_ASPECT
626 fitted_height = (int)((cell_width / video_aspect) / CHAR_ASPECT);
627 } else {
628 // Video is taller than cell - fit to height
629 fitted_height = cell_height;
630 // Visual width needed: cell_height * CHAR_ASPECT * video_aspect
631 fitted_width = (int)(cell_height * CHAR_ASPECT * video_aspect);
632 }
633
634 // Clamp to cell bounds
635 if (fitted_width > cell_width) {
636 fitted_width = cell_width;
637 }
638 if (fitted_height > cell_height) {
639 fitted_height = cell_height;
640 }
641
642 // Add area used by this video
643 total_area_used += fitted_width * fitted_height;
644 }
645
646 // Calculate utilization percentage
647 float total_available_area = (float)(cell_area * sources_with_video);
648 float utilization = total_area_used / total_available_area;
649
650 float test_cell_visual_aspect = (float)cell_width / ((float)cell_height * CHAR_ASPECT);
651 log_debug_every(LOG_RATE_NORMAL, " Testing %dx%d: cell=%dx%d (visual aspect %.2f), utilization=%.1f%%", cols, rows,
652 cell_width, cell_height, test_cell_visual_aspect, utilization * 100.0f);
653
654 // Prefer configurations with better utilization
655 if (utilization > best_utilization) {
656 best_utilization = utilization;
657 best_cols = cols;
658 best_rows = rows;
659 }
660 }
661
662 *out_cols = best_cols;
663 *out_rows = best_rows;
664
665 float terminal_visual_aspect = (float)terminal_width / ((float)terminal_height * CHAR_ASPECT);
666 log_info("Grid layout: %d clients -> %dx%d grid (%.1f%% utilization) | terminal=%dx%d (char aspect %.2f, VISUAL "
667 "aspect %.2f), video aspect: %.2f",
668 sources_with_video, best_cols, best_rows, best_utilization * 100.0f, terminal_width, terminal_height,
669 (float)terminal_width / (float)terminal_height, terminal_visual_aspect, avg_aspect);
670}
671
683static image_t *create_multi_source_composite(image_source_t *sources, int source_count, int sources_with_video,
684 uint32_t target_client_id, unsigned short width, unsigned short height) {
685 (void)target_client_id; // Unused - composite is same for all clients now
686
687 // Calculate optimal grid layout using space-maximizing algorithm
688 int grid_cols, grid_rows;
689 calculate_optimal_grid_layout(sources, source_count, sources_with_video, width, height, &grid_cols, &grid_rows);
690
691 // Calculate composite dimensions in PIXELS for half-block mode
692 // Terminal dimensions are in CHARACTERS, need to convert to pixels:
693 // - Width: 1 char = 1 horizontal pixel
694 // - Height: 1 char = 2 vertical pixels (half-block = 2 pixels per char)
695 const int PIXELS_PER_CHAR_HEIGHT = 2;
696 int composite_width_px = width; // chars = pixels horizontally
697 int composite_height_px = height * PIXELS_PER_CHAR_HEIGHT; // chars * 2 = pixels vertically
698
699 // Create composite with final dimensions - no recreation needed
700 image_t *composite = image_new_from_pool(composite_width_px, composite_height_px);
701 image_clear(composite);
702
703 // Place each source in the grid
704 int video_source_index = 0; // Track only sources with video
705 for (int i = 0; i < source_count && video_source_index < 9; i++) { // Max 9 sources in 3x3 grid
706 if (!sources[i].image)
707 continue;
708
709 int row = video_source_index / grid_cols;
710 int col = video_source_index % grid_cols;
711 video_source_index++;
712
713 // Use actual composite dimensions for cell calculations
714 // Composite is now in PIXELS (already converted from terminal characters)
715 int cell_width_px = composite->w / grid_cols;
716 int cell_height_px = composite->h / grid_rows;
717
718 // Calculate aspect ratios
719 float src_aspect = (float)sources[i].image->w / (float)sources[i].image->h;
720 float cell_visual_aspect = (float)cell_width_px / (float)cell_height_px;
721
722 int target_width_px, target_height_px;
723
724 // CONTAIN strategy: Fill one dimension completely, let other scale down (never overflow)
725 // Compare aspects to decide which dimension to fill
726 if (src_aspect > cell_visual_aspect) {
727 // Video is WIDER than cell → fill WIDTH (height will be smaller)
728 target_width_px = cell_width_px;
729 target_height_px = (int)((cell_width_px / src_aspect) + 0.5f);
730 } else {
731 // Video is TALLER than cell → fill HEIGHT (width will be smaller)
732 target_height_px = cell_height_px;
733 target_width_px = (int)((cell_height_px * src_aspect) + 0.5f);
734 }
735
736 log_info("Cell %d: %dx%d px, video %.2f, cell %.2f → target %dx%d px (fill %s)", video_source_index - 1,
737 cell_width_px, cell_height_px, src_aspect, cell_visual_aspect, target_width_px, target_height_px,
738 (src_aspect > cell_visual_aspect) ? "WIDTH" : "HEIGHT");
739
740 // Create resized image with standard allocation
741 image_t *resized = image_new_from_pool(target_width_px, target_height_px);
742 image_resize(sources[i].image, resized);
743
744 // Calculate cell position in pixels (after any composite recreation)
745 int cell_x_offset_px = col * cell_width_px;
746 int cell_y_offset_px = row * cell_height_px;
747
748 // Grid centering strategy:
749 // - Multi-client: Apply padding ONLY to edge cells to center the grid as a whole
750 // (left column gets left padding, right column gets right padding)
751 // (top row gets top padding, bottom row gets bottom padding)
752 // This keeps cells edge-to-edge while centering the entire grid
753 // - Single client: Center the image within the cell
754 int x_padding_px, y_padding_px;
755
756 // Center images within their cells for all layouts
757 // This prevents gaps/stripes between clients
758 x_padding_px = (cell_width_px - target_width_px) / 2;
759 y_padding_px = (cell_height_px - target_height_px) / 2;
760
761 // Define cell boundaries for clipping (prevents bleeding into adjacent cells)
762 int cell_x_min = cell_x_offset_px;
763 int cell_x_max = cell_x_offset_px + cell_width_px - 1;
764 int cell_y_min = cell_y_offset_px;
765 int cell_y_max = cell_y_offset_px + cell_height_px - 1;
766
767 // Copy resized image to composite with cell boundary clipping
768 // This allows images to fill cells completely while preventing overlap
769
770 for (int y = 0; y < resized->h; y++) {
771 for (int x = 0; x < resized->w; x++) {
772 // Calculate destination position
773 int dst_x = cell_x_offset_px + x_padding_px + x;
774 int dst_y = cell_y_offset_px + y_padding_px + y;
775
776 // Clip to cell boundaries (prevents bleeding into adjacent cells)
777 if (dst_x < cell_x_min || dst_x > cell_x_max || dst_y < cell_y_min || dst_y > cell_y_max) {
778 continue; // Skip pixels outside cell boundaries
779 }
780
781 // Additional composite boundary check
782 if (dst_x < 0 || dst_x >= composite->w || dst_y < 0 || dst_y >= composite->h) {
783 continue; // Skip pixels outside composite
784 }
785
786 // Copy pixel
787 int src_idx = (y * resized->w) + x;
788 int dst_idx = (dst_y * composite->w) + dst_x;
789 composite->pixels[dst_idx] = resized->pixels[src_idx];
790 }
791 }
792
793 image_destroy_to_pool(resized);
794 }
795
796 return composite;
797}
798
808static char *convert_composite_to_ascii(image_t *composite, uint32_t target_client_id, unsigned short width,
809 unsigned short height) {
810 // LOCK OPTIMIZATION: Don't call find_client_by_id() - it would acquire rwlock unnecessarily
811 // Instead, the render thread already has snapshot of client state, so we just need palette data
812 // which is stable after initialization
813
814 // We need to find the client to access palette data, but we can do this without locking
815 // since palette is initialized once and never changes
816 client_info_t *render_client = NULL;
817
818 // Find client without locking - client_id is atomic and stable once set
819 for (int i = 0; i < MAX_CLIENTS; i++) {
821 if (atomic_load(&client->client_id) == target_client_id) {
822 render_client = client;
823 break;
824 }
825 }
826
827 if (!render_client) {
828 SET_ERRNO(ERROR_INVALID_STATE, "Per-client %u: Target client not found", target_client_id);
829 return NULL;
830 }
831
832 // Snapshot terminal capabilities WITHOUT holding rwlock
833 // Terminal caps are set once during handshake and never change, so this is safe
834 bool has_terminal_caps_snapshot = render_client->has_terminal_caps;
835 if (!has_terminal_caps_snapshot) {
836 SET_ERRNO(ERROR_INVALID_STATE, "Per-client %u: Terminal capabilities not received", target_client_id);
837 return NULL;
838 }
839
840 terminal_capabilities_t caps_snapshot = render_client->terminal_caps;
841
842 if (!render_client->client_palette_initialized) {
843 SET_ERRNO(ERROR_TERMINAL, "Client %u palette not initialized - cannot render frame", target_client_id);
844 return NULL;
845 }
846
847 // Render with client's custom palette using enhanced capabilities
848 // Palette data is stable after initialization, so no locking needed
849 const int h = caps_snapshot.render_mode == RENDER_MODE_HALF_BLOCK ? height * 2 : height;
850 char *ascii_frame =
851 ascii_convert_with_capabilities(composite, width, h, &caps_snapshot, true, false,
852 render_client->client_palette_chars, render_client->client_luminance_palette);
853
854 return ascii_frame;
855}
856
857/* ============================================================================
858 * Per-Client Video Mixing and Frame Generation
859 * ============================================================================
860 */
861
957// Compute hash of all active video sources for cache invalidation
958// Uses hardware-accelerated CRC32 for ultra-fast hashing
959char *create_mixed_ascii_frame_for_client(uint32_t target_client_id, unsigned short width, unsigned short height,
960 bool wants_stretch, size_t *out_size, bool *out_grid_changed,
961 int *out_sources_count) {
962 (void)wants_stretch; // Unused - we always handle aspect ratio ourselves
963
964 // Initialize output parameters
965 if (out_grid_changed) {
966 *out_grid_changed = false;
967 }
968 if (out_sources_count) {
969 *out_sources_count = 0;
970 }
971
972 if (!out_size || width == 0 || height == 0) {
974 "Invalid parameters for create_mixed_ascii_frame_for_client: width=%u, height=%u, out_size=%p", width,
975 height, out_size);
976 return NULL;
977 }
978
979 // Collect all active clients and their image sources
981 int source_count = collect_video_sources(sources, MAX_CLIENTS);
982
983 // Count sources that actually have video data
984 int sources_with_video = 0;
985 for (int i = 0; i < source_count; i++) {
986 if (sources[i].has_video && sources[i].image) {
987 sources_with_video++;
988 }
989 }
990
991 // Return the source count for debugging/tracking
992 if (out_sources_count) {
993 *out_sources_count = sources_with_video;
994 }
995
996 // GRID LAYOUT CHANGE DETECTION:
997 // Check if the number of active video sources has changed
998 // NOTE: We only UPDATE the count and SIGNAL the change via out parameter
999 // Broadcasting CLEAR_CONSOLE must happen AFTER the new frames are written to buffers
1000 // to prevent race condition where CLEAR arrives before new frame is ready
1001 int previous_count = atomic_load(&g_previous_active_video_count);
1002 if (sources_with_video != previous_count) {
1003 // Use compare-and-swap to ensure only ONE thread detects the change
1004 if (atomic_compare_exchange_strong(&g_previous_active_video_count, &previous_count, sources_with_video)) {
1005 log_info(
1006 "Grid layout changing: %d -> %d active video sources - caller will broadcast clear AFTER buffering frame",
1007 previous_count, sources_with_video);
1008 if (out_grid_changed) {
1009 *out_grid_changed = true; // Signal to caller
1010 }
1011 }
1012 }
1013
1014 // No active video sources - don't generate placeholder frames
1015 image_t *composite = NULL;
1016
1017 if (sources_with_video == 0) {
1018 *out_size = 0;
1019 // No active video sources for client - this isn't an error.
1020 // Return NULL to indicate no frame should be sent
1021 return NULL;
1022 }
1023
1024 if (sources_with_video == 1) {
1025 // Single source handling
1026 composite = create_single_source_composite(sources, source_count, target_client_id, width, height);
1027 } else {
1028 // Multiple sources - create grid layout
1029 composite =
1030 create_multi_source_composite(sources, source_count, sources_with_video, target_client_id, width, height);
1031 }
1032
1033 char *out = NULL;
1034
1035 if (!composite) {
1036 SET_ERRNO(ERROR_INVALID_STATE, "Per-client %u: Failed to create composite image", target_client_id);
1037 *out_size = 0;
1038 out = NULL;
1039 }
1040
1041 // Convert composite to ASCII using client capabilities
1042 char *ascii_frame = convert_composite_to_ascii(composite, target_client_id, width, height);
1043
1044 if (ascii_frame) {
1045 *out_size = strlen(ascii_frame);
1046 out = ascii_frame;
1047 } else {
1048 SET_ERRNO(ERROR_TERMINAL, "Per-client %u: Failed to convert image to ASCII", target_client_id);
1049 *out_size = 0;
1050 }
1051
1052 if (composite) {
1053 image_destroy_to_pool(composite);
1054 }
1055 for (int i = 0; i < source_count; i++) {
1056 if (sources[i].image) {
1057 image_destroy_to_pool(sources[i].image);
1058 }
1059 }
1060
1061 return out;
1062}
1063
1064/* ============================================================================
1065 * Frame Queuing and Delivery Functions
1066 * ============================================================================
1067 */
1068
1127// REMOVED: queue_ascii_frame_for_client - video now uses double buffer directly in client->outgoing_video_buffer
1128
1172int queue_audio_for_client(client_info_t *client, const void *audio_data, size_t data_size) {
1173 if (!client || !client->audio_queue || !audio_data || data_size == 0) {
1174 return -1;
1175 }
1176
1177 return packet_queue_enqueue(client->audio_queue, PACKET_TYPE_AUDIO, audio_data, data_size, 0, true);
1178}
1179
1193 // LOCK OPTIMIZATION: Don't acquire rwlock - all fields we access are atomic
1194 // client_id, active, is_sending_video are all atomic variables
1195
1196 // Iterate through all client slots
1197 for (int i = 0; i < MAX_CLIENTS; i++) {
1199
1200 // Skip uninitialized clients (atomic read)
1201 if (atomic_load(&client->client_id) == 0) {
1202 continue;
1203 }
1204
1205 // Check if client is active and sending video (both atomic reads)
1206 bool is_active = atomic_load(&client->active);
1207 bool is_sending = atomic_load(&client->is_sending_video);
1208
1209 if (is_active && is_sending) {
1210 return true;
1211 }
1212 }
1213
1214 return false;
1215}
🖼️ ASCII Art Conversion and Output Interface
#define CHAR_ASPECT
📐 Aspect Ratio Calculation Functions
🗃️ Lock-Free Unified Memory Buffer Pool with Lazy Allocation
🔄 Network byte order conversion helpers
#define NET_TO_HOST_U32(val)
Definition endian.h:86
buffer_pool_t * buffer_pool_get_global(void)
void buffer_pool_free(buffer_pool_t *pool, void *data, size_t size)
Free a buffer back to the pool (lock-free)
void * buffer_pool_alloc(buffer_pool_t *pool, size_t size)
Allocate a buffer from the pool (lock-free fast path)
#define SAFE_MALLOC_ALIGNED(size, alignment, cast)
Definition common.h:293
unsigned int uint32_t
Definition common.h:58
#define SAFE_FREE(ptr)
Definition common.h:320
#define read_u32_unaligned
Definition common.h:405
unsigned char uint8_t
Definition common.h:56
#define SET_ERRNO(code, context_msg,...)
Set error code with custom context message and log it.
@ ERROR_INVALID_STATE
@ ASCIICHAT_OK
Definition error_codes.h:48
@ ERROR_INVALID_PARAM
@ ERROR_TERMINAL
Definition error_codes.h:66
#define MAX_CLIENTS
Maximum possible clients (static array size) - actual runtime limit set by –max-clients (1-32)
Definition limits.h:23
#define LOG_RATE_NORMAL
Log rate limit: 3 seconds (3,000,000 microseconds)
Definition log_rates.h:29
#define log_debug_every(interval_us, fmt,...)
Rate-limited DEBUG logging.
#define log_info(...)
Log an INFO message.
int packet_queue_enqueue(packet_queue_t *queue, packet_type_t type, const void *data, size_t data_len, uint32_t client_id, bool copy_data)
Enqueue a packet into the queue.
@ PACKET_TYPE_AUDIO
Single audio packet (legacy)
Definition packet.h:291
pthread_rwlock_t rwlock_t
Read-write lock type (POSIX: pthread_rwlock_t)
Definition rwlock.h:40
@ RENDER_MODE_HALF_BLOCK
Unicode half-block characters (mixed foreground/background)
Definition terminal.h:473
void calculate_fit_dimensions_pixel(int img_width, int img_height, int max_width, int max_height, int *out_width, int *out_height)
Calculate fit dimensions for pixel-based images.
const video_frame_t * video_frame_get_latest(video_frame_buffer_t *vfb)
Reader API: Get latest frame if available.
void image_destroy_to_pool(image_t *image)
Destroy an image allocated from buffer pool.
void image_clear(image_t *p)
Clear image (set all pixels to black)
void image_resize(const image_t *s, image_t *d)
Resize image using nearest-neighbor interpolation.
image_t * image_new_from_pool(size_t width, size_t height)
Create a new image from buffer pool.
char * ascii_convert_with_capabilities(image_t *original, const ssize_t width, const ssize_t height, const terminal_capabilities_t *caps, const bool use_aspect_ratio, const bool stretch, const char *palette_chars, const char luminance_palette[256])
Convert image to ASCII art with terminal capability awareness.
Definition ascii.c:188
🔢 Mathematical Utility Functions
📬 Thread-safe packet queue system for per-client send threads
Lock-Free Ring Buffer and Frame Buffer Management.
atomic_bool g_server_should_exit
Global shutdown flag from main.c.
Per-client state management and lifecycle orchestration.
bool any_clients_sending_video(void)
Check if any connected clients are currently sending video.
Definition stream.c:1192
rwlock_t g_client_manager_rwlock
Reader-writer lock protecting the global client manager.
client_manager_t g_client_manager
Global client manager singleton - central coordination point.
char * create_mixed_ascii_frame_for_client(uint32_t target_client_id, unsigned short width, unsigned short height, bool wants_stretch, size_t *out_size, bool *out_grid_changed, int *out_sources_count)
Generate personalized ASCII frame for a specific client.
Definition stream.c:959
int queue_audio_for_client(client_info_t *client, const void *audio_data, size_t data_size)
Queue ASCII frame for delivery to specific client.
Definition stream.c:1172
Multi-client video mixing and ASCII frame generation.
Unified buffer pool with lock-free fast path.
Definition buffer_pool.h:90
Per-client state structure for server-side client management.
terminal_capabilities_t terminal_caps
atomic_uint client_id
char client_palette_chars[256]
video_frame_buffer_t * incoming_video_buffer
bool client_palette_initialized
packet_queue_t * audio_queue
char client_luminance_palette[256]
atomic_bool is_sending_video
atomic_bool active
Global client manager structure for server-side client coordination.
client_info_t clients[MAX_CLIENTS]
Array of client_info_t structures (backing storage)
Image source structure for multi-client video mixing.
Definition stream.c:229
uint32_t client_id
Unique client identifier for this source.
Definition stream.c:233
bool has_video
Whether this client has active video stream.
Definition stream.c:235
image_t * image
Pointer to client's current video frame (owned by buffer system)
Definition stream.c:231
Image structure.
int w
Image width in pixels (must be > 0)
int h
Image height in pixels (must be > 0)
rgb_pixel_t * pixels
Pixel data array (width * height RGB pixels, row-major order)
Multi-source frame structure for multi-user support.
Definition ringbuffer.h:399
char * data
Pointer to frame data (not owned by this struct)
Definition ringbuffer.h:411
uint32_t timestamp
Timestamp when frame was captured.
Definition ringbuffer.h:407
uint32_t source_client_id
Client ID that sent this frame.
Definition ringbuffer.h:403
size_t size
Actual size of frame data in bytes.
Definition ringbuffer.h:409
Complete terminal capabilities structure.
Definition terminal.h:485
render_mode_t render_mode
Preferred rendering mode (render_mode_t)
Definition terminal.h:497
Video frame buffer manager.
Video frame structure.
uint64_t capture_timestamp_us
Timestamp when frame was captured (microseconds)
size_t size
Size of frame data in bytes.
void * data
Frame data pointer (points to pre-allocated buffer)
⏱️ High-precision timing utilities using sokol_time.h and uthash
asciichat_error_t image_validate_dimensions(size_t width, size_t height)
Validate image dimensions (non-zero, within limits)
Definition util/image.c:100
asciichat_error_t image_calc_rgb_size(size_t width, size_t height, size_t *out_size)
Calculate total RGB buffer size from dimensions.
Definition util/image.c:54
🖼️ Safe overflow-checked buffer size calculations for images and video frames
Image Data Structures and Operations.