Line data Source code
1 : /*
2 : * PeerTalk Internal Declarations
3 : * Platform abstraction, internal peer structures, context definition
4 : */
5 :
6 : #ifndef PT_INTERNAL_H
7 : #define PT_INTERNAL_H
8 :
9 : #include "pt_types.h"
10 : #include "pt_log.h"
11 : #include "send.h" /* Phase 3: pt_batch type */
12 : #include "direct_buffer.h" /* Tier 2: large message buffers */
13 :
14 : /* ========================================================================== */
15 : /* PT_Log Integration (Phase 0) */
16 : /* ========================================================================== */
17 :
18 : /**
19 : * Convenience Macros for Context-Based Logging
20 : *
21 : * These macros extract the log handle from a pt_context pointer,
22 : * reducing boilerplate in PeerTalk internal code.
23 : *
24 : * Usage:
25 : * PT_CTX_ERR(ctx, PT_LOG_CAT_NETWORK, "Connection failed: %d", err);
26 : * PT_CTX_INFO(ctx, PT_LOG_CAT_INIT, "PeerTalk initialized");
27 : */
/* Log at ERROR level via the context's PT_Log handle. */
#define PT_CTX_ERR(ctx, cat, ...) \
    PT_LOG_ERR((ctx)->log, cat, __VA_ARGS__)

/* Log at WARN level via the context's PT_Log handle. */
#define PT_CTX_WARN(ctx, cat, ...) \
    PT_LOG_WARN((ctx)->log, cat, __VA_ARGS__)

/* Log at INFO level via the context's PT_Log handle. */
#define PT_CTX_INFO(ctx, cat, ...) \
    PT_LOG_INFO((ctx)->log, cat, __VA_ARGS__)

/* Log at DEBUG level via the context's PT_Log handle. */
#define PT_CTX_DEBUG(ctx, cat, ...) \
    PT_LOG_DEBUG((ctx)->log, cat, __VA_ARGS__)
39 :
40 : /* ========================================================================== */
41 : /* Platform Abstraction Layer */
42 : /* ========================================================================== */
43 :
/**
 * Platform-specific operations
 *
 * Each backend (POSIX, MacTCP, Open Transport, AppleTalk) provides one of
 * these tables; core code dispatches through ctx->plat.
 */
typedef struct {
    int (*init)(struct pt_context *ctx);        /* One-time platform startup */
    void (*shutdown)(struct pt_context *ctx);   /* Release platform resources */
    int (*poll)(struct pt_context *ctx);        /* Full poll cycle */
    int (*poll_fast)(struct pt_context *ctx);   /* TCP I/O only */
    pt_tick_t (*get_ticks)(void);               /* Tick source: TickCount on Mac, ms on POSIX */
    unsigned long (*get_free_mem)(void);        /* Free-memory figure reported by platform */
    unsigned long (*get_max_block)(void);       /* Largest allocatable block reported by platform */
    int (*send_udp)(struct pt_context *ctx, struct pt_peer *peer,
                    const void *data, uint16_t len);

    /* Async send pipeline ops (NULL if platform doesn't support/need async) */
    int (*tcp_send_async)(struct pt_context *ctx, struct pt_peer *peer,
                          const void *data, uint16_t len, uint8_t flags);
    int (*poll_send_completions)(struct pt_context *ctx,
                                 struct pt_peer *peer);
    int (*send_slots_available)(struct pt_context *ctx,
                                struct pt_peer *peer);
    int (*pipeline_init)(struct pt_context *ctx, struct pt_peer *peer);
    void (*pipeline_cleanup)(struct pt_context *ctx, struct pt_peer *peer);
} pt_platform_ops;
68 :
69 : /* Platform ops implementations (defined in platform-specific files) */
70 : #ifdef PT_PLATFORM_POSIX
71 : extern pt_platform_ops pt_posix_ops;
72 : #endif
73 :
74 : #ifdef PT_PLATFORM_MACTCP
75 : extern pt_platform_ops pt_mactcp_ops;
76 : #endif
77 :
78 : #ifdef PT_PLATFORM_OT
79 : extern pt_platform_ops pt_ot_ops;
80 : #endif
81 :
82 : #if defined(PT_PLATFORM_APPLETALK) || defined(PT_HAS_APPLETALK)
83 : extern pt_platform_ops pt_appletalk_ops;
84 : #endif
85 :
86 : /* ========================================================================== */
87 : /* Async Send Pipeline Structures */
88 : /* ========================================================================== */
89 :
/**
 * Platform-agnostic WDS entry (matches MacTCP wdsEntry layout)
 *
 * On MacTCP: used directly for TCPSend WDS array
 * On POSIX: not used (kernel handles buffering)
 * On OT: similar to TNetbuf
 *
 * A WDS array is terminated by a zero-length sentinel entry.
 */
typedef struct {
    uint16_t length;  /* Length of buffer */
    void *ptr;        /* Pointer to buffer data */
} pt_wds_entry;       /* NOTE(review): 6 bytes per entry with 4-byte pointers */
101 :
/**
 * Send slot - holds one in-flight async message
 *
 * Design notes:
 * - WDS embedded to avoid separate allocation
 * - ioResult cached locally for cache-efficient polling (avoids pointer chase)
 * - Hot fields (buffer, platform_data, ioResult) grouped first
 *
 * NOTE(review): earlier comments claimed 24 bytes total with an 8-byte
 * wds[2]; with 4-byte pointers pt_wds_entry is 6 bytes, so wds[2] is 12
 * bytes and the struct is ~28 bytes. Verify before depending on the size.
 *
 * Per MacTCP Guide (Lines 2959-2961): "You must not modify or relocate the
 * WDS and the buffers it describes until the TCPSend command has been completed."
 */
typedef struct {
    uint8_t *buffer;            /* Message buffer (header + payload + CRC) */
    void *platform_data;        /* Platform-specific (TCPiopb*, etc.) */
    pt_wds_entry wds[2];        /* WDS[0]=message, WDS[1]=zero-length sentinel */
    volatile int16_t ioResult;  /* Cached from pb->ioResult for fast polling */
    uint16_t message_len;       /* Actual message length */
    uint8_t in_use;             /* 1 if send pending */
    uint8_t completed;          /* 1 if send finished (success or error) */
    uint16_t buffer_size;       /* Allocated size (cold - only at init) */
} pt_send_slot;
123 :
/**
 * Send pipeline - manages async send slots for a peer
 *
 * Hot field (pending_count) first for cache locality on 68030 (256-byte cache).
 *
 * Memory per peer (standard build, depth=4):
 * - 4 x pt_send_slot = 96 bytes
 * - 4 x buffer (~4KB each) = 16,448 bytes
 * - 4 x TCPiopb (~92 bytes) = 368 bytes
 * - Total: ~17KB per peer
 *
 * Memory per peer (lowmem build, depth=2):
 * - 2 x pt_send_slot = 48 bytes
 * - 2 x buffer (~1KB each) = 2,080 bytes
 * - 2 x TCPiopb (~92 bytes) = 184 bytes
 * - Total: ~2.3KB per peer
 *
 * NOTE(review): slot figures assume sizeof(pt_send_slot) == 24; with
 * 4-byte pointers it is closer to 28 bytes, so totals are approximate.
 */
typedef struct {
    uint8_t pending_count;  /* Hot: checked every poll */
    uint8_t next_slot;      /* Warm: checked on send */
    uint8_t initialized;    /* Cold: rarely checked */
    uint8_t reserved;       /* Padding / reserved */
    pt_send_slot slots[PT_SEND_PIPELINE_DEPTH];  /* One slot per in-flight send */
} pt_send_pipeline;
148 :
149 : /* ========================================================================== */
150 : /* Peer Capability Structure */
151 : /* ========================================================================== */
152 :
/**
 * Per-peer capability storage
 *
 * Stored in pt_peer_cold (rarely accessed after negotiation).
 * Effective max is cached in pt_peer_hot for fast send-path access.
 *
 * Flow control: We track last_reported_pressure to detect when our local
 * pressure has changed significantly (crosses PT_PRESSURE_* thresholds).
 * When it changes, we send a capability update to inform the peer.
 * The peer stores our pressure in buffer_pressure and throttles sends.
 */
typedef struct {
    /* Negotiated limits (filled in during capability exchange) */
    uint16_t max_message_size;  /* Peer's max (256-8192), 0=unknown */
    uint16_t preferred_chunk;   /* Optimal chunk size */
    uint16_t capability_flags;  /* PT_CAPFLAG_* */
    uint16_t recv_buffer_size;  /* Peer's receive buffer size (0=unknown, default 8192) */
    uint16_t optimal_chunk;     /* Peer's 25% threshold (recv_buf/4) - optimal send size */
    uint16_t send_window;       /* Flow control: max queued messages (auto-calculated) */

    /* Pressure / flow-control bookkeeping */
    uint8_t buffer_pressure;          /* 0-100: peer's reported constraint level */
    uint8_t caps_exchanged;           /* 1 after exchange complete */
    uint8_t last_reported_pressure;   /* 0-100: what we last told peer */
    uint8_t pressure_update_pending;  /* 1 if need to send pressure update */
    uint8_t first_send_logged;        /* 1 after logging first send effective_max */
    uint8_t compact_mode;             /* 1 if compact headers negotiated with peer */
    uint8_t push_preferred;           /* 1 if peer needs pushFlag=1 always */
    uint8_t last_ibuf_pressure;       /* MacTCP: ibuf level when last update sent */
    uint8_t peak_ibuf_pressure;       /* MacTCP: peak ibuf during this poll cycle */
    uint8_t last_reported_ibuf_level; /* MacTCP: last threshold level (0/25/50/75) */

    /* Rate limiting state (token bucket algorithm)
     * When peer reports high pressure, we auto-throttle sends to avoid
     * overwhelming them. rate_limit_bytes_per_sec=0 means no rate limit. */
    uint32_t rate_limit_bytes_per_sec;  /* 0 = unlimited */
    uint32_t rate_bucket_tokens;        /* Available tokens (bytes) */
    uint32_t rate_bucket_max;           /* Max token accumulation */
    pt_tick_t rate_last_update;         /* Last token refill time */

    /* Capability send rate limiting.
     * We must not flood the peer with capability messages, especially when
     * their ibuf is congested (e.g., after a heavy receive phase). Cap updates
     * are rate-limited to at most one per PT_CAP_MIN_INTERVAL_TICKS ticks.
     * Units match platform get_ticks(): TickCount on Mac, ms on POSIX. */
    pt_tick_t cap_last_sent;  /* Tick count when last capability was sent */
} pt_peer_caps;
197 :
198 : /* ========================================================================== */
199 : /* Fragment Reassembly State */
200 : /* ========================================================================== */
201 :
/**
 * Per-peer fragment reassembly state
 *
 * Uses existing recv_direct buffer for storage. Only one message
 * can be reassembled at a time per peer.
 */
typedef struct {
    uint16_t message_id;       /* Current message being reassembled */
    uint16_t total_length;     /* Expected total message size */
    uint16_t received_length;  /* Bytes received so far */
    uint8_t active;            /* 1 if reassembly in progress */
    uint8_t reserved;          /* Padding to even size */
} pt_reassembly_state;
215 :
216 : /* ========================================================================== */
217 : /* Stream Transfer State */
218 : /* ========================================================================== */
219 :
/**
 * Per-peer stream transfer state
 *
 * Tracks an active PeerTalk_StreamSend() operation. Only one stream
 * can be active per peer at a time.
 */
typedef struct {
    const uint8_t *data;    /* Pointer to user's data buffer */
    void *user_data;        /* User callback context */
    void *on_complete;      /* PeerTalk_StreamCompleteCB (void* to avoid include) */
    uint32_t total_length;  /* Total bytes to send */
    uint32_t bytes_sent;    /* Bytes sent so far */
    uint8_t active;         /* 1 if stream in progress */
    uint8_t cancelled;      /* 1 if cancel requested */
    uint8_t reserved[2];    /* Padding */
} pt_peer_stream;
236 :
237 : /* ========================================================================== */
238 : /* Internal Peer Address Structure */
239 : /* ========================================================================== */
240 :
/* Maximum transport addresses remembered per peer */
#define PT_MAX_PEER_ADDRESSES 2

/**
 * Per-peer address entry
 */
typedef struct {
    uint32_t address;   /* IP or synthesized AppleTalk address */
    uint16_t port;      /* Transport port number */
    uint16_t transport; /* PT_TRANSPORT_* */
} pt_peer_address;      /* 8 bytes */
251 :
252 : /* ========================================================================== */
253 : /* Internal Peer Structure - Hot/Cold Split */
254 : /* ========================================================================== */
255 :
/**
 * Hot peer data - accessed every poll cycle
 * Optimized for cache efficiency (designed for 68030 32-byte cache lines)
 */
typedef struct {
    void *connection;            /* Platform-specific connection handle (Phase 5) */
    uint32_t magic;              /* PT_PEER_MAGIC - validation */
    pt_tick_t last_seen;         /* Last activity timestamp */
    PeerTalk_PeerID id;          /* Public peer ID (key into ctx->peer_id_to_index) */
    uint16_t peer_flags;         /* PT_PEER_FLAG_* from discovery */
    uint16_t latency_ms;         /* Estimated RTT */
    uint16_t effective_max_msg;  /* min(ours, theirs) - cached for send path */
    uint16_t effective_chunk;    /* Adaptive chunk size based on RTT */
    pt_peer_state state;         /* Connection state machine value */
    uint8_t address_count;       /* Valid entries in cold.addresses[] */
    uint8_t preferred_transport; /* PT_TRANSPORT_* preference */
    uint8_t send_seq;            /* Send sequence number (Phase 2) */
    uint8_t recv_seq;            /* Receive sequence number (Phase 2) */
    uint8_t name_idx;            /* Index into context name table */
    uint8_t pipeline_depth;     /* Adaptive pipeline depth based on RTT */
} pt_peer_hot;
277 :
/**
 * Cold peer data - accessed infrequently
 */
typedef struct {
    char name[PT_MAX_PEER_NAME + 1];                  /* 32 bytes */
    PeerTalk_PeerInfo info;                           /* ~20 bytes */
    pt_peer_address addresses[PT_MAX_PEER_ADDRESSES]; /* 16 bytes */
    pt_tick_t last_discovery;        /* Tick of last discovery activity -- confirm against discovery code */
    PeerTalk_PeerStats stats;        /* Per-peer counters */
    pt_tick_t ping_sent_time;        /* Tick when ping was sent (presumably for RTT sampling) */
    uint16_t rtt_samples[8];         /* Rolling RTT samples */
    uint8_t rtt_index;               /* Next write slot in rtt_samples (rolling) */
    uint8_t rtt_count;               /* Number of valid samples collected */
    pt_peer_caps caps;               /* Peer capability info */
    pt_reassembly_state reassembly;  /* Fragment reassembly state */
    uint8_t obuf[PT_FRAME_BUF_SIZE]; /* Output framing buffer */
    uint8_t ibuf[PT_FRAME_BUF_SIZE]; /* Input framing buffer */
    uint16_t obuflen;                /* Bytes currently held in obuf */
    uint16_t ibuflen;                /* Bytes currently held in ibuf */
#ifdef PT_DEBUG
    uint32_t obuf_canary;            /* Overrun detection for obuf (debug builds) */
    uint32_t ibuf_canary;            /* Overrun detection for ibuf (debug builds) */
#endif
} pt_peer_cold;
302 :
/**
 * Complete peer structure
 *
 * Hot/cold split keeps the per-poll working set small. The queue pointers
 * and direct buffers implement the two-tier message path: small control
 * messages in 256-byte queue slots, large payloads in 4KB direct buffers.
 */
struct pt_peer {
    pt_peer_hot hot;              /* 32 bytes - frequently accessed */
    pt_peer_cold cold;            /* ~1.4KB - rarely accessed */
    struct pt_queue *send_queue;  /* Tier 1: 256-byte slots for control messages */
    struct pt_queue *recv_queue;  /* Tier 1: 256-byte slots for control messages */
    pt_direct_buffer send_direct; /* Tier 2: 4KB buffer for large outgoing messages */
    pt_direct_buffer recv_direct; /* Tier 2: 4KB buffer for large incoming messages */
    pt_peer_stream stream;        /* Active stream transfer state */
    pt_send_pipeline pipeline;    /* Async send pipeline (MacTCP/OT optimization) */
};
316 :
317 : /* ========================================================================== */
318 : /* Internal Context Structure */
319 : /* ========================================================================== */
320 :
/* Peer IDs are 1..PT_MAX_PEER_ID-1; ID 0 is invalid (see pt_find_peer_by_id) */
#define PT_MAX_PEER_ID 256

/**
 * PeerTalk context (opaque to public API)
 *
 * Allocated with pt_plat_extra_size() extra bytes appended so the active
 * platform backend can keep private state after this struct.
 */
struct pt_context {
    uint32_t magic;                    /* PT_CONTEXT_MAGIC */
    PeerTalk_Config config;            /* Caller's configuration (stored by value) */
    PeerTalk_Callbacks callbacks;      /* Caller's event callbacks (stored by value) */
    pt_platform_ops *plat;             /* Active platform ops table */
    PeerTalk_PeerInfo local_info;      /* This node's own peer info */
    PeerTalk_GlobalStats global_stats; /* Aggregate statistics */
    struct pt_peer *peers;             /* Array of peers */

    /* O(1) Peer ID Lookup Table */
    uint8_t peer_id_to_index[PT_MAX_PEER_ID]; /* 0xFF = invalid */

    /* Centralized Name Table */
    char peer_names[PT_MAX_PEERS][PT_MAX_PEER_NAME + 1];

    uint32_t next_message_id;      /* Next outgoing message ID */
    uint32_t peers_version;        /* Increments when peers added/removed */
    uint16_t local_flags;          /* Local flag bits (cf. peer_flags in pt_peer_hot) */
    uint16_t max_peers;            /* Capacity of peers[] */
    uint16_t peer_count;           /* Live entries in peers[] (kept dense; see pt_free_peer) */
    PeerTalk_PeerID next_peer_id;  /* Next peer ID to assign */
    uint16_t available_transports; /* Transports present -- presumably PT_TRANSPORT_* bits */
    uint16_t active_transports;    /* Transports currently in use -- presumably PT_TRANSPORT_* bits */
    uint16_t log_categories;       /* Enabled log category bits */
    uint8_t discovery_active;      /* 1 while discovery is running */
    uint8_t listening;             /* 1 while accepting incoming connections */
    uint8_t initialized;           /* 1 after successful init */
    uint8_t reserved_byte;         /* Padding */

    PT_Log *log;  /* PT_Log handle from Phase 0 */

    /* Phase 3: Pre-allocated batch buffer (avoids 1.4KB stack allocation) */
    pt_batch send_batch;  /* For pt_drain_send_queue() */

    /* Two-tier message queue configuration */
    uint16_t direct_threshold;   /* Messages > this go to Tier 2 (default 256) */
    uint16_t direct_buffer_size; /* Tier 2 buffer size (default 4096) */

    /* Capability negotiation configuration */
    uint16_t local_max_message;      /* Our max message size (0=8192) */
    uint16_t local_preferred_chunk;  /* Our preferred chunk (0=1024) */
    uint16_t local_capability_flags; /* Our PT_CAPFLAG_* */
    uint8_t enable_fragmentation;    /* 1=auto-fragment (default 1) */
    uint8_t owns_buffer_pool;        /* 1=we allocated buffer_pool, must free */

    /* Configurable pressure thresholds (from config, with defaults applied) */
    uint8_t pressure_medium;   /* Default: PT_PRESSURE_MEDIUM (50) */
    uint8_t pressure_high;     /* Default: PT_PRESSURE_HIGH (85) */
    uint8_t pressure_critical; /* Default: PT_PRESSURE_CRITICAL (95) */
    uint8_t pressure_frag;     /* Default: PT_PRESSURE_FRAG_THRESHOLD (75) */

    /* Connection timeout (ms) */
    uint16_t connect_timeout;  /* Default: 30000 */

    /* Platform-specific data follows (allocated via pt_plat_extra_size) */
};
382 :
383 : /* ========================================================================== */
384 : /* Validation Functions */
385 : /* ========================================================================== */
386 :
387 : /**
388 : * Validate context magic number (inline for performance)
389 : */
390 2 : static inline int pt_context_valid(const struct pt_context *ctx)
391 : {
392 2 : return ctx != NULL && ctx->magic == PT_CONTEXT_MAGIC;
393 : }
394 :
395 : /**
396 : * Validate peer magic number (inline for performance)
397 : */
398 : static inline int pt_peer_valid(const struct pt_peer *peer)
399 : {
400 : return peer != NULL && peer->hot.magic == PT_PEER_MAGIC;
401 : }
402 :
403 : /**
404 : * Validate context structure (full validation)
405 : */
406 : int pt_validate_context(struct pt_context *ctx);
407 :
408 : /**
409 : * Validate peer structure (full validation)
410 : */
411 : int pt_validate_peer(struct pt_peer *peer);
412 :
413 : /**
414 : * Validate configuration
415 : */
416 : int pt_validate_config(const PeerTalk_Config *config);
417 :
/* Validation macros for debug builds
 *
 * NOTE: the argument is evaluated more than once; pass a plain pointer
 * variable, never an expression with side effects.
 *
 * Fixes vs. earlier revision:
 * - macro parameter is parenthesized at every use (a cast or ?: argument
 *   previously mis-parsed);
 * - magic is printed as unsigned long with %08lX, since uint32_t may be
 *   unsigned long on 68k targets where int is 16 bits (format/argument
 *   mismatch is undefined behavior).
 */
#ifdef PT_DEBUG
#define PT_VALIDATE_CONTEXT(ctx) \
    do { \
        if (!pt_context_valid(ctx)) { \
            PT_Log_Err((ctx) ? (ctx)->log : NULL, PT_LOG_CORE, \
                       "Invalid context magic: 0x%08lX", \
                       (ctx) ? (unsigned long)(ctx)->magic : 0UL); \
            return PT_ERR_INVALID_PARAM; \
        } \
    } while (0)

#define PT_VALIDATE_PEER(peer) \
    do { \
        if (!pt_peer_valid(peer)) { \
            return PT_ERR_INVALID_PARAM; \
        } \
    } while (0)
#else
#define PT_VALIDATE_CONTEXT(ctx) ((void)0)
#define PT_VALIDATE_PEER(peer) ((void)0)
#endif
440 :
441 : /* ========================================================================== */
442 : /* Peer Management Functions */
443 : /* ========================================================================== */
444 :
445 : /**
446 : * O(1) peer lookup by ID
447 : *
448 : * Returns NULL if peer not found
449 : */
450 : static inline struct pt_peer *pt_find_peer_by_id(struct pt_context *ctx, PeerTalk_PeerID id)
451 : {
452 : uint8_t index;
453 :
454 : if (id == 0 || id >= PT_MAX_PEER_ID)
455 : return NULL;
456 :
457 : index = ctx->peer_id_to_index[id];
458 : if (index == 0xFF || index >= ctx->peer_count)
459 : return NULL;
460 :
461 : return &ctx->peers[index];
462 : }
463 :
464 : /**
465 : * Linear scan for address lookup (called rarely)
466 : */
467 : struct pt_peer *pt_find_peer_by_address(struct pt_context *ctx, uint32_t addr, uint16_t port);
468 :
469 : /**
470 : * Linear scan for name lookup (cold path)
471 : */
472 : struct pt_peer *pt_find_peer_by_name(struct pt_context *ctx, const char *name);
473 :
474 : /**
475 : * Allocate peer slot (uses swap-back removal for O(1) allocation)
476 : */
477 : struct pt_peer *pt_alloc_peer(struct pt_context *ctx);
478 :
479 : /**
480 : * Free peer slot (uses swap-back removal for O(1) deallocation)
481 : *
482 : * Algorithm:
483 : * 1. Copy last peer to removed slot
484 : * 2. Update peer_id_to_index for moved peer's ID
485 : * 3. Decrement peer_count
486 : * 4. Invalidate old last slot
487 : *
488 : * IMPORTANT: Peer ordering is NOT preserved. Iterate by peer_id if ordering matters.
489 : */
490 : void pt_free_peer(struct pt_context *ctx, struct pt_peer *peer);
491 :
492 : /* ========================================================================== */
493 : /* Name Table Access */
494 : /* ========================================================================== */
495 :
496 : /**
497 : * Get peer name from centralized table
498 : */
499 : const char *pt_get_peer_name(struct pt_context *ctx, uint8_t name_idx);
500 :
501 : /**
502 : * Allocate name slot in centralized table
503 : */
504 : uint8_t pt_alloc_peer_name(struct pt_context *ctx, const char *name);
505 :
506 : /**
507 : * Free name slot in centralized table
508 : */
509 : void pt_free_peer_name(struct pt_context *ctx, uint8_t name_idx);
510 :
511 : /* ========================================================================== */
512 : /* Platform-Specific Allocation */
513 : /* ========================================================================== */
514 :
515 : /**
516 : * Platform-agnostic memory allocation
517 : */
518 : void *pt_plat_alloc(size_t size);
519 :
520 : /**
521 : * Platform-agnostic memory deallocation
522 : */
523 : void pt_plat_free(void *ptr);
524 :
525 : /**
526 : * Platform-specific extra context size
527 : */
528 : size_t pt_plat_extra_size(void);
529 :
530 : /* ========================================================================== */
531 : /* Buffer Pool Internal Functions */
532 : /* ========================================================================== */
533 :
534 : /**
535 : * Get a buffer from the pool (marks it in-use).
536 : * Returns NULL if pool is NULL or exhausted.
537 : */
538 : void *pt_buffer_pool_get(PeerTalk_BufferPool *pool);
539 :
540 : /**
541 : * Return a buffer to the pool (marks it available).
542 : * Returns 1 if buffer was from this pool, 0 otherwise.
543 : */
544 : int pt_buffer_pool_return(PeerTalk_BufferPool *pool, void *buffer);
545 :
546 : /**
547 : * Get the buffer size for a pool.
548 : * Returns 0 if pool is NULL.
549 : */
550 : uint32_t pt_buffer_pool_size(const PeerTalk_BufferPool *pool);
551 :
552 : #endif /* PT_INTERNAL_H */
|