[ai] tcp optimizations
This commit is contained in:
parent ea4461cc54
commit 34c499bd49
@@ -13,14 +13,14 @@ platform = https://github.com/pioarduino/platform-espressif32/releases/download/
 board = wemos_d1_mini32
 framework = arduino
 lib_deps =
-    ESP32Async/ESPAsyncWebServer@3.7.10
+    ESP32Async/ESPAsyncWebServer@3.8.1
     alanswx/ESPAsyncWiFiManager@0.31
     miguelbalboa/MFRC522@^1.4.12
 monitor_speed = 115200
 build_flags =
     -Os ; Optimize for size
     ; -DDEBUG ; Hannabox Debugging
-    ; -DCORE_DEBUG_LEVEL=0 ; Disable all debug output
+    -DCORE_DEBUG_LEVEL=0 ; Disable all debug output
     ; -DARDUINO_LOOP_STACK_SIZE=4096 ; Balanced to avoid stack canary without starving heap
     ; -DWIFI_TASK_STACK_SIZE=3072 ; Reduce WiFi task stack
     ; -DARDUINO_EVENT_TASK_STACK_SIZE=2048 ; Reduce event task stack
@@ -29,7 +29,7 @@ build_flags =
     ; -DCONFIG_ASYNC_TCP_MAX_ACK_TIME=3000
     ; -DCONFIG_ASYNC_TCP_PRIORITY=10 ; (keep default)
     ; -DCONFIG_ASYNC_TCP_QUEUE_SIZE=64 ; (keep default)
-    ; -DCONFIG_ASYNC_TCP_RUNNING_CORE=1 ; force async_tcp task to be on same core as Arduino app (default is any core)
+    -DCONFIG_ASYNC_TCP_RUNNING_CORE=1 ; force async_tcp task to be on same core as Arduino app (default is any core)
     -DCONFIG_ASYNC_TCP_STACK_SIZE=4096 ; reduce AsyncTCP task stack (default can be large)
 monitor_filters = esp32_exception_decoder
 board_build.partitions = huge_app.csv
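Only two of the previously commented-out flags are activated here: -DCORE_DEBUG_LEVEL=0 silences the ESP32 core's log output, and -DCONFIG_ASYNC_TCP_RUNNING_CORE=1 forces the async_tcp task onto the same core as the Arduino app instead of letting FreeRTOS place it on either core. A minimal sketch of what pinning a task to a build-time-selected core looks like with the FreeRTOS API used on the ESP32; this is illustrative only, not the library's source, and EXAMPLE_RUNNING_CORE, the task name, stack size and priority are placeholders:

#include <Arduino.h>

// Illustrative only: how a core chosen by a -D build flag is typically consumed.
// AsyncTCP does something along these lines with CONFIG_ASYNC_TCP_RUNNING_CORE.
#ifndef EXAMPLE_RUNNING_CORE
#define EXAMPLE_RUNNING_CORE 1
#endif

static void exampleServiceTask(void *arg) {
    for (;;) {
        // ... service TCP events here ...
        vTaskDelay(pdMS_TO_TICKS(10));
    }
}

static void startExampleServiceTask() {
    // Pin the task to the core selected at build time instead of letting the
    // scheduler run it on whichever core is free.
    xTaskCreatePinnedToCore(exampleServiceTask, "example_svc", 4096, nullptr, 5, nullptr, EXAMPLE_RUNNING_CORE);
}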
src/main.cpp (206 changed lines)
@@ -958,6 +958,29 @@ static void streamStateJSON(Print &out)
     out.print(F("}"));
 }
 
+struct ChunkedSkipBufferPrint : public Print {
+    uint8_t* out;
+    size_t maxLen;
+    size_t pos;
+    size_t skip;
+    size_t seen;
+    ChunkedSkipBufferPrint(uint8_t* o, size_t m, size_t s) : out(o), maxLen(m), pos(0), skip(s), seen(0) {}
+    virtual size_t write(uint8_t c) {
+        seen++;
+        if (skip > 0) { skip--; return 1; }
+        if (pos < maxLen) { out[pos++] = c; return 1; }
+        // buffer full - keep counting to know total size
+        return 1;
+    }
+    virtual size_t write(const uint8_t* buffer, size_t size) {
+        size_t n = 0;
+        while (n < size) { if (write(buffer[n]) != 1) break; n++; }
+        return n;
+    }
+    size_t bytesWritten() const { return pos; }
+    size_t totalProduced() const { return seen; }
+};
+
 void stop()
 {
     if (audio.isRunning())
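The added ChunkedSkipBufferPrint is what the rewritten handlers below rely on: every chunked-response callback re-runs the full HTML generator, and the sink drops the first skip bytes (already delivered in earlier chunks), copies at most maxLen fresh bytes into the response buffer, and keeps counting everything it sees so the caller can detect the end of the document via totalProduced(). A rough standalone sketch of that contract, assuming Arduino's Print and the struct above are in scope; generate(), fillChunk() and demoChunking() are made up for illustration:

// generate() stands in for any producer with a streamX(Print&) signature.
static void generate(Print &out) {
    out.print(F("<ul><li>one</li><li>two</li></ul>"));
}

// Mirrors the shape of the beginChunkedResponse() filler callback.
static size_t fillChunk(uint8_t *buffer, size_t maxLen, size_t index) {
    ChunkedSkipBufferPrint sink(buffer, maxLen, index); // skip what was already sent
    generate(sink);                                     // regenerate the whole output
    if (index >= sink.totalProduced()) return 0;        // nothing left -> response done
    return sink.bytesWritten();                         // bytes actually placed in buffer
}

static void demoChunking() {
    uint8_t buffer[16];
    size_t index = 0, n;
    while ((n = fillChunk(buffer, sizeof(buffer), index)) > 0) {
        index += n; // the next call skips everything emitted so far
    }
}

Because nothing is cached between calls, per-request memory stays flat; the cost is that the generator runs once per chunk instead of once per request.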
@@ -1257,13 +1280,14 @@ static void serveStaticFile(AsyncWebServerRequest *request,
     }
 }
 
+
 void init_webserver() {
 
     server.on("/", HTTP_GET, [](AsyncWebServerRequest *request)
     {
         serveStaticFile(request, PATH_INDEX, PATH_INDEX_GZ, txt_html_charset, hdr_cache_control_val, F("ERROR: /system/index.html(.gz) not found!"), true);
 
 
     });
 
     server.on("/style.css", HTTP_GET, [](AsyncWebServerRequest *request)
@@ -1276,158 +1300,60 @@ void init_webserver() {
         serveStaticFile(request, PATH_SCRIPT, PATH_SCRIPT_GZ, "application/javascript", "public, max-age=300", F("ERROR: /system/script.js(.gz) not found!"), true);
     });
 
     // Dynamic endpoints to avoid template processing heap spikes
     server.on("/directory", HTTP_GET, [](AsyncWebServerRequest *request)
     {
         webreq_enter();
-        // Backpressure-safe, chunked HTML streaming to avoid cbuf growth/OOM
-        struct DirectoryHtmlStreamState {
-            struct Frame {
-                const DirectoryNode* node;
-                size_t fileIdx;
-                size_t childIdx;
-                bool headerDone;
-            };
-
-            Frame stack[MAX_DEPTH];
-            int top;
-            bool openedUL;
-            bool closedUL;
-            explicit DirectoryHtmlStreamState(const DirectoryNode* root)
-                : top(-1), openedUL(false), closedUL(false) {
-                push(root);
-            }
-            inline void push(const DirectoryNode* n) {
-                if (top + 1 < (int)MAX_DEPTH) {
-                    ++top;
-                    stack[top] = { n, 0, 0, false };
-                } else {
-                    // Depth exceeded: stop descending further. Listing will be truncated but safe.
-                }
-            }
-            inline void pop() { if (top >= 0) --top; }
-            inline Frame& cur() { return stack[top]; }
-
-            size_t next(uint8_t* out, size_t maxLen) {
-                char* p = (char*)out;
-                size_t remaining = maxLen;
-
-                auto putLiteral = [&](const char* s) {
-                    for (const char* q = s; *q && remaining; ++q) { *p++ = *q; --remaining; }
-                    return remaining != 0;
-                };
-
-                auto putNumberLiOpen = [&](unsigned id) {
-                    int n = snprintf(p, remaining, "<li data-id='%u'>", id);
-                    if (n <= 0) return false;
-                    if ((size_t)n > remaining) { p += remaining; remaining = 0; return false; }
-                    p += n; remaining -= (size_t)n; return remaining != 0;
-                };
-
-                auto putNumberDirHeaderOpen = [&](unsigned id) {
-                    int n = snprintf(p, remaining, "<li data-id='%u'><b>", id);
-                    if (n <= 0) return false;
-                    if ((size_t)n > remaining) { p += remaining; remaining = 0; return false; }
-                    p += n; remaining -= (size_t)n; return remaining != 0;
-                };
-
-                auto putStrUnsafe = [&](const String& s) {
-                    // Follow existing behavior: raw text (no escaping)
-                    for (size_t i = 0; i < s.length() && remaining; ++i) { *p++ = s[i]; --remaining; }
-                    return remaining != 0;
-                };
-
-                if (!openedUL) {
-                    putLiteral("<ul>\n");
-                    openedUL = true;
-                    if (remaining == 0) return maxLen - remaining;
-                }
-
-                while (remaining && top >= 0) {
-                    Frame &fr = cur();
-                    const DirectoryNode* node = fr.node;
-
-                    // Emit directory header for non-root
-                    if (!fr.headerDone) {
-                        const String& nm = node->getName();
-                        if (nm != "/") {
-                            if (!putNumberDirHeaderOpen(node->getId())) break;
-                            if (!putStrUnsafe(nm)) break;
-                            if (!putLiteral("</b></li>\n")) break;
-                        }
-                        fr.headerDone = true;
-                    }
-
-                    // Emit files
-                    const auto& files = node->getMP3Files();
-                    while (remaining && fr.fileIdx < files.size()) {
-                        uint16_t fid = node->getFileIdAt(fr.fileIdx);
-                        if (!putNumberLiOpen(fid)) break;
-                        if (!putStrUnsafe(files[fr.fileIdx])) break;
-                        if (!putLiteral("</li>\n")) break;
-                        ++fr.fileIdx;
-                    }
-                    if (remaining == 0) break;
-
-                    // Descend into children
-                    const auto& children = node->getSubdirectories();
-                    if (fr.childIdx < children.size()) {
-                        const DirectoryNode* child = children[fr.childIdx++];
-                        push(child);
-                        continue;
-                    }
-
-                    // Done with this node
-                    pop();
-                }
-
-                if (remaining && top < 0 && !closedUL) {
-                    putLiteral("</ul>\n");
-                    closedUL = true;
-                }
-
-                return maxLen - remaining;
-            }
-        };
-
-        struct StreamCtx { DirectoryHtmlStreamState* state; };
-        auto* ctx = new StreamCtx{ new DirectoryHtmlStreamState(&rootNode) };
-        auto resp = request->beginChunkedResponse(
+        request->onDisconnect([](){ webreq_exit(); });
+#ifdef DEBUG
+        Serial.printf("Serving /directory heap=%u webreq_cnt=%u numOfFiles=%u\n", (unsigned)xPortGetFreeHeapSize(), (unsigned)webreq_cnt, rootNode.getNumOfFiles());
+#endif
+        // True chunked response: re-generate output deterministically and skip 'index' bytes each call
+        AsyncWebServerResponse *response = request->beginChunkedResponse(
             txt_html_charset,
-            [ctx](uint8_t* buffer, size_t maxLen, size_t /*index*/) -> size_t {
-                // Generate next chunk; return 0 when done, and free state
-                size_t n = ctx->state ? ctx->state->next(buffer, maxLen) : 0;
-                if (n == 0 && ctx->state) { delete ctx->state; ctx->state = nullptr; }
-                return n;
+            [](uint8_t *buffer, size_t maxLen, size_t index) -> size_t {
+                ChunkedSkipBufferPrint sink(buffer, maxLen, index);
+                // Generate HTML directly into the sink (no large intermediate buffers)
+                rootNode.streamDirectoryHTML(sink);
+                // finished?
+                if (index >= sink.totalProduced()) {
+                    return 0;
+                }
+                return sink.bytesWritten();
             }
         );
-#ifdef DEBUG
-        Serial.printf("Serving /directory (chunked) heap=%u webreq_cnt=%u numOfFiles=%u\n", (unsigned)xPortGetFreeHeapSize(), (unsigned)webreq_cnt, rootNode.getNumOfFiles());
-#endif
-        resp->addHeader(hdr_cache_control_key, hdr_cache_control_val);
-        resp->addHeader(hdr_connection_key, hdr_connection_val);
-
-        // Ensure cleanup after transfer completes or client aborts
-        request->onDisconnect([ctx](){
-            if (ctx->state) { delete ctx->state; }
-            delete ctx;
-            webreq_exit();
-        });
-        request->send(resp);
+        // Optional headers:
+        response->addHeader(hdr_cache_control_key, hdr_cache_control_val);
+        response->addHeader(hdr_connection_key, hdr_connection_val);
+        request->send(response);
     });
 
     server.on("/mapping", HTTP_GET, [](AsyncWebServerRequest *request)
     {
         webreq_enter();
         request->onDisconnect([](){ webreq_exit();});
-        // Stream mapping to avoid building a large HTML String
-        AsyncResponseStream* stream = request->beginResponseStream(txt_html_charset, buffer_size);
 #ifdef DEBUG
         Serial.printf("Serving /mapping heap=%u webreq_cnt=%u\n", (unsigned)xPortGetFreeHeapSize(), (unsigned)webreq_cnt);
 #endif
-        stream->addHeader(hdr_cache_control_key, hdr_cache_control_val);
-        stream->addHeader(hdr_connection_key, hdr_connection_val);
-        streamMappingHTML(*stream);
-        request->send(stream);
+        // True chunked response using a deterministic generator with byte skipping based on 'index'
+        AsyncWebServerResponse *response = request->beginChunkedResponse(
+            txt_html_charset,
+            [](uint8_t *buffer, size_t maxLen, size_t index) -> size_t {
+                ChunkedSkipBufferPrint sink(buffer, maxLen, index);
+                streamMappingHTML(sink);
+                // finished?
+                if (index >= sink.totalProduced()) {
+                    return 0;
+                }
+                return sink.bytesWritten();
+            }
+        );
+        // Optional headers:
+        response->addHeader(hdr_cache_control_key, hdr_cache_control_val);
+        response->addHeader(hdr_connection_key, hdr_connection_val);
+        request->send(response);
     });
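Both handlers now share the same shape, so if more endpoints adopt this scheme a small wrapper could keep the lambdas identical. A hypothetical sketch under the same assumptions as the commit; sendChunkedPrint is not part of this change, while ChunkedSkipBufferPrint and ESPAsyncWebServer's beginChunkedResponse are:

// Hypothetical helper: stream any Print-based generator as a chunked response.
template <typename Generator>
static void sendChunkedPrint(AsyncWebServerRequest *request, const char *contentType, Generator gen) {
    AsyncWebServerResponse *response = request->beginChunkedResponse(
        contentType,
        [gen](uint8_t *buffer, size_t maxLen, size_t index) -> size_t {
            ChunkedSkipBufferPrint sink(buffer, maxLen, index);
            gen(sink);                                    // regenerate deterministically
            if (index >= sink.totalProduced()) return 0;  // whole document already sent
            return sink.bytesWritten();
        });
    request->send(response);
}

// Usage mirroring the /mapping handler above:
//   sendChunkedPrint(request, txt_html_charset, [](Print &out) { streamMappingHTML(out); });

The trade-off is deliberate: regenerating the listing for every chunk costs CPU that grows with the payload size divided by the chunk size, but per-request heap stays bounded by the buffer AsyncTCP already provides, which is what the removed DirectoryHtmlStreamState machinery was trying to achieve with far more state.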