6831 lines
257 KiB
JavaScript
6831 lines
257 KiB
JavaScript
// This code implements the `-sMODULARIZE` settings by taking the generated
|
|
// JS program code (INNER_JS_CODE) and wrapping it in a factory function.
|
|
|
|
// Single threaded MINIMAL_RUNTIME programs do not need access to
|
|
// document.currentScript, so a simple export declaration is enough.
|
|
var loadWasmTtsBindings = (() => {
|
|
// When MODULARIZE this JS may be executed later,
|
|
// after document.currentScript is gone, so we save it.
|
|
// In EXPORT_ES6 mode we can just use 'import.meta.url'.
|
|
var _scriptName = globalThis.document?.currentScript?.src;
|
|
return async function(moduleArg = {}) {
|
|
var moduleRtn;
|
|
|
|
// include: shell.js
|
|
// include: minimum_runtime_check.js
|
|
(function() {
|
|
// "30.0.0" -> 300000
|
|
function humanReadableVersionToPacked(str) {
|
|
str = str.split("-")[0];
|
|
// Remove any trailing part from e.g. "12.53.3-alpha"
|
|
var vers = str.split(".").slice(0, 3);
|
|
while (vers.length < 3) vers.push("00");
|
|
vers = vers.map((n, i, arr) => n.padStart(2, "0"));
|
|
return vers.join("");
|
|
}
|
|
// 300000 -> "30.0.0"
|
|
var packedVersionToHumanReadable = n => [ n / 1e4 | 0, (n / 100 | 0) % 100, n % 100 ].join(".");
|
|
var TARGET_NOT_SUPPORTED = 2147483647;
|
|
// Note: We use a typeof check here instead of optional chaining using
|
|
// globalThis because older browsers might not have globalThis defined.
|
|
var isNode = typeof process !== "undefined" && process && process.versions && process.versions.node;
|
|
var currentNodeVersion = isNode ? humanReadableVersionToPacked(process.versions.node) : TARGET_NOT_SUPPORTED;
|
|
if (currentNodeVersion < 160400) {
|
|
throw new Error(`This emscripten-generated code requires node v${packedVersionToHumanReadable(160400)} (detected v${packedVersionToHumanReadable(currentNodeVersion)})`);
|
|
}
|
|
var userAgent = typeof navigator !== "undefined" && navigator.userAgent;
|
|
if (!userAgent) {
|
|
return;
|
|
}
|
|
var currentSafariVersion = userAgent.includes("Safari/") && !userAgent.includes("Chrome/") && userAgent.match(/Version\/(\d+\.?\d*\.?\d*)/) ? humanReadableVersionToPacked(userAgent.match(/Version\/(\d+\.?\d*\.?\d*)/)[1]) : TARGET_NOT_SUPPORTED;
|
|
if (currentSafariVersion < 15e4) {
|
|
throw new Error(`This emscripten-generated code requires Safari v${packedVersionToHumanReadable(15e4)} (detected v${currentSafariVersion})`);
|
|
}
|
|
var currentFirefoxVersion = userAgent.match(/Firefox\/(\d+(?:\.\d+)?)/) ? parseFloat(userAgent.match(/Firefox\/(\d+(?:\.\d+)?)/)[1]) : TARGET_NOT_SUPPORTED;
|
|
if (currentFirefoxVersion < 79) {
|
|
throw new Error(`This emscripten-generated code requires Firefox v79 (detected v${currentFirefoxVersion})`);
|
|
}
|
|
var currentChromeVersion = userAgent.match(/Chrome\/(\d+(?:\.\d+)?)/) ? parseFloat(userAgent.match(/Chrome\/(\d+(?:\.\d+)?)/)[1]) : TARGET_NOT_SUPPORTED;
|
|
if (currentChromeVersion < 85) {
|
|
throw new Error(`This emscripten-generated code requires Chrome v85 (detected v${currentChromeVersion})`);
|
|
}
|
|
})();
|
|
|
|
// end include: minimum_runtime_check.js
|
|
// The Module object: Our interface to the outside world. We import
|
|
// and export values on it. There are various ways Module can be used:
|
|
// 1. Not defined. We create it here
|
|
// 2. A function parameter, function(moduleArg) => Promise<Module>
|
|
// 3. pre-run appended it, var Module = {}; ..generated code..
|
|
// 4. External script tag defines var Module.
|
|
// We need to check if Module already exists (e.g. case 3 above).
|
|
// Substitution will be replaced with actual code on later stage of the build,
|
|
// this way Closure Compiler will not mangle it (e.g. case 4. above).
|
|
// Note that if you want to run closure, and also to use Module
|
|
// after the generated code, you will need to define var Module = {};
|
|
// before the code. Then that object will be used in the code, and you
|
|
// can continue to use Module afterwards as well.
|
|
var Module = moduleArg;
|
|
|
|
// Determine the runtime environment we are in. You can customize this by
|
|
// setting the ENVIRONMENT setting at compile time (see settings.js).
|
|
// Attempt to auto-detect the environment
|
|
var ENVIRONMENT_IS_WEB = !!globalThis.window;
|
|
|
|
var ENVIRONMENT_IS_WORKER = !!globalThis.WorkerGlobalScope;
|
|
|
|
// N.b. Electron.js environment is simultaneously a NODE-environment, but
|
|
// also a web environment.
|
|
var ENVIRONMENT_IS_NODE = globalThis.process?.versions?.node && globalThis.process?.type != "renderer";
|
|
|
|
var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
|
|
|
|
// Three configurations we can be running in:
|
|
// 1) We could be the application main() thread running in the main JS UI thread. (ENVIRONMENT_IS_WORKER == false and ENVIRONMENT_IS_PTHREAD == false)
|
|
// 2) We could be the application main() running directly in a worker. (ENVIRONMENT_IS_WORKER == true, ENVIRONMENT_IS_PTHREAD == false)
|
|
// 3) We could be an application pthread running in a worker. (ENVIRONMENT_IS_WORKER == true and ENVIRONMENT_IS_PTHREAD == true)
|
|
// The way we signal to a worker that it is hosting a pthread is to construct
|
|
// it with a specific name.
|
|
var ENVIRONMENT_IS_PTHREAD = ENVIRONMENT_IS_WORKER && self.name?.startsWith("em-pthread");
|
|
|
|
if (ENVIRONMENT_IS_PTHREAD) {
|
|
assert(!globalThis.moduleLoaded, "module should only be loaded once on each pthread worker");
|
|
globalThis.moduleLoaded = true;
|
|
}
|
|
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
var worker_threads = require("worker_threads");
|
|
global.Worker = worker_threads.Worker;
|
|
ENVIRONMENT_IS_WORKER = !worker_threads.isMainThread;
|
|
// Under node we set `workerData` to `em-pthread` to signal that the worker
|
|
// is hosting a pthread.
|
|
ENVIRONMENT_IS_PTHREAD = ENVIRONMENT_IS_WORKER && worker_threads["workerData"] == "em-pthread";
|
|
}
|
|
|
|
// --pre-jses are emitted after the Module integration code, so that they can
|
|
// refer to Module (if they choose; they can also define Module)
|
|
var arguments_ = [];
|
|
|
|
var thisProgram = "./this.program";
|
|
|
|
var quit_ = (status, toThrow) => {
|
|
throw toThrow;
|
|
};
|
|
|
|
if (typeof __filename != "undefined") {
|
|
// Node
|
|
_scriptName = __filename;
|
|
} else if (ENVIRONMENT_IS_WORKER) {
|
|
_scriptName = self.location.href;
|
|
}
|
|
|
|
// `/` should be present at the end if `scriptDirectory` is not empty
|
|
var scriptDirectory = "";
|
|
|
|
function locateFile(path) {
|
|
if (Module["locateFile"]) {
|
|
return Module["locateFile"](path, scriptDirectory);
|
|
}
|
|
return scriptDirectory + path;
|
|
}
|
|
|
|
// Hooks that are implemented differently in different runtime environments.
|
|
var readAsync, readBinary;
|
|
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
const isNode = globalThis.process?.versions?.node && globalThis.process?.type != "renderer";
|
|
if (!isNode) throw new Error("not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)");
|
|
// These modules will usually be used on Node.js. Load them eagerly to avoid
|
|
// the complexity of lazy-loading.
|
|
var fs = require("fs");
|
|
scriptDirectory = __dirname + "/";
|
|
// include: node_shell_read.js
|
|
readBinary = filename => {
|
|
// We need to re-wrap `file://` strings to URLs.
|
|
filename = isFileURI(filename) ? new URL(filename) : filename;
|
|
var ret = fs.readFileSync(filename);
|
|
assert(Buffer.isBuffer(ret));
|
|
return ret;
|
|
};
|
|
readAsync = async (filename, binary = true) => {
|
|
// See the comment in the `readBinary` function.
|
|
filename = isFileURI(filename) ? new URL(filename) : filename;
|
|
var ret = fs.readFileSync(filename, binary ? undefined : "utf8");
|
|
assert(binary ? Buffer.isBuffer(ret) : typeof ret == "string");
|
|
return ret;
|
|
};
|
|
// end include: node_shell_read.js
|
|
if (process.argv.length > 1) {
|
|
thisProgram = process.argv[1].replace(/\\/g, "/");
|
|
}
|
|
arguments_ = process.argv.slice(2);
|
|
quit_ = (status, toThrow) => {
|
|
process.exitCode = status;
|
|
throw toThrow;
|
|
};
|
|
} else if (ENVIRONMENT_IS_SHELL) {} else // Note that this includes Node.js workers when relevant (pthreads is enabled).
|
|
// Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and
|
|
// ENVIRONMENT_IS_NODE.
|
|
if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
|
|
try {
|
|
scriptDirectory = new URL(".", _scriptName).href;
|
|
} catch {}
|
|
if (!(globalThis.window || globalThis.WorkerGlobalScope)) throw new Error("not compiled for this environment (did you build to HTML and try to run it not on the web, or set ENVIRONMENT to something - like node - and run it someplace else - like on the web?)");
|
|
// Differentiate the Web Worker from the Node Worker case, as reading must
|
|
// be done differently.
|
|
if (!ENVIRONMENT_IS_NODE) {
|
|
// include: web_or_worker_shell_read.js
|
|
if (ENVIRONMENT_IS_WORKER) {
|
|
readBinary = url => {
|
|
var xhr = new XMLHttpRequest;
|
|
xhr.open("GET", url, false);
|
|
xhr.responseType = "arraybuffer";
|
|
xhr.send(null);
|
|
return new Uint8Array(/** @type{!ArrayBuffer} */ (xhr.response));
|
|
};
|
|
}
|
|
readAsync = async url => {
|
|
// Fetch has some additional restrictions over XHR, like it can't be used on a file:// url.
|
|
// See https://github.com/github/fetch/pull/92#issuecomment-140665932
|
|
// Cordova or Electron apps are typically loaded from a file:// url.
|
|
// So use XHR on webview if URL is a file URL.
|
|
if (isFileURI(url)) {
|
|
return new Promise((resolve, reject) => {
|
|
var xhr = new XMLHttpRequest;
|
|
xhr.open("GET", url, true);
|
|
xhr.responseType = "arraybuffer";
|
|
xhr.onload = () => {
|
|
if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) {
|
|
// file URLs can return 0
|
|
resolve(xhr.response);
|
|
return;
|
|
}
|
|
reject(xhr.status);
|
|
};
|
|
xhr.onerror = reject;
|
|
xhr.send(null);
|
|
});
|
|
}
|
|
var response = await fetch(url, {
|
|
credentials: "same-origin"
|
|
});
|
|
if (response.ok) {
|
|
return response.arrayBuffer();
|
|
}
|
|
throw new Error(response.status + " : " + response.url);
|
|
};
|
|
}
|
|
} else {
|
|
throw new Error("environment detection error");
|
|
}
|
|
|
|
// Set up the out() and err() hooks, which are how we can print to stdout or
|
|
// stderr, respectively.
|
|
// Normally just binding console.log/console.error here works fine, but
|
|
// under node (with workers) we see missing/out-of-order messages so route
|
|
// directly to stdout and stderr.
|
|
// See https://github.com/emscripten-core/emscripten/issues/14804
|
|
var defaultPrint = console.log.bind(console);
|
|
|
|
var defaultPrintErr = console.error.bind(console);
|
|
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
var utils = require("util");
|
|
var stringify = a => typeof a == "object" ? utils.inspect(a) : a;
|
|
defaultPrint = (...args) => fs.writeSync(1, args.map(stringify).join(" ") + "\n");
|
|
defaultPrintErr = (...args) => fs.writeSync(2, args.map(stringify).join(" ") + "\n");
|
|
}
|
|
|
|
var out = defaultPrint;
|
|
|
|
var err = defaultPrintErr;
|
|
|
|
// perform assertions in shell.js after we set up out() and err(), as otherwise
|
|
// if an assertion fails it cannot print the message
|
|
assert(ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER || ENVIRONMENT_IS_NODE, "Pthreads do not work in this environment yet (need Web Workers, or an alternative to them)");
|
|
|
|
assert(!ENVIRONMENT_IS_SHELL, "shell environment detected but not enabled at build time. Add `shell` to `-sENVIRONMENT` to enable.");
|
|
|
|
// end include: shell.js
|
|
// include: preamble.js
|
|
// === Preamble library stuff ===
|
|
// Documentation for the public APIs defined in this file must be updated in:
|
|
// site/source/docs/api_reference/preamble.js.rst
|
|
// A prebuilt local version of the documentation is available at:
|
|
// site/build/text/docs/api_reference/preamble.js.txt
|
|
// You can also build docs locally as HTML or other formats in site/
|
|
// An online HTML version (which may be of a different version of Emscripten)
|
|
// is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html
|
|
var wasmBinary;
|
|
|
|
if (!globalThis.WebAssembly) {
|
|
err("no native wasm support detected");
|
|
}
|
|
|
|
// Wasm globals
|
|
// For sending to workers.
|
|
var wasmModule;
|
|
|
|
//========================================
|
|
// Runtime essentials
|
|
//========================================
|
|
// whether we are quitting the application. no code should run after this.
|
|
// set in exit() and abort()
|
|
var ABORT = false;
|
|
|
|
// set by exit() and abort(). Passed to 'onExit' handler.
|
|
// NOTE: This is also used as the process return code code in shell environments
|
|
// but only when noExitRuntime is false.
|
|
var EXITSTATUS;
|
|
|
|
// In STRICT mode, we only define assert() when ASSERTIONS is set. i.e. we
|
|
// don't define it at all in release modes. This matches the behaviour of
|
|
// MINIMAL_RUNTIME.
|
|
// TODO(sbc): Make this the default even without STRICT enabled.
|
|
/** @type {function(*, string=)} */ function assert(condition, text) {
|
|
if (!condition) {
|
|
abort("Assertion failed" + (text ? ": " + text : ""));
|
|
}
|
|
}
|
|
|
|
// We used to include malloc/free by default in the past. Show a helpful error in
|
|
// builds with assertions.
|
|
/**
|
|
* Indicates whether filename is delivered via file protocol (as opposed to http/https)
|
|
* @noinline
|
|
*/ var isFileURI = filename => filename.startsWith("file://");
|
|
|
|
// include: runtime_common.js
|
|
// include: runtime_stack_check.js
|
|
// Initializes the stack cookie. Called at the startup of main and at the startup of each thread in pthreads mode.
|
|
function writeStackCookie() {
|
|
var max = _emscripten_stack_get_end();
|
|
assert((max & 3) == 0);
|
|
// If the stack ends at address zero we write our cookies 4 bytes into the
|
|
// stack. This prevents interference with SAFE_HEAP and ASAN which also
|
|
// monitor writes to address zero.
|
|
if (max == 0) {
|
|
max += 4;
|
|
}
|
|
// The stack grow downwards towards _emscripten_stack_get_end.
|
|
// We write cookies to the final two words in the stack and detect if they are
|
|
// ever overwritten.
|
|
HEAPU32[((max) >> 2)] = 34821223;
|
|
HEAPU32[(((max) + (4)) >> 2)] = 2310721022;
|
|
// Also test the global address 0 for integrity.
|
|
HEAPU32[((0) >> 2)] = 1668509029;
|
|
}
|
|
|
|
function checkStackCookie() {
|
|
if (ABORT) return;
|
|
var max = _emscripten_stack_get_end();
|
|
// See writeStackCookie().
|
|
if (max == 0) {
|
|
max += 4;
|
|
}
|
|
var cookie1 = HEAPU32[((max) >> 2)];
|
|
var cookie2 = HEAPU32[(((max) + (4)) >> 2)];
|
|
if (cookie1 != 34821223 || cookie2 != 2310721022) {
|
|
abort(`Stack overflow! Stack cookie has been overwritten at ${ptrToString(max)}, expected hex dwords 0x89BACDFE and 0x2135467, but received ${ptrToString(cookie2)} ${ptrToString(cookie1)}`);
|
|
}
|
|
// Also test the global address 0 for integrity.
|
|
if (HEAPU32[((0) >> 2)] != 1668509029) {
|
|
abort("Runtime error: The application has corrupted its heap memory area (address zero)!");
|
|
}
|
|
}
|
|
|
|
// end include: runtime_stack_check.js
|
|
// include: runtime_exceptions.js
|
|
// end include: runtime_exceptions.js
|
|
// include: runtime_debug.js
|
|
var runtimeDebug = true;
|
|
|
|
// Switch to false at runtime to disable logging at the right times
|
|
// Used by XXXXX_DEBUG settings to output debug messages.
|
|
function dbg(...args) {
|
|
if (!runtimeDebug && typeof runtimeDebug != "undefined") return;
|
|
// Avoid using the console for debugging in multi-threaded node applications
|
|
// See https://github.com/emscripten-core/emscripten/issues/14804
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
// TODO(sbc): Unify with err/out implementation in shell.sh.
|
|
var fs = require("fs");
|
|
var utils = require("util");
|
|
function stringify(a) {
|
|
switch (typeof a) {
|
|
case "object":
|
|
return utils.inspect(a);
|
|
|
|
case "undefined":
|
|
return "undefined";
|
|
}
|
|
return a;
|
|
}
|
|
fs.writeSync(2, args.map(stringify).join(" ") + "\n");
|
|
} else // TODO(sbc): Make this configurable somehow. Its not always convenient for
|
|
// logging to show up as warnings.
|
|
console.warn(...args);
|
|
}
|
|
|
|
// Endianness check
|
|
(() => {
|
|
var h16 = new Int16Array(1);
|
|
var h8 = new Int8Array(h16.buffer);
|
|
h16[0] = 25459;
|
|
if (h8[0] !== 115 || h8[1] !== 99) abort("Runtime error: expected the system to be little-endian! (Run with -sSUPPORT_BIG_ENDIAN to bypass)");
|
|
})();
|
|
|
|
function consumedModuleProp(prop) {
|
|
if (!Object.getOwnPropertyDescriptor(Module, prop)) {
|
|
Object.defineProperty(Module, prop, {
|
|
configurable: true,
|
|
set() {
|
|
abort(`Attempt to set \`Module.${prop}\` after it has already been processed. This can happen, for example, when code is injected via '--post-js' rather than '--pre-js'`);
|
|
}
|
|
});
|
|
}
|
|
}
|
|
|
|
function makeInvalidEarlyAccess(name) {
|
|
return () => assert(false, `call to '${name}' via reference taken before Wasm module initialization`);
|
|
}
|
|
|
|
function ignoredModuleProp(prop) {
|
|
if (Object.getOwnPropertyDescriptor(Module, prop)) {
|
|
abort(`\`Module.${prop}\` was supplied but \`${prop}\` not included in INCOMING_MODULE_JS_API`);
|
|
}
|
|
}
|
|
|
|
// forcing the filesystem exports a few things by default
|
|
function isExportedByForceFilesystem(name) {
|
|
return name === "FS_createPath" || name === "FS_createDataFile" || name === "FS_createPreloadedFile" || name === "FS_preloadFile" || name === "FS_unlink" || name === "addRunDependency" || // The old FS has some functionality that WasmFS lacks.
|
|
name === "FS_createLazyFile" || name === "FS_createDevice" || name === "removeRunDependency";
|
|
}
|
|
|
|
function missingLibrarySymbol(sym) {
|
|
// Any symbol that is not included from the JS library is also (by definition)
|
|
// not exported on the Module object.
|
|
unexportedRuntimeSymbol(sym);
|
|
}
|
|
|
|
function unexportedRuntimeSymbol(sym) {
|
|
if (ENVIRONMENT_IS_PTHREAD) {
|
|
return;
|
|
}
|
|
if (!Object.getOwnPropertyDescriptor(Module, sym)) {
|
|
Object.defineProperty(Module, sym, {
|
|
configurable: true,
|
|
get() {
|
|
var msg = `'${sym}' was not exported. add it to EXPORTED_RUNTIME_METHODS (see the Emscripten FAQ)`;
|
|
if (isExportedByForceFilesystem(sym)) {
|
|
msg += ". Alternatively, forcing filesystem support (-sFORCE_FILESYSTEM) can export this for you";
|
|
}
|
|
abort(msg);
|
|
}
|
|
});
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Override `err`/`out`/`dbg` to report thread / worker information
|
|
*/ function initWorkerLogging() {
|
|
function getLogPrefix() {
|
|
var t = 0;
|
|
if (runtimeInitialized && typeof _pthread_self != "undefined") {
|
|
t = _pthread_self();
|
|
}
|
|
return `w:${workerID},t:${ptrToString(t)}:`;
|
|
}
|
|
// Prefix all dbg() messages with the calling thread info.
|
|
var origDbg = dbg;
|
|
dbg = (...args) => origDbg(getLogPrefix(), ...args);
|
|
}
|
|
|
|
initWorkerLogging();
|
|
|
|
// end include: runtime_debug.js
|
|
var readyPromiseResolve, readyPromiseReject;
|
|
|
|
if (ENVIRONMENT_IS_NODE && (ENVIRONMENT_IS_PTHREAD)) {
|
|
// Create as web-worker-like an environment as we can.
|
|
var parentPort = worker_threads["parentPort"];
|
|
parentPort.on("message", msg => global.onmessage?.({
|
|
data: msg
|
|
}));
|
|
Object.assign(globalThis, {
|
|
self: global,
|
|
postMessage: msg => parentPort["postMessage"](msg)
|
|
});
|
|
// Node.js Workers do not pass postMessage()s and uncaught exception events to the parent
|
|
// thread necessarily in the same order where they were generated in sequential program order.
|
|
// See https://github.com/nodejs/node/issues/59617
|
|
// To remedy this, capture all uncaughtExceptions in the Worker, and sequentialize those over
|
|
// to the same postMessage pipe that other messages use.
|
|
process.on("uncaughtException", err => {
|
|
postMessage({
|
|
cmd: "uncaughtException",
|
|
error: err
|
|
});
|
|
// Also shut down the Worker to match the same semantics as if this uncaughtException
|
|
// handler was not registered.
|
|
// (n.b. this will not shut down the whole Node.js app process, but just the Worker)
|
|
process.exit(1);
|
|
});
|
|
}
|
|
|
|
// include: runtime_pthread.js
|
|
// Pthread Web Worker handling code.
|
|
// This code runs only on pthread web workers and handles pthread setup
|
|
// and communication with the main thread via postMessage.
|
|
// Unique ID of the current pthread worker (zero on non-pthread-workers
|
|
// including the main thread).
|
|
var workerID = 0;
|
|
|
|
var startWorker;
|
|
|
|
if (ENVIRONMENT_IS_PTHREAD) {
|
|
// Thread-local guard variable for one-time init of the JS state
|
|
var initializedJS = false;
|
|
// Turn unhandled rejected promises into errors so that the main thread will be
|
|
// notified about them.
|
|
self.onunhandledrejection = e => {
|
|
throw e.reason || e;
|
|
};
|
|
function handleMessage(e) {
|
|
try {
|
|
var msgData = e["data"];
|
|
//dbg('msgData: ' + Object.keys(msgData));
|
|
var cmd = msgData.cmd;
|
|
if (cmd === "load") {
|
|
// Preload command that is called once per worker to parse and load the Emscripten code.
|
|
workerID = msgData.workerID;
|
|
// Until we initialize the runtime, queue up any further incoming messages.
|
|
let messageQueue = [];
|
|
self.onmessage = e => messageQueue.push(e);
|
|
// And add a callback for when the runtime is initialized.
|
|
startWorker = () => {
|
|
// Notify the main thread that this thread has loaded.
|
|
postMessage({
|
|
cmd: "loaded"
|
|
});
|
|
// Process any messages that were queued before the thread was ready.
|
|
for (let msg of messageQueue) {
|
|
handleMessage(msg);
|
|
}
|
|
// Restore the real message handler.
|
|
self.onmessage = handleMessage;
|
|
};
|
|
// Use `const` here to ensure that the variable is scoped only to
|
|
// that iteration, allowing safe reference from a closure.
|
|
for (const handler of msgData.handlers) {
|
|
// The the main module has a handler for a certain even, but no
|
|
// handler exists on the pthread worker, then proxy that handler
|
|
// back to the main thread.
|
|
if (!Module[handler] || Module[handler].proxy) {
|
|
Module[handler] = (...args) => {
|
|
postMessage({
|
|
cmd: "callHandler",
|
|
handler,
|
|
args
|
|
});
|
|
};
|
|
// Rebind the out / err handlers if needed
|
|
if (handler == "print") out = Module[handler];
|
|
if (handler == "printErr") err = Module[handler];
|
|
}
|
|
}
|
|
wasmMemory = msgData.wasmMemory;
|
|
updateMemoryViews();
|
|
wasmModule = msgData.wasmModule;
|
|
createWasm();
|
|
run();
|
|
} else if (cmd === "run") {
|
|
assert(msgData.pthread_ptr);
|
|
// Call inside JS module to set up the stack frame for this pthread in JS module scope.
|
|
// This needs to be the first thing that we do, as we cannot call to any C/C++ functions
|
|
// until the thread stack is initialized.
|
|
establishStackSpace(msgData.pthread_ptr);
|
|
// Pass the thread address to wasm to store it for fast access.
|
|
__emscripten_thread_init(msgData.pthread_ptr, /*is_main=*/ 0, /*is_runtime=*/ 0, /*can_block=*/ 1, 0, 0);
|
|
PThread.threadInitTLS();
|
|
// Await mailbox notifications with `Atomics.waitAsync` so we can start
|
|
// using the fast `Atomics.notify` notification path.
|
|
__emscripten_thread_mailbox_await(msgData.pthread_ptr);
|
|
if (!initializedJS) {
|
|
// Embind must initialize itself on all threads, as it generates support JS.
|
|
// We only do this once per worker since they get reused
|
|
__embind_initialize_bindings();
|
|
initializedJS = true;
|
|
}
|
|
try {
|
|
invokeEntryPoint(msgData.start_routine, msgData.arg);
|
|
} catch (ex) {
|
|
if (ex != "unwind") {
|
|
// The pthread "crashed". Do not call `_emscripten_thread_exit` (which
|
|
// would make this thread joinable). Instead, re-throw the exception
|
|
// and let the top level handler propagate it back to the main thread.
|
|
throw ex;
|
|
}
|
|
}
|
|
} else if (msgData.target === "setimmediate") {} else if (cmd === "checkMailbox") {
|
|
if (initializedJS) {
|
|
checkMailbox();
|
|
}
|
|
} else if (cmd) {
|
|
// The received message looks like something that should be handled by this message
|
|
// handler, (since there is a cmd field present), but is not one of the
|
|
// recognized commands:
|
|
err(`worker: received unknown command ${cmd}`);
|
|
err(msgData);
|
|
}
|
|
} catch (ex) {
|
|
err(`worker: onmessage() captured an uncaught exception: ${ex}`);
|
|
if (ex?.stack) err(ex.stack);
|
|
__emscripten_thread_crashed();
|
|
throw ex;
|
|
}
|
|
}
|
|
self.onmessage = handleMessage;
|
|
}
|
|
|
|
// ENVIRONMENT_IS_PTHREAD
|
|
// end include: runtime_pthread.js
|
|
// Memory management
|
|
var /** @type {!Int8Array} */ HEAP8, /** @type {!Uint8Array} */ HEAPU8, /** @type {!Int16Array} */ HEAP16, /** @type {!Uint16Array} */ HEAPU16, /** @type {!Int32Array} */ HEAP32, /** @type {!Uint32Array} */ HEAPU32, /** @type {!Float32Array} */ HEAPF32, /** @type {!Float64Array} */ HEAPF64;
|
|
|
|
var runtimeInitialized = false;
|
|
|
|
function updateMemoryViews() {
|
|
var b = wasmMemory.buffer;
|
|
HEAP8 = new Int8Array(b);
|
|
HEAP16 = new Int16Array(b);
|
|
Module["HEAPU8"] = HEAPU8 = new Uint8Array(b);
|
|
HEAPU16 = new Uint16Array(b);
|
|
HEAP32 = new Int32Array(b);
|
|
HEAPU32 = new Uint32Array(b);
|
|
HEAPF32 = new Float32Array(b);
|
|
HEAPF64 = new Float64Array(b);
|
|
}
|
|
|
|
// In non-standalone/normal mode, we create the memory here.
|
|
// include: runtime_init_memory.js
|
|
// Create the wasm memory. (Note: this only applies if IMPORTED_MEMORY is defined)
|
|
// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
|
|
function initMemory() {
|
|
if ((ENVIRONMENT_IS_PTHREAD)) {
|
|
return;
|
|
}
|
|
if (Module["wasmMemory"]) {
|
|
wasmMemory = Module["wasmMemory"];
|
|
} else {
|
|
var INITIAL_MEMORY = Module["INITIAL_MEMORY"] || 536870912;
|
|
assert(INITIAL_MEMORY >= 65536, "INITIAL_MEMORY should be larger than STACK_SIZE, was " + INITIAL_MEMORY + "! (STACK_SIZE=" + 65536 + ")");
|
|
/** @suppress {checkTypes} */ wasmMemory = new WebAssembly.Memory({
|
|
"initial": INITIAL_MEMORY / 65536,
|
|
"maximum": INITIAL_MEMORY / 65536,
|
|
"shared": true
|
|
});
|
|
}
|
|
updateMemoryViews();
|
|
}
|
|
|
|
// end include: runtime_init_memory.js
|
|
// include: memoryprofiler.js
|
|
// end include: memoryprofiler.js
|
|
// end include: runtime_common.js
|
|
assert(globalThis.Int32Array && globalThis.Float64Array && Int32Array.prototype.subarray && Int32Array.prototype.set, "JS engine does not provide full typed array support");
|
|
|
|
function preRun() {
|
|
assert(!ENVIRONMENT_IS_PTHREAD);
|
|
// PThreads reuse the runtime from the main thread.
|
|
if (Module["preRun"]) {
|
|
if (typeof Module["preRun"] == "function") Module["preRun"] = [ Module["preRun"] ];
|
|
while (Module["preRun"].length) {
|
|
addOnPreRun(Module["preRun"].shift());
|
|
}
|
|
}
|
|
consumedModuleProp("preRun");
|
|
// Begin ATPRERUNS hooks
|
|
callRuntimeCallbacks(onPreRuns);
|
|
}
|
|
|
|
function initRuntime() {
|
|
assert(!runtimeInitialized);
|
|
runtimeInitialized = true;
|
|
if (ENVIRONMENT_IS_PTHREAD) return startWorker();
|
|
checkStackCookie();
|
|
// Begin ATINITS hooks
|
|
if (!Module["noFSInit"] && !FS.initialized) FS.init();
|
|
TTY.init();
|
|
// End ATINITS hooks
|
|
wasmExports["__wasm_call_ctors"]();
|
|
// Begin ATPOSTCTORS hooks
|
|
FS.ignorePermissions = false;
|
|
}
|
|
|
|
function postRun() {
|
|
checkStackCookie();
|
|
if ((ENVIRONMENT_IS_PTHREAD)) {
|
|
return;
|
|
}
|
|
// PThreads reuse the runtime from the main thread.
|
|
if (Module["postRun"]) {
|
|
if (typeof Module["postRun"] == "function") Module["postRun"] = [ Module["postRun"] ];
|
|
while (Module["postRun"].length) {
|
|
addOnPostRun(Module["postRun"].shift());
|
|
}
|
|
}
|
|
consumedModuleProp("postRun");
|
|
// Begin ATPOSTRUNS hooks
|
|
callRuntimeCallbacks(onPostRuns);
|
|
}
|
|
|
|
/** @param {string|number=} what */ function abort(what) {
|
|
Module["onAbort"]?.(what);
|
|
what = "Aborted(" + what + ")";
|
|
// TODO(sbc): Should we remove printing and leave it up to whoever
|
|
// catches the exception?
|
|
err(what);
|
|
ABORT = true;
|
|
// Use a wasm runtime error, because a JS error might be seen as a foreign
|
|
// exception, which means we'd run destructors on it. We need the error to
|
|
// simply make the program stop.
|
|
// FIXME This approach does not work in Wasm EH because it currently does not assume
|
|
// all RuntimeErrors are from traps; it decides whether a RuntimeError is from
|
|
// a trap or not based on a hidden field within the object. So at the moment
|
|
// we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that
|
|
// allows this in the wasm spec.
|
|
// Suppress closure compiler warning here. Closure compiler's builtin extern
|
|
// definition for WebAssembly.RuntimeError claims it takes no arguments even
|
|
// though it can.
|
|
// TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed.
|
|
/** @suppress {checkTypes} */ var e = new WebAssembly.RuntimeError(what);
|
|
readyPromiseReject?.(e);
|
|
// Throw the error whether or not MODULARIZE is set because abort is used
|
|
// in code paths apart from instantiation where an exception is expected
|
|
// to be thrown when abort is called.
|
|
throw e;
|
|
}
|
|
|
|
function createExportWrapper(name, nargs) {
|
|
return (...args) => {
|
|
assert(runtimeInitialized, `native function \`${name}\` called before runtime initialization`);
|
|
var f = wasmExports[name];
|
|
assert(f, `exported native function \`${name}\` not found`);
|
|
// Only assert for too many arguments. Too few can be valid since the missing arguments will be zero filled.
|
|
assert(args.length <= nargs, `native function \`${name}\` called with ${args.length} args but expects ${nargs}`);
|
|
return f(...args);
|
|
};
|
|
}
|
|
|
|
var wasmBinaryFile;
|
|
|
|
function findWasmBinary() {
|
|
return locateFile("bindings_main.wasm");
|
|
}
|
|
|
|
function getBinarySync(file) {
|
|
if (file == wasmBinaryFile && wasmBinary) {
|
|
return new Uint8Array(wasmBinary);
|
|
}
|
|
if (readBinary) {
|
|
return readBinary(file);
|
|
}
|
|
// Throwing a plain string here, even though it not normally adviables since
|
|
// this gets turning into an `abort` in instantiateArrayBuffer.
|
|
throw "both async and sync fetching of the wasm failed";
|
|
}
|
|
|
|
async function getWasmBinary(binaryFile) {
|
|
// If we don't have the binary yet, load it asynchronously using readAsync.
|
|
if (!wasmBinary) {
|
|
// Fetch the binary using readAsync
|
|
try {
|
|
var response = await readAsync(binaryFile);
|
|
return new Uint8Array(response);
|
|
} catch {}
|
|
}
|
|
// Otherwise, getBinarySync should be able to get it synchronously
|
|
return getBinarySync(binaryFile);
|
|
}
|
|
|
|
async function instantiateArrayBuffer(binaryFile, imports) {
|
|
try {
|
|
var binary = await getWasmBinary(binaryFile);
|
|
var instance = await WebAssembly.instantiate(binary, imports);
|
|
return instance;
|
|
} catch (reason) {
|
|
err(`failed to asynchronously prepare wasm: ${reason}`);
|
|
// Warn on some common problems.
|
|
if (isFileURI(binaryFile)) {
|
|
err(`warning: Loading from a file URI (${binaryFile}) is not supported in most browsers. See https://emscripten.org/docs/getting_started/FAQ.html#how-do-i-run-a-local-webserver-for-testing-why-does-my-program-stall-in-downloading-or-preparing`);
|
|
}
|
|
abort(reason);
|
|
}
|
|
}
|
|
|
|
async function instantiateAsync(binary, binaryFile, imports) {
|
|
if (!binary && !isFileURI(binaryFile) && !ENVIRONMENT_IS_NODE) {
|
|
try {
|
|
var response = fetch(binaryFile, {
|
|
credentials: "same-origin"
|
|
});
|
|
var instantiationResult = await WebAssembly.instantiateStreaming(response, imports);
|
|
return instantiationResult;
|
|
} catch (reason) {
|
|
// We expect the most common failure cause to be a bad MIME type for the binary,
|
|
// in which case falling back to ArrayBuffer instantiation should work.
|
|
err(`wasm streaming compile failed: ${reason}`);
|
|
err("falling back to ArrayBuffer instantiation");
|
|
}
|
|
}
|
|
return instantiateArrayBuffer(binaryFile, imports);
|
|
}
|
|
|
|
function getWasmImports() {
|
|
assignWasmImports();
|
|
// prepare imports
|
|
var imports = {
|
|
"env": wasmImports,
|
|
"wasi_snapshot_preview1": wasmImports
|
|
};
|
|
return imports;
|
|
}
|
|
|
|
// Create the wasm instance.
|
|
// Receives the wasm imports, returns the exports.
|
|
async function createWasm() {
|
|
// Load the wasm module and create an instance of using native support in the JS engine.
|
|
// handle a generated wasm instance, receiving its exports and
|
|
// performing other necessary setup
|
|
/** @param {WebAssembly.Module=} module*/ function receiveInstance(instance, module) {
|
|
wasmExports = instance.exports;
|
|
registerTLSInit(wasmExports["_emscripten_tls_init"]);
|
|
assignWasmExports(wasmExports);
|
|
// We now have the Wasm module loaded up, keep a reference to the compiled module so we can post it to the workers.
|
|
wasmModule = module;
|
|
return wasmExports;
|
|
}
|
|
// Prefer streaming instantiation if available.
|
|
// Async compilation can be confusing when an error on the page overwrites Module
|
|
// (for example, if the order of elements is wrong, and the one defining Module is
|
|
// later), so we save Module and check it later.
|
|
var trueModule = Module;
|
|
function receiveInstantiationResult(result) {
|
|
// 'result' is a ResultObject object which has both the module and instance.
|
|
// receiveInstance() will swap in the exports (to Module.asm) so they can be called
|
|
assert(Module === trueModule, "the Module object should not be replaced during async compilation - perhaps the order of HTML elements is wrong?");
|
|
trueModule = null;
|
|
return receiveInstance(result["instance"], result["module"]);
|
|
}
|
|
var info = getWasmImports();
|
|
// User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback
|
|
// to manually instantiate the Wasm module themselves. This allows pages to
|
|
// run the instantiation parallel to any other async startup actions they are
|
|
// performing.
|
|
// Also pthreads and wasm workers initialize the wasm instance through this
|
|
// path.
|
|
if (Module["instantiateWasm"]) {
|
|
return new Promise((resolve, reject) => {
|
|
try {
|
|
Module["instantiateWasm"](info, (inst, mod) => {
|
|
resolve(receiveInstance(inst, mod));
|
|
});
|
|
} catch (e) {
|
|
err(`Module.instantiateWasm callback failed with error: ${e}`);
|
|
reject(e);
|
|
}
|
|
});
|
|
}
|
|
if ((ENVIRONMENT_IS_PTHREAD)) {
|
|
// Instantiate from the module that was recieved via postMessage from
|
|
// the main thread. We can just use sync instantiation in the worker.
|
|
assert(wasmModule, "wasmModule should have been received via postMessage");
|
|
var instance = new WebAssembly.Instance(wasmModule, getWasmImports());
|
|
return receiveInstance(instance, wasmModule);
|
|
}
|
|
wasmBinaryFile ??= findWasmBinary();
|
|
var result = await instantiateAsync(wasmBinary, wasmBinaryFile, info);
|
|
var exports = receiveInstantiationResult(result);
|
|
return exports;
|
|
}
|
|
|
|
// Globals used by JS i64 conversions (see makeSetValue)
|
|
var tempDouble;
|
|
|
|
var tempI64;
|
|
|
|
// end include: preamble.js
|
|
// Begin JS library code
|
|
class ExitStatus {
|
|
name="ExitStatus";
|
|
constructor(status) {
|
|
this.message = `Program terminated with exit(${status})`;
|
|
this.status = status;
|
|
}
|
|
}
|
|
|
|
var terminateWorker = worker => {
|
|
worker.terminate();
|
|
// terminate() can be asynchronous, so in theory the worker can continue
|
|
// to run for some amount of time after termination. However from our POV
|
|
// the worker now dead and we don't want to hear from it again, so we stub
|
|
// out its message handler here. This avoids having to check in each of
|
|
// the onmessage handlers if the message was coming from valid worker.
|
|
worker.onmessage = e => {
|
|
var cmd = e["data"].cmd;
|
|
err(`received "${cmd}" command from terminated worker: ${worker.workerID}`);
|
|
};
|
|
};
|
|
|
|
var cleanupThread = pthread_ptr => {
|
|
assert(!ENVIRONMENT_IS_PTHREAD, "Internal Error! cleanupThread() can only ever be called from main application thread!");
|
|
assert(pthread_ptr, "Internal Error! Null pthread_ptr in cleanupThread!");
|
|
var worker = PThread.pthreads[pthread_ptr];
|
|
assert(worker);
|
|
PThread.returnWorkerToPool(worker);
|
|
};
|
|
|
|
var callRuntimeCallbacks = callbacks => {
|
|
while (callbacks.length > 0) {
|
|
// Pass the module as the first argument.
|
|
callbacks.shift()(Module);
|
|
}
|
|
};
|
|
|
|
var onPreRuns = [];
|
|
|
|
var addOnPreRun = cb => onPreRuns.push(cb);
|
|
|
|
var runDependencies = 0;
|
|
|
|
var dependenciesFulfilled = null;
|
|
|
|
var runDependencyTracking = {};
|
|
|
|
var runDependencyWatcher = null;
|
|
|
|
var removeRunDependency = id => {
|
|
runDependencies--;
|
|
Module["monitorRunDependencies"]?.(runDependencies);
|
|
assert(id, "removeRunDependency requires an ID");
|
|
assert(runDependencyTracking[id]);
|
|
delete runDependencyTracking[id];
|
|
if (runDependencies == 0) {
|
|
if (runDependencyWatcher !== null) {
|
|
clearInterval(runDependencyWatcher);
|
|
runDependencyWatcher = null;
|
|
}
|
|
if (dependenciesFulfilled) {
|
|
var callback = dependenciesFulfilled;
|
|
dependenciesFulfilled = null;
|
|
callback();
|
|
}
|
|
}
|
|
};
|
|
|
|
var addRunDependency = id => {
|
|
runDependencies++;
|
|
Module["monitorRunDependencies"]?.(runDependencies);
|
|
assert(id, "addRunDependency requires an ID");
|
|
assert(!runDependencyTracking[id]);
|
|
runDependencyTracking[id] = 1;
|
|
if (runDependencyWatcher === null && globalThis.setInterval) {
|
|
// Check for missing dependencies every few seconds
|
|
runDependencyWatcher = setInterval(() => {
|
|
if (ABORT) {
|
|
clearInterval(runDependencyWatcher);
|
|
runDependencyWatcher = null;
|
|
return;
|
|
}
|
|
var shown = false;
|
|
for (var dep in runDependencyTracking) {
|
|
if (!shown) {
|
|
shown = true;
|
|
err("still waiting on run dependencies:");
|
|
}
|
|
err(`dependency: ${dep}`);
|
|
}
|
|
if (shown) {
|
|
err("(end of list)");
|
|
}
|
|
}, 1e4);
|
|
// Prevent this timer from keeping the runtime alive if nothing
|
|
// else is.
|
|
runDependencyWatcher.unref?.();
|
|
}
|
|
};
|
|
|
|
var spawnThread = threadParams => {
|
|
assert(!ENVIRONMENT_IS_PTHREAD, "Internal Error! spawnThread() can only ever be called from main application thread!");
|
|
assert(threadParams.pthread_ptr, "Internal error, no pthread ptr!");
|
|
var worker = PThread.getNewWorker();
|
|
if (!worker) {
|
|
// No available workers in the PThread pool.
|
|
return 6;
|
|
}
|
|
assert(!worker.pthread_ptr, "Internal error!");
|
|
PThread.runningWorkers.push(worker);
|
|
// Add to pthreads map
|
|
PThread.pthreads[threadParams.pthread_ptr] = worker;
|
|
worker.pthread_ptr = threadParams.pthread_ptr;
|
|
var msg = {
|
|
cmd: "run",
|
|
start_routine: threadParams.startRoutine,
|
|
arg: threadParams.arg,
|
|
pthread_ptr: threadParams.pthread_ptr
|
|
};
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
// Mark worker as weakly referenced once we start executing a pthread,
|
|
// so that its existence does not prevent Node.js from exiting. This
|
|
// has no effect if the worker is already weakly referenced (e.g. if
|
|
// this worker was previously idle/unused).
|
|
worker.unref();
|
|
}
|
|
// Ask the worker to start executing its pthread entry point function.
|
|
worker.postMessage(msg, threadParams.transferList);
|
|
return 0;
|
|
};
|
|
|
|
var runtimeKeepaliveCounter = 0;
|
|
|
|
var keepRuntimeAlive = () => noExitRuntime || runtimeKeepaliveCounter > 0;
|
|
|
|
var stackSave = () => _emscripten_stack_get_current();
|
|
|
|
var stackRestore = val => __emscripten_stack_restore(val);
|
|
|
|
var stackAlloc = sz => __emscripten_stack_alloc(sz);
|
|
|
|
/** @type{function(number, (number|boolean), ...number)} */ var proxyToMainThread = (funcIndex, emAsmAddr, sync, ...callArgs) => {
|
|
// EM_ASM proxying is done by passing a pointer to the address of the EM_ASM
|
|
// content as `emAsmAddr`. JS library proxying is done by passing an index
|
|
// into `proxiedJSCallArgs` as `funcIndex`. If `emAsmAddr` is non-zero then
|
|
// `funcIndex` will be ignored.
|
|
// Additional arguments are passed after the first three are the actual
|
|
// function arguments.
|
|
// The serialization buffer contains the number of call params, and then
|
|
// all the args here.
|
|
// We also pass 'sync' to C separately, since C needs to look at it.
|
|
// Allocate a buffer, which will be copied by the C code.
|
|
// First passed parameter specifies the number of arguments to the function.
|
|
// When BigInt support is enabled, we must handle types in a more complex
|
|
// way, detecting at runtime if a value is a BigInt or not (as we have no
|
|
// type info here). To do that, add a "prefix" before each value that
|
|
// indicates if it is a BigInt, which effectively doubles the number of
|
|
// values we serialize for proxying. TODO: pack this?
|
|
var serializedNumCallArgs = callArgs.length;
|
|
var sp = stackSave();
|
|
var args = stackAlloc(serializedNumCallArgs * 8);
|
|
var b = ((args) >> 3);
|
|
for (var i = 0; i < callArgs.length; i++) {
|
|
var arg = callArgs[i];
|
|
HEAPF64[b + i] = arg;
|
|
}
|
|
var rtn = __emscripten_run_js_on_main_thread(funcIndex, emAsmAddr, serializedNumCallArgs, args, sync);
|
|
stackRestore(sp);
|
|
return rtn;
|
|
};
|
|
|
|
function _proc_exit(code) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(0, 0, 1, code);
|
|
EXITSTATUS = code;
|
|
if (!keepRuntimeAlive()) {
|
|
PThread.terminateAllThreads();
|
|
Module["onExit"]?.(code);
|
|
ABORT = true;
|
|
}
|
|
quit_(code, new ExitStatus(code));
|
|
}
|
|
|
|
function exitOnMainThread(returnCode) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(1, 0, 0, returnCode);
|
|
_exit(returnCode);
|
|
}
|
|
|
|
/** @param {boolean|number=} implicit */ var exitJS = (status, implicit) => {
|
|
EXITSTATUS = status;
|
|
checkUnflushedContent();
|
|
if (ENVIRONMENT_IS_PTHREAD) {
|
|
// implicit exit can never happen on a pthread
|
|
assert(!implicit);
|
|
// When running in a pthread we propagate the exit back to the main thread
|
|
// where it can decide if the whole process should be shut down or not.
|
|
// The pthread may have decided not to exit its own runtime, for example
|
|
// because it runs a main loop, but that doesn't affect the main thread.
|
|
exitOnMainThread(status);
|
|
throw "unwind";
|
|
}
|
|
// if exit() was called explicitly, warn the user if the runtime isn't actually being shut down
|
|
if (keepRuntimeAlive() && !implicit) {
|
|
var msg = `program exited (with status: ${status}), but keepRuntimeAlive() is set (counter=${runtimeKeepaliveCounter}) due to an async operation, so halting execution but not exiting the runtime or preventing further async execution (you can use emscripten_force_exit, if you want to force a true shutdown)`;
|
|
readyPromiseReject?.(msg);
|
|
err(msg);
|
|
}
|
|
_proc_exit(status);
|
|
};
|
|
|
|
var _exit = exitJS;
|
|
|
|
var ptrToString = ptr => {
|
|
assert(typeof ptr === "number", `ptrToString expects a number, got ${typeof ptr}`);
|
|
// Convert to 32-bit unsigned value
|
|
ptr >>>= 0;
|
|
return "0x" + ptr.toString(16).padStart(8, "0");
|
|
};
|
|
|
|
var PThread = {
|
|
unusedWorkers: [],
|
|
runningWorkers: [],
|
|
tlsInitFunctions: [],
|
|
pthreads: {},
|
|
nextWorkerID: 1,
|
|
init() {
|
|
if ((!(ENVIRONMENT_IS_PTHREAD))) {
|
|
PThread.initMainThread();
|
|
}
|
|
},
|
|
initMainThread() {
|
|
var pthreadPoolSize = 10;
|
|
// Start loading up the Worker pool, if requested.
|
|
while (pthreadPoolSize--) {
|
|
PThread.allocateUnusedWorker();
|
|
}
|
|
// MINIMAL_RUNTIME takes care of calling loadWasmModuleToAllWorkers
|
|
// in postamble_minimal.js
|
|
addOnPreRun(async () => {
|
|
var pthreadPoolReady = PThread.loadWasmModuleToAllWorkers();
|
|
addRunDependency("loading-workers");
|
|
await pthreadPoolReady;
|
|
removeRunDependency("loading-workers");
|
|
});
|
|
},
|
|
terminateAllThreads: () => {
|
|
assert(!ENVIRONMENT_IS_PTHREAD, "Internal Error! terminateAllThreads() can only ever be called from main application thread!");
|
|
// Attempt to kill all workers. Sadly (at least on the web) there is no
|
|
// way to terminate a worker synchronously, or to be notified when a
|
|
// worker in actually terminated. This means there is some risk that
|
|
// pthreads will continue to be executing after `worker.terminate` has
|
|
// returned. For this reason, we don't call `returnWorkerToPool` here or
|
|
// free the underlying pthread data structures.
|
|
for (var worker of PThread.runningWorkers) {
|
|
terminateWorker(worker);
|
|
}
|
|
for (var worker of PThread.unusedWorkers) {
|
|
terminateWorker(worker);
|
|
}
|
|
PThread.unusedWorkers = [];
|
|
PThread.runningWorkers = [];
|
|
PThread.pthreads = {};
|
|
},
|
|
returnWorkerToPool: worker => {
|
|
// We don't want to run main thread queued calls here, since we are doing
|
|
// some operations that leave the worker queue in an invalid state until
|
|
// we are completely done (it would be bad if free() ends up calling a
|
|
// queued pthread_create which looks at the global data structures we are
|
|
// modifying). To achieve that, defer the free() til the very end, when
|
|
// we are all done.
|
|
var pthread_ptr = worker.pthread_ptr;
|
|
delete PThread.pthreads[pthread_ptr];
|
|
// Note: worker is intentionally not terminated so the pool can
|
|
// dynamically grow.
|
|
PThread.unusedWorkers.push(worker);
|
|
PThread.runningWorkers.splice(PThread.runningWorkers.indexOf(worker), 1);
|
|
// Not a running Worker anymore
|
|
// Detach the worker from the pthread object, and return it to the
|
|
// worker pool as an unused worker.
|
|
worker.pthread_ptr = 0;
|
|
// Finally, free the underlying (and now-unused) pthread structure in
|
|
// linear memory.
|
|
__emscripten_thread_free_data(pthread_ptr);
|
|
},
|
|
threadInitTLS() {
|
|
// Call thread init functions (these are the _emscripten_tls_init for each
|
|
// module loaded.
|
|
PThread.tlsInitFunctions.forEach(f => f());
|
|
},
|
|
loadWasmModuleToWorker: worker => new Promise(onFinishedLoading => {
|
|
worker.onmessage = e => {
|
|
var d = e["data"];
|
|
var cmd = d.cmd;
|
|
// If this message is intended to a recipient that is not the main
|
|
// thread, forward it to the target thread.
|
|
if (d.targetThread && d.targetThread != _pthread_self()) {
|
|
var targetWorker = PThread.pthreads[d.targetThread];
|
|
if (targetWorker) {
|
|
targetWorker.postMessage(d, d.transferList);
|
|
} else {
|
|
err(`Internal error! Worker sent a message "${cmd}" to target pthread ${d.targetThread}, but that thread no longer exists!`);
|
|
}
|
|
return;
|
|
}
|
|
if (cmd === "checkMailbox") {
|
|
checkMailbox();
|
|
} else if (cmd === "spawnThread") {
|
|
spawnThread(d);
|
|
} else if (cmd === "cleanupThread") {
|
|
// cleanupThread needs to be run via callUserCallback since it calls
|
|
// back into user code to free thread data. Without this it's possible
|
|
// the unwind or ExitStatus exception could escape here.
|
|
callUserCallback(() => cleanupThread(d.thread));
|
|
} else if (cmd === "loaded") {
|
|
worker.loaded = true;
|
|
// Check that this worker doesn't have an associated pthread.
|
|
if (ENVIRONMENT_IS_NODE && !worker.pthread_ptr) {
|
|
// Once worker is loaded & idle, mark it as weakly referenced,
|
|
// so that mere existence of a Worker in the pool does not prevent
|
|
// Node.js from exiting the app.
|
|
worker.unref();
|
|
}
|
|
onFinishedLoading(worker);
|
|
} else if (d.target === "setimmediate") {
|
|
// Worker wants to postMessage() to itself to implement setImmediate()
|
|
// emulation.
|
|
worker.postMessage(d);
|
|
} else if (cmd === "uncaughtException") {
|
|
// Message handler for Node.js specific out-of-order behavior:
|
|
// https://github.com/nodejs/node/issues/59617
|
|
// A pthread sent an uncaught exception event. Re-raise it on the main thread.
|
|
worker.onerror(d.error);
|
|
} else if (cmd === "callHandler") {
|
|
Module[d.handler](...d.args);
|
|
} else if (cmd) {
|
|
// The received message looks like something that should be handled by this message
|
|
// handler, (since there is a e.data.cmd field present), but is not one of the
|
|
// recognized commands:
|
|
err(`worker sent an unknown command ${cmd}`);
|
|
}
|
|
};
|
|
worker.onerror = e => {
|
|
var message = "worker sent an error!";
|
|
if (worker.pthread_ptr) {
|
|
message = `Pthread ${ptrToString(worker.pthread_ptr)} sent an error!`;
|
|
}
|
|
err(`${message} ${e.filename}:${e.lineno}: ${e.message}`);
|
|
throw e;
|
|
};
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
worker.on("message", data => worker.onmessage({
|
|
data
|
|
}));
|
|
worker.on("error", e => worker.onerror(e));
|
|
}
|
|
assert(wasmMemory instanceof WebAssembly.Memory, "WebAssembly memory should have been loaded by now!");
|
|
assert(wasmModule instanceof WebAssembly.Module, "WebAssembly Module should have been loaded by now!");
|
|
// When running on a pthread, none of the incoming parameters on the module
|
|
// object are present. Proxy known handlers back to the main thread if specified.
|
|
var handlers = [];
|
|
var knownHandlers = [ "onExit", "onAbort", "print", "printErr" ];
|
|
for (var handler of knownHandlers) {
|
|
if (Module.propertyIsEnumerable(handler)) {
|
|
handlers.push(handler);
|
|
}
|
|
}
|
|
// Ask the new worker to load up the Emscripten-compiled page. This is a heavy operation.
|
|
worker.postMessage({
|
|
cmd: "load",
|
|
handlers,
|
|
wasmMemory,
|
|
wasmModule,
|
|
"workerID": worker.workerID
|
|
});
|
|
}),
|
|
async loadWasmModuleToAllWorkers() {
|
|
// Instantiation is synchronous in pthreads.
|
|
if (ENVIRONMENT_IS_PTHREAD) {
|
|
return;
|
|
}
|
|
let pthreadPoolReady = Promise.all(PThread.unusedWorkers.map(PThread.loadWasmModuleToWorker));
|
|
return pthreadPoolReady;
|
|
},
|
|
allocateUnusedWorker() {
|
|
var worker;
|
|
var pthreadMainJs = _scriptName;
|
|
// We can't use makeModuleReceiveWithVar here since we want to also
|
|
// call URL.createObjectURL on the mainScriptUrlOrBlob.
|
|
if (Module["mainScriptUrlOrBlob"]) {
|
|
pthreadMainJs = Module["mainScriptUrlOrBlob"];
|
|
if (typeof pthreadMainJs != "string") {
|
|
pthreadMainJs = URL.createObjectURL(pthreadMainJs);
|
|
}
|
|
}
|
|
// Use Trusted Types compatible wrappers.
|
|
if (globalThis.trustedTypes?.createPolicy) {
|
|
var p = trustedTypes.createPolicy("emscripten#workerPolicy2", {
|
|
createScriptURL: ignored => pthreadMainJs
|
|
});
|
|
worker = new Worker(p.createScriptURL("ignored"), {
|
|
// This is the way that we signal to the node worker that it is hosting
|
|
// a pthread.
|
|
"workerData": "em-pthread",
|
|
// This is the way that we signal to the Web Worker that it is hosting
|
|
// a pthread.
|
|
"name": "em-pthread-" + PThread.nextWorkerID
|
|
});
|
|
} else worker = new Worker(pthreadMainJs, {
|
|
// This is the way that we signal to the node worker that it is hosting
|
|
// a pthread.
|
|
"workerData": "em-pthread",
|
|
// This is the way that we signal to the Web Worker that it is hosting
|
|
// a pthread.
|
|
"name": "em-pthread-" + PThread.nextWorkerID
|
|
});
|
|
worker.workerID = PThread.nextWorkerID++;
|
|
PThread.unusedWorkers.push(worker);
|
|
},
|
|
getNewWorker() {
|
|
if (PThread.unusedWorkers.length == 0) {
|
|
// PTHREAD_POOL_SIZE_STRICT should show a warning and, if set to level `2`, return from the function.
|
|
// However, if we're in Node.js, then we can create new workers on the fly and PTHREAD_POOL_SIZE_STRICT
|
|
// should be ignored altogether.
|
|
if (!ENVIRONMENT_IS_NODE) {
|
|
err("Tried to spawn a new thread, but the thread pool is exhausted.\n" + "This might result in a deadlock unless some threads eventually exit or the code explicitly breaks out to the event loop.\n" + "If you want to increase the pool size, use setting `-sPTHREAD_POOL_SIZE=...`." + "\nIf you want to throw an explicit error instead of the risk of deadlocking in those cases, use setting `-sPTHREAD_POOL_SIZE_STRICT=2`.");
|
|
}
|
|
PThread.allocateUnusedWorker();
|
|
PThread.loadWasmModuleToWorker(PThread.unusedWorkers[0]);
|
|
}
|
|
return PThread.unusedWorkers.pop();
|
|
}
|
|
};
|
|
|
|
var onPostRuns = [];
|
|
|
|
var addOnPostRun = cb => onPostRuns.push(cb);
|
|
|
|
function establishStackSpace(pthread_ptr) {
|
|
var stackHigh = HEAPU32[(((pthread_ptr) + (52)) >> 2)];
|
|
var stackSize = HEAPU32[(((pthread_ptr) + (56)) >> 2)];
|
|
var stackLow = stackHigh - stackSize;
|
|
assert(stackHigh != 0);
|
|
assert(stackLow != 0);
|
|
assert(stackHigh > stackLow, "stackHigh must be higher then stackLow");
|
|
// Set stack limits used by `emscripten/stack.h` function. These limits are
|
|
// cached in wasm-side globals to make checks as fast as possible.
|
|
_emscripten_stack_set_limits(stackHigh, stackLow);
|
|
// Call inside wasm module to set up the stack frame for this pthread in wasm module scope
|
|
stackRestore(stackHigh);
|
|
// Write the stack cookie last, after we have set up the proper bounds and
|
|
// current position of the stack.
|
|
writeStackCookie();
|
|
}
|
|
|
|
var wasmTableMirror = [];
|
|
|
|
var getWasmTableEntry = funcPtr => {
|
|
var func = wasmTableMirror[funcPtr];
|
|
if (!func) {
|
|
/** @suppress {checkTypes} */ wasmTableMirror[funcPtr] = func = wasmTable.get(funcPtr);
|
|
}
|
|
/** @suppress {checkTypes} */ assert(wasmTable.get(funcPtr) == func, "JavaScript-side Wasm function table mirror is out of date!");
|
|
return func;
|
|
};
|
|
|
|
var invokeEntryPoint = (ptr, arg) => {
|
|
// An old thread on this worker may have been canceled without returning the
|
|
// `runtimeKeepaliveCounter` to zero. Reset it now so the new thread won't
|
|
// be affected.
|
|
runtimeKeepaliveCounter = 0;
|
|
// Same for noExitRuntime. The default for pthreads should always be false
|
|
// otherwise pthreads would never complete and attempts to pthread_join to
|
|
// them would block forever.
|
|
// pthreads can still choose to set `noExitRuntime` explicitly, or
|
|
// call emscripten_unwind_to_js_event_loop to extend their lifetime beyond
|
|
// their main function. See comment in src/runtime_pthread.js for more.
|
|
noExitRuntime = 0;
|
|
// pthread entry points are always of signature 'void *ThreadMain(void *arg)'
|
|
// Native codebases sometimes spawn threads with other thread entry point
|
|
// signatures, such as void ThreadMain(void *arg), void *ThreadMain(), or
|
|
// void ThreadMain(). That is not acceptable per C/C++ specification, but
|
|
// x86 compiler ABI extensions enable that to work. If you find the
|
|
// following line to crash, either change the signature to "proper" void
|
|
// *ThreadMain(void *arg) form, or try linking with the Emscripten linker
|
|
// flag -sEMULATE_FUNCTION_POINTER_CASTS to add in emulation for this x86
|
|
// ABI extension.
|
|
var result = getWasmTableEntry(ptr)(arg);
|
|
checkStackCookie();
|
|
function finish(result) {
|
|
// In MINIMAL_RUNTIME the noExitRuntime concept does not apply to
|
|
// pthreads. To exit a pthread with live runtime, use the function
|
|
// emscripten_unwind_to_js_event_loop() in the pthread body.
|
|
if (keepRuntimeAlive()) {
|
|
EXITSTATUS = result;
|
|
return;
|
|
}
|
|
__emscripten_thread_exit(result);
|
|
}
|
|
finish(result);
|
|
};
|
|
|
|
var noExitRuntime = true;
|
|
|
|
var registerTLSInit = tlsInitFunc => PThread.tlsInitFunctions.push(tlsInitFunc);
|
|
|
|
var warnOnce = text => {
|
|
warnOnce.shown ||= {};
|
|
if (!warnOnce.shown[text]) {
|
|
warnOnce.shown[text] = 1;
|
|
if (ENVIRONMENT_IS_NODE) text = "warning: " + text;
|
|
err(text);
|
|
}
|
|
};
|
|
|
|
var wasmMemory;
|
|
|
|
var UTF8Decoder = new TextDecoder;
|
|
|
|
var findStringEnd = (heapOrArray, idx, maxBytesToRead, ignoreNul) => {
var maxIdx = idx + maxBytesToRead;
if (ignoreNul) return maxIdx;
// TextDecoder needs to know the byte length in advance, it doesn't stop on
// null terminator by itself.
// As a tiny code-size trick, compare idx against maxIdx using a negation,
// so that maxBytesToRead=undefined/NaN means Infinity.
while (heapOrArray[idx] && !(idx >= maxIdx)) ++idx;
return idx;
};

/**
* Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the
* emscripten HEAP, returns a copy of that string as a Javascript String object.
*
* @param {number} ptr
* @param {number=} maxBytesToRead - An optional length that specifies the
* maximum number of bytes to read. You can omit this parameter to scan the
* string until the first 0 byte. If maxBytesToRead is passed, and the string
* at [ptr, ptr+maxBytesToRead) contains a null byte in the middle, then the
* string will cut short at that byte index.
* @param {boolean=} ignoreNul - If true, the function will not stop on a NUL character.
* @return {string}
*/ var UTF8ToString = (ptr, maxBytesToRead, ignoreNul) => {
assert(typeof ptr == "number", `UTF8ToString expects a number (got ${typeof ptr})`);
if (!ptr) return "";
var end = findStringEnd(HEAPU8, ptr, maxBytesToRead, ignoreNul);
return UTF8Decoder.decode(HEAPU8.slice(ptr, end));
};
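// Illustrative usage of UTF8ToString (hypothetical pointer, for reference only):
//   var s = UTF8ToString(msgPtr);      // decode until the first NUL byte
//   var s8 = UTF8ToString(msgPtr, 8);  // decode at most 8 bytes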
|
|
|
|
var ___assert_fail = (condition, filename, line, func) => abort(`Assertion failed: ${UTF8ToString(condition)}, at: ` + [ filename ? UTF8ToString(filename) : "unknown filename", line, func ? UTF8ToString(func) : "unknown function" ]);
|
|
|
|
class ExceptionInfo {
|
|
// excPtr - Thrown object pointer to wrap. Metadata pointer is calculated from it.
|
|
constructor(excPtr) {
|
|
this.excPtr = excPtr;
|
|
this.ptr = excPtr - 24;
|
|
}
|
|
set_type(type) {
|
|
HEAPU32[(((this.ptr) + (4)) >> 2)] = type;
|
|
}
|
|
get_type() {
|
|
return HEAPU32[(((this.ptr) + (4)) >> 2)];
|
|
}
|
|
set_destructor(destructor) {
|
|
HEAPU32[(((this.ptr) + (8)) >> 2)] = destructor;
|
|
}
|
|
get_destructor() {
|
|
return HEAPU32[(((this.ptr) + (8)) >> 2)];
|
|
}
|
|
set_caught(caught) {
|
|
caught = caught ? 1 : 0;
|
|
HEAP8[(this.ptr) + (12)] = caught;
|
|
}
|
|
get_caught() {
|
|
return HEAP8[(this.ptr) + (12)] != 0;
|
|
}
|
|
set_rethrown(rethrown) {
|
|
rethrown = rethrown ? 1 : 0;
|
|
HEAP8[(this.ptr) + (13)] = rethrown;
|
|
}
|
|
get_rethrown() {
|
|
return HEAP8[(this.ptr) + (13)] != 0;
|
|
}
|
|
// Initialize native structure fields. Should be called once after allocated.
|
|
init(type, destructor) {
|
|
this.set_adjusted_ptr(0);
|
|
this.set_type(type);
|
|
this.set_destructor(destructor);
|
|
}
|
|
set_adjusted_ptr(adjustedPtr) {
|
|
HEAPU32[(((this.ptr) + (16)) >> 2)] = adjustedPtr;
|
|
}
|
|
get_adjusted_ptr() {
|
|
return HEAPU32[(((this.ptr) + (16)) >> 2)];
|
|
}
|
|
}
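// For reference, the exception metadata layout implied by the accessors above
// (all offsets relative to this.ptr == excPtr - 24):
//   +4  type (uint32)        +8  destructor (uint32)
//   +12 caught (int8)        +13 rethrown (int8)
//   +16 adjusted_ptr (uint32)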
|
|
|
|
var exceptionLast = 0;
|
|
|
|
var uncaughtExceptionCount = 0;
|
|
|
|
var ___cxa_throw = (ptr, type, destructor) => {
|
|
var info = new ExceptionInfo(ptr);
|
|
// Initialize ExceptionInfo content after it was allocated in __cxa_allocate_exception.
|
|
info.init(type, destructor);
|
|
exceptionLast = ptr;
|
|
uncaughtExceptionCount++;
|
|
assert(false, "Exception thrown, but exception catching is not enabled. Compile with -sNO_DISABLE_EXCEPTION_CATCHING or -sEXCEPTION_CATCHING_ALLOWED=[..] to catch.");
|
|
};
|
|
|
|
function pthreadCreateProxied(pthread_ptr, attr, startRoutine, arg) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(2, 0, 1, pthread_ptr, attr, startRoutine, arg);
|
|
return ___pthread_create_js(pthread_ptr, attr, startRoutine, arg);
|
|
}
|
|
|
|
var _emscripten_has_threading_support = () => !!globalThis.SharedArrayBuffer;
|
|
|
|
var ___pthread_create_js = (pthread_ptr, attr, startRoutine, arg) => {
|
|
if (!_emscripten_has_threading_support()) {
|
|
dbg("pthread_create: environment does not support SharedArrayBuffer, pthreads are not available");
|
|
return 6;
|
|
}
|
|
// List of JS objects that will transfer ownership to the Worker hosting the thread
|
|
var transferList = [];
|
|
var error = 0;
|
|
// Synchronously proxy the thread creation to main thread if possible. If we
|
|
// need to transfer ownership of objects, then proxy asynchronously via
|
|
// postMessage.
|
|
if (ENVIRONMENT_IS_PTHREAD && (transferList.length === 0 || error)) {
|
|
return pthreadCreateProxied(pthread_ptr, attr, startRoutine, arg);
|
|
}
|
|
// If on the main thread, and accessing Canvas/OffscreenCanvas failed, abort
|
|
// with the detected error.
|
|
if (error) return error;
|
|
var threadParams = {
|
|
startRoutine,
|
|
pthread_ptr,
|
|
arg,
|
|
transferList
|
|
};
|
|
if (ENVIRONMENT_IS_PTHREAD) {
|
|
// The prepopulated pool of web workers that can host pthreads is stored
|
|
// in the main JS thread. Therefore if a pthread is attempting to spawn a
|
|
// new thread, the thread creation must be deferred to the main JS thread.
|
|
threadParams.cmd = "spawnThread";
|
|
postMessage(threadParams, transferList);
|
|
// When we defer thread creation this way, we have no way to detect thread
|
|
// creation synchronously today, so we have to assume success and return 0.
|
|
return 0;
|
|
}
|
|
// We are the main thread, so we have the pthread warmup pool in this
|
|
// thread and can fire off JS thread creation directly ourselves.
|
|
return spawnThread(threadParams);
|
|
};
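// In short, the flow above is: fail with code 6 (EAGAIN) when SharedArrayBuffer is
// unavailable; when called from a pthread, forward the request to the main thread
// (synchronously via the proxying queue, or via postMessage with cmd:"spawnThread"
// when objects need to be transferred); only the main thread, which owns the worker
// pool, calls spawnThread() directly.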
|
|
|
|
var PATH = {
|
|
isAbs: path => path.charAt(0) === "/",
|
|
splitPath: filename => {
|
|
var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
|
|
return splitPathRe.exec(filename).slice(1);
|
|
},
|
|
normalizeArray: (parts, allowAboveRoot) => {
|
|
// if the path tries to go above the root, `up` ends up > 0
|
|
var up = 0;
|
|
for (var i = parts.length - 1; i >= 0; i--) {
|
|
var last = parts[i];
|
|
if (last === ".") {
|
|
parts.splice(i, 1);
|
|
} else if (last === "..") {
|
|
parts.splice(i, 1);
|
|
up++;
|
|
} else if (up) {
|
|
parts.splice(i, 1);
|
|
up--;
|
|
}
|
|
}
|
|
// if the path is allowed to go above the root, restore leading ..s
|
|
if (allowAboveRoot) {
|
|
for (;up; up--) {
|
|
parts.unshift("..");
|
|
}
|
|
}
|
|
return parts;
|
|
},
|
|
normalize: path => {
|
|
var isAbsolute = PATH.isAbs(path), trailingSlash = path.slice(-1) === "/";
|
|
// Normalize the path
|
|
path = PATH.normalizeArray(path.split("/").filter(p => !!p), !isAbsolute).join("/");
|
|
if (!path && !isAbsolute) {
|
|
path = ".";
|
|
}
|
|
if (path && trailingSlash) {
|
|
path += "/";
|
|
}
|
|
return (isAbsolute ? "/" : "") + path;
|
|
},
|
|
dirname: path => {
|
|
var result = PATH.splitPath(path), root = result[0], dir = result[1];
|
|
if (!root && !dir) {
|
|
// No dirname whatsoever
|
|
return ".";
|
|
}
|
|
if (dir) {
|
|
// It has a dirname, strip trailing slash
|
|
dir = dir.slice(0, -1);
|
|
}
|
|
return root + dir;
|
|
},
|
|
basename: path => path && path.match(/([^\/]+|\/)\/*$/)[1],
|
|
join: (...paths) => PATH.normalize(paths.join("/")),
|
|
join2: (l, r) => PATH.normalize(l + "/" + r)
|
|
};
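// Illustrative results of the path helpers above (derived from the logic, for reference):
//   PATH.normalize("/a//b/../c/")  // -> "/a/c/"
//   PATH.dirname("/a/b/c.txt")     // -> "/a/b"
//   PATH.basename("/a/b/c.txt")    // -> "c.txt"
//   PATH.join2("/usr", "lib")      // -> "/usr/lib"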
|
|
|
|
var initRandomFill = () => {
|
|
// This block is not needed on v19+ since crypto.getRandomValues is builtin
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
var nodeCrypto = require("crypto");
|
|
return view => nodeCrypto.randomFillSync(view);
|
|
}
|
|
// like with most Web APIs, we can't use Web Crypto API directly on shared memory,
|
|
// so we need to create an intermediate buffer and copy it to the destination
|
|
return view => view.set(crypto.getRandomValues(new Uint8Array(view.byteLength)));
|
|
};
|
|
|
|
var randomFill = view => {
|
|
// Lazily init on the first invocation.
|
|
(randomFill = initRandomFill())(view);
|
|
};
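// Note on the pattern above: the first call to randomFill() replaces itself with the
// environment-specific filler returned by initRandomFill(), so feature detection runs
// only once. Illustrative call (hypothetical buffer): randomFill(new Uint8Array(16));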
|
|
|
|
var PATH_FS = {
|
|
resolve: (...args) => {
|
|
var resolvedPath = "", resolvedAbsolute = false;
|
|
for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) {
|
|
var path = (i >= 0) ? args[i] : FS.cwd();
|
|
// Skip empty and invalid entries
|
|
if (typeof path != "string") {
|
|
throw new TypeError("Arguments to path.resolve must be strings");
|
|
} else if (!path) {
|
|
return "";
|
|
}
|
|
resolvedPath = path + "/" + resolvedPath;
|
|
resolvedAbsolute = PATH.isAbs(path);
|
|
}
|
|
// At this point the path should be resolved to a full absolute path, but
|
|
// handle relative paths to be safe (might happen when process.cwd() fails)
|
|
resolvedPath = PATH.normalizeArray(resolvedPath.split("/").filter(p => !!p), !resolvedAbsolute).join("/");
|
|
return ((resolvedAbsolute ? "/" : "") + resolvedPath) || ".";
|
|
},
|
|
relative: (from, to) => {
|
|
from = PATH_FS.resolve(from).slice(1);
|
|
to = PATH_FS.resolve(to).slice(1);
|
|
function trim(arr) {
|
|
var start = 0;
|
|
for (;start < arr.length; start++) {
|
|
if (arr[start] !== "") break;
|
|
}
|
|
var end = arr.length - 1;
|
|
for (;end >= 0; end--) {
|
|
if (arr[end] !== "") break;
|
|
}
|
|
if (start > end) return [];
|
|
return arr.slice(start, end - start + 1);
|
|
}
|
|
var fromParts = trim(from.split("/"));
|
|
var toParts = trim(to.split("/"));
|
|
var length = Math.min(fromParts.length, toParts.length);
|
|
var samePartsLength = length;
|
|
for (var i = 0; i < length; i++) {
|
|
if (fromParts[i] !== toParts[i]) {
|
|
samePartsLength = i;
|
|
break;
|
|
}
|
|
}
|
|
var outputParts = [];
|
|
for (var i = samePartsLength; i < fromParts.length; i++) {
|
|
outputParts.push("..");
|
|
}
|
|
outputParts = outputParts.concat(toParts.slice(samePartsLength));
|
|
return outputParts.join("/");
|
|
}
|
|
};
|
|
|
|
/**
|
|
* Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given
|
|
* array that contains uint8 values, returns a copy of that string as a
|
|
* Javascript String object.
|
|
* heapOrArray is either a regular array, or a JavaScript typed array view.
|
|
* @param {number=} idx
|
|
* @param {number=} maxBytesToRead
|
|
* @param {boolean=} ignoreNul - If true, the function will not stop on a NUL character.
|
|
* @return {string}
|
|
*/ var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead, ignoreNul) => {
|
|
var endPtr = findStringEnd(heapOrArray, idx, maxBytesToRead, ignoreNul);
|
|
return UTF8Decoder.decode(heapOrArray.buffer
  ? (heapOrArray.buffer instanceof ArrayBuffer
      ? heapOrArray.subarray(idx, endPtr)
      : heapOrArray.slice(idx, endPtr))
  : new Uint8Array(heapOrArray.slice(idx, endPtr)));
|
|
};
|
|
|
|
var FS_stdin_getChar_buffer = [];
|
|
|
|
var lengthBytesUTF8 = str => {
|
|
var len = 0;
|
|
for (var i = 0; i < str.length; ++i) {
|
|
// Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code
|
|
// unit, not a Unicode code point of the character! So decode
|
|
// UTF16->UTF32->UTF8.
|
|
// See http://unicode.org/faq/utf_bom.html#utf16-3
|
|
var c = str.charCodeAt(i);
|
|
// possibly a lead surrogate
|
|
if (c <= 127) {
|
|
len++;
|
|
} else if (c <= 2047) {
|
|
len += 2;
|
|
} else if (c >= 55296 && c <= 57343) {
|
|
len += 4;
|
|
++i;
|
|
} else {
|
|
len += 3;
|
|
}
|
|
}
|
|
return len;
|
|
};
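// Worked examples of the UTF-8 length computation above (for reference):
//   lengthBytesUTF8("abc")  // -> 3, ASCII is 1 byte per character
//   lengthBytesUTF8("é")    // -> 2, U+00E9 needs 2 bytes
//   lengthBytesUTF8("😀")   // -> 4, a surrogate pair encodes as one 4-byte code point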
|
|
|
|
var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => {
|
|
assert(typeof str === "string", `stringToUTF8Array expects a string (got ${typeof str})`);
|
|
// Parameter maxBytesToWrite is not optional. Negative values, 0, null,
|
|
// undefined and false each don't write out any bytes.
|
|
if (!(maxBytesToWrite > 0)) return 0;
|
|
var startIdx = outIdx;
|
|
var endIdx = outIdx + maxBytesToWrite - 1;
|
|
// -1 for string null terminator.
|
|
for (var i = 0; i < str.length; ++i) {
|
|
// For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description
|
|
// and https://www.ietf.org/rfc/rfc2279.txt
|
|
// and https://tools.ietf.org/html/rfc3629
|
|
var u = str.codePointAt(i);
|
|
if (u <= 127) {
|
|
if (outIdx >= endIdx) break;
|
|
heap[outIdx++] = u;
|
|
} else if (u <= 2047) {
|
|
if (outIdx + 1 >= endIdx) break;
|
|
heap[outIdx++] = 192 | (u >> 6);
|
|
heap[outIdx++] = 128 | (u & 63);
|
|
} else if (u <= 65535) {
|
|
if (outIdx + 2 >= endIdx) break;
|
|
heap[outIdx++] = 224 | (u >> 12);
|
|
heap[outIdx++] = 128 | ((u >> 6) & 63);
|
|
heap[outIdx++] = 128 | (u & 63);
|
|
} else {
|
|
if (outIdx + 3 >= endIdx) break;
|
|
if (u > 1114111) warnOnce("Invalid Unicode code point " + ptrToString(u) + " encountered when serializing a JS string to a UTF-8 string in wasm memory! (Valid unicode code points should be in range 0-0x10FFFF).");
|
|
heap[outIdx++] = 240 | (u >> 18);
|
|
heap[outIdx++] = 128 | ((u >> 12) & 63);
|
|
heap[outIdx++] = 128 | ((u >> 6) & 63);
|
|
heap[outIdx++] = 128 | (u & 63);
|
|
// Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16.
|
|
// We need to manually skip over the second code unit for correct iteration.
|
|
i++;
|
|
}
|
|
}
|
|
// Null-terminate the pointer to the buffer.
|
|
heap[outIdx] = 0;
|
|
return outIdx - startIdx;
|
|
};
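// Worked example of the encoder above: writing "¢" (U+00A2 = 0xA2) emits the two bytes
// 192|(0xA2>>6) = 0xC2 and 128|(0xA2&63) = 0xA2, followed by the NUL terminator.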
|
|
|
|
/** @type {function(string, boolean=, number=)} */ var intArrayFromString = (stringy, dontAddNull, length) => {
|
|
var len = length > 0 ? length : lengthBytesUTF8(stringy) + 1;
|
|
var u8array = new Array(len);
|
|
var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length);
|
|
if (dontAddNull) u8array.length = numBytesWritten;
|
|
return u8array;
|
|
};
|
|
|
|
var FS_stdin_getChar = () => {
|
|
if (!FS_stdin_getChar_buffer.length) {
|
|
var result = null;
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
// we will read data by chunks of BUFSIZE
|
|
var BUFSIZE = 256;
|
|
var buf = Buffer.alloc(BUFSIZE);
|
|
var bytesRead = 0;
|
|
// For some reason we must suppress a closure warning here, even though
|
|
// fd definitely exists on process.stdin, and is even the proper way to
|
|
// get the fd of stdin,
|
|
// https://github.com/nodejs/help/issues/2136#issuecomment-523649904
|
|
// This started to happen after moving this logic out of library_tty.js,
|
|
// so it is related to the surrounding code in some unclear manner.
|
|
/** @suppress {missingProperties} */ var fd = process.stdin.fd;
|
|
try {
|
|
bytesRead = fs.readSync(fd, buf, 0, BUFSIZE);
|
|
} catch (e) {
|
|
// Cross-platform differences: on Windows, reading EOF throws an
// exception, but on other OSes, reading EOF returns 0. Unify the
// behavior by treating the EOF exception as a read of 0 bytes.
|
|
if (e.toString().includes("EOF")) bytesRead = 0; else throw e;
|
|
}
|
|
if (bytesRead > 0) {
|
|
result = buf.slice(0, bytesRead).toString("utf-8");
|
|
}
|
|
} else if (globalThis.window?.prompt) {
|
|
// Browser.
|
|
result = window.prompt("Input: ");
|
|
// returns null on cancel
|
|
if (result !== null) {
|
|
result += "\n";
|
|
}
|
|
} else {}
|
|
if (!result) {
|
|
return null;
|
|
}
|
|
FS_stdin_getChar_buffer = intArrayFromString(result, true);
|
|
}
|
|
return FS_stdin_getChar_buffer.shift();
|
|
};
|
|
|
|
var TTY = {
|
|
ttys: [],
|
|
init() {},
|
|
shutdown() {},
|
|
register(dev, ops) {
|
|
TTY.ttys[dev] = {
|
|
input: [],
|
|
output: [],
|
|
ops
|
|
};
|
|
FS.registerDevice(dev, TTY.stream_ops);
|
|
},
|
|
stream_ops: {
|
|
open(stream) {
|
|
var tty = TTY.ttys[stream.node.rdev];
|
|
if (!tty) {
|
|
throw new FS.ErrnoError(43);
|
|
}
|
|
stream.tty = tty;
|
|
stream.seekable = false;
|
|
},
|
|
close(stream) {
|
|
// flush any pending line data
|
|
stream.tty.ops.fsync(stream.tty);
|
|
},
|
|
fsync(stream) {
|
|
stream.tty.ops.fsync(stream.tty);
|
|
},
|
|
read(stream, buffer, offset, length, pos) {
|
|
if (!stream.tty || !stream.tty.ops.get_char) {
|
|
throw new FS.ErrnoError(60);
|
|
}
|
|
var bytesRead = 0;
|
|
for (var i = 0; i < length; i++) {
|
|
var result;
|
|
try {
|
|
result = stream.tty.ops.get_char(stream.tty);
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
if (result === undefined && bytesRead === 0) {
|
|
throw new FS.ErrnoError(6);
|
|
}
|
|
if (result === null || result === undefined) break;
|
|
bytesRead++;
|
|
buffer[offset + i] = result;
|
|
}
|
|
if (bytesRead) {
|
|
stream.node.atime = Date.now();
|
|
}
|
|
return bytesRead;
|
|
},
|
|
write(stream, buffer, offset, length, pos) {
|
|
if (!stream.tty || !stream.tty.ops.put_char) {
|
|
throw new FS.ErrnoError(60);
|
|
}
|
|
try {
|
|
for (var i = 0; i < length; i++) {
|
|
stream.tty.ops.put_char(stream.tty, buffer[offset + i]);
|
|
}
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
if (length) {
|
|
stream.node.mtime = stream.node.ctime = Date.now();
|
|
}
|
|
return i;
|
|
}
|
|
},
|
|
default_tty_ops: {
|
|
get_char(tty) {
|
|
return FS_stdin_getChar();
|
|
},
|
|
put_char(tty, val) {
|
|
if (val === null || val === 10) {
|
|
out(UTF8ArrayToString(tty.output));
|
|
tty.output = [];
|
|
} else {
|
|
if (val != 0) tty.output.push(val);
|
|
}
|
|
},
|
|
fsync(tty) {
|
|
if (tty.output?.length > 0) {
|
|
out(UTF8ArrayToString(tty.output));
|
|
tty.output = [];
|
|
}
|
|
},
|
|
ioctl_tcgets(tty) {
|
|
// typical setting
|
|
return {
|
|
c_iflag: 25856,
|
|
c_oflag: 5,
|
|
c_cflag: 191,
|
|
c_lflag: 35387,
|
|
c_cc: [ 3, 28, 127, 21, 4, 0, 1, 0, 17, 19, 26, 0, 18, 15, 23, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
|
|
};
|
|
},
|
|
ioctl_tcsets(tty, optional_actions, data) {
|
|
// currently just ignore
|
|
return 0;
|
|
},
|
|
ioctl_tiocgwinsz(tty) {
|
|
return [ 24, 80 ];
|
|
}
|
|
},
|
|
default_tty1_ops: {
|
|
put_char(tty, val) {
|
|
if (val === null || val === 10) {
|
|
err(UTF8ArrayToString(tty.output));
|
|
tty.output = [];
|
|
} else {
|
|
if (val != 0) tty.output.push(val);
|
|
}
|
|
},
|
|
fsync(tty) {
|
|
if (tty.output?.length > 0) {
|
|
err(UTF8ArrayToString(tty.output));
|
|
tty.output = [];
|
|
}
|
|
}
|
|
}
|
|
};
|
|
|
|
var zeroMemory = (ptr, size) => HEAPU8.fill(0, ptr, ptr + size);
|
|
|
|
var alignMemory = (size, alignment) => {
|
|
assert(alignment, "alignment argument is required");
|
|
return Math.ceil(size / alignment) * alignment;
|
|
};
|
|
|
|
var mmapAlloc = size => {
|
|
size = alignMemory(size, 65536);
|
|
var ptr = _emscripten_builtin_memalign(65536, size);
|
|
if (ptr) zeroMemory(ptr, size);
|
|
return ptr;
|
|
};
|
|
|
|
var MEMFS = {
|
|
ops_table: null,
|
|
mount(mount) {
|
|
return MEMFS.createNode(null, "/", 16895, 0);
|
|
},
|
|
createNode(parent, name, mode, dev) {
|
|
if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
|
|
// not supported
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
MEMFS.ops_table ||= {
|
|
dir: {
|
|
node: {
|
|
getattr: MEMFS.node_ops.getattr,
|
|
setattr: MEMFS.node_ops.setattr,
|
|
lookup: MEMFS.node_ops.lookup,
|
|
mknod: MEMFS.node_ops.mknod,
|
|
rename: MEMFS.node_ops.rename,
|
|
unlink: MEMFS.node_ops.unlink,
|
|
rmdir: MEMFS.node_ops.rmdir,
|
|
readdir: MEMFS.node_ops.readdir,
|
|
symlink: MEMFS.node_ops.symlink
|
|
},
|
|
stream: {
|
|
llseek: MEMFS.stream_ops.llseek
|
|
}
|
|
},
|
|
file: {
|
|
node: {
|
|
getattr: MEMFS.node_ops.getattr,
|
|
setattr: MEMFS.node_ops.setattr
|
|
},
|
|
stream: {
|
|
llseek: MEMFS.stream_ops.llseek,
|
|
read: MEMFS.stream_ops.read,
|
|
write: MEMFS.stream_ops.write,
|
|
mmap: MEMFS.stream_ops.mmap,
|
|
msync: MEMFS.stream_ops.msync
|
|
}
|
|
},
|
|
link: {
|
|
node: {
|
|
getattr: MEMFS.node_ops.getattr,
|
|
setattr: MEMFS.node_ops.setattr,
|
|
readlink: MEMFS.node_ops.readlink
|
|
},
|
|
stream: {}
|
|
},
|
|
chrdev: {
|
|
node: {
|
|
getattr: MEMFS.node_ops.getattr,
|
|
setattr: MEMFS.node_ops.setattr
|
|
},
|
|
stream: FS.chrdev_stream_ops
|
|
}
|
|
};
|
|
var node = FS.createNode(parent, name, mode, dev);
|
|
if (FS.isDir(node.mode)) {
|
|
node.node_ops = MEMFS.ops_table.dir.node;
|
|
node.stream_ops = MEMFS.ops_table.dir.stream;
|
|
node.contents = {};
|
|
} else if (FS.isFile(node.mode)) {
|
|
node.node_ops = MEMFS.ops_table.file.node;
|
|
node.stream_ops = MEMFS.ops_table.file.stream;
|
|
node.usedBytes = 0;
|
|
// The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity.
// When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred
// for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small size
// penalty for appending writes that continuously grow a file, similar to the std::vector capacity-vs-used scheme.
|
|
node.contents = null;
|
|
} else if (FS.isLink(node.mode)) {
|
|
node.node_ops = MEMFS.ops_table.link.node;
|
|
node.stream_ops = MEMFS.ops_table.link.stream;
|
|
} else if (FS.isChrdev(node.mode)) {
|
|
node.node_ops = MEMFS.ops_table.chrdev.node;
|
|
node.stream_ops = MEMFS.ops_table.chrdev.stream;
|
|
}
|
|
node.atime = node.mtime = node.ctime = Date.now();
|
|
// add the new node to the parent
|
|
if (parent) {
|
|
parent.contents[name] = node;
|
|
parent.atime = parent.mtime = parent.ctime = node.atime;
|
|
}
|
|
return node;
|
|
},
|
|
getFileDataAsTypedArray(node) {
|
|
if (!node.contents) return new Uint8Array(0);
|
|
if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes);
|
|
// Make sure to not return excess unused bytes.
|
|
return new Uint8Array(node.contents);
|
|
},
|
|
expandFileStorage(node, newCapacity) {
|
|
var prevCapacity = node.contents ? node.contents.length : 0;
|
|
if (prevCapacity >= newCapacity) return;
|
|
// No need to expand, the storage was already large enough.
|
|
// Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity.
|
|
// For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to
|
|
// avoid overshooting the allocation cap by a very large margin.
|
|
var CAPACITY_DOUBLING_MAX = 1024 * 1024;
|
|
newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2 : 1.125)) >>> 0);
|
|
if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256);
|
|
// At minimum allocate 256b for each file when expanding.
|
|
var oldContents = node.contents;
|
|
node.contents = new Uint8Array(newCapacity);
|
|
// Allocate new storage.
|
|
if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0);
|
|
},
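// Illustrative growth behavior of expandFileStorage (from the factors above): a 100 KB
// file that needs one more byte grows to ~200 KB (2x below 1 MB), while a 16 MB file
// grows to ~18 MB (1.125x above 1 MB).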
|
|
resizeFileStorage(node, newSize) {
|
|
if (node.usedBytes == newSize) return;
|
|
if (newSize == 0) {
|
|
node.contents = null;
|
|
// Fully decommit when requesting a resize to zero.
|
|
node.usedBytes = 0;
|
|
} else {
|
|
var oldContents = node.contents;
|
|
node.contents = new Uint8Array(newSize);
|
|
// Allocate new storage.
|
|
if (oldContents) {
|
|
node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes)));
|
|
}
|
|
node.usedBytes = newSize;
|
|
}
|
|
},
|
|
node_ops: {
|
|
getattr(node) {
|
|
var attr = {};
|
|
// device numbers reuse inode numbers.
|
|
attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
|
|
attr.ino = node.id;
|
|
attr.mode = node.mode;
|
|
attr.nlink = 1;
|
|
attr.uid = 0;
|
|
attr.gid = 0;
|
|
attr.rdev = node.rdev;
|
|
if (FS.isDir(node.mode)) {
|
|
attr.size = 4096;
|
|
} else if (FS.isFile(node.mode)) {
|
|
attr.size = node.usedBytes;
|
|
} else if (FS.isLink(node.mode)) {
|
|
attr.size = node.link.length;
|
|
} else {
|
|
attr.size = 0;
|
|
}
|
|
attr.atime = new Date(node.atime);
|
|
attr.mtime = new Date(node.mtime);
|
|
attr.ctime = new Date(node.ctime);
|
|
// NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize),
|
|
// but this is not required by the standard.
|
|
attr.blksize = 4096;
|
|
attr.blocks = Math.ceil(attr.size / attr.blksize);
|
|
return attr;
|
|
},
|
|
setattr(node, attr) {
|
|
for (const key of [ "mode", "atime", "mtime", "ctime" ]) {
|
|
if (attr[key] != null) {
|
|
node[key] = attr[key];
|
|
}
|
|
}
|
|
if (attr.size !== undefined) {
|
|
MEMFS.resizeFileStorage(node, attr.size);
|
|
}
|
|
},
|
|
lookup(parent, name) {
|
|
throw new FS.ErrnoError(44);
|
|
},
|
|
mknod(parent, name, mode, dev) {
|
|
return MEMFS.createNode(parent, name, mode, dev);
|
|
},
|
|
rename(old_node, new_dir, new_name) {
|
|
var new_node;
|
|
try {
|
|
new_node = FS.lookupNode(new_dir, new_name);
|
|
} catch (e) {}
|
|
if (new_node) {
|
|
if (FS.isDir(old_node.mode)) {
|
|
// if we're overwriting a directory at new_name, make sure it's empty.
|
|
for (var i in new_node.contents) {
|
|
throw new FS.ErrnoError(55);
|
|
}
|
|
}
|
|
FS.hashRemoveNode(new_node);
|
|
}
|
|
// do the internal rewiring
|
|
delete old_node.parent.contents[old_node.name];
|
|
new_dir.contents[new_name] = old_node;
|
|
old_node.name = new_name;
|
|
new_dir.ctime = new_dir.mtime = old_node.parent.ctime = old_node.parent.mtime = Date.now();
|
|
},
|
|
unlink(parent, name) {
|
|
delete parent.contents[name];
|
|
parent.ctime = parent.mtime = Date.now();
|
|
},
|
|
rmdir(parent, name) {
|
|
var node = FS.lookupNode(parent, name);
|
|
for (var i in node.contents) {
|
|
throw new FS.ErrnoError(55);
|
|
}
|
|
delete parent.contents[name];
|
|
parent.ctime = parent.mtime = Date.now();
|
|
},
|
|
readdir(node) {
|
|
return [ ".", "..", ...Object.keys(node.contents) ];
|
|
},
|
|
symlink(parent, newname, oldpath) {
|
|
var node = MEMFS.createNode(parent, newname, 511 | 40960, 0);
|
|
node.link = oldpath;
|
|
return node;
|
|
},
|
|
readlink(node) {
|
|
if (!FS.isLink(node.mode)) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
return node.link;
|
|
}
|
|
},
|
|
stream_ops: {
|
|
read(stream, buffer, offset, length, position) {
|
|
var contents = stream.node.contents;
|
|
if (position >= stream.node.usedBytes) return 0;
|
|
var size = Math.min(stream.node.usedBytes - position, length);
|
|
assert(size >= 0);
|
|
if (size > 8 && contents.subarray) {
|
|
// non-trivial, and typed array
|
|
buffer.set(contents.subarray(position, position + size), offset);
|
|
} else {
|
|
for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i];
|
|
}
|
|
return size;
|
|
},
|
|
write(stream, buffer, offset, length, position, canOwn) {
|
|
// The data buffer should be a typed array view
|
|
assert(!(buffer instanceof ArrayBuffer));
|
|
if (!length) return 0;
|
|
var node = stream.node;
|
|
node.mtime = node.ctime = Date.now();
|
|
if (buffer.subarray && (!node.contents || node.contents.subarray)) {
|
|
// This write is from a typed array to a typed array?
|
|
if (canOwn) {
|
|
assert(position === 0, "canOwn must imply no weird position inside the file");
|
|
node.contents = buffer.subarray(offset, offset + length);
|
|
node.usedBytes = length;
|
|
return length;
|
|
} else if (node.usedBytes === 0 && position === 0) {
|
|
// If this is a simple first write to an empty file, do a fast set since we don't need to care about old data.
|
|
node.contents = buffer.slice(offset, offset + length);
|
|
node.usedBytes = length;
|
|
return length;
|
|
} else if (position + length <= node.usedBytes) {
|
|
// Writing to an already allocated and used subrange of the file?
|
|
node.contents.set(buffer.subarray(offset, offset + length), position);
|
|
return length;
|
|
}
|
|
}
|
|
// Appending to an existing file and we need to reallocate, or source data did not come as a typed array.
|
|
MEMFS.expandFileStorage(node, position + length);
|
|
if (node.contents.subarray && buffer.subarray) {
|
|
// Use typed array write which is available.
|
|
node.contents.set(buffer.subarray(offset, offset + length), position);
|
|
} else {
|
|
for (var i = 0; i < length; i++) {
|
|
node.contents[position + i] = buffer[offset + i];
|
|
}
|
|
}
|
|
node.usedBytes = Math.max(node.usedBytes, position + length);
|
|
return length;
|
|
},
|
|
llseek(stream, offset, whence) {
|
|
var position = offset;
|
|
if (whence === 1) {
|
|
position += stream.position;
|
|
} else if (whence === 2) {
|
|
if (FS.isFile(stream.node.mode)) {
|
|
position += stream.node.usedBytes;
|
|
}
|
|
}
|
|
if (position < 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
return position;
|
|
},
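// The whence values handled above follow the usual POSIX meaning:
//   0 = SEEK_SET (absolute), 1 = SEEK_CUR (relative to the current position),
//   2 = SEEK_END (relative to the end of the file, here node.usedBytes).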
|
|
mmap(stream, length, position, prot, flags) {
|
|
if (!FS.isFile(stream.node.mode)) {
|
|
throw new FS.ErrnoError(43);
|
|
}
|
|
var ptr;
|
|
var allocated;
|
|
var contents = stream.node.contents;
|
|
// Only make a new copy when MAP_PRIVATE is specified.
|
|
if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) {
|
|
// We can't emulate MAP_SHARED when the file is not backed by the
|
|
// buffer we're mapping to (e.g. the HEAP buffer).
|
|
allocated = false;
|
|
ptr = contents.byteOffset;
|
|
} else {
|
|
allocated = true;
|
|
ptr = mmapAlloc(length);
|
|
if (!ptr) {
|
|
throw new FS.ErrnoError(48);
|
|
}
|
|
if (contents) {
|
|
// Try to avoid unnecessary slices.
|
|
if (position > 0 || position + length < contents.length) {
|
|
if (contents.subarray) {
|
|
contents = contents.subarray(position, position + length);
|
|
} else {
|
|
contents = Array.prototype.slice.call(contents, position, position + length);
|
|
}
|
|
}
|
|
HEAP8.set(contents, ptr);
|
|
}
|
|
}
|
|
return {
|
|
ptr,
|
|
allocated
|
|
};
|
|
},
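// Note: in the mmap handler above, `flags & 2` corresponds to MAP_PRIVATE (its usual
// numeric value); shared mappings of files already backed by the wasm heap are returned
// in place, anything else gets a fresh copy allocated via mmapAlloc().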
|
|
msync(stream, buffer, offset, length, mmapFlags) {
|
|
MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false);
|
|
// should we check if bytesWritten and length are the same?
|
|
return 0;
|
|
}
|
|
}
|
|
};
|
|
|
|
var FS_modeStringToFlags = str => {
var flagModes = {
"r": 0,
"r+": 2,
"w": 512 | 64 | 1,
"w+": 512 | 64 | 2,
"a": 1024 | 64 | 1,
"a+": 1024 | 64 | 2
};
var flags = flagModes[str];
if (typeof flags == "undefined") {
throw new Error(`Unknown file open mode: ${str}`);
}
return flags;
};
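// The numeric bits above correspond to the usual open(2) flags (values assumed from the
// libc Emscripten targets): O_WRONLY=1, O_RDWR=2, O_CREAT=64, O_TRUNC=512, O_APPEND=1024,
// so e.g. "w" maps to O_CREAT|O_TRUNC|O_WRONLY.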
|
|
|
|
var FS_getMode = (canRead, canWrite) => {
var mode = 0;
if (canRead) mode |= 292 | 73;
if (canWrite) mode |= 146;
return mode;
};
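// In octal, 292|73 is 0444|0111 (read+execute for user/group/other) and 146 is 0222
// (write for all), so FS_getMode(true, true) === 511, i.e. 0777.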
|
|
|
|
var IDBFS = {
|
|
dbs: {},
|
|
indexedDB: () => {
|
|
assert(typeof indexedDB != "undefined", "IDBFS used, but indexedDB not supported");
|
|
return indexedDB;
|
|
},
|
|
DB_VERSION: 21,
|
|
DB_STORE_NAME: "FILE_DATA",
|
|
queuePersist: mount => {
|
|
function onPersistComplete() {
|
|
if (mount.idbPersistState === "again") startPersist(); else mount.idbPersistState = 0;
|
|
}
|
|
function startPersist() {
|
|
mount.idbPersistState = "idb";
|
|
// Mark that we are currently running a sync operation
|
|
IDBFS.syncfs(mount, /*populate:*/ false, onPersistComplete);
|
|
}
|
|
if (!mount.idbPersistState) {
|
|
// Programs typically write/copy/move multiple files in the in-memory
|
|
// filesystem within a single app frame, so when a filesystem sync
|
|
// command is triggered, do not start it immediately, but only after
|
|
// the current frame is finished. This way all the modified files
|
|
// inside the main loop tick will be batched up to the same sync.
|
|
mount.idbPersistState = setTimeout(startPersist, 0);
|
|
} else if (mount.idbPersistState === "idb") {
|
|
// There is an active IndexedDB sync operation in-flight, but we now
|
|
// have accumulated more files to sync. We should therefore queue up
|
|
// a new sync after the current one finishes so that all writes
|
|
// will be properly persisted.
|
|
mount.idbPersistState = "again";
|
|
}
|
|
},
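// queuePersist above is a small state machine: idle (0) -> a pending setTimeout handle
// (sync scheduled after the current frame) -> "idb" (sync in flight) -> "again" (another
// sync requested while one is in flight, re-run when it completes).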
|
|
mount: mount => {
|
|
// reuse core MEMFS functionality
|
|
var mnt = MEMFS.mount(mount);
|
|
// If the automatic IDBFS persistence option has been selected, then automatically persist
|
|
// all modifications to the filesystem as they occur.
|
|
if (mount?.opts?.autoPersist) {
|
|
mount.idbPersistState = 0;
|
|
// IndexedDB sync starts in idle state
|
|
var memfs_node_ops = mnt.node_ops;
|
|
mnt.node_ops = {
|
|
...mnt.node_ops
|
|
};
|
|
// Clone node_ops to inject write tracking
|
|
mnt.node_ops.mknod = (parent, name, mode, dev) => {
|
|
var node = memfs_node_ops.mknod(parent, name, mode, dev);
|
|
// Propagate injected node_ops to the newly created child node
|
|
node.node_ops = mnt.node_ops;
|
|
// Remember for each IDBFS node which IDBFS mount point it came from so we know which mount to persist on modification.
|
|
node.idbfs_mount = mnt.mount;
|
|
// Remember original MEMFS stream_ops for this node
|
|
node.memfs_stream_ops = node.stream_ops;
|
|
// Clone stream_ops to inject write tracking
|
|
node.stream_ops = {
|
|
...node.stream_ops
|
|
};
|
|
// Track all file writes
|
|
node.stream_ops.write = (stream, buffer, offset, length, position, canOwn) => {
|
|
// This file has been modified, we must persist IndexedDB when this file closes
|
|
stream.node.isModified = true;
|
|
return node.memfs_stream_ops.write(stream, buffer, offset, length, position, canOwn);
|
|
};
|
|
// Persist IndexedDB on file close
|
|
node.stream_ops.close = stream => {
|
|
var n = stream.node;
|
|
if (n.isModified) {
|
|
IDBFS.queuePersist(n.idbfs_mount);
|
|
n.isModified = false;
|
|
}
|
|
if (n.memfs_stream_ops.close) return n.memfs_stream_ops.close(stream);
|
|
};
|
|
// Persist the node we just created to IndexedDB
|
|
IDBFS.queuePersist(mnt.mount);
|
|
return node;
|
|
};
|
|
// Also kick off persisting the filesystem on other operations that modify the filesystem.
|
|
mnt.node_ops.rmdir = (...args) => (IDBFS.queuePersist(mnt.mount), memfs_node_ops.rmdir(...args));
|
|
mnt.node_ops.symlink = (...args) => (IDBFS.queuePersist(mnt.mount), memfs_node_ops.symlink(...args));
|
|
mnt.node_ops.unlink = (...args) => (IDBFS.queuePersist(mnt.mount), memfs_node_ops.unlink(...args));
|
|
mnt.node_ops.rename = (...args) => (IDBFS.queuePersist(mnt.mount), memfs_node_ops.rename(...args));
|
|
}
|
|
return mnt;
|
|
},
|
|
syncfs: (mount, populate, callback) => {
|
|
IDBFS.getLocalSet(mount, (err, local) => {
|
|
if (err) return callback(err);
|
|
IDBFS.getRemoteSet(mount, (err, remote) => {
|
|
if (err) return callback(err);
|
|
var src = populate ? remote : local;
|
|
var dst = populate ? local : remote;
|
|
IDBFS.reconcile(src, dst, callback);
|
|
});
|
|
});
|
|
},
|
|
quit: () => {
|
|
for (var value of Object.values(IDBFS.dbs)) {
|
|
value.close();
|
|
}
|
|
IDBFS.dbs = {};
|
|
},
|
|
getDB: (name, callback) => {
|
|
// check the cache first
|
|
var db = IDBFS.dbs[name];
|
|
if (db) {
|
|
return callback(null, db);
|
|
}
|
|
var req;
|
|
try {
|
|
req = IDBFS.indexedDB().open(name, IDBFS.DB_VERSION);
|
|
} catch (e) {
|
|
return callback(e);
|
|
}
|
|
if (!req) {
|
|
return callback("Unable to connect to IndexedDB");
|
|
}
|
|
req.onupgradeneeded = e => {
|
|
var db = /** @type {IDBDatabase} */ (e.target.result);
|
|
var transaction = e.target.transaction;
|
|
var fileStore;
|
|
if (db.objectStoreNames.contains(IDBFS.DB_STORE_NAME)) {
|
|
fileStore = transaction.objectStore(IDBFS.DB_STORE_NAME);
|
|
} else {
|
|
fileStore = db.createObjectStore(IDBFS.DB_STORE_NAME);
|
|
}
|
|
if (!fileStore.indexNames.contains("timestamp")) {
|
|
fileStore.createIndex("timestamp", "timestamp", {
|
|
unique: false
|
|
});
|
|
}
|
|
};
|
|
req.onsuccess = () => {
|
|
db = /** @type {IDBDatabase} */ (req.result);
|
|
// add to the cache
|
|
IDBFS.dbs[name] = db;
|
|
callback(null, db);
|
|
};
|
|
req.onerror = e => {
|
|
callback(e.target.error);
|
|
e.preventDefault();
|
|
};
|
|
},
|
|
getLocalSet: (mount, callback) => {
|
|
var entries = {};
|
|
function isRealDir(p) {
|
|
return p !== "." && p !== "..";
|
|
}
|
|
function toAbsolute(root) {
|
|
return p => PATH.join2(root, p);
|
|
}
|
|
var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint));
|
|
while (check.length) {
|
|
var path = check.pop();
|
|
var stat;
|
|
try {
|
|
stat = FS.stat(path);
|
|
} catch (e) {
|
|
return callback(e);
|
|
}
|
|
if (FS.isDir(stat.mode)) {
|
|
check.push(...FS.readdir(path).filter(isRealDir).map(toAbsolute(path)));
|
|
}
|
|
entries[path] = {
|
|
"timestamp": stat.mtime
|
|
};
|
|
}
|
|
return callback(null, {
|
|
type: "local",
|
|
entries
|
|
});
|
|
},
|
|
getRemoteSet: (mount, callback) => {
|
|
var entries = {};
|
|
IDBFS.getDB(mount.mountpoint, (err, db) => {
|
|
if (err) return callback(err);
|
|
try {
|
|
var transaction = db.transaction([ IDBFS.DB_STORE_NAME ], "readonly");
|
|
transaction.onerror = e => {
|
|
callback(e.target.error);
|
|
e.preventDefault();
|
|
};
|
|
var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
|
|
var index = store.index("timestamp");
|
|
index.openKeyCursor().onsuccess = event => {
|
|
var cursor = event.target.result;
|
|
if (!cursor) {
|
|
return callback(null, {
|
|
type: "remote",
|
|
db,
|
|
entries
|
|
});
|
|
}
|
|
entries[cursor.primaryKey] = {
|
|
"timestamp": cursor.key
|
|
};
|
|
cursor.continue();
|
|
};
|
|
} catch (e) {
|
|
return callback(e);
|
|
}
|
|
});
|
|
},
|
|
loadLocalEntry: (path, callback) => {
|
|
var stat, node;
|
|
try {
|
|
var lookup = FS.lookupPath(path);
|
|
node = lookup.node;
|
|
stat = FS.stat(path);
|
|
} catch (e) {
|
|
return callback(e);
|
|
}
|
|
if (FS.isDir(stat.mode)) {
|
|
return callback(null, {
|
|
"timestamp": stat.mtime,
|
|
"mode": stat.mode
|
|
});
|
|
} else if (FS.isFile(stat.mode)) {
|
|
// Performance consideration: storing a normal JavaScript array to IndexedDB is much slower than storing a typed array.
// Therefore always convert the file contents to a typed array first before writing the data to IndexedDB.
|
|
node.contents = MEMFS.getFileDataAsTypedArray(node);
|
|
return callback(null, {
|
|
"timestamp": stat.mtime,
|
|
"mode": stat.mode,
|
|
"contents": node.contents
|
|
});
|
|
} else {
|
|
return callback(new Error("node type not supported"));
|
|
}
|
|
},
|
|
storeLocalEntry: (path, entry, callback) => {
|
|
try {
|
|
if (FS.isDir(entry["mode"])) {
|
|
FS.mkdirTree(path, entry["mode"]);
|
|
} else if (FS.isFile(entry["mode"])) {
|
|
FS.writeFile(path, entry["contents"], {
|
|
canOwn: true
|
|
});
|
|
} else {
|
|
return callback(new Error("node type not supported"));
|
|
}
|
|
FS.chmod(path, entry["mode"]);
|
|
FS.utime(path, entry["timestamp"], entry["timestamp"]);
|
|
} catch (e) {
|
|
return callback(e);
|
|
}
|
|
callback(null);
|
|
},
|
|
removeLocalEntry: (path, callback) => {
|
|
try {
|
|
var stat = FS.stat(path);
|
|
if (FS.isDir(stat.mode)) {
|
|
FS.rmdir(path);
|
|
} else if (FS.isFile(stat.mode)) {
|
|
FS.unlink(path);
|
|
}
|
|
} catch (e) {
|
|
return callback(e);
|
|
}
|
|
callback(null);
|
|
},
|
|
loadRemoteEntry: (store, path, callback) => {
|
|
var req = store.get(path);
|
|
req.onsuccess = event => callback(null, event.target.result);
|
|
req.onerror = e => {
|
|
callback(e.target.error);
|
|
e.preventDefault();
|
|
};
|
|
},
|
|
storeRemoteEntry: (store, path, entry, callback) => {
|
|
try {
|
|
var req = store.put(entry, path);
|
|
} catch (e) {
|
|
callback(e);
|
|
return;
|
|
}
|
|
req.onsuccess = event => callback();
|
|
req.onerror = e => {
|
|
callback(e.target.error);
|
|
e.preventDefault();
|
|
};
|
|
},
|
|
removeRemoteEntry: (store, path, callback) => {
|
|
var req = store.delete(path);
|
|
req.onsuccess = event => callback();
|
|
req.onerror = e => {
|
|
callback(e.target.error);
|
|
e.preventDefault();
|
|
};
|
|
},
|
|
reconcile: (src, dst, callback) => {
|
|
var total = 0;
|
|
var create = [];
|
|
for (var [key, e] of Object.entries(src.entries)) {
|
|
var e2 = dst.entries[key];
|
|
if (!e2 || e["timestamp"].getTime() != e2["timestamp"].getTime()) {
|
|
create.push(key);
|
|
total++;
|
|
}
|
|
}
|
|
var remove = [];
|
|
for (var key of Object.keys(dst.entries)) {
|
|
if (!src.entries[key]) {
|
|
remove.push(key);
|
|
total++;
|
|
}
|
|
}
|
|
if (!total) {
|
|
return callback(null);
|
|
}
|
|
var errored = false;
|
|
var db = src.type === "remote" ? src.db : dst.db;
|
|
var transaction = db.transaction([ IDBFS.DB_STORE_NAME ], "readwrite");
|
|
var store = transaction.objectStore(IDBFS.DB_STORE_NAME);
|
|
function done(err) {
|
|
if (err && !errored) {
|
|
errored = true;
|
|
return callback(err);
|
|
}
|
|
}
|
|
// transaction may abort if (for example) there is a QuotaExceededError
|
|
transaction.onerror = transaction.onabort = e => {
|
|
done(e.target.error);
|
|
e.preventDefault();
|
|
};
|
|
transaction.oncomplete = e => {
|
|
if (!errored) {
|
|
callback(null);
|
|
}
|
|
};
|
|
// sort paths in ascending order so directory entries are created
|
|
// before the files inside them
|
|
for (const path of create.sort()) {
|
|
if (dst.type === "local") {
|
|
IDBFS.loadRemoteEntry(store, path, (err, entry) => {
|
|
if (err) return done(err);
|
|
IDBFS.storeLocalEntry(path, entry, done);
|
|
});
|
|
} else {
|
|
IDBFS.loadLocalEntry(path, (err, entry) => {
|
|
if (err) return done(err);
|
|
IDBFS.storeRemoteEntry(store, path, entry, done);
|
|
});
|
|
}
|
|
}
|
|
// sort paths in descending order so files are deleted before their
|
|
// parent directories
|
|
for (var path of remove.sort().reverse()) {
|
|
if (dst.type === "local") {
|
|
IDBFS.removeLocalEntry(path, done);
|
|
} else {
|
|
IDBFS.removeRemoteEntry(store, path, done);
|
|
}
|
|
}
|
|
}
|
|
};
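// Summary of the IDBFS sync path above: syncfs() builds a "local" set from MEMFS mtimes
// and a "remote" set from the IndexedDB "timestamp" index, then reconcile() copies
// entries whose timestamps differ in the chosen direction and removes entries missing
// from the source, all within a single "readwrite" transaction.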
|
|
|
|
var strError = errno => UTF8ToString(_strerror(errno));
|
|
|
|
var ERRNO_CODES = {
|
|
"EPERM": 63,
|
|
"ENOENT": 44,
|
|
"ESRCH": 71,
|
|
"EINTR": 27,
|
|
"EIO": 29,
|
|
"ENXIO": 60,
|
|
"E2BIG": 1,
|
|
"ENOEXEC": 45,
|
|
"EBADF": 8,
|
|
"ECHILD": 12,
|
|
"EAGAIN": 6,
|
|
"EWOULDBLOCK": 6,
|
|
"ENOMEM": 48,
|
|
"EACCES": 2,
|
|
"EFAULT": 21,
|
|
"ENOTBLK": 105,
|
|
"EBUSY": 10,
|
|
"EEXIST": 20,
|
|
"EXDEV": 75,
|
|
"ENODEV": 43,
|
|
"ENOTDIR": 54,
|
|
"EISDIR": 31,
|
|
"EINVAL": 28,
|
|
"ENFILE": 41,
|
|
"EMFILE": 33,
|
|
"ENOTTY": 59,
|
|
"ETXTBSY": 74,
|
|
"EFBIG": 22,
|
|
"ENOSPC": 51,
|
|
"ESPIPE": 70,
|
|
"EROFS": 69,
|
|
"EMLINK": 34,
|
|
"EPIPE": 64,
|
|
"EDOM": 18,
|
|
"ERANGE": 68,
|
|
"ENOMSG": 49,
|
|
"EIDRM": 24,
|
|
"ECHRNG": 106,
|
|
"EL2NSYNC": 156,
|
|
"EL3HLT": 107,
|
|
"EL3RST": 108,
|
|
"ELNRNG": 109,
|
|
"EUNATCH": 110,
|
|
"ENOCSI": 111,
|
|
"EL2HLT": 112,
|
|
"EDEADLK": 16,
|
|
"ENOLCK": 46,
|
|
"EBADE": 113,
|
|
"EBADR": 114,
|
|
"EXFULL": 115,
|
|
"ENOANO": 104,
|
|
"EBADRQC": 103,
|
|
"EBADSLT": 102,
|
|
"EDEADLOCK": 16,
|
|
"EBFONT": 101,
|
|
"ENOSTR": 100,
|
|
"ENODATA": 116,
|
|
"ETIME": 117,
|
|
"ENOSR": 118,
|
|
"ENONET": 119,
|
|
"ENOPKG": 120,
|
|
"EREMOTE": 121,
|
|
"ENOLINK": 47,
|
|
"EADV": 122,
|
|
"ESRMNT": 123,
|
|
"ECOMM": 124,
|
|
"EPROTO": 65,
|
|
"EMULTIHOP": 36,
|
|
"EDOTDOT": 125,
|
|
"EBADMSG": 9,
|
|
"ENOTUNIQ": 126,
|
|
"EBADFD": 127,
|
|
"EREMCHG": 128,
|
|
"ELIBACC": 129,
|
|
"ELIBBAD": 130,
|
|
"ELIBSCN": 131,
|
|
"ELIBMAX": 132,
|
|
"ELIBEXEC": 133,
|
|
"ENOSYS": 52,
|
|
"ENOTEMPTY": 55,
|
|
"ENAMETOOLONG": 37,
|
|
"ELOOP": 32,
|
|
"EOPNOTSUPP": 138,
|
|
"EPFNOSUPPORT": 139,
|
|
"ECONNRESET": 15,
|
|
"ENOBUFS": 42,
|
|
"EAFNOSUPPORT": 5,
|
|
"EPROTOTYPE": 67,
|
|
"ENOTSOCK": 57,
|
|
"ENOPROTOOPT": 50,
|
|
"ESHUTDOWN": 140,
|
|
"ECONNREFUSED": 14,
|
|
"EADDRINUSE": 3,
|
|
"ECONNABORTED": 13,
|
|
"ENETUNREACH": 40,
|
|
"ENETDOWN": 38,
|
|
"ETIMEDOUT": 73,
|
|
"EHOSTDOWN": 142,
|
|
"EHOSTUNREACH": 23,
|
|
"EINPROGRESS": 26,
|
|
"EALREADY": 7,
|
|
"EDESTADDRREQ": 17,
|
|
"EMSGSIZE": 35,
|
|
"EPROTONOSUPPORT": 66,
|
|
"ESOCKTNOSUPPORT": 137,
|
|
"EADDRNOTAVAIL": 4,
|
|
"ENETRESET": 39,
|
|
"EISCONN": 30,
|
|
"ENOTCONN": 53,
|
|
"ETOOMANYREFS": 141,
|
|
"EUSERS": 136,
|
|
"EDQUOT": 19,
|
|
"ESTALE": 72,
|
|
"ENOTSUP": 138,
|
|
"ENOMEDIUM": 148,
|
|
"EILSEQ": 25,
|
|
"EOVERFLOW": 61,
|
|
"ECANCELED": 11,
|
|
"ENOTRECOVERABLE": 56,
|
|
"EOWNERDEAD": 62,
|
|
"ESTRPIPE": 135
|
|
};
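// Note: these are the errno numbers used by Emscripten's libc (they differ from native
// Linux values, e.g. EPERM is 63 here); FS.ErrnoError below consults this table to attach
// a symbolic `code` alongside the numeric `errno`.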
|
|
|
|
var asyncLoad = async url => {
|
|
var arrayBuffer = await readAsync(url);
|
|
assert(arrayBuffer, `Loading data file "${url}" failed (no arrayBuffer).`);
|
|
return new Uint8Array(arrayBuffer);
|
|
};
|
|
|
|
var FS_createDataFile = (...args) => FS.createDataFile(...args);
|
|
|
|
var getUniqueRunDependency = id => {
|
|
var orig = id;
|
|
while (1) {
|
|
if (!runDependencyTracking[id]) return id;
|
|
id = orig + Math.random();
|
|
}
|
|
};
|
|
|
|
var preloadPlugins = [];
|
|
|
|
var FS_handledByPreloadPlugin = async (byteArray, fullname) => {
|
|
// Ensure plugins are ready.
|
|
if (typeof Browser != "undefined") Browser.init();
|
|
for (var plugin of preloadPlugins) {
|
|
if (plugin["canHandle"](fullname)) {
|
|
assert(plugin["handle"].constructor.name === "AsyncFunction", "Filesystem plugin handlers must be async functions (See #24914)");
|
|
return plugin["handle"](byteArray, fullname);
|
|
}
|
|
}
|
|
// If no plugin handled this file, return the original/unmodified
// byteArray.
|
|
return byteArray;
|
|
};
|
|
|
|
var FS_preloadFile = async (parent, name, url, canRead, canWrite, dontCreateFile, canOwn, preFinish) => {
|
|
// TODO we should allow people to just pass in a complete filename instead
// of parent and name, given that we just join them anyway
|
|
var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent;
|
|
var dep = getUniqueRunDependency(`cp ${fullname}`);
|
|
// might have several active requests for the same fullname
|
|
addRunDependency(dep);
|
|
try {
|
|
var byteArray = url;
|
|
if (typeof url == "string") {
|
|
byteArray = await asyncLoad(url);
|
|
}
|
|
byteArray = await FS_handledByPreloadPlugin(byteArray, fullname);
|
|
preFinish?.();
|
|
if (!dontCreateFile) {
|
|
FS_createDataFile(parent, name, byteArray, canRead, canWrite, canOwn);
|
|
}
|
|
} finally {
|
|
removeRunDependency(dep);
|
|
}
|
|
};
|
|
|
|
var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => {
|
|
FS_preloadFile(parent, name, url, canRead, canWrite, dontCreateFile, canOwn, preFinish).then(onload).catch(onerror);
|
|
};
|
|
|
|
var FS = {
|
|
root: null,
|
|
mounts: [],
|
|
devices: {},
|
|
streams: [],
|
|
nextInode: 1,
|
|
nameTable: null,
|
|
currentPath: "/",
|
|
initialized: false,
|
|
ignorePermissions: true,
|
|
filesystems: null,
|
|
syncFSRequests: 0,
|
|
readFiles: {},
|
|
ErrnoError: class extends Error {
|
|
name="ErrnoError";
|
|
// We set the `name` property to be able to identify `FS.ErrnoError`
|
|
// - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway.
|
|
// - when using PROXYFS, an error can come from an underlying FS
|
|
// as different FS objects have their own FS.ErrnoError each,
|
|
// the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs.
|
|
// we'll use the reliable test `err.name == "ErrnoError"` instead
|
|
constructor(errno) {
|
|
super(runtimeInitialized ? strError(errno) : "");
|
|
this.errno = errno;
|
|
for (var key in ERRNO_CODES) {
|
|
if (ERRNO_CODES[key] === errno) {
|
|
this.code = key;
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
},
|
|
FSStream: class {
|
|
shared={};
|
|
get object() {
|
|
return this.node;
|
|
}
|
|
set object(val) {
|
|
this.node = val;
|
|
}
|
|
get isRead() {
|
|
return (this.flags & 2097155) !== 1;
|
|
}
|
|
get isWrite() {
|
|
return (this.flags & 2097155) !== 0;
|
|
}
|
|
get isAppend() {
|
|
return (this.flags & 1024);
|
|
}
|
|
get flags() {
|
|
return this.shared.flags;
|
|
}
|
|
set flags(val) {
|
|
this.shared.flags = val;
|
|
}
|
|
get position() {
|
|
return this.shared.position;
|
|
}
|
|
set position(val) {
|
|
this.shared.position = val;
|
|
}
|
|
},
|
|
FSNode: class {
|
|
node_ops={};
|
|
stream_ops={};
|
|
readMode=292 | 73;
|
|
writeMode=146;
|
|
mounted=null;
|
|
constructor(parent, name, mode, rdev) {
|
|
if (!parent) {
|
|
parent = this;
|
|
}
|
|
this.parent = parent;
|
|
this.mount = parent.mount;
|
|
this.id = FS.nextInode++;
|
|
this.name = name;
|
|
this.mode = mode;
|
|
this.rdev = rdev;
|
|
this.atime = this.mtime = this.ctime = Date.now();
|
|
}
|
|
get read() {
|
|
return (this.mode & this.readMode) === this.readMode;
|
|
}
|
|
set read(val) {
|
|
val ? this.mode |= this.readMode : this.mode &= ~this.readMode;
|
|
}
|
|
get write() {
|
|
return (this.mode & this.writeMode) === this.writeMode;
|
|
}
|
|
set write(val) {
|
|
val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode;
|
|
}
|
|
get isFolder() {
|
|
return FS.isDir(this.mode);
|
|
}
|
|
get isDevice() {
|
|
return FS.isChrdev(this.mode);
|
|
}
|
|
},
|
|
lookupPath(path, opts = {}) {
|
|
if (!path) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
opts.follow_mount ??= true;
|
|
if (!PATH.isAbs(path)) {
|
|
path = FS.cwd() + "/" + path;
|
|
}
|
|
// limit max consecutive symlinks to 40 (SYMLOOP_MAX).
|
|
linkloop: for (var nlinks = 0; nlinks < 40; nlinks++) {
|
|
// split the absolute path
|
|
var parts = path.split("/").filter(p => !!p);
|
|
// start at the root
|
|
var current = FS.root;
|
|
var current_path = "/";
|
|
for (var i = 0; i < parts.length; i++) {
|
|
var islast = (i === parts.length - 1);
|
|
if (islast && opts.parent) {
|
|
// stop resolving
|
|
break;
|
|
}
|
|
if (parts[i] === ".") {
|
|
continue;
|
|
}
|
|
if (parts[i] === "..") {
|
|
current_path = PATH.dirname(current_path);
|
|
if (FS.isRoot(current)) {
|
|
path = current_path + "/" + parts.slice(i + 1).join("/");
|
|
// We're making progress here, don't let many consecutive ..'s
|
|
// lead to ELOOP
|
|
nlinks--;
|
|
continue linkloop;
|
|
} else {
|
|
current = current.parent;
|
|
}
|
|
continue;
|
|
}
|
|
current_path = PATH.join2(current_path, parts[i]);
|
|
try {
|
|
current = FS.lookupNode(current, parts[i]);
|
|
} catch (e) {
|
|
// if noent_okay is true, suppress a ENOENT in the last component
|
|
// and return an object with an undefined node. This is needed for
|
|
// resolving symlinks in the path when creating a file.
|
|
if ((e?.errno === 44) && islast && opts.noent_okay) {
|
|
return {
|
|
path: current_path
|
|
};
|
|
}
|
|
throw e;
|
|
}
|
|
// jump to the mount's root node if this is a mountpoint
|
|
if (FS.isMountpoint(current) && (!islast || opts.follow_mount)) {
|
|
current = current.mounted.root;
|
|
}
|
|
// by default, lookupPath will not follow a symlink if it is the final path component.
|
|
// setting opts.follow = true will override this behavior.
|
|
if (FS.isLink(current.mode) && (!islast || opts.follow)) {
|
|
if (!current.node_ops.readlink) {
|
|
throw new FS.ErrnoError(52);
|
|
}
|
|
var link = current.node_ops.readlink(current);
|
|
if (!PATH.isAbs(link)) {
|
|
link = PATH.dirname(current_path) + "/" + link;
|
|
}
|
|
path = link + "/" + parts.slice(i + 1).join("/");
|
|
continue linkloop;
|
|
}
|
|
}
|
|
return {
|
|
path: current_path,
|
|
node: current
|
|
};
|
|
}
|
|
throw new FS.ErrnoError(32);
|
|
},
|
|
getPath(node) {
|
|
var path;
|
|
while (true) {
|
|
if (FS.isRoot(node)) {
|
|
var mount = node.mount.mountpoint;
|
|
if (!path) return mount;
|
|
return mount[mount.length - 1] !== "/" ? `${mount}/${path}` : mount + path;
|
|
}
|
|
path = path ? `${node.name}/${path}` : node.name;
|
|
node = node.parent;
|
|
}
|
|
},
|
|
hashName(parentid, name) {
|
|
var hash = 0;
|
|
for (var i = 0; i < name.length; i++) {
|
|
hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
|
|
}
|
|
return ((parentid + hash) >>> 0) % FS.nameTable.length;
|
|
},
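// hashName above is a simple multiply-by-31 string hash ((hash << 5) - hash), mixed with
// the parent inode id and reduced modulo nameTable.length; lookupNode uses it to probe
// the name cache before falling back to the mounted filesystem's own lookup().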
|
|
hashAddNode(node) {
|
|
var hash = FS.hashName(node.parent.id, node.name);
|
|
node.name_next = FS.nameTable[hash];
|
|
FS.nameTable[hash] = node;
|
|
},
|
|
hashRemoveNode(node) {
|
|
var hash = FS.hashName(node.parent.id, node.name);
|
|
if (FS.nameTable[hash] === node) {
|
|
FS.nameTable[hash] = node.name_next;
|
|
} else {
|
|
var current = FS.nameTable[hash];
|
|
while (current) {
|
|
if (current.name_next === node) {
|
|
current.name_next = node.name_next;
|
|
break;
|
|
}
|
|
current = current.name_next;
|
|
}
|
|
}
|
|
},
|
|
lookupNode(parent, name) {
|
|
var errCode = FS.mayLookup(parent);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
var hash = FS.hashName(parent.id, name);
|
|
for (var node = FS.nameTable[hash]; node; node = node.name_next) {
|
|
var nodeName = node.name;
|
|
if (node.parent.id === parent.id && nodeName === name) {
|
|
return node;
|
|
}
|
|
}
|
|
// if we failed to find it in the cache, call into the VFS
|
|
return FS.lookup(parent, name);
|
|
},
|
|
createNode(parent, name, mode, rdev) {
|
|
assert(typeof parent == "object");
|
|
var node = new FS.FSNode(parent, name, mode, rdev);
|
|
FS.hashAddNode(node);
|
|
return node;
|
|
},
|
|
destroyNode(node) {
|
|
FS.hashRemoveNode(node);
|
|
},
|
|
isRoot(node) {
|
|
return node === node.parent;
|
|
},
|
|
isMountpoint(node) {
|
|
return !!node.mounted;
|
|
},
|
|
isFile(mode) {
|
|
return (mode & 61440) === 32768;
|
|
},
|
|
isDir(mode) {
|
|
return (mode & 61440) === 16384;
|
|
},
|
|
isLink(mode) {
|
|
return (mode & 61440) === 40960;
|
|
},
|
|
isChrdev(mode) {
|
|
return (mode & 61440) === 8192;
|
|
},
|
|
isBlkdev(mode) {
|
|
return (mode & 61440) === 24576;
|
|
},
|
|
isFIFO(mode) {
|
|
return (mode & 61440) === 4096;
|
|
},
|
|
isSocket(mode) {
|
|
return (mode & 49152) === 49152;
|
|
},
|
|
flagsToPermissionString(flag) {
|
|
var perms = [ "r", "w", "rw" ][flag & 3];
|
|
if ((flag & 512)) {
|
|
perms += "w";
|
|
}
|
|
return perms;
|
|
},
|
|
nodePermissions(node, perms) {
|
|
if (FS.ignorePermissions) {
|
|
return 0;
|
|
}
|
|
// return 0 if any user, group or owner bits are set.
|
|
if (perms.includes("r") && !(node.mode & 292)) {
|
|
return 2;
|
|
} else if (perms.includes("w") && !(node.mode & 146)) {
|
|
return 2;
|
|
} else if (perms.includes("x") && !(node.mode & 73)) {
|
|
return 2;
|
|
}
|
|
return 0;
|
|
},
|
|
mayLookup(dir) {
|
|
if (!FS.isDir(dir.mode)) return 54;
|
|
var errCode = FS.nodePermissions(dir, "x");
|
|
if (errCode) return errCode;
|
|
if (!dir.node_ops.lookup) return 2;
|
|
return 0;
|
|
},
|
|
mayCreate(dir, name) {
|
|
if (!FS.isDir(dir.mode)) {
|
|
return 54;
|
|
}
|
|
try {
|
|
var node = FS.lookupNode(dir, name);
|
|
return 20;
|
|
} catch (e) {}
|
|
return FS.nodePermissions(dir, "wx");
|
|
},
|
|
mayDelete(dir, name, isdir) {
|
|
var node;
|
|
try {
|
|
node = FS.lookupNode(dir, name);
|
|
} catch (e) {
|
|
return e.errno;
|
|
}
|
|
var errCode = FS.nodePermissions(dir, "wx");
|
|
if (errCode) {
|
|
return errCode;
|
|
}
|
|
if (isdir) {
|
|
if (!FS.isDir(node.mode)) {
|
|
return 54;
|
|
}
|
|
if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
|
|
return 10;
|
|
}
|
|
} else {
|
|
if (FS.isDir(node.mode)) {
|
|
return 31;
|
|
}
|
|
}
|
|
return 0;
|
|
},
|
|
mayOpen(node, flags) {
|
|
if (!node) {
|
|
return 44;
|
|
}
|
|
if (FS.isLink(node.mode)) {
|
|
return 32;
|
|
} else if (FS.isDir(node.mode)) {
|
|
if (FS.flagsToPermissionString(flags) !== "r" || (flags & (512 | 64))) {
|
|
// TODO: check for O_SEARCH? (== search for dir only)
|
|
return 31;
|
|
}
|
|
}
|
|
return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
|
|
},
|
|
checkOpExists(op, err) {
|
|
if (!op) {
|
|
throw new FS.ErrnoError(err);
|
|
}
|
|
return op;
|
|
},
|
|
MAX_OPEN_FDS: 4096,
|
|
nextfd() {
|
|
for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) {
|
|
if (!FS.streams[fd]) {
|
|
return fd;
|
|
}
|
|
}
|
|
throw new FS.ErrnoError(33);
|
|
},
|
|
getStreamChecked(fd) {
|
|
var stream = FS.getStream(fd);
|
|
if (!stream) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
return stream;
|
|
},
|
|
getStream: fd => FS.streams[fd],
|
|
createStream(stream, fd = -1) {
|
|
assert(fd >= -1);
|
|
// clone it, so we can return an instance of FSStream
|
|
stream = Object.assign(new FS.FSStream, stream);
|
|
if (fd == -1) {
|
|
fd = FS.nextfd();
|
|
}
|
|
stream.fd = fd;
|
|
FS.streams[fd] = stream;
|
|
return stream;
|
|
},
|
|
closeStream(fd) {
|
|
FS.streams[fd] = null;
|
|
},
|
|
dupStream(origStream, fd = -1) {
|
|
var stream = FS.createStream(origStream, fd);
|
|
stream.stream_ops?.dup?.(stream);
|
|
return stream;
|
|
},
|
|
doSetAttr(stream, node, attr) {
|
|
var setattr = stream?.stream_ops.setattr;
|
|
var arg = setattr ? stream : node;
|
|
setattr ??= node.node_ops.setattr;
|
|
FS.checkOpExists(setattr, 63);
|
|
setattr(arg, attr);
|
|
},
|
|
chrdev_stream_ops: {
|
|
open(stream) {
|
|
var device = FS.getDevice(stream.node.rdev);
|
|
// override node's stream ops with the device's
|
|
stream.stream_ops = device.stream_ops;
|
|
// forward the open call
|
|
stream.stream_ops.open?.(stream);
|
|
},
|
|
llseek() {
|
|
throw new FS.ErrnoError(70);
|
|
}
|
|
},
|
|
major: dev => ((dev) >> 8),
|
|
minor: dev => ((dev) & 255),
|
|
makedev: (ma, mi) => ((ma) << 8 | (mi)),
|
|
registerDevice(dev, ops) {
|
|
FS.devices[dev] = {
|
|
stream_ops: ops
|
|
};
|
|
},
|
|
getDevice: dev => FS.devices[dev],
|
|
getMounts(mount) {
|
|
var mounts = [];
|
|
var check = [ mount ];
|
|
while (check.length) {
|
|
var m = check.pop();
|
|
mounts.push(m);
|
|
check.push(...m.mounts);
|
|
}
|
|
return mounts;
|
|
},
|
|
syncfs(populate, callback) {
|
|
if (typeof populate == "function") {
|
|
callback = populate;
|
|
populate = false;
|
|
}
|
|
FS.syncFSRequests++;
|
|
if (FS.syncFSRequests > 1) {
|
|
err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`);
|
|
}
|
|
var mounts = FS.getMounts(FS.root.mount);
|
|
var completed = 0;
|
|
function doCallback(errCode) {
|
|
assert(FS.syncFSRequests > 0);
|
|
FS.syncFSRequests--;
|
|
return callback(errCode);
|
|
}
|
|
function done(errCode) {
|
|
if (errCode) {
|
|
if (!done.errored) {
|
|
done.errored = true;
|
|
return doCallback(errCode);
|
|
}
|
|
return;
|
|
}
|
|
if (++completed >= mounts.length) {
|
|
doCallback(null);
|
|
}
|
|
}
|
|
// sync all mounts
|
|
for (var mount of mounts) {
|
|
if (mount.type.syncfs) {
|
|
mount.type.syncfs(mount, populate, done);
|
|
} else {
|
|
done(null);
|
|
}
|
|
}
|
|
},
|
|
mount(type, opts, mountpoint) {
|
|
if (typeof type == "string") {
|
|
// The filesystem was not included, and instead we have an error
|
|
// message stored in the variable.
|
|
throw type;
|
|
}
|
|
var root = mountpoint === "/";
|
|
var pseudo = !mountpoint;
|
|
var node;
|
|
if (root && FS.root) {
|
|
throw new FS.ErrnoError(10);
|
|
} else if (!root && !pseudo) {
|
|
var lookup = FS.lookupPath(mountpoint, {
|
|
follow_mount: false
|
|
});
|
|
mountpoint = lookup.path;
|
|
// use the absolute path
|
|
node = lookup.node;
|
|
if (FS.isMountpoint(node)) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
if (!FS.isDir(node.mode)) {
|
|
throw new FS.ErrnoError(54);
|
|
}
|
|
}
|
|
var mount = {
|
|
type,
|
|
opts,
|
|
mountpoint,
|
|
mounts: []
|
|
};
|
|
// create a root node for the fs
|
|
var mountRoot = type.mount(mount);
|
|
mountRoot.mount = mount;
|
|
mount.root = mountRoot;
|
|
if (root) {
|
|
FS.root = mountRoot;
|
|
} else if (node) {
|
|
// set as a mountpoint
|
|
node.mounted = mount;
|
|
// add the new mount to the current mount's children
|
|
if (node.mount) {
|
|
node.mount.mounts.push(mount);
|
|
}
|
|
}
|
|
return mountRoot;
|
|
},
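// Illustrative usage of FS.mount above (assumes the IDBFS backend is linked
// in; see FS.filesystems in staticInit below):
//   FS.mkdir('/data');
//   FS.mount(IDBFS, {}, '/data');
//   FS.syncfs(true, err => { /* '/data' now mirrors IndexedDB contents */ });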
|
|
unmount(mountpoint) {
|
|
var lookup = FS.lookupPath(mountpoint, {
|
|
follow_mount: false
|
|
});
|
|
if (!FS.isMountpoint(lookup.node)) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
// destroy the nodes for this mount, and all its child mounts
|
|
var node = lookup.node;
|
|
var mount = node.mounted;
|
|
var mounts = FS.getMounts(mount);
|
|
for (var [hash, current] of Object.entries(FS.nameTable)) {
|
|
while (current) {
|
|
var next = current.name_next;
|
|
if (mounts.includes(current.mount)) {
|
|
FS.destroyNode(current);
|
|
}
|
|
current = next;
|
|
}
|
|
}
|
|
// no longer a mountpoint
|
|
node.mounted = null;
|
|
// remove this mount from the child mounts
|
|
var idx = node.mount.mounts.indexOf(mount);
|
|
assert(idx !== -1);
|
|
node.mount.mounts.splice(idx, 1);
|
|
},
|
|
lookup(parent, name) {
|
|
return parent.node_ops.lookup(parent, name);
|
|
},
|
|
mknod(path, mode, dev) {
|
|
var lookup = FS.lookupPath(path, {
|
|
parent: true
|
|
});
|
|
var parent = lookup.node;
|
|
var name = PATH.basename(path);
|
|
if (!name) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (name === "." || name === "..") {
|
|
throw new FS.ErrnoError(20);
|
|
}
|
|
var errCode = FS.mayCreate(parent, name);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.mknod) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
return parent.node_ops.mknod(parent, name, mode, dev);
|
|
},
|
|
statfs(path) {
|
|
return FS.statfsNode(FS.lookupPath(path, {
|
|
follow: true
|
|
}).node);
|
|
},
|
|
statfsStream(stream) {
|
|
// We keep a separate statfsStream function because noderawfs overrides
|
|
// it. In noderawfs, stream.node is sometimes null. Instead, we need to
|
|
// look at stream.path.
|
|
return FS.statfsNode(stream.node);
|
|
},
|
|
statfsNode(node) {
|
|
// NOTE: None of the defaults here are true. We're just returning safe and
|
|
// sane values. Currently nodefs and rawfs replace these defaults,
|
|
// other file systems leave them alone.
|
|
var rtn = {
|
|
bsize: 4096,
|
|
frsize: 4096,
|
|
blocks: 1e6,
|
|
bfree: 5e5,
|
|
bavail: 5e5,
|
|
files: FS.nextInode,
|
|
ffree: FS.nextInode - 1,
|
|
fsid: 42,
|
|
flags: 2,
|
|
namelen: 255
|
|
};
|
|
if (node.node_ops.statfs) {
|
|
Object.assign(rtn, node.node_ops.statfs(node.mount.opts.root));
|
|
}
|
|
return rtn;
|
|
},
|
|
create(path, mode = 438) {
|
|
mode &= 4095;
|
|
mode |= 32768;
|
|
return FS.mknod(path, mode, 0);
|
|
},
|
|
mkdir(path, mode = 511) {
|
|
mode &= 511 | 512;
|
|
mode |= 16384;
|
|
return FS.mknod(path, mode, 0);
|
|
},
|
|
mkdirTree(path, mode) {
|
|
var dirs = path.split("/");
|
|
var d = "";
|
|
for (var dir of dirs) {
|
|
if (!dir) continue;
|
|
if (d || PATH.isAbs(path)) d += "/";
|
|
d += dir;
|
|
try {
|
|
FS.mkdir(d, mode);
|
|
} catch (e) {
|
|
if (e.errno != 20) throw e;
|
|
}
|
|
}
|
|
},
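// Example (illustrative): FS.mkdirTree('/a/b/c') creates '/a', '/a/b' and
// '/a/b/c' in turn, swallowing EEXIST (errno 20) for components that already exist.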
|
|
mkdev(path, mode, dev) {
|
|
if (typeof dev == "undefined") {
|
|
dev = mode;
|
|
mode = 438;
|
|
}
|
|
mode |= 8192;
|
|
return FS.mknod(path, mode, dev);
|
|
},
|
|
symlink(oldpath, newpath) {
|
|
if (!PATH_FS.resolve(oldpath)) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
var lookup = FS.lookupPath(newpath, {
|
|
parent: true
|
|
});
|
|
var parent = lookup.node;
|
|
if (!parent) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
var newname = PATH.basename(newpath);
|
|
var errCode = FS.mayCreate(parent, newname);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.symlink) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
return parent.node_ops.symlink(parent, newname, oldpath);
|
|
},
|
|
rename(old_path, new_path) {
|
|
var old_dirname = PATH.dirname(old_path);
|
|
var new_dirname = PATH.dirname(new_path);
|
|
var old_name = PATH.basename(old_path);
|
|
var new_name = PATH.basename(new_path);
|
|
// parents must exist
|
|
var lookup, old_dir, new_dir;
|
|
// let the errors from non-existent directories percolate up
|
|
lookup = FS.lookupPath(old_path, {
|
|
parent: true
|
|
});
|
|
old_dir = lookup.node;
|
|
lookup = FS.lookupPath(new_path, {
|
|
parent: true
|
|
});
|
|
new_dir = lookup.node;
|
|
if (!old_dir || !new_dir) throw new FS.ErrnoError(44);
|
|
// need to be part of the same mount
|
|
if (old_dir.mount !== new_dir.mount) {
|
|
throw new FS.ErrnoError(75);
|
|
}
|
|
// source must exist
|
|
var old_node = FS.lookupNode(old_dir, old_name);
|
|
// old path should not be an ancestor of the new path
|
|
var relative = PATH_FS.relative(old_path, new_dirname);
|
|
if (relative.charAt(0) !== ".") {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
// new path should not be an ancestor of the old path
|
|
relative = PATH_FS.relative(new_path, old_dirname);
|
|
if (relative.charAt(0) !== ".") {
|
|
throw new FS.ErrnoError(55);
|
|
}
|
|
// see if the new path already exists
|
|
var new_node;
|
|
try {
|
|
new_node = FS.lookupNode(new_dir, new_name);
|
|
} catch (e) {}
|
|
// early out if nothing needs to change
|
|
if (old_node === new_node) {
|
|
return;
|
|
}
|
|
// we'll need to delete the old entry
|
|
var isdir = FS.isDir(old_node.mode);
|
|
var errCode = FS.mayDelete(old_dir, old_name, isdir);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
// need delete permissions if we'll be overwriting.
|
|
// need create permissions if new doesn't already exist.
|
|
errCode = new_node ? FS.mayDelete(new_dir, new_name, isdir) : FS.mayCreate(new_dir, new_name);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!old_dir.node_ops.rename) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
// if we are going to change the parent, check write permissions
|
|
if (new_dir !== old_dir) {
|
|
errCode = FS.nodePermissions(old_dir, "w");
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
}
|
|
// remove the node from the lookup hash
|
|
FS.hashRemoveNode(old_node);
|
|
// do the underlying fs rename
|
|
try {
|
|
old_dir.node_ops.rename(old_node, new_dir, new_name);
|
|
// update old node (we do this here to avoid each backend
|
|
// needing to)
|
|
old_node.parent = new_dir;
|
|
} catch (e) {
|
|
throw e;
|
|
} finally {
|
|
// add the node back to the hash (in case node_ops.rename
|
|
// changed its name)
|
|
FS.hashAddNode(old_node);
|
|
}
|
|
},
|
|
rmdir(path) {
|
|
var lookup = FS.lookupPath(path, {
|
|
parent: true
|
|
});
|
|
var parent = lookup.node;
|
|
var name = PATH.basename(path);
|
|
var node = FS.lookupNode(parent, name);
|
|
var errCode = FS.mayDelete(parent, name, true);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.rmdir) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
if (FS.isMountpoint(node)) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
parent.node_ops.rmdir(parent, name);
|
|
FS.destroyNode(node);
|
|
},
|
|
readdir(path) {
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: true
|
|
});
|
|
var node = lookup.node;
|
|
var readdir = FS.checkOpExists(node.node_ops.readdir, 54);
|
|
return readdir(node);
|
|
},
|
|
unlink(path) {
|
|
var lookup = FS.lookupPath(path, {
|
|
parent: true
|
|
});
|
|
var parent = lookup.node;
|
|
if (!parent) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
var name = PATH.basename(path);
|
|
var node = FS.lookupNode(parent, name);
|
|
var errCode = FS.mayDelete(parent, name, false);
|
|
if (errCode) {
|
|
// According to POSIX, we should map EISDIR to EPERM, but
|
|
// we instead do what Linux does (and we must, as we use
|
|
// the musl linux libc).
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
if (!parent.node_ops.unlink) {
|
|
throw new FS.ErrnoError(63);
|
|
}
|
|
if (FS.isMountpoint(node)) {
|
|
throw new FS.ErrnoError(10);
|
|
}
|
|
parent.node_ops.unlink(parent, name);
|
|
FS.destroyNode(node);
|
|
},
|
|
readlink(path) {
|
|
var lookup = FS.lookupPath(path);
|
|
var link = lookup.node;
|
|
if (!link) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
if (!link.node_ops.readlink) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
return link.node_ops.readlink(link);
|
|
},
|
|
stat(path, dontFollow) {
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: !dontFollow
|
|
});
|
|
var node = lookup.node;
|
|
var getattr = FS.checkOpExists(node.node_ops.getattr, 63);
|
|
return getattr(node);
|
|
},
|
|
fstat(fd) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
var node = stream.node;
|
|
var getattr = stream.stream_ops.getattr;
|
|
var arg = getattr ? stream : node;
|
|
getattr ??= node.node_ops.getattr;
|
|
FS.checkOpExists(getattr, 63);
|
|
return getattr(arg);
|
|
},
|
|
lstat(path) {
|
|
return FS.stat(path, true);
|
|
},
|
|
doChmod(stream, node, mode, dontFollow) {
|
|
FS.doSetAttr(stream, node, {
|
|
mode: (mode & 4095) | (node.mode & ~4095),
|
|
ctime: Date.now(),
|
|
dontFollow
|
|
});
|
|
},
|
|
chmod(path, mode, dontFollow) {
|
|
var node;
|
|
if (typeof path == "string") {
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: !dontFollow
|
|
});
|
|
node = lookup.node;
|
|
} else {
|
|
node = path;
|
|
}
|
|
FS.doChmod(null, node, mode, dontFollow);
|
|
},
|
|
lchmod(path, mode) {
|
|
FS.chmod(path, mode, true);
|
|
},
|
|
fchmod(fd, mode) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
FS.doChmod(stream, stream.node, mode, false);
|
|
},
|
|
doChown(stream, node, dontFollow) {
|
|
FS.doSetAttr(stream, node, {
|
|
timestamp: Date.now(),
|
|
dontFollow
|
|
});
|
|
},
|
|
chown(path, uid, gid, dontFollow) {
|
|
var node;
|
|
if (typeof path == "string") {
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: !dontFollow
|
|
});
|
|
node = lookup.node;
|
|
} else {
|
|
node = path;
|
|
}
|
|
FS.doChown(null, node, dontFollow);
|
|
},
|
|
lchown(path, uid, gid) {
|
|
FS.chown(path, uid, gid, true);
|
|
},
|
|
fchown(fd, uid, gid) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
FS.doChown(stream, stream.node, false);
|
|
},
|
|
doTruncate(stream, node, len) {
|
|
if (FS.isDir(node.mode)) {
|
|
throw new FS.ErrnoError(31);
|
|
}
|
|
if (!FS.isFile(node.mode)) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
var errCode = FS.nodePermissions(node, "w");
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
FS.doSetAttr(stream, node, {
|
|
size: len,
|
|
timestamp: Date.now()
|
|
});
|
|
},
|
|
truncate(path, len) {
|
|
if (len < 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
var node;
|
|
if (typeof path == "string") {
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: true
|
|
});
|
|
node = lookup.node;
|
|
} else {
|
|
node = path;
|
|
}
|
|
FS.doTruncate(null, node, len);
|
|
},
|
|
ftruncate(fd, len) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
if (len < 0 || (stream.flags & 2097155) === 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
FS.doTruncate(stream, stream.node, len);
|
|
},
|
|
utime(path, atime, mtime) {
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: true
|
|
});
|
|
var node = lookup.node;
|
|
var setattr = FS.checkOpExists(node.node_ops.setattr, 63);
|
|
setattr(node, {
|
|
atime,
|
|
mtime
|
|
});
|
|
},
|
|
open(path, flags, mode = 438) {
|
|
if (path === "") {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
flags = typeof flags == "string" ? FS_modeStringToFlags(flags) : flags;
|
|
if ((flags & 64)) {
|
|
mode = (mode & 4095) | 32768;
|
|
} else {
|
|
mode = 0;
|
|
}
|
|
var node;
|
|
var isDirPath;
|
|
if (typeof path == "object") {
|
|
node = path;
|
|
} else {
|
|
isDirPath = path.endsWith("/");
|
|
// noent_okay makes it so that if the final component of the path
|
|
// doesn't exist, lookupPath returns `node: undefined`. `path` will be
|
|
// updated to point to the target of all symlinks.
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: !(flags & 131072),
|
|
noent_okay: true
|
|
});
|
|
node = lookup.node;
|
|
path = lookup.path;
|
|
}
|
|
// perhaps we need to create the node
|
|
var created = false;
|
|
if ((flags & 64)) {
|
|
if (node) {
|
|
// if O_CREAT and O_EXCL are set, error out if the node already exists
|
|
if ((flags & 128)) {
|
|
throw new FS.ErrnoError(20);
|
|
}
|
|
} else if (isDirPath) {
|
|
throw new FS.ErrnoError(31);
|
|
} else {
|
|
// node doesn't exist, try to create it
|
|
// Ignore the permission bits here to ensure we can `open` this new
|
|
// file below. We use chmod below to apply the permissions once the
|
|
// file is open.
|
|
node = FS.mknod(path, mode | 511, 0);
|
|
created = true;
|
|
}
|
|
}
|
|
if (!node) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
// can't truncate a device
|
|
if (FS.isChrdev(node.mode)) {
|
|
flags &= ~512;
|
|
}
|
|
// if asked only for a directory, then this must be one
|
|
if ((flags & 65536) && !FS.isDir(node.mode)) {
|
|
throw new FS.ErrnoError(54);
|
|
}
|
|
// check permissions, if this is not a file we just created now (it is ok to
|
|
// create and write to a file with read-only permissions; it is read-only
|
|
// for later use)
|
|
if (!created) {
|
|
var errCode = FS.mayOpen(node, flags);
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
}
|
|
// do truncation if necessary
|
|
if ((flags & 512) && !created) {
|
|
FS.truncate(node, 0);
|
|
}
|
|
// we've already handled these, don't pass down to the underlying vfs
|
|
flags &= ~(128 | 512 | 131072);
|
|
// register the stream with the filesystem
|
|
var stream = FS.createStream({
|
|
node,
|
|
path: FS.getPath(node),
|
|
// we want the absolute path to the node
|
|
flags,
|
|
seekable: true,
|
|
position: 0,
|
|
stream_ops: node.stream_ops,
|
|
// used by the file family libc calls (fopen, fwrite, ferror, etc.)
|
|
ungotten: [],
|
|
error: false
|
|
});
|
|
// call the new stream's open function
|
|
if (stream.stream_ops.open) {
|
|
stream.stream_ops.open(stream);
|
|
}
|
|
if (created) {
|
|
FS.chmod(node, mode & 511);
|
|
}
|
|
if (Module["logReadFiles"] && !(flags & 1)) {
|
|
if (!(path in FS.readFiles)) {
|
|
FS.readFiles[path] = 1;
|
|
}
|
|
}
|
|
return stream;
|
|
},
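// Flag reference for the numeric checks in open() above (musl values, listed
// here for readability): O_CREAT = 64, O_EXCL = 128, O_TRUNC = 512,
// O_APPEND = 1024, O_DIRECTORY = 65536, O_NOFOLLOW = 131072.
// Illustrative call: FS.open('/tmp/log.txt', 577) uses 64|512|1, i.e.
// O_CREAT|O_TRUNC|O_WRONLY, the same default that writeFile() uses below.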
|
|
close(stream) {
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (stream.getdents) stream.getdents = null;
|
|
// free readdir state
|
|
try {
|
|
if (stream.stream_ops.close) {
|
|
stream.stream_ops.close(stream);
|
|
}
|
|
} catch (e) {
|
|
throw e;
|
|
} finally {
|
|
FS.closeStream(stream.fd);
|
|
}
|
|
stream.fd = null;
|
|
},
|
|
isClosed(stream) {
|
|
return stream.fd === null;
|
|
},
|
|
llseek(stream, offset, whence) {
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (!stream.seekable || !stream.stream_ops.llseek) {
|
|
throw new FS.ErrnoError(70);
|
|
}
|
|
if (whence != 0 && whence != 1 && whence != 2) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
stream.position = stream.stream_ops.llseek(stream, offset, whence);
|
|
stream.ungotten = [];
|
|
return stream.position;
|
|
},
|
|
read(stream, buffer, offset, length, position) {
|
|
assert(offset >= 0);
|
|
if (length < 0 || position < 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if ((stream.flags & 2097155) === 1) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (FS.isDir(stream.node.mode)) {
|
|
throw new FS.ErrnoError(31);
|
|
}
|
|
if (!stream.stream_ops.read) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
var seeking = typeof position != "undefined";
|
|
if (!seeking) {
|
|
position = stream.position;
|
|
} else if (!stream.seekable) {
|
|
throw new FS.ErrnoError(70);
|
|
}
|
|
var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position);
|
|
if (!seeking) stream.position += bytesRead;
|
|
return bytesRead;
|
|
},
|
|
write(stream, buffer, offset, length, position, canOwn) {
|
|
assert(offset >= 0);
|
|
if (length < 0 || position < 0) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (FS.isClosed(stream)) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if ((stream.flags & 2097155) === 0) {
|
|
throw new FS.ErrnoError(8);
|
|
}
|
|
if (FS.isDir(stream.node.mode)) {
|
|
throw new FS.ErrnoError(31);
|
|
}
|
|
if (!stream.stream_ops.write) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
if (stream.seekable && stream.flags & 1024) {
|
|
// seek to the end before writing in append mode
|
|
FS.llseek(stream, 0, 2);
|
|
}
|
|
var seeking = typeof position != "undefined";
|
|
if (!seeking) {
|
|
position = stream.position;
|
|
} else if (!stream.seekable) {
|
|
throw new FS.ErrnoError(70);
|
|
}
|
|
var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn);
|
|
if (!seeking) stream.position += bytesWritten;
|
|
return bytesWritten;
|
|
},
|
|
mmap(stream, length, position, prot, flags) {
|
|
// User requests writing to file (prot & PROT_WRITE != 0).
|
|
// Checking if we have permissions to write to the file unless
|
|
// MAP_PRIVATE flag is set. According to POSIX spec it is possible
|
|
// to write to file opened in read-only mode with MAP_PRIVATE flag,
|
|
// as all modifications will be visible only in the memory of
|
|
// the current process.
|
|
if ((prot & 2) !== 0 && (flags & 2) === 0 && (stream.flags & 2097155) !== 2) {
|
|
throw new FS.ErrnoError(2);
|
|
}
|
|
if ((stream.flags & 2097155) === 1) {
|
|
throw new FS.ErrnoError(2);
|
|
}
|
|
if (!stream.stream_ops.mmap) {
|
|
throw new FS.ErrnoError(43);
|
|
}
|
|
if (!length) {
|
|
throw new FS.ErrnoError(28);
|
|
}
|
|
return stream.stream_ops.mmap(stream, length, position, prot, flags);
|
|
},
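// Note (illustrative): PROT_WRITE (2) mappings require a stream opened for
// read/write unless MAP_PRIVATE (2) is set, because private mappings are
// copy-on-write and never flushed back to the file; write-only streams cannot
// be mapped at all.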
|
|
msync(stream, buffer, offset, length, mmapFlags) {
|
|
assert(offset >= 0);
|
|
if (!stream.stream_ops.msync) {
|
|
return 0;
|
|
}
|
|
return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags);
|
|
},
|
|
ioctl(stream, cmd, arg) {
|
|
if (!stream.stream_ops.ioctl) {
|
|
throw new FS.ErrnoError(59);
|
|
}
|
|
return stream.stream_ops.ioctl(stream, cmd, arg);
|
|
},
|
|
readFile(path, opts = {}) {
|
|
opts.flags = opts.flags || 0;
|
|
opts.encoding = opts.encoding || "binary";
|
|
if (opts.encoding !== "utf8" && opts.encoding !== "binary") {
|
|
abort(`Invalid encoding type "${opts.encoding}"`);
|
|
}
|
|
var stream = FS.open(path, opts.flags);
|
|
var stat = FS.stat(path);
|
|
var length = stat.size;
|
|
var buf = new Uint8Array(length);
|
|
FS.read(stream, buf, 0, length, 0);
|
|
if (opts.encoding === "utf8") {
|
|
buf = UTF8ArrayToString(buf);
|
|
}
|
|
FS.close(stream);
|
|
return buf;
|
|
},
|
|
writeFile(path, data, opts = {}) {
|
|
opts.flags = opts.flags || 577;
|
|
var stream = FS.open(path, opts.flags, opts.mode);
|
|
if (typeof data == "string") {
|
|
data = new Uint8Array(intArrayFromString(data, true));
|
|
}
|
|
if (ArrayBuffer.isView(data)) {
|
|
FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn);
|
|
} else {
|
|
abort("Unsupported data type");
|
|
}
|
|
FS.close(stream);
|
|
},
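// Usage sketch (illustrative): round-trip a UTF-8 text file through the VFS.
//   FS.writeFile('/tmp/hello.txt', 'hello world');
//   var text  = FS.readFile('/tmp/hello.txt', { encoding: 'utf8' }); // string
//   var bytes = FS.readFile('/tmp/hello.txt');                       // Uint8Array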
|
|
cwd: () => FS.currentPath,
|
|
chdir(path) {
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: true
|
|
});
|
|
if (lookup.node === null) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
if (!FS.isDir(lookup.node.mode)) {
|
|
throw new FS.ErrnoError(54);
|
|
}
|
|
var errCode = FS.nodePermissions(lookup.node, "x");
|
|
if (errCode) {
|
|
throw new FS.ErrnoError(errCode);
|
|
}
|
|
FS.currentPath = lookup.path;
|
|
},
|
|
createDefaultDirectories() {
|
|
FS.mkdir("/tmp");
|
|
FS.mkdir("/home");
|
|
FS.mkdir("/home/web_user");
|
|
},
|
|
createDefaultDevices() {
|
|
// create /dev
|
|
FS.mkdir("/dev");
|
|
// setup /dev/null
|
|
FS.registerDevice(FS.makedev(1, 3), {
|
|
read: () => 0,
|
|
write: (stream, buffer, offset, length, pos) => length,
|
|
llseek: () => 0
|
|
});
|
|
FS.mkdev("/dev/null", FS.makedev(1, 3));
|
|
// setup /dev/tty and /dev/tty1
|
|
// stderr needs to print output using err() rather than out()
|
|
// so we register a second tty just for it.
|
|
TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
|
|
TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
|
|
FS.mkdev("/dev/tty", FS.makedev(5, 0));
|
|
FS.mkdev("/dev/tty1", FS.makedev(6, 0));
|
|
// setup /dev/[u]random
|
|
// use a buffer to avoid overhead of individual crypto calls per byte
|
|
var randomBuffer = new Uint8Array(1024), randomLeft = 0;
|
|
var randomByte = () => {
|
|
if (randomLeft === 0) {
|
|
randomFill(randomBuffer);
|
|
randomLeft = randomBuffer.byteLength;
|
|
}
|
|
return randomBuffer[--randomLeft];
|
|
};
|
|
FS.createDevice("/dev", "random", randomByte);
|
|
FS.createDevice("/dev", "urandom", randomByte);
|
|
// we're not going to emulate the actual shm device,
|
|
// just create the tmp dirs that reside in it commonly
|
|
FS.mkdir("/dev/shm");
|
|
FS.mkdir("/dev/shm/tmp");
|
|
},
|
|
createSpecialDirectories() {
|
|
// create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the
|
|
// name of the stream for fd 6 (see test_unistd_ttyname)
|
|
FS.mkdir("/proc");
|
|
var proc_self = FS.mkdir("/proc/self");
|
|
FS.mkdir("/proc/self/fd");
|
|
FS.mount({
|
|
mount() {
|
|
var node = FS.createNode(proc_self, "fd", 16895, 73);
|
|
node.stream_ops = {
|
|
llseek: MEMFS.stream_ops.llseek
|
|
};
|
|
node.node_ops = {
|
|
lookup(parent, name) {
|
|
var fd = +name;
|
|
var stream = FS.getStreamChecked(fd);
|
|
var ret = {
|
|
parent: null,
|
|
mount: {
|
|
mountpoint: "fake"
|
|
},
|
|
node_ops: {
|
|
readlink: () => stream.path
|
|
},
|
|
id: fd + 1
|
|
};
|
|
ret.parent = ret;
|
|
// make it look like a simple root node
|
|
return ret;
|
|
},
|
|
readdir() {
|
|
return Array.from(FS.streams.entries()).filter(([k, v]) => v).map(([k, v]) => k.toString());
|
|
}
|
|
};
|
|
return node;
|
|
}
|
|
}, {}, "/proc/self/fd");
|
|
},
|
|
createStandardStreams(input, output, error) {
|
|
// TODO deprecate the old functionality of a single
|
|
// input / output callback that utilizes FS.createDevice
|
|
// and instead require a unique set of stream ops
|
|
// by default, we symlink the standard streams to the
|
|
// default tty devices. however, if the standard streams
|
|
// have been overwritten we create a unique device for
|
|
// them instead.
|
|
if (input) {
|
|
FS.createDevice("/dev", "stdin", input);
|
|
} else {
|
|
FS.symlink("/dev/tty", "/dev/stdin");
|
|
}
|
|
if (output) {
|
|
FS.createDevice("/dev", "stdout", null, output);
|
|
} else {
|
|
FS.symlink("/dev/tty", "/dev/stdout");
|
|
}
|
|
if (error) {
|
|
FS.createDevice("/dev", "stderr", null, error);
|
|
} else {
|
|
FS.symlink("/dev/tty1", "/dev/stderr");
|
|
}
|
|
// open default streams for the stdin, stdout and stderr devices
|
|
var stdin = FS.open("/dev/stdin", 0);
|
|
var stdout = FS.open("/dev/stdout", 1);
|
|
var stderr = FS.open("/dev/stderr", 1);
|
|
assert(stdin.fd === 0, `invalid handle for stdin (${stdin.fd})`);
|
|
assert(stdout.fd === 1, `invalid handle for stdout (${stdout.fd})`);
|
|
assert(stderr.fd === 2, `invalid handle for stderr (${stderr.fd})`);
|
|
},
|
|
staticInit() {
|
|
FS.nameTable = new Array(4096);
|
|
FS.mount(MEMFS, {}, "/");
|
|
FS.createDefaultDirectories();
|
|
FS.createDefaultDevices();
|
|
FS.createSpecialDirectories();
|
|
FS.filesystems = {
|
|
"MEMFS": MEMFS,
|
|
"IDBFS": IDBFS
|
|
};
|
|
},
|
|
init(input, output, error) {
|
|
assert(!FS.initialized, "FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)");
|
|
FS.initialized = true;
|
|
// Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here
|
|
input ??= Module["stdin"];
|
|
output ??= Module["stdout"];
|
|
error ??= Module["stderr"];
|
|
FS.createStandardStreams(input, output, error);
|
|
},
|
|
quit() {
|
|
FS.initialized = false;
|
|
// force-flush all streams, so we get musl std streams printed out
|
|
_fflush(0);
|
|
// close all of our streams
|
|
for (var stream of FS.streams) {
|
|
if (stream) {
|
|
FS.close(stream);
|
|
}
|
|
}
|
|
},
|
|
findObject(path, dontResolveLastLink) {
|
|
var ret = FS.analyzePath(path, dontResolveLastLink);
|
|
if (!ret.exists) {
|
|
return null;
|
|
}
|
|
return ret.object;
|
|
},
|
|
analyzePath(path, dontResolveLastLink) {
|
|
// operate from within the context of the symlink's target
|
|
try {
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: !dontResolveLastLink
|
|
});
|
|
path = lookup.path;
|
|
} catch (e) {}
|
|
var ret = {
|
|
isRoot: false,
|
|
exists: false,
|
|
error: 0,
|
|
name: null,
|
|
path: null,
|
|
object: null,
|
|
parentExists: false,
|
|
parentPath: null,
|
|
parentObject: null
|
|
};
|
|
try {
|
|
var lookup = FS.lookupPath(path, {
|
|
parent: true
|
|
});
|
|
ret.parentExists = true;
|
|
ret.parentPath = lookup.path;
|
|
ret.parentObject = lookup.node;
|
|
ret.name = PATH.basename(path);
|
|
lookup = FS.lookupPath(path, {
|
|
follow: !dontResolveLastLink
|
|
});
|
|
ret.exists = true;
|
|
ret.path = lookup.path;
|
|
ret.object = lookup.node;
|
|
ret.name = lookup.node.name;
|
|
ret.isRoot = lookup.path === "/";
|
|
} catch (e) {
|
|
ret.error = e.errno;
|
|
}
|
|
return ret;
|
|
},
|
|
createPath(parent, path, canRead, canWrite) {
|
|
parent = typeof parent == "string" ? parent : FS.getPath(parent);
|
|
var parts = path.split("/").reverse();
|
|
while (parts.length) {
|
|
var part = parts.pop();
|
|
if (!part) continue;
|
|
var current = PATH.join2(parent, part);
|
|
try {
|
|
FS.mkdir(current);
|
|
} catch (e) {
|
|
if (e.errno != 20) throw e;
|
|
}
|
|
parent = current;
|
|
}
|
|
return current;
|
|
},
|
|
createFile(parent, name, properties, canRead, canWrite) {
|
|
var path = PATH.join2(typeof parent == "string" ? parent : FS.getPath(parent), name);
|
|
var mode = FS_getMode(canRead, canWrite);
|
|
return FS.create(path, mode);
|
|
},
|
|
createDataFile(parent, name, data, canRead, canWrite, canOwn) {
|
|
var path = name;
|
|
if (parent) {
|
|
parent = typeof parent == "string" ? parent : FS.getPath(parent);
|
|
path = name ? PATH.join2(parent, name) : parent;
|
|
}
|
|
var mode = FS_getMode(canRead, canWrite);
|
|
var node = FS.create(path, mode);
|
|
if (data) {
|
|
if (typeof data == "string") {
|
|
var arr = new Array(data.length);
|
|
for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i);
|
|
data = arr;
|
|
}
|
|
// make sure we can write to the file
|
|
FS.chmod(node, mode | 146);
|
|
var stream = FS.open(node, 577);
|
|
FS.write(stream, data, 0, data.length, 0, canOwn);
|
|
FS.close(stream);
|
|
FS.chmod(node, mode);
|
|
}
|
|
},
|
|
createDevice(parent, name, input, output) {
|
|
var path = PATH.join2(typeof parent == "string" ? parent : FS.getPath(parent), name);
|
|
var mode = FS_getMode(!!input, !!output);
|
|
FS.createDevice.major ??= 64;
|
|
var dev = FS.makedev(FS.createDevice.major++, 0);
|
|
// Create a fake device with a set of stream ops to emulate
|
|
// the old behavior.
|
|
FS.registerDevice(dev, {
|
|
open(stream) {
|
|
stream.seekable = false;
|
|
},
|
|
close(stream) {
|
|
// flush any pending line data
|
|
if (output?.buffer?.length) {
|
|
output(10);
|
|
}
|
|
},
|
|
read(stream, buffer, offset, length, pos) {
|
|
var bytesRead = 0;
|
|
for (var i = 0; i < length; i++) {
|
|
var result;
|
|
try {
|
|
result = input();
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
if (result === undefined && bytesRead === 0) {
|
|
throw new FS.ErrnoError(6);
|
|
}
|
|
if (result === null || result === undefined) break;
|
|
bytesRead++;
|
|
buffer[offset + i] = result;
|
|
}
|
|
if (bytesRead) {
|
|
stream.node.atime = Date.now();
|
|
}
|
|
return bytesRead;
|
|
},
|
|
write(stream, buffer, offset, length, pos) {
|
|
for (var i = 0; i < length; i++) {
|
|
try {
|
|
output(buffer[offset + i]);
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
}
|
|
if (length) {
|
|
stream.node.mtime = stream.node.ctime = Date.now();
|
|
}
|
|
return i;
|
|
}
|
|
});
|
|
return FS.mkdev(path, mode, dev);
|
|
},
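// Illustrative usage of createDevice above: reads pull one byte per call from
// `input`, writes push one byte per call to `output` (both names are the
// caller's own callbacks, not runtime APIs).
//   FS.createDevice('/dev', 'ticker', () => 42, byte => console.log(byte));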
|
|
forceLoadFile(obj) {
|
|
if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true;
|
|
if (globalThis.XMLHttpRequest) {
|
|
abort("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.");
|
|
} else {
|
|
// Command-line.
|
|
try {
|
|
obj.contents = readBinary(obj.url);
|
|
} catch (e) {
|
|
throw new FS.ErrnoError(29);
|
|
}
|
|
}
|
|
},
|
|
createLazyFile(parent, name, url, canRead, canWrite) {
|
|
// Lazy chunked Uint8Array (implements get and length from Uint8Array).
|
|
// Actual getting is abstracted away for eventual reuse.
|
|
class LazyUint8Array {
|
|
lengthKnown=false;
|
|
chunks=[];
|
|
// Loaded chunks. Index is the chunk number
|
|
get(idx) {
|
|
if (idx > this.length - 1 || idx < 0) {
|
|
return undefined;
|
|
}
|
|
var chunkOffset = idx % this.chunkSize;
|
|
var chunkNum = (idx / this.chunkSize) | 0;
|
|
return this.getter(chunkNum)[chunkOffset];
|
|
}
|
|
setDataGetter(getter) {
|
|
this.getter = getter;
|
|
}
|
|
cacheLength() {
|
|
// Find length
|
|
var xhr = new XMLHttpRequest;
|
|
xhr.open("HEAD", url, false);
|
|
xhr.send(null);
|
|
if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) abort("Couldn't load " + url + ". Status: " + xhr.status);
|
|
var datalength = Number(xhr.getResponseHeader("Content-length"));
|
|
var header;
|
|
var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes";
|
|
var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip";
|
|
var chunkSize = 1024 * 1024;
|
|
// Chunk size in bytes
|
|
if (!hasByteServing) chunkSize = datalength;
|
|
// Function to get a range from the remote URL.
|
|
var doXHR = (from, to) => {
|
|
if (from > to) abort("invalid range (" + from + ", " + to + ") or no bytes requested!");
|
|
if (to > datalength - 1) abort("only " + datalength + " bytes available! programmer error!");
|
|
// TODO: Use mozResponseArrayBuffer, responseStream, etc. if available.
|
|
var xhr = new XMLHttpRequest;
|
|
xhr.open("GET", url, false);
|
|
if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
|
|
// Some hints to the browser that we want binary data.
|
|
xhr.responseType = "arraybuffer";
|
|
if (xhr.overrideMimeType) {
|
|
xhr.overrideMimeType("text/plain; charset=x-user-defined");
|
|
}
|
|
xhr.send(null);
|
|
if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) abort("Couldn't load " + url + ". Status: " + xhr.status);
|
|
if (xhr.response !== undefined) {
|
|
return new Uint8Array(/** @type{Array<number>} */ (xhr.response || []));
|
|
}
|
|
return intArrayFromString(xhr.responseText || "", true);
|
|
};
|
|
var lazyArray = this;
|
|
lazyArray.setDataGetter(chunkNum => {
|
|
var start = chunkNum * chunkSize;
|
|
var end = (chunkNum + 1) * chunkSize - 1;
|
|
// including this byte
|
|
end = Math.min(end, datalength - 1);
|
|
// if datalength-1 is selected, this is the last block
|
|
if (typeof lazyArray.chunks[chunkNum] == "undefined") {
|
|
lazyArray.chunks[chunkNum] = doXHR(start, end);
|
|
}
|
|
if (typeof lazyArray.chunks[chunkNum] == "undefined") abort("doXHR failed!");
|
|
return lazyArray.chunks[chunkNum];
|
|
});
|
|
if (usesGzip || !datalength) {
|
|
// if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length
|
|
chunkSize = datalength = 1;
|
|
// this will force getter(0)/doXHR to download the whole file
|
|
datalength = this.getter(0).length;
|
|
chunkSize = datalength;
|
|
out("LazyFiles on gzip forces download of the whole file when length is accessed");
|
|
}
|
|
this._length = datalength;
|
|
this._chunkSize = chunkSize;
|
|
this.lengthKnown = true;
|
|
}
|
|
get length() {
|
|
if (!this.lengthKnown) {
|
|
this.cacheLength();
|
|
}
|
|
return this._length;
|
|
}
|
|
get chunkSize() {
|
|
if (!this.lengthKnown) {
|
|
this.cacheLength();
|
|
}
|
|
return this._chunkSize;
|
|
}
|
|
}
|
|
if (globalThis.XMLHttpRequest) {
|
|
if (!ENVIRONMENT_IS_WORKER) abort("Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc");
|
|
var lazyArray = new LazyUint8Array;
|
|
var properties = {
|
|
isDevice: false,
|
|
contents: lazyArray
|
|
};
|
|
} else {
|
|
var properties = {
|
|
isDevice: false,
|
|
url
|
|
};
|
|
}
|
|
var node = FS.createFile(parent, name, properties, canRead, canWrite);
|
|
// This is a total hack, but I want to get this lazy file code out of the
|
|
// core of MEMFS. If we want to keep this lazy file concept I feel it should
|
|
// be its own thin LAZYFS proxying calls to MEMFS.
|
|
if (properties.contents) {
|
|
node.contents = properties.contents;
|
|
} else if (properties.url) {
|
|
node.contents = null;
|
|
node.url = properties.url;
|
|
}
|
|
// Add a function that defers querying the file size until it is asked the first time.
|
|
Object.defineProperties(node, {
|
|
usedBytes: {
|
|
get: function() {
|
|
return this.contents.length;
|
|
}
|
|
}
|
|
});
|
|
// override each stream op with one that tries to force load the lazy file first
|
|
var stream_ops = {};
|
|
for (const [key, fn] of Object.entries(node.stream_ops)) {
|
|
stream_ops[key] = (...args) => {
|
|
FS.forceLoadFile(node);
|
|
return fn(...args);
|
|
};
|
|
}
|
|
function writeChunks(stream, buffer, offset, length, position) {
|
|
var contents = stream.node.contents;
|
|
if (position >= contents.length) return 0;
|
|
var size = Math.min(contents.length - position, length);
|
|
assert(size >= 0);
|
|
if (contents.slice) {
|
|
// normal array
|
|
for (var i = 0; i < size; i++) {
|
|
buffer[offset + i] = contents[position + i];
|
|
}
|
|
} else {
|
|
for (var i = 0; i < size; i++) {
|
|
// LazyUint8Array from sync binary XHR
|
|
buffer[offset + i] = contents.get(position + i);
|
|
}
|
|
}
|
|
return size;
|
|
}
|
|
// use a custom read function
|
|
stream_ops.read = (stream, buffer, offset, length, position) => {
|
|
FS.forceLoadFile(node);
|
|
return writeChunks(stream, buffer, offset, length, position);
|
|
};
|
|
// use a custom mmap function
|
|
stream_ops.mmap = (stream, length, position, prot, flags) => {
|
|
FS.forceLoadFile(node);
|
|
var ptr = mmapAlloc(length);
|
|
if (!ptr) {
|
|
throw new FS.ErrnoError(48);
|
|
}
|
|
writeChunks(stream, HEAP8, ptr, length, position);
|
|
return {
|
|
ptr,
|
|
allocated: true
|
|
};
|
|
};
|
|
node.stream_ops = stream_ops;
|
|
return node;
|
|
},
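// Usage sketch (illustrative; the URL is a placeholder): in a worker or under
// node, createLazyFile defers all network traffic until the file is touched.
//   var node = FS.createLazyFile('/', 'big.bin', 'https://example.com/big.bin', true, false);
//   // Reading node.usedBytes issues a HEAD request; reads then fetch 1 MiB byte ranges on demand.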
|
|
absolutePath() {
|
|
abort("FS.absolutePath has been removed; use PATH_FS.resolve instead");
|
|
},
|
|
createFolder() {
|
|
abort("FS.createFolder has been removed; use FS.mkdir instead");
|
|
},
|
|
createLink() {
|
|
abort("FS.createLink has been removed; use FS.symlink instead");
|
|
},
|
|
joinPath() {
|
|
abort("FS.joinPath has been removed; use PATH.join instead");
|
|
},
|
|
mmapAlloc() {
|
|
abort("FS.mmapAlloc has been replaced by the top level function mmapAlloc");
|
|
},
|
|
standardizePath() {
|
|
abort("FS.standardizePath has been removed; use PATH.normalize instead");
|
|
}
|
|
};
|
|
|
|
var SYSCALLS = {
|
|
DEFAULT_POLLMASK: 5,
|
|
calculateAt(dirfd, path, allowEmpty) {
|
|
if (PATH.isAbs(path)) {
|
|
return path;
|
|
}
|
|
// relative path
|
|
var dir;
|
|
if (dirfd === -100) {
|
|
dir = FS.cwd();
|
|
} else {
|
|
var dirstream = SYSCALLS.getStreamFromFD(dirfd);
|
|
dir = dirstream.path;
|
|
}
|
|
if (path.length == 0) {
|
|
if (!allowEmpty) {
|
|
throw new FS.ErrnoError(44);
|
|
}
|
|
return dir;
|
|
}
|
|
return dir + "/" + path;
|
|
},
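// Note (illustrative): dirfd === -100 is AT_FDCWD, so relative paths resolve
// against FS.cwd(); any other dirfd resolves against that open directory's path.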
|
|
writeStat(buf, stat) {
|
|
HEAPU32[((buf) >> 2)] = stat.dev;
|
|
HEAPU32[(((buf) + (4)) >> 2)] = stat.mode;
|
|
HEAPU32[(((buf) + (8)) >> 2)] = stat.nlink;
|
|
HEAPU32[(((buf) + (12)) >> 2)] = stat.uid;
|
|
HEAPU32[(((buf) + (16)) >> 2)] = stat.gid;
|
|
HEAPU32[(((buf) + (20)) >> 2)] = stat.rdev;
|
|
(tempI64 = [ stat.size >>> 0, (tempDouble = stat.size, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (24)) >> 2)] = tempI64[0], HEAP32[(((buf) + (28)) >> 2)] = tempI64[1]);
|
|
HEAP32[(((buf) + (32)) >> 2)] = 4096;
|
|
HEAP32[(((buf) + (36)) >> 2)] = stat.blocks;
|
|
var atime = stat.atime.getTime();
|
|
var mtime = stat.mtime.getTime();
|
|
var ctime = stat.ctime.getTime();
|
|
(tempI64 = [ Math.floor(atime / 1e3) >>> 0, (tempDouble = Math.floor(atime / 1e3),
|
|
(+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (40)) >> 2)] = tempI64[0], HEAP32[(((buf) + (44)) >> 2)] = tempI64[1]);
|
|
HEAPU32[(((buf) + (48)) >> 2)] = (atime % 1e3) * 1e3 * 1e3;
|
|
(tempI64 = [ Math.floor(mtime / 1e3) >>> 0, (tempDouble = Math.floor(mtime / 1e3),
|
|
(+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (56)) >> 2)] = tempI64[0], HEAP32[(((buf) + (60)) >> 2)] = tempI64[1]);
|
|
HEAPU32[(((buf) + (64)) >> 2)] = (mtime % 1e3) * 1e3 * 1e3;
|
|
(tempI64 = [ Math.floor(ctime / 1e3) >>> 0, (tempDouble = Math.floor(ctime / 1e3),
|
|
(+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (72)) >> 2)] = tempI64[0], HEAP32[(((buf) + (76)) >> 2)] = tempI64[1]);
|
|
HEAPU32[(((buf) + (80)) >> 2)] = (ctime % 1e3) * 1e3 * 1e3;
|
|
(tempI64 = [ stat.ino >>> 0, (tempDouble = stat.ino, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (88)) >> 2)] = tempI64[0], HEAP32[(((buf) + (92)) >> 2)] = tempI64[1]);
|
|
return 0;
|
|
},
|
|
writeStatFs(buf, stats) {
|
|
HEAPU32[(((buf) + (4)) >> 2)] = stats.bsize;
|
|
HEAPU32[(((buf) + (60)) >> 2)] = stats.bsize;
|
|
(tempI64 = [ stats.blocks >>> 0, (tempDouble = stats.blocks, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (8)) >> 2)] = tempI64[0], HEAP32[(((buf) + (12)) >> 2)] = tempI64[1]);
|
|
(tempI64 = [ stats.bfree >>> 0, (tempDouble = stats.bfree, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (16)) >> 2)] = tempI64[0], HEAP32[(((buf) + (20)) >> 2)] = tempI64[1]);
|
|
(tempI64 = [ stats.bavail >>> 0, (tempDouble = stats.bavail, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (24)) >> 2)] = tempI64[0], HEAP32[(((buf) + (28)) >> 2)] = tempI64[1]);
|
|
(tempI64 = [ stats.files >>> 0, (tempDouble = stats.files, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (32)) >> 2)] = tempI64[0], HEAP32[(((buf) + (36)) >> 2)] = tempI64[1]);
|
|
(tempI64 = [ stats.ffree >>> 0, (tempDouble = stats.ffree, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((buf) + (40)) >> 2)] = tempI64[0], HEAP32[(((buf) + (44)) >> 2)] = tempI64[1]);
|
|
HEAPU32[(((buf) + (48)) >> 2)] = stats.fsid;
|
|
HEAPU32[(((buf) + (64)) >> 2)] = stats.flags;
|
|
// ST_NOSUID
|
|
HEAPU32[(((buf) + (56)) >> 2)] = stats.namelen;
|
|
},
|
|
doMsync(addr, stream, len, flags, offset) {
|
|
if (!FS.isFile(stream.node.mode)) {
|
|
throw new FS.ErrnoError(43);
|
|
}
|
|
if (flags & 2) {
|
|
// MAP_PRIVATE calls need not be synced back to the underlying fs
|
|
return 0;
|
|
}
|
|
var buffer = HEAPU8.slice(addr, addr + len);
|
|
FS.msync(stream, buffer, offset, len, flags);
|
|
},
|
|
getStreamFromFD(fd) {
|
|
var stream = FS.getStreamChecked(fd);
|
|
return stream;
|
|
},
|
|
varargs: undefined,
|
|
getStr(ptr) {
|
|
var ret = UTF8ToString(ptr);
|
|
return ret;
|
|
}
|
|
};
|
|
|
|
function ___syscall_dup(fd) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(3, 0, 1, fd);
|
|
try {
|
|
var old = SYSCALLS.getStreamFromFD(fd);
|
|
return FS.dupStream(old).fd;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
function ___syscall_faccessat(dirfd, path, amode, flags) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(4, 0, 1, dirfd, path, amode, flags);
|
|
try {
|
|
path = SYSCALLS.getStr(path);
|
|
assert(!flags || flags == 512);
|
|
path = SYSCALLS.calculateAt(dirfd, path);
|
|
if (amode & ~7) {
|
|
// need a valid mode
|
|
return -28;
|
|
}
|
|
var lookup = FS.lookupPath(path, {
|
|
follow: true
|
|
});
|
|
var node = lookup.node;
|
|
if (!node) {
|
|
return -44;
|
|
}
|
|
var perms = "";
|
|
if (amode & 4) perms += "r";
|
|
if (amode & 2) perms += "w";
|
|
if (amode & 1) perms += "x";
|
|
if (perms && FS.nodePermissions(node, perms)) {
|
|
return -2;
|
|
}
|
|
return 0;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
var syscallGetVarargI = () => {
|
|
assert(SYSCALLS.varargs != undefined);
|
|
// the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number.
|
|
var ret = HEAP32[((+SYSCALLS.varargs) >> 2)];
|
|
SYSCALLS.varargs += 4;
|
|
return ret;
|
|
};
|
|
|
|
var syscallGetVarargP = syscallGetVarargI;
|
|
|
|
function ___syscall_fcntl64(fd, cmd, varargs) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(5, 0, 1, fd, cmd, varargs);
|
|
SYSCALLS.varargs = varargs;
|
|
try {
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
switch (cmd) {
|
|
case 0:
|
|
{
|
|
var arg = syscallGetVarargI();
|
|
if (arg < 0) {
|
|
return -28;
|
|
}
|
|
while (FS.streams[arg]) {
|
|
arg++;
|
|
}
|
|
var newStream;
|
|
newStream = FS.dupStream(stream, arg);
|
|
return newStream.fd;
|
|
}
|
|
|
|
case 1:
|
|
case 2:
|
|
return 0;
|
|
|
|
// FD_CLOEXEC makes no sense for a single process.
|
|
case 3:
|
|
return stream.flags;
|
|
|
|
case 4:
|
|
{
|
|
var arg = syscallGetVarargI();
|
|
stream.flags |= arg;
|
|
return 0;
|
|
}
|
|
|
|
case 12:
|
|
{
|
|
var arg = syscallGetVarargP();
|
|
var offset = 0;
|
|
// We're always unlocked.
|
|
HEAP16[(((arg) + (offset)) >> 1)] = 2;
|
|
return 0;
|
|
}
|
|
|
|
case 13:
|
|
case 14:
|
|
// Pretend that the locking is successful. These are process-level locks,
|
|
// and Emscripten programs are a single process. If we supported linking a
|
|
// filesystem between programs, we'd need to do more here.
|
|
// See https://github.com/emscripten-core/emscripten/issues/23697
|
|
return 0;
|
|
}
|
|
return -28;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
function ___syscall_fstat64(fd, buf) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(6, 0, 1, fd, buf);
|
|
try {
|
|
return SYSCALLS.writeStat(buf, FS.fstat(fd));
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
var convertI32PairToI53Checked = (lo, hi) => {
|
|
assert(lo == (lo >>> 0) || lo == (lo | 0));
|
|
// lo should either be an i32 or a u32
|
|
assert(hi === (hi | 0));
|
|
// hi should be a i32
|
|
return ((hi + 2097152) >>> 0 < 4194305 - !!lo) ? (lo >>> 0) + hi * 4294967296 : NaN;
|
|
};
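// Worked example (illustrative): the i64 value 2^32 arrives as (lo = 0, hi = 1)
// and converts to 0 + 1 * 4294967296 = 4294967296. Pairs whose magnitude exceeds
// the 53-bit safe-integer range (|hi| >= 2097152, i.e. 2^21) yield NaN so callers
// can reject them.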
|
|
|
|
function ___syscall_ftruncate64(fd, length_low, length_high) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(7, 0, 1, fd, length_low, length_high);
|
|
var length = convertI32PairToI53Checked(length_low, length_high);
|
|
try {
|
|
if (isNaN(length)) return -61;
|
|
FS.ftruncate(fd, length);
|
|
return 0;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
var stringToUTF8 = (str, outPtr, maxBytesToWrite) => {
|
|
assert(typeof maxBytesToWrite == "number", "stringToUTF8(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!");
|
|
return stringToUTF8Array(str, HEAPU8, outPtr, maxBytesToWrite);
|
|
};
|
|
|
|
function ___syscall_getdents64(fd, dirp, count) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(8, 0, 1, fd, dirp, count);
|
|
try {
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
stream.getdents ||= FS.readdir(stream.path);
|
|
var struct_size = 280;
|
|
var pos = 0;
|
|
var off = FS.llseek(stream, 0, 1);
|
|
var startIdx = Math.floor(off / struct_size);
|
|
var endIdx = Math.min(stream.getdents.length, startIdx + Math.floor(count / struct_size));
|
|
for (var idx = startIdx; idx < endIdx; idx++) {
|
|
var id;
|
|
var type;
|
|
var name = stream.getdents[idx];
|
|
if (name === ".") {
|
|
id = stream.node.id;
|
|
type = 4;
|
|
} else if (name === "..") {
|
|
var lookup = FS.lookupPath(stream.path, {
|
|
parent: true
|
|
});
|
|
id = lookup.node.id;
|
|
type = 4;
|
|
} else {
|
|
var child;
|
|
try {
|
|
child = FS.lookupNode(stream.node, name);
|
|
} catch (e) {
|
|
// If the entry is not a directory, file, or symlink, nodefs
|
|
// lookupNode will raise EINVAL. Skip these and continue.
|
|
if (e?.errno === 28) {
|
|
continue;
|
|
}
|
|
throw e;
|
|
}
|
|
id = child.id;
|
|
type = FS.isChrdev(child.mode) ? 2 : // DT_CHR, character device.
|
|
FS.isDir(child.mode) ? 4 : // DT_DIR, directory.
|
|
FS.isLink(child.mode) ? 10 : // DT_LNK, symbolic link.
|
|
8;
|
|
}
|
|
assert(id);
|
|
(tempI64 = [ id >>> 0, (tempDouble = id, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[((dirp + pos) >> 2)] = tempI64[0], HEAP32[(((dirp + pos) + (4)) >> 2)] = tempI64[1]);
|
|
(tempI64 = [ (idx + 1) * struct_size >>> 0, (tempDouble = (idx + 1) * struct_size,
|
|
(+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[(((dirp + pos) + (8)) >> 2)] = tempI64[0], HEAP32[(((dirp + pos) + (12)) >> 2)] = tempI64[1]);
|
|
HEAP16[(((dirp + pos) + (16)) >> 1)] = 280;
|
|
HEAP8[(dirp + pos) + (18)] = type;
|
|
stringToUTF8(name, dirp + pos + 19, 256);
|
|
pos += struct_size;
|
|
}
|
|
FS.llseek(stream, idx * struct_size, 0);
|
|
return pos;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
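// Note (illustrative): each linux_dirent64 record emitted above is a fixed 280
// bytes: d_ino at offset 0, d_off at 8, d_reclen (always 280) at 16, d_type at
// 18, and the NUL-terminated d_name (at most 256 bytes) starting at offset 19.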
|
|
|
|
function ___syscall_ioctl(fd, op, varargs) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(9, 0, 1, fd, op, varargs);
|
|
SYSCALLS.varargs = varargs;
|
|
try {
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
switch (op) {
|
|
case 21509:
|
|
{
|
|
if (!stream.tty) return -59;
|
|
return 0;
|
|
}
|
|
|
|
case 21505:
|
|
{
|
|
if (!stream.tty) return -59;
|
|
if (stream.tty.ops.ioctl_tcgets) {
|
|
var termios = stream.tty.ops.ioctl_tcgets(stream);
|
|
var argp = syscallGetVarargP();
|
|
HEAP32[((argp) >> 2)] = termios.c_iflag || 0;
|
|
HEAP32[(((argp) + (4)) >> 2)] = termios.c_oflag || 0;
|
|
HEAP32[(((argp) + (8)) >> 2)] = termios.c_cflag || 0;
|
|
HEAP32[(((argp) + (12)) >> 2)] = termios.c_lflag || 0;
|
|
for (var i = 0; i < 32; i++) {
|
|
HEAP8[(argp + i) + (17)] = termios.c_cc[i] || 0;
|
|
}
|
|
return 0;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
case 21510:
|
|
case 21511:
|
|
case 21512:
|
|
{
|
|
if (!stream.tty) return -59;
|
|
return 0;
|
|
}
|
|
|
|
case 21506:
|
|
case 21507:
|
|
case 21508:
|
|
{
|
|
if (!stream.tty) return -59;
|
|
if (stream.tty.ops.ioctl_tcsets) {
|
|
var argp = syscallGetVarargP();
|
|
var c_iflag = HEAP32[((argp) >> 2)];
|
|
var c_oflag = HEAP32[(((argp) + (4)) >> 2)];
|
|
var c_cflag = HEAP32[(((argp) + (8)) >> 2)];
|
|
var c_lflag = HEAP32[(((argp) + (12)) >> 2)];
|
|
var c_cc = [];
|
|
for (var i = 0; i < 32; i++) {
|
|
c_cc.push(HEAP8[(argp + i) + (17)]);
|
|
}
|
|
return stream.tty.ops.ioctl_tcsets(stream.tty, op, {
|
|
c_iflag,
|
|
c_oflag,
|
|
c_cflag,
|
|
c_lflag,
|
|
c_cc
|
|
});
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
case 21519:
|
|
{
|
|
if (!stream.tty) return -59;
|
|
var argp = syscallGetVarargP();
|
|
HEAP32[((argp) >> 2)] = 0;
|
|
return 0;
|
|
}
|
|
|
|
case 21520:
|
|
{
|
|
if (!stream.tty) return -59;
|
|
return -28;
|
|
}
|
|
|
|
case 21537:
|
|
case 21531:
|
|
{
|
|
var argp = syscallGetVarargP();
|
|
return FS.ioctl(stream, op, argp);
|
|
}
|
|
|
|
case 21523:
|
|
{
|
|
// TODO: in theory we should write to the winsize struct that gets
|
|
// passed in, but for now musl doesn't read anything from it
|
|
if (!stream.tty) return -59;
|
|
if (stream.tty.ops.ioctl_tiocgwinsz) {
|
|
var winsize = stream.tty.ops.ioctl_tiocgwinsz(stream.tty);
|
|
var argp = syscallGetVarargP();
|
|
HEAP16[((argp) >> 1)] = winsize[0];
|
|
HEAP16[(((argp) + (2)) >> 1)] = winsize[1];
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
case 21524:
|
|
{
|
|
// TODO: technically, this ioctl call should change the window size.
|
|
// but, since emscripten doesn't have any concept of a terminal window
|
|
// yet, we'll just silently throw it away as we do TIOCGWINSZ
|
|
if (!stream.tty) return -59;
|
|
return 0;
|
|
}
|
|
|
|
case 21515:
|
|
{
|
|
if (!stream.tty) return -59;
|
|
return 0;
|
|
}
|
|
|
|
default:
|
|
return -28;
|
|
}
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
function ___syscall_lstat64(path, buf) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(10, 0, 1, path, buf);
|
|
try {
|
|
path = SYSCALLS.getStr(path);
|
|
return SYSCALLS.writeStat(buf, FS.lstat(path));
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
function ___syscall_newfstatat(dirfd, path, buf, flags) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(11, 0, 1, dirfd, path, buf, flags);
|
|
try {
|
|
path = SYSCALLS.getStr(path);
|
|
var nofollow = flags & 256;
|
|
var allowEmpty = flags & 4096;
|
|
flags = flags & (~6400);
|
|
assert(!flags, `unknown flags in __syscall_newfstatat: ${flags}`);
|
|
path = SYSCALLS.calculateAt(dirfd, path, allowEmpty);
|
|
return SYSCALLS.writeStat(buf, nofollow ? FS.lstat(path) : FS.stat(path));
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
function ___syscall_openat(dirfd, path, flags, varargs) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(12, 0, 1, dirfd, path, flags, varargs);
|
|
SYSCALLS.varargs = varargs;
|
|
try {
|
|
path = SYSCALLS.getStr(path);
|
|
path = SYSCALLS.calculateAt(dirfd, path);
|
|
var mode = varargs ? syscallGetVarargI() : 0;
|
|
return FS.open(path, flags, mode).fd;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
function ___syscall_stat64(path, buf) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(13, 0, 1, path, buf);
|
|
try {
|
|
path = SYSCALLS.getStr(path);
|
|
return SYSCALLS.writeStat(buf, FS.stat(path));
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
var __abort_js = () => abort("native code called abort()");
|
|
|
|
var __embind_register_bigint = (primitiveType, name, size, minRange, maxRange) => {};
|
|
|
|
var AsciiToString = ptr => {
|
|
var str = "";
|
|
while (1) {
|
|
var ch = HEAPU8[ptr++];
|
|
if (!ch) return str;
|
|
str += String.fromCharCode(ch);
|
|
}
|
|
};
|
|
|
|
var awaitingDependencies = {};
|
|
|
|
var registeredTypes = {};
|
|
|
|
var typeDependencies = {};
|
|
|
|
var BindingError = class BindingError extends Error {
|
|
constructor(message) {
|
|
super(message);
|
|
this.name = "BindingError";
|
|
}
|
|
};
|
|
|
|
var throwBindingError = message => {
|
|
throw new BindingError(message);
|
|
};
|
|
|
|
/** @param {Object=} options */ function sharedRegisterType(rawType, registeredInstance, options = {}) {
|
|
var name = registeredInstance.name;
|
|
if (!rawType) {
|
|
throwBindingError(`type "${name}" must have a positive integer typeid pointer`);
|
|
}
|
|
if (registeredTypes.hasOwnProperty(rawType)) {
|
|
if (options.ignoreDuplicateRegistrations) {
|
|
return;
|
|
} else {
|
|
throwBindingError(`Cannot register type '${name}' twice`);
|
|
}
|
|
}
|
|
registeredTypes[rawType] = registeredInstance;
|
|
delete typeDependencies[rawType];
|
|
if (awaitingDependencies.hasOwnProperty(rawType)) {
|
|
var callbacks = awaitingDependencies[rawType];
|
|
delete awaitingDependencies[rawType];
|
|
callbacks.forEach(cb => cb());
|
|
}
|
|
}
|
|
|
|
/** @param {Object=} options */ function registerType(rawType, registeredInstance, options = {}) {
|
|
return sharedRegisterType(rawType, registeredInstance, options);
|
|
}
|
|
|
|
/** @suppress {globalThis} */ var __embind_register_bool = (rawType, name, trueValue, falseValue) => {
|
|
name = AsciiToString(name);
|
|
registerType(rawType, {
|
|
name,
|
|
fromWireType: function(wt) {
|
|
// ambiguous emscripten ABI: sometimes return values are
|
|
// true or false, and sometimes integers (0 or 1)
|
|
return !!wt;
|
|
},
|
|
toWireType: function(destructors, o) {
|
|
return o ? trueValue : falseValue;
|
|
},
|
|
readValueFromPointer: function(pointer) {
|
|
return this.fromWireType(HEAPU8[pointer]);
|
|
},
|
|
destructorFunction: null
|
|
});
|
|
};
|
|
|
|
var emval_freelist = [];
|
|
|
|
var emval_handles = [ 0, 1, , 1, null, 1, true, 1, false, 1 ];
|
|
|
|
var __emval_decref = handle => {
|
|
if (handle > 9 && 0 === --emval_handles[handle + 1]) {
|
|
assert(emval_handles[handle] !== undefined, `Decref for unallocated handle.`);
|
|
emval_handles[handle] = undefined;
|
|
emval_freelist.push(handle);
|
|
}
|
|
};
|
|
|
|
var Emval = {
|
|
toValue: handle => {
|
|
if (!handle) {
|
|
throwBindingError(`Cannot use deleted val. handle = ${handle}`);
|
|
}
|
|
// handle 2 is supposed to be `undefined`.
|
|
assert(handle === 2 || emval_handles[handle] !== undefined && handle % 2 === 0, `invalid handle: ${handle}`);
|
|
return emval_handles[handle];
|
|
},
|
|
toHandle: value => {
|
|
switch (value) {
|
|
case undefined:
|
|
return 2;
|
|
|
|
case null:
|
|
return 4;
|
|
|
|
case true:
|
|
return 6;
|
|
|
|
case false:
|
|
return 8;
|
|
|
|
default:
|
|
{
|
|
const handle = emval_freelist.pop() || emval_handles.length;
|
|
emval_handles[handle] = value;
|
|
emval_handles[handle + 1] = 1;
|
|
return handle;
|
|
}
|
|
}
|
|
}
|
|
};
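// Note (illustrative): ordinary emval handles are even indices into
// emval_handles, with the refcount stored in the odd slot right after the
// value. Handles 2/4/6/8 are pre-seeded for undefined/null/true/false and are
// never refcounted, which is why __emval_decref above only frees handles > 9.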
|
|
|
|
/** @suppress {globalThis} */ function readPointer(pointer) {
|
|
return this.fromWireType(HEAPU32[((pointer) >> 2)]);
|
|
}
|
|
|
|
var EmValType = {
|
|
name: "emscripten::val",
|
|
fromWireType: handle => {
|
|
var rv = Emval.toValue(handle);
|
|
__emval_decref(handle);
|
|
return rv;
|
|
},
|
|
toWireType: (destructors, value) => Emval.toHandle(value),
|
|
readValueFromPointer: readPointer,
|
|
destructorFunction: null
|
|
};
|
|
|
|
var __embind_register_emval = rawType => registerType(rawType, EmValType);
|
|
|
|
var floatReadValueFromPointer = (name, width) => {
|
|
switch (width) {
|
|
case 4:
|
|
return function(pointer) {
|
|
return this.fromWireType(HEAPF32[((pointer) >> 2)]);
|
|
};
|
|
|
|
case 8:
|
|
return function(pointer) {
|
|
return this.fromWireType(HEAPF64[((pointer) >> 3)]);
|
|
};
|
|
|
|
default:
|
|
throw new TypeError(`invalid float width (${width}): ${name}`);
|
|
}
|
|
};
|
|
|
|
var embindRepr = v => {
|
|
if (v === null) {
|
|
return "null";
|
|
}
|
|
var t = typeof v;
|
|
if (t === "object" || t === "array" || t === "function") {
|
|
return v.toString();
|
|
} else {
|
|
return "" + v;
|
|
}
|
|
};
|
|
|
|
var __embind_register_float = (rawType, name, size) => {
|
|
name = AsciiToString(name);
|
|
registerType(rawType, {
|
|
name,
|
|
fromWireType: value => value,
|
|
toWireType: (destructors, value) => {
|
|
if (typeof value != "number" && typeof value != "boolean") {
|
|
throw new TypeError(`Cannot convert ${embindRepr(value)} to ${name}`);
|
|
}
|
|
// The VM will perform JS to Wasm value conversion, according to the spec:
|
|
// https://www.w3.org/TR/wasm-js-api-1/#towebassemblyvalue
|
|
return value;
|
|
},
|
|
readValueFromPointer: floatReadValueFromPointer(name, size),
|
|
destructorFunction: null
|
|
});
|
|
};
|
|
|
|
var integerReadValueFromPointer = (name, width, signed) => {
|
|
// integers are quite common, so generate very specialized functions
|
|
switch (width) {
|
|
case 1:
|
|
return signed ? pointer => HEAP8[pointer] : pointer => HEAPU8[pointer];
|
|
|
|
case 2:
|
|
return signed ? pointer => HEAP16[((pointer) >> 1)] : pointer => HEAPU16[((pointer) >> 1)];
|
|
|
|
case 4:
|
|
return signed ? pointer => HEAP32[((pointer) >> 2)] : pointer => HEAPU32[((pointer) >> 2)];
|
|
|
|
default:
|
|
throw new TypeError(`invalid integer width (${width}): ${name}`);
|
|
}
|
|
};
|
|
|
|
var assertIntegerRange = (typeName, value, minRange, maxRange) => {
|
|
if (value < minRange || value > maxRange) {
|
|
throw new TypeError(`Passing a number "${embindRepr(value)}" from JS side to C/C++ side to an argument of type "${typeName}", which is outside the valid range [${minRange}, ${maxRange}]!`);
|
|
}
|
|
};
|
|
|
|
/** @suppress {globalThis} */ var __embind_register_integer = (primitiveType, name, size, minRange, maxRange) => {
|
|
name = AsciiToString(name);
|
|
const isUnsignedType = minRange === 0;
|
|
let fromWireType = value => value;
|
|
if (isUnsignedType) {
|
|
var bitshift = 32 - 8 * size;
|
|
fromWireType = value => (value << bitshift) >>> bitshift;
|
|
maxRange = fromWireType(maxRange);
|
|
}
|
|
registerType(primitiveType, {
|
|
name,
|
|
fromWireType,
|
|
toWireType: (destructors, value) => {
|
|
if (typeof value != "number" && typeof value != "boolean") {
|
|
throw new TypeError(`Cannot convert "${embindRepr(value)}" to ${name}`);
|
|
}
|
|
assertIntegerRange(name, value, minRange, maxRange);
|
|
// The VM will perform JS to Wasm value conversion, according to the spec:
|
|
// https://www.w3.org/TR/wasm-js-api-1/#towebassemblyvalue
|
|
return value;
|
|
},
|
|
readValueFromPointer: integerReadValueFromPointer(name, size, minRange !== 0),
|
|
destructorFunction: null
|
|
});
|
|
};
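// Illustrative sketch (helper and values below are only for exposition, not
// used by the runtime): for unsigned integer types the wire value arrives as
// a signed 32-bit number, so the fromWireType above masks it back into the
// unsigned range with a shift pair; e.g. for uint8_t (size 1) bitshift is 24.
function unsignedFromWireSketch(value, size) {
  var bitshift = 32 - 8 * size;       // same computation as in __embind_register_integer
  return (value << bitshift) >>> bitshift;
}
// unsignedFromWireSketch(-1, 1) === 255, unsignedFromWireSketch(-1, 2) === 65535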
|
|
|
|
var __embind_register_memory_view = (rawType, dataTypeIndex, name) => {
|
|
var typeMapping = [ Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array, Float64Array ];
|
|
var TA = typeMapping[dataTypeIndex];
|
|
function decodeMemoryView(handle) {
|
|
var size = HEAPU32[((handle) >> 2)];
|
|
var data = HEAPU32[(((handle) + (4)) >> 2)];
|
|
return new TA(HEAP8.buffer, data, size);
|
|
}
|
|
name = AsciiToString(name);
|
|
registerType(rawType, {
|
|
name,
|
|
fromWireType: decodeMemoryView,
|
|
readValueFromPointer: decodeMemoryView
|
|
}, {
|
|
ignoreDuplicateRegistrations: true
|
|
});
|
|
};
|
|
|
|
var __embind_register_std_string = (rawType, name) => {
|
|
name = AsciiToString(name);
|
|
var stdStringIsUTF8 = true;
|
|
registerType(rawType, {
|
|
name,
|
|
// For some method names we use string keys here since they are part of
|
|
// the public/external API and/or used by the runtime-generated code.
|
|
fromWireType(value) {
|
|
var length = HEAPU32[((value) >> 2)];
|
|
var payload = value + 4;
|
|
var str;
|
|
if (stdStringIsUTF8) {
|
|
str = UTF8ToString(payload, length, true);
|
|
} else {
|
|
str = "";
|
|
for (var i = 0; i < length; ++i) {
|
|
str += String.fromCharCode(HEAPU8[payload + i]);
|
|
}
|
|
}
|
|
_free(value);
|
|
return str;
|
|
},
|
|
toWireType(destructors, value) {
|
|
if (value instanceof ArrayBuffer) {
|
|
value = new Uint8Array(value);
|
|
}
|
|
var length;
|
|
var valueIsOfTypeString = (typeof value == "string");
|
|
// We accept `string` or array views with single byte elements
|
|
if (!(valueIsOfTypeString || (ArrayBuffer.isView(value) && value.BYTES_PER_ELEMENT == 1))) {
|
|
throwBindingError("Cannot pass non-string to std::string");
|
|
}
|
|
if (stdStringIsUTF8 && valueIsOfTypeString) {
|
|
length = lengthBytesUTF8(value);
|
|
} else {
|
|
length = value.length;
|
|
}
|
|
// assumes POINTER_SIZE alignment
|
|
var base = _malloc(4 + length + 1);
|
|
var ptr = base + 4;
|
|
HEAPU32[((base) >> 2)] = length;
|
|
if (valueIsOfTypeString) {
|
|
if (stdStringIsUTF8) {
|
|
stringToUTF8(value, ptr, length + 1);
|
|
} else {
|
|
for (var i = 0; i < length; ++i) {
|
|
var charCode = value.charCodeAt(i);
|
|
if (charCode > 255) {
|
|
_free(base);
|
|
throwBindingError("String has UTF-16 code units that do not fit in 8 bits");
|
|
}
|
|
HEAPU8[ptr + i] = charCode;
|
|
}
|
|
}
|
|
} else {
|
|
HEAPU8.set(value, ptr);
|
|
}
|
|
if (destructors !== null) {
|
|
destructors.push(_free, base);
|
|
}
|
|
return base;
|
|
},
|
|
readValueFromPointer: readPointer,
|
|
destructorFunction(ptr) {
|
|
_free(ptr);
|
|
}
|
|
});
|
|
};
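// Wire-format sketch (illustrative only, not used by the runtime): a
// std::string crosses the JS/Wasm boundary as a malloc'd block of
// [u32 byte length][payload bytes...], so a manual decode of such a block
// mirrors the fromWireType above (minus the _free):
function decodeStdStringBlockSketch(ptr) {
  var len = HEAPU32[ptr >> 2];             // leading 32-bit length
  return UTF8ToString(ptr + 4, len, true); // payload starts 4 bytes in
}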
|
|
|
|
var UTF16Decoder = new TextDecoder("utf-16le");
|
|
|
|
var UTF16ToString = (ptr, maxBytesToRead, ignoreNul) => {
|
|
assert(ptr % 2 == 0, "Pointer passed to UTF16ToString must be aligned to two bytes!");
|
|
var idx = ((ptr) >> 1);
|
|
var endIdx = findStringEnd(HEAPU16, idx, maxBytesToRead / 2, ignoreNul);
|
|
return UTF16Decoder.decode(HEAPU16.slice(idx, endIdx));
|
|
};
|
|
|
|
var stringToUTF16 = (str, outPtr, maxBytesToWrite) => {
|
|
assert(outPtr % 2 == 0, "Pointer passed to stringToUTF16 must be aligned to two bytes!");
|
|
assert(typeof maxBytesToWrite == "number", "stringToUTF16(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!");
|
|
// Backwards compatibility: if max bytes is not specified, assume unsafe unbounded write is allowed.
|
|
maxBytesToWrite ??= 2147483647;
|
|
if (maxBytesToWrite < 2) return 0;
|
|
maxBytesToWrite -= 2;
|
|
// Null terminator.
|
|
var startPtr = outPtr;
|
|
var numCharsToWrite = (maxBytesToWrite < str.length * 2) ? (maxBytesToWrite / 2) : str.length;
|
|
for (var i = 0; i < numCharsToWrite; ++i) {
|
|
// charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP.
|
|
var codeUnit = str.charCodeAt(i);
|
|
// possibly a lead surrogate
|
|
HEAP16[((outPtr) >> 1)] = codeUnit;
|
|
outPtr += 2;
|
|
}
|
|
// Null-terminate the pointer to the HEAP.
|
|
HEAP16[((outPtr) >> 1)] = 0;
|
|
return outPtr - startPtr;
|
|
};
|
|
|
|
var lengthBytesUTF16 = str => str.length * 2;
|
|
|
|
var UTF32ToString = (ptr, maxBytesToRead, ignoreNul) => {
|
|
assert(ptr % 4 == 0, "Pointer passed to UTF32ToString must be aligned to four bytes!");
|
|
var str = "";
|
|
var startIdx = ((ptr) >> 2);
|
|
// If maxBytesToRead is not passed explicitly, it will be undefined, and this
|
|
// will always evaluate to true. This saves on code size.
|
|
for (var i = 0; !(i >= maxBytesToRead / 4); i++) {
|
|
var utf32 = HEAPU32[startIdx + i];
|
|
if (!utf32 && !ignoreNul) break;
|
|
str += String.fromCodePoint(utf32);
|
|
}
|
|
return str;
|
|
};
|
|
|
|
var stringToUTF32 = (str, outPtr, maxBytesToWrite) => {
|
|
assert(outPtr % 4 == 0, "Pointer passed to stringToUTF32 must be aligned to four bytes!");
|
|
assert(typeof maxBytesToWrite == "number", "stringToUTF32(str, outPtr, maxBytesToWrite) is missing the third parameter that specifies the length of the output buffer!");
|
|
// Backwards compatibility: if max bytes is not specified, assume unsafe unbounded write is allowed.
|
|
maxBytesToWrite ??= 2147483647;
|
|
if (maxBytesToWrite < 4) return 0;
|
|
var startPtr = outPtr;
|
|
var endPtr = startPtr + maxBytesToWrite - 4;
|
|
for (var i = 0; i < str.length; ++i) {
|
|
var codePoint = str.codePointAt(i);
|
|
// Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16.
|
|
// We need to manually skip over the second code unit for correct iteration.
|
|
if (codePoint > 65535) {
|
|
i++;
|
|
}
|
|
HEAP32[((outPtr) >> 2)] = codePoint;
|
|
outPtr += 4;
|
|
if (outPtr + 4 > endPtr) break;
|
|
}
|
|
// Null-terminate the pointer to the HEAP.
|
|
HEAP32[((outPtr) >> 2)] = 0;
|
|
return outPtr - startPtr;
|
|
};
|
|
|
|
var lengthBytesUTF32 = str => {
|
|
var len = 0;
|
|
for (var i = 0; i < str.length; ++i) {
|
|
var codePoint = str.codePointAt(i);
|
|
// Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16.
|
|
// We need to manually skip over the second code unit for correct iteration.
|
|
if (codePoint > 65535) {
|
|
i++;
|
|
}
|
|
len += 4;
|
|
}
|
|
return len;
|
|
};
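// Worked example (illustrative only, never called): UTF-32 stores every code
// point in 4 bytes, so the byte length tracks code points rather than UTF-16
// code units.
function utf32LengthSketch() {
  var s = "a\u{1F600}";                      // 1 BMP char + 1 astral char (a surrogate pair in JS)
  return [ s.length, lengthBytesUTF32(s) ];  // [ 3, 8 ]: 3 UTF-16 code units, 2 code points * 4 bytes
}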
|
|
|
|
var __embind_register_std_wstring = (rawType, charSize, name) => {
|
|
name = AsciiToString(name);
|
|
var decodeString, encodeString, lengthBytesUTF;
|
|
if (charSize === 2) {
|
|
decodeString = UTF16ToString;
|
|
encodeString = stringToUTF16;
|
|
lengthBytesUTF = lengthBytesUTF16;
|
|
} else {
|
|
assert(charSize === 4, "only 2-byte and 4-byte strings are currently supported");
|
|
decodeString = UTF32ToString;
|
|
encodeString = stringToUTF32;
|
|
lengthBytesUTF = lengthBytesUTF32;
|
|
}
|
|
registerType(rawType, {
|
|
name,
|
|
fromWireType: value => {
|
|
// Code mostly taken from _embind_register_std_string fromWireType
|
|
var length = HEAPU32[((value) >> 2)];
|
|
var str = decodeString(value + 4, length * charSize, true);
|
|
_free(value);
|
|
return str;
|
|
},
|
|
toWireType: (destructors, value) => {
|
|
if (!(typeof value == "string")) {
|
|
throwBindingError(`Cannot pass non-string to C++ string type ${name}`);
|
|
}
|
|
// assumes POINTER_SIZE alignment
|
|
var length = lengthBytesUTF(value);
|
|
var ptr = _malloc(4 + length + charSize);
|
|
HEAPU32[((ptr) >> 2)] = length / charSize;
|
|
encodeString(value, ptr + 4, length + charSize);
|
|
if (destructors !== null) {
|
|
destructors.push(_free, ptr);
|
|
}
|
|
return ptr;
|
|
},
|
|
readValueFromPointer: readPointer,
|
|
destructorFunction(ptr) {
|
|
_free(ptr);
|
|
}
|
|
});
|
|
};
|
|
|
|
var __embind_register_void = (rawType, name) => {
|
|
name = AsciiToString(name);
|
|
registerType(rawType, {
|
|
isVoid: true,
|
|
// void return values can be optimized out sometimes
|
|
name,
|
|
fromWireType: () => undefined,
|
|
// TODO: assert if anything else is given?
|
|
toWireType: (destructors, o) => undefined
|
|
});
|
|
};
|
|
|
|
var __emscripten_init_main_thread_js = tb => {
|
|
// Pass the thread address to the native code where it is stored in wasm
|
|
// globals which act as a form of TLS. Global constructors trying
|
|
// to access this value will read the wrong value, but that is UB anyway.
|
|
__emscripten_thread_init(tb, /*is_main=*/ !ENVIRONMENT_IS_WORKER, /*is_runtime=*/ 1, /*can_block=*/ !ENVIRONMENT_IS_WEB, /*default_stacksize=*/ 65536, /*start_profiling=*/ false);
|
|
PThread.threadInitTLS();
|
|
};
|
|
|
|
var handleException = e => {
|
|
// Certain exception types we do not treat as errors since they are used for
|
|
// internal control flow.
|
|
// 1. ExitStatus, which is thrown by exit()
|
|
// 2. "unwind", which is thrown by emscripten_unwind_to_js_event_loop() and others
|
|
// that wish to return to JS event loop.
|
|
if (e instanceof ExitStatus || e == "unwind") {
|
|
return EXITSTATUS;
|
|
}
|
|
checkStackCookie();
|
|
if (e instanceof WebAssembly.RuntimeError) {
|
|
if (_emscripten_stack_get_current() <= 0) {
|
|
err("Stack overflow detected. You can try increasing -sSTACK_SIZE (currently set to 65536)");
|
|
}
|
|
}
|
|
quit_(1, e);
|
|
};
|
|
|
|
var maybeExit = () => {
|
|
if (!keepRuntimeAlive()) {
|
|
try {
|
|
if (ENVIRONMENT_IS_PTHREAD) {
|
|
// exit the current thread, but only if there is one active.
|
|
// TODO(https://github.com/emscripten-core/emscripten/issues/25076):
|
|
// Unify this check with the runtimeExited check above
|
|
if (_pthread_self()) __emscripten_thread_exit(EXITSTATUS);
|
|
return;
|
|
}
|
|
_exit(EXITSTATUS);
|
|
} catch (e) {
|
|
handleException(e);
|
|
}
|
|
}
|
|
};
|
|
|
|
var callUserCallback = func => {
|
|
if (ABORT) {
|
|
err("user callback triggered after runtime exited or application aborted. Ignoring.");
|
|
return;
|
|
}
|
|
try {
|
|
func();
|
|
maybeExit();
|
|
} catch (e) {
|
|
handleException(e);
|
|
}
|
|
};
|
|
|
|
var waitAsyncPolyfilled = (!Atomics.waitAsync || (globalThis.navigator?.userAgent && Number((navigator.userAgent.match(/Chrom(e|ium)\/([0-9]+)\./) || [])[2]) < 91));
|
|
|
|
var __emscripten_thread_mailbox_await = pthread_ptr => {
|
|
if (!waitAsyncPolyfilled) {
|
|
// Wait on the pthread's initial self-pointer field because it is easy and
|
|
// safe to access from sending threads that need to notify the waiting
|
|
// thread.
|
|
// TODO: How to make this work with wasm64?
|
|
var wait = Atomics.waitAsync(HEAP32, ((pthread_ptr) >> 2), pthread_ptr);
|
|
assert(wait.async);
|
|
wait.value.then(checkMailbox);
|
|
var waitingAsync = pthread_ptr + 128;
|
|
Atomics.store(HEAP32, ((waitingAsync) >> 2), 1);
|
|
}
|
|
};
|
|
|
|
var checkMailbox = () => callUserCallback(() => {
|
|
// Only check the mailbox if we have a live pthread runtime. We implement
|
|
// pthread_self to return 0 if there is no live runtime.
|
|
// TODO(https://github.com/emscripten-core/emscripten/issues/25076):
|
|
// Is this check still needed? `callUserCallback` is supposed to
|
|
// ensure the runtime is alive, and if `_pthread_self` is NULL then the
|
|
// runtime certainly is *not* alive, so this should be a redundant check.
|
|
var pthread_ptr = _pthread_self();
|
|
if (pthread_ptr) {
|
|
// If we are using Atomics.waitAsync as our notification mechanism, wait
|
|
// for a notification before processing the mailbox to avoid missing any
|
|
// work that could otherwise arrive after we've finished processing the
|
|
// mailbox and before we're ready for the next notification.
|
|
__emscripten_thread_mailbox_await(pthread_ptr);
|
|
__emscripten_check_mailbox();
|
|
}
|
|
});
|
|
|
|
var __emscripten_notify_mailbox_postmessage = (targetThread, currThreadId) => {
|
|
if (targetThread == currThreadId) {
|
|
setTimeout(checkMailbox);
|
|
} else if (ENVIRONMENT_IS_PTHREAD) {
|
|
postMessage({
|
|
targetThread,
|
|
cmd: "checkMailbox"
|
|
});
|
|
} else {
|
|
var worker = PThread.pthreads[targetThread];
|
|
if (!worker) {
|
|
err(`Cannot send message to thread with ID ${targetThread}, unknown thread ID!`);
|
|
return;
|
|
}
|
|
worker.postMessage({
|
|
cmd: "checkMailbox"
|
|
});
|
|
}
|
|
};
|
|
|
|
var proxiedJSCallArgs = [];
|
|
|
|
var __emscripten_receive_on_main_thread_js = (funcIndex, emAsmAddr, callingThread, numCallArgs, args) => {
|
|
// Sometimes we need to backproxy events to the calling thread (e.g.
|
|
// HTML5 DOM events handlers such as
|
|
// emscripten_set_mousemove_callback()), so keep track in a globally
|
|
// accessible variable about the thread that initiated the proxying.
|
|
proxiedJSCallArgs.length = numCallArgs;
|
|
var b = ((args) >> 3);
|
|
for (var i = 0; i < numCallArgs; i++) {
|
|
proxiedJSCallArgs[i] = HEAPF64[b + i];
|
|
}
|
|
// Proxied JS library funcs use funcIndex and EM_ASM functions use emAsmAddr
|
|
assert(!emAsmAddr);
|
|
var func = proxiedFunctionTable[funcIndex];
|
|
assert(!(funcIndex && emAsmAddr));
|
|
assert(func.length == numCallArgs, "Call args mismatch in _emscripten_receive_on_main_thread_js");
|
|
PThread.currentProxiedOperationCallerThread = callingThread;
|
|
var rtn = func(...proxiedJSCallArgs);
|
|
PThread.currentProxiedOperationCallerThread = 0;
|
|
// Proxied functions can return any type except bigint. All other types
|
|
// coerce to f64/double (the return type of this function in C) but not
|
|
// bigint.
|
|
assert(typeof rtn != "bigint");
|
|
return rtn;
|
|
};
|
|
|
|
var __emscripten_thread_cleanup = thread => {
|
|
// Called when a thread needs to be cleaned up so it can be reused.
|
|
// A thread is considered reusable when it either returns from its
|
|
// entry point, calls pthread_exit, or acts upon a cancellation.
|
|
// Detached threads are responsible for calling this themselves,
|
|
// otherwise pthread_join is responsible for calling this.
|
|
if (!ENVIRONMENT_IS_PTHREAD) cleanupThread(thread); else postMessage({
|
|
cmd: "cleanupThread",
|
|
thread
|
|
});
|
|
};
|
|
|
|
var __emscripten_thread_set_strongref = thread => {
|
|
// Called when a thread needs to be strongly referenced.
|
|
// Currently only used for:
|
|
// - keeping the "main" thread alive in PROXY_TO_PTHREAD mode;
|
|
// - crashed threads that need to propagate the uncaught exception
|
|
// back to the main thread.
|
|
if (ENVIRONMENT_IS_NODE) {
|
|
PThread.pthreads[thread].ref();
|
|
}
|
|
};
|
|
|
|
function __mmap_js(len, prot, flags, fd, offset_low, offset_high, allocated, addr) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(14, 0, 1, len, prot, flags, fd, offset_low, offset_high, allocated, addr);
|
|
var offset = convertI32PairToI53Checked(offset_low, offset_high);
|
|
try {
|
|
// musl's mmap doesn't allow values over a certain limit
|
|
// see OFF_MASK in mmap.c.
|
|
assert(!isNaN(offset));
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
var res = FS.mmap(stream, len, offset, prot, flags);
|
|
var ptr = res.ptr;
|
|
HEAP32[((allocated) >> 2)] = res.allocated;
|
|
HEAPU32[((addr) >> 2)] = ptr;
|
|
return 0;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
function __munmap_js(addr, len, prot, flags, fd, offset_low, offset_high) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(15, 0, 1, addr, len, prot, flags, fd, offset_low, offset_high);
|
|
var offset = convertI32PairToI53Checked(offset_low, offset_high);
|
|
try {
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
if (prot & 2) {
|
|
SYSCALLS.doMsync(addr, stream, len, flags, offset);
|
|
}
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return -e.errno;
|
|
}
|
|
}
|
|
|
|
var __tzset_js = (timezone, daylight, std_name, dst_name) => {
|
|
// TODO: Use (malleable) environment variables instead of system settings.
|
|
var currentYear = (new Date).getFullYear();
|
|
var winter = new Date(currentYear, 0, 1);
|
|
var summer = new Date(currentYear, 6, 1);
|
|
var winterOffset = winter.getTimezoneOffset();
|
|
var summerOffset = summer.getTimezoneOffset();
|
|
// Local standard timezone offset. Local standard time is not adjusted for
|
|
// daylight savings. This code uses the fact that getTimezoneOffset returns
|
|
// a greater value during Standard Time versus Daylight Saving Time (DST).
|
|
// Thus it determines the expected output during Standard Time, and it
|
|
// compares whether the output of the given date is the same (Standard) or less
|
|
// (DST).
|
|
var stdTimezoneOffset = Math.max(winterOffset, summerOffset);
|
|
// timezone is specified as seconds west of UTC ("The external variable
|
|
// `timezone` shall be set to the difference, in seconds, between
|
|
// Coordinated Universal Time (UTC) and local standard time."), the same
|
|
// as returned by stdTimezoneOffset.
|
|
// See http://pubs.opengroup.org/onlinepubs/009695399/functions/tzset.html
|
|
HEAPU32[((timezone) >> 2)] = stdTimezoneOffset * 60;
|
|
HEAP32[((daylight) >> 2)] = Number(winterOffset != summerOffset);
|
|
var extractZone = timezoneOffset => {
|
|
// Why inverse sign?
|
|
// Read here https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/getTimezoneOffset
|
|
var sign = timezoneOffset >= 0 ? "-" : "+";
|
|
var absOffset = Math.abs(timezoneOffset);
|
|
var hours = String(Math.floor(absOffset / 60)).padStart(2, "0");
|
|
var minutes = String(absOffset % 60).padStart(2, "0");
|
|
return `UTC${sign}${hours}${minutes}`;
|
|
};
|
|
var winterName = extractZone(winterOffset);
|
|
var summerName = extractZone(summerOffset);
|
|
assert(winterName);
|
|
assert(summerName);
|
|
assert(lengthBytesUTF8(winterName) <= 16, `timezone name truncated to fit in TZNAME_MAX (${winterName})`);
|
|
assert(lengthBytesUTF8(summerName) <= 16, `timezone name truncated to fit in TZNAME_MAX (${summerName})`);
|
|
if (summerOffset < winterOffset) {
|
|
// Northern hemisphere
|
|
stringToUTF8(winterName, std_name, 17);
|
|
stringToUTF8(summerName, dst_name, 17);
|
|
} else {
|
|
stringToUTF8(winterName, dst_name, 17);
|
|
stringToUTF8(summerName, std_name, 17);
|
|
}
|
|
};
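// Illustrative sketch of the extractZone naming above (this helper is
// hypothetical and unused): getTimezoneOffset() is positive west of UTC, so
// the sign is inverted when building the zone name.
function tzNameSketch(offsetMinutes) {
  var sign = offsetMinutes >= 0 ? "-" : "+";
  var abs = Math.abs(offsetMinutes);
  return `UTC${sign}${String(Math.floor(abs / 60)).padStart(2, "0")}${String(abs % 60).padStart(2, "0")}`;
}
// tzNameSketch(480) === "UTC-0800" (8 hours west), tzNameSketch(-330) === "UTC+0530" (5.5 hours east)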
|
|
|
|
var _emscripten_get_now = () => performance.timeOrigin + performance.now();
|
|
|
|
var _emscripten_date_now = () => Date.now();
|
|
|
|
var nowIsMonotonic = 1;
|
|
|
|
var checkWasiClock = clock_id => clock_id >= 0 && clock_id <= 3;
|
|
|
|
function _clock_time_get(clk_id, ignored_precision_low, ignored_precision_high, ptime) {
|
|
var ignored_precision = convertI32PairToI53Checked(ignored_precision_low, ignored_precision_high);
|
|
if (!checkWasiClock(clk_id)) {
|
|
return 28;
|
|
}
|
|
var now;
|
|
// all wasi clocks but realtime are monotonic
|
|
if (clk_id === 0) {
|
|
now = _emscripten_date_now();
|
|
} else if (nowIsMonotonic) {
|
|
now = _emscripten_get_now();
|
|
} else {
|
|
return 52;
|
|
}
|
|
// "now" is in ms, and wasi times are in ns.
|
|
var nsec = Math.round(now * 1e3 * 1e3);
|
|
(tempI64 = [ nsec >>> 0, (tempDouble = nsec, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[((ptime) >> 2)] = tempI64[0], HEAP32[(((ptime) + (4)) >> 2)] = tempI64[1]);
|
|
return 0;
|
|
}
|
|
|
|
var _emscripten_check_blocking_allowed = () => {
|
|
if (ENVIRONMENT_IS_NODE) return;
|
|
if (ENVIRONMENT_IS_WORKER) return;
|
|
// Blocking in a worker/pthread is fine.
|
|
warnOnce("Blocking on the main thread is very dangerous, see https://emscripten.org/docs/porting/pthreads.html#blocking-on-the-main-browser-thread");
|
|
};
|
|
|
|
var _emscripten_errn = (str, len) => err(UTF8ToString(str, len));
|
|
|
|
var runtimeKeepalivePush = () => {
|
|
runtimeKeepaliveCounter += 1;
|
|
};
|
|
|
|
var _emscripten_exit_with_live_runtime = () => {
|
|
runtimeKeepalivePush();
|
|
throw "unwind";
|
|
};
|
|
|
|
var getHeapMax = () => HEAPU8.length;
|
|
|
|
var _emscripten_get_heap_max = () => getHeapMax();
|
|
|
|
var _emscripten_num_logical_cores = () => ENVIRONMENT_IS_NODE ? require("os").cpus().length : navigator["hardwareConcurrency"];
|
|
|
|
var UNWIND_CACHE = {};
|
|
|
|
var stringToNewUTF8 = str => {
|
|
var size = lengthBytesUTF8(str) + 1;
|
|
var ret = _malloc(size);
|
|
if (ret) stringToUTF8(str, ret, size);
|
|
return ret;
|
|
};
|
|
|
|
/** @returns {number} */ var convertFrameToPC = frame => {
|
|
var match;
|
|
if (match = /\bwasm-function\[\d+\]:(0x[0-9a-f]+)/.exec(frame)) {
|
|
// Wasm engines give the binary offset directly, so we use that as return address
|
|
return +match[1];
|
|
} else if (match = /\bwasm-function\[(\d+)\]:(\d+)/.exec(frame)) {
|
|
// Older versions of v8 (e.g node v10) give function index and offset in
|
|
// the function. That format is not supported since it does not provide
|
|
// the information we need to map the frame to a global program counter.
|
|
warnOnce("legacy backtrace format detected, this version of v8 is no longer supported by the emscripten backtrace mechanism");
|
|
} else if (match = /:(\d+):\d+(?:\)|$)/.exec(frame)) {
|
|
// If we are in js, we can use the js line number as the "return address".
|
|
// This should work for wasm2js. We tag the high bit to distinguish this
|
|
// from wasm addresses.
|
|
return 2147483648 | +match[1];
|
|
}
|
|
// return 0 if we can't find any
|
|
return 0;
|
|
};
|
|
|
|
var saveInUnwindCache = callstack => {
|
|
for (var line of callstack) {
|
|
var pc = convertFrameToPC(line);
|
|
if (pc) {
|
|
UNWIND_CACHE[pc] = line;
|
|
}
|
|
}
|
|
};
|
|
|
|
var jsStackTrace = () => (new Error).stack.toString();
|
|
|
|
var _emscripten_stack_snapshot = () => {
|
|
var callstack = jsStackTrace().split("\n");
|
|
if (callstack[0] == "Error") {
|
|
callstack.shift();
|
|
}
|
|
saveInUnwindCache(callstack);
|
|
// Caches the stack snapshot so that emscripten_stack_unwind_buffer() can
|
|
// unwind from this spot.
|
|
UNWIND_CACHE.last_addr = convertFrameToPC(callstack[3]);
|
|
UNWIND_CACHE.last_stack = callstack;
|
|
return UNWIND_CACHE.last_addr;
|
|
};
|
|
|
|
var _emscripten_pc_get_function = pc => {
|
|
var frame = UNWIND_CACHE[pc];
|
|
if (!frame) return 0;
|
|
var name;
|
|
var match;
|
|
// First try to match foo.wasm.sym files explicitly. e.g.
|
|
// at test_return_address.wasm.main (wasm://wasm/test_return_address.wasm-0012cc2a:wasm-function[26]:0x9f3
|
|
// Then match JS symbols which don't include that module name:
|
|
// at invokeEntryPoint (.../test_return_address.js:1500:42)
|
|
// Finally match firefox format:
|
|
// Object._main@http://server.com:4324:12'
|
|
if (match = /^\s+at .*\.wasm\.(.*) \(.*\)$/.exec(frame)) {
|
|
name = match[1];
|
|
} else if (match = /^\s+at (.*) \(.*\)$/.exec(frame)) {
|
|
name = match[1];
|
|
} else if (match = /^(.+?)@/.exec(frame)) {
|
|
name = match[1];
|
|
} else {
|
|
return 0;
|
|
}
|
|
_free(_emscripten_pc_get_function.ret ?? 0);
|
|
_emscripten_pc_get_function.ret = stringToNewUTF8(name);
|
|
return _emscripten_pc_get_function.ret;
|
|
};
|
|
|
|
var abortOnCannotGrowMemory = requestedSize => {
|
|
abort(`Cannot enlarge memory arrays to size ${requestedSize} bytes (OOM). Either (1) compile with -sINITIAL_MEMORY=X with X higher than the current value ${HEAP8.length}, (2) compile with -sALLOW_MEMORY_GROWTH which allows increasing the size at runtime, or (3) if you want malloc to return NULL (0) instead of this abort, compile with -sABORTING_MALLOC=0`);
|
|
};
|
|
|
|
var _emscripten_resize_heap = requestedSize => {
|
|
var oldSize = HEAPU8.length;
|
|
// With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned.
|
|
requestedSize >>>= 0;
|
|
abortOnCannotGrowMemory(requestedSize);
|
|
};
|
|
|
|
var _emscripten_stack_unwind_buffer = (addr, buffer, count) => {
|
|
var stack;
|
|
if (UNWIND_CACHE.last_addr == addr) {
|
|
stack = UNWIND_CACHE.last_stack;
|
|
} else {
|
|
stack = jsStackTrace().split("\n");
|
|
if (stack[0] == "Error") {
|
|
stack.shift();
|
|
}
|
|
saveInUnwindCache(stack);
|
|
}
|
|
var offset = 3;
|
|
while (stack[offset] && convertFrameToPC(stack[offset]) != addr) {
|
|
++offset;
|
|
}
|
|
for (var i = 0; i < count && stack[i + offset]; ++i) {
|
|
HEAP32[(((buffer) + (i * 4)) >> 2)] = convertFrameToPC(stack[i + offset]);
|
|
}
|
|
return i;
|
|
};
|
|
|
|
var ENV = {};
|
|
|
|
var getExecutableName = () => thisProgram || "./this.program";
|
|
|
|
var getEnvStrings = () => {
|
|
if (!getEnvStrings.strings) {
|
|
// Default values.
|
|
// Browser language detection #8751
|
|
var lang = (globalThis.navigator?.language ?? "C").replace("-", "_") + ".UTF-8";
|
|
var env = {
|
|
"USER": "web_user",
|
|
"LOGNAME": "web_user",
|
|
"PATH": "/",
|
|
"PWD": "/",
|
|
"HOME": "/home/web_user",
|
|
"LANG": lang,
|
|
"_": getExecutableName()
|
|
};
|
|
// Apply the user-provided values, if any.
|
|
for (var x in ENV) {
|
|
// x is a key in ENV; if ENV[x] is undefined, that means it was
|
|
// explicitly set to be so. We allow user code to do that to
|
|
// force variables with default values to remain unset.
|
|
if (ENV[x] === undefined) delete env[x]; else env[x] = ENV[x];
|
|
}
|
|
var strings = [];
|
|
for (var x in env) {
|
|
strings.push(`${x}=${env[x]}`);
|
|
}
|
|
getEnvStrings.strings = strings;
|
|
}
|
|
return getEnvStrings.strings;
|
|
};
|
|
|
|
function _environ_get(__environ, environ_buf) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(16, 0, 1, __environ, environ_buf);
|
|
var bufSize = 0;
|
|
var envp = 0;
|
|
for (var string of getEnvStrings()) {
|
|
var ptr = environ_buf + bufSize;
|
|
HEAPU32[(((__environ) + (envp)) >> 2)] = ptr;
|
|
bufSize += stringToUTF8(string, ptr, Infinity) + 1;
|
|
envp += 4;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
function _environ_sizes_get(penviron_count, penviron_buf_size) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(17, 0, 1, penviron_count, penviron_buf_size);
|
|
var strings = getEnvStrings();
|
|
HEAPU32[((penviron_count) >> 2)] = strings.length;
|
|
var bufSize = 0;
|
|
for (var string of strings) {
|
|
bufSize += lengthBytesUTF8(string) + 1;
|
|
}
|
|
HEAPU32[((penviron_buf_size) >> 2)] = bufSize;
|
|
return 0;
|
|
}
|
|
|
|
function _fd_close(fd) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(18, 0, 1, fd);
|
|
try {
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
FS.close(stream);
|
|
return 0;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return e.errno;
|
|
}
|
|
}
|
|
|
|
/** @param {number=} offset */ var doReadv = (stream, iov, iovcnt, offset) => {
|
|
var ret = 0;
|
|
for (var i = 0; i < iovcnt; i++) {
|
|
var ptr = HEAPU32[((iov) >> 2)];
|
|
var len = HEAPU32[(((iov) + (4)) >> 2)];
|
|
iov += 8;
|
|
var curr = FS.read(stream, HEAP8, ptr, len, offset);
|
|
if (curr < 0) return -1;
|
|
ret += curr;
|
|
if (curr < len) break;
|
|
// nothing more to read
|
|
if (typeof offset != "undefined") {
|
|
offset += curr;
|
|
}
|
|
}
|
|
return ret;
|
|
};
|
|
|
|
function _fd_read(fd, iov, iovcnt, pnum) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(19, 0, 1, fd, iov, iovcnt, pnum);
|
|
try {
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
var num = doReadv(stream, iov, iovcnt);
|
|
HEAPU32[((pnum) >> 2)] = num;
|
|
return 0;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return e.errno;
|
|
}
|
|
}
|
|
|
|
function _fd_seek(fd, offset_low, offset_high, whence, newOffset) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(20, 0, 1, fd, offset_low, offset_high, whence, newOffset);
|
|
var offset = convertI32PairToI53Checked(offset_low, offset_high);
|
|
try {
|
|
if (isNaN(offset)) return 61;
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
FS.llseek(stream, offset, whence);
|
|
(tempI64 = [ stream.position >>> 0, (tempDouble = stream.position, (+(Math.abs(tempDouble))) >= 1 ? (tempDouble > 0 ? (+(Math.floor((tempDouble) / 4294967296))) >>> 0 : (~~((+(Math.ceil((tempDouble - +(((~~(tempDouble))) >>> 0)) / 4294967296))))) >>> 0) : 0) ],
|
|
HEAP32[((newOffset) >> 2)] = tempI64[0], HEAP32[(((newOffset) + (4)) >> 2)] = tempI64[1]);
|
|
if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null;
|
|
// reset readdir state
|
|
return 0;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return e.errno;
|
|
}
|
|
}
|
|
|
|
/** @param {number=} offset */ var doWritev = (stream, iov, iovcnt, offset) => {
|
|
var ret = 0;
|
|
for (var i = 0; i < iovcnt; i++) {
|
|
var ptr = HEAPU32[((iov) >> 2)];
|
|
var len = HEAPU32[(((iov) + (4)) >> 2)];
|
|
iov += 8;
|
|
var curr = FS.write(stream, HEAP8, ptr, len, offset);
|
|
if (curr < 0) return -1;
|
|
ret += curr;
|
|
if (curr < len) {
|
|
// No more space to write.
|
|
break;
|
|
}
|
|
if (typeof offset != "undefined") {
|
|
offset += curr;
|
|
}
|
|
}
|
|
return ret;
|
|
};
|
|
|
|
function _fd_write(fd, iov, iovcnt, pnum) {
|
|
if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(21, 0, 1, fd, iov, iovcnt, pnum);
|
|
try {
|
|
var stream = SYSCALLS.getStreamFromFD(fd);
|
|
var num = doWritev(stream, iov, iovcnt);
|
|
HEAPU32[((pnum) >> 2)] = num;
|
|
return 0;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return e.errno;
|
|
}
|
|
}
|
|
|
|
function _random_get(buffer, size) {
|
|
try {
|
|
randomFill(HEAPU8.subarray(buffer, buffer + size));
|
|
return 0;
|
|
} catch (e) {
|
|
if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
|
|
return e.errno;
|
|
}
|
|
}
|
|
|
|
var stringToUTF8OnStack = str => {
|
|
var size = lengthBytesUTF8(str) + 1;
|
|
var ret = stackAlloc(size);
|
|
stringToUTF8(str, ret, size);
|
|
return ret;
|
|
};
|
|
|
|
var ALLOC_STACK = 1;
|
|
|
|
var allocate = (slab, allocator) => {
|
|
var ret;
|
|
assert(typeof allocator == "number", "allocate no longer takes a type argument");
|
|
assert(typeof slab != "number", "allocate no longer takes a number as arg0");
|
|
if (allocator == ALLOC_STACK) {
|
|
ret = stackAlloc(slab.length);
|
|
} else {
|
|
ret = _malloc(slab.length);
|
|
}
|
|
if (!slab.subarray && !slab.slice) {
|
|
slab = new Uint8Array(slab);
|
|
}
|
|
HEAPU8.set(slab, ret);
|
|
return ret;
|
|
};
|
|
|
|
var ALLOC_NORMAL = 0;
|
|
|
|
var getCFunc = ident => {
|
|
var func = Module["_" + ident];
|
|
// closure exported function
|
|
assert(func, "Cannot call unknown function " + ident + ", make sure it is exported");
|
|
return func;
|
|
};
|
|
|
|
var writeArrayToMemory = (array, buffer) => {
|
|
assert(array.length >= 0, "writeArrayToMemory array must have a length (should be an array or typed array)");
|
|
HEAP8.set(array, buffer);
|
|
};
|
|
|
|
/**
|
|
* @param {string|null=} returnType
|
|
* @param {Array=} argTypes
|
|
* @param {Array=} args
|
|
* @param {Object=} opts
|
|
*/ var ccall = (ident, returnType, argTypes, args, opts) => {
|
|
// For fast lookup of conversion functions
|
|
var toC = {
|
|
"string": str => {
|
|
var ret = 0;
|
|
if (str !== null && str !== undefined && str !== 0) {
|
|
// null string
|
|
ret = stringToUTF8OnStack(str);
|
|
}
|
|
return ret;
|
|
},
|
|
"array": arr => {
|
|
var ret = stackAlloc(arr.length);
|
|
writeArrayToMemory(arr, ret);
|
|
return ret;
|
|
}
|
|
};
|
|
function convertReturnValue(ret) {
|
|
if (returnType === "string") {
|
|
return UTF8ToString(ret);
|
|
}
|
|
if (returnType === "boolean") return Boolean(ret);
|
|
return ret;
|
|
}
|
|
var func = getCFunc(ident);
|
|
var cArgs = [];
|
|
var stack = 0;
|
|
assert(returnType !== "array", 'Return type should not be "array".');
|
|
if (args) {
|
|
for (var i = 0; i < args.length; i++) {
|
|
var converter = toC[argTypes[i]];
|
|
if (converter) {
|
|
if (stack === 0) stack = stackSave();
|
|
cArgs[i] = converter(args[i]);
|
|
} else {
|
|
cArgs[i] = args[i];
|
|
}
|
|
}
|
|
}
|
|
var ret = func(...cArgs);
|
|
function onDone(ret) {
|
|
if (stack !== 0) stackRestore(stack);
|
|
return convertReturnValue(ret);
|
|
}
|
|
ret = onDone(ret);
|
|
return ret;
|
|
};
|
|
|
|
/**
|
|
* @param {string=} returnType
|
|
* @param {Array=} argTypes
|
|
* @param {Object=} opts
|
|
*/ var cwrap = (ident, returnType, argTypes, opts) => (...args) => ccall(ident, returnType, argTypes, args, opts);
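// Usage sketch (the export name "my_func" and its signature are hypothetical;
// this module's real exports are assigned further below): ccall performs a
// single call with argument conversion, while cwrap returns a reusable JS
// wrapper that converts on every call. Never invoked by the runtime.
function ccallUsageSketch() {
  var once = ccall("my_func", "number", [ "string", "number" ], [ "hello", 42 ]);
  var myFunc = cwrap("my_func", "number", [ "string", "number" ]);
  return [ once, myFunc("hello", 42) ];
}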
|
|
|
|
var FS_createPath = (...args) => FS.createPath(...args);
|
|
|
|
var FS_unlink = (...args) => FS.unlink(...args);
|
|
|
|
var FS_createLazyFile = (...args) => FS.createLazyFile(...args);
|
|
|
|
var FS_createDevice = (...args) => FS.createDevice(...args);
|
|
|
|
PThread.init();
|
|
|
|
FS.createPreloadedFile = FS_createPreloadedFile;
|
|
|
|
FS.preloadFile = FS_preloadFile;
|
|
|
|
FS.staticInit();
|
|
|
|
assert(emval_handles.length === 5 * 2);
|
|
|
|
// End JS library code
|
|
// include: postlibrary.js
|
|
// This file is included after the automatically-generated JS library code
|
|
// but before the wasm module is created.
|
|
{
|
|
// With WASM_ESM_INTEGRATION this has to happen at the top level and not
|
|
// delayed until processModuleArgs.
|
|
initMemory();
|
|
// Begin ATMODULES hooks
|
|
if (Module["noExitRuntime"]) noExitRuntime = Module["noExitRuntime"];
|
|
if (Module["preloadPlugins"]) preloadPlugins = Module["preloadPlugins"];
|
|
if (Module["print"]) out = Module["print"];
|
|
if (Module["printErr"]) err = Module["printErr"];
|
|
if (Module["wasmBinary"]) wasmBinary = Module["wasmBinary"];
|
|
// End ATMODULES hooks
|
|
checkIncomingModuleAPI();
|
|
if (Module["arguments"]) arguments_ = Module["arguments"];
|
|
if (Module["thisProgram"]) thisProgram = Module["thisProgram"];
|
|
// Assertions on removed incoming Module JS APIs.
|
|
assert(typeof Module["memoryInitializerPrefixURL"] == "undefined", "Module.memoryInitializerPrefixURL option was removed, use Module.locateFile instead");
|
|
assert(typeof Module["pthreadMainPrefixURL"] == "undefined", "Module.pthreadMainPrefixURL option was removed, use Module.locateFile instead");
|
|
assert(typeof Module["cdInitializerPrefixURL"] == "undefined", "Module.cdInitializerPrefixURL option was removed, use Module.locateFile instead");
|
|
assert(typeof Module["filePackagePrefixURL"] == "undefined", "Module.filePackagePrefixURL option was removed, use Module.locateFile instead");
|
|
assert(typeof Module["read"] == "undefined", "Module.read option was removed");
|
|
assert(typeof Module["readAsync"] == "undefined", "Module.readAsync option was removed (modify readAsync in JS)");
|
|
assert(typeof Module["readBinary"] == "undefined", "Module.readBinary option was removed (modify readBinary in JS)");
|
|
assert(typeof Module["setWindowTitle"] == "undefined", "Module.setWindowTitle option was removed (modify emscripten_set_window_title in JS)");
|
|
assert(typeof Module["TOTAL_MEMORY"] == "undefined", "Module.TOTAL_MEMORY has been renamed Module.INITIAL_MEMORY");
|
|
assert(typeof Module["ENVIRONMENT"] == "undefined", "Module.ENVIRONMENT has been deprecated. To force the environment, use the ENVIRONMENT compile-time option (for example, -sENVIRONMENT=web or -sENVIRONMENT=node)");
|
|
assert(typeof Module["STACK_SIZE"] == "undefined", "STACK_SIZE can no longer be set at runtime. Use -sSTACK_SIZE at link time");
|
|
if (Module["preInit"]) {
|
|
if (typeof Module["preInit"] == "function") Module["preInit"] = [ Module["preInit"] ];
|
|
while (Module["preInit"].length > 0) {
|
|
Module["preInit"].shift()();
|
|
}
|
|
}
|
|
consumedModuleProp("preInit");
|
|
}
|
|
|
|
// Begin runtime exports
|
|
Module["addRunDependency"] = addRunDependency;
|
|
|
|
Module["removeRunDependency"] = removeRunDependency;
|
|
|
|
Module["ccall"] = ccall;
|
|
|
|
Module["cwrap"] = cwrap;
|
|
|
|
Module["intArrayFromString"] = intArrayFromString;
|
|
|
|
Module["FS_preloadFile"] = FS_preloadFile;
|
|
|
|
Module["FS_unlink"] = FS_unlink;
|
|
|
|
Module["FS_createPath"] = FS_createPath;
|
|
|
|
Module["FS_createDevice"] = FS_createDevice;
|
|
|
|
Module["FS_createDataFile"] = FS_createDataFile;
|
|
|
|
Module["FS_createLazyFile"] = FS_createLazyFile;
|
|
|
|
Module["ALLOC_NORMAL"] = ALLOC_NORMAL;
|
|
|
|
Module["allocate"] = allocate;
|
|
|
|
Module["IDBFS"] = IDBFS;
|
|
|
|
var missingLibrarySymbols = [ "writeI53ToI64", "writeI53ToI64Clamped", "writeI53ToI64Signaling", "writeI53ToU64Clamped", "writeI53ToU64Signaling", "readI53FromI64", "readI53FromU64", "convertI32PairToI53", "convertU32PairToI53", "getTempRet0", "setTempRet0", "createNamedFunction", "growMemory", "withStackSave", "inetPton4", "inetNtop4", "inetPton6", "inetNtop6", "readSockaddr", "writeSockaddr", "readEmAsmArgs", "jstoi_q", "autoResumeAudioContext", "dynCallLegacy", "getDynCaller", "dynCall", "runtimeKeepalivePop", "asmjsMangle", "HandleAllocator", "addOnInit", "addOnPostCtor", "addOnPreMain", "addOnExit", "STACK_SIZE", "STACK_ALIGN", "POINTER_SIZE", "ASSERTIONS", "convertJsFunctionToWasm", "getEmptyTableSlot", "updateTableMap", "getFunctionAddress", "addFunction", "removeFunction", "intArrayToString", "stringToAscii", "registerKeyEventCallback", "findEventTarget", "findCanvasEventTarget", "getBoundingClientRect", "fillMouseEventData", "registerMouseEventCallback", "registerWheelEventCallback", "registerUiEventCallback", "registerFocusEventCallback", "fillDeviceOrientationEventData", "registerDeviceOrientationEventCallback", "fillDeviceMotionEventData", "registerDeviceMotionEventCallback", "screenOrientation", "fillOrientationChangeEventData", "registerOrientationChangeEventCallback", "fillFullscreenChangeEventData", "registerFullscreenChangeEventCallback", "JSEvents_requestFullscreen", "JSEvents_resizeCanvasForFullscreen", "registerRestoreOldStyle", "hideEverythingExceptGivenElement", "restoreHiddenElements", "setLetterbox", "softFullscreenResizeWebGLRenderTarget", "doRequestFullscreen", "fillPointerlockChangeEventData", "registerPointerlockChangeEventCallback", "registerPointerlockErrorEventCallback", "requestPointerLock", "fillVisibilityChangeEventData", "registerVisibilityChangeEventCallback", "registerTouchEventCallback", "fillGamepadEventData", "registerGamepadEventCallback", "registerBeforeUnloadEventCallback", "fillBatteryEventData", "registerBatteryEventCallback", "setCanvasElementSizeCallingThread", "setCanvasElementSizeMainThread", "setCanvasElementSize", "getCanvasSizeCallingThread", "getCanvasSizeMainThread", "getCanvasElementSize", "getCallstack", "convertPCtoSourceLocation", "wasiRightsToMuslOFlags", "wasiOFlagsToMuslOFlags", "safeSetTimeout", "setImmediateWrapped", "safeRequestAnimationFrame", "clearImmediateWrapped", "registerPostMainLoop", "registerPreMainLoop", "getPromise", "makePromise", "idsToPromises", "makePromiseCallback", "findMatchingCatch", "Browser_asyncPrepareDataCounter", "isLeapYear", "ydayFromDate", "arraySum", "addDays", "getSocketFromFD", "getSocketAddress", "FS_mkdirTree", "_setNetworkCallback", "heapObjectForWebGLType", "toTypedArrayIndex", "webgl_enable_ANGLE_instanced_arrays", "webgl_enable_OES_vertex_array_object", "webgl_enable_WEBGL_draw_buffers", "webgl_enable_WEBGL_multi_draw", "webgl_enable_EXT_polygon_offset_clamp", "webgl_enable_EXT_clip_control", "webgl_enable_WEBGL_polygon_mode", "emscriptenWebGLGet", "computeUnpackAlignedImageSize", "colorChannelsInGlTextureFormat", "emscriptenWebGLGetTexPixelData", "emscriptenWebGLGetUniform", "webglGetUniformLocation", "webglPrepareUniformLocationsBeforeFirstUse", "webglGetLeftBracePos", "emscriptenWebGLGetVertexAttrib", "__glGetActiveAttribOrUniform", "writeGLArray", "emscripten_webgl_destroy_context_before_on_calling_thread", "registerWebGlEventCallback", "runAndAbortIfError", "emscriptenWebGLGetIndexed", "webgl_enable_WEBGL_draw_instanced_base_vertex_base_instance", 
"webgl_enable_WEBGL_multi_draw_instanced_base_vertex_base_instance", "writeStringToMemory", "writeAsciiToMemory", "allocateUTF8", "allocateUTF8OnStack", "demangle", "stackTrace", "getNativeTypeSize", "throwInternalError", "whenDependentTypesAreResolved", "getTypeName", "getFunctionName", "getFunctionArgsName", "heap32VectorToArray", "requireRegisteredType", "usesDestructorStack", "createJsInvokerSignature", "checkArgCount", "getRequiredArgCount", "createJsInvoker", "UnboundTypeError", "PureVirtualError", "throwUnboundTypeError", "ensureOverloadTable", "exposePublicSymbol", "replacePublicSymbol", "getBasestPointer", "registerInheritedInstance", "unregisterInheritedInstance", "getInheritedInstance", "getInheritedInstanceCount", "getLiveInheritedInstances", "enumReadValueFromPointer", "runDestructors", "craftInvokerFunction", "embind__requireFunction", "genericPointerToWireType", "constNoSmartPtrRawPointerToWireType", "nonConstNoSmartPtrRawPointerToWireType", "init_RegisteredPointer", "RegisteredPointer", "RegisteredPointer_fromWireType", "runDestructor", "releaseClassHandle", "detachFinalizer", "attachFinalizer", "makeClassHandle", "init_ClassHandle", "ClassHandle", "throwInstanceAlreadyDeleted", "flushPendingDeletes", "setDelayFunction", "RegisteredClass", "shallowCopyInternalPointer", "downcastPointer", "upcastPointer", "validateThis", "char_0", "char_9", "makeLegalFunctionName", "count_emval_handles", "getStringOrSymbol", "emval_returnValue", "emval_lookupTypes", "emval_addMethodCaller" ];
|
|
|
|
missingLibrarySymbols.forEach(missingLibrarySymbol);
|
|
|
|
var unexportedSymbols = [ "run", "out", "err", "callMain", "abort", "wasmExports", "HEAPF32", "HEAPF64", "HEAP8", "HEAP16", "HEAPU16", "HEAP32", "HEAPU32", "HEAP64", "HEAPU64", "writeStackCookie", "checkStackCookie", "convertI32PairToI53Checked", "stackSave", "stackRestore", "stackAlloc", "ptrToString", "zeroMemory", "exitJS", "getHeapMax", "abortOnCannotGrowMemory", "ENV", "ERRNO_CODES", "strError", "DNS", "Protocols", "Sockets", "timers", "warnOnce", "readEmAsmArgsArray", "getExecutableName", "handleException", "keepRuntimeAlive", "runtimeKeepalivePush", "callUserCallback", "maybeExit", "asyncLoad", "alignMemory", "mmapAlloc", "wasmTable", "wasmMemory", "getUniqueRunDependency", "noExitRuntime", "addOnPreRun", "addOnPostRun", "freeTableIndexes", "functionsInTableMap", "setValue", "getValue", "PATH", "PATH_FS", "UTF8Decoder", "UTF8ArrayToString", "UTF8ToString", "stringToUTF8Array", "stringToUTF8", "lengthBytesUTF8", "AsciiToString", "UTF16Decoder", "UTF16ToString", "stringToUTF16", "lengthBytesUTF16", "UTF32ToString", "stringToUTF32", "lengthBytesUTF32", "stringToNewUTF8", "stringToUTF8OnStack", "writeArrayToMemory", "JSEvents", "specialHTMLTargets", "currentFullscreenStrategy", "restoreOldWindowedStyle", "jsStackTrace", "UNWIND_CACHE", "ExitStatus", "getEnvStrings", "checkWasiClock", "doReadv", "doWritev", "initRandomFill", "randomFill", "emSetImmediate", "emClearImmediate_deps", "emClearImmediate", "promiseMap", "uncaughtExceptionCount", "exceptionLast", "exceptionCaught", "ExceptionInfo", "Browser", "requestFullscreen", "requestFullScreen", "setCanvasSize", "getUserMedia", "createContext", "getPreloadedImageData__data", "wget", "MONTH_DAYS_REGULAR", "MONTH_DAYS_LEAP", "MONTH_DAYS_REGULAR_CUMULATIVE", "MONTH_DAYS_LEAP_CUMULATIVE", "SYSCALLS", "preloadPlugins", "FS_createPreloadedFile", "FS_modeStringToFlags", "FS_getMode", "FS_stdin_getChar_buffer", "FS_stdin_getChar", "FS_readFile", "FS_root", "FS_mounts", "FS_devices", "FS_streams", "FS_nextInode", "FS_nameTable", "FS_currentPath", "FS_initialized", "FS_ignorePermissions", "FS_filesystems", "FS_syncFSRequests", "FS_readFiles", "FS_lookupPath", "FS_getPath", "FS_hashName", "FS_hashAddNode", "FS_hashRemoveNode", "FS_lookupNode", "FS_createNode", "FS_destroyNode", "FS_isRoot", "FS_isMountpoint", "FS_isFile", "FS_isDir", "FS_isLink", "FS_isChrdev", "FS_isBlkdev", "FS_isFIFO", "FS_isSocket", "FS_flagsToPermissionString", "FS_nodePermissions", "FS_mayLookup", "FS_mayCreate", "FS_mayDelete", "FS_mayOpen", "FS_checkOpExists", "FS_nextfd", "FS_getStreamChecked", "FS_getStream", "FS_createStream", "FS_closeStream", "FS_dupStream", "FS_doSetAttr", "FS_chrdev_stream_ops", "FS_major", "FS_minor", "FS_makedev", "FS_registerDevice", "FS_getDevice", "FS_getMounts", "FS_syncfs", "FS_mount", "FS_unmount", "FS_lookup", "FS_mknod", "FS_statfs", "FS_statfsStream", "FS_statfsNode", "FS_create", "FS_mkdir", "FS_mkdev", "FS_symlink", "FS_rename", "FS_rmdir", "FS_readdir", "FS_readlink", "FS_stat", "FS_fstat", "FS_lstat", "FS_doChmod", "FS_chmod", "FS_lchmod", "FS_fchmod", "FS_doChown", "FS_chown", "FS_lchown", "FS_fchown", "FS_doTruncate", "FS_truncate", "FS_ftruncate", "FS_utime", "FS_open", "FS_close", "FS_isClosed", "FS_llseek", "FS_read", "FS_write", "FS_mmap", "FS_msync", "FS_ioctl", "FS_writeFile", "FS_cwd", "FS_chdir", "FS_createDefaultDirectories", "FS_createDefaultDevices", "FS_createSpecialDirectories", "FS_createStandardStreams", "FS_staticInit", "FS_init", "FS_quit", "FS_findObject", "FS_analyzePath", "FS_createFile", "FS_forceLoadFile", 
"FS_absolutePath", "FS_createFolder", "FS_createLink", "FS_joinPath", "FS_mmapAlloc", "FS_standardizePath", "MEMFS", "TTY", "PIPEFS", "SOCKFS", "tempFixedLengthArray", "miniTempWebGLFloatBuffers", "miniTempWebGLIntBuffers", "GL", "AL", "GLUT", "EGL", "GLEW", "IDBStore", "SDL", "SDL_gfx", "waitAsyncPolyfilled", "ALLOC_STACK", "print", "printErr", "jstoi_s", "PThread", "terminateWorker", "cleanupThread", "registerTLSInit", "spawnThread", "exitOnMainThread", "proxyToMainThread", "proxiedJSCallArgs", "invokeEntryPoint", "checkMailbox", "InternalError", "BindingError", "throwBindingError", "registeredTypes", "awaitingDependencies", "typeDependencies", "tupleRegistrations", "structRegistrations", "sharedRegisterType", "EmValType", "EmValOptionalType", "embindRepr", "registeredInstances", "registeredPointers", "registerType", "integerReadValueFromPointer", "floatReadValueFromPointer", "assertIntegerRange", "readPointer", "finalizationRegistry", "detachFinalizer_deps", "deletionQueue", "delayFunction", "emval_freelist", "emval_handles", "emval_symbols", "Emval", "emval_methodCallers" ];
|
|
|
|
unexportedSymbols.forEach(unexportedRuntimeSymbol);
|
|
|
|
// End runtime exports
|
|
// Begin JS library exports
|
|
Module["FS"] = FS;
|
|
|
|
// End JS library exports
|
|
// end include: postlibrary.js
|
|
// proxiedFunctionTable specifies the list of functions that can be called
|
|
// either synchronously or asynchronously from other threads in postMessage()d
|
|
// or internally queued events. This way a pthread in a Worker can synchronously
|
|
// access e.g. the DOM on the main thread.
|
|
var proxiedFunctionTable = [ _proc_exit, exitOnMainThread, pthreadCreateProxied, ___syscall_dup, ___syscall_faccessat, ___syscall_fcntl64, ___syscall_fstat64, ___syscall_ftruncate64, ___syscall_getdents64, ___syscall_ioctl, ___syscall_lstat64, ___syscall_newfstatat, ___syscall_openat, ___syscall_stat64, __mmap_js, __munmap_js, _environ_get, _environ_sizes_get, _fd_close, _fd_read, _fd_seek, _fd_write ];
|
|
|
|
function checkIncomingModuleAPI() {
|
|
ignoredModuleProp("fetchSettings");
|
|
}
|
|
|
|
// Ensure that the top-level directory of the given voice path exists under
// /voices/, ignoring the error if it is already present.
function EnsureDir(path) {
var dir = "/voices/" + UTF8ToString(path).split("/")[0];
try {
FS.mkdir(dir);
} catch (err) {}
}

// Number of logical cores, falling back to 1 when self.navigator is not
// available (e.g. outside a browser or worker context).
function hardware_concurrency() {
var concurrency = 1;
try {
concurrency = self.navigator.hardwareConcurrency;
} catch (e) {}
return concurrency;
}
|
|
|
|
// Imports from the Wasm binary.
|
|
var _main = makeInvalidEarlyAccess("_main");
|
|
|
|
var _GoogleTtsInit = Module["_GoogleTtsInit"] = makeInvalidEarlyAccess("_GoogleTtsInit");
|
|
|
|
var _GoogleTtsShutdown = Module["_GoogleTtsShutdown"] = makeInvalidEarlyAccess("_GoogleTtsShutdown");
|
|
|
|
var _GoogleTtsInstallVoice = Module["_GoogleTtsInstallVoice"] = makeInvalidEarlyAccess("_GoogleTtsInstallVoice");
|
|
|
|
var _GoogleTtsInitBuffered = Module["_GoogleTtsInitBuffered"] = makeInvalidEarlyAccess("_GoogleTtsInitBuffered");
|
|
|
|
var _GoogleTtsReadBuffered = Module["_GoogleTtsReadBuffered"] = makeInvalidEarlyAccess("_GoogleTtsReadBuffered");
|
|
|
|
var _GoogleTtsFinalizeBuffered = Module["_GoogleTtsFinalizeBuffered"] = makeInvalidEarlyAccess("_GoogleTtsFinalizeBuffered");
|
|
|
|
var _GoogleTtsGetTimepointsCount = Module["_GoogleTtsGetTimepointsCount"] = makeInvalidEarlyAccess("_GoogleTtsGetTimepointsCount");
|
|
|
|
var _GoogleTtsGetTimepointsTimeInSecsAtIndex = Module["_GoogleTtsGetTimepointsTimeInSecsAtIndex"] = makeInvalidEarlyAccess("_GoogleTtsGetTimepointsTimeInSecsAtIndex");
|
|
|
|
var _GoogleTtsGetTimepointsCharIndexAtIndex = Module["_GoogleTtsGetTimepointsCharIndexAtIndex"] = makeInvalidEarlyAccess("_GoogleTtsGetTimepointsCharIndexAtIndex");
|
|
|
|
var _GoogleTtsGetTimepointsCharLengthAtIndex = Module["_GoogleTtsGetTimepointsCharLengthAtIndex"] = makeInvalidEarlyAccess("_GoogleTtsGetTimepointsCharLengthAtIndex");
|
|
|
|
var _GoogleTtsGetEventBufferPtr = Module["_GoogleTtsGetEventBufferPtr"] = makeInvalidEarlyAccess("_GoogleTtsGetEventBufferPtr");
|
|
|
|
var _GoogleTtsGetEventBufferLen = Module["_GoogleTtsGetEventBufferLen"] = makeInvalidEarlyAccess("_GoogleTtsGetEventBufferLen");
|
|
|
|
var _malloc = Module["_malloc"] = makeInvalidEarlyAccess("_malloc");
|
|
|
|
var _free = Module["_free"] = makeInvalidEarlyAccess("_free");
|
|
|
|
var _fflush = makeInvalidEarlyAccess("_fflush");
|
|
|
|
var _strerror = makeInvalidEarlyAccess("_strerror");
|
|
|
|
var _pthread_self = makeInvalidEarlyAccess("_pthread_self");
|
|
|
|
var ___getTypeName = makeInvalidEarlyAccess("___getTypeName");
|
|
|
|
var __embind_initialize_bindings = makeInvalidEarlyAccess("__embind_initialize_bindings");
|
|
|
|
var __emscripten_tls_init = makeInvalidEarlyAccess("__emscripten_tls_init");
|
|
|
|
var _emscripten_builtin_memalign = makeInvalidEarlyAccess("_emscripten_builtin_memalign");
|
|
|
|
var _emscripten_stack_get_end = makeInvalidEarlyAccess("_emscripten_stack_get_end");
|
|
|
|
var _emscripten_stack_get_base = makeInvalidEarlyAccess("_emscripten_stack_get_base");
|
|
|
|
var __emscripten_thread_init = makeInvalidEarlyAccess("__emscripten_thread_init");
|
|
|
|
var __emscripten_thread_crashed = makeInvalidEarlyAccess("__emscripten_thread_crashed");
|
|
|
|
var __emscripten_run_js_on_main_thread = makeInvalidEarlyAccess("__emscripten_run_js_on_main_thread");
|
|
|
|
var __emscripten_thread_free_data = makeInvalidEarlyAccess("__emscripten_thread_free_data");
|
|
|
|
var __emscripten_thread_exit = makeInvalidEarlyAccess("__emscripten_thread_exit");
|
|
|
|
var __emscripten_check_mailbox = makeInvalidEarlyAccess("__emscripten_check_mailbox");
|
|
|
|
var __emscripten_tempret_set = makeInvalidEarlyAccess("__emscripten_tempret_set");
|
|
|
|
var _emscripten_stack_init = makeInvalidEarlyAccess("_emscripten_stack_init");
|
|
|
|
var _emscripten_stack_set_limits = makeInvalidEarlyAccess("_emscripten_stack_set_limits");
|
|
|
|
var _emscripten_stack_get_free = makeInvalidEarlyAccess("_emscripten_stack_get_free");
|
|
|
|
var __emscripten_stack_restore = makeInvalidEarlyAccess("__emscripten_stack_restore");
|
|
|
|
var __emscripten_stack_alloc = makeInvalidEarlyAccess("__emscripten_stack_alloc");
|
|
|
|
var _emscripten_stack_get_current = makeInvalidEarlyAccess("_emscripten_stack_get_current");
var ___cxa_increment_exception_refcount = makeInvalidEarlyAccess("___cxa_increment_exception_refcount");
var ___cxa_get_exception_ptr = makeInvalidEarlyAccess("___cxa_get_exception_ptr");
var dynCall_iiiijij = makeInvalidEarlyAccess("dynCall_iiiijij");
var dynCall_jiji = makeInvalidEarlyAccess("dynCall_jiji");
var dynCall_vijj = makeInvalidEarlyAccess("dynCall_vijj");
var dynCall_ji = makeInvalidEarlyAccess("dynCall_ji");
var dynCall_jij = makeInvalidEarlyAccess("dynCall_jij");
var dynCall_viiiijii = makeInvalidEarlyAccess("dynCall_viiiijii");
var dynCall_jiiii = makeInvalidEarlyAccess("dynCall_jiiii");
var dynCall_jiii = makeInvalidEarlyAccess("dynCall_jiii");
var dynCall_viij = makeInvalidEarlyAccess("dynCall_viij");
var dynCall_viijii = makeInvalidEarlyAccess("dynCall_viijii");
var dynCall_jii = makeInvalidEarlyAccess("dynCall_jii");
var dynCall_jiij = makeInvalidEarlyAccess("dynCall_jiij");
var dynCall_vij = makeInvalidEarlyAccess("dynCall_vij");
var dynCall_iij = makeInvalidEarlyAccess("dynCall_iij");
var dynCall_jjj = makeInvalidEarlyAccess("dynCall_jjj");
var dynCall_iiiijj = makeInvalidEarlyAccess("dynCall_iiiijj");
var dynCall_viijj = makeInvalidEarlyAccess("dynCall_viijj");
var dynCall_viiijjj = makeInvalidEarlyAccess("dynCall_viiijjj");
var dynCall_iiij = makeInvalidEarlyAccess("dynCall_iiij");
var dynCall_jiijj = makeInvalidEarlyAccess("dynCall_jiijj");
var dynCall_viji = makeInvalidEarlyAccess("dynCall_viji");
var dynCall_iiji = makeInvalidEarlyAccess("dynCall_iiji");
var dynCall_iijjiii = makeInvalidEarlyAccess("dynCall_iijjiii");
var dynCall_vijjjii = makeInvalidEarlyAccess("dynCall_vijjjii");
var dynCall_vijjj = makeInvalidEarlyAccess("dynCall_vijjj");
var dynCall_vj = makeInvalidEarlyAccess("dynCall_vj");
var dynCall_iijjiiii = makeInvalidEarlyAccess("dynCall_iijjiiii");
var dynCall_iiiiij = makeInvalidEarlyAccess("dynCall_iiiiij");
var dynCall_iiiiijj = makeInvalidEarlyAccess("dynCall_iiiiijj");
var dynCall_iiiiiijj = makeInvalidEarlyAccess("dynCall_iiiiiijj");
var _kVersionStampBuildChangelistStr = Module["_kVersionStampBuildChangelistStr"] = makeInvalidEarlyAccess("_kVersionStampBuildChangelistStr");
var _kVersionStampCitcSnapshotStr = Module["_kVersionStampCitcSnapshotStr"] = makeInvalidEarlyAccess("_kVersionStampCitcSnapshotStr");
var _kVersionStampCitcWorkspaceIdStr = Module["_kVersionStampCitcWorkspaceIdStr"] = makeInvalidEarlyAccess("_kVersionStampCitcWorkspaceIdStr");
var _kVersionStampSourceUriStr = Module["_kVersionStampSourceUriStr"] = makeInvalidEarlyAccess("_kVersionStampSourceUriStr");
var _kVersionStampBuildClientStr = Module["_kVersionStampBuildClientStr"] = makeInvalidEarlyAccess("_kVersionStampBuildClientStr");
var _kVersionStampBuildClientMintStatusStr = Module["_kVersionStampBuildClientMintStatusStr"] = makeInvalidEarlyAccess("_kVersionStampBuildClientMintStatusStr");
var _kVersionStampBuildCompilerStr = Module["_kVersionStampBuildCompilerStr"] = makeInvalidEarlyAccess("_kVersionStampBuildCompilerStr");
var _kVersionStampBuildDateTimePstStr = Module["_kVersionStampBuildDateTimePstStr"] = makeInvalidEarlyAccess("_kVersionStampBuildDateTimePstStr");
var _kVersionStampBuildDepotPathStr = Module["_kVersionStampBuildDepotPathStr"] = makeInvalidEarlyAccess("_kVersionStampBuildDepotPathStr");
var _kVersionStampBuildIdStr = Module["_kVersionStampBuildIdStr"] = makeInvalidEarlyAccess("_kVersionStampBuildIdStr");
var _kVersionStampBuildInfoStr = Module["_kVersionStampBuildInfoStr"] = makeInvalidEarlyAccess("_kVersionStampBuildInfoStr");
var _kVersionStampBuildLabelStr = Module["_kVersionStampBuildLabelStr"] = makeInvalidEarlyAccess("_kVersionStampBuildLabelStr");
var _kVersionStampBuildTargetStr = Module["_kVersionStampBuildTargetStr"] = makeInvalidEarlyAccess("_kVersionStampBuildTargetStr");
var _kVersionStampBuildTimestampStr = Module["_kVersionStampBuildTimestampStr"] = makeInvalidEarlyAccess("_kVersionStampBuildTimestampStr");
var _kVersionStampBuildToolStr = Module["_kVersionStampBuildToolStr"] = makeInvalidEarlyAccess("_kVersionStampBuildToolStr");
var _kVersionStampG3BuildTargetStr = Module["_kVersionStampG3BuildTargetStr"] = makeInvalidEarlyAccess("_kVersionStampG3BuildTargetStr");
var _kVersionStampVerifiableStr = Module["_kVersionStampVerifiableStr"] = makeInvalidEarlyAccess("_kVersionStampVerifiableStr");
var _kVersionStampBuildFdoTypeStr = Module["_kVersionStampBuildFdoTypeStr"] = makeInvalidEarlyAccess("_kVersionStampBuildFdoTypeStr");
var _kVersionStampBuildBaselineChangelistStr = Module["_kVersionStampBuildBaselineChangelistStr"] = makeInvalidEarlyAccess("_kVersionStampBuildBaselineChangelistStr");
var _kVersionStampBuildLtoTypeStr = Module["_kVersionStampBuildLtoTypeStr"] = makeInvalidEarlyAccess("_kVersionStampBuildLtoTypeStr");
var _kVersionStampBuildPropellerTypeStr = Module["_kVersionStampBuildPropellerTypeStr"] = makeInvalidEarlyAccess("_kVersionStampBuildPropellerTypeStr");
var _kVersionStampBuildPghoTypeStr = Module["_kVersionStampBuildPghoTypeStr"] = makeInvalidEarlyAccess("_kVersionStampBuildPghoTypeStr");
var _kVersionStampBuildUsernameStr = Module["_kVersionStampBuildUsernameStr"] = makeInvalidEarlyAccess("_kVersionStampBuildUsernameStr");
var _kVersionStampBuildHostnameStr = Module["_kVersionStampBuildHostnameStr"] = makeInvalidEarlyAccess("_kVersionStampBuildHostnameStr");
var _kVersionStampBuildDirectoryStr = Module["_kVersionStampBuildDirectoryStr"] = makeInvalidEarlyAccess("_kVersionStampBuildDirectoryStr");
var _kVersionStampBuildChangelistInt = Module["_kVersionStampBuildChangelistInt"] = makeInvalidEarlyAccess("_kVersionStampBuildChangelistInt");
var _kVersionStampCitcSnapshotInt = Module["_kVersionStampCitcSnapshotInt"] = makeInvalidEarlyAccess("_kVersionStampCitcSnapshotInt");
var _kVersionStampBuildClientMintStatusInt = Module["_kVersionStampBuildClientMintStatusInt"] = makeInvalidEarlyAccess("_kVersionStampBuildClientMintStatusInt");
var _kVersionStampBuildTimestampInt = Module["_kVersionStampBuildTimestampInt"] = makeInvalidEarlyAccess("_kVersionStampBuildTimestampInt");
var _kVersionStampVerifiableInt = Module["_kVersionStampVerifiableInt"] = makeInvalidEarlyAccess("_kVersionStampVerifiableInt");
var _kVersionStampBuildCoverageEnabledInt = Module["_kVersionStampBuildCoverageEnabledInt"] = makeInvalidEarlyAccess("_kVersionStampBuildCoverageEnabledInt");
var _kVersionStampBuildBaselineChangelistInt = Module["_kVersionStampBuildBaselineChangelistInt"] = makeInvalidEarlyAccess("_kVersionStampBuildBaselineChangelistInt");
var _kVersionStampPrecookedTimestampStr = Module["_kVersionStampPrecookedTimestampStr"] = makeInvalidEarlyAccess("_kVersionStampPrecookedTimestampStr");
var _kVersionStampPrecookedClientInfoStr = Module["_kVersionStampPrecookedClientInfoStr"] = makeInvalidEarlyAccess("_kVersionStampPrecookedClientInfoStr");
var __indirect_function_table = makeInvalidEarlyAccess("__indirect_function_table");
var wasmTable = makeInvalidEarlyAccess("wasmTable");
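// All of the variables above are early-access guards: until the Wasm module
// has been instantiated they hold a placeholder produced by
// makeInvalidEarlyAccess(), and assignWasmExports() below replaces them with
// the real exports. A minimal sketch of what such a placeholder factory looks
// like (an assumption for illustration only; the actual helper is defined
// earlier in this file):
//
//   function makeInvalidEarlyAccess(name) {
//     return () => assert(false, `call to '${name}' before the Wasm module is initialized`);
//   }
//
// The dynCall_* names encode the signature used to call a function pointer
// through the indirect function table: the first letter is the return type
// and the remaining letters are the parameter types, where v = void, i = i32,
// j = i64, f = f32 and d = f64 (so dynCall_jiji returns an i64 and takes
// i32, i64, i32).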
function assignWasmExports(wasmExports) {
  // Every export referenced from the JS side must be present on the
  // instantiated module; in this ASSERTIONS build we fail loudly if one is missing.
  for (var exportName of [ "__main_argc_argv", "GoogleTtsInit", "GoogleTtsShutdown", "GoogleTtsInstallVoice",
      "GoogleTtsInitBuffered", "GoogleTtsReadBuffered", "GoogleTtsFinalizeBuffered", "GoogleTtsGetTimepointsCount",
      "GoogleTtsGetTimepointsTimeInSecsAtIndex", "GoogleTtsGetTimepointsCharIndexAtIndex",
      "GoogleTtsGetTimepointsCharLengthAtIndex", "GoogleTtsGetEventBufferPtr", "GoogleTtsGetEventBufferLen",
      "malloc", "free", "fflush", "strerror", "pthread_self", "__getTypeName", "_embind_initialize_bindings",
      "_emscripten_tls_init", "emscripten_builtin_memalign", "emscripten_stack_get_end", "emscripten_stack_get_base",
      "_emscripten_thread_init", "_emscripten_thread_crashed", "_emscripten_run_js_on_main_thread",
      "_emscripten_thread_free_data", "_emscripten_thread_exit", "_emscripten_check_mailbox", "_emscripten_tempret_set",
      "emscripten_stack_init", "emscripten_stack_set_limits", "emscripten_stack_get_free", "_emscripten_stack_restore",
      "_emscripten_stack_alloc", "emscripten_stack_get_current", "__cxa_increment_exception_refcount",
      "__cxa_get_exception_ptr", "dynCall_iiiijij", "dynCall_jiji", "dynCall_vijj", "dynCall_ji", "dynCall_jij",
      "dynCall_viiiijii", "dynCall_jiiii", "dynCall_jiii", "dynCall_viij", "dynCall_viijii", "dynCall_jii",
      "dynCall_jiij", "dynCall_vij", "dynCall_iij", "dynCall_jjj", "dynCall_iiiijj", "dynCall_viijj",
      "dynCall_viiijjj", "dynCall_iiij", "dynCall_jiijj", "dynCall_viji", "dynCall_iiji", "dynCall_iijjiii",
      "dynCall_vijjjii", "dynCall_vijjj", "dynCall_vj", "dynCall_iijjiiii", "dynCall_iiiiij", "dynCall_iiiiijj",
      "dynCall_iiiiiijj", "kVersionStampBuildChangelistStr", "kVersionStampCitcSnapshotStr",
      "kVersionStampCitcWorkspaceIdStr", "kVersionStampSourceUriStr", "kVersionStampBuildClientStr",
      "kVersionStampBuildClientMintStatusStr", "kVersionStampBuildCompilerStr", "kVersionStampBuildDateTimePstStr",
      "kVersionStampBuildDepotPathStr", "kVersionStampBuildIdStr", "kVersionStampBuildInfoStr",
      "kVersionStampBuildLabelStr", "kVersionStampBuildTargetStr", "kVersionStampBuildTimestampStr",
      "kVersionStampBuildToolStr", "kVersionStampG3BuildTargetStr", "kVersionStampVerifiableStr",
      "kVersionStampBuildFdoTypeStr", "kVersionStampBuildBaselineChangelistStr", "kVersionStampBuildLtoTypeStr",
      "kVersionStampBuildPropellerTypeStr", "kVersionStampBuildPghoTypeStr", "kVersionStampBuildUsernameStr",
      "kVersionStampBuildHostnameStr", "kVersionStampBuildDirectoryStr", "kVersionStampBuildChangelistInt",
      "kVersionStampCitcSnapshotInt", "kVersionStampBuildClientMintStatusInt", "kVersionStampBuildTimestampInt",
      "kVersionStampVerifiableInt", "kVersionStampBuildCoverageEnabledInt", "kVersionStampBuildBaselineChangelistInt",
      "kVersionStampPrecookedTimestampStr", "kVersionStampPrecookedClientInfoStr", "__indirect_function_table" ]) {
    assert(typeof wasmExports[exportName] != "undefined", `missing Wasm export: ${exportName}`);
  }
  // Bind the real exports to the JS-visible wrappers and Module properties.
  _main = createExportWrapper("__main_argc_argv", 2);
  _GoogleTtsInit = Module["_GoogleTtsInit"] = createExportWrapper("GoogleTtsInit", 2);
  _GoogleTtsShutdown = Module["_GoogleTtsShutdown"] = createExportWrapper("GoogleTtsShutdown", 0);
  _GoogleTtsInstallVoice = Module["_GoogleTtsInstallVoice"] = createExportWrapper("GoogleTtsInstallVoice", 3);
  _GoogleTtsInitBuffered = Module["_GoogleTtsInitBuffered"] = createExportWrapper("GoogleTtsInitBuffered", 4);
  _GoogleTtsReadBuffered = Module["_GoogleTtsReadBuffered"] = createExportWrapper("GoogleTtsReadBuffered", 0);
  _GoogleTtsFinalizeBuffered = Module["_GoogleTtsFinalizeBuffered"] = createExportWrapper("GoogleTtsFinalizeBuffered", 0);
  _GoogleTtsGetTimepointsCount = Module["_GoogleTtsGetTimepointsCount"] = createExportWrapper("GoogleTtsGetTimepointsCount", 0);
  _GoogleTtsGetTimepointsTimeInSecsAtIndex = Module["_GoogleTtsGetTimepointsTimeInSecsAtIndex"] = createExportWrapper("GoogleTtsGetTimepointsTimeInSecsAtIndex", 1);
  _GoogleTtsGetTimepointsCharIndexAtIndex = Module["_GoogleTtsGetTimepointsCharIndexAtIndex"] = createExportWrapper("GoogleTtsGetTimepointsCharIndexAtIndex", 1);
  _GoogleTtsGetTimepointsCharLengthAtIndex = Module["_GoogleTtsGetTimepointsCharLengthAtIndex"] = createExportWrapper("GoogleTtsGetTimepointsCharLengthAtIndex", 1);
  _GoogleTtsGetEventBufferPtr = Module["_GoogleTtsGetEventBufferPtr"] = createExportWrapper("GoogleTtsGetEventBufferPtr", 0);
  _GoogleTtsGetEventBufferLen = Module["_GoogleTtsGetEventBufferLen"] = createExportWrapper("GoogleTtsGetEventBufferLen", 0);
  _malloc = Module["_malloc"] = createExportWrapper("malloc", 1);
  _free = Module["_free"] = createExportWrapper("free", 1);
  _fflush = createExportWrapper("fflush", 1);
  _strerror = createExportWrapper("strerror", 1);
  _pthread_self = createExportWrapper("pthread_self", 0);
  ___getTypeName = createExportWrapper("__getTypeName", 1);
  __embind_initialize_bindings = createExportWrapper("_embind_initialize_bindings", 0);
  __emscripten_tls_init = createExportWrapper("_emscripten_tls_init", 0);
  _emscripten_builtin_memalign = createExportWrapper("emscripten_builtin_memalign", 2);
  _emscripten_stack_get_end = wasmExports["emscripten_stack_get_end"];
  _emscripten_stack_get_base = wasmExports["emscripten_stack_get_base"];
  __emscripten_thread_init = createExportWrapper("_emscripten_thread_init", 6);
  __emscripten_thread_crashed = createExportWrapper("_emscripten_thread_crashed", 0);
  __emscripten_run_js_on_main_thread = createExportWrapper("_emscripten_run_js_on_main_thread", 5);
  __emscripten_thread_free_data = createExportWrapper("_emscripten_thread_free_data", 1);
  __emscripten_thread_exit = createExportWrapper("_emscripten_thread_exit", 1);
  __emscripten_check_mailbox = createExportWrapper("_emscripten_check_mailbox", 0);
  __emscripten_tempret_set = createExportWrapper("_emscripten_tempret_set", 1);
  _emscripten_stack_init = wasmExports["emscripten_stack_init"];
  _emscripten_stack_set_limits = wasmExports["emscripten_stack_set_limits"];
  _emscripten_stack_get_free = wasmExports["emscripten_stack_get_free"];
  __emscripten_stack_restore = wasmExports["_emscripten_stack_restore"];
  __emscripten_stack_alloc = wasmExports["_emscripten_stack_alloc"];
  _emscripten_stack_get_current = wasmExports["emscripten_stack_get_current"];
  ___cxa_increment_exception_refcount = createExportWrapper("__cxa_increment_exception_refcount", 1);
  ___cxa_get_exception_ptr = createExportWrapper("__cxa_get_exception_ptr", 1);
  dynCall_iiiijij = createExportWrapper("dynCall_iiiijij", 9);
  dynCall_jiji = createExportWrapper("dynCall_jiji", 5);
  dynCall_vijj = createExportWrapper("dynCall_vijj", 6);
  dynCall_ji = createExportWrapper("dynCall_ji", 2);
  dynCall_jij = createExportWrapper("dynCall_jij", 4);
  dynCall_viiiijii = createExportWrapper("dynCall_viiiijii", 9);
  dynCall_jiiii = createExportWrapper("dynCall_jiiii", 5);
  dynCall_jiii = createExportWrapper("dynCall_jiii", 4);
  dynCall_viij = createExportWrapper("dynCall_viij", 5);
  dynCall_viijii = createExportWrapper("dynCall_viijii", 7);
  dynCall_jii = createExportWrapper("dynCall_jii", 3);
  dynCall_jiij = createExportWrapper("dynCall_jiij", 5);
  dynCall_vij = createExportWrapper("dynCall_vij", 4);
  dynCall_iij = createExportWrapper("dynCall_iij", 4);
  dynCall_jjj = createExportWrapper("dynCall_jjj", 5);
  dynCall_iiiijj = createExportWrapper("dynCall_iiiijj", 8);
  dynCall_viijj = createExportWrapper("dynCall_viijj", 7);
  dynCall_viiijjj = createExportWrapper("dynCall_viiijjj", 10);
  dynCall_iiij = createExportWrapper("dynCall_iiij", 5);
  dynCall_jiijj = createExportWrapper("dynCall_jiijj", 7);
  dynCall_viji = createExportWrapper("dynCall_viji", 5);
  dynCall_iiji = createExportWrapper("dynCall_iiji", 5);
  dynCall_iijjiii = createExportWrapper("dynCall_iijjiii", 9);
  dynCall_vijjjii = createExportWrapper("dynCall_vijjjii", 10);
  dynCall_vijjj = createExportWrapper("dynCall_vijjj", 8);
  dynCall_vj = createExportWrapper("dynCall_vj", 3);
  dynCall_iijjiiii = createExportWrapper("dynCall_iijjiiii", 10);
  dynCall_iiiiij = createExportWrapper("dynCall_iiiiij", 7);
  dynCall_iiiiijj = createExportWrapper("dynCall_iiiiijj", 9);
  dynCall_iiiiiijj = createExportWrapper("dynCall_iiiiiijj", 10);
  _kVersionStampBuildChangelistStr = Module["_kVersionStampBuildChangelistStr"] = wasmExports["kVersionStampBuildChangelistStr"].value;
  _kVersionStampCitcSnapshotStr = Module["_kVersionStampCitcSnapshotStr"] = wasmExports["kVersionStampCitcSnapshotStr"].value;
  _kVersionStampCitcWorkspaceIdStr = Module["_kVersionStampCitcWorkspaceIdStr"] = wasmExports["kVersionStampCitcWorkspaceIdStr"].value;
  _kVersionStampSourceUriStr = Module["_kVersionStampSourceUriStr"] = wasmExports["kVersionStampSourceUriStr"].value;
  _kVersionStampBuildClientStr = Module["_kVersionStampBuildClientStr"] = wasmExports["kVersionStampBuildClientStr"].value;
  _kVersionStampBuildClientMintStatusStr = Module["_kVersionStampBuildClientMintStatusStr"] = wasmExports["kVersionStampBuildClientMintStatusStr"].value;
  _kVersionStampBuildCompilerStr = Module["_kVersionStampBuildCompilerStr"] = wasmExports["kVersionStampBuildCompilerStr"].value;
  _kVersionStampBuildDateTimePstStr = Module["_kVersionStampBuildDateTimePstStr"] = wasmExports["kVersionStampBuildDateTimePstStr"].value;
  _kVersionStampBuildDepotPathStr = Module["_kVersionStampBuildDepotPathStr"] = wasmExports["kVersionStampBuildDepotPathStr"].value;
  _kVersionStampBuildIdStr = Module["_kVersionStampBuildIdStr"] = wasmExports["kVersionStampBuildIdStr"].value;
  _kVersionStampBuildInfoStr = Module["_kVersionStampBuildInfoStr"] = wasmExports["kVersionStampBuildInfoStr"].value;
  _kVersionStampBuildLabelStr = Module["_kVersionStampBuildLabelStr"] = wasmExports["kVersionStampBuildLabelStr"].value;
  _kVersionStampBuildTargetStr = Module["_kVersionStampBuildTargetStr"] = wasmExports["kVersionStampBuildTargetStr"].value;
  _kVersionStampBuildTimestampStr = Module["_kVersionStampBuildTimestampStr"] = wasmExports["kVersionStampBuildTimestampStr"].value;
  _kVersionStampBuildToolStr = Module["_kVersionStampBuildToolStr"] = wasmExports["kVersionStampBuildToolStr"].value;
  _kVersionStampG3BuildTargetStr = Module["_kVersionStampG3BuildTargetStr"] = wasmExports["kVersionStampG3BuildTargetStr"].value;
  _kVersionStampVerifiableStr = Module["_kVersionStampVerifiableStr"] = wasmExports["kVersionStampVerifiableStr"].value;
  _kVersionStampBuildFdoTypeStr = Module["_kVersionStampBuildFdoTypeStr"] = wasmExports["kVersionStampBuildFdoTypeStr"].value;
  _kVersionStampBuildBaselineChangelistStr = Module["_kVersionStampBuildBaselineChangelistStr"] = wasmExports["kVersionStampBuildBaselineChangelistStr"].value;
  _kVersionStampBuildLtoTypeStr = Module["_kVersionStampBuildLtoTypeStr"] = wasmExports["kVersionStampBuildLtoTypeStr"].value;
  _kVersionStampBuildPropellerTypeStr = Module["_kVersionStampBuildPropellerTypeStr"] = wasmExports["kVersionStampBuildPropellerTypeStr"].value;
  _kVersionStampBuildPghoTypeStr = Module["_kVersionStampBuildPghoTypeStr"] = wasmExports["kVersionStampBuildPghoTypeStr"].value;
  _kVersionStampBuildUsernameStr = Module["_kVersionStampBuildUsernameStr"] = wasmExports["kVersionStampBuildUsernameStr"].value;
  _kVersionStampBuildHostnameStr = Module["_kVersionStampBuildHostnameStr"] = wasmExports["kVersionStampBuildHostnameStr"].value;
  _kVersionStampBuildDirectoryStr = Module["_kVersionStampBuildDirectoryStr"] = wasmExports["kVersionStampBuildDirectoryStr"].value;
  _kVersionStampBuildChangelistInt = Module["_kVersionStampBuildChangelistInt"] = wasmExports["kVersionStampBuildChangelistInt"].value;
  _kVersionStampCitcSnapshotInt = Module["_kVersionStampCitcSnapshotInt"] = wasmExports["kVersionStampCitcSnapshotInt"].value;
  _kVersionStampBuildClientMintStatusInt = Module["_kVersionStampBuildClientMintStatusInt"] = wasmExports["kVersionStampBuildClientMintStatusInt"].value;
  _kVersionStampBuildTimestampInt = Module["_kVersionStampBuildTimestampInt"] = wasmExports["kVersionStampBuildTimestampInt"].value;
  _kVersionStampVerifiableInt = Module["_kVersionStampVerifiableInt"] = wasmExports["kVersionStampVerifiableInt"].value;
  _kVersionStampBuildCoverageEnabledInt = Module["_kVersionStampBuildCoverageEnabledInt"] = wasmExports["kVersionStampBuildCoverageEnabledInt"].value;
  _kVersionStampBuildBaselineChangelistInt = Module["_kVersionStampBuildBaselineChangelistInt"] = wasmExports["kVersionStampBuildBaselineChangelistInt"].value;
  _kVersionStampPrecookedTimestampStr = Module["_kVersionStampPrecookedTimestampStr"] = wasmExports["kVersionStampPrecookedTimestampStr"].value;
  _kVersionStampPrecookedClientInfoStr = Module["_kVersionStampPrecookedClientInfoStr"] = wasmExports["kVersionStampPrecookedClientInfoStr"].value;
  __indirect_function_table = wasmTable = wasmExports["__indirect_function_table"];
}
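// Illustrative usage of the wrapped exports (the argument lists of the
// GoogleTts* entry points are not documented in this file, so the parameters
// shown below are assumptions): once the factory promise resolves, each export
// is available both as a bare identifier inside this scope and as a property
// on the returned module object.
//
//   const tts = await loadWasmTtsBindings();
//   tts._GoogleTtsInit(arg0, arg1);                         // hypothetical arguments
//   const count = tts._GoogleTtsGetTimepointsCount();
//   const secs = tts._GoogleTtsGetTimepointsTimeInSecsAtIndex(0);
//   tts._GoogleTtsShutdown();
//
// Buffers handed to the Wasm side are typically allocated with tts._malloc()
// and released with tts._free().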
var wasmImports;

function assignWasmImports() {
  wasmImports = {
    /** @export */ EnsureDir,
    /** @export */ __assert_fail: ___assert_fail,
    /** @export */ __cxa_throw: ___cxa_throw,
    /** @export */ __pthread_create_js: ___pthread_create_js,
    /** @export */ __syscall_dup: ___syscall_dup,
    /** @export */ __syscall_faccessat: ___syscall_faccessat,
    /** @export */ __syscall_fcntl64: ___syscall_fcntl64,
    /** @export */ __syscall_fstat64: ___syscall_fstat64,
    /** @export */ __syscall_ftruncate64: ___syscall_ftruncate64,
    /** @export */ __syscall_getdents64: ___syscall_getdents64,
    /** @export */ __syscall_ioctl: ___syscall_ioctl,
    /** @export */ __syscall_lstat64: ___syscall_lstat64,
    /** @export */ __syscall_newfstatat: ___syscall_newfstatat,
    /** @export */ __syscall_openat: ___syscall_openat,
    /** @export */ __syscall_stat64: ___syscall_stat64,
    /** @export */ _abort_js: __abort_js,
    /** @export */ _embind_register_bigint: __embind_register_bigint,
    /** @export */ _embind_register_bool: __embind_register_bool,
    /** @export */ _embind_register_emval: __embind_register_emval,
    /** @export */ _embind_register_float: __embind_register_float,
    /** @export */ _embind_register_integer: __embind_register_integer,
    /** @export */ _embind_register_memory_view: __embind_register_memory_view,
    /** @export */ _embind_register_std_string: __embind_register_std_string,
    /** @export */ _embind_register_std_wstring: __embind_register_std_wstring,
    /** @export */ _embind_register_void: __embind_register_void,
    /** @export */ _emscripten_init_main_thread_js: __emscripten_init_main_thread_js,
    /** @export */ _emscripten_notify_mailbox_postmessage: __emscripten_notify_mailbox_postmessage,
    /** @export */ _emscripten_receive_on_main_thread_js: __emscripten_receive_on_main_thread_js,
    /** @export */ _emscripten_thread_cleanup: __emscripten_thread_cleanup,
    /** @export */ _emscripten_thread_mailbox_await: __emscripten_thread_mailbox_await,
    /** @export */ _emscripten_thread_set_strongref: __emscripten_thread_set_strongref,
    /** @export */ _mmap_js: __mmap_js,
    /** @export */ _munmap_js: __munmap_js,
    /** @export */ _tzset_js: __tzset_js,
    /** @export */ clock_time_get: _clock_time_get,
    /** @export */ emscripten_check_blocking_allowed: _emscripten_check_blocking_allowed,
    /** @export */ emscripten_errn: _emscripten_errn,
    /** @export */ emscripten_exit_with_live_runtime: _emscripten_exit_with_live_runtime,
    /** @export */ emscripten_get_heap_max: _emscripten_get_heap_max,
    /** @export */ emscripten_get_now: _emscripten_get_now,
    /** @export */ emscripten_num_logical_cores: _emscripten_num_logical_cores,
    /** @export */ emscripten_pc_get_function: _emscripten_pc_get_function,
    /** @export */ emscripten_resize_heap: _emscripten_resize_heap,
    /** @export */ emscripten_stack_snapshot: _emscripten_stack_snapshot,
    /** @export */ emscripten_stack_unwind_buffer: _emscripten_stack_unwind_buffer,
    /** @export */ environ_get: _environ_get,
    /** @export */ environ_sizes_get: _environ_sizes_get,
    /** @export */ exit: _exit,
    /** @export */ fd_close: _fd_close,
    /** @export */ fd_read: _fd_read,
    /** @export */ fd_seek: _fd_seek,
    /** @export */ fd_write: _fd_write,
    /** @export */ hardware_concurrency,
    /** @export */ memory: wasmMemory,
    /** @export */ proc_exit: _proc_exit,
    /** @export */ random_get: _random_get
  };
}
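// assignWasmImports() is invoked before instantiation so that the object above
// can be handed to WebAssembly.instantiate as the import object. A simplified
// sketch of that flow (an assumption for illustration; the exact code lives in
// createWasm() earlier in this file):
//
//   assignWasmImports();
//   const info = { env: wasmImports, wasi_snapshot_preview1: wasmImports };
//   const { instance } = await WebAssembly.instantiate(wasmBinary, info);
//   assignWasmExports(instance.exports);
//
// Each key is the import name the Wasm module expects; each value is the JS
// implementation defined earlier in this file.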
// include: postamble.js
// === Auto-generated postamble setup entry stuff ===
var calledRun;

function stackCheckInit() {
  // This is normally called automatically during __wasm_call_ctors, but we need
  // these values before even running any of the ctors, so we call it
  // redundantly here.
  // See $establishStackSpace for the equivalent code that runs on a thread
  assert(!ENVIRONMENT_IS_PTHREAD);
  _emscripten_stack_init();
  // TODO(sbc): Move writeStackCookie to native to avoid this.
  writeStackCookie();
}

function run(args = arguments_) {
  if (runDependencies > 0) {
    dependenciesFulfilled = run;
    return;
  }
  if (ENVIRONMENT_IS_PTHREAD) {
    readyPromiseResolve?.(Module);
    initRuntime();
    return;
  }
  stackCheckInit();
  preRun();
  // a preRun added a dependency, run will be called later
  if (runDependencies > 0) {
    dependenciesFulfilled = run;
    return;
  }
  function doRun() {
    // run may have just been called through dependencies being fulfilled just in this very frame,
    // or while the async setStatus time below was happening
    assert(!calledRun);
    calledRun = true;
    Module["calledRun"] = true;
    if (ABORT) return;
    initRuntime();
    readyPromiseResolve?.(Module);
    Module["onRuntimeInitialized"]?.();
    consumedModuleProp("onRuntimeInitialized");
    assert(!Module["_main"], 'compiled without a main, but one is present. if you added it from JS, use Module["onRuntimeInitialized"]');
    postRun();
  }
  if (Module["setStatus"]) {
    Module["setStatus"]("Running...");
    setTimeout(() => {
      setTimeout(() => Module["setStatus"](""), 1);
      doRun();
    }, 1);
  } else {
    doRun();
  }
  checkStackCookie();
}

function checkUnflushedContent() {
  // Compiler settings do not allow exiting the runtime, so flushing
  // the streams is not possible, but in ASSERTIONS mode we check
  // if there was something to flush, and if so tell the user they
  // should request that the runtime be exitable.
  // Normally we would not even include flush() at all, but in ASSERTIONS
  // builds we do so just for this check, and here we see if there is any
  // content to flush, that is, we check if there would have been
  // something a non-ASSERTIONS build would not have seen.
  // How we flush the streams depends on whether we are in SYSCALLS_REQUIRE_FILESYSTEM=0
  // mode (which has its own special function for this; otherwise, all
  // the code is inside libc)
  var oldOut = out;
  var oldErr = err;
  var has = false;
  out = err = x => {
    has = true;
  };
  try {
    // it doesn't matter if it fails
    _fflush(0);
    // also flush in the JS FS layer
    for (var name of [ "stdout", "stderr" ]) {
      var info = FS.analyzePath("/dev/" + name);
      if (!info) return;
      var stream = info.object;
      var rdev = stream.rdev;
      var tty = TTY.ttys[rdev];
      if (tty?.output?.length) {
        has = true;
      }
    }
  } catch (e) {}
  out = oldOut;
  err = oldErr;
  if (has) {
    warnOnce("stdio streams had content in them that was not flushed. you should set EXIT_RUNTIME to 1 (see the Emscripten FAQ), or make sure to emit a newline when you printf etc.");
  }
}

var wasmExports;

if (!ENVIRONMENT_IS_PTHREAD) {
  // Call createWasm on startup if we are the main thread.
  // Worker threads call this once they receive the module via postMessage.
  // In modularize mode the generated code is within a factory function so we
  // can use await here (since it's not top-level-await).
  wasmExports = await createWasm();
  run();
}

// end include: postamble.js
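// The run() sequence above honours optional hooks supplied on the incoming
// module argument. For example (illustrative only):
//
//   const tts = await loadWasmTtsBindings({
//     setStatus: text => console.log("status:", text),
//     onRuntimeInitialized: () => console.log("TTS runtime ready"),
//   });
//
// setStatus is called with "Running..." and then "" around startup, and
// onRuntimeInitialized fires once initRuntime() has completed.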
// include: postamble_modularize.js
// In MODULARIZE mode we wrap the generated code in a factory function
// and return either the Module itself, or a promise of the module.
// We assign to the `moduleRtn` global here and configure closure to see
// this as an extern so it won't get minified.
if (runtimeInitialized) {
  moduleRtn = Module;
} else {
  // Set up the promise that indicates the Module is initialized
  moduleRtn = new Promise((resolve, reject) => {
    readyPromiseResolve = resolve;
    readyPromiseReject = reject;
  });
}

// Assertion for attempting to access module properties on the incoming
// moduleArg. In the past we used this object as the prototype of the module
// and assigned properties to it, but now we return a distinct object. This
// keeps the instance private until it is ready (i.e. the promise has been
// resolved).
for (const prop of Object.keys(Module)) {
  if (!(prop in moduleArg)) {
    Object.defineProperty(moduleArg, prop, {
      configurable: true,
      get() {
        abort(`Access to module property ('${prop}') is no longer possible via the module constructor argument; Instead, use the result of the module constructor.`);
      }
    });
  }
}

return moduleRtn;
};
})();

// Export using a UMD style export, or ES6 exports if selected
if (typeof exports === 'object' && typeof module === 'object') {
  module.exports = loadWasmTtsBindings;
  // This default export looks redundant, but it allows TS to import this
  // commonjs style module.
  module.exports.default = loadWasmTtsBindings;
} else if (typeof define === 'function' && define['amd'])
  define([], () => loadWasmTtsBindings);

// Create code for detecting if we are running in a pthread.
// Normally this detection is done when the module is itself run, but
// when running in MODULARIZE mode we need to use this to know if we should
// run the module constructor on startup (true only for pthreads).
var isPthread = globalThis.self?.name?.startsWith('em-pthread');
// In order to support both web and node we also need to detect node here.
var isNode = globalThis.process?.versions?.node && globalThis.process?.type != 'renderer';
if (isNode) isPthread = require('worker_threads').workerData === 'em-pthread';

isPthread && loadWasmTtsBindings();
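// Typical consumption of this file (illustrative; the filename below is
// hypothetical):
//
//   // Browser or bundler:
//   const tts = await loadWasmTtsBindings();
//
//   // Node.js (CommonJS):
//   const loadWasmTtsBindings = require("./wasm_tts_bindings.js");
//   const tts = await loadWasmTtsBindings();
//
// The trailing `isPthread && loadWasmTtsBindings()` call above auto-starts the
// factory only when this script has been loaded inside an em-pthread worker.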