"use strict"; var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { if (k2 === undefined) k2 = k; var desc = Object.getOwnPropertyDescriptor(m, k); if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { desc = { enumerable: true, get: function() { return m[k]; } }; } Object.defineProperty(o, k2, desc); }) : (function(o, m, k, k2) { if (k2 === undefined) k2 = k; o[k2] = m[k]; })); var __exportStar = (this && this.__exportStar) || function(m, exports) { for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p); }; var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) { if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter"); if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it"); return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver); }; var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) { if (kind === "m") throw new TypeError("Private method is not writable"); if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter"); if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it"); return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value; }; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? 
mod : { "default": mod }; }; var _BatchCluster_instances, _BatchCluster_tasksPerProc, _BatchCluster_logger, _BatchCluster_procs, _BatchCluster_onIdleRequested, _BatchCluster_nextSpawnTime, _BatchCluster_lastPidsCheckTime, _BatchCluster_tasks, _BatchCluster_onIdleInterval, _BatchCluster_startErrorRate, _BatchCluster_spawnedProcs, _BatchCluster_endPromise, _BatchCluster_internalErrorCount, _BatchCluster_childEndCounts, _BatchCluster_beforeExitListener, _BatchCluster_exitListener, _BatchCluster_onIdleLater, _BatchCluster_onIdle, _BatchCluster_maybeCheckPids, _BatchCluster_execNextTask, _BatchCluster_maxSpawnDelay, _BatchCluster_procsToSpawn, _BatchCluster_maybeSpawnProcs, _BatchCluster_spawnNewProc; Object.defineProperty(exports, "__esModule", { value: true }); exports.BatchCluster = exports.Task = exports.Rate = exports.pids = exports.pidExists = exports.kill = exports.SimpleParser = exports.Deferred = exports.BatchProcess = exports.BatchClusterOptions = void 0; const node_events_1 = __importDefault(require("node:events")); const node_process_1 = __importDefault(require("node:process")); const node_timers_1 = __importDefault(require("node:timers")); const Array_1 = require("./Array"); const BatchClusterOptions_1 = require("./BatchClusterOptions"); const BatchProcess_1 = require("./BatchProcess"); const Deferred_1 = require("./Deferred"); const Error_1 = require("./Error"); const Mean_1 = require("./Mean"); const Object_1 = require("./Object"); const Rate_1 = require("./Rate"); const String_1 = require("./String"); const Timeout_1 = require("./Timeout"); var BatchClusterOptions_2 = require("./BatchClusterOptions"); Object.defineProperty(exports, "BatchClusterOptions", { enumerable: true, get: function () { return BatchClusterOptions_2.BatchClusterOptions; } }); var BatchProcess_2 = require("./BatchProcess"); Object.defineProperty(exports, "BatchProcess", { enumerable: true, get: function () { return BatchProcess_2.BatchProcess; } }); var Deferred_2 = require("./Deferred"); Object.defineProperty(exports, "Deferred", { enumerable: true, get: function () { return Deferred_2.Deferred; } }); __exportStar(require("./Logger"), exports); var Parser_1 = require("./Parser"); Object.defineProperty(exports, "SimpleParser", { enumerable: true, get: function () { return Parser_1.SimpleParser; } }); var Pids_1 = require("./Pids"); Object.defineProperty(exports, "kill", { enumerable: true, get: function () { return Pids_1.kill; } }); Object.defineProperty(exports, "pidExists", { enumerable: true, get: function () { return Pids_1.pidExists; } }); Object.defineProperty(exports, "pids", { enumerable: true, get: function () { return Pids_1.pids; } }); var Rate_2 = require("./Rate"); Object.defineProperty(exports, "Rate", { enumerable: true, get: function () { return Rate_2.Rate; } }); var Task_1 = require("./Task"); Object.defineProperty(exports, "Task", { enumerable: true, get: function () { return Task_1.Task; } }); /** * BatchCluster instances manage 0 or more homogeneous child processes, and * provide the main interface for enqueuing `Task`s via `enqueueTask`. * * Given the large number of configuration options, the constructor * receives a single options hash. The most important of these are the * `ChildProcessFactory`, which specifies the factory that creates * ChildProcess instances, and `BatchProcessOptions`, which specifies how * child tasks can be verified and shut down. 
 */
class BatchCluster {
    constructor(opts) {
        _BatchCluster_instances.add(this);
        _BatchCluster_tasksPerProc.set(this, new Mean_1.Mean());
        _BatchCluster_logger.set(this, void 0);
        _BatchCluster_procs.set(this, []);
        _BatchCluster_onIdleRequested.set(this, false);
        _BatchCluster_nextSpawnTime.set(this, 0);
        _BatchCluster_lastPidsCheckTime.set(this, 0);
        _BatchCluster_tasks.set(this, []);
        _BatchCluster_onIdleInterval.set(this, void 0);
        _BatchCluster_startErrorRate.set(this, new Rate_1.Rate());
        _BatchCluster_spawnedProcs.set(this, 0);
        _BatchCluster_endPromise.set(this, void 0);
        _BatchCluster_internalErrorCount.set(this, 0);
        _BatchCluster_childEndCounts.set(this, new Map());
        this.emitter = new node_events_1.default.EventEmitter();
        /**
         * @see BatchClusterEvents
         */
        this.on = this.emitter.on.bind(this.emitter);
        /**
         * @see BatchClusterEvents
         * @since v9.0.0
         */
        this.off = this.emitter.off.bind(this.emitter);
        _BatchCluster_beforeExitListener.set(this, () => this.end(true));
        _BatchCluster_exitListener.set(this, () => this.end(false));
        _BatchCluster_onIdleLater.set(this, () => {
            if (!__classPrivateFieldGet(this, _BatchCluster_onIdleRequested, "f")) {
                __classPrivateFieldSet(this, _BatchCluster_onIdleRequested, true, "f");
                node_timers_1.default.setTimeout(() => __classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_onIdle).call(this), 1);
            }
        }
        // NOT ASYNC: updates internal state:
        );
        this.options = (0, BatchClusterOptions_1.verifyOptions)({ ...opts, observer: this.emitter });
        this.on("childEnd", (bp, why) => {
            var _a;
            __classPrivateFieldGet(this, _BatchCluster_tasksPerProc, "f").push(bp.taskCount);
            __classPrivateFieldGet(this, _BatchCluster_childEndCounts, "f").set(why, ((_a = __classPrivateFieldGet(this, _BatchCluster_childEndCounts, "f").get(why)) !== null && _a !== void 0 ? _a : 0) + 1);
            __classPrivateFieldGet(this, _BatchCluster_onIdleLater, "f").call(this);
        });
        this.on("internalError", (error) => {
            var _a;
            __classPrivateFieldGet(this, _BatchCluster_logger, "f").call(this).error("BatchCluster: INTERNAL ERROR: " + error);
            __classPrivateFieldSet(this, _BatchCluster_internalErrorCount, (_a = __classPrivateFieldGet(this, _BatchCluster_internalErrorCount, "f"), _a++, _a), "f");
        });
        this.on("noTaskData", (stdout, stderr, proc) => {
            var _a;
            __classPrivateFieldGet(this, _BatchCluster_logger, "f").call(this).warn("BatchCluster: child process emitted data with no current task. Consider setting streamFlushMillis to a higher value.", {
                streamFlushMillis: this.options.streamFlushMillis,
                stdout: (0, String_1.toS)(stdout),
                stderr: (0, String_1.toS)(stderr),
                proc_pid: proc === null || proc === void 0 ?
                    void 0 : proc.pid,
            });
            __classPrivateFieldSet(this, _BatchCluster_internalErrorCount, (_a = __classPrivateFieldGet(this, _BatchCluster_internalErrorCount, "f"), _a++, _a), "f");
        });
        this.on("startError", (error) => {
            __classPrivateFieldGet(this, _BatchCluster_logger, "f").call(this).warn("BatchCluster.onStartError(): " + error);
            __classPrivateFieldGet(this, _BatchCluster_startErrorRate, "f").onEvent();
            if (this.options.maxReasonableProcessFailuresPerMinute > 0 &&
                __classPrivateFieldGet(this, _BatchCluster_startErrorRate, "f").eventsPerMinute > this.options.maxReasonableProcessFailuresPerMinute) {
                this.emitter.emit("fatalError", new Error(error + "(start errors/min: " + __classPrivateFieldGet(this, _BatchCluster_startErrorRate, "f").eventsPerMinute.toFixed(2) + ")"));
                this.end();
            }
            else {
                __classPrivateFieldGet(this, _BatchCluster_onIdleLater, "f").call(this);
            }
        });
        if (this.options.onIdleIntervalMillis > 0) {
            __classPrivateFieldSet(this, _BatchCluster_onIdleInterval, node_timers_1.default.setInterval(() => __classPrivateFieldGet(this, _BatchCluster_onIdleLater, "f").call(this), this.options.onIdleIntervalMillis), "f");
            __classPrivateFieldGet(this, _BatchCluster_onIdleInterval, "f").unref(); // < don't prevent node from exiting
        }
        __classPrivateFieldSet(this, _BatchCluster_logger, this.options.logger, "f");
        node_process_1.default.once("beforeExit", __classPrivateFieldGet(this, _BatchCluster_beforeExitListener, "f"));
        node_process_1.default.once("exit", __classPrivateFieldGet(this, _BatchCluster_exitListener, "f"));
    }
    get ended() {
        return __classPrivateFieldGet(this, _BatchCluster_endPromise, "f") != null;
    }
    /**
     * Shut down this instance, and all child processes.
     * @param gracefully should an attempt be made to finish in-flight tasks, or
     * should we force-kill child PIDs.
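     *
     * A brief sketch, where `bc` is a BatchCluster instance as in the
     * class-level example; the returned Deferred is assumed here to be
     * awaitable (PromiseLike):
     *
     * @example
     * bc.on("end", () => console.log("all child processes have stopped"));
     * // Let in-flight tasks finish before shutting children down:
     * await bc.end(true);
     * // ...or force-kill child processes instead:
     * // await bc.end(false);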
     */
    // NOT ASYNC so state transition happens immediately
    end(gracefully = true) {
        __classPrivateFieldGet(this, _BatchCluster_logger, "f").call(this).info("BatchCluster.end()", { gracefully });
        if (__classPrivateFieldGet(this, _BatchCluster_endPromise, "f") == null) {
            this.emitter.emit("beforeEnd");
            (0, Object_1.map)(__classPrivateFieldGet(this, _BatchCluster_onIdleInterval, "f"), node_timers_1.default.clearInterval);
            __classPrivateFieldSet(this, _BatchCluster_onIdleInterval, undefined, "f");
            node_process_1.default.removeListener("beforeExit", __classPrivateFieldGet(this, _BatchCluster_beforeExitListener, "f"));
            node_process_1.default.removeListener("exit", __classPrivateFieldGet(this, _BatchCluster_exitListener, "f"));
            __classPrivateFieldSet(this, _BatchCluster_endPromise, new Deferred_1.Deferred().observe(this.closeChildProcesses(gracefully).then(() => {
                this.emitter.emit("end");
            })), "f");
        }
        return __classPrivateFieldGet(this, _BatchCluster_endPromise, "f");
    }
    /**
     * Submits `task` for processing by a `BatchProcess` instance.
     *
     * @return a Promise that is resolved or rejected once the task has been
     * attempted on an idle BatchProcess
     */
    enqueueTask(task) {
        if (this.ended) {
            task.reject(new Error("BatchCluster has ended, cannot enqueue " + task.command));
        }
        __classPrivateFieldGet(this, _BatchCluster_tasks, "f").push(task);
        // Run #onIdle now (not later), to make sure the task gets enqueued asap if
        // possible
        __classPrivateFieldGet(this, _BatchCluster_onIdleLater, "f").call(this);
        // (BatchProcess will call our #onIdleLater when tasks settle or when they
        // exit)
        return task.promise;
    }
    /**
     * @return true if all previously-enqueued tasks have settled
     */
    get isIdle() {
        return this.pendingTaskCount === 0 && this.busyProcCount === 0;
    }
    /**
     * @return the number of pending tasks
     */
    get pendingTaskCount() {
        return __classPrivateFieldGet(this, _BatchCluster_tasks, "f").length;
    }
    /**
     * @returns {number} the mean number of tasks completed by child processes
     */
    get meanTasksPerProc() {
        return __classPrivateFieldGet(this, _BatchCluster_tasksPerProc, "f").mean;
    }
    /**
     * @return the total number of child processes created by this instance
     */
    get spawnedProcCount() {
        return __classPrivateFieldGet(this, _BatchCluster_spawnedProcs, "f");
    }
    /**
     * @return the current number of spawned child processes. Some (or all) may be idle.
     */
    get procCount() {
        return __classPrivateFieldGet(this, _BatchCluster_procs, "f").length;
    }
    /**
     * @return the current number of child processes currently servicing tasks
     */
    get busyProcCount() {
        return (0, Array_1.count)(__classPrivateFieldGet(this, _BatchCluster_procs, "f"),
        // don't count procs that are starting up as "busy":
        (ea) => !ea.starting && !ea.ending && !ea.idle);
    }
    /**
     * @return the number of child processes that are currently starting up
     */
    get startingProcCount() {
        return (0, Array_1.count)(__classPrivateFieldGet(this, _BatchCluster_procs, "f"),
        // only count procs that are starting up (and not already ending):
        (ea) => ea.starting && !ea.ending);
    }
    /**
     * @return the current pending Tasks (mostly for testing)
     */
    get pendingTasks() {
        return __classPrivateFieldGet(this, _BatchCluster_tasks, "f");
    }
    /**
     * @return the current running Tasks (mostly for testing)
     */
    get currentTasks() {
        return __classPrivateFieldGet(this, _BatchCluster_procs, "f")
            .map((ea) => ea.currentTask)
            .filter((ea) => ea != null);
    }
    /**
     * For integration tests:
     */
    get internalErrorCount() {
        return __classPrivateFieldGet(this, _BatchCluster_internalErrorCount, "f");
    }
    /**
     * Verify that each BatchProcess PID is actually alive.
     *
     * @return the spawned PIDs that are still in the process table.
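     *
     * A small illustrative sketch, where `bc` is a BatchCluster instance as in
     * the class-level example:
     *
     * @example
     * // Compare bookkeeping against the live process table:
     * const alive = bc.pids();
     * console.log(alive.length + " of " + bc.procCount + " child processes are alive");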
     */
    pids() {
        const arr = [];
        for (const proc of [...__classPrivateFieldGet(this, _BatchCluster_procs, "f")]) {
            if (proc != null && proc.running()) {
                arr.push(proc.pid);
            }
        }
        return arr;
    }
    /**
     * For diagnostics. Contents may change.
     */
    stats() {
        var _a;
        const readyProcCount = (0, Array_1.count)(__classPrivateFieldGet(this, _BatchCluster_procs, "f"), (ea) => ea.ready);
        return {
            pendingTaskCount: __classPrivateFieldGet(this, _BatchCluster_tasks, "f").length,
            currentProcCount: __classPrivateFieldGet(this, _BatchCluster_procs, "f").length,
            readyProcCount,
            maxProcCount: this.options.maxProcs,
            internalErrorCount: __classPrivateFieldGet(this, _BatchCluster_internalErrorCount, "f"),
            startErrorRatePerMinute: __classPrivateFieldGet(this, _BatchCluster_startErrorRate, "f").eventsPerMinute,
            msBeforeNextSpawn: Math.max(0, __classPrivateFieldGet(this, _BatchCluster_nextSpawnTime, "f") - Date.now()),
            spawnedProcCount: this.spawnedProcCount,
            childEndCounts: this.childEndCounts,
            ending: __classPrivateFieldGet(this, _BatchCluster_endPromise, "f") != null,
            ended: false === ((_a = __classPrivateFieldGet(this, _BatchCluster_endPromise, "f")) === null || _a === void 0 ? void 0 : _a.pending),
        };
    }
    /**
     * Get ended process counts (used for tests)
     */
    countEndedChildProcs(why) {
        var _a;
        return (_a = __classPrivateFieldGet(this, _BatchCluster_childEndCounts, "f").get(why)) !== null && _a !== void 0 ? _a : 0;
    }
    get childEndCounts() {
        return (0, Object_1.fromEntries)([...__classPrivateFieldGet(this, _BatchCluster_childEndCounts, "f").entries()]);
    }
    /**
     * Shut down any currently-running child processes. New child processes will
     * be started automatically to handle new tasks.
     */
    async closeChildProcesses(gracefully = true) {
        const procs = [...__classPrivateFieldGet(this, _BatchCluster_procs, "f")];
        __classPrivateFieldGet(this, _BatchCluster_procs, "f").length = 0;
        await Promise.all(procs.map((proc) => proc
            .end(gracefully, "ending")
            .catch((err) => this.emitter.emit("endError", (0, Error_1.asError)(err), proc))));
    }
    /**
     * Reset the maximum number of active child processes to `maxProcs`. Note that
     * this is handled gracefully: child processes are only reduced as tasks are
     * completed.
     */
    setMaxProcs(maxProcs) {
        this.options.maxProcs = maxProcs;
        // we may now be able to handle an enqueued task. Vacuum pids and see:
        __classPrivateFieldGet(this, _BatchCluster_onIdleLater, "f").call(this);
    }
    /**
     * Run maintenance on currently spawned child processes. This method is
     * normally invoked automatically as tasks are enqueued and processed.
     *
     * Only public for tests.
     */
    // NOT ASYNC: updates internal state. only exported for tests.
    vacuumProcs() {
        __classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_maybeCheckPids).call(this);
        const endPromises = [];
        let pidsToReap = Math.max(0, __classPrivateFieldGet(this, _BatchCluster_procs, "f").length - this.options.maxProcs);
        (0, Array_1.filterInPlace)(__classPrivateFieldGet(this, _BatchCluster_procs, "f"), (proc) => {
            var _a;
            // Only check `.idle` (not `.ready`) procs. We don't want to reap busy
            // procs unless we're ending, and unhealthy procs (that we want to reap)
            // won't be `.ready`.
            if (proc.idle) {
                // don't reap more than pidsToReap pids. We can't use #procs.length
                // within filterInPlace because #procs.length only changes at iteration
                // completion: the prior impl resulted in all idle pids getting reaped
                // when maxProcs was reduced.
                const why = (_a = proc.whyNotHealthy) !== null && _a !== void 0 ? _a : (--pidsToReap >= 0 ?
"tooMany" : null); if (why != null) { endPromises.push(proc.end(true, why)); return false; } proc.maybeRunHealthcheck(); } return true; }); return Promise.all(endPromises); } } exports.BatchCluster = BatchCluster; _BatchCluster_tasksPerProc = new WeakMap(), _BatchCluster_logger = new WeakMap(), _BatchCluster_procs = new WeakMap(), _BatchCluster_onIdleRequested = new WeakMap(), _BatchCluster_nextSpawnTime = new WeakMap(), _BatchCluster_lastPidsCheckTime = new WeakMap(), _BatchCluster_tasks = new WeakMap(), _BatchCluster_onIdleInterval = new WeakMap(), _BatchCluster_startErrorRate = new WeakMap(), _BatchCluster_spawnedProcs = new WeakMap(), _BatchCluster_endPromise = new WeakMap(), _BatchCluster_internalErrorCount = new WeakMap(), _BatchCluster_childEndCounts = new WeakMap(), _BatchCluster_beforeExitListener = new WeakMap(), _BatchCluster_exitListener = new WeakMap(), _BatchCluster_onIdleLater = new WeakMap(), _BatchCluster_instances = new WeakSet(), _BatchCluster_onIdle = function _BatchCluster_onIdle() { __classPrivateFieldSet(this, _BatchCluster_onIdleRequested, false, "f"); this.vacuumProcs(); while (__classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_execNextTask).call(this)) { // } __classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_maybeSpawnProcs).call(this); }, _BatchCluster_maybeCheckPids = function _BatchCluster_maybeCheckPids() { if (this.options.cleanupChildProcs && this.options.pidCheckIntervalMillis > 0 && __classPrivateFieldGet(this, _BatchCluster_lastPidsCheckTime, "f") + this.options.pidCheckIntervalMillis < Date.now()) { __classPrivateFieldSet(this, _BatchCluster_lastPidsCheckTime, Date.now(), "f"); void this.pids(); } }, _BatchCluster_execNextTask = function _BatchCluster_execNextTask(retries = 1) { if (__classPrivateFieldGet(this, _BatchCluster_tasks, "f").length === 0 || this.ended || retries < 0) return false; const readyProc = __classPrivateFieldGet(this, _BatchCluster_procs, "f").find((ea) => ea.ready); // no procs are idle and healthy :( if (readyProc == null) { return false; } const task = __classPrivateFieldGet(this, _BatchCluster_tasks, "f").shift(); if (task == null) { this.emitter.emit("internalError", new Error("unexpected null task")); return false; } const submitted = readyProc.execTask(task); if (!submitted) { // This isn't an internal error: the proc may have needed to run a health // check. Let's reschedule the task and try again: __classPrivateFieldGet(this, _BatchCluster_tasks, "f").push(task); // We don't want to return false here (it'll stop the onIdle loop) unless // we actually can't submit the task: return __classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_execNextTask).call(this, retries--); } __classPrivateFieldGet(this, _BatchCluster_logger, "f").call(this).trace("BatchCluster.#execNextTask(): submitted task", { child_pid: readyProc.pid, task, }); return submitted; }, _BatchCluster_maxSpawnDelay = function _BatchCluster_maxSpawnDelay() { // 10s delay is certainly long enough for .spawn() to return, even on a // loaded windows machine. 
    return Math.max(10000, this.options.spawnTimeoutMillis);
}, _BatchCluster_procsToSpawn = function _BatchCluster_procsToSpawn() {
    const remainingCapacity = this.options.maxProcs - __classPrivateFieldGet(this, _BatchCluster_procs, "f").length;
    // take into account starting procs, so one task doesn't result in multiple
    // processes being spawned:
    const requestedCapacity = __classPrivateFieldGet(this, _BatchCluster_tasks, "f").length - this.startingProcCount;
    const atLeast0 = Math.max(0, Math.min(remainingCapacity, requestedCapacity));
    return this.options.minDelayBetweenSpawnMillis === 0 ?
        // we can spin up multiple processes in parallel.
        atLeast0 :
        // Don't spin up more than 1:
        Math.min(1, atLeast0);
}, _BatchCluster_maybeSpawnProcs = async function _BatchCluster_maybeSpawnProcs() {
    var _a;
    let procsToSpawn = __classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_procsToSpawn).call(this);
    if (this.ended || __classPrivateFieldGet(this, _BatchCluster_nextSpawnTime, "f") > Date.now() || procsToSpawn === 0) {
        return;
    }
    // prevent concurrent runs:
    __classPrivateFieldSet(this, _BatchCluster_nextSpawnTime, Date.now() + __classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_maxSpawnDelay).call(this), "f");
    for (let i = 0; i < procsToSpawn; i++) {
        if (this.ended) {
            break;
        }
        // Kick the lock down the road:
        __classPrivateFieldSet(this, _BatchCluster_nextSpawnTime, Date.now() + __classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_maxSpawnDelay).call(this), "f");
        __classPrivateFieldSet(this, _BatchCluster_spawnedProcs, (_a = __classPrivateFieldGet(this, _BatchCluster_spawnedProcs, "f"), _a++, _a), "f");
        try {
            const proc = __classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_spawnNewProc).call(this);
            const result = await (0, Timeout_1.thenOrTimeout)(proc, this.options.spawnTimeoutMillis);
            if (result === Timeout_1.Timeout) {
                void proc
                    .then((bp) => {
                    void bp.end(false, "startError");
                    this.emitter.emit("startError", (0, Error_1.asError)("Failed to spawn process in " + this.options.spawnTimeoutMillis + "ms"), bp);
                })
                    .catch((err) => {
                    // this should only happen if the processFactory throws a
                    // rejection:
                    this.emitter.emit("startError", (0, Error_1.asError)(err));
                });
            }
            else {
                __classPrivateFieldGet(this, _BatchCluster_logger, "f").call(this).debug("BatchCluster.#maybeSpawnProcs() started healthy child process", { pid: result.pid });
            }
            // tasks may have been popped off or setMaxProcs may have reduced
            // maxProcs. Do this at the end so the for loop ends properly.
            procsToSpawn = Math.min(__classPrivateFieldGet(this, _BatchCluster_instances, "m", _BatchCluster_procsToSpawn).call(this), procsToSpawn);
        }
        catch (err) {
            this.emitter.emit("startError", (0, Error_1.asError)(err));
        }
    }
    // YAY WE MADE IT.
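    // Note: #nextSpawnTime doubles as a coarse lock. It was pushed
    // #maxSpawnDelay() into the future before and during each spawn attempt so
    // that concurrent #onIdle passes skip #maybeSpawnProcs; the lines below
    // reset it to the (much shorter) minDelayBetweenSpawnMillis-based delay.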
    // Only let more children get spawned after minDelay:
    const delay = Math.max(100, this.options.minDelayBetweenSpawnMillis);
    __classPrivateFieldSet(this, _BatchCluster_nextSpawnTime, Date.now() + delay, "f");
    // And schedule #onIdle for that time:
    node_timers_1.default.setTimeout(__classPrivateFieldGet(this, _BatchCluster_onIdleLater, "f"), delay).unref();
}, _BatchCluster_spawnNewProc =
// must only be called by this.#maybeSpawnProcs()
async function _BatchCluster_spawnNewProc() {
    // no matter how long it takes to spawn, always push the result into #procs
    // so we don't leak child processes:
    const proc = await this.options.processFactory();
    const result = new BatchProcess_1.BatchProcess(proc, this.options, __classPrivateFieldGet(this, _BatchCluster_onIdleLater, "f"));
    __classPrivateFieldGet(this, _BatchCluster_procs, "f").push(result);
    return result;
};
//# sourceMappingURL=BatchCluster.js.map