From 32ba8aea0b8c43b63c820ae36671a6ba20361273 Mon Sep 17 00:00:00 2001 From: Ruben Bridgewater Date: Sat, 17 Jun 2017 02:51:23 +0200 Subject: [PATCH 01/64] repl: fix old history error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Backport-PR-URL: https://github.com/nodejs/node/pull/14392 Backport-Reviewed-By: James M Snell PR-URL: https://github.com/nodejs/node/pull/13733 Reviewed-By: Luigi Pinca Reviewed-By: James M Snell Reviewed-By: Colin Ihrig Reviewed-By: Tobias Nießen --- lib/internal/repl.js | 64 +++++++++++-------- .../old-repl-history-file-faulty.json | 1 + test/fixtures/old-repl-history-file-obj.json | 4 ++ test/parallel/test-repl-persistent-history.js | 27 ++++++-- 4 files changed, 67 insertions(+), 29 deletions(-) create mode 100644 test/fixtures/old-repl-history-file-faulty.json create mode 100644 test/fixtures/old-repl-history-file-obj.json diff --git a/lib/internal/repl.js b/lib/internal/repl.js index 4c27fa2746390f..874bfd6fbbd183 100644 --- a/lib/internal/repl.js +++ b/lib/internal/repl.js @@ -15,6 +15,11 @@ module.exports.createInternalRepl = createRepl; // The debounce is to guard against code pasted into the REPL. const kDebounceHistoryMS = 15; +function _writeToOutput(repl, message) { + repl._writeToOutput(message); + repl._refreshLine(); +} + function createRepl(env, opts, cb) { if (typeof opts === 'function') { cb = opts; @@ -80,9 +85,8 @@ function setupHistory(repl, historyPath, oldHistoryPath, ready) { try { historyPath = path.join(os.homedir(), '.node_repl_history'); } catch (err) { - repl._writeToOutput('\nError: Could not get the home directory.\n' + - 'REPL session history will not be persisted.\n'); - repl._refreshLine(); + _writeToOutput(repl, '\nError: Could not get the home directory.\n' + + 'REPL session history will not be persisted.\n'); debug(err.stack); repl._historyPrev = _replHistoryMessage; @@ -103,9 +107,8 @@ function setupHistory(repl, historyPath, oldHistoryPath, ready) { if (err) { // Cannot open history file. // Don't crash, just don't persist history. - repl._writeToOutput('\nError: Could not open history file.\n' + - 'REPL session history will not be persisted.\n'); - repl._refreshLine(); + _writeToOutput(repl, '\nError: Could not open history file.\n' + + 'REPL session history will not be persisted.\n'); debug(err.stack); repl._historyPrev = _replHistoryMessage; @@ -132,18 +135,13 @@ function setupHistory(repl, historyPath, oldHistoryPath, ready) { } else if (oldHistoryPath === historyPath) { // If pre-v3.0, the user had set NODE_REPL_HISTORY_FILE to // ~/.node_repl_history, warn the user about it and proceed. - repl._writeToOutput( - '\nThe old repl history file has the same name and location as ' + + _writeToOutput( + repl, + '\nThe old repl history file has the same name and location as ' + `the new one i.e., ${historyPath} and is empty.\nUsing it as is.\n`); - repl._refreshLine(); } else if (oldHistoryPath) { - // Grab data from the older pre-v3.0 JSON NODE_REPL_HISTORY_FILE format. - repl._writeToOutput( - '\nConverting old JSON repl history to line-separated history.\n' + - `The new repl history file can be found at ${historyPath}.\n`); - repl._refreshLine(); - + let threw = false; try { // Pre-v3.0, repl history was stored as JSON. // Try and convert it to line separated history. @@ -152,15 +150,31 @@ function setupHistory(repl, historyPath, oldHistoryPath, ready) { // Only attempt to use the history if there was any. 
if (oldReplJSONHistory) repl.history = JSON.parse(oldReplJSONHistory); - if (!Array.isArray(repl.history)) { - throw new Error('Expected array, got ' + typeof repl.history); + if (Array.isArray(repl.history)) { + repl.history = repl.history.slice(0, repl.historySize); + } else { + threw = true; + _writeToOutput( + repl, + '\nError: The old history file data has to be an Array.\n' + + 'REPL session history will not be persisted.\n'); } - repl.history = repl.history.slice(0, repl.historySize); } catch (err) { - if (err.code !== 'ENOENT') { - return ready( - new Error(`Could not parse history data in ${oldHistoryPath}.`)); - } + // Cannot open or parse history file. + // Don't crash, just don't persist history. + threw = true; + const type = err instanceof SyntaxError ? 'parse' : 'open'; + _writeToOutput(repl, `\nError: Could not ${type} old history file.\n` + + 'REPL session history will not be persisted.\n'); + } + if (!threw) { + // Grab data from the older pre-v3.0 JSON NODE_REPL_HISTORY_FILE format. + _writeToOutput( + repl, + '\nConverted old JSON repl history to line-separated history.\n' + + `The new repl history file can be found at ${historyPath}.\n`); + } else { + repl.history = []; } } @@ -223,12 +237,12 @@ function setupHistory(repl, historyPath, oldHistoryPath, ready) { function _replHistoryMessage() { if (this.history.length === 0) { - this._writeToOutput( - '\nPersistent history support disabled. ' + + _writeToOutput( + this, + '\nPersistent history support disabled. ' + 'Set the NODE_REPL_HISTORY environment\nvariable to ' + 'a valid, user-writable path to enable.\n' ); - this._refreshLine(); } this._historyPrev = Interface.prototype._historyPrev; return this._historyPrev(); diff --git a/test/fixtures/old-repl-history-file-faulty.json b/test/fixtures/old-repl-history-file-faulty.json new file mode 100644 index 00000000000000..417b7b5370df81 --- /dev/null +++ b/test/fixtures/old-repl-history-file-faulty.json @@ -0,0 +1 @@ +undefined diff --git a/test/fixtures/old-repl-history-file-obj.json b/test/fixtures/old-repl-history-file-obj.json new file mode 100644 index 00000000000000..43160121b8f9e1 --- /dev/null +++ b/test/fixtures/old-repl-history-file-obj.json @@ -0,0 +1,4 @@ +{ + "a": "'=^.^='", + "b": "'hello world'" +} diff --git a/test/parallel/test-repl-persistent-history.js b/test/parallel/test-repl-persistent-history.js index fe219d716ebf45..8b47cadf11d196 100644 --- a/test/parallel/test-repl-persistent-history.js +++ b/test/parallel/test-repl-persistent-history.js @@ -56,13 +56,19 @@ const prompt = '> '; const replDisabled = '\nPersistent history support disabled. 
Set the ' + 'NODE_REPL_HISTORY environment\nvariable to a valid, ' + 'user-writable path to enable.\n'; -const convertMsg = '\nConverting old JSON repl history to line-separated ' + +const convertMsg = '\nConverted old JSON repl history to line-separated ' + 'history.\nThe new repl history file can be found at ' + `${path.join(common.tmpDir, '.node_repl_history')}.\n`; const homedirErr = '\nError: Could not get the home directory.\n' + 'REPL session history will not be persisted.\n'; const replFailedRead = '\nError: Could not open history file.\n' + 'REPL session history will not be persisted.\n'; +const oldHistoryFailedOpen = '\nError: Could not open old history file.\n' + + 'REPL session history will not be persisted.\n'; +const oldHistoryFailedParse = '\nError: Could not parse old history file.\n' + + 'REPL session history will not be persisted.\n'; +const oldHistoryObj = '\nError: The old history file data has to be an Array' + + '.\nREPL session history will not be persisted.\n'; const sameHistoryFilePaths = '\nThe old repl history file has the same name ' + 'and location as the new one i.e., ' + path.join(common.tmpDir, '.node_repl_history') + @@ -72,6 +78,10 @@ const fixtures = common.fixturesDir; const historyFixturePath = path.join(fixtures, '.node_repl_history'); const historyPath = path.join(common.tmpDir, '.fixture_copy_repl_history'); const historyPathFail = path.join(common.tmpDir, '.node_repl\u0000_history'); +const oldHistoryPathObj = path.join(fixtures, + 'old-repl-history-file-obj.json'); +const oldHistoryPathFaulty = path.join(fixtures, + 'old-repl-history-file-faulty.json'); const oldHistoryPath = path.join(fixtures, 'old-repl-history-file.json'); const enoentHistoryPath = path.join(fixtures, 'enoent-repl-history-file.json'); const emptyHistoryPath = path.join(fixtures, '.empty-repl-history-file'); @@ -93,10 +103,19 @@ const tests = [ expected: [prompt, replDisabled, prompt] }, { - env: { NODE_REPL_HISTORY: '', - NODE_REPL_HISTORY_FILE: enoentHistoryPath }, + env: { NODE_REPL_HISTORY_FILE: enoentHistoryPath }, test: [UP], - expected: [prompt, replDisabled, prompt] + expected: [prompt, oldHistoryFailedOpen, prompt] + }, + { + env: { NODE_REPL_HISTORY_FILE: oldHistoryPathObj }, + test: [UP], + expected: [prompt, oldHistoryObj, prompt] + }, + { + env: { NODE_REPL_HISTORY_FILE: oldHistoryPathFaulty }, + test: [UP], + expected: [prompt, oldHistoryFailedParse, prompt] }, { env: { NODE_REPL_HISTORY: '', From b5d0a03a9e7d06002baeebd6a2713887ebb208ad Mon Sep 17 00:00:00 2001 From: Rich Trott Date: Sat, 15 Jul 2017 07:22:57 -0700 Subject: [PATCH 02/64] test: fix error handling test-http-full-response The way it is currently written, test-http-full-response will fail if there is a problem with spawning that doesn't include `ab` or `api` in `stderr`, but it will fail with a misleading mismatched-calls `common.mustCall()` error. Alter the error handling so that it rethrows the actual error, providing better information. 
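To illustrate the error-handling pattern this commit switches to (a minimal sketch, not the test itself — the `runBenchmarkTool` helper, the `ab|apr` regex, and the skip message are placeholders): a spawn failure that only means the benchmarking tool is missing is skipped, while any other error is rethrown so the real failure surfaces instead of a misleading `common.mustCall()` mismatch.

```js
const { exec } = require('child_process');

// Hypothetical helper mirroring the error handling described above.
function runBenchmarkTool(command, callback) {
  exec(command, (err, stdout, stderr) => {
    if (err) {
      if (/ab|apr/i.test(stderr)) {
        // The tool is simply not installed: skip instead of failing.
        console.log(`1..0 # Skipped: problem spawning \`ab\`.\n${stderr}`);
        return process.exit(0);
      }
      // Any other spawn problem: rethrow so the actual error is reported.
      throw err;
    }
    callback(stdout);
  });
}

runBenchmarkTool('ab -V', (stdout) => console.log(stdout.split('\n')[0]));
```
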
PR-URL: https://github.com/nodejs/node/pull/14252 Reviewed-By: Yuta Hiroto Reviewed-By: Colin Ihrig Reviewed-By: Luigi Pinca Reviewed-By: Gibson Fahnestock Reviewed-By: James M Snell --- test/parallel/test-http-full-response.js | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/parallel/test-http-full-response.js b/test/parallel/test-http-full-response.js index 42213432090355..2f6fc8fc2a176c 100644 --- a/test/parallel/test-http-full-response.js +++ b/test/parallel/test-http-full-response.js @@ -46,8 +46,7 @@ function runAb(opts, callback) { common.printSkipMessage(`problem spawning \`ab\`.\n${stderr}`); process.reallyExit(0); } - process.exit(); - return; + throw err; } let m = /Document Length:\s*(\d+) bytes/i.exec(stdout); From d2121ab768fe2900b71000040821b584ed1e9cdb Mon Sep 17 00:00:00 2001 From: Lance Ball Date: Tue, 18 Jul 2017 15:02:47 -0400 Subject: [PATCH 03/64] doc: fix minor typo in cluster.md Adds a missing `'` in code example. PR-URL: https://github.com/nodejs/node/pull/14353 Fixes: https://github.com/nodejs/node/issues/14352 Reviewed-By: Colin Ihrig Reviewed-By: Gibson Fahnestock Reviewed-By: Luigi Pinca Reviewed-By: Vse Mozhet Byt --- doc/api/cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/api/cluster.md b/doc/api/cluster.md index 98545edf8bf6c4..278bd31e6fd8ad 100644 --- a/doc/api/cluster.md +++ b/doc/api/cluster.md @@ -191,7 +191,7 @@ added: v0.7.0 Similar to the `cluster.on('message')` event, but specific to this worker. -Within a worker, `process.on('message)` may also be used. +Within a worker, `process.on('message')` may also be used. See [`process` event: `'message'`][]. From 72febfd3b65acc056bd8bd779c593778b7c8e9cc Mon Sep 17 00:00:00 2001 From: SkyAo Date: Sun, 16 Jul 2017 16:54:05 +0800 Subject: [PATCH 04/64] test: replace concatenation with template literals MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use template literals instead of string concatenation in test/parallel/test-http-extra-response.js PR-URL: https://github.com/nodejs/node/pull/14296 Reviewed-By: Rich Trott Reviewed-By: David Cai Reviewed-By: Gibson Fahnestock Reviewed-By: Colin Ihrig Reviewed-By: Tobias Nießen Reviewed-By: Timothy Gu Reviewed-By: Gireesh Punathil Reviewed-By: James M Snell --- test/parallel/test-http-extra-response.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/parallel/test-http-extra-response.js b/test/parallel/test-http-extra-response.js index e71decb0c39b80..7c9514232c7ba8 100644 --- a/test/parallel/test-http-extra-response.js +++ b/test/parallel/test-http-extra-response.js @@ -32,7 +32,7 @@ const net = require('net'); const body = 'hello world\r\n'; const fullResponse = 'HTTP/1.1 500 Internal Server Error\r\n' + - 'Content-Length: ' + body.length + '\r\n' + + `Content-Length: ${body.length}\r\n` + 'Content-Type: text/plain\r\n' + 'Date: Fri + 18 Feb 2011 06:22:45 GMT\r\n' + 'Host: 10.20.149.2\r\n' + From 53ad91c3b1b258e26224edb4c4ca8c569b8f7864 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 17 Jul 2017 10:02:02 +0200 Subject: [PATCH 05/64] doc,stream: _transform happens one at a time Add a note to the stream docs specifying that at most a single call to _transform can happen, and the provided callback() should be used to process another chunk. 
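The guarantee documented by this commit — `_transform()` calls never overlap, and the next chunk is delivered only after `callback()` is invoked — can be seen in a small sketch (illustrative only; the `UpperCaseTransform` class below is not part of the patch):

```js
const { Transform } = require('stream');

// Each chunk is processed asynchronously; the stream queues further chunks
// until callback() signals that this one is done, so _transform() is never
// called in parallel.
class UpperCaseTransform extends Transform {
  _transform(chunk, encoding, callback) {
    setImmediate(() => {
      callback(null, chunk.toString().toUpperCase());
    });
  }
}

const upper = new UpperCaseTransform();
upper.on('data', (chunk) => process.stdout.write(chunk));
upper.write('hello ');
upper.write('world\n');
upper.end();
```
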
Fixes: https://github.com/nodejs/node/issues/3208 PR-URL: https://github.com/nodejs/node/pull/14321 Reviewed-By: James M Snell Reviewed-By: Rich Trott Reviewed-By: Benjamin Gruenbaum Reviewed-By: Luigi Pinca Reviewed-By: Colin Ihrig --- doc/api/stream.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/api/stream.md b/doc/api/stream.md index 042685fd9933b6..5ffa984244ac67 100644 --- a/doc/api/stream.md +++ b/doc/api/stream.md @@ -2033,6 +2033,10 @@ The `transform._transform()` method is prefixed with an underscore because it is internal to the class that defines it, and should never be called directly by user programs. +`transform._transform()` is never called in parallel; streams implement a +queue mechanism, and to receive the next chunk, `callback` must be +called, either synchronously or asychronously. + #### Class: stream.PassThrough The `stream.PassThrough` class is a trivial implementation of a [Transform][] From 34821f6400e35478a87bacd78b830cb31e4db07c Mon Sep 17 00:00:00 2001 From: Benjamin Gruenbaum Date: Sun, 16 Jul 2017 18:12:57 +0300 Subject: [PATCH 06/64] repl: don't terminate on null thrown Previous behavior was to assume an error is a proper error in the repl module. A check was added to not terminate the process on thrown repl errors that are `null` or `undefined`. PR-URL: https://github.com/nodejs/node/pull/14306 Fixes: https://github.com/nodejs/node/issues/12373 Reviewed-By: Anna Henningsen Reviewed-By: Timothy Gu Reviewed-By: Jeremiah Senkpiel Reviewed-By: Colin Ihrig pre + (line - 1)); } - top.outputStream.write((e.stack || e) + '\n'); + if (isError && e.stack) { + top.outputStream.write(`${e.stack}\n`); + } else { + top.outputStream.write(`Thrown: ${String(e)}\n`); + } top.bufferedCommand = ''; top.lines.level = []; top.displayPrompt(); diff --git a/test/parallel/test-repl-null-thrown.js b/test/parallel/test-repl-null-thrown.js new file mode 100644 index 00000000000000..1fe5d30396d534 --- /dev/null +++ b/test/parallel/test-repl-null-thrown.js @@ -0,0 +1,24 @@ +'use strict'; +require('../common'); +const repl = require('repl'); +const assert = require('assert'); +const Stream = require('stream'); + +const output = new Stream(); +let text = ''; +output.write = output.pause = output.resume = function(buf) { + text += buf.toString(); +}; + +const replserver = repl.start({ + output: output, + input: process.stdin +}); + +replserver.emit('line', 'process.nextTick(() => { throw null; })'); +replserver.emit('line', '.exit'); + +setTimeout(() => { + console.log(text); + assert(text.includes('Thrown: null')); +}, 0); From dc0a26f25421f64c34c8abff109582058999f8d4 Mon Sep 17 00:00:00 2001 From: Devin Boyer Date: Wed, 19 Jul 2017 13:22:01 -0400 Subject: [PATCH 07/64] doc: replace dead link in v8 module PR-URL: https://github.com/nodejs/node/pull/14372 Reviewed-By: Joyee Cheung Reviewed-By: Colin Ihrig Reviewed-By: Luigi Pinca --- doc/api/v8.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/api/v8.md b/doc/api/v8.md index e9ee98a4844cf6..11b3f1e3ae72b7 100644 --- a/doc/api/v8.md +++ b/doc/api/v8.md @@ -407,4 +407,4 @@ A subclass of [`Deserializer`][] corresponding to the format written by [V8]: https://developers.google.com/v8/ [`vm.Script`]: vm.html#vm_new_vm_script_code_options [here]: https://github.com/thlorenz/v8-flags/blob/master/flags-0.11.md -[`GetHeapSpaceStatistics`]: https://v8docs.nodesource.com/node-5.0/d5/dda/classv8_1_1_isolate.html#ac673576f24fdc7a33378f8f57e1d13a4 +[`GetHeapSpaceStatistics`]: 
https://v8docs.nodesource.com/node-8.0/d5/dda/classv8_1_1_isolate.html#ac673576f24fdc7a33378f8f57e1d13a4 From 3bc7d2a5ea18218f93dc1f0d434f8f0f1437abf0 Mon Sep 17 00:00:00 2001 From: Helianthus21 <740051540@qq.com> Date: Sun, 16 Jul 2017 15:56:25 +0800 Subject: [PATCH 08/64] test: replace string concat in test-fs-watchfile.js MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/14287 Reviewed-By: Joyee Cheung Reviewed-By: Rich Trott Reviewed-By: Michaël Zasso Reviewed-By: Gibson Fahnestock Reviewed-By: Benjamin Gruenbaum Reviewed-By: Colin Ihrig Reviewed-By: Tobias Nießen Reviewed-By: Gireesh Punathil Reviewed-By: Refael Ackermann Reviewed-By: James M Snell --- test/parallel/test-fs-watchfile.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/parallel/test-fs-watchfile.js b/test/parallel/test-fs-watchfile.js index 778174c64095cc..e7f991767b1d00 100644 --- a/test/parallel/test-fs-watchfile.js +++ b/test/parallel/test-fs-watchfile.js @@ -67,7 +67,7 @@ fs.watchFile(enoentFile, {interval: 0}, common.mustCall(function(curr, prev) { // Watch events should callback with a filename on supported systems. // Omitting AIX. It works but not reliably. if (common.isLinux || common.isOSX || common.isWindows) { - const dir = common.tmpDir + '/watch'; + const dir = path.join(common.tmpDir, 'watch'); fs.mkdir(dir, common.mustCall(function(err) { if (err) assert.fail(err); @@ -79,7 +79,7 @@ if (common.isLinux || common.isOSX || common.isWindows) { })); const interval = setInterval(() => { - fs.writeFile(`${dir}/foo.txt`, 'foo', common.mustCall(function(err) { + fs.writeFile(path.join(dir, 'foo.txt'), 'foo', common.mustCall((err) => { if (err) assert.fail(err); })); }, 1); From f6a03439d8d0facbe19b620ef55f2f15e51e20b9 Mon Sep 17 00:00:00 2001 From: Oleksandr Kushchak Date: Mon, 17 Jul 2017 10:11:09 +0100 Subject: [PATCH 09/64] docs: add note about fs.rmdir() fs.rmdir() on the file (not directory) results in different errors on Windows to everything else Fixes: https://github.com/nodejs/node/issues/8797 PR-URL: https://github.com/nodejs/node/pull/14323 Reviewed-By: Vse Mozhet Byt Reviewed-By: Gireesh Punathil Reviewed-By: Luigi Pinca Reviewed-By: James M Snell Reviewed-By: Jeremiah Senkpiel Reviewed-By: Colin Ihrig Reviewed-By: Gibson Fahnestock --- doc/api/fs.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/api/fs.md b/doc/api/fs.md index 7686563343bae2..e151f0fed89a15 100644 --- a/doc/api/fs.md +++ b/doc/api/fs.md @@ -2004,6 +2004,9 @@ changes: Asynchronous rmdir(2). No arguments other than a possible exception are given to the completion callback. +*Note*: Using `fs.rmdir()` on a file (not a directory) results in an `ENOENT` +error on Windows and an `ENOTDIR` error on POSIX. + ## fs.rmdirSync(path) + // ^ tagged bytecode array pointer + // + // UpdateInterruptBudget already handles adding the bytecode size to the + // length of the back-edge, so we just have to correct for the non-zero offset + // of the first bytecode. 
+ + const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag; + Node* profiling_weight = Int32Sub(TruncateWordToWord32(BytecodeOffset()), + Int32Constant(kFirstBytecodeOffset)); + UpdateInterruptBudget(profiling_weight, true); } Node* InterpreterAssembler::StackCheckTriggeredInterrupt() { @@ -1423,6 +1465,10 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) { return array; } +int InterpreterAssembler::CurrentBytecodeSize() const { + return Bytecodes::Size(bytecode_, operand_scale_); +} + } // namespace interpreter } // namespace internal } // namespace v8 diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h index 1317f377f48cca..c2e0bb3bd76fd1 100644 --- a/deps/v8/src/interpreter/interpreter-assembler.h +++ b/deps/v8/src/interpreter/interpreter-assembler.h @@ -32,6 +32,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { compiler::Node* BytecodeOperandFlag(int operand_index); // Returns the 32-bit zero-extended index immediate for bytecode operand // |operand_index| in the current bytecode. + compiler::Node* BytecodeOperandIdxInt32(int operand_index); + // Returns the word zero-extended index immediate for bytecode operand + // |operand_index| in the current bytecode. compiler::Node* BytecodeOperandIdx(int operand_index); // Returns the smi index immediate for bytecode operand |operand_index| // in the current bytecode. @@ -115,23 +118,25 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { compiler::Node* IncrementCallCount(compiler::Node* feedback_vector, compiler::Node* slot_id); - // Call JSFunction or Callable |function| with |arg_count| - // arguments (not including receiver) and the first argument - // located at |first_arg|. Type feedback is collected in the - // slot at index |slot_id|. - compiler::Node* CallJSWithFeedback(compiler::Node* function, - compiler::Node* context, - compiler::Node* first_arg, - compiler::Node* arg_count, - compiler::Node* slot_id, - compiler::Node* feedback_vector, - TailCallMode tail_call_mode); - - // Call JSFunction or Callable |function| with |arg_count| - // arguments (not including receiver) and the first argument - // located at |first_arg|. + // Call JSFunction or Callable |function| with |arg_count| arguments (not + // including receiver) and the first argument located at |first_arg|. Type + // feedback is collected in the slot at index |slot_id|. + // + // If the |receiver_mode| is kNullOrUndefined, then the receiver is implicitly + // undefined and |first_arg| is the first parameter. Otherwise, |first_arg| is + // the receiver and it is converted according to |receiver_mode|. + compiler::Node* CallJSWithFeedback( + compiler::Node* function, compiler::Node* context, + compiler::Node* first_arg, compiler::Node* arg_count, + compiler::Node* slot_id, compiler::Node* feedback_vector, + ConvertReceiverMode receiver_mode, TailCallMode tail_call_mode); + + // Call JSFunction or Callable |function| with |arg_count| arguments (not + // including receiver) and the first argument located at |first_arg|, possibly + // including the receiver depending on |receiver_mode|. 
compiler::Node* CallJS(compiler::Node* function, compiler::Node* context, compiler::Node* first_arg, compiler::Node* arg_count, + ConvertReceiverMode receiver_mode, TailCallMode tail_call_mode); // Call JSFunction or Callable |function| with |arg_count| @@ -223,9 +228,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // Returns the offset from the BytecodeArrayPointer of the current bytecode. compiler::Node* BytecodeOffset(); - // Save the bytecode offset to the interpreter frame. - void SaveBytecodeOffset(); - protected: Bytecode bytecode() const { return bytecode_; } static bool TargetSupportsUnalignedAccess(); @@ -301,6 +303,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // JumpIfWordNotEqual. void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset); + // Save the bytecode offset to the interpreter frame. + void SaveBytecodeOffset(); + // Updates and returns BytecodeOffset() advanced by the current bytecode's // size. Traces the exit of the current bytecode. compiler::Node* Advance(); @@ -334,6 +339,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { compiler::Node* DispatchToBytecodeHandlerEntry( compiler::Node* handler_entry, compiler::Node* bytecode_offset); + int CurrentBytecodeSize() const; + OperandScale operand_scale() const { return operand_scale_; } Bytecode bytecode_; diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc new file mode 100644 index 00000000000000..2a8f3c8810d13f --- /dev/null +++ b/deps/v8/src/interpreter/interpreter-generator.cc @@ -0,0 +1,3691 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/interpreter/interpreter-generator.h" + +#include +#include + +#include "src/builtins/builtins-arguments-gen.h" +#include "src/builtins/builtins-constructor-gen.h" +#include "src/builtins/builtins-forin-gen.h" +#include "src/code-events.h" +#include "src/code-factory.h" +#include "src/factory.h" +#include "src/ic/accessor-assembler.h" +#include "src/ic/binary-op-assembler.h" +#include "src/interpreter/bytecode-flags.h" +#include "src/interpreter/bytecodes.h" +#include "src/interpreter/interpreter-assembler.h" +#include "src/interpreter/interpreter-intrinsics-generator.h" +#include "src/objects-inl.h" + +namespace v8 { +namespace internal { +namespace interpreter { + +namespace { + +using compiler::Node; +typedef CodeStubAssembler::Label Label; +typedef CodeStubAssembler::Variable Variable; + +#define IGNITION_HANDLER(Name, BaseAssembler) \ + class Name##Assembler : public BaseAssembler { \ + public: \ + explicit Name##Assembler(compiler::CodeAssemblerState* state, \ + Bytecode bytecode, OperandScale scale) \ + : BaseAssembler(state, bytecode, scale) {} \ + static void Generate(compiler::CodeAssemblerState* state, \ + OperandScale scale); \ + \ + private: \ + void GenerateImpl(); \ + DISALLOW_COPY_AND_ASSIGN(Name##Assembler); \ + }; \ + void Name##Assembler::Generate(compiler::CodeAssemblerState* state, \ + OperandScale scale) { \ + Name##Assembler assembler(state, Bytecode::k##Name, scale); \ + state->SetInitialDebugInformation(#Name, __FILE__, __LINE__); \ + assembler.GenerateImpl(); \ + } \ + void Name##Assembler::GenerateImpl() + +// LdaZero +// +// Load literal '0' into the accumulator. 
+IGNITION_HANDLER(LdaZero, InterpreterAssembler) { + Node* zero_value = NumberConstant(0.0); + SetAccumulator(zero_value); + Dispatch(); +} + +// LdaSmi +// +// Load an integer literal into the accumulator as a Smi. +IGNITION_HANDLER(LdaSmi, InterpreterAssembler) { + Node* smi_int = BytecodeOperandImmSmi(0); + SetAccumulator(smi_int); + Dispatch(); +} + +// LdaConstant +// +// Load constant literal at |idx| in the constant pool into the accumulator. +IGNITION_HANDLER(LdaConstant, InterpreterAssembler) { + Node* index = BytecodeOperandIdx(0); + Node* constant = LoadConstantPoolEntry(index); + SetAccumulator(constant); + Dispatch(); +} + +// LdaUndefined +// +// Load Undefined into the accumulator. +IGNITION_HANDLER(LdaUndefined, InterpreterAssembler) { + SetAccumulator(UndefinedConstant()); + Dispatch(); +} + +// LdaNull +// +// Load Null into the accumulator. +IGNITION_HANDLER(LdaNull, InterpreterAssembler) { + SetAccumulator(NullConstant()); + Dispatch(); +} + +// LdaTheHole +// +// Load TheHole into the accumulator. +IGNITION_HANDLER(LdaTheHole, InterpreterAssembler) { + SetAccumulator(TheHoleConstant()); + Dispatch(); +} + +// LdaTrue +// +// Load True into the accumulator. +IGNITION_HANDLER(LdaTrue, InterpreterAssembler) { + SetAccumulator(TrueConstant()); + Dispatch(); +} + +// LdaFalse +// +// Load False into the accumulator. +IGNITION_HANDLER(LdaFalse, InterpreterAssembler) { + SetAccumulator(FalseConstant()); + Dispatch(); +} + +// Ldar +// +// Load accumulator with value from register . +IGNITION_HANDLER(Ldar, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* value = LoadRegister(reg_index); + SetAccumulator(value); + Dispatch(); +} + +// Star +// +// Store accumulator to register . +IGNITION_HANDLER(Star, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* accumulator = GetAccumulator(); + StoreRegister(accumulator, reg_index); + Dispatch(); +} + +// Mov +// +// Stores the value of register to register . +IGNITION_HANDLER(Mov, InterpreterAssembler) { + Node* src_index = BytecodeOperandReg(0); + Node* src_value = LoadRegister(src_index); + Node* dst_index = BytecodeOperandReg(1); + StoreRegister(src_value, dst_index); + Dispatch(); +} + +class InterpreterLoadGlobalAssembler : public InterpreterAssembler { + public: + InterpreterLoadGlobalAssembler(CodeAssemblerState* state, Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + void LdaGlobal(int slot_operand_index, int name_operand_index, + TypeofMode typeof_mode) { + // Must be kept in sync with AccessorAssembler::LoadGlobalIC. + + // Load the global via the LoadGlobalIC. + Node* feedback_vector = LoadFeedbackVector(); + Node* feedback_slot = BytecodeOperandIdx(slot_operand_index); + + AccessorAssembler accessor_asm(state()); + + Label try_handler(this, Label::kDeferred), miss(this, Label::kDeferred); + + // Fast path without frame construction for the data case. + { + Label done(this); + Variable var_result(this, MachineRepresentation::kTagged); + ExitPoint exit_point(this, &done, &var_result); + + accessor_asm.LoadGlobalIC_TryPropertyCellCase( + feedback_vector, feedback_slot, &exit_point, &try_handler, &miss, + CodeStubAssembler::INTPTR_PARAMETERS); + + Bind(&done); + SetAccumulator(var_result.value()); + Dispatch(); + } + + // Slow path with frame construction. 
+ { + Label done(this); + Variable var_result(this, MachineRepresentation::kTagged); + ExitPoint exit_point(this, &done, &var_result); + + Bind(&try_handler); + { + Node* context = GetContext(); + Node* smi_slot = SmiTag(feedback_slot); + Node* name_index = BytecodeOperandIdx(name_operand_index); + Node* name = LoadConstantPoolEntry(name_index); + + AccessorAssembler::LoadICParameters params(context, nullptr, name, + smi_slot, feedback_vector); + accessor_asm.LoadGlobalIC_TryHandlerCase(¶ms, typeof_mode, + &exit_point, &miss); + } + + Bind(&miss); + { + Node* context = GetContext(); + Node* smi_slot = SmiTag(feedback_slot); + Node* name_index = BytecodeOperandIdx(name_operand_index); + Node* name = LoadConstantPoolEntry(name_index); + + AccessorAssembler::LoadICParameters params(context, nullptr, name, + smi_slot, feedback_vector); + accessor_asm.LoadGlobalIC_MissCase(¶ms, &exit_point); + } + + Bind(&done); + { + SetAccumulator(var_result.value()); + Dispatch(); + } + } + } +}; + +// LdaGlobal +// +// Load the global with name in constant pool entry into the +// accumulator using FeedBackVector slot outside of a typeof. +IGNITION_HANDLER(LdaGlobal, InterpreterLoadGlobalAssembler) { + static const int kNameOperandIndex = 0; + static const int kSlotOperandIndex = 1; + + LdaGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF); +} + +// LdaGlobalInsideTypeof +// +// Load the global with name in constant pool entry into the +// accumulator using FeedBackVector slot inside of a typeof. +IGNITION_HANDLER(LdaGlobalInsideTypeof, InterpreterLoadGlobalAssembler) { + static const int kNameOperandIndex = 0; + static const int kSlotOperandIndex = 1; + + LdaGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF); +} + +class InterpreterStoreGlobalAssembler : public InterpreterAssembler { + public: + InterpreterStoreGlobalAssembler(CodeAssemblerState* state, Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + void StaGlobal(Callable ic) { + // Get the global object. + Node* context = GetContext(); + Node* native_context = LoadNativeContext(context); + Node* global = LoadContextElement(native_context, Context::EXTENSION_INDEX); + + // Store the global via the StoreIC. + Node* code_target = HeapConstant(ic.code()); + Node* constant_index = BytecodeOperandIdx(0); + Node* name = LoadConstantPoolEntry(constant_index); + Node* value = GetAccumulator(); + Node* raw_slot = BytecodeOperandIdx(1); + Node* smi_slot = SmiTag(raw_slot); + Node* feedback_vector = LoadFeedbackVector(); + CallStub(ic.descriptor(), code_target, context, global, name, value, + smi_slot, feedback_vector); + Dispatch(); + } +}; + +// StaGlobalSloppy +// +// Store the value in the accumulator into the global with name in constant pool +// entry using FeedBackVector slot in sloppy mode. +IGNITION_HANDLER(StaGlobalSloppy, InterpreterStoreGlobalAssembler) { + Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(isolate(), SLOPPY); + StaGlobal(ic); +} + +// StaGlobalStrict +// +// Store the value in the accumulator into the global with name in constant pool +// entry using FeedBackVector slot in strict mode. +IGNITION_HANDLER(StaGlobalStrict, InterpreterStoreGlobalAssembler) { + Callable ic = CodeFactory::StoreGlobalICInOptimizedCode(isolate(), STRICT); + StaGlobal(ic); +} + +// LdaContextSlot +// +// Load the object in |slot_index| of the context at |depth| in the context +// chain starting at |context| into the accumulator. 
+IGNITION_HANDLER(LdaContextSlot, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* context = LoadRegister(reg_index); + Node* slot_index = BytecodeOperandIdx(1); + Node* depth = BytecodeOperandUImm(2); + Node* slot_context = GetContextAtDepth(context, depth); + Node* result = LoadContextElement(slot_context, slot_index); + SetAccumulator(result); + Dispatch(); +} + +// LdaImmutableContextSlot +// +// Load the object in |slot_index| of the context at |depth| in the context +// chain starting at |context| into the accumulator. +IGNITION_HANDLER(LdaImmutableContextSlot, InterpreterAssembler) { + // Same as LdaContextSlot, should never be called. + UNREACHABLE(); +} + +// LdaCurrentContextSlot +// +// Load the object in |slot_index| of the current context into the accumulator. +IGNITION_HANDLER(LdaCurrentContextSlot, InterpreterAssembler) { + Node* slot_index = BytecodeOperandIdx(0); + Node* slot_context = GetContext(); + Node* result = LoadContextElement(slot_context, slot_index); + SetAccumulator(result); + Dispatch(); +} + +// LdaImmutableCurrentContextSlot +// +// Load the object in |slot_index| of the current context into the accumulator. +IGNITION_HANDLER(LdaImmutableCurrentContextSlot, InterpreterAssembler) { + // Same as LdaCurrentContextSlot, should never be called. + UNREACHABLE(); +} + +// StaContextSlot +// +// Stores the object in the accumulator into |slot_index| of the context at +// |depth| in the context chain starting at |context|. +IGNITION_HANDLER(StaContextSlot, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* reg_index = BytecodeOperandReg(0); + Node* context = LoadRegister(reg_index); + Node* slot_index = BytecodeOperandIdx(1); + Node* depth = BytecodeOperandUImm(2); + Node* slot_context = GetContextAtDepth(context, depth); + StoreContextElement(slot_context, slot_index, value); + Dispatch(); +} + +// StaCurrentContextSlot +// +// Stores the object in the accumulator into |slot_index| of the current +// context. +IGNITION_HANDLER(StaCurrentContextSlot, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* slot_index = BytecodeOperandIdx(0); + Node* slot_context = GetContext(); + StoreContextElement(slot_context, slot_index, value); + Dispatch(); +} + +// LdaLookupSlot +// +// Lookup the object with the name in constant pool entry |name_index| +// dynamically. +IGNITION_HANDLER(LdaLookupSlot, InterpreterAssembler) { + Node* name_index = BytecodeOperandIdx(0); + Node* name = LoadConstantPoolEntry(name_index); + Node* context = GetContext(); + Node* result = CallRuntime(Runtime::kLoadLookupSlot, context, name); + SetAccumulator(result); + Dispatch(); +} + +// LdaLookupSlotInsideTypeof +// +// Lookup the object with the name in constant pool entry |name_index| +// dynamically without causing a NoReferenceError. 
+IGNITION_HANDLER(LdaLookupSlotInsideTypeof, InterpreterAssembler) { + Node* name_index = BytecodeOperandIdx(0); + Node* name = LoadConstantPoolEntry(name_index); + Node* context = GetContext(); + Node* result = + CallRuntime(Runtime::kLoadLookupSlotInsideTypeof, context, name); + SetAccumulator(result); + Dispatch(); +} + +class InterpreterLookupContextSlotAssembler : public InterpreterAssembler { + public: + InterpreterLookupContextSlotAssembler(CodeAssemblerState* state, + Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + void LookupContextSlot(Runtime::FunctionId function_id) { + Node* context = GetContext(); + Node* name_index = BytecodeOperandIdx(0); + Node* slot_index = BytecodeOperandIdx(1); + Node* depth = BytecodeOperandUImm(2); + + Label slowpath(this, Label::kDeferred); + + // Check for context extensions to allow the fast path. + GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath); + + // Fast path does a normal load context. + { + Node* slot_context = GetContextAtDepth(context, depth); + Node* result = LoadContextElement(slot_context, slot_index); + SetAccumulator(result); + Dispatch(); + } + + // Slow path when we have to call out to the runtime. + Bind(&slowpath); + { + Node* name = LoadConstantPoolEntry(name_index); + Node* result = CallRuntime(function_id, context, name); + SetAccumulator(result); + Dispatch(); + } + } +}; + +// LdaLookupSlot +// +// Lookup the object with the name in constant pool entry |name_index| +// dynamically. +IGNITION_HANDLER(LdaLookupContextSlot, InterpreterLookupContextSlotAssembler) { + LookupContextSlot(Runtime::kLoadLookupSlot); +} + +// LdaLookupSlotInsideTypeof +// +// Lookup the object with the name in constant pool entry |name_index| +// dynamically without causing a NoReferenceError. +IGNITION_HANDLER(LdaLookupContextSlotInsideTypeof, + InterpreterLookupContextSlotAssembler) { + LookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof); +} + +class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler { + public: + InterpreterLookupGlobalAssembler(CodeAssemblerState* state, Bytecode bytecode, + OperandScale operand_scale) + : InterpreterLoadGlobalAssembler(state, bytecode, operand_scale) {} + + void LookupGlobalSlot(Runtime::FunctionId function_id) { + Node* context = GetContext(); + Node* depth = BytecodeOperandUImm(2); + + Label slowpath(this, Label::kDeferred); + + // Check for context extensions to allow the fast path + GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath); + + // Fast path does a normal load global + { + static const int kNameOperandIndex = 0; + static const int kSlotOperandIndex = 1; + + TypeofMode typeof_mode = + function_id == Runtime::kLoadLookupSlotInsideTypeof + ? INSIDE_TYPEOF + : NOT_INSIDE_TYPEOF; + + LdaGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode); + } + + // Slow path when we have to call out to the runtime + Bind(&slowpath); + { + Node* name_index = BytecodeOperandIdx(0); + Node* name = LoadConstantPoolEntry(name_index); + Node* result = CallRuntime(function_id, context, name); + SetAccumulator(result); + Dispatch(); + } + } +}; + +// LdaLookupGlobalSlot +// +// Lookup the object with the name in constant pool entry |name_index| +// dynamically. 
+IGNITION_HANDLER(LdaLookupGlobalSlot, InterpreterLookupGlobalAssembler) { + LookupGlobalSlot(Runtime::kLoadLookupSlot); +} + +// LdaLookupGlobalSlotInsideTypeof +// +// Lookup the object with the name in constant pool entry |name_index| +// dynamically without causing a NoReferenceError. +IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof, + InterpreterLookupGlobalAssembler) { + LookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof); +} + +// StaLookupSlotSloppy +// +// Store the object in accumulator to the object with the name in constant +// pool entry |name_index| in sloppy mode. +IGNITION_HANDLER(StaLookupSlotSloppy, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* index = BytecodeOperandIdx(0); + Node* name = LoadConstantPoolEntry(index); + Node* context = GetContext(); + Node* result = + CallRuntime(Runtime::kStoreLookupSlot_Sloppy, context, name, value); + SetAccumulator(result); + Dispatch(); +} + +// StaLookupSlotStrict +// +// Store the object in accumulator to the object with the name in constant +// pool entry |name_index| in strict mode. +IGNITION_HANDLER(StaLookupSlotStrict, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* index = BytecodeOperandIdx(0); + Node* name = LoadConstantPoolEntry(index); + Node* context = GetContext(); + Node* result = + CallRuntime(Runtime::kStoreLookupSlot_Strict, context, name, value); + SetAccumulator(result); + Dispatch(); +} + +// LdaNamedProperty +// +// Calls the LoadIC at FeedBackVector slot for and the name at +// constant pool entry . +IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) { + Node* feedback_vector = LoadFeedbackVector(); + Node* feedback_slot = BytecodeOperandIdx(2); + Node* smi_slot = SmiTag(feedback_slot); + + // Load receiver. + Node* register_index = BytecodeOperandReg(0); + Node* recv = LoadRegister(register_index); + + // Load the name. + // TODO(jgruber): Not needed for monomorphic smi handler constant/field case. + Node* constant_index = BytecodeOperandIdx(1); + Node* name = LoadConstantPoolEntry(constant_index); + + Node* context = GetContext(); + + Label done(this); + Variable var_result(this, MachineRepresentation::kTagged); + ExitPoint exit_point(this, &done, &var_result); + + AccessorAssembler::LoadICParameters params(context, recv, name, smi_slot, + feedback_vector); + AccessorAssembler accessor_asm(state()); + accessor_asm.LoadIC_BytecodeHandler(¶ms, &exit_point); + + Bind(&done); + { + SetAccumulator(var_result.value()); + Dispatch(); + } +} + +// KeyedLoadIC +// +// Calls the KeyedLoadIC at FeedBackVector slot for and the key +// in the accumulator. 
+IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) { + Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()); + Node* code_target = HeapConstant(ic.code()); + Node* reg_index = BytecodeOperandReg(0); + Node* object = LoadRegister(reg_index); + Node* name = GetAccumulator(); + Node* raw_slot = BytecodeOperandIdx(1); + Node* smi_slot = SmiTag(raw_slot); + Node* feedback_vector = LoadFeedbackVector(); + Node* context = GetContext(); + Node* result = CallStub(ic.descriptor(), code_target, context, object, name, + smi_slot, feedback_vector); + SetAccumulator(result); + Dispatch(); +} + +class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler { + public: + InterpreterStoreNamedPropertyAssembler(CodeAssemblerState* state, + Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + void StaNamedProperty(Callable ic) { + Node* code_target = HeapConstant(ic.code()); + Node* object_reg_index = BytecodeOperandReg(0); + Node* object = LoadRegister(object_reg_index); + Node* constant_index = BytecodeOperandIdx(1); + Node* name = LoadConstantPoolEntry(constant_index); + Node* value = GetAccumulator(); + Node* raw_slot = BytecodeOperandIdx(2); + Node* smi_slot = SmiTag(raw_slot); + Node* feedback_vector = LoadFeedbackVector(); + Node* context = GetContext(); + CallStub(ic.descriptor(), code_target, context, object, name, value, + smi_slot, feedback_vector); + Dispatch(); + } +}; + +// StaNamedPropertySloppy +// +// Calls the sloppy mode StoreIC at FeedBackVector slot for and +// the name in constant pool entry with the value in the +// accumulator. +IGNITION_HANDLER(StaNamedPropertySloppy, + InterpreterStoreNamedPropertyAssembler) { + Callable ic = CodeFactory::StoreICInOptimizedCode(isolate(), SLOPPY); + StaNamedProperty(ic); +} + +// StaNamedPropertyStrict +// +// Calls the strict mode StoreIC at FeedBackVector slot for and +// the name in constant pool entry with the value in the +// accumulator. +IGNITION_HANDLER(StaNamedPropertyStrict, + InterpreterStoreNamedPropertyAssembler) { + Callable ic = CodeFactory::StoreICInOptimizedCode(isolate(), STRICT); + StaNamedProperty(ic); +} + +// StaNamedOwnProperty +// +// Calls the StoreOwnIC at FeedBackVector slot for and +// the name in constant pool entry with the value in the +// accumulator. +IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) { + Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate()); + StaNamedProperty(ic); +} + +class InterpreterStoreKeyedPropertyAssembler : public InterpreterAssembler { + public: + InterpreterStoreKeyedPropertyAssembler(CodeAssemblerState* state, + Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + void StaKeyedProperty(Callable ic) { + Node* code_target = HeapConstant(ic.code()); + Node* object_reg_index = BytecodeOperandReg(0); + Node* object = LoadRegister(object_reg_index); + Node* name_reg_index = BytecodeOperandReg(1); + Node* name = LoadRegister(name_reg_index); + Node* value = GetAccumulator(); + Node* raw_slot = BytecodeOperandIdx(2); + Node* smi_slot = SmiTag(raw_slot); + Node* feedback_vector = LoadFeedbackVector(); + Node* context = GetContext(); + CallStub(ic.descriptor(), code_target, context, object, name, value, + smi_slot, feedback_vector); + Dispatch(); + } +}; + +// StaKeyedPropertySloppy +// +// Calls the sloppy mode KeyStoreIC at FeedBackVector slot for +// and the key with the value in the accumulator. 
+IGNITION_HANDLER(StaKeyedPropertySloppy, + InterpreterStoreKeyedPropertyAssembler) { + Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate(), SLOPPY); + StaKeyedProperty(ic); +} + +// StaKeyedPropertyStrict +// +// Calls the strict mode KeyStoreIC at FeedBackVector slot for +// and the key with the value in the accumulator. +IGNITION_HANDLER(StaKeyedPropertyStrict, + InterpreterStoreKeyedPropertyAssembler) { + Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate(), STRICT); + StaKeyedProperty(ic); +} + +// StaDataPropertyInLiteral +// +// Define a property with value from the accumulator in . +// Property attributes and whether set_function_name are stored in +// DataPropertyInLiteralFlags . +// +// This definition is not observable and is used only for definitions +// in object or class literals. +IGNITION_HANDLER(StaDataPropertyInLiteral, InterpreterAssembler) { + Node* object = LoadRegister(BytecodeOperandReg(0)); + Node* name = LoadRegister(BytecodeOperandReg(1)); + Node* value = GetAccumulator(); + Node* flags = SmiFromWord32(BytecodeOperandFlag(2)); + Node* vector_index = SmiTag(BytecodeOperandIdx(3)); + + Node* feedback_vector = LoadFeedbackVector(); + Node* context = GetContext(); + + CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name, + value, flags, feedback_vector, vector_index); + Dispatch(); +} + +IGNITION_HANDLER(CollectTypeProfile, InterpreterAssembler) { + Node* position = BytecodeOperandImmSmi(0); + Node* value = GetAccumulator(); + + Node* feedback_vector = LoadFeedbackVector(); + Node* context = GetContext(); + + CallRuntime(Runtime::kCollectTypeProfile, context, position, value, + feedback_vector); + Dispatch(); +} + +// LdaModuleVariable +// +// Load the contents of a module variable into the accumulator. The variable is +// identified by . is the depth of the current context +// relative to the module context. +IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) { + Node* cell_index = BytecodeOperandImmIntPtr(0); + Node* depth = BytecodeOperandUImm(1); + + Node* module_context = GetContextAtDepth(GetContext(), depth); + Node* module = LoadContextElement(module_context, Context::EXTENSION_INDEX); + + Label if_export(this), if_import(this), end(this); + Branch(IntPtrGreaterThan(cell_index, IntPtrConstant(0)), &if_export, + &if_import); + + Bind(&if_export); + { + Node* regular_exports = + LoadObjectField(module, Module::kRegularExportsOffset); + // The actual array index is (cell_index - 1). + Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1)); + Node* cell = LoadFixedArrayElement(regular_exports, export_index); + SetAccumulator(LoadObjectField(cell, Cell::kValueOffset)); + Goto(&end); + } + + Bind(&if_import); + { + Node* regular_imports = + LoadObjectField(module, Module::kRegularImportsOffset); + // The actual array index is (-cell_index - 1). + Node* import_index = IntPtrSub(IntPtrConstant(-1), cell_index); + Node* cell = LoadFixedArrayElement(regular_imports, import_index); + SetAccumulator(LoadObjectField(cell, Cell::kValueOffset)); + Goto(&end); + } + + Bind(&end); + Dispatch(); +} + +// StaModuleVariable +// +// Store accumulator to the module variable identified by . +// is the depth of the current context relative to the module context. 
+IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* cell_index = BytecodeOperandImmIntPtr(0); + Node* depth = BytecodeOperandUImm(1); + + Node* module_context = GetContextAtDepth(GetContext(), depth); + Node* module = LoadContextElement(module_context, Context::EXTENSION_INDEX); + + Label if_export(this), if_import(this), end(this); + Branch(IntPtrGreaterThan(cell_index, IntPtrConstant(0)), &if_export, + &if_import); + + Bind(&if_export); + { + Node* regular_exports = + LoadObjectField(module, Module::kRegularExportsOffset); + // The actual array index is (cell_index - 1). + Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1)); + Node* cell = LoadFixedArrayElement(regular_exports, export_index); + StoreObjectField(cell, Cell::kValueOffset, value); + Goto(&end); + } + + Bind(&if_import); + { + // Not supported (probably never). + Abort(kUnsupportedModuleOperation); + Goto(&end); + } + + Bind(&end); + Dispatch(); +} + +// PushContext +// +// Saves the current context in , and pushes the accumulator as the +// new current context. +IGNITION_HANDLER(PushContext, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* new_context = GetAccumulator(); + Node* old_context = GetContext(); + StoreRegister(old_context, reg_index); + SetContext(new_context); + Dispatch(); +} + +// PopContext +// +// Pops the current context and sets as the new context. +IGNITION_HANDLER(PopContext, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* context = LoadRegister(reg_index); + SetContext(context); + Dispatch(); +} + +class InterpreterBinaryOpAssembler : public InterpreterAssembler { + public: + InterpreterBinaryOpAssembler(CodeAssemblerState* state, Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + typedef Node* (BinaryOpAssembler::*BinaryOpGenerator)(Node* context, + Node* left, Node* right, + Node* slot, + Node* vector); + + void BinaryOpWithFeedback(BinaryOpGenerator generator) { + Node* reg_index = BytecodeOperandReg(0); + Node* lhs = LoadRegister(reg_index); + Node* rhs = GetAccumulator(); + Node* context = GetContext(); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + + BinaryOpAssembler binop_asm(state()); + Node* result = + (binop_asm.*generator)(context, lhs, rhs, slot_index, feedback_vector); + SetAccumulator(result); + Dispatch(); + } +}; + +// Add +// +// Add register to accumulator. +IGNITION_HANDLER(Add, InterpreterBinaryOpAssembler) { + BinaryOpWithFeedback(&BinaryOpAssembler::Generate_AddWithFeedback); +} + +// Sub +// +// Subtract register from accumulator. +IGNITION_HANDLER(Sub, InterpreterBinaryOpAssembler) { + BinaryOpWithFeedback(&BinaryOpAssembler::Generate_SubtractWithFeedback); +} + +// Mul +// +// Multiply accumulator by register . +IGNITION_HANDLER(Mul, InterpreterBinaryOpAssembler) { + BinaryOpWithFeedback(&BinaryOpAssembler::Generate_MultiplyWithFeedback); +} + +// Div +// +// Divide register by accumulator. +IGNITION_HANDLER(Div, InterpreterBinaryOpAssembler) { + BinaryOpWithFeedback(&BinaryOpAssembler::Generate_DivideWithFeedback); +} + +// Mod +// +// Modulo register by accumulator. +IGNITION_HANDLER(Mod, InterpreterBinaryOpAssembler) { + BinaryOpWithFeedback(&BinaryOpAssembler::Generate_ModulusWithFeedback); +} + +// AddSmi +// +// Adds an immediate value to the value in the accumulator. 
+IGNITION_HANDLER(AddSmi, InterpreterAssembler) { + Variable var_result(this, MachineRepresentation::kTagged); + Label fastpath(this), slowpath(this, Label::kDeferred), end(this); + + Node* left = GetAccumulator(); + Node* right = BytecodeOperandImmSmi(0); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + + // {right} is known to be a Smi. + // Check if the {left} is a Smi take the fast path. + Branch(TaggedIsSmi(left), &fastpath, &slowpath); + Bind(&fastpath); + { + // Try fast Smi addition first. + Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(left), + BitcastTaggedToWord(right)); + Node* overflow = Projection(1, pair); + + // Check if the Smi additon overflowed. + Label if_notoverflow(this); + Branch(overflow, &slowpath, &if_notoverflow); + Bind(&if_notoverflow); + { + UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall), + feedback_vector, slot_index); + var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair))); + Goto(&end); + } + } + Bind(&slowpath); + { + Node* context = GetContext(); + // TODO(ishell): pass slot as word-size value. + var_result.Bind(CallBuiltin(Builtins::kAddWithFeedback, context, left, + right, TruncateWordToWord32(slot_index), + feedback_vector)); + Goto(&end); + } + Bind(&end); + { + SetAccumulator(var_result.value()); + Dispatch(); + } +} + +// SubSmi +// +// Subtracts an immediate value from the value in the accumulator. +IGNITION_HANDLER(SubSmi, InterpreterAssembler) { + Variable var_result(this, MachineRepresentation::kTagged); + Label fastpath(this), slowpath(this, Label::kDeferred), end(this); + + Node* left = GetAccumulator(); + Node* right = BytecodeOperandImmSmi(0); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + + // {right} is known to be a Smi. + // Check if the {left} is a Smi take the fast path. + Branch(TaggedIsSmi(left), &fastpath, &slowpath); + Bind(&fastpath); + { + // Try fast Smi subtraction first. + Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(left), + BitcastTaggedToWord(right)); + Node* overflow = Projection(1, pair); + + // Check if the Smi subtraction overflowed. + Label if_notoverflow(this); + Branch(overflow, &slowpath, &if_notoverflow); + Bind(&if_notoverflow); + { + UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall), + feedback_vector, slot_index); + var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair))); + Goto(&end); + } + } + Bind(&slowpath); + { + Node* context = GetContext(); + // TODO(ishell): pass slot as word-size value. + var_result.Bind(CallBuiltin(Builtins::kSubtractWithFeedback, context, left, + right, TruncateWordToWord32(slot_index), + feedback_vector)); + Goto(&end); + } + Bind(&end); + { + SetAccumulator(var_result.value()); + Dispatch(); + } +} + +// MulSmi +// +// Multiplies an immediate value to the value in the accumulator. +IGNITION_HANDLER(MulSmi, InterpreterAssembler) { + Variable var_result(this, MachineRepresentation::kTagged); + Label fastpath(this), slowpath(this, Label::kDeferred), end(this); + + Node* left = GetAccumulator(); + Node* right = BytecodeOperandImmSmi(0); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + + // {right} is known to be a Smi. + // Check if the {left} is a Smi take the fast path. + Branch(TaggedIsSmi(left), &fastpath, &slowpath); + Bind(&fastpath); + { + // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi, + // in case of overflow. 
+ var_result.Bind(SmiMul(left, right)); + Node* feedback = SelectSmiConstant(TaggedIsSmi(var_result.value()), + BinaryOperationFeedback::kSignedSmall, + BinaryOperationFeedback::kNumber); + UpdateFeedback(feedback, feedback_vector, slot_index); + Goto(&end); + } + Bind(&slowpath); + { + Node* context = GetContext(); + // TODO(ishell): pass slot as word-size value. + var_result.Bind(CallBuiltin(Builtins::kMultiplyWithFeedback, context, left, + right, TruncateWordToWord32(slot_index), + feedback_vector)); + Goto(&end); + } + + Bind(&end); + { + SetAccumulator(var_result.value()); + Dispatch(); + } +} + +// DivSmi +// +// Divides the value in the accumulator by immediate value . +IGNITION_HANDLER(DivSmi, InterpreterAssembler) { + Variable var_result(this, MachineRepresentation::kTagged); + Label fastpath(this), slowpath(this, Label::kDeferred), end(this); + + Node* left = GetAccumulator(); + Node* right = BytecodeOperandImmSmi(0); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + + // {right} is known to be a Smi. + // Check if the {left} is a Smi take the fast path. + Branch(TaggedIsSmi(left), &fastpath, &slowpath); + Bind(&fastpath); + { + var_result.Bind(TrySmiDiv(left, right, &slowpath)); + UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall), + feedback_vector, slot_index); + Goto(&end); + } + Bind(&slowpath); + { + Node* context = GetContext(); + // TODO(ishell): pass slot as word-size value. + var_result.Bind(CallBuiltin(Builtins::kDivideWithFeedback, context, left, + right, TruncateWordToWord32(slot_index), + feedback_vector)); + Goto(&end); + } + + Bind(&end); + { + SetAccumulator(var_result.value()); + Dispatch(); + } +} + +// ModSmi +// +// Modulo accumulator by immediate value . +IGNITION_HANDLER(ModSmi, InterpreterAssembler) { + Variable var_result(this, MachineRepresentation::kTagged); + Label fastpath(this), slowpath(this, Label::kDeferred), end(this); + + Node* left = GetAccumulator(); + Node* right = BytecodeOperandImmSmi(0); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + + // {right} is known to be a Smi. + // Check if the {left} is a Smi take the fast path. + Branch(TaggedIsSmi(left), &fastpath, &slowpath); + Bind(&fastpath); + { + // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi. + var_result.Bind(SmiMod(left, right)); + Node* feedback = SelectSmiConstant(TaggedIsSmi(var_result.value()), + BinaryOperationFeedback::kSignedSmall, + BinaryOperationFeedback::kNumber); + UpdateFeedback(feedback, feedback_vector, slot_index); + Goto(&end); + } + Bind(&slowpath); + { + Node* context = GetContext(); + // TODO(ishell): pass slot as word-size value. 
+ var_result.Bind(CallBuiltin(Builtins::kModulusWithFeedback, context, left, + right, TruncateWordToWord32(slot_index), + feedback_vector)); + Goto(&end); + } + + Bind(&end); + { + SetAccumulator(var_result.value()); + Dispatch(); + } +} + +class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler { + public: + InterpreterBitwiseBinaryOpAssembler(CodeAssemblerState* state, + Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + void BitwiseBinaryOpWithFeedback(Token::Value bitwise_op) { + Node* reg_index = BytecodeOperandReg(0); + Node* lhs = LoadRegister(reg_index); + Node* rhs = GetAccumulator(); + Node* context = GetContext(); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + + Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned), + var_rhs_type_feedback(this, MachineRepresentation::kTaggedSigned); + Node* lhs_value = TruncateTaggedToWord32WithFeedback( + context, lhs, &var_lhs_type_feedback); + Node* rhs_value = TruncateTaggedToWord32WithFeedback( + context, rhs, &var_rhs_type_feedback); + Node* result = nullptr; + + switch (bitwise_op) { + case Token::BIT_OR: { + Node* value = Word32Or(lhs_value, rhs_value); + result = ChangeInt32ToTagged(value); + } break; + case Token::BIT_AND: { + Node* value = Word32And(lhs_value, rhs_value); + result = ChangeInt32ToTagged(value); + } break; + case Token::BIT_XOR: { + Node* value = Word32Xor(lhs_value, rhs_value); + result = ChangeInt32ToTagged(value); + } break; + case Token::SHL: { + Node* value = + Word32Shl(lhs_value, Word32And(rhs_value, Int32Constant(0x1f))); + result = ChangeInt32ToTagged(value); + } break; + case Token::SHR: { + Node* value = + Word32Shr(lhs_value, Word32And(rhs_value, Int32Constant(0x1f))); + result = ChangeUint32ToTagged(value); + } break; + case Token::SAR: { + Node* value = + Word32Sar(lhs_value, Word32And(rhs_value, Int32Constant(0x1f))); + result = ChangeInt32ToTagged(value); + } break; + default: + UNREACHABLE(); + } + + Node* result_type = SelectSmiConstant(TaggedIsSmi(result), + BinaryOperationFeedback::kSignedSmall, + BinaryOperationFeedback::kNumber); + + if (FLAG_debug_code) { + Label ok(this); + GotoIf(TaggedIsSmi(result), &ok); + Node* result_map = LoadMap(result); + AbortIfWordNotEqual(result_map, HeapNumberMapConstant(), + kExpectedHeapNumber); + Goto(&ok); + Bind(&ok); + } + + Node* input_feedback = + SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value()); + UpdateFeedback(SmiOr(result_type, input_feedback), feedback_vector, + slot_index); + SetAccumulator(result); + Dispatch(); + } +}; + +// BitwiseOr +// +// BitwiseOr register to accumulator. +IGNITION_HANDLER(BitwiseOr, InterpreterBitwiseBinaryOpAssembler) { + BitwiseBinaryOpWithFeedback(Token::BIT_OR); +} + +// BitwiseXor +// +// BitwiseXor register to accumulator. +IGNITION_HANDLER(BitwiseXor, InterpreterBitwiseBinaryOpAssembler) { + BitwiseBinaryOpWithFeedback(Token::BIT_XOR); +} + +// BitwiseAnd +// +// BitwiseAnd register to accumulator. +IGNITION_HANDLER(BitwiseAnd, InterpreterBitwiseBinaryOpAssembler) { + BitwiseBinaryOpWithFeedback(Token::BIT_AND); +} + +// ShiftLeft +// +// Left shifts register by the count specified in the accumulator. +// Register is converted to an int32 and the accumulator to uint32 +// before the operation. 5 lsb bits from the accumulator are used as count +// i.e. << (accumulator & 0x1F). 
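+// For example, `1 << 33` evaluates to 2, because only the five least
+// significant bits of the shift count are used (33 & 0x1F == 1).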
+IGNITION_HANDLER(ShiftLeft, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithFeedback(Token::SHL);
+}
+
+// ShiftRight
+//
+// Right shifts register by the count specified in the accumulator.
+// Result is sign extended. Register is converted to an int32 and the
+// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
+// are used as count i.e. >> (accumulator & 0x1F).
+IGNITION_HANDLER(ShiftRight, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithFeedback(Token::SAR);
+}
+
+// ShiftRightLogical
+//
+// Right shifts register by the count specified in the accumulator.
+// Result is zero-filled. The accumulator and register are converted to
+// uint32 before the operation. 5 lsb bits from the accumulator are used as
+// count i.e. >>> (accumulator & 0x1F).
+IGNITION_HANDLER(ShiftRightLogical, InterpreterBitwiseBinaryOpAssembler) {
+ BitwiseBinaryOpWithFeedback(Token::SHR);
+}
+
+// BitwiseOrSmi <imm>
+//
+// BitwiseOr accumulator with <imm>.
+IGNITION_HANDLER(BitwiseOrSmi, InterpreterAssembler) {
+ Node* left = GetAccumulator();
+ Node* right = BytecodeOperandImmSmi(0);
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+
+ Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
+ Node* lhs_value =
+ TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
+ Node* rhs_value = SmiToWord32(right);
+ Node* value = Word32Or(lhs_value, rhs_value);
+ Node* result = ChangeInt32ToTagged(value);
+ Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
+ feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
+}
+
+// BitwiseXorSmi <imm>
+//
+// BitwiseXor accumulator with <imm>.
+IGNITION_HANDLER(BitwiseXorSmi, InterpreterAssembler) {
+ Node* left = GetAccumulator();
+ Node* right = BytecodeOperandImmSmi(0);
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+
+ Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
+ Node* lhs_value =
+ TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
+ Node* rhs_value = SmiToWord32(right);
+ Node* value = Word32Xor(lhs_value, rhs_value);
+ Node* result = ChangeInt32ToTagged(value);
+ Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
+ feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
+}
+
+// BitwiseAndSmi <imm>
+//
+// BitwiseAnd accumulator with <imm>.
+IGNITION_HANDLER(BitwiseAndSmi, InterpreterAssembler) {
+ Node* left = GetAccumulator();
+ Node* right = BytecodeOperandImmSmi(0);
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+
+ Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
+ Node* lhs_value =
+ TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
+ Node* rhs_value = SmiToWord32(right);
+ Node* value = Word32And(lhs_value, rhs_value);
+ Node* result = ChangeInt32ToTagged(value);
+ Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
+ feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
+}
+
+// ShiftLeftSmi <imm>
+//
+// Left shifts accumulator by the count specified in <imm>.
+// The accumulator is converted to an int32 before the operation. The 5
+// lsb bits from <imm> are used as count i.e. << (<imm> & 0x1F).
+IGNITION_HANDLER(ShiftLeftSmi, InterpreterAssembler) {
+ Node* left = GetAccumulator();
+ Node* right = BytecodeOperandImmSmi(0);
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+
+ Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
+ Node* lhs_value =
+ TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
+ Node* rhs_value = SmiToWord32(right);
+ Node* shift_count = Word32And(rhs_value, Int32Constant(0x1f));
+ Node* value = Word32Shl(lhs_value, shift_count);
+ Node* result = ChangeInt32ToTagged(value);
+ Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
+ feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
+}
+
+// ShiftRightSmi <imm>
+//
+// Right shifts accumulator by the count specified in <imm>. Result is sign
+// extended. The accumulator is converted to an int32 before the operation. The
+// 5 lsb bits from <imm> are used as count i.e. >> (<imm> & 0x1F).
+IGNITION_HANDLER(ShiftRightSmi, InterpreterAssembler) {
+ Node* left = GetAccumulator();
+ Node* right = BytecodeOperandImmSmi(0);
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+
+ Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned);
+ Node* lhs_value =
+ TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback);
+ Node* rhs_value = SmiToWord32(right);
+ Node* shift_count = Word32And(rhs_value, Int32Constant(0x1f));
+ Node* value = Word32Sar(lhs_value, shift_count);
+ Node* result = ChangeInt32ToTagged(value);
+ Node* result_type = SelectSmiConstant(TaggedIsSmi(result),
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()),
+ feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
+}
+
+// ShiftRightLogicalSmi <imm>
+//
+// Right shifts accumulator by the count specified in <imm>. Result is zero
+// extended. The accumulator is converted to an int32 before the operation. The
+// 5 lsb bits from <imm> are used as count i.e. >>> (<imm> & 0x1F).
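+// For example, `-1 >>> 0` evaluates to 4294967295; unsigned results above the
+// Smi range are boxed as HeapNumbers by ChangeUint32ToTagged in the handler,
+// which in turn yields kNumber feedback.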
+IGNITION_HANDLER(ShiftRightLogicalSmi, InterpreterAssembler) { + Node* left = GetAccumulator(); + Node* right = BytecodeOperandImmSmi(0); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + Node* context = GetContext(); + + Variable var_lhs_type_feedback(this, MachineRepresentation::kTaggedSigned); + Node* lhs_value = + TruncateTaggedToWord32WithFeedback(context, left, &var_lhs_type_feedback); + Node* rhs_value = SmiToWord32(right); + Node* shift_count = Word32And(rhs_value, Int32Constant(0x1f)); + Node* value = Word32Shr(lhs_value, shift_count); + Node* result = ChangeUint32ToTagged(value); + Node* result_type = SelectSmiConstant(TaggedIsSmi(result), + BinaryOperationFeedback::kSignedSmall, + BinaryOperationFeedback::kNumber); + UpdateFeedback(SmiOr(result_type, var_lhs_type_feedback.value()), + feedback_vector, slot_index); + SetAccumulator(result); + Dispatch(); +} + +// ToName +// +// Convert the object referenced by the accumulator to a name. +IGNITION_HANDLER(ToName, InterpreterAssembler) { + Node* object = GetAccumulator(); + Node* context = GetContext(); + Node* result = ToName(context, object); + StoreRegister(result, BytecodeOperandReg(0)); + Dispatch(); +} + +// ToNumber +// +// Convert the object referenced by the accumulator to a number. +IGNITION_HANDLER(ToNumber, InterpreterAssembler) { + Node* object = GetAccumulator(); + Node* context = GetContext(); + + // Convert the {object} to a Number and collect feedback for the {object}. + Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned); + Variable var_result(this, MachineRepresentation::kTagged); + Label if_done(this), if_objectissmi(this), if_objectisnumber(this), + if_objectisother(this, Label::kDeferred); + + GotoIf(TaggedIsSmi(object), &if_objectissmi); + Node* object_map = LoadMap(object); + Branch(IsHeapNumberMap(object_map), &if_objectisnumber, &if_objectisother); + + Bind(&if_objectissmi); + { + var_result.Bind(object); + var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall)); + Goto(&if_done); + } + + Bind(&if_objectisnumber); + { + var_result.Bind(object); + var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber)); + Goto(&if_done); + } + + Bind(&if_objectisother); + { + // Convert the {object} to a Number. + Callable callable = CodeFactory::NonNumberToNumber(isolate()); + var_result.Bind(CallStub(callable, context, object)); + var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny)); + Goto(&if_done); + } + + Bind(&if_done); + StoreRegister(var_result.value(), BytecodeOperandReg(0)); + + // Record the type feedback collected for {object}. + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); + + Dispatch(); +} + +// ToObject +// +// Convert the object referenced by the accumulator to a JSReceiver. +IGNITION_HANDLER(ToObject, InterpreterAssembler) { + Callable callable(CodeFactory::ToObject(isolate())); + Node* target = HeapConstant(callable.code()); + Node* accumulator = GetAccumulator(); + Node* context = GetContext(); + Node* result = CallStub(callable.descriptor(), target, context, accumulator); + StoreRegister(result, BytecodeOperandReg(0)); + Dispatch(); +} + +// Inc +// +// Increments value in the accumulator by one. 
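+// Collects BinaryOperationFeedback in the slot given by operand 0:
+// kSignedSmall for a non-overflowing Smi increment, widened to kNumber,
+// kNumberOrOddball or kAny as conversions become necessary.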
+IGNITION_HANDLER(Inc, InterpreterAssembler) { + typedef CodeStubAssembler::Label Label; + typedef compiler::Node Node; + typedef CodeStubAssembler::Variable Variable; + + Node* value = GetAccumulator(); + Node* context = GetContext(); + Node* slot_index = BytecodeOperandIdx(0); + Node* feedback_vector = LoadFeedbackVector(); + + // Shared entry for floating point increment. + Label do_finc(this), end(this); + Variable var_finc_value(this, MachineRepresentation::kFloat64); + + // We might need to try again due to ToNumber conversion. + Variable value_var(this, MachineRepresentation::kTagged); + Variable result_var(this, MachineRepresentation::kTagged); + Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned); + Variable* loop_vars[] = {&value_var, &var_type_feedback}; + Label start(this, 2, loop_vars); + value_var.Bind(value); + var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNone)); + Goto(&start); + Bind(&start); + { + value = value_var.value(); + + Label if_issmi(this), if_isnotsmi(this); + Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi); + + Bind(&if_issmi); + { + // Try fast Smi addition first. + Node* one = SmiConstant(Smi::FromInt(1)); + Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value), + BitcastTaggedToWord(one)); + Node* overflow = Projection(1, pair); + + // Check if the Smi addition overflowed. + Label if_overflow(this), if_notoverflow(this); + Branch(overflow, &if_overflow, &if_notoverflow); + + Bind(&if_notoverflow); + var_type_feedback.Bind( + SmiOr(var_type_feedback.value(), + SmiConstant(BinaryOperationFeedback::kSignedSmall))); + result_var.Bind(BitcastWordToTaggedSigned(Projection(0, pair))); + Goto(&end); + + Bind(&if_overflow); + { + var_finc_value.Bind(SmiToFloat64(value)); + Goto(&do_finc); + } + } + + Bind(&if_isnotsmi); + { + // Check if the value is a HeapNumber. + Label if_valueisnumber(this), if_valuenotnumber(this, Label::kDeferred); + Node* value_map = LoadMap(value); + Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber); + + Bind(&if_valueisnumber); + { + // Load the HeapNumber value. + var_finc_value.Bind(LoadHeapNumberValue(value)); + Goto(&do_finc); + } + + Bind(&if_valuenotnumber); + { + // We do not require an Or with earlier feedback here because once we + // convert the value to a number, we cannot reach this path. We can + // only reach this path on the first pass when the feedback is kNone. + CSA_ASSERT(this, SmiEqual(var_type_feedback.value(), + SmiConstant(BinaryOperationFeedback::kNone))); + + Label if_valueisoddball(this), if_valuenotoddball(this); + Node* instance_type = LoadMapInstanceType(value_map); + Node* is_oddball = + Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)); + Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); + + Bind(&if_valueisoddball); + { + // Convert Oddball to Number and check again. + value_var.Bind(LoadObjectField(value, Oddball::kToNumberOffset)); + var_type_feedback.Bind( + SmiConstant(BinaryOperationFeedback::kNumberOrOddball)); + Goto(&start); + } + + Bind(&if_valuenotoddball); + { + // Convert to a Number first and try again. 
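+ // NonNumberToNumber may invoke arbitrary JS (e.g. a valueOf method), so
+ // feedback is widened to kAny and the operation is retried from &start
+ // with the converted value.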
+ Callable callable = CodeFactory::NonNumberToNumber(isolate()); + var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny)); + value_var.Bind(CallStub(callable, context, value)); + Goto(&start); + } + } + } + } + + Bind(&do_finc); + { + Node* finc_value = var_finc_value.value(); + Node* one = Float64Constant(1.0); + Node* finc_result = Float64Add(finc_value, one); + var_type_feedback.Bind( + SmiOr(var_type_feedback.value(), + SmiConstant(BinaryOperationFeedback::kNumber))); + result_var.Bind(AllocateHeapNumberWithValue(finc_result)); + Goto(&end); + } + + Bind(&end); + UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); + + SetAccumulator(result_var.value()); + Dispatch(); +} + +// Dec +// +// Decrements value in the accumulator by one. +IGNITION_HANDLER(Dec, InterpreterAssembler) { + typedef CodeStubAssembler::Label Label; + typedef compiler::Node Node; + typedef CodeStubAssembler::Variable Variable; + + Node* value = GetAccumulator(); + Node* context = GetContext(); + Node* slot_index = BytecodeOperandIdx(0); + Node* feedback_vector = LoadFeedbackVector(); + + // Shared entry for floating point decrement. + Label do_fdec(this), end(this); + Variable var_fdec_value(this, MachineRepresentation::kFloat64); + + // We might need to try again due to ToNumber conversion. + Variable value_var(this, MachineRepresentation::kTagged); + Variable result_var(this, MachineRepresentation::kTagged); + Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned); + Variable* loop_vars[] = {&value_var, &var_type_feedback}; + Label start(this, 2, loop_vars); + var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNone)); + value_var.Bind(value); + Goto(&start); + Bind(&start); + { + value = value_var.value(); + + Label if_issmi(this), if_isnotsmi(this); + Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi); + + Bind(&if_issmi); + { + // Try fast Smi subtraction first. + Node* one = SmiConstant(Smi::FromInt(1)); + Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(value), + BitcastTaggedToWord(one)); + Node* overflow = Projection(1, pair); + + // Check if the Smi subtraction overflowed. + Label if_overflow(this), if_notoverflow(this); + Branch(overflow, &if_overflow, &if_notoverflow); + + Bind(&if_notoverflow); + var_type_feedback.Bind( + SmiOr(var_type_feedback.value(), + SmiConstant(BinaryOperationFeedback::kSignedSmall))); + result_var.Bind(BitcastWordToTaggedSigned(Projection(0, pair))); + Goto(&end); + + Bind(&if_overflow); + { + var_fdec_value.Bind(SmiToFloat64(value)); + Goto(&do_fdec); + } + } + + Bind(&if_isnotsmi); + { + // Check if the value is a HeapNumber. + Label if_valueisnumber(this), if_valuenotnumber(this, Label::kDeferred); + Node* value_map = LoadMap(value); + Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber); + + Bind(&if_valueisnumber); + { + // Load the HeapNumber value. + var_fdec_value.Bind(LoadHeapNumberValue(value)); + Goto(&do_fdec); + } + + Bind(&if_valuenotnumber); + { + // We do not require an Or with earlier feedback here because once we + // convert the value to a number, we cannot reach this path. We can + // only reach this path on the first pass when the feedback is kNone. 
+ CSA_ASSERT(this, SmiEqual(var_type_feedback.value(), + SmiConstant(BinaryOperationFeedback::kNone))); + + Label if_valueisoddball(this), if_valuenotoddball(this); + Node* instance_type = LoadMapInstanceType(value_map); + Node* is_oddball = + Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)); + Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); + + Bind(&if_valueisoddball); + { + // Convert Oddball to Number and check again. + value_var.Bind(LoadObjectField(value, Oddball::kToNumberOffset)); + var_type_feedback.Bind( + SmiConstant(BinaryOperationFeedback::kNumberOrOddball)); + Goto(&start); + } + + Bind(&if_valuenotoddball); + { + // Convert to a Number first and try again. + Callable callable = CodeFactory::NonNumberToNumber(isolate()); + var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny)); + value_var.Bind(CallStub(callable, context, value)); + Goto(&start); + } + } + } + } + + Bind(&do_fdec); + { + Node* fdec_value = var_fdec_value.value(); + Node* one = Float64Constant(1.0); + Node* fdec_result = Float64Sub(fdec_value, one); + var_type_feedback.Bind( + SmiOr(var_type_feedback.value(), + SmiConstant(BinaryOperationFeedback::kNumber))); + result_var.Bind(AllocateHeapNumberWithValue(fdec_result)); + Goto(&end); + } + + Bind(&end); + UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); + + SetAccumulator(result_var.value()); + Dispatch(); +} + +// LogicalNot +// +// Perform logical-not on the accumulator, first casting the +// accumulator to a boolean value if required. +// ToBooleanLogicalNot +IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) { + Node* value = GetAccumulator(); + Variable result(this, MachineRepresentation::kTagged); + Label if_true(this), if_false(this), end(this); + Node* true_value = BooleanConstant(true); + Node* false_value = BooleanConstant(false); + BranchIfToBooleanIsTrue(value, &if_true, &if_false); + Bind(&if_true); + { + result.Bind(false_value); + Goto(&end); + } + Bind(&if_false); + { + result.Bind(true_value); + Goto(&end); + } + Bind(&end); + SetAccumulator(result.value()); + Dispatch(); +} + +// LogicalNot +// +// Perform logical-not on the accumulator, which must already be a boolean +// value. +IGNITION_HANDLER(LogicalNot, InterpreterAssembler) { + Node* value = GetAccumulator(); + Variable result(this, MachineRepresentation::kTagged); + Label if_true(this), if_false(this), end(this); + Node* true_value = BooleanConstant(true); + Node* false_value = BooleanConstant(false); + Branch(WordEqual(value, true_value), &if_true, &if_false); + Bind(&if_true); + { + result.Bind(false_value); + Goto(&end); + } + Bind(&if_false); + { + if (FLAG_debug_code) { + AbortIfWordNotEqual(value, false_value, + BailoutReason::kExpectedBooleanValue); + } + result.Bind(true_value); + Goto(&end); + } + Bind(&end); + SetAccumulator(result.value()); + Dispatch(); +} + +// TypeOf +// +// Load the accumulator with the string representating type of the +// object in the accumulator. +IGNITION_HANDLER(TypeOf, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* result = Typeof(value); + SetAccumulator(result); + Dispatch(); +} + +// DeletePropertyStrict +// +// Delete the property specified in the accumulator from the object +// referenced by the register operand following strict mode semantics. 
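+// For example, `'use strict'; delete Object.prototype` throws a TypeError
+// because the property is non-configurable, whereas the sloppy-mode variant
+// below simply produces false.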
+IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* object = LoadRegister(reg_index); + Node* key = GetAccumulator(); + Node* context = GetContext(); + Node* result = + CallRuntime(Runtime::kDeleteProperty_Strict, context, object, key); + SetAccumulator(result); + Dispatch(); +} + +// DeletePropertySloppy +// +// Delete the property specified in the accumulator from the object +// referenced by the register operand following sloppy mode semantics. +IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* object = LoadRegister(reg_index); + Node* key = GetAccumulator(); + Node* context = GetContext(); + Node* result = + CallRuntime(Runtime::kDeleteProperty_Sloppy, context, object, key); + SetAccumulator(result); + Dispatch(); +} + +// GetSuperConstructor +// +// Get the super constructor from the object referenced by the accumulator. +// The result is stored in register |reg|. +IGNITION_HANDLER(GetSuperConstructor, InterpreterAssembler) { + Node* active_function = GetAccumulator(); + Node* context = GetContext(); + Node* result = GetSuperConstructor(active_function, context); + Node* reg = BytecodeOperandReg(0); + StoreRegister(result, reg); + Dispatch(); +} + +class InterpreterJSCallAssembler : public InterpreterAssembler { + public: + InterpreterJSCallAssembler(CodeAssemblerState* state, Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + // Generates code to perform a JS call that collects type feedback. + void JSCall(ConvertReceiverMode receiver_mode, TailCallMode tail_call_mode) { + Node* function_reg = BytecodeOperandReg(0); + Node* function = LoadRegister(function_reg); + Node* first_arg_reg = BytecodeOperandReg(1); + Node* first_arg = RegisterLocation(first_arg_reg); + Node* arg_list_count = BytecodeOperandCount(2); + Node* args_count; + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { + // The receiver is implied, so it is not in the argument list. + args_count = arg_list_count; + } else { + // Subtract the receiver from the argument count. + Node* receiver_count = Int32Constant(1); + args_count = Int32Sub(arg_list_count, receiver_count); + } + Node* slot_id = BytecodeOperandIdx(3); + Node* feedback_vector = LoadFeedbackVector(); + Node* context = GetContext(); + Node* result = + CallJSWithFeedback(function, context, first_arg, args_count, slot_id, + feedback_vector, receiver_mode, tail_call_mode); + SetAccumulator(result); + Dispatch(); + } + + // Generates code to perform a JS call with a known number of arguments that + // collects type feedback. + void JSCallN(int arg_count, ConvertReceiverMode receiver_mode) { + // Indices and counts of operands on the bytecode. + const int kFirstArgumentOperandIndex = 1; + const int kReceiverOperandCount = + (receiver_mode == ConvertReceiverMode::kNullOrUndefined) ? 0 : 1; + const int kSlotOperandIndex = + kFirstArgumentOperandIndex + kReceiverOperandCount + arg_count; + // Indices and counts of parameters to the call stub. + const int kBoilerplateParameterCount = 7; + const int kReceiverParameterIndex = 5; + const int kReceiverParameterCount = 1; + // Only used in a DCHECK. 
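+ // The stub arguments assembled in {temp} below are laid out as:
+ //   [code target, function, arg count, slot index, feedback vector,
+ //    receiver, arg_1 .. arg_N, context],
+ // which is where kBoilerplateParameterCount == 7 comes from.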
+ USE(kReceiverParameterCount); + + Node* function_reg = BytecodeOperandReg(0); + Node* function = LoadRegister(function_reg); + std::array + temp; + Callable call_ic = CodeFactory::CallIC(isolate()); + temp[0] = HeapConstant(call_ic.code()); + temp[1] = function; + temp[2] = Int32Constant(arg_count); + temp[3] = BytecodeOperandIdxInt32(kSlotOperandIndex); + temp[4] = LoadFeedbackVector(); + + int parameter_index = kReceiverParameterIndex; + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { + // The first argument parameter (the receiver) is implied to be undefined. + Node* undefined_value = + HeapConstant(isolate()->factory()->undefined_value()); + temp[parameter_index++] = undefined_value; + } + // The bytecode argument operands are copied into the remaining argument + // parameters. + for (int i = 0; i < (kReceiverOperandCount + arg_count); ++i) { + Node* reg = BytecodeOperandReg(kFirstArgumentOperandIndex + i); + temp[parameter_index++] = LoadRegister(reg); + } + + DCHECK_EQ(parameter_index, + kReceiverParameterIndex + kReceiverParameterCount + arg_count); + temp[parameter_index] = GetContext(); + + Node* result = CallStubN(call_ic.descriptor(), 1, + arg_count + kBoilerplateParameterCount, &temp[0]); + SetAccumulator(result); + Dispatch(); + } +}; + +// Call +// +// Call a JSfunction or Callable in |callable| with the |receiver| and +// |arg_count| arguments in subsequent registers. Collect type feedback +// into |feedback_slot_id| +IGNITION_HANDLER(CallAnyReceiver, InterpreterJSCallAssembler) { + JSCall(ConvertReceiverMode::kAny, TailCallMode::kDisallow); +} + +IGNITION_HANDLER(CallProperty, InterpreterJSCallAssembler) { + JSCall(ConvertReceiverMode::kNotNullOrUndefined, TailCallMode::kDisallow); +} + +IGNITION_HANDLER(CallProperty0, InterpreterJSCallAssembler) { + JSCallN(0, ConvertReceiverMode::kNotNullOrUndefined); +} + +IGNITION_HANDLER(CallProperty1, InterpreterJSCallAssembler) { + JSCallN(1, ConvertReceiverMode::kNotNullOrUndefined); +} + +IGNITION_HANDLER(CallProperty2, InterpreterJSCallAssembler) { + JSCallN(2, ConvertReceiverMode::kNotNullOrUndefined); +} + +IGNITION_HANDLER(CallUndefinedReceiver, InterpreterJSCallAssembler) { + JSCall(ConvertReceiverMode::kNullOrUndefined, TailCallMode::kDisallow); +} + +IGNITION_HANDLER(CallUndefinedReceiver0, InterpreterJSCallAssembler) { + JSCallN(0, ConvertReceiverMode::kNullOrUndefined); +} + +IGNITION_HANDLER(CallUndefinedReceiver1, InterpreterJSCallAssembler) { + JSCallN(1, ConvertReceiverMode::kNullOrUndefined); +} + +IGNITION_HANDLER(CallUndefinedReceiver2, InterpreterJSCallAssembler) { + JSCallN(2, ConvertReceiverMode::kNullOrUndefined); +} + +// TailCall +// +// Tail call a JSfunction or Callable in |callable| with the |receiver| and +// |arg_count| arguments in subsequent registers. Collect type feedback +// into |feedback_slot_id| +IGNITION_HANDLER(TailCall, InterpreterJSCallAssembler) { + JSCall(ConvertReceiverMode::kAny, TailCallMode::kAllow); +} + +// CallRuntime +// +// Call the runtime function |function_id| with the first argument in +// register |first_arg| and |arg_count| arguments in subsequent +// registers. 
+IGNITION_HANDLER(CallRuntime, InterpreterAssembler) { + Node* function_id = BytecodeOperandRuntimeId(0); + Node* first_arg_reg = BytecodeOperandReg(1); + Node* first_arg = RegisterLocation(first_arg_reg); + Node* args_count = BytecodeOperandCount(2); + Node* context = GetContext(); + Node* result = CallRuntimeN(function_id, context, first_arg, args_count); + SetAccumulator(result); + Dispatch(); +} + +// InvokeIntrinsic +// +// Implements the semantic equivalent of calling the runtime function +// |function_id| with the first argument in |first_arg| and |arg_count| +// arguments in subsequent registers. +IGNITION_HANDLER(InvokeIntrinsic, InterpreterAssembler) { + Node* function_id = BytecodeOperandIntrinsicId(0); + Node* first_arg_reg = BytecodeOperandReg(1); + Node* arg_count = BytecodeOperandCount(2); + Node* context = GetContext(); + Node* result = GenerateInvokeIntrinsic(this, function_id, context, + first_arg_reg, arg_count); + SetAccumulator(result); + Dispatch(); +} + +// CallRuntimeForPair +// +// Call the runtime function |function_id| which returns a pair, with the +// first argument in register |first_arg| and |arg_count| arguments in +// subsequent registers. Returns the result in and +// +IGNITION_HANDLER(CallRuntimeForPair, InterpreterAssembler) { + // Call the runtime function. + Node* function_id = BytecodeOperandRuntimeId(0); + Node* first_arg_reg = BytecodeOperandReg(1); + Node* first_arg = RegisterLocation(first_arg_reg); + Node* args_count = BytecodeOperandCount(2); + Node* context = GetContext(); + Node* result_pair = + CallRuntimeN(function_id, context, first_arg, args_count, 2); + // Store the results in and + Node* first_return_reg = BytecodeOperandReg(3); + Node* second_return_reg = NextRegister(first_return_reg); + Node* result0 = Projection(0, result_pair); + Node* result1 = Projection(1, result_pair); + StoreRegister(result0, first_return_reg); + StoreRegister(result1, second_return_reg); + Dispatch(); +} + +// CallJSRuntime +// +// Call the JS runtime function that has the |context_index| with the receiver +// in register |receiver| and |arg_count| arguments in subsequent registers. +IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) { + Node* context_index = BytecodeOperandIdx(0); + Node* receiver_reg = BytecodeOperandReg(1); + Node* first_arg = RegisterLocation(receiver_reg); + Node* receiver_args_count = BytecodeOperandCount(2); + Node* receiver_count = Int32Constant(1); + Node* args_count = Int32Sub(receiver_args_count, receiver_count); + + // Get the function to call from the native context. + Node* context = GetContext(); + Node* native_context = LoadNativeContext(context); + Node* function = LoadContextElement(native_context, context_index); + + // Call the function. + Node* result = CallJS(function, context, first_arg, args_count, + ConvertReceiverMode::kAny, TailCallMode::kDisallow); + SetAccumulator(result); + Dispatch(); +} + +// CallWithSpread +// +// Call a JSfunction or Callable in |callable| with the receiver in +// |first_arg| and |arg_count - 1| arguments in subsequent registers. The +// final argument is always a spread. 
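+// For example, `f(a, ...rest)` is compiled to CallWithSpread; the spread
+// argument stays in its register and is expanded at run time by the callee
+// stub rather than by the bytecode.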
+// +IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) { + Node* callable_reg = BytecodeOperandReg(0); + Node* callable = LoadRegister(callable_reg); + Node* receiver_reg = BytecodeOperandReg(1); + Node* receiver_arg = RegisterLocation(receiver_reg); + Node* receiver_args_count = BytecodeOperandCount(2); + Node* receiver_count = Int32Constant(1); + Node* args_count = Int32Sub(receiver_args_count, receiver_count); + Node* context = GetContext(); + + // Call into Runtime function CallWithSpread which does everything. + Node* result = CallJSWithSpread(callable, context, receiver_arg, args_count); + SetAccumulator(result); + Dispatch(); +} + +// ConstructWithSpread +// +// Call the constructor in |constructor| with the first argument in register +// |first_arg| and |arg_count| arguments in subsequent registers. The final +// argument is always a spread. The new.target is in the accumulator. +// +IGNITION_HANDLER(ConstructWithSpread, InterpreterAssembler) { + Node* new_target = GetAccumulator(); + Node* constructor_reg = BytecodeOperandReg(0); + Node* constructor = LoadRegister(constructor_reg); + Node* first_arg_reg = BytecodeOperandReg(1); + Node* first_arg = RegisterLocation(first_arg_reg); + Node* args_count = BytecodeOperandCount(2); + Node* context = GetContext(); + Node* result = ConstructWithSpread(constructor, context, new_target, + first_arg, args_count); + SetAccumulator(result); + Dispatch(); +} + +// Construct +// +// Call operator construct with |constructor| and the first argument in +// register |first_arg| and |arg_count| arguments in subsequent +// registers. The new.target is in the accumulator. +// +IGNITION_HANDLER(Construct, InterpreterAssembler) { + Node* new_target = GetAccumulator(); + Node* constructor_reg = BytecodeOperandReg(0); + Node* constructor = LoadRegister(constructor_reg); + Node* first_arg_reg = BytecodeOperandReg(1); + Node* first_arg = RegisterLocation(first_arg_reg); + Node* args_count = BytecodeOperandCount(2); + Node* slot_id = BytecodeOperandIdx(3); + Node* feedback_vector = LoadFeedbackVector(); + Node* context = GetContext(); + Node* result = Construct(constructor, context, new_target, first_arg, + args_count, slot_id, feedback_vector); + SetAccumulator(result); + Dispatch(); +} + +class InterpreterCompareOpAssembler : public InterpreterAssembler { + public: + InterpreterCompareOpAssembler(CodeAssemblerState* state, Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + void CompareOpWithFeedback(Token::Value compare_op) { + Node* reg_index = BytecodeOperandReg(0); + Node* lhs = LoadRegister(reg_index); + Node* rhs = GetAccumulator(); + Node* context = GetContext(); + Node* slot_index = BytecodeOperandIdx(1); + Node* feedback_vector = LoadFeedbackVector(); + + Variable var_result(this, MachineRepresentation::kTagged), + var_fcmp_lhs(this, MachineRepresentation::kFloat64), + var_fcmp_rhs(this, MachineRepresentation::kFloat64), + non_number_value(this, MachineRepresentation::kTagged), + maybe_smi_value(this, MachineRepresentation::kTagged); + Label lhs_is_not_smi(this), do_fcmp(this), slow_path(this), + fast_path_dispatch(this); + + GotoIf(TaggedIsNotSmi(lhs), &lhs_is_not_smi); + { + Label rhs_is_not_smi(this); + GotoIf(TaggedIsNotSmi(rhs), &rhs_is_not_smi); + { + Comment("Do integer comparison"); + UpdateFeedback(SmiConstant(CompareOperationFeedback::kSignedSmall), + feedback_vector, slot_index); + Node* result; + switch (compare_op) { + case Token::LT: + result = 
SelectBooleanConstant(SmiLessThan(lhs, rhs)); + break; + case Token::LTE: + result = SelectBooleanConstant(SmiLessThanOrEqual(lhs, rhs)); + break; + case Token::GT: + result = SelectBooleanConstant(SmiLessThan(rhs, lhs)); + break; + case Token::GTE: + result = SelectBooleanConstant(SmiLessThanOrEqual(rhs, lhs)); + break; + case Token::EQ: + case Token::EQ_STRICT: + result = SelectBooleanConstant(WordEqual(lhs, rhs)); + break; + default: + UNREACHABLE(); + } + var_result.Bind(result); + Goto(&fast_path_dispatch); + } + + Bind(&rhs_is_not_smi); + { + Node* rhs_map = LoadMap(rhs); + Label rhs_is_not_number(this); + GotoIfNot(IsHeapNumberMap(rhs_map), &rhs_is_not_number); + + Comment("Convert lhs to float and load HeapNumber value from rhs"); + var_fcmp_lhs.Bind(SmiToFloat64(lhs)); + var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs)); + Goto(&do_fcmp); + + Bind(&rhs_is_not_number); + { + non_number_value.Bind(rhs); + maybe_smi_value.Bind(lhs); + Goto(&slow_path); + } + } + } + + Bind(&lhs_is_not_smi); + { + Label rhs_is_not_smi(this), lhs_is_not_number(this), + rhs_is_not_number(this); + + Node* lhs_map = LoadMap(lhs); + GotoIfNot(IsHeapNumberMap(lhs_map), &lhs_is_not_number); + + GotoIfNot(TaggedIsSmi(rhs), &rhs_is_not_smi); + + Comment("Convert rhs to double and load HeapNumber value from lhs"); + var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs)); + var_fcmp_rhs.Bind(SmiToFloat64(rhs)); + Goto(&do_fcmp); + + Bind(&rhs_is_not_smi); + { + Node* rhs_map = LoadMap(rhs); + GotoIfNot(IsHeapNumberMap(rhs_map), &rhs_is_not_number); + + Comment("Load HeapNumber values from lhs and rhs"); + var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs)); + var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs)); + Goto(&do_fcmp); + } + + Bind(&lhs_is_not_number); + { + non_number_value.Bind(lhs); + maybe_smi_value.Bind(rhs); + Goto(&slow_path); + } + + Bind(&rhs_is_not_number); + { + non_number_value.Bind(rhs); + maybe_smi_value.Bind(lhs); + Goto(&slow_path); + } + } + + Bind(&do_fcmp); + { + Comment("Do floating point comparison"); + Node* lhs_float = var_fcmp_lhs.value(); + Node* rhs_float = var_fcmp_rhs.value(); + UpdateFeedback(SmiConstant(CompareOperationFeedback::kNumber), + feedback_vector, slot_index); + + // Perform a fast floating point comparison. + Node* result; + switch (compare_op) { + case Token::LT: + result = SelectBooleanConstant(Float64LessThan(lhs_float, rhs_float)); + break; + case Token::LTE: + result = SelectBooleanConstant( + Float64LessThanOrEqual(lhs_float, rhs_float)); + break; + case Token::GT: + result = + SelectBooleanConstant(Float64GreaterThan(lhs_float, rhs_float)); + break; + case Token::GTE: + result = SelectBooleanConstant( + Float64GreaterThanOrEqual(lhs_float, rhs_float)); + break; + case Token::EQ: + case Token::EQ_STRICT: { + Label check_nan(this); + var_result.Bind(BooleanConstant(false)); + Branch(Float64Equal(lhs_float, rhs_float), &check_nan, + &fast_path_dispatch); + Bind(&check_nan); + result = SelectBooleanConstant(Float64Equal(lhs_float, lhs_float)); + } break; + default: + UNREACHABLE(); + } + var_result.Bind(result); + Goto(&fast_path_dispatch); + } + + Bind(&fast_path_dispatch); + { + SetAccumulator(var_result.value()); + Dispatch(); + } + + // Marking a block with more than one predecessor causes register allocator + // to fail (v8:5998). Add a dummy block as a workaround. 
+ Label slow_path_deferred(this, Label::kDeferred); + Bind(&slow_path); + Goto(&slow_path_deferred); + + Bind(&slow_path_deferred); + { + // When we reach here, one of the operands is not a Smi / HeapNumber and + // the other operand could be of any type. The cases where both of them + // are HeapNumbers / Smis are handled earlier. + Comment("Collect feedback for non HeapNumber cases."); + Label update_feedback_and_do_compare(this); + Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned); + var_type_feedback.Bind(SmiConstant(CompareOperationFeedback::kAny)); + + if (Token::IsOrderedRelationalCompareOp(compare_op)) { + Label check_for_oddball(this); + // Check for NumberOrOddball feedback. + Node* non_number_instance_type = + LoadInstanceType(non_number_value.value()); + GotoIf( + Word32Equal(non_number_instance_type, Int32Constant(ODDBALL_TYPE)), + &check_for_oddball); + + // Check for string feedback. + GotoIfNot(IsStringInstanceType(non_number_instance_type), + &update_feedback_and_do_compare); + + GotoIf(TaggedIsSmi(maybe_smi_value.value()), + &update_feedback_and_do_compare); + + Node* maybe_smi_instance_type = + LoadInstanceType(maybe_smi_value.value()); + GotoIfNot(IsStringInstanceType(maybe_smi_instance_type), + &update_feedback_and_do_compare); + + var_type_feedback.Bind(SmiConstant(CompareOperationFeedback::kString)); + Goto(&update_feedback_and_do_compare); + + Bind(&check_for_oddball); + { + Label compare_with_oddball_feedback(this); + GotoIf(TaggedIsSmi(maybe_smi_value.value()), + &compare_with_oddball_feedback); + + Node* maybe_smi_instance_type = + LoadInstanceType(maybe_smi_value.value()); + GotoIf(Word32Equal(maybe_smi_instance_type, + Int32Constant(HEAP_NUMBER_TYPE)), + &compare_with_oddball_feedback); + + Branch( + Word32Equal(maybe_smi_instance_type, Int32Constant(ODDBALL_TYPE)), + &compare_with_oddball_feedback, &update_feedback_and_do_compare); + + Bind(&compare_with_oddball_feedback); + { + var_type_feedback.Bind( + SmiConstant(CompareOperationFeedback::kNumberOrOddball)); + Goto(&update_feedback_and_do_compare); + } + } + } else { + Label not_string(this), both_are_strings(this); + + DCHECK(Token::IsEqualityOp(compare_op)); + + // If one of them is a Smi and the other is not a number, record "Any" + // feedback. Equality comparisons do not need feedback about oddballs. + GotoIf(TaggedIsSmi(maybe_smi_value.value()), + &update_feedback_and_do_compare); + + Node* maybe_smi_instance_type = + LoadInstanceType(maybe_smi_value.value()); + Node* non_number_instance_type = + LoadInstanceType(non_number_value.value()); + GotoIfNot(IsStringInstanceType(maybe_smi_instance_type), ¬_string); + + // If one value is string and other isn't record "Any" feedback. 
+ Branch(IsStringInstanceType(non_number_instance_type), + &both_are_strings, &update_feedback_and_do_compare); + + Bind(&both_are_strings); + { + Node* operand1_feedback = SelectSmiConstant( + Word32Equal(Word32And(maybe_smi_instance_type, + Int32Constant(kIsNotInternalizedMask)), + Int32Constant(kInternalizedTag)), + CompareOperationFeedback::kInternalizedString, + CompareOperationFeedback::kString); + + Node* operand2_feedback = SelectSmiConstant( + Word32Equal(Word32And(non_number_instance_type, + Int32Constant(kIsNotInternalizedMask)), + Int32Constant(kInternalizedTag)), + CompareOperationFeedback::kInternalizedString, + CompareOperationFeedback::kString); + + var_type_feedback.Bind(SmiOr(operand1_feedback, operand2_feedback)); + Goto(&update_feedback_and_do_compare); + } + + Bind(¬_string); + { + // Check if both operands are of type JSReceiver. + GotoIfNot(IsJSReceiverInstanceType(maybe_smi_instance_type), + &update_feedback_and_do_compare); + + GotoIfNot(IsJSReceiverInstanceType(non_number_instance_type), + &update_feedback_and_do_compare); + + var_type_feedback.Bind( + SmiConstant(CompareOperationFeedback::kReceiver)); + Goto(&update_feedback_and_do_compare); + } + } + + Bind(&update_feedback_and_do_compare); + { + Comment("Do the full compare operation"); + UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); + Node* result; + switch (compare_op) { + case Token::EQ: + result = Equal(lhs, rhs, context); + break; + case Token::EQ_STRICT: + result = StrictEqual(lhs, rhs); + break; + case Token::LT: + result = RelationalComparison(CodeStubAssembler::kLessThan, lhs, + rhs, context); + break; + case Token::GT: + result = RelationalComparison(CodeStubAssembler::kGreaterThan, lhs, + rhs, context); + break; + case Token::LTE: + result = RelationalComparison(CodeStubAssembler::kLessThanOrEqual, + lhs, rhs, context); + break; + case Token::GTE: + result = RelationalComparison( + CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context); + break; + default: + UNREACHABLE(); + } + var_result.Bind(result); + SetAccumulator(var_result.value()); + Dispatch(); + } + } + } +}; + +// TestEqual +// +// Test if the value in the register equals the accumulator. +IGNITION_HANDLER(TestEqual, InterpreterCompareOpAssembler) { + CompareOpWithFeedback(Token::Value::EQ); +} + +// TestEqualStrict +// +// Test if the value in the register is strictly equal to the accumulator. +IGNITION_HANDLER(TestEqualStrict, InterpreterCompareOpAssembler) { + CompareOpWithFeedback(Token::Value::EQ_STRICT); +} + +// TestLessThan +// +// Test if the value in the register is less than the accumulator. +IGNITION_HANDLER(TestLessThan, InterpreterCompareOpAssembler) { + CompareOpWithFeedback(Token::Value::LT); +} + +// TestGreaterThan +// +// Test if the value in the register is greater than the accumulator. +IGNITION_HANDLER(TestGreaterThan, InterpreterCompareOpAssembler) { + CompareOpWithFeedback(Token::Value::GT); +} + +// TestLessThanOrEqual +// +// Test if the value in the register is less than or equal to the +// accumulator. +IGNITION_HANDLER(TestLessThanOrEqual, InterpreterCompareOpAssembler) { + CompareOpWithFeedback(Token::Value::LTE); +} + +// TestGreaterThanOrEqual +// +// Test if the value in the register is greater than or equal to the +// accumulator. +IGNITION_HANDLER(TestGreaterThanOrEqual, InterpreterCompareOpAssembler) { + CompareOpWithFeedback(Token::Value::GTE); +} + +// TestEqualStrictNoFeedback +// +// Test if the value in the register is strictly equal to the accumulator. 
+// Type feedback is not collected. +IGNITION_HANDLER(TestEqualStrictNoFeedback, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* lhs = LoadRegister(reg_index); + Node* rhs = GetAccumulator(); + // TODO(5310): This is called only when lhs and rhs are Smis (for ex: + // try-finally or generators) or strings (only when visiting + // ClassLiteralProperties). We should be able to optimize this and not perform + // the full strict equality. + Node* result = StrictEqual(lhs, rhs); + SetAccumulator(result); + Dispatch(); +} + +// TestIn +// +// Test if the object referenced by the register operand is a property of the +// object referenced by the accumulator. +IGNITION_HANDLER(TestIn, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* property = LoadRegister(reg_index); + Node* object = GetAccumulator(); + Node* context = GetContext(); + SetAccumulator(HasProperty(object, property, context)); + Dispatch(); +} + +// TestInstanceOf +// +// Test if the object referenced by the register is an an instance of type +// referenced by the accumulator. +IGNITION_HANDLER(TestInstanceOf, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* name = LoadRegister(reg_index); + Node* object = GetAccumulator(); + Node* context = GetContext(); + SetAccumulator(InstanceOf(name, object, context)); + Dispatch(); +} + +// TestUndetectable +// +// Test if the value in the accumulator is undetectable (null, undefined or +// document.all). +IGNITION_HANDLER(TestUndetectable, InterpreterAssembler) { + Label return_false(this), end(this); + Node* object = GetAccumulator(); + + // If the object is an Smi then return false. + SetAccumulator(BooleanConstant(false)); + GotoIf(TaggedIsSmi(object), &end); + + // If it is a HeapObject, load the map and check for undetectable bit. + Node* map = LoadMap(object); + Node* map_bitfield = LoadMapBitField(map); + Node* map_undetectable = + Word32And(map_bitfield, Int32Constant(1 << Map::kIsUndetectable)); + Node* result = + SelectBooleanConstant(Word32NotEqual(map_undetectable, Int32Constant(0))); + SetAccumulator(result); + Goto(&end); + + Bind(&end); + Dispatch(); +} + +// TestNull +// +// Test if the value in accumulator is strictly equal to null. +IGNITION_HANDLER(TestNull, InterpreterAssembler) { + Node* object = GetAccumulator(); + Node* null_value = HeapConstant(isolate()->factory()->null_value()); + Node* result = SelectBooleanConstant(WordEqual(object, null_value)); + SetAccumulator(result); + Dispatch(); +} + +// TestUndefined +// +// Test if the value in the accumulator is strictly equal to undefined. +IGNITION_HANDLER(TestUndefined, InterpreterAssembler) { + Node* object = GetAccumulator(); + Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value()); + Node* result = SelectBooleanConstant(WordEqual(object, undefined_value)); + SetAccumulator(result); + Dispatch(); +} + +// TestTypeOf +// +// Tests if the object in the is typeof the literal represented +// by |literal_flag|. 
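+// For example, `typeof x === "number"` is compiled to TestTypeOf with
+// |literal_flag| == LiteralFlag::kNumber instead of materializing the
+// "number" string and performing a string comparison.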
+IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) { + Node* object = GetAccumulator(); + Node* literal_flag = BytecodeOperandFlag(0); + +#define MAKE_LABEL(name, lower_case) Label if_##lower_case(this); + TYPEOF_LITERAL_LIST(MAKE_LABEL) +#undef MAKE_LABEL + +#define LABEL_POINTER(name, lower_case) &if_##lower_case, + Label* labels[] = {TYPEOF_LITERAL_LIST(LABEL_POINTER)}; +#undef LABEL_POINTER + +#define CASE(name, lower_case) \ + static_cast(TestTypeOfFlags::LiteralFlag::k##name), + int32_t cases[] = {TYPEOF_LITERAL_LIST(CASE)}; +#undef CASE + + Label if_true(this), if_false(this), end(this), abort(this, Label::kDeferred); + + Switch(literal_flag, &abort, cases, labels, arraysize(cases)); + + Bind(&abort); + { + Comment("Abort"); + Abort(BailoutReason::kUnexpectedTestTypeofLiteralFlag); + Goto(&if_false); + } + Bind(&if_number); + { + Comment("IfNumber"); + GotoIfNumber(object, &if_true); + Goto(&if_false); + } + Bind(&if_string); + { + Comment("IfString"); + GotoIf(TaggedIsSmi(object), &if_false); + Branch(IsString(object), &if_true, &if_false); + } + Bind(&if_symbol); + { + Comment("IfSymbol"); + GotoIf(TaggedIsSmi(object), &if_false); + Branch(IsSymbol(object), &if_true, &if_false); + } + Bind(&if_boolean); + { + Comment("IfBoolean"); + GotoIf(WordEqual(object, BooleanConstant(true)), &if_true); + Branch(WordEqual(object, BooleanConstant(false)), &if_true, &if_false); + } + Bind(&if_undefined); + { + Comment("IfUndefined"); + GotoIf(TaggedIsSmi(object), &if_false); + // Check it is not null and the map has the undetectable bit set. + GotoIf(WordEqual(object, NullConstant()), &if_false); + Node* map_bitfield = LoadMapBitField(LoadMap(object)); + Node* undetectable_bit = + Word32And(map_bitfield, Int32Constant(1 << Map::kIsUndetectable)); + Branch(Word32Equal(undetectable_bit, Int32Constant(0)), &if_false, + &if_true); + } + Bind(&if_function); + { + Comment("IfFunction"); + GotoIf(TaggedIsSmi(object), &if_false); + // Check if callable bit is set and not undetectable. + Node* map_bitfield = LoadMapBitField(LoadMap(object)); + Node* callable_undetectable = Word32And( + map_bitfield, + Int32Constant(1 << Map::kIsUndetectable | 1 << Map::kIsCallable)); + Branch(Word32Equal(callable_undetectable, + Int32Constant(1 << Map::kIsCallable)), + &if_true, &if_false); + } + Bind(&if_object); + { + Comment("IfObject"); + GotoIf(TaggedIsSmi(object), &if_false); + + // If the object is null then return true. + GotoIf(WordEqual(object, NullConstant()), &if_true); + + // Check if the object is a receiver type and is not undefined or callable. + Node* map = LoadMap(object); + GotoIfNot(IsJSReceiverMap(map), &if_false); + Node* map_bitfield = LoadMapBitField(map); + Node* callable_undetectable = Word32And( + map_bitfield, + Int32Constant(1 << Map::kIsUndetectable | 1 << Map::kIsCallable)); + Branch(Word32Equal(callable_undetectable, Int32Constant(0)), &if_true, + &if_false); + } + Bind(&if_other); + { + // Typeof doesn't return any other string value. + Goto(&if_false); + } + + Bind(&if_false); + { + SetAccumulator(BooleanConstant(false)); + Goto(&end); + } + Bind(&if_true); + { + SetAccumulator(BooleanConstant(true)); + Goto(&end); + } + Bind(&end); + Dispatch(); +} + +// Jump +// +// Jump by number of bytes represented by the immediate operand |imm|. +IGNITION_HANDLER(Jump, InterpreterAssembler) { + Node* relative_jump = BytecodeOperandUImmWord(0); + Jump(relative_jump); +} + +// JumpConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool. 
+IGNITION_HANDLER(JumpConstant, InterpreterAssembler) { + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Jump(relative_jump); +} + +// JumpIfTrue +// +// Jump by number of bytes represented by an immediate operand if the +// accumulator contains true. This only works for boolean inputs, and +// will misbehave if passed arbitrary input values. +IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* relative_jump = BytecodeOperandUImmWord(0); + Node* true_value = BooleanConstant(true); + CSA_ASSERT(this, TaggedIsNotSmi(accumulator)); + CSA_ASSERT(this, IsBoolean(accumulator)); + JumpIfWordEqual(accumulator, true_value, relative_jump); +} + +// JumpIfTrueConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool +// if the accumulator contains true. This only works for boolean inputs, and +// will misbehave if passed arbitrary input values. +IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* true_value = BooleanConstant(true); + CSA_ASSERT(this, TaggedIsNotSmi(accumulator)); + CSA_ASSERT(this, IsBoolean(accumulator)); + JumpIfWordEqual(accumulator, true_value, relative_jump); +} + +// JumpIfFalse +// +// Jump by number of bytes represented by an immediate operand if the +// accumulator contains false. This only works for boolean inputs, and +// will misbehave if passed arbitrary input values. +IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* relative_jump = BytecodeOperandUImmWord(0); + Node* false_value = BooleanConstant(false); + CSA_ASSERT(this, TaggedIsNotSmi(accumulator)); + CSA_ASSERT(this, IsBoolean(accumulator)); + JumpIfWordEqual(accumulator, false_value, relative_jump); +} + +// JumpIfFalseConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool +// if the accumulator contains false. This only works for boolean inputs, and +// will misbehave if passed arbitrary input values. +IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Node* false_value = BooleanConstant(false); + CSA_ASSERT(this, TaggedIsNotSmi(accumulator)); + CSA_ASSERT(this, IsBoolean(accumulator)); + JumpIfWordEqual(accumulator, false_value, relative_jump); +} + +// JumpIfToBooleanTrue +// +// Jump by number of bytes represented by an immediate operand if the object +// referenced by the accumulator is true when the object is cast to boolean. +IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* relative_jump = BytecodeOperandUImmWord(0); + Label if_true(this), if_false(this); + BranchIfToBooleanIsTrue(value, &if_true, &if_false); + Bind(&if_true); + Jump(relative_jump); + Bind(&if_false); + Dispatch(); +} + +// JumpIfToBooleanTrueConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool +// if the object referenced by the accumulator is true when the object is cast +// to boolean. 
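+// The ToBoolean cast follows the usual JS rules: undefined, null, false,
+// +0, -0, NaN, the empty string and undetectable objects (e.g. document.all)
+// are treated as false; every other value takes the jump.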
+IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Label if_true(this), if_false(this); + BranchIfToBooleanIsTrue(value, &if_true, &if_false); + Bind(&if_true); + Jump(relative_jump); + Bind(&if_false); + Dispatch(); +} + +// JumpIfToBooleanFalse +// +// Jump by number of bytes represented by an immediate operand if the object +// referenced by the accumulator is false when the object is cast to boolean. +IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* relative_jump = BytecodeOperandUImmWord(0); + Label if_true(this), if_false(this); + BranchIfToBooleanIsTrue(value, &if_true, &if_false); + Bind(&if_true); + Dispatch(); + Bind(&if_false); + Jump(relative_jump); +} + +// JumpIfToBooleanFalseConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool +// if the object referenced by the accumulator is false when the object is cast +// to boolean. +IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) { + Node* value = GetAccumulator(); + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + Label if_true(this), if_false(this); + BranchIfToBooleanIsTrue(value, &if_true, &if_false); + Bind(&if_true); + Dispatch(); + Bind(&if_false); + Jump(relative_jump); +} + +// JumpIfNull +// +// Jump by number of bytes represented by an immediate operand if the object +// referenced by the accumulator is the null constant. +IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* null_value = HeapConstant(isolate()->factory()->null_value()); + Node* relative_jump = BytecodeOperandUImmWord(0); + JumpIfWordEqual(accumulator, null_value, relative_jump); +} + +// JumpIfNullConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool +// if the object referenced by the accumulator is the null constant. +IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* null_value = HeapConstant(isolate()->factory()->null_value()); + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + JumpIfWordEqual(accumulator, null_value, relative_jump); +} + +// JumpIfNotNull +// +// Jump by number of bytes represented by an immediate operand if the object +// referenced by the accumulator is not the null constant. +IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* null_value = HeapConstant(isolate()->factory()->null_value()); + Node* relative_jump = BytecodeOperandUImmWord(0); + JumpIfWordNotEqual(accumulator, null_value, relative_jump); +} + +// JumpIfNotNullConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool +// if the object referenced by the accumulator is not the null constant. 
+IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* null_value = HeapConstant(isolate()->factory()->null_value()); + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + JumpIfWordNotEqual(accumulator, null_value, relative_jump); +} + +// JumpIfUndefined +// +// Jump by number of bytes represented by an immediate operand if the object +// referenced by the accumulator is the undefined constant. +IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value()); + Node* relative_jump = BytecodeOperandUImmWord(0); + JumpIfWordEqual(accumulator, undefined_value, relative_jump); +} + +// JumpIfUndefinedConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool +// if the object referenced by the accumulator is the undefined constant. +IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value()); + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + JumpIfWordEqual(accumulator, undefined_value, relative_jump); +} + +// JumpIfNotUndefined +// +// Jump by number of bytes represented by an immediate operand if the object +// referenced by the accumulator is not the undefined constant. +IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value()); + Node* relative_jump = BytecodeOperandUImmWord(0); + JumpIfWordNotEqual(accumulator, undefined_value, relative_jump); +} + +// JumpIfNotUndefinedConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool +// if the object referenced by the accumulator is not the undefined constant. +IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value()); + Node* index = BytecodeOperandIdx(0); + Node* relative_jump = LoadAndUntagConstantPoolEntry(index); + JumpIfWordNotEqual(accumulator, undefined_value, relative_jump); +} + +// JumpIfJSReceiver +// +// Jump by number of bytes represented by an immediate operand if the object +// referenced by the accumulator is a JSReceiver. +IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) { + Node* accumulator = GetAccumulator(); + Node* relative_jump = BytecodeOperandUImmWord(0); + + Label if_object(this), if_notobject(this, Label::kDeferred), if_notsmi(this); + Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi); + + Bind(&if_notsmi); + Branch(IsJSReceiver(accumulator), &if_object, &if_notobject); + Bind(&if_object); + Jump(relative_jump); + + Bind(&if_notobject); + Dispatch(); +} + +// JumpIfJSReceiverConstant +// +// Jump by number of bytes in the Smi in the |idx| entry in the constant pool if +// the object referenced by the accumulator is a JSReceiver. 
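// JumpIfJSReceiver takes the branch only for non-Smi heap objects whose
// instance type falls in the JSReceiver range; the handler relies on all
// JSReceiver instance types being ordered last in the instance-type enum.
// A rough sketch of that predicate with a made-up threshold value
// (illustration only, not the real instance-type numbering):
#include <cstdint>

constexpr uint16_t kFirstJSReceiverTypeSketch = 0x0400;  // hypothetical boundary

bool IsJSReceiverSketch(bool is_smi, uint16_t instance_type) {
  if (is_smi) return false;  // Smis are numbers, never receivers
  return instance_type >= kFirstJSReceiverTypeSketch;
}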
+IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
+  Node* accumulator = GetAccumulator();
+  Node* index = BytecodeOperandIdx(0);
+  Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+
+  Label if_object(this), if_notobject(this), if_notsmi(this);
+  Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
+
+  Bind(&if_notsmi);
+  Branch(IsJSReceiver(accumulator), &if_object, &if_notobject);
+
+  Bind(&if_object);
+  Jump(relative_jump);
+
+  Bind(&if_notobject);
+  Dispatch();
+}
+
+// JumpIfNotHole
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is not the hole.
+IGNITION_HANDLER(JumpIfNotHole, InterpreterAssembler) {
+  Node* accumulator = GetAccumulator();
+  Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
+  Node* relative_jump = BytecodeOperandUImmWord(0);
+  JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstant
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// if the object referenced by the accumulator is not the hole constant.
+IGNITION_HANDLER(JumpIfNotHoleConstant, InterpreterAssembler) {
+  Node* accumulator = GetAccumulator();
+  Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
+  Node* index = BytecodeOperandIdx(0);
+  Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
+  JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpLoop
+//
+// Jump by number of bytes represented by the immediate operand |imm|. Also
+// performs a loop nesting check and potentially triggers OSR in case the
+// current OSR level matches (or exceeds) the specified |loop_depth|.
+IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
+  Node* relative_jump = BytecodeOperandUImmWord(0);
+  Node* loop_depth = BytecodeOperandImm(1);
+  Node* osr_level = LoadOSRNestingLevel();
+
+  // Check if OSR points at the given {loop_depth} are armed by comparing it to
+  // the current {osr_level} loaded from the header of the BytecodeArray.
+  Label ok(this), osr_armed(this, Label::kDeferred);
+  Node* condition = Int32GreaterThanOrEqual(loop_depth, osr_level);
+  Branch(condition, &ok, &osr_armed);
+
+  Bind(&ok);
+  JumpBackward(relative_jump);
+
+  Bind(&osr_armed);
+  {
+    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
+    Node* target = HeapConstant(callable.code());
+    Node* context = GetContext();
+    CallStub(callable.descriptor(), target, context);
+    JumpBackward(relative_jump);
+  }
+}
+
+// CreateRegExpLiteral
+//
+// Creates a regular expression literal for the given literal index, with the
+// given flags and the pattern loaded from the constant pool.
+IGNITION_HANDLER(CreateRegExpLiteral, InterpreterAssembler) {
+  Node* index = BytecodeOperandIdx(0);
+  Node* pattern = LoadConstantPoolEntry(index);
+  Node* literal_index = BytecodeOperandIdxSmi(1);
+  Node* flags = SmiFromWord32(BytecodeOperandFlag(2));
+  Node* closure = LoadRegister(Register::function_closure());
+  Node* context = GetContext();
+  ConstructorBuiltinsAssembler constructor_assembler(state());
+  Node* result = constructor_assembler.EmitFastCloneRegExp(
+      closure, literal_index, pattern, flags, context);
+  SetAccumulator(result);
+  Dispatch();
+}
+
+// CreateArrayLiteral
+//
+// Creates an array literal for the given literal index, with the given
+// CreateArrayLiteral flags and the constant elements loaded from the
+// constant pool.
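// The Create*Literal handlers below pack several values into a single flag
// operand and pull them apart with BitField-style helpers (IsSetWord32,
// DecodeWordFromWord32). A self-contained sketch of that decoding pattern;
// the field names and bit positions here are illustrative, not the actual
// V8 BitField layout:
#include <cstdint>

template <typename T, int kShift, int kBits>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  static bool is_set(uint32_t word) { return (word & kMask) != 0; }
};

using FastShallowCloneBitSketch = BitFieldSketch<bool, 0, 1>;
using LiteralFlagsBitsSketch = BitFieldSketch<uint32_t, 1, 5>;

// Usage mirroring the handler below: take the fast path when the bit is set,
// otherwise re-encode the remaining flag bits for the runtime call.
// bool fast_path = FastShallowCloneBitSketch::is_set(bytecode_flags);
// uint32_t runtime_flags = LiteralFlagsBitsSketch::decode(bytecode_flags);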
+IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
+  Node* literal_index = BytecodeOperandIdxSmi(1);
+  Node* closure = LoadRegister(Register::function_closure());
+  Node* context = GetContext();
+  Node* bytecode_flags = BytecodeOperandFlag(2);
+
+  Label fast_shallow_clone(this), call_runtime(this, Label::kDeferred);
+  Branch(
+      IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(bytecode_flags),
+      &fast_shallow_clone, &call_runtime);
+
+  Bind(&fast_shallow_clone);
+  {
+    ConstructorBuiltinsAssembler constructor_assembler(state());
+    Node* result = constructor_assembler.EmitFastCloneShallowArray(
+        closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE);
+    SetAccumulator(result);
+    Dispatch();
+  }
+
+  Bind(&call_runtime);
+  {
+    Node* flags_raw = DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
+        bytecode_flags);
+    Node* flags = SmiTag(flags_raw);
+    Node* index = BytecodeOperandIdx(0);
+    Node* constant_elements = LoadConstantPoolEntry(index);
+    Node* result = CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+                               literal_index, constant_elements, flags);
+    SetAccumulator(result);
+    Dispatch();
+  }
+}
+
+// CreateObjectLiteral
+//
+// Creates an object literal for the given literal index, with the given
+// CreateObjectLiteralFlags and the constant elements loaded from the
+// constant pool.
+IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
+  Node* literal_index = BytecodeOperandIdxSmi(1);
+  Node* bytecode_flags = BytecodeOperandFlag(2);
+  Node* closure = LoadRegister(Register::function_closure());
+
+  // Check if we can do a fast clone or have to call the runtime.
+  Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
+  Node* fast_clone_properties_count = DecodeWordFromWord32<
+      CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
+  Branch(WordNotEqual(fast_clone_properties_count, IntPtrConstant(0)),
+         &if_fast_clone, &if_not_fast_clone);
+
+  Bind(&if_fast_clone);
+  {
+    // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
+    ConstructorBuiltinsAssembler constructor_assembler(state());
+    Node* result = constructor_assembler.EmitFastCloneShallowObject(
+        &if_not_fast_clone, closure, literal_index,
+        fast_clone_properties_count);
+    StoreRegister(result, BytecodeOperandReg(3));
+    Dispatch();
+  }
+
+  Bind(&if_not_fast_clone);
+  {
+    // If we can't do a fast clone, call into the runtime.
+    Node* index = BytecodeOperandIdx(0);
+    Node* constant_elements = LoadConstantPoolEntry(index);
+    Node* context = GetContext();
+
+    Node* flags_raw = DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
+        bytecode_flags);
+    Node* flags = SmiTag(flags_raw);
+
+    Node* result = CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+                               literal_index, constant_elements, flags);
+    StoreRegister(result, BytecodeOperandReg(3));
+    // TODO(klaasb) build a single dispatch once the call is inlined
+    Dispatch();
+  }
+}
+
+// CreateClosure
+//
+// Creates a new closure for the SharedFunctionInfo at position |index| in the
+// constant pool, with the PretenureFlag taken from the flags operand.
+IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
+  Node* index = BytecodeOperandIdx(0);
+  Node* shared = LoadConstantPoolEntry(index);
+  Node* flags = BytecodeOperandFlag(2);
+  Node* context = GetContext();
+
+  Label call_runtime(this, Label::kDeferred);
+  GotoIfNot(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
+            &call_runtime);
+  ConstructorBuiltinsAssembler constructor_assembler(state());
+  Node* vector_index = BytecodeOperandIdx(1);
+  vector_index = SmiTag(vector_index);
+  Node* feedback_vector = LoadFeedbackVector();
+  SetAccumulator(constructor_assembler.EmitFastNewClosure(
+      shared, feedback_vector, vector_index, context));
+  Dispatch();
+
+  Bind(&call_runtime);
+  {
+    Node* tenured_raw =
+        DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
+    Node* tenured = SmiTag(tenured_raw);
+    feedback_vector = LoadFeedbackVector();
+    vector_index = BytecodeOperandIdx(1);
+    vector_index = SmiTag(vector_index);
+    Node* result = CallRuntime(Runtime::kInterpreterNewClosure, context, shared,
+                               feedback_vector, vector_index, tenured);
+    SetAccumulator(result);
+    Dispatch();
+  }
+}
+
+// CreateBlockContext
+//
+// Creates a new block context with the scope info constant at |index| and the
+// closure in the accumulator.
+IGNITION_HANDLER(CreateBlockContext, InterpreterAssembler) {
+  Node* index = BytecodeOperandIdx(0);
+  Node* scope_info = LoadConstantPoolEntry(index);
+  Node* closure = GetAccumulator();
+  Node* context = GetContext();
+  SetAccumulator(
+      CallRuntime(Runtime::kPushBlockContext, context, scope_info, closure));
+  Dispatch();
+}
+
+// CreateCatchContext
+//
+// Creates a new context for a catch block with the |exception| in a register,
+// the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the
+// closure in the accumulator.
+IGNITION_HANDLER(CreateCatchContext, InterpreterAssembler) {
+  Node* exception_reg = BytecodeOperandReg(0);
+  Node* exception = LoadRegister(exception_reg);
+  Node* name_idx = BytecodeOperandIdx(1);
+  Node* name = LoadConstantPoolEntry(name_idx);
+  Node* scope_info_idx = BytecodeOperandIdx(2);
+  Node* scope_info = LoadConstantPoolEntry(scope_info_idx);
+  Node* closure = GetAccumulator();
+  Node* context = GetContext();
+  SetAccumulator(CallRuntime(Runtime::kPushCatchContext, context, name,
+                             exception, scope_info, closure));
+  Dispatch();
+}
+
+// CreateFunctionContext
+//
+// Creates a new context with number of |slots| for the function closure.
+IGNITION_HANDLER(CreateFunctionContext, InterpreterAssembler) {
+  Node* closure = LoadRegister(Register::function_closure());
+  Node* slots = BytecodeOperandUImm(0);
+  Node* context = GetContext();
+  ConstructorBuiltinsAssembler constructor_assembler(state());
+  SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
+      closure, slots, context, FUNCTION_SCOPE));
+  Dispatch();
+}
+
+// CreateEvalContext
+//
+// Creates a new context with number of |slots| for an eval closure.
+IGNITION_HANDLER(CreateEvalContext, InterpreterAssembler) {
+  Node* closure = LoadRegister(Register::function_closure());
+  Node* slots = BytecodeOperandUImm(0);
+  Node* context = GetContext();
+  ConstructorBuiltinsAssembler constructor_assembler(state());
+  SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
+      closure, slots, context, EVAL_SCOPE));
+  Dispatch();
+}
+
+// CreateWithContext
+//
+// Creates a new context with the ScopeInfo at |scope_info_idx| for a
+// with-statement with the object in |register| and the closure in the
+// accumulator.
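// CreateFunctionContext and CreateEvalContext above allocate a fresh Context
// with |slots| user-visible slots on top of the fixed header slots. A rough
// model of the size computation; the header slot count and tagged-slot size
// are assumptions for illustration, not the actual Context layout:
#include <cstddef>

constexpr std::size_t kTaggedSizeSketch = 8;           // 64-bit tagged slot, no pointer compression
constexpr std::size_t kContextHeaderSlotsSketch = 4;   // e.g. closure, previous, extension, native_context

std::size_t ContextSizeInBytesSketch(std::size_t user_slots) {
  return (kContextHeaderSlotsSketch + user_slots) * kTaggedSizeSketch;
}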
+IGNITION_HANDLER(CreateWithContext, InterpreterAssembler) { + Node* reg_index = BytecodeOperandReg(0); + Node* object = LoadRegister(reg_index); + Node* scope_info_idx = BytecodeOperandIdx(1); + Node* scope_info = LoadConstantPoolEntry(scope_info_idx); + Node* closure = GetAccumulator(); + Node* context = GetContext(); + SetAccumulator(CallRuntime(Runtime::kPushWithContext, context, object, + scope_info, closure)); + Dispatch(); +} + +// CreateMappedArguments +// +// Creates a new mapped arguments object. +IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) { + Node* closure = LoadRegister(Register::function_closure()); + Node* context = GetContext(); + + Label if_duplicate_parameters(this, Label::kDeferred); + Label if_not_duplicate_parameters(this); + + // Check if function has duplicate parameters. + // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports + // duplicate parameters. + Node* shared_info = + LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset); + Node* compiler_hints = LoadObjectField( + shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset, + MachineType::Uint8()); + Node* duplicate_parameters_bit = Int32Constant( + 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte); + Node* compare = Word32And(compiler_hints, duplicate_parameters_bit); + Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters); + + Bind(&if_not_duplicate_parameters); + { + ArgumentsBuiltinsAssembler constructor_assembler(state()); + Node* result = + constructor_assembler.EmitFastNewSloppyArguments(context, closure); + SetAccumulator(result); + Dispatch(); + } + + Bind(&if_duplicate_parameters); + { + Node* result = + CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure); + SetAccumulator(result); + Dispatch(); + } +} + +// CreateUnmappedArguments +// +// Creates a new unmapped arguments object. +IGNITION_HANDLER(CreateUnmappedArguments, InterpreterAssembler) { + Node* context = GetContext(); + Node* closure = LoadRegister(Register::function_closure()); + ArgumentsBuiltinsAssembler builtins_assembler(state()); + Node* result = + builtins_assembler.EmitFastNewStrictArguments(context, closure); + SetAccumulator(result); + Dispatch(); +} + +// CreateRestParameter +// +// Creates a new rest parameter array. +IGNITION_HANDLER(CreateRestParameter, InterpreterAssembler) { + Node* closure = LoadRegister(Register::function_closure()); + Node* context = GetContext(); + ArgumentsBuiltinsAssembler builtins_assembler(state()); + Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure); + SetAccumulator(result); + Dispatch(); +} + +// StackCheck +// +// Performs a stack guard check. +IGNITION_HANDLER(StackCheck, InterpreterAssembler) { + Label ok(this), stack_check_interrupt(this, Label::kDeferred); + + Node* interrupt = StackCheckTriggeredInterrupt(); + Branch(interrupt, &stack_check_interrupt, &ok); + + Bind(&ok); + Dispatch(); + + Bind(&stack_check_interrupt); + { + Node* context = GetContext(); + CallRuntime(Runtime::kStackGuard, context); + Dispatch(); + } +} + +// SetPendingMessage +// +// Sets the pending message to the value in the accumulator, and returns the +// previous pending message in the accumulator. 
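// SetPendingMessage below simply swaps the accumulator with the isolate's
// pending-message slot, which lets generated code save and later restore the
// message around finally blocks. A trivial model of that exchange in plain
// C++ (IsolateSketch is a stand-in, not the real Isolate):
#include <utility>

struct IsolateSketch {
  void* pending_message_obj = nullptr;
};

void* ExchangePendingMessageSketch(IsolateSketch* isolate, void* new_message) {
  return std::exchange(isolate->pending_message_obj, new_message);
}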
+IGNITION_HANDLER(SetPendingMessage, InterpreterAssembler) { + Node* pending_message = ExternalConstant( + ExternalReference::address_of_pending_message_obj(isolate())); + Node* previous_message = Load(MachineType::TaggedPointer(), pending_message); + Node* new_message = GetAccumulator(); + StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message, + new_message); + SetAccumulator(previous_message); + Dispatch(); +} + +// Throw +// +// Throws the exception in the accumulator. +IGNITION_HANDLER(Throw, InterpreterAssembler) { + Node* exception = GetAccumulator(); + Node* context = GetContext(); + CallRuntime(Runtime::kThrow, context, exception); + // We shouldn't ever return from a throw. + Abort(kUnexpectedReturnFromThrow); +} + +// ReThrow +// +// Re-throws the exception in the accumulator. +IGNITION_HANDLER(ReThrow, InterpreterAssembler) { + Node* exception = GetAccumulator(); + Node* context = GetContext(); + CallRuntime(Runtime::kReThrow, context, exception); + // We shouldn't ever return from a throw. + Abort(kUnexpectedReturnFromThrow); +} + +// Return +// +// Return the value in the accumulator. +IGNITION_HANDLER(Return, InterpreterAssembler) { + UpdateInterruptBudgetOnReturn(); + Node* accumulator = GetAccumulator(); + Return(accumulator); +} + +// Debugger +// +// Call runtime to handle debugger statement. +IGNITION_HANDLER(Debugger, InterpreterAssembler) { + Node* context = GetContext(); + CallStub(CodeFactory::HandleDebuggerStatement(isolate()), context); + Dispatch(); +} + +// DebugBreak +// +// Call runtime to handle a debug break. +#define DEBUG_BREAK(Name, ...) \ + IGNITION_HANDLER(Name, InterpreterAssembler) { \ + Node* context = GetContext(); \ + Node* accumulator = GetAccumulator(); \ + Node* original_handler = \ + CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \ + MaybeDropFrames(context); \ + DispatchToBytecodeHandler(original_handler); \ + } +DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK); +#undef DEBUG_BREAK + +class InterpreterForInPrepareAssembler : public InterpreterAssembler { + public: + InterpreterForInPrepareAssembler(CodeAssemblerState* state, Bytecode bytecode, + OperandScale operand_scale) + : InterpreterAssembler(state, bytecode, operand_scale) {} + + void BuildForInPrepareResult(Node* output_register, Node* cache_type, + Node* cache_array, Node* cache_length) { + StoreRegister(cache_type, output_register); + output_register = NextRegister(output_register); + StoreRegister(cache_array, output_register); + output_register = NextRegister(output_register); + StoreRegister(cache_length, output_register); + } +}; + +// ForInPrepare +// +// Returns state for for..in loop execution based on the object in the register +// |receiver|. The object must not be null or undefined and must have been +// converted to a receiver already. +// The result is output in registers |cache_info_triple| to +// |cache_info_triple + 2|, with the registers holding cache_type, cache_array, +// and cache_length respectively. 
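// ForInPrepare below materializes a (cache_type, cache_array, cache_length)
// triple into three consecutive registers; ForInContinue, ForInNext and
// ForInStep then drive the loop over that state. A simplified model of the
// protocol over a plain vector of keys (illustration only):
#include <cstddef>
#include <string>
#include <vector>

struct ForInStateSketch {
  std::vector<std::string> cache_array;  // enumerable keys of the receiver
  std::size_t cache_length = 0;
  std::size_t index = 0;
};

// ForInContinue: keep looping while the index has not reached cache_length.
bool ForInContinueSketch(const ForInStateSketch& s) {
  return s.index != s.cache_length;
}

// ForInNext: load the key at the current index (valid only while Continue is true).
const std::string& ForInNextSketch(const ForInStateSketch& s) {
  return s.cache_array[s.index];
}

// ForInStep: advance the loop counter.
void ForInStepSketch(ForInStateSketch& s) { ++s.index; }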
+IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) { + Node* object_register = BytecodeOperandReg(0); + Node* output_register = BytecodeOperandReg(1); + Node* receiver = LoadRegister(object_register); + Node* context = GetContext(); + + Node* cache_type; + Node* cache_array; + Node* cache_length; + Label call_runtime(this, Label::kDeferred), + nothing_to_iterate(this, Label::kDeferred); + + ForInBuiltinsAssembler forin_assembler(state()); + std::tie(cache_type, cache_array, cache_length) = + forin_assembler.EmitForInPrepare(receiver, context, &call_runtime, + ¬hing_to_iterate); + + BuildForInPrepareResult(output_register, cache_type, cache_array, + cache_length); + Dispatch(); + + Bind(&call_runtime); + { + Node* result_triple = + CallRuntime(Runtime::kForInPrepare, context, receiver); + Node* cache_type = Projection(0, result_triple); + Node* cache_array = Projection(1, result_triple); + Node* cache_length = Projection(2, result_triple); + BuildForInPrepareResult(output_register, cache_type, cache_array, + cache_length); + Dispatch(); + } + Bind(¬hing_to_iterate); + { + // Receiver is null or undefined or descriptors are zero length. + Node* zero = SmiConstant(0); + BuildForInPrepareResult(output_register, zero, zero, zero); + Dispatch(); + } +} + +// ForInNext +// +// Returns the next enumerable property in the the accumulator. +IGNITION_HANDLER(ForInNext, InterpreterAssembler) { + Node* receiver_reg = BytecodeOperandReg(0); + Node* receiver = LoadRegister(receiver_reg); + Node* index_reg = BytecodeOperandReg(1); + Node* index = LoadRegister(index_reg); + Node* cache_type_reg = BytecodeOperandReg(2); + Node* cache_type = LoadRegister(cache_type_reg); + Node* cache_array_reg = NextRegister(cache_type_reg); + Node* cache_array = LoadRegister(cache_array_reg); + + // Load the next key from the enumeration array. + Node* key = LoadFixedArrayElement(cache_array, index, 0, + CodeStubAssembler::SMI_PARAMETERS); + + // Check if we can use the for-in fast path potentially using the enum cache. + Label if_fast(this), if_slow(this, Label::kDeferred); + Node* receiver_map = LoadMap(receiver); + Branch(WordEqual(receiver_map, cache_type), &if_fast, &if_slow); + Bind(&if_fast); + { + // Enum cache in use for {receiver}, the {key} is definitely valid. + SetAccumulator(key); + Dispatch(); + } + Bind(&if_slow); + { + // Record the fact that we hit the for-in slow path. + Node* vector_index = BytecodeOperandIdx(3); + Node* feedback_vector = LoadFeedbackVector(); + Node* megamorphic_sentinel = + HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())); + StoreFixedArrayElement(feedback_vector, vector_index, megamorphic_sentinel, + SKIP_WRITE_BARRIER); + + // Need to filter the {key} for the {receiver}. + Node* context = GetContext(); + Callable callable = CodeFactory::ForInFilter(isolate()); + Node* result = CallStub(callable, context, key, receiver); + SetAccumulator(result); + Dispatch(); + } +} + +// ForInContinue +// +// Returns false if the end of the enumerable properties has been reached. +IGNITION_HANDLER(ForInContinue, InterpreterAssembler) { + Node* index_reg = BytecodeOperandReg(0); + Node* index = LoadRegister(index_reg); + Node* cache_length_reg = BytecodeOperandReg(1); + Node* cache_length = LoadRegister(cache_length_reg); + + // Check if {index} is at {cache_length} already. 
+ Label if_true(this), if_false(this), end(this); + Branch(WordEqual(index, cache_length), &if_true, &if_false); + Bind(&if_true); + { + SetAccumulator(BooleanConstant(false)); + Goto(&end); + } + Bind(&if_false); + { + SetAccumulator(BooleanConstant(true)); + Goto(&end); + } + Bind(&end); + Dispatch(); +} + +// ForInStep +// +// Increments the loop counter in register |index| and stores the result +// in the accumulator. +IGNITION_HANDLER(ForInStep, InterpreterAssembler) { + Node* index_reg = BytecodeOperandReg(0); + Node* index = LoadRegister(index_reg); + Node* one = SmiConstant(Smi::FromInt(1)); + Node* result = SmiAdd(index, one); + SetAccumulator(result); + Dispatch(); +} + +// Wide +// +// Prefix bytecode indicating next bytecode has wide (16-bit) operands. +IGNITION_HANDLER(Wide, InterpreterAssembler) { + DispatchWide(OperandScale::kDouble); +} + +// ExtraWide +// +// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands. +IGNITION_HANDLER(ExtraWide, InterpreterAssembler) { + DispatchWide(OperandScale::kQuadruple); +} + +// Illegal +// +// An invalid bytecode aborting execution if dispatched. +IGNITION_HANDLER(Illegal, InterpreterAssembler) { Abort(kInvalidBytecode); } + +// Nop +// +// No operation. +IGNITION_HANDLER(Nop, InterpreterAssembler) { Dispatch(); } + +// SuspendGenerator +// +// Exports the register file and stores it into the generator. Also stores the +// current context, the state given in the accumulator, and the current bytecode +// offset (for debugging purposes) into the generator. +IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) { + Node* generator_reg = BytecodeOperandReg(0); + Node* flags = BytecodeOperandFlag(1); + Node* generator = LoadRegister(generator_reg); + + Label if_stepping(this, Label::kDeferred), ok(this); + Node* step_action_address = ExternalConstant( + ExternalReference::debug_last_step_action_address(isolate())); + Node* step_action = Load(MachineType::Int8(), step_action_address); + STATIC_ASSERT(StepIn > StepNext); + STATIC_ASSERT(LastStepAction == StepIn); + Node* step_next = Int32Constant(StepNext); + Branch(Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok); + Bind(&ok); + + Node* array = + LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset); + Node* context = GetContext(); + Node* state = GetAccumulator(); + + ExportRegisterFile(array); + StoreObjectField(generator, JSGeneratorObject::kContextOffset, context); + StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state); + + Label if_asyncgeneratorawait(this), if_notasyncgeneratorawait(this), + merge(this); + + // Calculate bytecode offset to store in the [input_or_debug_pos] or + // [await_input_or_debug_pos] fields, to be used by the inspector. + Node* offset = SmiTag(BytecodeOffset()); + + using AsyncGeneratorAwaitBits = SuspendGeneratorBytecodeFlags::FlagsBits; + Branch(Word32Equal(DecodeWord32(flags), + Int32Constant( + static_cast(SuspendFlags::kAsyncGeneratorAwait))), + &if_asyncgeneratorawait, &if_notasyncgeneratorawait); + + Bind(&if_notasyncgeneratorawait); + { + // For ordinary yields (and for AwaitExpressions in Async Functions, which + // are implemented as ordinary yields), it is safe to write over the + // [input_or_debug_pos] field. + StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset, + offset); + Goto(&merge); + } + + Bind(&if_asyncgeneratorawait); + { + // An AwaitExpression in an Async Generator requires writing to the + // [await_input_or_debug_pos] field. 
+ CSA_ASSERT(this, + HasInstanceType(generator, JS_ASYNC_GENERATOR_OBJECT_TYPE)); + StoreObjectField( + generator, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset, offset); + Goto(&merge); + } + + Bind(&merge); + Dispatch(); + + Bind(&if_stepping); + { + Node* context = GetContext(); + CallRuntime(Runtime::kDebugRecordGenerator, context, generator); + Goto(&ok); + } +} + +// ResumeGenerator +// +// Imports the register file stored in the generator. Also loads the +// generator's state and stores it in the accumulator, before overwriting it +// with kGeneratorExecuting. +IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) { + Node* generator_reg = BytecodeOperandReg(0); + Node* generator = LoadRegister(generator_reg); + + ImportRegisterFile( + LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset)); + + Node* old_state = + LoadObjectField(generator, JSGeneratorObject::kContinuationOffset); + Node* new_state = Int32Constant(JSGeneratorObject::kGeneratorExecuting); + StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, + SmiTag(new_state)); + SetAccumulator(old_state); + + Dispatch(); +} + +} // namespace + +Handle GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode, + OperandScale operand_scale) { + Zone zone(isolate->allocator(), ZONE_NAME); + InterpreterDispatchDescriptor descriptor(isolate); + compiler::CodeAssemblerState state( + isolate, &zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER), + Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode)); + + switch (bytecode) { +#define CALL_GENERATOR(Name, ...) \ + case Bytecode::k##Name: \ + Name##Assembler::Generate(&state, operand_scale); \ + break; + BYTECODE_LIST(CALL_GENERATOR); +#undef CALL_GENERATOR + } + + Handle code = compiler::CodeAssembler::GenerateCode(&state); + PROFILE(isolate, CodeCreateEvent( + CodeEventListener::BYTECODE_HANDLER_TAG, + AbstractCode::cast(*code), + Bytecodes::ToString(bytecode, operand_scale).c_str())); +#ifdef ENABLE_DISASSEMBLER + if (FLAG_trace_ignition_codegen) { + OFStream os(stdout); + code->Disassemble(Bytecodes::ToString(bytecode), os); + os << std::flush; + } +#endif // ENABLE_DISASSEMBLER + return code; +} + +} // namespace interpreter +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/interpreter/interpreter-generator.h b/deps/v8/src/interpreter/interpreter-generator.h new file mode 100644 index 00000000000000..eab411d810c84e --- /dev/null +++ b/deps/v8/src/interpreter/interpreter-generator.h @@ -0,0 +1,22 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_INTERPRETER_INTERPRETER_GENERATOR_H_ +#define V8_INTERPRETER_INTERPRETER_GENERATOR_H_ + +#include "src/interpreter/bytecode-operands.h" +#include "src/interpreter/bytecodes.h" + +namespace v8 { +namespace internal { +namespace interpreter { + +extern Handle GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode, + OperandScale operand_scale); + +} // namespace interpreter +} // namespace internal +} // namespace v8 + +#endif // V8_INTERPRETER_INTERPRETER_GENERATOR_H_ diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc new file mode 100644 index 00000000000000..bdd079ab84198a --- /dev/null +++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc @@ -0,0 +1,395 @@ +// Copyright 2017 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/interpreter/interpreter-intrinsics-generator.h" + +#include "src/allocation.h" +#include "src/builtins/builtins.h" +#include "src/code-factory.h" +#include "src/frames.h" +#include "src/interpreter/bytecodes.h" +#include "src/interpreter/interpreter-assembler.h" +#include "src/interpreter/interpreter-intrinsics.h" + +namespace v8 { +namespace internal { +namespace interpreter { + +using compiler::Node; + +class IntrinsicsGenerator { + public: + explicit IntrinsicsGenerator(InterpreterAssembler* assembler) + : isolate_(assembler->isolate()), + zone_(assembler->zone()), + assembler_(assembler) {} + + Node* InvokeIntrinsic(Node* function_id, Node* context, Node* first_arg_reg, + Node* arg_count); + + private: + enum InstanceTypeCompareMode { + kInstanceTypeEqual, + kInstanceTypeGreaterThanOrEqual + }; + + Node* IsInstanceType(Node* input, int type); + Node* CompareInstanceType(Node* map, int type, InstanceTypeCompareMode mode); + Node* IntrinsicAsStubCall(Node* input, Node* context, + Callable const& callable); + Node* IntrinsicAsBuiltinCall(Node* input, Node* context, Builtins::Name name); + void AbortIfArgCountMismatch(int expected, compiler::Node* actual); + +#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \ + Node* name(Node* input, Node* arg_count, Node* context); + INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER) +#undef DECLARE_INTRINSIC_HELPER + + Isolate* isolate() { return isolate_; } + Zone* zone() { return zone_; } + + Isolate* isolate_; + Zone* zone_; + InterpreterAssembler* assembler_; + + DISALLOW_COPY_AND_ASSIGN(IntrinsicsGenerator); +}; + +Node* GenerateInvokeIntrinsic(InterpreterAssembler* assembler, + Node* function_id, Node* context, + Node* first_arg_reg, Node* arg_count) { + IntrinsicsGenerator generator(assembler); + return generator.InvokeIntrinsic(function_id, context, first_arg_reg, + arg_count); +} + +#define __ assembler_-> + +Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context, + Node* first_arg_reg, + Node* arg_count) { + InterpreterAssembler::Label abort(assembler_), end(assembler_); + InterpreterAssembler::Variable result(assembler_, + MachineRepresentation::kTagged); + +#define MAKE_LABEL(name, lower_case, count) \ + InterpreterAssembler::Label lower_case(assembler_); + INTRINSICS_LIST(MAKE_LABEL) +#undef MAKE_LABEL + +#define LABEL_POINTER(name, lower_case, count) &lower_case, + InterpreterAssembler::Label* labels[] = {INTRINSICS_LIST(LABEL_POINTER)}; +#undef LABEL_POINTER + +#define CASE(name, lower_case, count) \ + static_cast(IntrinsicsHelper::IntrinsicId::k##name), + int32_t cases[] = {INTRINSICS_LIST(CASE)}; +#undef CASE + + __ Switch(function_id, &abort, cases, labels, arraysize(cases)); +#define HANDLE_CASE(name, lower_case, expected_arg_count) \ + __ Bind(&lower_case); \ + if (FLAG_debug_code && expected_arg_count >= 0) { \ + AbortIfArgCountMismatch(expected_arg_count, arg_count); \ + } \ + result.Bind(name(first_arg_reg, arg_count, context)); \ + __ Goto(&end); + INTRINSICS_LIST(HANDLE_CASE) +#undef HANDLE_CASE + + __ Bind(&abort); + { + __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic); + result.Bind(__ UndefinedConstant()); + __ Goto(&end); + } + + __ Bind(&end); + return result.value(); +} + +Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type, + InstanceTypeCompareMode mode) { + Node* instance_type = __ LoadInstanceType(object); + + if (mode == kInstanceTypeEqual) { 
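// kInstanceTypeEqual tests for one exact instance type (used by IsArray,
// IsJSProxy and IsTypedArray below), while kInstanceTypeGreaterThanOrEqual is
// a range check: IsJSReceiver relies on all JSReceiver instance types being
// ordered last in the instance-type enum (see the STATIC_ASSERT against
// LAST_JS_RECEIVER_TYPE below).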
+ return __ Word32Equal(instance_type, __ Int32Constant(type)); + } else { + DCHECK(mode == kInstanceTypeGreaterThanOrEqual); + return __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type)); + } +} + +Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) { + InterpreterAssembler::Variable return_value(assembler_, + MachineRepresentation::kTagged); + // TODO(ishell): Use Select here. + InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_), + return_false(assembler_), end(assembler_); + Node* arg = __ LoadRegister(input); + __ GotoIf(__ TaggedIsSmi(arg), &return_false); + + Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual); + __ Branch(condition, &return_true, &return_false); + + __ Bind(&return_true); + { + return_value.Bind(__ BooleanConstant(true)); + __ Goto(&end); + } + + __ Bind(&return_false); + { + return_value.Bind(__ BooleanConstant(false)); + __ Goto(&end); + } + + __ Bind(&end); + return return_value.value(); +} + +Node* IntrinsicsGenerator::IsJSReceiver(Node* input, Node* arg_count, + Node* context) { + // TODO(ishell): Use Select here. + // TODO(ishell): Use CSA::IsJSReceiverInstanceType here. + InterpreterAssembler::Variable return_value(assembler_, + MachineRepresentation::kTagged); + InterpreterAssembler::Label return_true(assembler_), return_false(assembler_), + end(assembler_); + + Node* arg = __ LoadRegister(input); + __ GotoIf(__ TaggedIsSmi(arg), &return_false); + + STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); + Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE, + kInstanceTypeGreaterThanOrEqual); + __ Branch(condition, &return_true, &return_false); + + __ Bind(&return_true); + { + return_value.Bind(__ BooleanConstant(true)); + __ Goto(&end); + } + + __ Bind(&return_false); + { + return_value.Bind(__ BooleanConstant(false)); + __ Goto(&end); + } + + __ Bind(&end); + return return_value.value(); +} + +Node* IntrinsicsGenerator::IsArray(Node* input, Node* arg_count, + Node* context) { + return IsInstanceType(input, JS_ARRAY_TYPE); +} + +Node* IntrinsicsGenerator::IsJSProxy(Node* input, Node* arg_count, + Node* context) { + return IsInstanceType(input, JS_PROXY_TYPE); +} + +Node* IntrinsicsGenerator::IsTypedArray(Node* input, Node* arg_count, + Node* context) { + return IsInstanceType(input, JS_TYPED_ARRAY_TYPE); +} + +Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) { + // TODO(ishell): Use SelectBooleanConstant here. 
+ InterpreterAssembler::Variable return_value(assembler_, + MachineRepresentation::kTagged); + InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_), + end(assembler_); + + Node* arg = __ LoadRegister(input); + + __ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi); + __ Bind(&if_smi); + { + return_value.Bind(__ BooleanConstant(true)); + __ Goto(&end); + } + + __ Bind(&if_not_smi); + { + return_value.Bind(__ BooleanConstant(false)); + __ Goto(&end); + } + + __ Bind(&end); + return return_value.value(); +} + +Node* IntrinsicsGenerator::IntrinsicAsStubCall(Node* args_reg, Node* context, + Callable const& callable) { + int param_count = callable.descriptor().GetParameterCount(); + int input_count = param_count + 2; // +2 for target and context + Node** args = zone()->NewArray(input_count); + int index = 0; + args[index++] = __ HeapConstant(callable.code()); + for (int i = 0; i < param_count; i++) { + args[index++] = __ LoadRegister(args_reg); + args_reg = __ NextRegister(args_reg); + } + args[index++] = context; + return __ CallStubN(callable.descriptor(), 1, input_count, args); +} + +Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(Node* input, Node* context, + Builtins::Name name) { + Callable callable = Builtins::CallableFor(isolate_, name); + return IntrinsicAsStubCall(input, context, callable); +} + +Node* IntrinsicsGenerator::CreateIterResultObject(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsStubCall(input, context, + CodeFactory::CreateIterResultObject(isolate())); +} + +Node* IntrinsicsGenerator::HasProperty(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsStubCall(input, context, + CodeFactory::HasProperty(isolate())); +} + +Node* IntrinsicsGenerator::SubString(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsStubCall(input, context, CodeFactory::SubString(isolate())); +} + +Node* IntrinsicsGenerator::ToString(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsStubCall(input, context, CodeFactory::ToString(isolate())); +} + +Node* IntrinsicsGenerator::ToLength(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsStubCall(input, context, CodeFactory::ToLength(isolate())); +} + +Node* IntrinsicsGenerator::ToInteger(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsStubCall(input, context, CodeFactory::ToInteger(isolate())); +} + +Node* IntrinsicsGenerator::ToNumber(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsStubCall(input, context, CodeFactory::ToNumber(isolate())); +} + +Node* IntrinsicsGenerator::ToObject(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsStubCall(input, context, CodeFactory::ToObject(isolate())); +} + +Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count, + Node* context) { + // First argument register contains the function target. + Node* function = __ LoadRegister(args_reg); + + // Receiver is the second runtime call argument. + Node* receiver_reg = __ NextRegister(args_reg); + Node* receiver_arg = __ RegisterLocation(receiver_reg); + + // Subtract function and receiver from arg count. 
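// For example, a call intrinsic like %_Call(fn, receiver, x, y) reaches this
// point with arg_count == 4: the first register holds the call target and the
// second the receiver, so CallJS below must only be told about the two real
// arguments.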
+ Node* function_and_receiver_count = __ Int32Constant(2); + Node* target_args_count = __ Int32Sub(arg_count, function_and_receiver_count); + + if (FLAG_debug_code) { + InterpreterAssembler::Label arg_count_positive(assembler_); + Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0)); + __ GotoIfNot(comparison, &arg_count_positive); + __ Abort(kWrongArgumentCountForInvokeIntrinsic); + __ Goto(&arg_count_positive); + __ Bind(&arg_count_positive); + } + + Node* result = __ CallJS(function, context, receiver_arg, target_args_count, + ConvertReceiverMode::kAny, TailCallMode::kDisallow); + return result; +} + +Node* IntrinsicsGenerator::ClassOf(Node* args_reg, Node* arg_count, + Node* context) { + Node* value = __ LoadRegister(args_reg); + return __ ClassOf(value); +} + +Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg, + Node* arg_count, + Node* context) { + InterpreterAssembler::Label not_receiver( + assembler_, InterpreterAssembler::Label::kDeferred); + InterpreterAssembler::Label done(assembler_); + InterpreterAssembler::Variable return_value(assembler_, + MachineRepresentation::kTagged); + + Node* sync_iterator = __ LoadRegister(args_reg); + + __ GotoIf(__ TaggedIsSmi(sync_iterator), ¬_receiver); + __ GotoIfNot(__ IsJSReceiver(sync_iterator), ¬_receiver); + + Node* const native_context = __ LoadNativeContext(context); + Node* const map = __ LoadContextElement( + native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX); + Node* const iterator = __ AllocateJSObjectFromMap(map); + + __ StoreObjectFieldNoWriteBarrier( + iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator); + + return_value.Bind(iterator); + __ Goto(&done); + + __ Bind(¬_receiver); + { + return_value.Bind( + __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context)); + + // Unreachable due to the Throw in runtime call. + __ Goto(&done); + } + + __ Bind(&done); + return return_value.value(); +} + +Node* IntrinsicsGenerator::AsyncGeneratorGetAwaitInputOrDebugPos( + Node* args_reg, Node* arg_count, Node* context) { + Node* generator = __ LoadRegister(args_reg); + CSA_SLOW_ASSERT(assembler_, __ HasInstanceType( + generator, JS_ASYNC_GENERATOR_OBJECT_TYPE)); + + Node* const value = __ LoadObjectField( + generator, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset); + + return value; +} + +Node* IntrinsicsGenerator::AsyncGeneratorReject(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsBuiltinCall(input, context, + Builtins::kAsyncGeneratorReject); +} + +Node* IntrinsicsGenerator::AsyncGeneratorResolve(Node* input, Node* arg_count, + Node* context) { + return IntrinsicAsBuiltinCall(input, context, + Builtins::kAsyncGeneratorResolve); +} + +void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) { + InterpreterAssembler::Label match(assembler_); + Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected)); + __ GotoIf(comparison, &match); + __ Abort(kWrongArgumentCountForInvokeIntrinsic); + __ Goto(&match); + __ Bind(&match); +} + +} // namespace interpreter +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.h b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h new file mode 100644 index 00000000000000..11442438d5d948 --- /dev/null +++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.h @@ -0,0 +1,29 @@ +// Copyright 2017 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_ +#define V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_ + +namespace v8 { +namespace internal { + +namespace compiler { +class Node; +} // namespace compiler + +namespace interpreter { + +class InterpreterAssembler; + +extern compiler::Node* GenerateInvokeIntrinsic(InterpreterAssembler* assembler, + compiler::Node* function_id, + compiler::Node* context, + compiler::Node* first_arg_reg, + compiler::Node* arg_count); + +} // namespace interpreter +} // namespace internal +} // namespace v8 + +#endif // V8_INTERPRETER_INTERPRETER_INTRINSICS_GENERATOR_H_ diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc index 78de42b634d3e4..1682d59c27af13 100644 --- a/deps/v8/src/interpreter/interpreter-intrinsics.cc +++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc @@ -4,22 +4,12 @@ #include "src/interpreter/interpreter-intrinsics.h" -#include "src/code-factory.h" -#include "src/objects-inl.h" +#include "src/base/logging.h" namespace v8 { namespace internal { namespace interpreter { -using compiler::Node; - -#define __ assembler_-> - -IntrinsicsHelper::IntrinsicsHelper(InterpreterAssembler* assembler) - : isolate_(assembler->isolate()), - zone_(assembler->zone()), - assembler_(assembler) {} - // static bool IntrinsicsHelper::IsSupported(Runtime::FunctionId function_id) { switch (function_id) { @@ -62,290 +52,6 @@ Runtime::FunctionId IntrinsicsHelper::ToRuntimeId( } } -Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context, - Node* first_arg_reg, Node* arg_count) { - InterpreterAssembler::Label abort(assembler_), end(assembler_); - InterpreterAssembler::Variable result(assembler_, - MachineRepresentation::kTagged); - -#define MAKE_LABEL(name, lower_case, count) \ - InterpreterAssembler::Label lower_case(assembler_); - INTRINSICS_LIST(MAKE_LABEL) -#undef MAKE_LABEL - -#define LABEL_POINTER(name, lower_case, count) &lower_case, - InterpreterAssembler::Label* labels[] = {INTRINSICS_LIST(LABEL_POINTER)}; -#undef LABEL_POINTER - -#define CASE(name, lower_case, count) \ - static_cast(IntrinsicId::k##name), - int32_t cases[] = {INTRINSICS_LIST(CASE)}; -#undef CASE - - __ Switch(function_id, &abort, cases, labels, arraysize(cases)); -#define HANDLE_CASE(name, lower_case, expected_arg_count) \ - __ Bind(&lower_case); \ - if (FLAG_debug_code && expected_arg_count >= 0) { \ - AbortIfArgCountMismatch(expected_arg_count, arg_count); \ - } \ - result.Bind(name(first_arg_reg, arg_count, context)); \ - __ Goto(&end); - INTRINSICS_LIST(HANDLE_CASE) -#undef HANDLE_CASE - - __ Bind(&abort); - { - __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic); - result.Bind(__ UndefinedConstant()); - __ Goto(&end); - } - - __ Bind(&end); - return result.value(); -} - -Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type, - InstanceTypeCompareMode mode) { - Node* instance_type = __ LoadInstanceType(object); - - if (mode == kInstanceTypeEqual) { - return __ Word32Equal(instance_type, __ Int32Constant(type)); - } else { - DCHECK(mode == kInstanceTypeGreaterThanOrEqual); - return __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type)); - } -} - -Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) { - InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); - // TODO(ishell): Use Select here. 
- InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_), - return_false(assembler_), end(assembler_); - Node* arg = __ LoadRegister(input); - __ GotoIf(__ TaggedIsSmi(arg), &return_false); - - Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual); - __ Branch(condition, &return_true, &return_false); - - __ Bind(&return_true); - { - return_value.Bind(__ BooleanConstant(true)); - __ Goto(&end); - } - - __ Bind(&return_false); - { - return_value.Bind(__ BooleanConstant(false)); - __ Goto(&end); - } - - __ Bind(&end); - return return_value.value(); -} - -Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count, - Node* context) { - // TODO(ishell): Use Select here. - // TODO(ishell): Use CSA::IsJSReceiverInstanceType here. - InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); - InterpreterAssembler::Label return_true(assembler_), return_false(assembler_), - end(assembler_); - - Node* arg = __ LoadRegister(input); - __ GotoIf(__ TaggedIsSmi(arg), &return_false); - - STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); - Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE, - kInstanceTypeGreaterThanOrEqual); - __ Branch(condition, &return_true, &return_false); - - __ Bind(&return_true); - { - return_value.Bind(__ BooleanConstant(true)); - __ Goto(&end); - } - - __ Bind(&return_false); - { - return_value.Bind(__ BooleanConstant(false)); - __ Goto(&end); - } - - __ Bind(&end); - return return_value.value(); -} - -Node* IntrinsicsHelper::IsArray(Node* input, Node* arg_count, Node* context) { - return IsInstanceType(input, JS_ARRAY_TYPE); -} - -Node* IntrinsicsHelper::IsJSProxy(Node* input, Node* arg_count, Node* context) { - return IsInstanceType(input, JS_PROXY_TYPE); -} - -Node* IntrinsicsHelper::IsTypedArray(Node* input, Node* arg_count, - Node* context) { - return IsInstanceType(input, JS_TYPED_ARRAY_TYPE); -} - -Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) { - // TODO(ishell): Use SelectBooleanConstant here. 
- InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); - InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_), - end(assembler_); - - Node* arg = __ LoadRegister(input); - - __ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi); - __ Bind(&if_smi); - { - return_value.Bind(__ BooleanConstant(true)); - __ Goto(&end); - } - - __ Bind(&if_not_smi); - { - return_value.Bind(__ BooleanConstant(false)); - __ Goto(&end); - } - - __ Bind(&end); - return return_value.value(); -} - -Node* IntrinsicsHelper::IntrinsicAsStubCall(Node* args_reg, Node* context, - Callable const& callable) { - int param_count = callable.descriptor().GetParameterCount(); - int input_count = param_count + 2; // +2 for target and context - Node** args = zone()->NewArray(input_count); - int index = 0; - args[index++] = __ HeapConstant(callable.code()); - for (int i = 0; i < param_count; i++) { - args[index++] = __ LoadRegister(args_reg); - args_reg = __ NextRegister(args_reg); - } - args[index++] = context; - return __ CallStubN(callable.descriptor(), 1, input_count, args); -} - -Node* IntrinsicsHelper::CreateIterResultObject(Node* input, Node* arg_count, - Node* context) { - return IntrinsicAsStubCall(input, context, - CodeFactory::CreateIterResultObject(isolate())); -} - -Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count, - Node* context) { - return IntrinsicAsStubCall(input, context, - CodeFactory::HasProperty(isolate())); -} - -Node* IntrinsicsHelper::SubString(Node* input, Node* arg_count, Node* context) { - return IntrinsicAsStubCall(input, context, CodeFactory::SubString(isolate())); -} - -Node* IntrinsicsHelper::ToString(Node* input, Node* arg_count, Node* context) { - return IntrinsicAsStubCall(input, context, CodeFactory::ToString(isolate())); -} - -Node* IntrinsicsHelper::ToLength(Node* input, Node* arg_count, Node* context) { - return IntrinsicAsStubCall(input, context, CodeFactory::ToLength(isolate())); -} - -Node* IntrinsicsHelper::ToInteger(Node* input, Node* arg_count, Node* context) { - return IntrinsicAsStubCall(input, context, CodeFactory::ToInteger(isolate())); -} - -Node* IntrinsicsHelper::ToNumber(Node* input, Node* arg_count, Node* context) { - return IntrinsicAsStubCall(input, context, CodeFactory::ToNumber(isolate())); -} - -Node* IntrinsicsHelper::ToObject(Node* input, Node* arg_count, Node* context) { - return IntrinsicAsStubCall(input, context, CodeFactory::ToObject(isolate())); -} - -Node* IntrinsicsHelper::Call(Node* args_reg, Node* arg_count, Node* context) { - // First argument register contains the function target. - Node* function = __ LoadRegister(args_reg); - - // Receiver is the second runtime call argument. - Node* receiver_reg = __ NextRegister(args_reg); - Node* receiver_arg = __ RegisterLocation(receiver_reg); - - // Subtract function and receiver from arg count. 
- Node* function_and_receiver_count = __ Int32Constant(2); - Node* target_args_count = __ Int32Sub(arg_count, function_and_receiver_count); - - if (FLAG_debug_code) { - InterpreterAssembler::Label arg_count_positive(assembler_); - Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0)); - __ GotoIfNot(comparison, &arg_count_positive); - __ Abort(kWrongArgumentCountForInvokeIntrinsic); - __ Goto(&arg_count_positive); - __ Bind(&arg_count_positive); - } - - Node* result = __ CallJS(function, context, receiver_arg, target_args_count, - TailCallMode::kDisallow); - return result; -} - -Node* IntrinsicsHelper::ClassOf(Node* args_reg, Node* arg_count, - Node* context) { - Node* value = __ LoadRegister(args_reg); - return __ ClassOf(value); -} - -Node* IntrinsicsHelper::CreateAsyncFromSyncIterator(Node* args_reg, - Node* arg_count, - Node* context) { - InterpreterAssembler::Label not_receiver( - assembler_, InterpreterAssembler::Label::kDeferred); - InterpreterAssembler::Label done(assembler_); - InterpreterAssembler::Variable return_value(assembler_, - MachineRepresentation::kTagged); - - Node* sync_iterator = __ LoadRegister(args_reg); - - __ GotoIf(__ TaggedIsSmi(sync_iterator), ¬_receiver); - __ GotoIfNot(__ IsJSReceiver(sync_iterator), ¬_receiver); - - Node* const native_context = __ LoadNativeContext(context); - Node* const map = __ LoadContextElement( - native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX); - Node* const iterator = __ AllocateJSObjectFromMap(map); - - __ StoreObjectFieldNoWriteBarrier( - iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator); - - return_value.Bind(iterator); - __ Goto(&done); - - __ Bind(¬_receiver); - { - return_value.Bind( - __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context)); - - // Unreachable due to the Throw in runtime call. - __ Goto(&done); - } - - __ Bind(&done); - return return_value.value(); -} - -void IntrinsicsHelper::AbortIfArgCountMismatch(int expected, Node* actual) { - InterpreterAssembler::Label match(assembler_); - Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected)); - __ GotoIf(comparison, &match); - __ Abort(kWrongArgumentCountForInvokeIntrinsic); - __ Goto(&match); - __ Bind(&match); -} - } // namespace interpreter } // namespace internal } // namespace v8 diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h index 502a2f7b38cc4d..137bdbf9cb70ef 100644 --- a/deps/v8/src/interpreter/interpreter-intrinsics.h +++ b/deps/v8/src/interpreter/interpreter-intrinsics.h @@ -5,25 +5,19 @@ #ifndef V8_INTERPRETER_INTERPRETER_INTRINSICS_H_ #define V8_INTERPRETER_INTERPRETER_INTRINSICS_H_ -#include "src/allocation.h" -#include "src/builtins/builtins.h" -#include "src/frames.h" -#include "src/interpreter/bytecodes.h" -#include "src/interpreter/interpreter-assembler.h" #include "src/runtime/runtime.h" namespace v8 { namespace internal { - -namespace compiler { -class Node; -} // namespace compiler - namespace interpreter { // List of supported intrisics, with upper case name, lower case name and // expected number of arguments (-1 denoting argument count is variable). 
#define INTRINSICS_LIST(V) \ + V(AsyncGeneratorGetAwaitInputOrDebugPos, \ + async_generator_get_await_input_or_debug_pos, 1) \ + V(AsyncGeneratorReject, async_generator_reject, 2) \ + V(AsyncGeneratorResolve, async_generator_resolve, 3) \ V(Call, call, -1) \ V(ClassOf, class_of, 1) \ V(CreateIterResultObject, create_iter_result_object, 2) \ @@ -51,45 +45,12 @@ class IntrinsicsHelper { }; STATIC_ASSERT(static_cast(IntrinsicId::kIdCount) <= kMaxUInt8); - explicit IntrinsicsHelper(InterpreterAssembler* assembler); - - compiler::Node* InvokeIntrinsic(compiler::Node* function_id, - compiler::Node* context, - compiler::Node* first_arg_reg, - compiler::Node* arg_count); - static bool IsSupported(Runtime::FunctionId function_id); static IntrinsicId FromRuntimeId(Runtime::FunctionId function_id); static Runtime::FunctionId ToRuntimeId(IntrinsicId intrinsic_id); private: - enum InstanceTypeCompareMode { - kInstanceTypeEqual, - kInstanceTypeGreaterThanOrEqual - }; - - compiler::Node* IsInstanceType(compiler::Node* input, int type); - compiler::Node* CompareInstanceType(compiler::Node* map, int type, - InstanceTypeCompareMode mode); - compiler::Node* IntrinsicAsStubCall(compiler::Node* input, - compiler::Node* context, - Callable const& callable); - void AbortIfArgCountMismatch(int expected, compiler::Node* actual); - -#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \ - compiler::Node* name(compiler::Node* input, compiler::Node* arg_count, \ - compiler::Node* context); - INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER) -#undef DECLARE_INTRINSIC_HELPER - - Isolate* isolate() { return isolate_; } - Zone* zone() { return zone_; } - - Isolate* isolate_; - Zone* zone_; - InterpreterAssembler* assembler_; - - DISALLOW_COPY_AND_ASSIGN(IntrinsicsHelper); + DISALLOW_IMPLICIT_CONSTRUCTORS(IntrinsicsHelper); }; } // namespace interpreter diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc index 5db69e4f67a17f..42b2b18ad10db4 100644 --- a/deps/v8/src/interpreter/interpreter.cc +++ b/deps/v8/src/interpreter/interpreter.cc @@ -7,36 +7,20 @@ #include #include -#include "src/ast/prettyprinter.h" -#include "src/builtins/builtins-arguments.h" -#include "src/builtins/builtins-constructor.h" -#include "src/builtins/builtins-object.h" -#include "src/code-factory.h" +#include "src/codegen.h" #include "src/compilation-info.h" #include "src/compiler.h" #include "src/counters.h" -#include "src/debug/debug.h" -#include "src/factory.h" -#include "src/ic/accessor-assembler.h" -#include "src/interpreter/bytecode-flags.h" #include "src/interpreter/bytecode-generator.h" #include "src/interpreter/bytecodes.h" -#include "src/interpreter/interpreter-assembler.h" -#include "src/interpreter/interpreter-intrinsics.h" #include "src/log.h" -#include "src/objects-inl.h" -#include "src/zone/zone.h" +#include "src/objects.h" +#include "src/setup-isolate.h" namespace v8 { namespace internal { namespace interpreter { -using compiler::Node; -typedef CodeStubAssembler::Label Label; -typedef CodeStubAssembler::Variable Variable; - -#define __ assembler-> - class InterpreterCompilationJob final : public CompilationJob { public: explicit InterpreterCompilationJob(CompilationInfo* info); @@ -89,12 +73,6 @@ class InterpreterCompilationJob final : public CompilationJob { Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) { memset(dispatch_table_, 0, sizeof(dispatch_table_)); -} - -void Interpreter::Initialize() { - if (!ShouldInitializeDispatchTable()) return; - Zone zone(isolate_->allocator(), 
ZONE_NAME); - HandleScope scope(isolate_); if (FLAG_trace_ignition_dispatches) { static const int kBytecodeCount = static_cast(Bytecode::kLast) + 1; @@ -103,57 +81,6 @@ void Interpreter::Initialize() { memset(bytecode_dispatch_counters_table_.get(), 0, sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount); } - - // Generate bytecode handlers for all bytecodes and scales. - const OperandScale kOperandScales[] = { -#define VALUE(Name, _) OperandScale::k##Name, - OPERAND_SCALE_LIST(VALUE) -#undef VALUE - }; - - for (OperandScale operand_scale : kOperandScales) { -#define GENERATE_CODE(Name, ...) \ - InstallBytecodeHandler(&zone, Bytecode::k##Name, operand_scale, \ - &Interpreter::Do##Name); - BYTECODE_LIST(GENERATE_CODE) -#undef GENERATE_CODE - } - - // Fill unused entries will the illegal bytecode handler. - size_t illegal_index = - GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle); - for (size_t index = 0; index < arraysize(dispatch_table_); ++index) { - if (dispatch_table_[index] == nullptr) { - dispatch_table_[index] = dispatch_table_[illegal_index]; - } - } - - // Initialization should have been successful. - DCHECK(IsDispatchTableInitialized()); -} - -void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode, - OperandScale operand_scale, - BytecodeGeneratorFunc generator) { - if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return; - - InterpreterDispatchDescriptor descriptor(isolate_); - compiler::CodeAssemblerState state( - isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER), - Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode)); - InterpreterAssembler assembler(&state, bytecode, operand_scale); - if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) { - assembler.SaveBytecodeOffset(); - } - (this->*generator)(&assembler); - Handle code = compiler::CodeAssembler::GenerateCode(&state); - size_t index = GetDispatchTableIndex(bytecode, operand_scale); - dispatch_table_[index] = code->entry(); - TraceCodegen(code); - PROFILE(isolate_, CodeCreateEvent( - CodeEventListener::BYTECODE_HANDLER_TAG, - AbstractCode::cast(*code), - Bytecodes::ToString(bytecode, operand_scale).c_str())); } Code* Interpreter::GetBytecodeHandler(Bytecode bytecode, @@ -288,26 +215,6 @@ bool Interpreter::IsDispatchTableInitialized() { return dispatch_table_[0] != nullptr; } -bool Interpreter::ShouldInitializeDispatchTable() { - if (FLAG_trace_ignition || FLAG_trace_ignition_codegen || - FLAG_trace_ignition_dispatches) { - // Regenerate table to add bytecode tracing operations, print the assembly - // code generated by TurboFan or instrument handlers with dispatch counters. - return true; - } - return !IsDispatchTableInitialized(); -} - -void Interpreter::TraceCodegen(Handle code) { -#ifdef ENABLE_DISASSEMBLER - if (FLAG_trace_ignition_codegen) { - OFStream os(stdout); - code->Disassemble(nullptr, os); - os << std::flush; - } -#endif // ENABLE_DISASSEMBLER -} - const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) { #ifdef ENABLE_DISASSEMBLER #define RETURN_NAME(Name, ...) \ @@ -380,3010 +287,6 @@ Local Interpreter::GetDispatchCountersObject() { return counters_map; } -// LdaZero -// -// Load literal '0' into the accumulator. -void Interpreter::DoLdaZero(InterpreterAssembler* assembler) { - Node* zero_value = __ NumberConstant(0.0); - __ SetAccumulator(zero_value); - __ Dispatch(); -} - -// LdaSmi -// -// Load an integer literal into the accumulator as a Smi. 
-void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) { - Node* smi_int = __ BytecodeOperandImmSmi(0); - __ SetAccumulator(smi_int); - __ Dispatch(); -} - -// LdaConstant -// -// Load constant literal at |idx| in the constant pool into the accumulator. -void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) { - Node* index = __ BytecodeOperandIdx(0); - Node* constant = __ LoadConstantPoolEntry(index); - __ SetAccumulator(constant); - __ Dispatch(); -} - -// LdaUndefined -// -// Load Undefined into the accumulator. -void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) { - Node* undefined_value = - __ HeapConstant(isolate_->factory()->undefined_value()); - __ SetAccumulator(undefined_value); - __ Dispatch(); -} - -// LdaNull -// -// Load Null into the accumulator. -void Interpreter::DoLdaNull(InterpreterAssembler* assembler) { - Node* null_value = __ HeapConstant(isolate_->factory()->null_value()); - __ SetAccumulator(null_value); - __ Dispatch(); -} - -// LdaTheHole -// -// Load TheHole into the accumulator. -void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) { - Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value()); - __ SetAccumulator(the_hole_value); - __ Dispatch(); -} - -// LdaTrue -// -// Load True into the accumulator. -void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) { - Node* true_value = __ HeapConstant(isolate_->factory()->true_value()); - __ SetAccumulator(true_value); - __ Dispatch(); -} - -// LdaFalse -// -// Load False into the accumulator. -void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) { - Node* false_value = __ HeapConstant(isolate_->factory()->false_value()); - __ SetAccumulator(false_value); - __ Dispatch(); -} - -// Ldar -// -// Load accumulator with value from register . -void Interpreter::DoLdar(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* value = __ LoadRegister(reg_index); - __ SetAccumulator(value); - __ Dispatch(); -} - -// Star -// -// Store accumulator to register . -void Interpreter::DoStar(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* accumulator = __ GetAccumulator(); - __ StoreRegister(accumulator, reg_index); - __ Dispatch(); -} - -// Mov -// -// Stores the value of register to register . -void Interpreter::DoMov(InterpreterAssembler* assembler) { - Node* src_index = __ BytecodeOperandReg(0); - Node* src_value = __ LoadRegister(src_index); - Node* dst_index = __ BytecodeOperandReg(1); - __ StoreRegister(src_value, dst_index); - __ Dispatch(); -} - -void Interpreter::BuildLoadGlobal(int slot_operand_index, - int name_operand_index, - TypeofMode typeof_mode, - InterpreterAssembler* assembler) { - // Load the global via the LoadGlobalIC. - Node* feedback_vector = __ LoadFeedbackVector(); - Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index); - - AccessorAssembler accessor_asm(assembler->state()); - - Label try_handler(assembler, Label::kDeferred), - miss(assembler, Label::kDeferred); - - // Fast path without frame construction for the data case. - { - Label done(assembler); - Variable var_result(assembler, MachineRepresentation::kTagged); - ExitPoint exit_point(assembler, &done, &var_result); - - accessor_asm.LoadGlobalIC_TryPropertyCellCase( - feedback_vector, feedback_slot, &exit_point, &try_handler, &miss, - CodeStubAssembler::INTPTR_PARAMETERS); - - __ Bind(&done); - __ SetAccumulator(var_result.value()); - __ Dispatch(); - } - - // Slow path with frame construction. 
- {
- Label done(assembler);
- Variable var_result(assembler, MachineRepresentation::kTagged);
- ExitPoint exit_point(assembler, &done, &var_result);
-
- __ Bind(&try_handler);
- {
- Node* context = __ GetContext();
- Node* smi_slot = __ SmiTag(feedback_slot);
- Node* name_index = __ BytecodeOperandIdx(name_operand_index);
- Node* name = __ LoadConstantPoolEntry(name_index);
-
- AccessorAssembler::LoadICParameters params(context, nullptr, name,
- smi_slot, feedback_vector);
- accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode,
- &exit_point, &miss);
- }
-
- __ Bind(&miss);
- {
- Node* context = __ GetContext();
- Node* smi_slot = __ SmiTag(feedback_slot);
- Node* name_index = __ BytecodeOperandIdx(name_operand_index);
- Node* name = __ LoadConstantPoolEntry(name_index);
-
- AccessorAssembler::LoadICParameters params(context, nullptr, name,
- smi_slot, feedback_vector);
- accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
- }
-
- __ Bind(&done);
- {
- __ SetAccumulator(var_result.value());
- __ Dispatch();
- }
- }
-}
-
-// LdaGlobal
-//
-// Load the global with name in constant pool entry into the
-// accumulator using FeedBackVector slot outside of a typeof.
-void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
- static const int kNameOperandIndex = 0;
- static const int kSlotOperandIndex = 1;
-
- BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF,
- assembler);
-}
-
-// LdaGlobalInsideTypeof
-//
-// Load the global with name in constant pool entry into the
-// accumulator using FeedBackVector slot inside of a typeof.
-void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
- static const int kNameOperandIndex = 0;
- static const int kSlotOperandIndex = 1;
-
- BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF,
- assembler);
-}
-
-void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
- // Get the global object.
- Node* context = __ GetContext();
- Node* native_context = __ LoadNativeContext(context);
- Node* global =
- __ LoadContextElement(native_context, Context::EXTENSION_INDEX);
-
- // Store the global via the StoreIC.
- Node* code_target = __ HeapConstant(ic.code());
- Node* constant_index = __ BytecodeOperandIdx(0);
- Node* name = __ LoadConstantPoolEntry(constant_index);
- Node* value = __ GetAccumulator();
- Node* raw_slot = __ BytecodeOperandIdx(1);
- Node* smi_slot = __ SmiTag(raw_slot);
- Node* feedback_vector = __ LoadFeedbackVector();
- __ CallStub(ic.descriptor(), code_target, context, global, name, value,
- smi_slot, feedback_vector);
- __ Dispatch();
-}
-
-// StaGlobalSloppy
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry using FeedBackVector slot in sloppy mode.
-void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
- DoStaGlobal(ic, assembler);
-}
-
-// StaGlobalStrict
-//
-// Store the value in the accumulator into the global with name in constant pool
-// entry using FeedBackVector slot in strict mode.
-void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
- DoStaGlobal(ic, assembler);
-}
-
-// LdaContextSlot
-//
-// Load the object in |slot_index| of the context at |depth| in the context
-// chain starting at |context| into the accumulator.
-void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* context = __ LoadRegister(reg_index); - Node* slot_index = __ BytecodeOperandIdx(1); - Node* depth = __ BytecodeOperandUImm(2); - Node* slot_context = __ GetContextAtDepth(context, depth); - Node* result = __ LoadContextElement(slot_context, slot_index); - __ SetAccumulator(result); - __ Dispatch(); -} - -// LdaImmutableContextSlot -// -// Load the object in |slot_index| of the context at |depth| in the context -// chain starting at |context| into the accumulator. -void Interpreter::DoLdaImmutableContextSlot(InterpreterAssembler* assembler) { - // TODO(danno) Share the actual code object rather creating a duplicate one. - DoLdaContextSlot(assembler); -} - -// LdaCurrentContextSlot -// -// Load the object in |slot_index| of the current context into the accumulator. -void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) { - Node* slot_index = __ BytecodeOperandIdx(0); - Node* slot_context = __ GetContext(); - Node* result = __ LoadContextElement(slot_context, slot_index); - __ SetAccumulator(result); - __ Dispatch(); -} - -// LdaImmutableCurrentContextSlot -// -// Load the object in |slot_index| of the current context into the accumulator. -void Interpreter::DoLdaImmutableCurrentContextSlot( - InterpreterAssembler* assembler) { - // TODO(danno) Share the actual code object rather creating a duplicate one. - DoLdaCurrentContextSlot(assembler); -} - -// StaContextSlot -// -// Stores the object in the accumulator into |slot_index| of the context at -// |depth| in the context chain starting at |context|. -void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* reg_index = __ BytecodeOperandReg(0); - Node* context = __ LoadRegister(reg_index); - Node* slot_index = __ BytecodeOperandIdx(1); - Node* depth = __ BytecodeOperandUImm(2); - Node* slot_context = __ GetContextAtDepth(context, depth); - __ StoreContextElement(slot_context, slot_index, value); - __ Dispatch(); -} - -// StaCurrentContextSlot -// -// Stores the object in the accumulator into |slot_index| of the current -// context. -void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* slot_index = __ BytecodeOperandIdx(0); - Node* slot_context = __ GetContext(); - __ StoreContextElement(slot_context, slot_index, value); - __ Dispatch(); -} - -void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id, - InterpreterAssembler* assembler) { - Node* name_index = __ BytecodeOperandIdx(0); - Node* name = __ LoadConstantPoolEntry(name_index); - Node* context = __ GetContext(); - Node* result = __ CallRuntime(function_id, context, name); - __ SetAccumulator(result); - __ Dispatch(); -} - -// LdaLookupSlot -// -// Lookup the object with the name in constant pool entry |name_index| -// dynamically. -void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) { - DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler); -} - -// LdaLookupSlotInsideTypeof -// -// Lookup the object with the name in constant pool entry |name_index| -// dynamically without causing a NoReferenceError. 
-void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) { - DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); -} - -void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id, - InterpreterAssembler* assembler) { - Node* context = __ GetContext(); - Node* name_index = __ BytecodeOperandIdx(0); - Node* slot_index = __ BytecodeOperandIdx(1); - Node* depth = __ BytecodeOperandUImm(2); - - Label slowpath(assembler, Label::kDeferred); - - // Check for context extensions to allow the fast path. - __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath); - - // Fast path does a normal load context. - { - Node* slot_context = __ GetContextAtDepth(context, depth); - Node* result = __ LoadContextElement(slot_context, slot_index); - __ SetAccumulator(result); - __ Dispatch(); - } - - // Slow path when we have to call out to the runtime. - __ Bind(&slowpath); - { - Node* name = __ LoadConstantPoolEntry(name_index); - Node* result = __ CallRuntime(function_id, context, name); - __ SetAccumulator(result); - __ Dispatch(); - } -} - -// LdaLookupSlot -// -// Lookup the object with the name in constant pool entry |name_index| -// dynamically. -void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) { - DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler); -} - -// LdaLookupSlotInsideTypeof -// -// Lookup the object with the name in constant pool entry |name_index| -// dynamically without causing a NoReferenceError. -void Interpreter::DoLdaLookupContextSlotInsideTypeof( - InterpreterAssembler* assembler) { - DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); -} - -void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id, - InterpreterAssembler* assembler) { - Node* context = __ GetContext(); - Node* depth = __ BytecodeOperandUImm(2); - - Label slowpath(assembler, Label::kDeferred); - - // Check for context extensions to allow the fast path - __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath); - - // Fast path does a normal load global - { - static const int kNameOperandIndex = 0; - static const int kSlotOperandIndex = 1; - - TypeofMode typeof_mode = function_id == Runtime::kLoadLookupSlotInsideTypeof - ? INSIDE_TYPEOF - : NOT_INSIDE_TYPEOF; - - BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode, - assembler); - } - - // Slow path when we have to call out to the runtime - __ Bind(&slowpath); - { - Node* name_index = __ BytecodeOperandIdx(0); - Node* name = __ LoadConstantPoolEntry(name_index); - Node* result = __ CallRuntime(function_id, context, name); - __ SetAccumulator(result); - __ Dispatch(); - } -} - -// LdaLookupGlobalSlot -// -// Lookup the object with the name in constant pool entry |name_index| -// dynamically. -void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) { - DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler); -} - -// LdaLookupGlobalSlotInsideTypeof -// -// Lookup the object with the name in constant pool entry |name_index| -// dynamically without causing a NoReferenceError. 
-void Interpreter::DoLdaLookupGlobalSlotInsideTypeof( - InterpreterAssembler* assembler) { - DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler); -} - -void Interpreter::DoStaLookupSlot(LanguageMode language_mode, - InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* index = __ BytecodeOperandIdx(0); - Node* name = __ LoadConstantPoolEntry(index); - Node* context = __ GetContext(); - Node* result = __ CallRuntime(is_strict(language_mode) - ? Runtime::kStoreLookupSlot_Strict - : Runtime::kStoreLookupSlot_Sloppy, - context, name, value); - __ SetAccumulator(result); - __ Dispatch(); -} - -// StaLookupSlotSloppy -// -// Store the object in accumulator to the object with the name in constant -// pool entry |name_index| in sloppy mode. -void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) { - DoStaLookupSlot(LanguageMode::SLOPPY, assembler); -} - -// StaLookupSlotStrict -// -// Store the object in accumulator to the object with the name in constant -// pool entry |name_index| in strict mode. -void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) { - DoStaLookupSlot(LanguageMode::STRICT, assembler); -} - -// LdaNamedProperty -// -// Calls the LoadIC at FeedBackVector slot for and the name at -// constant pool entry . -void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) { - Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_); - Node* code_target = __ HeapConstant(ic.code()); - Node* register_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(register_index); - Node* constant_index = __ BytecodeOperandIdx(1); - Node* name = __ LoadConstantPoolEntry(constant_index); - Node* raw_slot = __ BytecodeOperandIdx(2); - Node* smi_slot = __ SmiTag(raw_slot); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* context = __ GetContext(); - Node* result = __ CallStub(ic.descriptor(), code_target, context, object, - name, smi_slot, feedback_vector); - __ SetAccumulator(result); - __ Dispatch(); -} - -// KeyedLoadIC -// -// Calls the KeyedLoadIC at FeedBackVector slot for and the key -// in the accumulator. 
-void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) { - Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_); - Node* code_target = __ HeapConstant(ic.code()); - Node* reg_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(reg_index); - Node* name = __ GetAccumulator(); - Node* raw_slot = __ BytecodeOperandIdx(1); - Node* smi_slot = __ SmiTag(raw_slot); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* context = __ GetContext(); - Node* result = __ CallStub(ic.descriptor(), code_target, context, object, - name, smi_slot, feedback_vector); - __ SetAccumulator(result); - __ Dispatch(); -} - -void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) { - Node* code_target = __ HeapConstant(ic.code()); - Node* object_reg_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(object_reg_index); - Node* constant_index = __ BytecodeOperandIdx(1); - Node* name = __ LoadConstantPoolEntry(constant_index); - Node* value = __ GetAccumulator(); - Node* raw_slot = __ BytecodeOperandIdx(2); - Node* smi_slot = __ SmiTag(raw_slot); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* context = __ GetContext(); - __ CallStub(ic.descriptor(), code_target, context, object, name, value, - smi_slot, feedback_vector); - __ Dispatch(); -} - -// StaNamedPropertySloppy -// -// Calls the sloppy mode StoreIC at FeedBackVector slot for and -// the name in constant pool entry with the value in the -// accumulator. -void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) { - Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY); - DoStoreIC(ic, assembler); -} - -// StaNamedPropertyStrict -// -// Calls the strict mode StoreIC at FeedBackVector slot for and -// the name in constant pool entry with the value in the -// accumulator. -void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) { - Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT); - DoStoreIC(ic, assembler); -} - -// StaNamedOwnProperty -// -// Calls the StoreOwnIC at FeedBackVector slot for and -// the name in constant pool entry with the value in the -// accumulator. -void Interpreter::DoStaNamedOwnProperty(InterpreterAssembler* assembler) { - Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate_); - DoStoreIC(ic, assembler); -} - -void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) { - Node* code_target = __ HeapConstant(ic.code()); - Node* object_reg_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(object_reg_index); - Node* name_reg_index = __ BytecodeOperandReg(1); - Node* name = __ LoadRegister(name_reg_index); - Node* value = __ GetAccumulator(); - Node* raw_slot = __ BytecodeOperandIdx(2); - Node* smi_slot = __ SmiTag(raw_slot); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* context = __ GetContext(); - __ CallStub(ic.descriptor(), code_target, context, object, name, value, - smi_slot, feedback_vector); - __ Dispatch(); -} - -// StaKeyedPropertySloppy -// -// Calls the sloppy mode KeyStoreIC at FeedBackVector slot for -// and the key with the value in the accumulator. -void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) { - Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY); - DoKeyedStoreIC(ic, assembler); -} - -// StaKeyedPropertyStrict -// -// Calls the strict mode KeyStoreIC at FeedBackVector slot for -// and the key with the value in the accumulator. 
-void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) { - Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT); - DoKeyedStoreIC(ic, assembler); -} - -// StaDataPropertyInLiteral -// -// Define a property with value from the accumulator in . -// Property attributes and whether set_function_name are stored in -// DataPropertyInLiteralFlags . -// -// This definition is not observable and is used only for definitions -// in object or class literals. -void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) { - Node* object = __ LoadRegister(__ BytecodeOperandReg(0)); - Node* name = __ LoadRegister(__ BytecodeOperandReg(1)); - Node* value = __ GetAccumulator(); - Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2)); - Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3)); - - Node* feedback_vector = __ LoadFeedbackVector(); - Node* context = __ GetContext(); - - __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name, - value, flags, feedback_vector, vector_index); - __ Dispatch(); -} - -// LdaModuleVariable -// -// Load the contents of a module variable into the accumulator. The variable is -// identified by . is the depth of the current context -// relative to the module context. -void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) { - Node* cell_index = __ BytecodeOperandImmIntPtr(0); - Node* depth = __ BytecodeOperandUImm(1); - - Node* module_context = __ GetContextAtDepth(__ GetContext(), depth); - Node* module = - __ LoadContextElement(module_context, Context::EXTENSION_INDEX); - - Label if_export(assembler), if_import(assembler), end(assembler); - __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export, - &if_import); - - __ Bind(&if_export); - { - Node* regular_exports = - __ LoadObjectField(module, Module::kRegularExportsOffset); - // The actual array index is (cell_index - 1). - Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1)); - Node* cell = __ LoadFixedArrayElement(regular_exports, export_index); - __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset)); - __ Goto(&end); - } - - __ Bind(&if_import); - { - Node* regular_imports = - __ LoadObjectField(module, Module::kRegularImportsOffset); - // The actual array index is (-cell_index - 1). - Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index); - Node* cell = __ LoadFixedArrayElement(regular_imports, import_index); - __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset)); - __ Goto(&end); - } - - __ Bind(&end); - __ Dispatch(); -} - -// StaModuleVariable -// -// Store accumulator to the module variable identified by . -// is the depth of the current context relative to the module context. -void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* cell_index = __ BytecodeOperandImmIntPtr(0); - Node* depth = __ BytecodeOperandUImm(1); - - Node* module_context = __ GetContextAtDepth(__ GetContext(), depth); - Node* module = - __ LoadContextElement(module_context, Context::EXTENSION_INDEX); - - Label if_export(assembler), if_import(assembler), end(assembler); - __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export, - &if_import); - - __ Bind(&if_export); - { - Node* regular_exports = - __ LoadObjectField(module, Module::kRegularExportsOffset); - // The actual array index is (cell_index - 1). 
- Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1)); - Node* cell = __ LoadFixedArrayElement(regular_exports, export_index); - __ StoreObjectField(cell, Cell::kValueOffset, value); - __ Goto(&end); - } - - __ Bind(&if_import); - { - // Not supported (probably never). - __ Abort(kUnsupportedModuleOperation); - __ Goto(&end); - } - - __ Bind(&end); - __ Dispatch(); -} - -// PushContext -// -// Saves the current context in , and pushes the accumulator as the -// new current context. -void Interpreter::DoPushContext(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* new_context = __ GetAccumulator(); - Node* old_context = __ GetContext(); - __ StoreRegister(old_context, reg_index); - __ SetContext(new_context); - __ Dispatch(); -} - -// PopContext -// -// Pops the current context and sets as the new context. -void Interpreter::DoPopContext(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* context = __ LoadRegister(reg_index); - __ SetContext(context); - __ Dispatch(); -} - -// TODO(mythria): Remove this function once all CompareOps record type feedback. -void Interpreter::DoCompareOp(Token::Value compare_op, - InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* lhs = __ LoadRegister(reg_index); - Node* rhs = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* result; - switch (compare_op) { - case Token::IN: - result = assembler->HasProperty(rhs, lhs, context); - break; - case Token::INSTANCEOF: - result = assembler->InstanceOf(lhs, rhs, context); - break; - default: - UNREACHABLE(); - } - __ SetAccumulator(result); - __ Dispatch(); -} - -template -void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* lhs = __ LoadRegister(reg_index); - Node* rhs = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(1); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* result = Generator::Generate(assembler, lhs, rhs, slot_index, - feedback_vector, context); - __ SetAccumulator(result); - __ Dispatch(); -} - -void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op, - InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* lhs = __ LoadRegister(reg_index); - Node* rhs = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(1); - Node* feedback_vector = __ LoadFeedbackVector(); - - // TODO(interpreter): the only reason this check is here is because we - // sometimes emit comparisons that shouldn't collect feedback (e.g. - // try-finally blocks and generators), and we could get rid of this by - // introducing Smi equality tests. 
- Label gather_type_feedback(assembler), do_compare(assembler); - __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare, - &gather_type_feedback); - - __ Bind(&gather_type_feedback); - { - Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); - Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler), - lhs_is_not_string(assembler), gather_rhs_type(assembler), - update_feedback(assembler); - - __ GotoIfNot(__ TaggedIsSmi(lhs), &lhs_is_not_smi); - - var_type_feedback.Bind( - __ SmiConstant(CompareOperationFeedback::kSignedSmall)); - __ Goto(&gather_rhs_type); - - __ Bind(&lhs_is_not_smi); - { - Node* lhs_map = __ LoadMap(lhs); - __ GotoIfNot(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number); - - var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber)); - __ Goto(&gather_rhs_type); - - __ Bind(&lhs_is_not_number); - { - Node* lhs_instance_type = __ LoadInstanceType(lhs); - if (Token::IsOrderedRelationalCompareOp(compare_op)) { - Label lhs_is_not_oddball(assembler); - __ GotoIfNot( - __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)), - &lhs_is_not_oddball); - - var_type_feedback.Bind( - __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)); - __ Goto(&gather_rhs_type); - - __ Bind(&lhs_is_not_oddball); - } - - Label lhs_is_not_string(assembler); - __ GotoIfNot(__ IsStringInstanceType(lhs_instance_type), - &lhs_is_not_string); - - if (Token::IsOrderedRelationalCompareOp(compare_op)) { - var_type_feedback.Bind( - __ SmiConstant(CompareOperationFeedback::kString)); - } else { - var_type_feedback.Bind(__ SelectSmiConstant( - __ Word32Equal( - __ Word32And(lhs_instance_type, - __ Int32Constant(kIsNotInternalizedMask)), - __ Int32Constant(kInternalizedTag)), - CompareOperationFeedback::kInternalizedString, - CompareOperationFeedback::kString)); - } - __ Goto(&gather_rhs_type); - - __ Bind(&lhs_is_not_string); - if (Token::IsEqualityOp(compare_op)) { - var_type_feedback.Bind(__ SelectSmiConstant( - __ IsJSReceiverInstanceType(lhs_instance_type), - CompareOperationFeedback::kReceiver, - CompareOperationFeedback::kAny)); - } else { - var_type_feedback.Bind( - __ SmiConstant(CompareOperationFeedback::kAny)); - } - __ Goto(&gather_rhs_type); - } - } - - __ Bind(&gather_rhs_type); - { - Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler); - - __ GotoIfNot(__ TaggedIsSmi(rhs), &rhs_is_not_smi); - - var_type_feedback.Bind( - __ SmiOr(var_type_feedback.value(), - __ SmiConstant(CompareOperationFeedback::kSignedSmall))); - __ Goto(&update_feedback); - - __ Bind(&rhs_is_not_smi); - { - Node* rhs_map = __ LoadMap(rhs); - __ GotoIfNot(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number); - - var_type_feedback.Bind( - __ SmiOr(var_type_feedback.value(), - __ SmiConstant(CompareOperationFeedback::kNumber))); - __ Goto(&update_feedback); - - __ Bind(&rhs_is_not_number); - { - Node* rhs_instance_type = __ LoadInstanceType(rhs); - if (Token::IsOrderedRelationalCompareOp(compare_op)) { - Label rhs_is_not_oddball(assembler); - __ GotoIfNot(__ Word32Equal(rhs_instance_type, - __ Int32Constant(ODDBALL_TYPE)), - &rhs_is_not_oddball); - - var_type_feedback.Bind(__ SmiOr( - var_type_feedback.value(), - __ SmiConstant(CompareOperationFeedback::kNumberOrOddball))); - __ Goto(&update_feedback); - - __ Bind(&rhs_is_not_oddball); - } - - Label rhs_is_not_string(assembler); - __ GotoIfNot(__ IsStringInstanceType(rhs_instance_type), - &rhs_is_not_string); - - if (Token::IsOrderedRelationalCompareOp(compare_op)) { - var_type_feedback.Bind( - __ 
SmiOr(var_type_feedback.value(), - __ SmiConstant(CompareOperationFeedback::kString))); - } else { - var_type_feedback.Bind(__ SmiOr( - var_type_feedback.value(), - __ SelectSmiConstant( - __ Word32Equal( - __ Word32And(rhs_instance_type, - __ Int32Constant(kIsNotInternalizedMask)), - __ Int32Constant(kInternalizedTag)), - CompareOperationFeedback::kInternalizedString, - CompareOperationFeedback::kString))); - } - __ Goto(&update_feedback); - - __ Bind(&rhs_is_not_string); - if (Token::IsEqualityOp(compare_op)) { - var_type_feedback.Bind( - __ SmiOr(var_type_feedback.value(), - __ SelectSmiConstant( - __ IsJSReceiverInstanceType(rhs_instance_type), - CompareOperationFeedback::kReceiver, - CompareOperationFeedback::kAny))); - } else { - var_type_feedback.Bind( - __ SmiConstant(CompareOperationFeedback::kAny)); - } - __ Goto(&update_feedback); - } - } - } - - __ Bind(&update_feedback); - { - __ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index); - __ Goto(&do_compare); - } - } - - __ Bind(&do_compare); - Node* result; - switch (compare_op) { - case Token::EQ: - result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs, - context); - break; - case Token::NE: - result = - assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context); - break; - case Token::EQ_STRICT: - result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs, - rhs, context); - break; - case Token::LT: - result = assembler->RelationalComparison(CodeStubAssembler::kLessThan, - lhs, rhs, context); - break; - case Token::GT: - result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan, - lhs, rhs, context); - break; - case Token::LTE: - result = assembler->RelationalComparison( - CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context); - break; - case Token::GTE: - result = assembler->RelationalComparison( - CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context); - break; - default: - UNREACHABLE(); - } - __ SetAccumulator(result); - __ Dispatch(); -} - -// Add -// -// Add register to accumulator. -void Interpreter::DoAdd(InterpreterAssembler* assembler) { - DoBinaryOpWithFeedback(assembler); -} - -// Sub -// -// Subtract register from accumulator. -void Interpreter::DoSub(InterpreterAssembler* assembler) { - DoBinaryOpWithFeedback(assembler); -} - -// Mul -// -// Multiply accumulator by register . -void Interpreter::DoMul(InterpreterAssembler* assembler) { - DoBinaryOpWithFeedback(assembler); -} - -// Div -// -// Divide register by accumulator. -void Interpreter::DoDiv(InterpreterAssembler* assembler) { - DoBinaryOpWithFeedback(assembler); -} - -// Mod -// -// Modulo register by accumulator. 
-void Interpreter::DoMod(InterpreterAssembler* assembler) { - DoBinaryOpWithFeedback(assembler); -} - -void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op, - InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* lhs = __ LoadRegister(reg_index); - Node* rhs = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(1); - Node* feedback_vector = __ LoadFeedbackVector(); - - Variable var_lhs_type_feedback(assembler, - MachineRepresentation::kTaggedSigned), - var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned); - Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( - context, lhs, &var_lhs_type_feedback); - Node* rhs_value = __ TruncateTaggedToWord32WithFeedback( - context, rhs, &var_rhs_type_feedback); - Node* result = nullptr; - - switch (bitwise_op) { - case Token::BIT_OR: { - Node* value = __ Word32Or(lhs_value, rhs_value); - result = __ ChangeInt32ToTagged(value); - } break; - case Token::BIT_AND: { - Node* value = __ Word32And(lhs_value, rhs_value); - result = __ ChangeInt32ToTagged(value); - } break; - case Token::BIT_XOR: { - Node* value = __ Word32Xor(lhs_value, rhs_value); - result = __ ChangeInt32ToTagged(value); - } break; - case Token::SHL: { - Node* value = __ Word32Shl( - lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f))); - result = __ ChangeInt32ToTagged(value); - } break; - case Token::SHR: { - Node* value = __ Word32Shr( - lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f))); - result = __ ChangeUint32ToTagged(value); - } break; - case Token::SAR: { - Node* value = __ Word32Sar( - lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f))); - result = __ ChangeInt32ToTagged(value); - } break; - default: - UNREACHABLE(); - } - - Node* result_type = __ SelectSmiConstant( - __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, - BinaryOperationFeedback::kNumber); - - if (FLAG_debug_code) { - Label ok(assembler); - __ GotoIf(__ TaggedIsSmi(result), &ok); - Node* result_map = __ LoadMap(result); - __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(), - kExpectedHeapNumber); - __ Goto(&ok); - __ Bind(&ok); - } - - Node* input_feedback = - __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value()); - __ UpdateFeedback(__ SmiOr(result_type, input_feedback), feedback_vector, - slot_index); - __ SetAccumulator(result); - __ Dispatch(); -} - -// BitwiseOr -// -// BitwiseOr register to accumulator. -void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) { - DoBitwiseBinaryOp(Token::BIT_OR, assembler); -} - -// BitwiseXor -// -// BitwiseXor register to accumulator. -void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) { - DoBitwiseBinaryOp(Token::BIT_XOR, assembler); -} - -// BitwiseAnd -// -// BitwiseAnd register to accumulator. -void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) { - DoBitwiseBinaryOp(Token::BIT_AND, assembler); -} - -// ShiftLeft -// -// Left shifts register by the count specified in the accumulator. -// Register is converted to an int32 and the accumulator to uint32 -// before the operation. 5 lsb bits from the accumulator are used as count -// i.e. << (accumulator & 0x1F). -void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) { - DoBitwiseBinaryOp(Token::SHL, assembler); -} - -// ShiftRight -// -// Right shifts register by the count specified in the accumulator. -// Result is sign extended. 
Register is converted to an int32 and the -// accumulator to uint32 before the operation. 5 lsb bits from the accumulator -// are used as count i.e. >> (accumulator & 0x1F). -void Interpreter::DoShiftRight(InterpreterAssembler* assembler) { - DoBitwiseBinaryOp(Token::SAR, assembler); -} - -// ShiftRightLogical -// -// Right Shifts register by the count specified in the accumulator. -// Result is zero-filled. The accumulator and register are converted to -// uint32 before the operation 5 lsb bits from the accumulator are used as -// count i.e. << (accumulator & 0x1F). -void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) { - DoBitwiseBinaryOp(Token::SHR, assembler); -} - -// AddSmi -// -// Adds an immediate value to register . For this -// operation is the lhs operand and is the operand. -void Interpreter::DoAddSmi(InterpreterAssembler* assembler) { - Variable var_result(assembler, MachineRepresentation::kTagged); - Label fastpath(assembler), slowpath(assembler, Label::kDeferred), - end(assembler); - - Node* reg_index = __ BytecodeOperandReg(1); - Node* left = __ LoadRegister(reg_index); - Node* right = __ BytecodeOperandImmSmi(0); - Node* slot_index = __ BytecodeOperandIdx(2); - Node* feedback_vector = __ LoadFeedbackVector(); - - // {right} is known to be a Smi. - // Check if the {left} is a Smi take the fast path. - __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath); - __ Bind(&fastpath); - { - // Try fast Smi addition first. - Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left), - __ BitcastTaggedToWord(right)); - Node* overflow = __ Projection(1, pair); - - // Check if the Smi additon overflowed. - Label if_notoverflow(assembler); - __ Branch(overflow, &slowpath, &if_notoverflow); - __ Bind(&if_notoverflow); - { - __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall), - feedback_vector, slot_index); - var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); - __ Goto(&end); - } - } - __ Bind(&slowpath); - { - Node* context = __ GetContext(); - AddWithFeedbackStub stub(__ isolate()); - Callable callable = - Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate())); - var_result.Bind(__ CallStub(callable, context, left, right, - __ TruncateWordToWord32(slot_index), - feedback_vector)); - __ Goto(&end); - } - __ Bind(&end); - { - __ SetAccumulator(var_result.value()); - __ Dispatch(); - } -} - -// SubSmi -// -// Subtracts an immediate value to register . For this -// operation is the lhs operand and is the rhs operand. -void Interpreter::DoSubSmi(InterpreterAssembler* assembler) { - Variable var_result(assembler, MachineRepresentation::kTagged); - Label fastpath(assembler), slowpath(assembler, Label::kDeferred), - end(assembler); - - Node* reg_index = __ BytecodeOperandReg(1); - Node* left = __ LoadRegister(reg_index); - Node* right = __ BytecodeOperandImmSmi(0); - Node* slot_index = __ BytecodeOperandIdx(2); - Node* feedback_vector = __ LoadFeedbackVector(); - - // {right} is known to be a Smi. - // Check if the {left} is a Smi take the fast path. - __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath); - __ Bind(&fastpath); - { - // Try fast Smi subtraction first. - Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left), - __ BitcastTaggedToWord(right)); - Node* overflow = __ Projection(1, pair); - - // Check if the Smi subtraction overflowed. 
- Label if_notoverflow(assembler); - __ Branch(overflow, &slowpath, &if_notoverflow); - __ Bind(&if_notoverflow); - { - __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall), - feedback_vector, slot_index); - var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); - __ Goto(&end); - } - } - __ Bind(&slowpath); - { - Node* context = __ GetContext(); - SubtractWithFeedbackStub stub(__ isolate()); - Callable callable = Callable( - stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate())); - var_result.Bind(__ CallStub(callable, context, left, right, - __ TruncateWordToWord32(slot_index), - feedback_vector)); - __ Goto(&end); - } - __ Bind(&end); - { - __ SetAccumulator(var_result.value()); - __ Dispatch(); - } -} - -// BitwiseOr -// -// BitwiseOr with . For this operation is the lhs -// operand and is the rhs operand. -void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(1); - Node* left = __ LoadRegister(reg_index); - Node* right = __ BytecodeOperandImmSmi(0); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(2); - Node* feedback_vector = __ LoadFeedbackVector(); - Variable var_lhs_type_feedback(assembler, - MachineRepresentation::kTaggedSigned); - Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( - context, left, &var_lhs_type_feedback); - Node* rhs_value = __ SmiToWord32(right); - Node* value = __ Word32Or(lhs_value, rhs_value); - Node* result = __ ChangeInt32ToTagged(value); - Node* result_type = __ SelectSmiConstant( - __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, - BinaryOperationFeedback::kNumber); - __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), - feedback_vector, slot_index); - __ SetAccumulator(result); - __ Dispatch(); -} - -// BitwiseAnd -// -// BitwiseAnd with . For this operation is the lhs -// operand and is the rhs operand. -void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(1); - Node* left = __ LoadRegister(reg_index); - Node* right = __ BytecodeOperandImmSmi(0); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(2); - Node* feedback_vector = __ LoadFeedbackVector(); - Variable var_lhs_type_feedback(assembler, - MachineRepresentation::kTaggedSigned); - Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( - context, left, &var_lhs_type_feedback); - Node* rhs_value = __ SmiToWord32(right); - Node* value = __ Word32And(lhs_value, rhs_value); - Node* result = __ ChangeInt32ToTagged(value); - Node* result_type = __ SelectSmiConstant( - __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, - BinaryOperationFeedback::kNumber); - __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), - feedback_vector, slot_index); - __ SetAccumulator(result); - __ Dispatch(); -} - -// ShiftLeftSmi -// -// Left shifts register by the count specified in . -// Register is converted to an int32 before the operation. The 5 -// lsb bits from are used as count i.e. << ( & 0x1F). 
-void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(1); - Node* left = __ LoadRegister(reg_index); - Node* right = __ BytecodeOperandImmSmi(0); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(2); - Node* feedback_vector = __ LoadFeedbackVector(); - Variable var_lhs_type_feedback(assembler, - MachineRepresentation::kTaggedSigned); - Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( - context, left, &var_lhs_type_feedback); - Node* rhs_value = __ SmiToWord32(right); - Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f)); - Node* value = __ Word32Shl(lhs_value, shift_count); - Node* result = __ ChangeInt32ToTagged(value); - Node* result_type = __ SelectSmiConstant( - __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, - BinaryOperationFeedback::kNumber); - __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), - feedback_vector, slot_index); - __ SetAccumulator(result); - __ Dispatch(); -} - -// ShiftRightSmi -// -// Right shifts register by the count specified in . -// Register is converted to an int32 before the operation. The 5 -// lsb bits from are used as count i.e. << ( & 0x1F). -void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(1); - Node* left = __ LoadRegister(reg_index); - Node* right = __ BytecodeOperandImmSmi(0); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(2); - Node* feedback_vector = __ LoadFeedbackVector(); - Variable var_lhs_type_feedback(assembler, - MachineRepresentation::kTaggedSigned); - Node* lhs_value = __ TruncateTaggedToWord32WithFeedback( - context, left, &var_lhs_type_feedback); - Node* rhs_value = __ SmiToWord32(right); - Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f)); - Node* value = __ Word32Sar(lhs_value, shift_count); - Node* result = __ ChangeInt32ToTagged(value); - Node* result_type = __ SelectSmiConstant( - __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall, - BinaryOperationFeedback::kNumber); - __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()), - feedback_vector, slot_index); - __ SetAccumulator(result); - __ Dispatch(); -} - -Node* Interpreter::BuildUnaryOp(Callable callable, - InterpreterAssembler* assembler) { - Node* target = __ HeapConstant(callable.code()); - Node* accumulator = __ GetAccumulator(); - Node* context = __ GetContext(); - return __ CallStub(callable.descriptor(), target, context, accumulator); -} - -template -void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(0); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* result = Generator::Generate(assembler, value, context, feedback_vector, - slot_index); - __ SetAccumulator(result); - __ Dispatch(); -} - -// ToName -// -// Convert the object referenced by the accumulator to a name. -void Interpreter::DoToName(InterpreterAssembler* assembler) { - Node* object = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* result = __ ToName(context, object); - __ StoreRegister(result, __ BytecodeOperandReg(0)); - __ Dispatch(); -} - -// ToNumber -// -// Convert the object referenced by the accumulator to a number. 
-void Interpreter::DoToNumber(InterpreterAssembler* assembler) { - Node* object = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* result = __ ToNumber(context, object); - __ StoreRegister(result, __ BytecodeOperandReg(0)); - __ Dispatch(); -} - -// ToObject -// -// Convert the object referenced by the accumulator to a JSReceiver. -void Interpreter::DoToObject(InterpreterAssembler* assembler) { - Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler); - __ StoreRegister(result, __ BytecodeOperandReg(0)); - __ Dispatch(); -} - -// Inc -// -// Increments value in the accumulator by one. -void Interpreter::DoInc(InterpreterAssembler* assembler) { - typedef CodeStubAssembler::Label Label; - typedef compiler::Node Node; - typedef CodeStubAssembler::Variable Variable; - - Node* value = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(0); - Node* feedback_vector = __ LoadFeedbackVector(); - - // Shared entry for floating point increment. - Label do_finc(assembler), end(assembler); - Variable var_finc_value(assembler, MachineRepresentation::kFloat64); - - // We might need to try again due to ToNumber conversion. - Variable value_var(assembler, MachineRepresentation::kTagged); - Variable result_var(assembler, MachineRepresentation::kTagged); - Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); - Variable* loop_vars[] = {&value_var, &var_type_feedback}; - Label start(assembler, 2, loop_vars); - value_var.Bind(value); - var_type_feedback.Bind( - assembler->SmiConstant(BinaryOperationFeedback::kNone)); - assembler->Goto(&start); - assembler->Bind(&start); - { - value = value_var.value(); - - Label if_issmi(assembler), if_isnotsmi(assembler); - assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi); - - assembler->Bind(&if_issmi); - { - // Try fast Smi addition first. - Node* one = assembler->SmiConstant(Smi::FromInt(1)); - Node* pair = assembler->IntPtrAddWithOverflow( - assembler->BitcastTaggedToWord(value), - assembler->BitcastTaggedToWord(one)); - Node* overflow = assembler->Projection(1, pair); - - // Check if the Smi addition overflowed. - Label if_overflow(assembler), if_notoverflow(assembler); - assembler->Branch(overflow, &if_overflow, &if_notoverflow); - - assembler->Bind(&if_notoverflow); - var_type_feedback.Bind(assembler->SmiOr( - var_type_feedback.value(), - assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall))); - result_var.Bind( - assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair))); - assembler->Goto(&end); - - assembler->Bind(&if_overflow); - { - var_finc_value.Bind(assembler->SmiToFloat64(value)); - assembler->Goto(&do_finc); - } - } - - assembler->Bind(&if_isnotsmi); - { - // Check if the value is a HeapNumber. - Label if_valueisnumber(assembler), - if_valuenotnumber(assembler, Label::kDeferred); - Node* value_map = assembler->LoadMap(value); - assembler->Branch(assembler->IsHeapNumberMap(value_map), - &if_valueisnumber, &if_valuenotnumber); - - assembler->Bind(&if_valueisnumber); - { - // Load the HeapNumber value. - var_finc_value.Bind(assembler->LoadHeapNumberValue(value)); - assembler->Goto(&do_finc); - } - - assembler->Bind(&if_valuenotnumber); - { - // We do not require an Or with earlier feedback here because once we - // convert the value to a number, we cannot reach this path. We can - // only reach this path on the first pass when the feedback is kNone. 
- CSA_ASSERT(assembler, - assembler->SmiEqual( - var_type_feedback.value(), - assembler->SmiConstant(BinaryOperationFeedback::kNone))); - - Label if_valueisoddball(assembler), if_valuenotoddball(assembler); - Node* instance_type = assembler->LoadMapInstanceType(value_map); - Node* is_oddball = assembler->Word32Equal( - instance_type, assembler->Int32Constant(ODDBALL_TYPE)); - assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); - - assembler->Bind(&if_valueisoddball); - { - // Convert Oddball to Number and check again. - value_var.Bind( - assembler->LoadObjectField(value, Oddball::kToNumberOffset)); - var_type_feedback.Bind(assembler->SmiConstant( - BinaryOperationFeedback::kNumberOrOddball)); - assembler->Goto(&start); - } - - assembler->Bind(&if_valuenotoddball); - { - // Convert to a Number first and try again. - Callable callable = - CodeFactory::NonNumberToNumber(assembler->isolate()); - var_type_feedback.Bind( - assembler->SmiConstant(BinaryOperationFeedback::kAny)); - value_var.Bind(assembler->CallStub(callable, context, value)); - assembler->Goto(&start); - } - } - } - } - - assembler->Bind(&do_finc); - { - Node* finc_value = var_finc_value.value(); - Node* one = assembler->Float64Constant(1.0); - Node* finc_result = assembler->Float64Add(finc_value, one); - var_type_feedback.Bind(assembler->SmiOr( - var_type_feedback.value(), - assembler->SmiConstant(BinaryOperationFeedback::kNumber))); - result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result)); - assembler->Goto(&end); - } - - assembler->Bind(&end); - assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector, - slot_index); - - __ SetAccumulator(result_var.value()); - __ Dispatch(); -} - -// Dec -// -// Decrements value in the accumulator by one. -void Interpreter::DoDec(InterpreterAssembler* assembler) { - typedef CodeStubAssembler::Label Label; - typedef compiler::Node Node; - typedef CodeStubAssembler::Variable Variable; - - Node* value = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* slot_index = __ BytecodeOperandIdx(0); - Node* feedback_vector = __ LoadFeedbackVector(); - - // Shared entry for floating point decrement. - Label do_fdec(assembler), end(assembler); - Variable var_fdec_value(assembler, MachineRepresentation::kFloat64); - - // We might need to try again due to ToNumber conversion. - Variable value_var(assembler, MachineRepresentation::kTagged); - Variable result_var(assembler, MachineRepresentation::kTagged); - Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned); - Variable* loop_vars[] = {&value_var, &var_type_feedback}; - Label start(assembler, 2, loop_vars); - var_type_feedback.Bind( - assembler->SmiConstant(BinaryOperationFeedback::kNone)); - value_var.Bind(value); - assembler->Goto(&start); - assembler->Bind(&start); - { - value = value_var.value(); - - Label if_issmi(assembler), if_isnotsmi(assembler); - assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi); - - assembler->Bind(&if_issmi); - { - // Try fast Smi subtraction first. - Node* one = assembler->SmiConstant(Smi::FromInt(1)); - Node* pair = assembler->IntPtrSubWithOverflow( - assembler->BitcastTaggedToWord(value), - assembler->BitcastTaggedToWord(one)); - Node* overflow = assembler->Projection(1, pair); - - // Check if the Smi subtraction overflowed. 
- Label if_overflow(assembler), if_notoverflow(assembler); - assembler->Branch(overflow, &if_overflow, &if_notoverflow); - - assembler->Bind(&if_notoverflow); - var_type_feedback.Bind(assembler->SmiOr( - var_type_feedback.value(), - assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall))); - result_var.Bind( - assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair))); - assembler->Goto(&end); - - assembler->Bind(&if_overflow); - { - var_fdec_value.Bind(assembler->SmiToFloat64(value)); - assembler->Goto(&do_fdec); - } - } - - assembler->Bind(&if_isnotsmi); - { - // Check if the value is a HeapNumber. - Label if_valueisnumber(assembler), - if_valuenotnumber(assembler, Label::kDeferred); - Node* value_map = assembler->LoadMap(value); - assembler->Branch(assembler->IsHeapNumberMap(value_map), - &if_valueisnumber, &if_valuenotnumber); - - assembler->Bind(&if_valueisnumber); - { - // Load the HeapNumber value. - var_fdec_value.Bind(assembler->LoadHeapNumberValue(value)); - assembler->Goto(&do_fdec); - } - - assembler->Bind(&if_valuenotnumber); - { - // We do not require an Or with earlier feedback here because once we - // convert the value to a number, we cannot reach this path. We can - // only reach this path on the first pass when the feedback is kNone. - CSA_ASSERT(assembler, - assembler->SmiEqual( - var_type_feedback.value(), - assembler->SmiConstant(BinaryOperationFeedback::kNone))); - - Label if_valueisoddball(assembler), if_valuenotoddball(assembler); - Node* instance_type = assembler->LoadMapInstanceType(value_map); - Node* is_oddball = assembler->Word32Equal( - instance_type, assembler->Int32Constant(ODDBALL_TYPE)); - assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball); - - assembler->Bind(&if_valueisoddball); - { - // Convert Oddball to Number and check again. - value_var.Bind( - assembler->LoadObjectField(value, Oddball::kToNumberOffset)); - var_type_feedback.Bind(assembler->SmiConstant( - BinaryOperationFeedback::kNumberOrOddball)); - assembler->Goto(&start); - } - - assembler->Bind(&if_valuenotoddball); - { - // Convert to a Number first and try again. - Callable callable = - CodeFactory::NonNumberToNumber(assembler->isolate()); - var_type_feedback.Bind( - assembler->SmiConstant(BinaryOperationFeedback::kAny)); - value_var.Bind(assembler->CallStub(callable, context, value)); - assembler->Goto(&start); - } - } - } - } - - assembler->Bind(&do_fdec); - { - Node* fdec_value = var_fdec_value.value(); - Node* one = assembler->Float64Constant(1.0); - Node* fdec_result = assembler->Float64Sub(fdec_value, one); - var_type_feedback.Bind(assembler->SmiOr( - var_type_feedback.value(), - assembler->SmiConstant(BinaryOperationFeedback::kNumber))); - result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result)); - assembler->Goto(&end); - } - - assembler->Bind(&end); - assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector, - slot_index); - - __ SetAccumulator(result_var.value()); - __ Dispatch(); -} - -// LogicalNot -// -// Perform logical-not on the accumulator, first casting the -// accumulator to a boolean value if required. 
-// ToBooleanLogicalNot -void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Variable result(assembler, MachineRepresentation::kTagged); - Label if_true(assembler), if_false(assembler), end(assembler); - Node* true_value = __ BooleanConstant(true); - Node* false_value = __ BooleanConstant(false); - __ BranchIfToBooleanIsTrue(value, &if_true, &if_false); - __ Bind(&if_true); - { - result.Bind(false_value); - __ Goto(&end); - } - __ Bind(&if_false); - { - result.Bind(true_value); - __ Goto(&end); - } - __ Bind(&end); - __ SetAccumulator(result.value()); - __ Dispatch(); -} - -// LogicalNot -// -// Perform logical-not on the accumulator, which must already be a boolean -// value. -void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Variable result(assembler, MachineRepresentation::kTagged); - Label if_true(assembler), if_false(assembler), end(assembler); - Node* true_value = __ BooleanConstant(true); - Node* false_value = __ BooleanConstant(false); - __ Branch(__ WordEqual(value, true_value), &if_true, &if_false); - __ Bind(&if_true); - { - result.Bind(false_value); - __ Goto(&end); - } - __ Bind(&if_false); - { - if (FLAG_debug_code) { - __ AbortIfWordNotEqual(value, false_value, - BailoutReason::kExpectedBooleanValue); - } - result.Bind(true_value); - __ Goto(&end); - } - __ Bind(&end); - __ SetAccumulator(result.value()); - __ Dispatch(); -} - -// TypeOf -// -// Load the accumulator with the string representating type of the -// object in the accumulator. -void Interpreter::DoTypeOf(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* result = assembler->Typeof(value, context); - __ SetAccumulator(result); - __ Dispatch(); -} - -void Interpreter::DoDelete(Runtime::FunctionId function_id, - InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(reg_index); - Node* key = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* result = __ CallRuntime(function_id, context, object, key); - __ SetAccumulator(result); - __ Dispatch(); -} - -// DeletePropertyStrict -// -// Delete the property specified in the accumulator from the object -// referenced by the register operand following strict mode semantics. -void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) { - DoDelete(Runtime::kDeleteProperty_Strict, assembler); -} - -// DeletePropertySloppy -// -// Delete the property specified in the accumulator from the object -// referenced by the register operand following sloppy mode semantics. -void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) { - DoDelete(Runtime::kDeleteProperty_Sloppy, assembler); -} - -// GetSuperConstructor -// -// Get the super constructor from the object referenced by the accumulator. -// The result is stored in register |reg|. 
-void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) { - Node* active_function = __ GetAccumulator(); - Node* context = __ GetContext(); - Node* result = __ GetSuperConstructor(active_function, context); - Node* reg = __ BytecodeOperandReg(0); - __ StoreRegister(result, reg); - __ Dispatch(); -} - -void Interpreter::DoJSCall(InterpreterAssembler* assembler, - TailCallMode tail_call_mode) { - Node* function_reg = __ BytecodeOperandReg(0); - Node* function = __ LoadRegister(function_reg); - Node* receiver_reg = __ BytecodeOperandReg(1); - Node* receiver_arg = __ RegisterLocation(receiver_reg); - Node* receiver_args_count = __ BytecodeOperandCount(2); - Node* receiver_count = __ Int32Constant(1); - Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); - Node* slot_id = __ BytecodeOperandIdx(3); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* context = __ GetContext(); - Node* result = - __ CallJSWithFeedback(function, context, receiver_arg, args_count, - slot_id, feedback_vector, tail_call_mode); - __ SetAccumulator(result); - __ Dispatch(); -} - -// Call -// -// Call a JSfunction or Callable in |callable| with the |receiver| and -// |arg_count| arguments in subsequent registers. Collect type feedback -// into |feedback_slot_id| -void Interpreter::DoCall(InterpreterAssembler* assembler) { - DoJSCall(assembler, TailCallMode::kDisallow); -} - -// CallProperty -// -// Call a JSfunction or Callable in |callable| with the |receiver| and -// |arg_count| arguments in subsequent registers. Collect type feedback into -// |feedback_slot_id|. The callable is known to be a property of the receiver. -void Interpreter::DoCallProperty(InterpreterAssembler* assembler) { - // TODO(leszeks): Look into making the interpreter use the fact that the - // receiver is non-null. - DoJSCall(assembler, TailCallMode::kDisallow); -} - -// TailCall -// -// Tail call a JSfunction or Callable in |callable| with the |receiver| and -// |arg_count| arguments in subsequent registers. Collect type feedback -// into |feedback_slot_id| -void Interpreter::DoTailCall(InterpreterAssembler* assembler) { - DoJSCall(assembler, TailCallMode::kAllow); -} - -// CallRuntime -// -// Call the runtime function |function_id| with the first argument in -// register |first_arg| and |arg_count| arguments in subsequent -// registers. -void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) { - Node* function_id = __ BytecodeOperandRuntimeId(0); - Node* first_arg_reg = __ BytecodeOperandReg(1); - Node* first_arg = __ RegisterLocation(first_arg_reg); - Node* args_count = __ BytecodeOperandCount(2); - Node* context = __ GetContext(); - Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count); - __ SetAccumulator(result); - __ Dispatch(); -} - -// InvokeIntrinsic -// -// Implements the semantic equivalent of calling the runtime function -// |function_id| with the first argument in |first_arg| and |arg_count| -// arguments in subsequent registers. 
-void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) { - Node* function_id = __ BytecodeOperandIntrinsicId(0); - Node* first_arg_reg = __ BytecodeOperandReg(1); - Node* arg_count = __ BytecodeOperandCount(2); - Node* context = __ GetContext(); - IntrinsicsHelper helper(assembler); - Node* result = - helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count); - __ SetAccumulator(result); - __ Dispatch(); -} - -// CallRuntimeForPair -// -// Call the runtime function |function_id| which returns a pair, with the -// first argument in register |first_arg| and |arg_count| arguments in -// subsequent registers. Returns the result in and -// -void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) { - // Call the runtime function. - Node* function_id = __ BytecodeOperandRuntimeId(0); - Node* first_arg_reg = __ BytecodeOperandReg(1); - Node* first_arg = __ RegisterLocation(first_arg_reg); - Node* args_count = __ BytecodeOperandCount(2); - Node* context = __ GetContext(); - Node* result_pair = - __ CallRuntimeN(function_id, context, first_arg, args_count, 2); - - // Store the results in and - Node* first_return_reg = __ BytecodeOperandReg(3); - Node* second_return_reg = __ NextRegister(first_return_reg); - Node* result0 = __ Projection(0, result_pair); - Node* result1 = __ Projection(1, result_pair); - __ StoreRegister(result0, first_return_reg); - __ StoreRegister(result1, second_return_reg); - __ Dispatch(); -} - -// CallJSRuntime -// -// Call the JS runtime function that has the |context_index| with the receiver -// in register |receiver| and |arg_count| arguments in subsequent registers. -void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) { - Node* context_index = __ BytecodeOperandIdx(0); - Node* receiver_reg = __ BytecodeOperandReg(1); - Node* first_arg = __ RegisterLocation(receiver_reg); - Node* receiver_args_count = __ BytecodeOperandCount(2); - Node* receiver_count = __ Int32Constant(1); - Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); - - // Get the function to call from the native context. - Node* context = __ GetContext(); - Node* native_context = __ LoadNativeContext(context); - Node* function = __ LoadContextElement(native_context, context_index); - - // Call the function. - Node* result = __ CallJS(function, context, first_arg, args_count, - TailCallMode::kDisallow); - __ SetAccumulator(result); - __ Dispatch(); -} - -// CallWithSpread -// -// Call a JSfunction or Callable in |callable| with the receiver in -// |first_arg| and |arg_count - 1| arguments in subsequent registers. The -// final argument is always a spread. -// -void Interpreter::DoCallWithSpread(InterpreterAssembler* assembler) { - Node* callable_reg = __ BytecodeOperandReg(0); - Node* callable = __ LoadRegister(callable_reg); - Node* receiver_reg = __ BytecodeOperandReg(1); - Node* receiver_arg = __ RegisterLocation(receiver_reg); - Node* receiver_args_count = __ BytecodeOperandCount(2); - Node* receiver_count = __ Int32Constant(1); - Node* args_count = __ Int32Sub(receiver_args_count, receiver_count); - Node* context = __ GetContext(); - - // Call into Runtime function CallWithSpread which does everything. - Node* result = - __ CallJSWithSpread(callable, context, receiver_arg, args_count); - __ SetAccumulator(result); - __ Dispatch(); -} - -// ConstructWithSpread -// -// Call the constructor in |constructor| with the first argument in register -// |first_arg| and |arg_count| arguments in subsequent registers. 
The final -// argument is always a spread. The new.target is in the accumulator. -// -void Interpreter::DoConstructWithSpread(InterpreterAssembler* assembler) { - Node* new_target = __ GetAccumulator(); - Node* constructor_reg = __ BytecodeOperandReg(0); - Node* constructor = __ LoadRegister(constructor_reg); - Node* first_arg_reg = __ BytecodeOperandReg(1); - Node* first_arg = __ RegisterLocation(first_arg_reg); - Node* args_count = __ BytecodeOperandCount(2); - Node* context = __ GetContext(); - Node* result = __ ConstructWithSpread(constructor, context, new_target, - first_arg, args_count); - __ SetAccumulator(result); - __ Dispatch(); -} - -// Construct -// -// Call operator construct with |constructor| and the first argument in -// register |first_arg| and |arg_count| arguments in subsequent -// registers. The new.target is in the accumulator. -// -void Interpreter::DoConstruct(InterpreterAssembler* assembler) { - Node* new_target = __ GetAccumulator(); - Node* constructor_reg = __ BytecodeOperandReg(0); - Node* constructor = __ LoadRegister(constructor_reg); - Node* first_arg_reg = __ BytecodeOperandReg(1); - Node* first_arg = __ RegisterLocation(first_arg_reg); - Node* args_count = __ BytecodeOperandCount(2); - Node* slot_id = __ BytecodeOperandIdx(3); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* context = __ GetContext(); - Node* result = __ Construct(constructor, context, new_target, first_arg, - args_count, slot_id, feedback_vector); - __ SetAccumulator(result); - __ Dispatch(); -} - -// TestEqual -// -// Test if the value in the register equals the accumulator. -void Interpreter::DoTestEqual(InterpreterAssembler* assembler) { - DoCompareOpWithFeedback(Token::Value::EQ, assembler); -} - -// TestNotEqual -// -// Test if the value in the register is not equal to the accumulator. -void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) { - DoCompareOpWithFeedback(Token::Value::NE, assembler); -} - -// TestEqualStrict -// -// Test if the value in the register is strictly equal to the accumulator. -void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) { - DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler); -} - -// TestLessThan -// -// Test if the value in the register is less than the accumulator. -void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) { - DoCompareOpWithFeedback(Token::Value::LT, assembler); -} - -// TestGreaterThan -// -// Test if the value in the register is greater than the accumulator. -void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) { - DoCompareOpWithFeedback(Token::Value::GT, assembler); -} - -// TestLessThanOrEqual -// -// Test if the value in the register is less than or equal to the -// accumulator. -void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) { - DoCompareOpWithFeedback(Token::Value::LTE, assembler); -} - -// TestGreaterThanOrEqual -// -// Test if the value in the register is greater than or equal to the -// accumulator. -void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) { - DoCompareOpWithFeedback(Token::Value::GTE, assembler); -} - -// TestIn -// -// Test if the object referenced by the register operand is a property of the -// object referenced by the accumulator. -void Interpreter::DoTestIn(InterpreterAssembler* assembler) { - DoCompareOp(Token::IN, assembler); -} - -// TestInstanceOf -// -// Test if the object referenced by the register is an an instance of type -// referenced by the accumulator. 
-void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) { - DoCompareOp(Token::INSTANCEOF, assembler); -} - -// TestUndetectable -// -// Test if the value in the register equals to null/undefined. This is -// done by checking undetectable bit on the map of the object. -void Interpreter::DoTestUndetectable(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(reg_index); - - Label not_equal(assembler), end(assembler); - // If the object is an Smi then return false. - __ GotoIf(__ TaggedIsSmi(object), ¬_equal); - - // If it is a HeapObject, load the map and check for undetectable bit. - Node* map = __ LoadMap(object); - Node* map_bitfield = __ LoadMapBitField(map); - Node* map_undetectable = - __ Word32And(map_bitfield, __ Int32Constant(1 << Map::kIsUndetectable)); - __ GotoIf(__ Word32Equal(map_undetectable, __ Int32Constant(0)), ¬_equal); - - __ SetAccumulator(__ BooleanConstant(true)); - __ Goto(&end); - - __ Bind(¬_equal); - { - __ SetAccumulator(__ BooleanConstant(false)); - __ Goto(&end); - } - - __ Bind(&end); - __ Dispatch(); -} - -// TestNull -// -// Test if the value in the register is strictly equal to null. -void Interpreter::DoTestNull(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(reg_index); - Node* null_value = __ HeapConstant(isolate_->factory()->null_value()); - - Label equal(assembler), end(assembler); - __ GotoIf(__ WordEqual(object, null_value), &equal); - __ SetAccumulator(__ BooleanConstant(false)); - __ Goto(&end); - - __ Bind(&equal); - { - __ SetAccumulator(__ BooleanConstant(true)); - __ Goto(&end); - } - - __ Bind(&end); - __ Dispatch(); -} - -// TestUndefined -// -// Test if the value in the register is strictly equal to undefined. -void Interpreter::DoTestUndefined(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(reg_index); - Node* undefined_value = - __ HeapConstant(isolate_->factory()->undefined_value()); - - Label equal(assembler), end(assembler); - __ GotoIf(__ WordEqual(object, undefined_value), &equal); - __ SetAccumulator(__ BooleanConstant(false)); - __ Goto(&end); - - __ Bind(&equal); - { - __ SetAccumulator(__ BooleanConstant(true)); - __ Goto(&end); - } - - __ Bind(&end); - __ Dispatch(); -} - -// Jump -// -// Jump by number of bytes represented by the immediate operand |imm|. -void Interpreter::DoJump(InterpreterAssembler* assembler) { - Node* relative_jump = __ BytecodeOperandUImmWord(0); - __ Jump(relative_jump); -} - -// JumpConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool. -void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) { - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - __ Jump(relative_jump); -} - -// JumpIfTrue -// -// Jump by number of bytes represented by an immediate operand if the -// accumulator contains true. This only works for boolean inputs, and -// will misbehave if passed arbitrary input values. 
-void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* relative_jump = __ BytecodeOperandUImmWord(0); - Node* true_value = __ BooleanConstant(true); - CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator)); - CSA_ASSERT(assembler, assembler->IsBoolean(accumulator)); - __ JumpIfWordEqual(accumulator, true_value, relative_jump); -} - -// JumpIfTrueConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool -// if the accumulator contains true. This only works for boolean inputs, and -// will misbehave if passed arbitrary input values. -void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - Node* true_value = __ BooleanConstant(true); - CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator)); - CSA_ASSERT(assembler, assembler->IsBoolean(accumulator)); - __ JumpIfWordEqual(accumulator, true_value, relative_jump); -} - -// JumpIfFalse -// -// Jump by number of bytes represented by an immediate operand if the -// accumulator contains false. This only works for boolean inputs, and -// will misbehave if passed arbitrary input values. -void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* relative_jump = __ BytecodeOperandUImmWord(0); - Node* false_value = __ BooleanConstant(false); - CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator)); - CSA_ASSERT(assembler, assembler->IsBoolean(accumulator)); - __ JumpIfWordEqual(accumulator, false_value, relative_jump); -} - -// JumpIfFalseConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool -// if the accumulator contains false. This only works for boolean inputs, and -// will misbehave if passed arbitrary input values. -void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - Node* false_value = __ BooleanConstant(false); - CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator)); - CSA_ASSERT(assembler, assembler->IsBoolean(accumulator)); - __ JumpIfWordEqual(accumulator, false_value, relative_jump); -} - -// JumpIfToBooleanTrue -// -// Jump by number of bytes represented by an immediate operand if the object -// referenced by the accumulator is true when the object is cast to boolean. -void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* relative_jump = __ BytecodeOperandUImmWord(0); - Label if_true(assembler), if_false(assembler); - __ BranchIfToBooleanIsTrue(value, &if_true, &if_false); - __ Bind(&if_true); - __ Jump(relative_jump); - __ Bind(&if_false); - __ Dispatch(); -} - -// JumpIfToBooleanTrueConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool -// if the object referenced by the accumulator is true when the object is cast -// to boolean. 
-void Interpreter::DoJumpIfToBooleanTrueConstant( - InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - Label if_true(assembler), if_false(assembler); - __ BranchIfToBooleanIsTrue(value, &if_true, &if_false); - __ Bind(&if_true); - __ Jump(relative_jump); - __ Bind(&if_false); - __ Dispatch(); -} - -// JumpIfToBooleanFalse -// -// Jump by number of bytes represented by an immediate operand if the object -// referenced by the accumulator is false when the object is cast to boolean. -void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* relative_jump = __ BytecodeOperandUImmWord(0); - Label if_true(assembler), if_false(assembler); - __ BranchIfToBooleanIsTrue(value, &if_true, &if_false); - __ Bind(&if_true); - __ Dispatch(); - __ Bind(&if_false); - __ Jump(relative_jump); -} - -// JumpIfToBooleanFalseConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool -// if the object referenced by the accumulator is false when the object is cast -// to boolean. -void Interpreter::DoJumpIfToBooleanFalseConstant( - InterpreterAssembler* assembler) { - Node* value = __ GetAccumulator(); - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - Label if_true(assembler), if_false(assembler); - __ BranchIfToBooleanIsTrue(value, &if_true, &if_false); - __ Bind(&if_true); - __ Dispatch(); - __ Bind(&if_false); - __ Jump(relative_jump); -} - -// JumpIfNull -// -// Jump by number of bytes represented by an immediate operand if the object -// referenced by the accumulator is the null constant. -void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* null_value = __ HeapConstant(isolate_->factory()->null_value()); - Node* relative_jump = __ BytecodeOperandUImmWord(0); - __ JumpIfWordEqual(accumulator, null_value, relative_jump); -} - -// JumpIfNullConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool -// if the object referenced by the accumulator is the null constant. -void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* null_value = __ HeapConstant(isolate_->factory()->null_value()); - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - __ JumpIfWordEqual(accumulator, null_value, relative_jump); -} - -// JumpIfUndefined -// -// Jump by number of bytes represented by an immediate operand if the object -// referenced by the accumulator is the undefined constant. -void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* undefined_value = - __ HeapConstant(isolate_->factory()->undefined_value()); - Node* relative_jump = __ BytecodeOperandUImmWord(0); - __ JumpIfWordEqual(accumulator, undefined_value, relative_jump); -} - -// JumpIfUndefinedConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool -// if the object referenced by the accumulator is the undefined constant. 
-void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* undefined_value = - __ HeapConstant(isolate_->factory()->undefined_value()); - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - __ JumpIfWordEqual(accumulator, undefined_value, relative_jump); -} - -// JumpIfJSReceiver -// -// Jump by number of bytes represented by an immediate operand if the object -// referenced by the accumulator is a JSReceiver. -void Interpreter::DoJumpIfJSReceiver(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* relative_jump = __ BytecodeOperandUImmWord(0); - - Label if_object(assembler), if_notobject(assembler, Label::kDeferred), - if_notsmi(assembler); - __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi); - - __ Bind(&if_notsmi); - __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject); - __ Bind(&if_object); - __ Jump(relative_jump); - - __ Bind(&if_notobject); - __ Dispatch(); -} - -// JumpIfJSReceiverConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool if -// the object referenced by the accumulator is a JSReceiver. -void Interpreter::DoJumpIfJSReceiverConstant(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - - Label if_object(assembler), if_notobject(assembler), if_notsmi(assembler); - __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi); - - __ Bind(&if_notsmi); - __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject); - - __ Bind(&if_object); - __ Jump(relative_jump); - - __ Bind(&if_notobject); - __ Dispatch(); -} - -// JumpIfNotHole -// -// Jump by number of bytes represented by an immediate operand if the object -// referenced by the accumulator is the hole. -void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value()); - Node* relative_jump = __ BytecodeOperandUImmWord(0); - __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump); -} - -// JumpIfNotHoleConstant -// -// Jump by number of bytes in the Smi in the |idx| entry in the constant pool -// if the object referenced by the accumulator is the hole constant. -void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) { - Node* accumulator = __ GetAccumulator(); - Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value()); - Node* index = __ BytecodeOperandIdx(0); - Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index); - __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump); -} - -// JumpLoop -// -// Jump by number of bytes represented by the immediate operand |imm|. Also -// performs a loop nesting check and potentially triggers OSR in case the -// current OSR level matches (or exceeds) the specified |loop_depth|. -void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) { - Node* relative_jump = __ BytecodeOperandUImmWord(0); - Node* loop_depth = __ BytecodeOperandImm(1); - Node* osr_level = __ LoadOSRNestingLevel(); - - // Check if OSR points at the given {loop_depth} are armed by comparing it to - // the current {osr_level} loaded from the header of the BytecodeArray. 
- Label ok(assembler), osr_armed(assembler, Label::kDeferred); - Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level); - __ Branch(condition, &ok, &osr_armed); - - __ Bind(&ok); - __ JumpBackward(relative_jump); - - __ Bind(&osr_armed); - { - Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_); - Node* target = __ HeapConstant(callable.code()); - Node* context = __ GetContext(); - __ CallStub(callable.descriptor(), target, context); - __ JumpBackward(relative_jump); - } -} - -// CreateRegExpLiteral -// -// Creates a regular expression literal for literal index with -// and the pattern in . -void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) { - Node* index = __ BytecodeOperandIdx(0); - Node* pattern = __ LoadConstantPoolEntry(index); - Node* literal_index = __ BytecodeOperandIdxSmi(1); - Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2)); - Node* closure = __ LoadRegister(Register::function_closure()); - Node* context = __ GetContext(); - ConstructorBuiltinsAssembler constructor_assembler(assembler->state()); - Node* result = constructor_assembler.EmitFastCloneRegExp( - closure, literal_index, pattern, flags, context); - __ SetAccumulator(result); - __ Dispatch(); -} - -// CreateArrayLiteral -// -// Creates an array literal for literal index with -// CreateArrayLiteral flags and constant elements in . -void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) { - Node* literal_index = __ BytecodeOperandIdxSmi(1); - Node* closure = __ LoadRegister(Register::function_closure()); - Node* context = __ GetContext(); - Node* bytecode_flags = __ BytecodeOperandFlag(2); - - Label fast_shallow_clone(assembler), - call_runtime(assembler, Label::kDeferred); - __ Branch(__ IsSetWord32( - bytecode_flags), - &fast_shallow_clone, &call_runtime); - - __ Bind(&fast_shallow_clone); - { - ConstructorBuiltinsAssembler constructor_assembler(assembler->state()); - Node* result = constructor_assembler.EmitFastCloneShallowArray( - closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE); - __ SetAccumulator(result); - __ Dispatch(); - } - - __ Bind(&call_runtime); - { - Node* flags_raw = - __ DecodeWordFromWord32( - bytecode_flags); - Node* flags = __ SmiTag(flags_raw); - Node* index = __ BytecodeOperandIdx(0); - Node* constant_elements = __ LoadConstantPoolEntry(index); - Node* result = - __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure, - literal_index, constant_elements, flags); - __ SetAccumulator(result); - __ Dispatch(); - } -} - -// CreateObjectLiteral -// -// Creates an object literal for literal index with -// CreateObjectLiteralFlags and constant elements in . -void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) { - Node* literal_index = __ BytecodeOperandIdxSmi(1); - Node* bytecode_flags = __ BytecodeOperandFlag(2); - Node* closure = __ LoadRegister(Register::function_closure()); - - // Check if we can do a fast clone or have to call the runtime. - Label if_fast_clone(assembler), - if_not_fast_clone(assembler, Label::kDeferred); - Node* fast_clone_properties_count = __ DecodeWordFromWord32< - CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags); - __ Branch(__ WordNotEqual(fast_clone_properties_count, __ IntPtrConstant(0)), - &if_fast_clone, &if_not_fast_clone); - - __ Bind(&if_fast_clone); - { - // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub. 
- ConstructorBuiltinsAssembler constructor_assembler(assembler->state()); - Node* result = constructor_assembler.EmitFastCloneShallowObject( - &if_not_fast_clone, closure, literal_index, - fast_clone_properties_count); - __ StoreRegister(result, __ BytecodeOperandReg(3)); - __ Dispatch(); - } - - __ Bind(&if_not_fast_clone); - { - // If we can't do a fast clone, call into the runtime. - Node* index = __ BytecodeOperandIdx(0); - Node* constant_elements = __ LoadConstantPoolEntry(index); - Node* context = __ GetContext(); - - Node* flags_raw = - __ DecodeWordFromWord32( - bytecode_flags); - Node* flags = __ SmiTag(flags_raw); - - Node* result = - __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure, - literal_index, constant_elements, flags); - __ StoreRegister(result, __ BytecodeOperandReg(3)); - // TODO(klaasb) build a single dispatch once the call is inlined - __ Dispatch(); - } -} - -// CreateClosure -// -// Creates a new closure for SharedFunctionInfo at position |index| in the -// constant pool and with the PretenureFlag . -void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) { - Node* index = __ BytecodeOperandIdx(0); - Node* shared = __ LoadConstantPoolEntry(index); - Node* flags = __ BytecodeOperandFlag(2); - Node* context = __ GetContext(); - - Label call_runtime(assembler, Label::kDeferred); - __ GotoIfNot(__ IsSetWord32(flags), - &call_runtime); - ConstructorBuiltinsAssembler constructor_assembler(assembler->state()); - Node* vector_index = __ BytecodeOperandIdx(1); - vector_index = __ SmiTag(vector_index); - Node* feedback_vector = __ LoadFeedbackVector(); - __ SetAccumulator(constructor_assembler.EmitFastNewClosure( - shared, feedback_vector, vector_index, context)); - __ Dispatch(); - - __ Bind(&call_runtime); - { - Node* tenured_raw = - __ DecodeWordFromWord32(flags); - Node* tenured = __ SmiTag(tenured_raw); - feedback_vector = __ LoadFeedbackVector(); - vector_index = __ BytecodeOperandIdx(1); - vector_index = __ SmiTag(vector_index); - Node* result = - __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, - feedback_vector, vector_index, tenured); - __ SetAccumulator(result); - __ Dispatch(); - } -} - -// CreateBlockContext -// -// Creates a new block context with the scope info constant at |index| and the -// closure in the accumulator. -void Interpreter::DoCreateBlockContext(InterpreterAssembler* assembler) { - Node* index = __ BytecodeOperandIdx(0); - Node* scope_info = __ LoadConstantPoolEntry(index); - Node* closure = __ GetAccumulator(); - Node* context = __ GetContext(); - __ SetAccumulator( - __ CallRuntime(Runtime::kPushBlockContext, context, scope_info, closure)); - __ Dispatch(); -} - -// CreateCatchContext -// -// Creates a new context for a catch block with the |exception| in a register, -// the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the -// closure in the accumulator. 
-void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) { - Node* exception_reg = __ BytecodeOperandReg(0); - Node* exception = __ LoadRegister(exception_reg); - Node* name_idx = __ BytecodeOperandIdx(1); - Node* name = __ LoadConstantPoolEntry(name_idx); - Node* scope_info_idx = __ BytecodeOperandIdx(2); - Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx); - Node* closure = __ GetAccumulator(); - Node* context = __ GetContext(); - __ SetAccumulator(__ CallRuntime(Runtime::kPushCatchContext, context, name, - exception, scope_info, closure)); - __ Dispatch(); -} - -// CreateFunctionContext -// -// Creates a new context with number of |slots| for the function closure. -void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) { - Node* closure = __ LoadRegister(Register::function_closure()); - Node* slots = __ BytecodeOperandUImm(0); - Node* context = __ GetContext(); - ConstructorBuiltinsAssembler constructor_assembler(assembler->state()); - __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext( - closure, slots, context, FUNCTION_SCOPE)); - __ Dispatch(); -} - -// CreateEvalContext -// -// Creates a new context with number of |slots| for an eval closure. -void Interpreter::DoCreateEvalContext(InterpreterAssembler* assembler) { - Node* closure = __ LoadRegister(Register::function_closure()); - Node* slots = __ BytecodeOperandUImm(0); - Node* context = __ GetContext(); - ConstructorBuiltinsAssembler constructor_assembler(assembler->state()); - __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext( - closure, slots, context, EVAL_SCOPE)); - __ Dispatch(); -} - -// CreateWithContext -// -// Creates a new context with the ScopeInfo at |scope_info_idx| for a -// with-statement with the object in |register| and the closure in the -// accumulator. -void Interpreter::DoCreateWithContext(InterpreterAssembler* assembler) { - Node* reg_index = __ BytecodeOperandReg(0); - Node* object = __ LoadRegister(reg_index); - Node* scope_info_idx = __ BytecodeOperandIdx(1); - Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx); - Node* closure = __ GetAccumulator(); - Node* context = __ GetContext(); - __ SetAccumulator(__ CallRuntime(Runtime::kPushWithContext, context, object, - scope_info, closure)); - __ Dispatch(); -} - -// CreateMappedArguments -// -// Creates a new mapped arguments object. -void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) { - Node* closure = __ LoadRegister(Register::function_closure()); - Node* context = __ GetContext(); - - Label if_duplicate_parameters(assembler, Label::kDeferred); - Label if_not_duplicate_parameters(assembler); - - // Check if function has duplicate parameters. - // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports - // duplicate parameters. 
- Node* shared_info = - __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset); - Node* compiler_hints = __ LoadObjectField( - shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset, - MachineType::Uint8()); - Node* duplicate_parameters_bit = __ Int32Constant( - 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte); - Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit); - __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters); - - __ Bind(&if_not_duplicate_parameters); - { - ArgumentsBuiltinsAssembler constructor_assembler(assembler->state()); - Node* result = - constructor_assembler.EmitFastNewSloppyArguments(context, closure); - __ SetAccumulator(result); - __ Dispatch(); - } - - __ Bind(&if_duplicate_parameters); - { - Node* result = - __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure); - __ SetAccumulator(result); - __ Dispatch(); - } -} - -// CreateUnmappedArguments -// -// Creates a new unmapped arguments object. -void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) { - Node* context = __ GetContext(); - Node* closure = __ LoadRegister(Register::function_closure()); - ArgumentsBuiltinsAssembler builtins_assembler(assembler->state()); - Node* result = - builtins_assembler.EmitFastNewStrictArguments(context, closure); - __ SetAccumulator(result); - __ Dispatch(); -} - -// CreateRestParameter -// -// Creates a new rest parameter array. -void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) { - Node* closure = __ LoadRegister(Register::function_closure()); - Node* context = __ GetContext(); - ArgumentsBuiltinsAssembler builtins_assembler(assembler->state()); - Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure); - __ SetAccumulator(result); - __ Dispatch(); -} - -// StackCheck -// -// Performs a stack guard check. -void Interpreter::DoStackCheck(InterpreterAssembler* assembler) { - Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred); - - Node* interrupt = __ StackCheckTriggeredInterrupt(); - __ Branch(interrupt, &stack_check_interrupt, &ok); - - __ Bind(&ok); - __ Dispatch(); - - __ Bind(&stack_check_interrupt); - { - Node* context = __ GetContext(); - __ CallRuntime(Runtime::kStackGuard, context); - __ Dispatch(); - } -} - -// SetPendingMessage -// -// Sets the pending message to the value in the accumulator, and returns the -// previous pending message in the accumulator. -void Interpreter::DoSetPendingMessage(InterpreterAssembler* assembler) { - Node* pending_message = __ ExternalConstant( - ExternalReference::address_of_pending_message_obj(isolate_)); - Node* previous_message = - __ Load(MachineType::TaggedPointer(), pending_message); - Node* new_message = __ GetAccumulator(); - __ StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message, - new_message); - __ SetAccumulator(previous_message); - __ Dispatch(); -} - -// Throw -// -// Throws the exception in the accumulator. -void Interpreter::DoThrow(InterpreterAssembler* assembler) { - Node* exception = __ GetAccumulator(); - Node* context = __ GetContext(); - __ CallRuntime(Runtime::kThrow, context, exception); - // We shouldn't ever return from a throw. - __ Abort(kUnexpectedReturnFromThrow); -} - -// ReThrow -// -// Re-throws the exception in the accumulator. 
-void Interpreter::DoReThrow(InterpreterAssembler* assembler) { - Node* exception = __ GetAccumulator(); - Node* context = __ GetContext(); - __ CallRuntime(Runtime::kReThrow, context, exception); - // We shouldn't ever return from a throw. - __ Abort(kUnexpectedReturnFromThrow); -} - -// Return -// -// Return the value in the accumulator. -void Interpreter::DoReturn(InterpreterAssembler* assembler) { - __ UpdateInterruptBudgetOnReturn(); - Node* accumulator = __ GetAccumulator(); - __ Return(accumulator); -} - -// Debugger -// -// Call runtime to handle debugger statement. -void Interpreter::DoDebugger(InterpreterAssembler* assembler) { - Node* context = __ GetContext(); - __ CallStub(CodeFactory::HandleDebuggerStatement(isolate_), context); - __ Dispatch(); -} - -// DebugBreak -// -// Call runtime to handle a debug break. -#define DEBUG_BREAK(Name, ...) \ - void Interpreter::Do##Name(InterpreterAssembler* assembler) { \ - Node* context = __ GetContext(); \ - Node* accumulator = __ GetAccumulator(); \ - Node* original_handler = \ - __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \ - __ MaybeDropFrames(context); \ - __ DispatchToBytecodeHandler(original_handler); \ - } -DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK); -#undef DEBUG_BREAK - -void Interpreter::BuildForInPrepareResult(Node* output_register, - Node* cache_type, Node* cache_array, - Node* cache_length, - InterpreterAssembler* assembler) { - __ StoreRegister(cache_type, output_register); - output_register = __ NextRegister(output_register); - __ StoreRegister(cache_array, output_register); - output_register = __ NextRegister(output_register); - __ StoreRegister(cache_length, output_register); -} - -// ForInPrepare -// -// Returns state for for..in loop execution based on the object in the register -// |receiver|. The object must not be null or undefined and must have been -// converted to a receiver already. -// The result is output in registers |cache_info_triple| to -// |cache_info_triple + 2|, with the registers holding cache_type, cache_array, -// and cache_length respectively. -void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) { - Node* object_register = __ BytecodeOperandReg(0); - Node* output_register = __ BytecodeOperandReg(1); - Node* receiver = __ LoadRegister(object_register); - Node* context = __ GetContext(); - - Node* cache_type; - Node* cache_array; - Node* cache_length; - Label call_runtime(assembler, Label::kDeferred), - nothing_to_iterate(assembler, Label::kDeferred); - - ObjectBuiltinsAssembler object_assembler(assembler->state()); - std::tie(cache_type, cache_array, cache_length) = - object_assembler.EmitForInPrepare(receiver, context, &call_runtime, - ¬hing_to_iterate); - - BuildForInPrepareResult(output_register, cache_type, cache_array, - cache_length, assembler); - __ Dispatch(); - - __ Bind(&call_runtime); - { - Node* result_triple = - __ CallRuntime(Runtime::kForInPrepare, context, receiver); - Node* cache_type = __ Projection(0, result_triple); - Node* cache_array = __ Projection(1, result_triple); - Node* cache_length = __ Projection(2, result_triple); - BuildForInPrepareResult(output_register, cache_type, cache_array, - cache_length, assembler); - __ Dispatch(); - } - __ Bind(¬hing_to_iterate); - { - // Receiver is null or undefined or descriptors are zero length. 
- Node* zero = __ SmiConstant(0); - BuildForInPrepareResult(output_register, zero, zero, zero, assembler); - __ Dispatch(); - } -} - -// ForInNext -// -// Returns the next enumerable property in the the accumulator. -void Interpreter::DoForInNext(InterpreterAssembler* assembler) { - Node* receiver_reg = __ BytecodeOperandReg(0); - Node* receiver = __ LoadRegister(receiver_reg); - Node* index_reg = __ BytecodeOperandReg(1); - Node* index = __ LoadRegister(index_reg); - Node* cache_type_reg = __ BytecodeOperandReg(2); - Node* cache_type = __ LoadRegister(cache_type_reg); - Node* cache_array_reg = __ NextRegister(cache_type_reg); - Node* cache_array = __ LoadRegister(cache_array_reg); - - // Load the next key from the enumeration array. - Node* key = __ LoadFixedArrayElement(cache_array, index, 0, - CodeStubAssembler::SMI_PARAMETERS); - - // Check if we can use the for-in fast path potentially using the enum cache. - Label if_fast(assembler), if_slow(assembler, Label::kDeferred); - Node* receiver_map = __ LoadMap(receiver); - __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow); - __ Bind(&if_fast); - { - // Enum cache in use for {receiver}, the {key} is definitely valid. - __ SetAccumulator(key); - __ Dispatch(); - } - __ Bind(&if_slow); - { - // Record the fact that we hit the for-in slow path. - Node* vector_index = __ BytecodeOperandIdx(3); - Node* feedback_vector = __ LoadFeedbackVector(); - Node* megamorphic_sentinel = - __ HeapConstant(FeedbackVector::MegamorphicSentinel(isolate_)); - __ StoreFixedArrayElement(feedback_vector, vector_index, - megamorphic_sentinel, SKIP_WRITE_BARRIER); - - // Need to filter the {key} for the {receiver}. - Node* context = __ GetContext(); - Callable callable = CodeFactory::ForInFilter(assembler->isolate()); - Node* result = __ CallStub(callable, context, key, receiver); - __ SetAccumulator(result); - __ Dispatch(); - } -} - -// ForInContinue -// -// Returns false if the end of the enumerable properties has been reached. -void Interpreter::DoForInContinue(InterpreterAssembler* assembler) { - Node* index_reg = __ BytecodeOperandReg(0); - Node* index = __ LoadRegister(index_reg); - Node* cache_length_reg = __ BytecodeOperandReg(1); - Node* cache_length = __ LoadRegister(cache_length_reg); - - // Check if {index} is at {cache_length} already. - Label if_true(assembler), if_false(assembler), end(assembler); - __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false); - __ Bind(&if_true); - { - __ SetAccumulator(__ BooleanConstant(false)); - __ Goto(&end); - } - __ Bind(&if_false); - { - __ SetAccumulator(__ BooleanConstant(true)); - __ Goto(&end); - } - __ Bind(&end); - __ Dispatch(); -} - -// ForInStep -// -// Increments the loop counter in register |index| and stores the result -// in the accumulator. -void Interpreter::DoForInStep(InterpreterAssembler* assembler) { - Node* index_reg = __ BytecodeOperandReg(0); - Node* index = __ LoadRegister(index_reg); - Node* one = __ SmiConstant(Smi::FromInt(1)); - Node* result = __ SmiAdd(index, one); - __ SetAccumulator(result); - __ Dispatch(); -} - -// Wide -// -// Prefix bytecode indicating next bytecode has wide (16-bit) operands. -void Interpreter::DoWide(InterpreterAssembler* assembler) { - __ DispatchWide(OperandScale::kDouble); -} - -// ExtraWide -// -// Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands. 
-void Interpreter::DoExtraWide(InterpreterAssembler* assembler) { - __ DispatchWide(OperandScale::kQuadruple); -} - -// Illegal -// -// An invalid bytecode aborting execution if dispatched. -void Interpreter::DoIllegal(InterpreterAssembler* assembler) { - __ Abort(kInvalidBytecode); -} - -// Nop -// -// No operation. -void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); } - -// SuspendGenerator -// -// Exports the register file and stores it into the generator. Also stores the -// current context, the state given in the accumulator, and the current bytecode -// offset (for debugging purposes) into the generator. -void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) { - Node* generator_reg = __ BytecodeOperandReg(0); - Node* generator = __ LoadRegister(generator_reg); - - Label if_stepping(assembler, Label::kDeferred), ok(assembler); - Node* step_action_address = __ ExternalConstant( - ExternalReference::debug_last_step_action_address(isolate_)); - Node* step_action = __ Load(MachineType::Int8(), step_action_address); - STATIC_ASSERT(StepIn > StepNext); - STATIC_ASSERT(LastStepAction == StepIn); - Node* step_next = __ Int32Constant(StepNext); - __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok); - __ Bind(&ok); - - Node* array = - __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset); - Node* context = __ GetContext(); - Node* state = __ GetAccumulator(); - - __ ExportRegisterFile(array); - __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context); - __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state); - - Node* offset = __ SmiTag(__ BytecodeOffset()); - __ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset, - offset); - - __ Dispatch(); - - __ Bind(&if_stepping); - { - Node* context = __ GetContext(); - __ CallRuntime(Runtime::kDebugRecordGenerator, context, generator); - __ Goto(&ok); - } -} - -// ResumeGenerator -// -// Imports the register file stored in the generator. Also loads the -// generator's state and stores it in the accumulator, before overwriting it -// with kGeneratorExecuting. -void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) { - Node* generator_reg = __ BytecodeOperandReg(0); - Node* generator = __ LoadRegister(generator_reg); - - __ ImportRegisterFile( - __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset)); - - Node* old_state = - __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset); - Node* new_state = __ Int32Constant(JSGeneratorObject::kGeneratorExecuting); - __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, - __ SmiTag(new_state)); - __ SetAccumulator(old_state); - - __ Dispatch(); -} - } // namespace interpreter } // namespace internal } // namespace v8 diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h index ac36815f14f5e0..4dc6241c245f3e 100644 --- a/deps/v8/src/interpreter/interpreter.h +++ b/deps/v8/src/interpreter/interpreter.h @@ -23,10 +23,7 @@ class Isolate; class Callable; class CompilationInfo; class CompilationJob; - -namespace compiler { -class Node; -} // namespace compiler +class SetupIsolateDelegate; namespace interpreter { @@ -37,9 +34,6 @@ class Interpreter { explicit Interpreter(Isolate* isolate); virtual ~Interpreter() {} - // Initializes the interpreter dispatch table. - void Initialize(); - // Returns the interrupt budget which should be used for the profiler counter. 
static int InterruptBudget(); @@ -53,7 +47,6 @@ class Interpreter { void IterateDispatchTable(ObjectVisitor* v); // Disassembler support (only useful with ENABLE_DISASSEMBLER defined). - void TraceCodegen(Handle code); const char* LookupNameOfBytecodeHandler(Code* code); V8_EXPORT_PRIVATE Local GetDispatchCountersObject(); @@ -67,102 +60,11 @@ class Interpreter { } // TODO(ignition): Tune code size multiplier. - static const int kCodeSizeMultiplier = 32; + static const int kCodeSizeMultiplier = 24; private: -// Bytecode handler generator functions. -#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \ - void Do##Name(InterpreterAssembler* assembler); - BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR) -#undef DECLARE_BYTECODE_HANDLER_GENERATOR - - typedef void (Interpreter::*BytecodeGeneratorFunc)(InterpreterAssembler*); - - // Generates handler for given |bytecode| and |operand_scale| using - // |generator| and installs it into the dispatch table. - void InstallBytecodeHandler(Zone* zone, Bytecode bytecode, - OperandScale operand_scale, - BytecodeGeneratorFunc generator); - - // Generates code to perform the binary operation via |Generator|. - template - void DoBinaryOpWithFeedback(InterpreterAssembler* assembler); - - // Generates code to perform the comparison via |Generator| while gathering - // type feedback. - void DoCompareOpWithFeedback(Token::Value compare_op, - InterpreterAssembler* assembler); - - // Generates code to perform the bitwise binary operation corresponding to - // |bitwise_op| while gathering type feedback. - void DoBitwiseBinaryOp(Token::Value bitwise_op, - InterpreterAssembler* assembler); - - // Generates code to perform the binary operation via |Generator| using - // an immediate value rather the accumulator as the rhs operand. - template - void DoBinaryOpWithImmediate(InterpreterAssembler* assembler); - - // Generates code to perform the unary operation via |Generator| while - // gatering type feedback. - template - void DoUnaryOpWithFeedback(InterpreterAssembler* assembler); - - // Generates code to perform the comparison operation associated with - // |compare_op|. - void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler); - - // Generates code to perform a global store via |ic|. - void DoStaGlobal(Callable ic, InterpreterAssembler* assembler); - - // Generates code to perform a named property store via |ic|. - void DoStoreIC(Callable ic, InterpreterAssembler* assembler); - - // Generates code to perform a keyed property store via |ic|. - void DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler); - - // Generates code to perform a JS call that collects type feedback. - void DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode); - - // Generates code to perform delete via function_id. - void DoDelete(Runtime::FunctionId function_id, - InterpreterAssembler* assembler); - - // Generates code to perform a lookup slot load via |function_id|. - void DoLdaLookupSlot(Runtime::FunctionId function_id, - InterpreterAssembler* assembler); - - // Generates code to perform a lookup slot load via |function_id| that can - // fast path to a context slot load. - void DoLdaLookupContextSlot(Runtime::FunctionId function_id, - InterpreterAssembler* assembler); - - // Generates code to perform a lookup slot load via |function_id| that can - // fast path to a global load. 
- void DoLdaLookupGlobalSlot(Runtime::FunctionId function_id, - InterpreterAssembler* assembler); - - // Generates code to perform a lookup slot store depending on - // |language_mode|. - void DoStaLookupSlot(LanguageMode language_mode, - InterpreterAssembler* assembler); - - // Generates code to load a global. - void BuildLoadGlobal(int slot_operand_index, int name_operand_index, - TypeofMode typeof_mode, InterpreterAssembler* assembler); - - // Generates code to prepare the result for ForInPrepare. Cache data - // are placed into the consecutive series of registers starting at - // |output_register|. - void BuildForInPrepareResult(compiler::Node* output_register, - compiler::Node* cache_type, - compiler::Node* cache_array, - compiler::Node* cache_length, - InterpreterAssembler* assembler); - - // Generates code to perform the unary operation via |callable|. - compiler::Node* BuildUnaryOp(Callable callable, - InterpreterAssembler* assembler); + friend class SetupInterpreter; + friend class v8::internal::SetupIsolateDelegate; uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const; @@ -171,7 +73,6 @@ class Interpreter { OperandScale operand_scale); bool IsDispatchTableInitialized(); - bool ShouldInitializeDispatchTable(); static const int kNumberOfWideVariants = 3; static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1); diff --git a/deps/v8/src/interpreter/mkpeephole.cc b/deps/v8/src/interpreter/mkpeephole.cc deleted file mode 100644 index e6c3b76f2808b1..00000000000000 --- a/deps/v8/src/interpreter/mkpeephole.cc +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2016 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include -#include -#include -#include -#include - -#include "src/globals.h" -#include "src/interpreter/bytecode-peephole-table.h" -#include "src/interpreter/bytecodes.h" - -namespace v8 { -namespace internal { - -namespace interpreter { - -const char* ActionName(PeepholeAction action) { - switch (action) { -#define CASE(Name) \ - case PeepholeAction::k##Name: \ - return "PeepholeAction::k" #Name; - PEEPHOLE_ACTION_LIST(CASE) -#undef CASE - default: - UNREACHABLE(); - return ""; - } -} - -std::string BytecodeName(Bytecode bytecode) { - return "Bytecode::k" + std::string(Bytecodes::ToString(bytecode)); -} - -class PeepholeActionTableWriter final { - public: - static const size_t kNumberOfBytecodes = - static_cast(Bytecode::kLast) + 1; - typedef std::array Row; - - void BuildTable(); - void Write(std::ostream& os); - - private: - static const char* kIndent; - static const char* kNamespaceElements[]; - - void WriteHeader(std::ostream& os); - void WriteIncludeFiles(std::ostream& os); - void WriteClassMethods(std::ostream& os); - void WriteUniqueRows(std::ostream& os); - void WriteRowMap(std::ostream& os); - void WriteRow(std::ostream& os, size_t row_index); - void WriteOpenNamespace(std::ostream& os); - void WriteCloseNamespace(std::ostream& os); - - PeepholeActionAndData LookupActionAndData(Bytecode last, Bytecode current); - void BuildRow(Bytecode last, Row* row); - size_t HashRow(const Row* row); - void InsertRow(size_t row_index, const Row* const row, size_t row_hash, - std::map* hash_to_row_map); - bool RowsEqual(const Row* const first, const Row* const second); - - std::vector* table() { return &table_; } - - // Table of unique rows. - std::vector table_; - - // Mapping of row index to unique row index. 
- std::array row_map_; -}; - -const char* PeepholeActionTableWriter::kIndent = " "; -const char* PeepholeActionTableWriter::kNamespaceElements[] = {"v8", "internal", - "interpreter"}; - -// static -PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData( - Bytecode last, Bytecode current) { - // ToName bytecodes can be replaced by Star with the same output register if - // the value in the accumulator is already a name. - if (current == Bytecode::kToName && Bytecodes::PutsNameInAccumulator(last)) { - return {PeepholeAction::kChangeBytecodeAction, Bytecode::kStar}; - } - - // Nop are placeholders for holding source position information and can be - // elided if there is no source information. - if (last == Bytecode::kNop) { - if (Bytecodes::IsJump(current)) { - return {PeepholeAction::kElideLastBeforeJumpAction, Bytecode::kIllegal}; - } else { - return {PeepholeAction::kElideLastAction, Bytecode::kIllegal}; - } - } - - // The accumulator is invisible to the debugger. If there is a sequence - // of consecutive accumulator loads (that don't have side effects) then - // only the final load is potentially visible. - if (Bytecodes::IsAccumulatorLoadWithoutEffects(last) && - Bytecodes::IsAccumulatorLoadWithoutEffects(current)) { - return {PeepholeAction::kElideLastAction, Bytecode::kIllegal}; - } - - // The current instruction clobbers the accumulator without reading - // it. The load in the last instruction can be elided as it has no - // effect. - if (Bytecodes::IsAccumulatorLoadWithoutEffects(last) && - Bytecodes::GetAccumulatorUse(current) == AccumulatorUse::kWrite) { - return {PeepholeAction::kElideLastAction, Bytecode::kIllegal}; - } - - // Ldar and Star make the accumulator and register hold equivalent - // values. Only the first bytecode is needed if there's a sequence - // of back-to-back Ldar and Star bytecodes with the same operand. - if (Bytecodes::IsLdarOrStar(last) && Bytecodes::IsLdarOrStar(current)) { - return {PeepholeAction::kElideCurrentIfOperand0MatchesAction, - Bytecode::kIllegal}; - } - - // TODO(rmcilroy): Add elide for consecutive mov to and from the same - // register. - - // Remove ToBoolean coercion from conditional jumps where possible. - if (Bytecodes::WritesBooleanToAccumulator(last)) { - if (Bytecodes::IsJumpIfToBoolean(current)) { - return {PeepholeAction::kChangeJumpBytecodeAction, - Bytecodes::GetJumpWithoutToBoolean(current)}; - } else if (current == Bytecode::kToBooleanLogicalNot) { - return {PeepholeAction::kChangeBytecodeAction, Bytecode::kLogicalNot}; - } - } - - // Fuse LdaSmi followed by binary op to produce binary op with a - // immediate integer argument. This savaes on dispatches and size. 
- if (last == Bytecode::kLdaSmi) { - switch (current) { - case Bytecode::kAdd: - return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction, - Bytecode::kAddSmi}; - case Bytecode::kSub: - return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction, - Bytecode::kSubSmi}; - case Bytecode::kBitwiseAnd: - return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction, - Bytecode::kBitwiseAndSmi}; - case Bytecode::kBitwiseOr: - return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction, - Bytecode::kBitwiseOrSmi}; - case Bytecode::kShiftLeft: - return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction, - Bytecode::kShiftLeftSmi}; - case Bytecode::kShiftRight: - return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction, - Bytecode::kShiftRightSmi}; - default: - break; - } - } - - // Fuse LdaZero followed by binary op to produce binary op with a - // zero immediate argument. This saves dispatches, but not size. - if (last == Bytecode::kLdaZero) { - switch (current) { - case Bytecode::kAdd: - return { - PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction, - Bytecode::kAddSmi}; - case Bytecode::kSub: - return { - PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction, - Bytecode::kSubSmi}; - case Bytecode::kBitwiseAnd: - return { - PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction, - Bytecode::kBitwiseAndSmi}; - case Bytecode::kBitwiseOr: - return { - PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction, - Bytecode::kBitwiseOrSmi}; - case Bytecode::kShiftLeft: - return { - PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction, - Bytecode::kShiftLeftSmi}; - case Bytecode::kShiftRight: - return { - PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction, - Bytecode::kShiftRightSmi}; - default: - break; - } - } - - // Fuse LdaNull/LdaUndefined followed by a equality comparison with test - // undetectable. Testing undetectable is a simple check on the map which is - // more efficient than the full comparison operation. - if (last == Bytecode::kLdaNull || last == Bytecode::kLdaUndefined) { - if (current == Bytecode::kTestEqual) { - return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction, - Bytecode::kTestUndetectable}; - } - } - - // Fuse LdaNull/LdaUndefined followed by a strict equals with - // TestNull/TestUndefined. - if (current == Bytecode::kTestEqualStrict) { - if (last == Bytecode::kLdaNull) { - return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction, - Bytecode::kTestNull}; - } else if (last == Bytecode::kLdaUndefined) { - return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction, - Bytecode::kTestUndefined}; - } - } - - // If there is no last bytecode to optimize against, store the incoming - // bytecode or for jumps emit incoming bytecode immediately. - if (last == Bytecode::kIllegal) { - if (Bytecodes::IsJump(current)) { - return {PeepholeAction::kUpdateLastJumpAction, Bytecode::kIllegal}; - } else if (current == Bytecode::kNop) { - return {PeepholeAction::kUpdateLastIfSourceInfoPresentAction, - Bytecode::kIllegal}; - } else { - return {PeepholeAction::kUpdateLastAction, Bytecode::kIllegal}; - } - } - - // No matches, take the default action. 
- if (Bytecodes::IsJump(current)) { - return {PeepholeAction::kDefaultJumpAction, Bytecode::kIllegal}; - } else { - return {PeepholeAction::kDefaultAction, Bytecode::kIllegal}; - } -} - -void PeepholeActionTableWriter::Write(std::ostream& os) { - WriteHeader(os); - WriteIncludeFiles(os); - WriteOpenNamespace(os); - WriteUniqueRows(os); - WriteRowMap(os); - WriteClassMethods(os); - WriteCloseNamespace(os); -} - -void PeepholeActionTableWriter::WriteHeader(std::ostream& os) { - os << "// Copyright 2016 the V8 project authors. All rights reserved.\n" - << "// Use of this source code is governed by a BSD-style license that\n" - << "// can be found in the LICENSE file.\n\n" - << "// Autogenerated by " __FILE__ ". Do not edit.\n\n"; -} - -void PeepholeActionTableWriter::WriteIncludeFiles(std::ostream& os) { - os << "#include \"src/interpreter/bytecode-peephole-table.h\"\n\n"; -} - -void PeepholeActionTableWriter::WriteUniqueRows(std::ostream& os) { - os << "const PeepholeActionAndData PeepholeActionTable::row_data_[" - << table_.size() << "][" << kNumberOfBytecodes << "] = {\n"; - for (size_t i = 0; i < table_.size(); ++i) { - os << "{\n"; - WriteRow(os, i); - os << "},\n"; - } - os << "};\n\n"; -} - -void PeepholeActionTableWriter::WriteRowMap(std::ostream& os) { - os << "const PeepholeActionAndData* const PeepholeActionTable::row_[" - << kNumberOfBytecodes << "] = {\n"; - for (size_t i = 0; i < kNumberOfBytecodes; ++i) { - os << kIndent << " PeepholeActionTable::row_data_[" << row_map_[i] - << "], \n"; - } - os << "};\n\n"; -} - -void PeepholeActionTableWriter::WriteRow(std::ostream& os, size_t row_index) { - const Row row = table_.at(row_index); - for (PeepholeActionAndData action_data : row) { - os << kIndent << "{" << ActionName(action_data.action) << "," - << BytecodeName(action_data.bytecode) << "},\n"; - } -} - -void PeepholeActionTableWriter::WriteOpenNamespace(std::ostream& os) { - for (auto element : kNamespaceElements) { - os << "namespace " << element << " {\n"; - } - os << "\n"; -} - -void PeepholeActionTableWriter::WriteCloseNamespace(std::ostream& os) { - for (auto element : kNamespaceElements) { - os << "} // namespace " << element << "\n"; - } -} - -void PeepholeActionTableWriter::WriteClassMethods(std::ostream& os) { - os << "// static\n" - << "const PeepholeActionAndData*\n" - << "PeepholeActionTable::Lookup(Bytecode last, Bytecode current) {\n" - << kIndent - << "return &row_[Bytecodes::ToByte(last)][Bytecodes::ToByte(current)];\n" - << "}\n\n"; -} - -void PeepholeActionTableWriter::BuildTable() { - std::map hash_to_row_map; - Row row; - for (size_t i = 0; i < kNumberOfBytecodes; ++i) { - uint8_t byte_value = static_cast(i); - Bytecode last = Bytecodes::FromByte(byte_value); - BuildRow(last, &row); - size_t row_hash = HashRow(&row); - InsertRow(i, &row, row_hash, &hash_to_row_map); - } -} - -void PeepholeActionTableWriter::BuildRow(Bytecode last, Row* row) { - for (size_t i = 0; i < kNumberOfBytecodes; ++i) { - uint8_t byte_value = static_cast(i); - Bytecode current = Bytecodes::FromByte(byte_value); - PeepholeActionAndData action_data = LookupActionAndData(last, current); - row->at(i) = action_data; - } -} - -// static -bool PeepholeActionTableWriter::RowsEqual(const Row* const first, - const Row* const second) { - return memcmp(first, second, sizeof(*first)) == 0; -} - -// static -void PeepholeActionTableWriter::InsertRow( - size_t row_index, const Row* const row, size_t row_hash, - std::map* hash_to_row_map) { - // Insert row if no existing row matches, otherwise use 
existing row. - auto iter = hash_to_row_map->find(row_hash); - if (iter == hash_to_row_map->end()) { - row_map_[row_index] = table()->size(); - table()->push_back(*row); - } else { - row_map_[row_index] = iter->second; - - // If the following DCHECK fails, the HashRow() is not adequate. - DCHECK(RowsEqual(&table()->at(iter->second), row)); - } -} - -// static -size_t PeepholeActionTableWriter::HashRow(const Row* row) { - static const size_t kHashShift = 3; - std::size_t result = (1u << 31) - 1u; - const uint8_t* raw_data = reinterpret_cast(row); - for (size_t i = 0; i < sizeof(*row); ++i) { - size_t top_bits = result >> (kBitsPerByte * sizeof(size_t) - kHashShift); - result = (result << kHashShift) ^ top_bits ^ raw_data[i]; - } - return result; -} - -} // namespace interpreter -} // namespace internal -} // namespace v8 - -int main(int argc, const char* argv[]) { - CHECK_EQ(argc, 2); - - std::ofstream ofs(argv[1], std::ofstream::trunc); - v8::internal::interpreter::PeepholeActionTableWriter writer; - writer.BuildTable(); - writer.Write(ofs); - ofs.flush(); - ofs.close(); - - return 0; -} diff --git a/deps/v8/src/interpreter/setup-interpreter-internal.cc b/deps/v8/src/interpreter/setup-interpreter-internal.cc new file mode 100644 index 00000000000000..9adf70dffa8ad9 --- /dev/null +++ b/deps/v8/src/interpreter/setup-interpreter-internal.cc @@ -0,0 +1,91 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/interpreter/setup-interpreter.h" + +#include "src/handles-inl.h" +#include "src/interpreter/bytecodes.h" +#include "src/interpreter/interpreter-generator.h" +#include "src/interpreter/interpreter.h" +#include "src/objects-inl.h" + +namespace v8 { +namespace internal { +namespace interpreter { + +// static +void SetupInterpreter::InstallBytecodeHandlers(Interpreter* interpreter) { + DCHECK(!interpreter->IsDispatchTableInitialized()); + HandleScope scope(interpreter->isolate_); + Address* dispatch_table = interpreter->dispatch_table_; + + // Generate bytecode handlers for all bytecodes and scales. + const OperandScale kOperandScales[] = { +#define VALUE(Name, _) OperandScale::k##Name, + OPERAND_SCALE_LIST(VALUE) +#undef VALUE + }; + + for (OperandScale operand_scale : kOperandScales) { +#define GENERATE_CODE(Name, ...) \ + InstallBytecodeHandler(interpreter->isolate_, dispatch_table, \ + Bytecode::k##Name, operand_scale); + BYTECODE_LIST(GENERATE_CODE) +#undef GENERATE_CODE + } + + // Fill unused entries will the illegal bytecode handler. + size_t illegal_index = Interpreter::GetDispatchTableIndex( + Bytecode::kIllegal, OperandScale::kSingle); + for (size_t index = 0; index < Interpreter::kDispatchTableSize; ++index) { + if (dispatch_table[index] == nullptr) { + dispatch_table[index] = dispatch_table[illegal_index]; + } + } + + // Initialization should have been successful. 
+ DCHECK(interpreter->IsDispatchTableInitialized()); +} + +// static +bool SetupInterpreter::ReuseExistingHandler(Address* dispatch_table, + Bytecode bytecode, + OperandScale operand_scale) { + size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale); + switch (bytecode) { + case Bytecode::kLdaImmutableContextSlot: + STATIC_ASSERT(static_cast(Bytecode::kLdaContextSlot) < + static_cast(Bytecode::kLdaImmutableContextSlot)); + dispatch_table[index] = dispatch_table[Interpreter::GetDispatchTableIndex( + Bytecode::kLdaContextSlot, operand_scale)]; + return true; + case Bytecode::kLdaImmutableCurrentContextSlot: + STATIC_ASSERT( + static_cast(Bytecode::kLdaCurrentContextSlot) < + static_cast(Bytecode::kLdaImmutableCurrentContextSlot)); + dispatch_table[index] = dispatch_table[Interpreter::GetDispatchTableIndex( + Bytecode::kLdaCurrentContextSlot, operand_scale)]; + return true; + default: + return false; + } + return false; +} + +// static +void SetupInterpreter::InstallBytecodeHandler(Isolate* isolate, + Address* dispatch_table, + Bytecode bytecode, + OperandScale operand_scale) { + if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return; + if (ReuseExistingHandler(dispatch_table, bytecode, operand_scale)) return; + + size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale); + Handle code = GenerateBytecodeHandler(isolate, bytecode, operand_scale); + dispatch_table[index] = code->entry(); +} + +} // namespace interpreter +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/interpreter/setup-interpreter.h b/deps/v8/src/interpreter/setup-interpreter.h new file mode 100644 index 00000000000000..e9adad95376e74 --- /dev/null +++ b/deps/v8/src/interpreter/setup-interpreter.h @@ -0,0 +1,37 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_INTERPRETER_SETUP_INTERPRETER_H_ +#define V8_INTERPRETER_SETUP_INTERPRETER_H_ + +#include "src/interpreter/bytecode-operands.h" +#include "src/interpreter/bytecodes.h" + +namespace v8 { +namespace internal { +namespace interpreter { + +class Interpreter; + +class SetupInterpreter { + public: + static void InstallBytecodeHandlers(Interpreter* interpreter); + + private: + // In the case of bytecodes that share handler implementations, copy the code + // into the bytecode's dispatcher table entry and return true. + static bool ReuseExistingHandler(Address* dispatch_table, Bytecode bytecode, + OperandScale operand_scale); + // Generates handler for given |bytecode| and |operand_scale| + // and installs it into the |dispatch_table|. 
+ static void InstallBytecodeHandler(Isolate* isolate, Address* dispatch_table, + Bytecode bytecode, + OperandScale operand_scale); +}; + +} // namespace interpreter +} // namespace internal +} // namespace v8 + +#endif // V8_INTERPRETER_SETUP_INTERPRETER_H_ diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc index bac61301c6a465..ffed04d6e075da 100644 --- a/deps/v8/src/isolate.cc +++ b/deps/v8/src/isolate.cc @@ -12,6 +12,7 @@ #include "src/assembler-inl.h" #include "src/ast/ast-value-factory.h" #include "src/ast/context-slot-cache.h" +#include "src/base/adapters.h" #include "src/base/hashmap.h" #include "src/base/platform/platform.h" #include "src/base/sys-info.h" @@ -38,10 +39,12 @@ #include "src/libsampler/sampler.h" #include "src/log.h" #include "src/messages.h" +#include "src/objects/frame-array-inl.h" #include "src/profiler/cpu-profiler.h" #include "src/prototype.h" #include "src/regexp/regexp-stack.h" #include "src/runtime-profiler.h" +#include "src/setup-isolate.h" #include "src/simulator.h" #include "src/snapshot/deserializer.h" #include "src/tracing/tracing-category-observer.h" @@ -308,8 +311,15 @@ Handle Isolate::StackTraceString() { } } +void Isolate::PushStackTraceAndDie(unsigned int magic1, void* ptr1, void* ptr2, + unsigned int magic2) { + PushStackTraceAndDie(magic1, ptr1, ptr2, nullptr, nullptr, nullptr, nullptr, + nullptr, nullptr, magic2); +} -void Isolate::PushStackTraceAndDie(unsigned int magic, void* ptr1, void* ptr2, +void Isolate::PushStackTraceAndDie(unsigned int magic1, void* ptr1, void* ptr2, + void* ptr3, void* ptr4, void* ptr5, + void* ptr6, void* ptr7, void* ptr8, unsigned int magic2) { const int kMaxStackTraceSize = 32 * KB; Handle trace = StackTraceString(); @@ -318,8 +328,43 @@ void Isolate::PushStackTraceAndDie(unsigned int magic, void* ptr1, void* ptr2, String::WriteToFlat(*trace, buffer, 0, length); buffer[length] = '\0'; // TODO(dcarney): convert buffer to utf8? - base::OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", magic, magic2, ptr1, - ptr2, reinterpret_cast(buffer)); + base::OS::PrintError( + "Stacktrace:" + "\n magic1=%x magic2=%x ptr1=%p ptr2=%p ptr3=%p ptr4=%p ptr5=%p " + "ptr6=%p ptr7=%p ptr8=%p\n\n%s", + magic1, magic2, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, ptr8, + reinterpret_cast(buffer)); + PushCodeObjectsAndDie(0xdeadc0de, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, + ptr8, 0xdeadc0de); +} + +void Isolate::PushCodeObjectsAndDie(unsigned int magic1, void* ptr1, void* ptr2, + void* ptr3, void* ptr4, void* ptr5, + void* ptr6, void* ptr7, void* ptr8, + unsigned int magic2) { + const int kMaxCodeObjects = 16; + // Mark as volatile to lower the probability of optimizing code_objects + // away. The first and last entries are set to the magic markers, making it + // easier to spot the array on the stack. + void* volatile code_objects[kMaxCodeObjects + 2]; + code_objects[0] = reinterpret_cast(magic1); + code_objects[kMaxCodeObjects + 1] = reinterpret_cast(magic2); + StackFrameIterator it(this); + int numCodeObjects = 0; + for (; !it.done() && numCodeObjects < kMaxCodeObjects; it.Advance()) { + code_objects[1 + numCodeObjects++] = it.frame()->unchecked_code(); + } + + // Keep the top raw code object pointers on the stack in the hope that the + // corresponding pages end up more frequently in the minidump. + base::OS::PrintError( + "\nCodeObjects (%p length=%i): 1:%p 2:%p 3:%p 4:%p..." 
+ "\n magic1=%x magic2=%x ptr1=%p ptr2=%p ptr3=%p ptr4=%p ptr5=%p " + "ptr6=%p ptr7=%p ptr8=%p\n\n", + static_cast(code_objects[0]), numCodeObjects, + static_cast(code_objects[1]), static_cast(code_objects[2]), + static_cast(code_objects[3]), static_cast(code_objects[4]), + magic1, magic2, ptr1, ptr2, ptr3, ptr4, ptr5, ptr6, ptr7, ptr8); base::OS::Abort(); } @@ -433,6 +478,7 @@ bool GetStackTraceLimit(Isolate* isolate, int* result) { return true; } +bool NoExtension(const v8::FunctionCallbackInfo&) { return false; } } // namespace Handle Isolate::CaptureSimpleStackTrace(Handle error_object, @@ -462,7 +508,8 @@ Handle Isolate::CaptureSimpleStackTrace(Handle error_object, // function. List frames(FLAG_max_inlining_levels + 1); js_frame->Summarize(&frames); - for (int i = frames.length() - 1; i >= 0; i--) { + for (int i = frames.length() - 1; + i >= 0 && elements->FrameCount() < limit; i--) { const auto& summ = frames[i].AsJavaScript(); Handle fun = summ.function(); @@ -537,9 +584,25 @@ Handle Isolate::CaptureSimpleStackTrace(Handle error_object, abstract_code, offset, flags); } break; - case StackFrame::WASM_INTERPRETER_ENTRY: - // TODO(clemensh): Add frames. - break; + case StackFrame::WASM_INTERPRETER_ENTRY: { + WasmInterpreterEntryFrame* interpreter_frame = + WasmInterpreterEntryFrame::cast(frame); + Handle instance(interpreter_frame->wasm_instance(), + this); + // Get the interpreted stack ( pairs). + std::vector> interpreted_stack = + instance->debug_info()->GetInterpretedStack( + interpreter_frame->fp()); + + // interpreted_stack is bottom-up, i.e. caller before callee. We need it + // the other way around. + for (auto pair : base::Reversed(interpreted_stack)) { + elements = FrameArray::AppendWasmFrame( + elements, instance, pair.first, Handle::null(), + pair.second, FrameArray::kIsWasmInterpretedFrame); + if (elements->FrameCount() >= limit) break; + } + } break; default: break; @@ -592,177 +655,69 @@ Handle Isolate::GetDetailedStackTrace(Handle error_object) { class CaptureStackTraceHelper { public: - CaptureStackTraceHelper(Isolate* isolate, - StackTrace::StackTraceOptions options) - : isolate_(isolate) { - if (options & StackTrace::kColumnOffset) { - column_key_ = - factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("column")); - } - if (options & StackTrace::kLineNumber) { - line_key_ = - factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("lineNumber")); - } - if (options & StackTrace::kScriptId) { - script_id_key_ = - factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptId")); - } - if (options & StackTrace::kScriptName) { - script_name_key_ = - factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptName")); - } - if (options & StackTrace::kScriptNameOrSourceURL) { - script_name_or_source_url_key_ = factory()->InternalizeOneByteString( - STATIC_CHAR_VECTOR("scriptNameOrSourceURL")); - } - if (options & StackTrace::kFunctionName) { - function_key_ = factory()->InternalizeOneByteString( - STATIC_CHAR_VECTOR("functionName")); - } - if (options & StackTrace::kIsEval) { - eval_key_ = - factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("isEval")); - } - if (options & StackTrace::kIsConstructor) { - constructor_key_ = factory()->InternalizeOneByteString( - STATIC_CHAR_VECTOR("isConstructor")); - } - } + explicit CaptureStackTraceHelper(Isolate* isolate) : isolate_(isolate) {} - Handle NewStackFrameObject(FrameSummary& summ) { + Handle NewStackFrameObject(FrameSummary& summ) { if (summ.IsJavaScript()) return NewStackFrameObject(summ.AsJavaScript()); if 
(summ.IsWasm()) return NewStackFrameObject(summ.AsWasm()); UNREACHABLE(); - return Handle::null(); + return factory()->NewStackFrameInfo(); } - Handle NewStackFrameObject( + Handle NewStackFrameObject( const FrameSummary::JavaScriptFrameSummary& summ) { - Handle stack_frame = - factory()->NewJSObject(isolate_->object_function()); + Handle frame = factory()->NewStackFrameInfo(); Handle + + + + + + + + + + + + + +

+    Chrome V8 profiling log processor
+
+    Usage:
+
+    Record the profile:
+      d8 --prof your-file.js
+
+    Then process the file (this resolves C++ symbols and produces
+    a JSON file with the profile data):
+      <v8-dir>/tools/linux-tick-processor --preprocess v8.log > v8.json
+
+    To view the profile, click the Choose file button above and choose
+    the file in the dialog box.
+
+Copyright the V8 Authors - Last change to this page: 2017/02/15
+ + + diff --git a/deps/v8/tools/profview/profile-utils.js b/deps/v8/tools/profview/profile-utils.js new file mode 100644 index 00000000000000..de3d730eb9f639 --- /dev/null +++ b/deps/v8/tools/profview/profile-utils.js @@ -0,0 +1,594 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +"use strict" + +let codeKinds = [ + "UNKNOWN", + "CPPCOMP", + "CPPGC", + "CPPEXT", + "CPP", + "LIB", + "IC", + "BC", + "STUB", + "BUILTIN", + "REGEXP", + "JSOPT", + "JSUNOPT" +]; + +function resolveCodeKind(code) { + if (!code || !code.type) { + return "UNKNOWN"; + } else if (code.type === "CPP") { + return "CPP"; + } else if (code.type === "SHARED_LIB") { + return "LIB"; + } else if (code.type === "CODE") { + if (code.kind === "LoadIC" || + code.kind === "StoreIC" || + code.kind === "KeyedStoreIC" || + code.kind === "KeyedLoadIC" || + code.kind === "LoadGlobalIC" || + code.kind === "Handler") { + return "IC"; + } else if (code.kind === "BytecodeHandler") { + return "BC"; + } else if (code.kind === "Stub") { + return "STUB"; + } else if (code.kind === "Builtin") { + return "BUILTIN"; + } else if (code.kind === "RegExp") { + return "REGEXP"; + } + console.log("Unknown CODE: '" + code.kind + "'."); + return "CODE"; + } else if (code.type === "JS") { + if (code.kind === "Builtin") { + return "JSUNOPT"; + } else if (code.kind === "Opt") { + return "JSOPT"; + } else if (code.kind === "Unopt") { + return "JSUNOPT"; + } + } + console.log("Unknown code type '" + type + "'."); +} + +function resolveCodeKindAndVmState(code, vmState) { + let kind = resolveCodeKind(code); + if (kind === "CPP") { + if (vmState === 1) { + kind = "CPPGC"; + } else if (vmState === 2) { + kind = "CPPCOMP"; + } else if (vmState === 4) { + kind = "CPPEXT"; + } + } + return kind; +} + +function codeEquals(code1, code2, allowDifferentKinds = false) { + if (!code1 || !code2) return false; + if (code1.name != code2.name || code1.type != code2.type) return false; + + if (code1.type == 'CODE') { + if (!allowDifferentKinds && code1.kind != code2.kind) return false; + } else if (code1.type == 'JS') { + if (!allowDifferentKinds && code1.kind != code2.kind) return false; + if (code1.func != code2.func) return false; + } + return true; +} + +function createNodeFromStackEntry(code, codeId) { + let name = code ? code.name : "UNKNOWN"; + + return { name, codeId, type : resolveCodeKind(code), + children : [], ownTicks : 0, ticks : 0 }; +} + +function childIdFromCode(codeId, code) { + // For JavaScript function, pretend there is one instance of optimized + // function and one instance of unoptimized function per SFI. + // Otherwise, just compute the id from code id. + let type = resolveCodeKind(code); + if (type === "JSOPT") { + return code.func * 4 + 1; + } else if (type === "JSUNOPT") { + return code.func * 4 + 2; + } else { + return codeId * 4; + } +} + +// We store list of ticks and positions within the ticks stack by +// storing flattened triplets of { tickIndex, depth, count }. +// Triplet { 123, 2, 3 } encodes positions in ticks 123, 124, 125, +// all of them at depth 2. The flattened array is used to encode +// position within the call-tree. + +// The following function helps to encode such triplets. +function addFrameToFrameList(paths, pathIndex, depth) { + // Try to combine with the previous code run. 
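+  // For example (see the comment above), the triplet [123, 2, 3] stands for
+  // ticks 123, 124 and 125, all at depth 2; expandTreeNode() decodes these
+  // triplets again when a tree node is expanded.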
+ if (paths.length > 0 && + paths[paths.length - 3] + 1 === pathIndex && + paths[paths.length - 2] === depth) { + paths[paths.length - 1]++; + } else { + paths.push(pathIndex, depth, 1); + } +} + +function findNextFrame(file, stack, stackPos, step, filter) { + let codeId = -1; + let code = null; + while (stackPos >= 0 && stackPos < stack.length) { + codeId = stack[stackPos]; + code = codeId >= 0 ? file.code[codeId] : undefined; + + if (filter) { + let type = code ? code.type : undefined; + let kind = code ? code.kind : undefined; + if (filter(type, kind)) return stackPos; + } + stackPos += step; + } + return -1; +} + +function addOrUpdateChildNode(parent, file, stackIndex, stackPos, ascending) { + let stack = file.ticks[stackIndex].s; + let codeId = stack[stackPos]; + let code = codeId >= 0 ? file.code[codeId] : undefined; + if (stackPos === -1) { + // We reached the end without finding the next step. + // If we are doing top-down call tree, update own ticks. + if (!ascending) { + parent.ownTicks++; + } + } else { + console.assert(stackPos >= 0 && stackPos < stack.length); + // We found a child node. + let childId = childIdFromCode(codeId, code); + let child = parent.children[childId]; + if (!child) { + child = createNodeFromStackEntry(code, codeId); + child.delayedExpansion = { frameList : [], ascending }; + parent.children[childId] = child; + } + child.ticks++; + addFrameToFrameList(child.delayedExpansion.frameList, stackIndex, stackPos); + } +} + +// This expands a tree node (direct children only). +function expandTreeNode(file, node, filter) { + let { frameList, ascending } = node.delayedExpansion; + + let step = ascending ? 2 : -2; + + for (let i = 0; i < frameList.length; i+= 3) { + let firstStackIndex = frameList[i]; + let depth = frameList[i + 1]; + let count = frameList[i + 2]; + for (let j = 0; j < count; j++) { + let stackIndex = firstStackIndex + j; + let stack = file.ticks[stackIndex].s; + + // Get to the next frame that has not been filtered out. + let stackPos = findNextFrame(file, stack, depth + step, step, filter); + addOrUpdateChildNode(node, file, stackIndex, stackPos, ascending); + } + } + node.delayedExpansion = null; +} + +function createEmptyNode(name) { + return { + name : name, + codeId: -1, + type : "CAT", + children : [], + ownTicks : 0, + ticks : 0 + }; +} + +class RuntimeCallTreeProcessor { + constructor() { + this.tree = createEmptyNode("root"); + this.tree.delayedExpansion = { frameList : [], ascending : false }; + } + + addStack(file, tickIndex) { + this.tree.ticks++; + + let stack = file.ticks[tickIndex].s; + let i; + for (i = 0; i < stack.length; i += 2) { + let codeId = stack[i]; + if (codeId < 0) return; + let code = file.code[codeId]; + if (code.type !== "CPP" && code.type !== "SHARED_LIB") { + i -= 2; + break; + } + } + if (i < 0 || i >= stack.length) return; + addOrUpdateChildNode(this.tree, file, tickIndex, i, false); + } +} + +class PlainCallTreeProcessor { + constructor(filter, isBottomUp) { + this.filter = filter; + this.tree = createEmptyNode("root"); + this.tree.delayedExpansion = { frameList : [], ascending : isBottomUp }; + this.isBottomUp = isBottomUp; + } + + addStack(file, tickIndex) { + let stack = file.ticks[tickIndex].s; + let step = this.isBottomUp ? 2 : -2; + let start = this.isBottomUp ? 
0 : stack.length - 2; + + let stackPos = findNextFrame(file, stack, start, step, this.filter); + addOrUpdateChildNode(this.tree, file, tickIndex, stackPos, this.isBottomUp); + + this.tree.ticks++; + } +} + +function buildCategoryTreeAndLookup() { + let root = createEmptyNode("root"); + let categories = {}; + function addCategory(name, types) { + let n = createEmptyNode(name); + for (let i = 0; i < types.length; i++) { + categories[types[i]] = n; + } + root.children.push(n); + } + addCategory("JS Optimized", [ "JSOPT" ]); + addCategory("JS Unoptimized", [ "JSUNOPT", "BC" ]); + addCategory("IC", [ "IC" ]); + addCategory("RegExp", [ "REGEXP" ]); + addCategory("Other generated", [ "STUB", "BUILTIN" ]); + addCategory("C++", [ "CPP", "LIB" ]); + addCategory("C++/GC", [ "CPPGC" ]); + addCategory("C++/Compiler", [ "CPPCOMP" ]); + addCategory("C++/External", [ "CPPEXT" ]); + addCategory("Unknown", [ "UNKNOWN" ]); + + return { categories, root }; +} + +class CategorizedCallTreeProcessor { + constructor(filter, isBottomUp) { + this.filter = filter; + let { categories, root } = buildCategoryTreeAndLookup(); + + this.tree = root; + this.categories = categories; + this.isBottomUp = isBottomUp; + } + + addStack(file, tickIndex) { + let stack = file.ticks[tickIndex].s; + let vmState = file.ticks[tickIndex].vm; + if (stack.length === 0) return; + let codeId = stack[0]; + let code = codeId >= 0 ? file.code[codeId] : undefined; + let kind = resolveCodeKindAndVmState(code, vmState); + let node = this.categories[kind]; + + this.tree.ticks++; + node.ticks++; + + let step = this.isBottomUp ? 2 : -2; + let start = this.isBottomUp ? 0 : stack.length - 2; + + let stackPos = findNextFrame(file, stack, start, step, this.filter); + addOrUpdateChildNode(node, file, tickIndex, stackPos, this.isBottomUp); + } +} + +class FunctionListTree { + constructor(filter, withCategories) { + if (withCategories) { + let { categories, root } = buildCategoryTreeAndLookup(); + this.tree = root; + this.categories = categories; + } else { + this.tree = { + name : "root", + codeId: -1, + children : [], + ownTicks : 0, + ticks : 0 + }; + this.categories = null; + } + + this.codeVisited = []; + this.filter = filter; + } + + addStack(file, tickIndex) { + let stack = file.ticks[tickIndex].s; + let vmState = file.ticks[tickIndex].vm; + + this.tree.ticks++; + let child = null; + let tree = null; + for (let i = stack.length - 2; i >= 0; i -= 2) { + let codeId = stack[i]; + if (codeId < 0 || this.codeVisited[codeId]) continue; + + let code = codeId >= 0 ? file.code[codeId] : undefined; + if (this.filter) { + let type = code ? code.type : undefined; + let kind = code ? 
code.kind : undefined; + if (!this.filter(type, kind)) continue; + } + let childId = childIdFromCode(codeId, code); + if (this.categories) { + let kind = resolveCodeKindAndVmState(code, vmState); + tree = this.categories[kind]; + } else { + tree = this.tree; + } + child = tree.children[childId]; + if (!child) { + child = createNodeFromStackEntry(code, codeId); + child.children[0] = createEmptyNode("Top-down tree"); + child.children[0].delayedExpansion = + { frameList : [], ascending : false }; + child.children[1] = createEmptyNode("Bottom-up tree"); + child.children[1].delayedExpansion = + { frameList : [], ascending : true }; + tree.children[childId] = child; + } + child.ticks++; + child.children[0].ticks++; + addFrameToFrameList( + child.children[0].delayedExpansion.frameList, tickIndex, i); + child.children[1].ticks++; + addFrameToFrameList( + child.children[1].delayedExpansion.frameList, tickIndex, i); + this.codeVisited[codeId] = true; + } + if (child) { + child.ownTicks++; + console.assert(tree !== null); + tree.ticks++; + console.assert(tree.type === "CAT"); + } + + for (let i = 0; i < stack.length; i += 2) { + let codeId = stack[i]; + if (codeId >= 0) this.codeVisited[codeId] = false; + } + } +} + + +class CategorySampler { + constructor(file, bucketCount) { + this.bucketCount = bucketCount; + + this.firstTime = file.ticks[0].tm; + let lastTime = file.ticks[file.ticks.length - 1].tm; + this.step = (lastTime - this.firstTime) / bucketCount; + + this.buckets = []; + let bucket = {}; + for (let i = 0; i < codeKinds.length; i++) { + bucket[codeKinds[i]] = 0; + } + for (let i = 0; i < bucketCount; i++) { + this.buckets.push(Object.assign({ total : 0 }, bucket)); + } + } + + addStack(file, tickIndex) { + let { tm : timestamp, vm : vmState, s : stack } = file.ticks[tickIndex]; + + let i = Math.floor((timestamp - this.firstTime) / this.step); + if (i == this.buckets.length) i--; + console.assert(i >= 0 && i < this.buckets.length); + + let bucket = this.buckets[i]; + bucket.total++; + + let codeId = (stack.length > 0) ? stack[0] : -1; + let code = codeId >= 0 ? file.code[codeId] : undefined; + let kind = resolveCodeKindAndVmState(code, vmState); + bucket[kind]++; + } +} + +class FunctionTimelineProcessor { + constructor(functionCodeId, filter) { + this.functionCodeId = functionCodeId; + this.filter = filter; + this.blocks = []; + this.currentBlock = null; + } + + addStack(file, tickIndex) { + if (!this.functionCodeId) return; + + let { tm : timestamp, vm : vmState, s : stack } = file.ticks[tickIndex]; + let functionCode = file.code[this.functionCodeId]; + + // Find if the function is on the stack, and its position on the stack, + // ignoring any filtered entries. + let stackCode = undefined; + let functionPosInStack = -1; + let filteredI = 0 + for (let i = 0; i < stack.length - 1; i += 2) { + let codeId = stack[i]; + let code = codeId >= 0 ? file.code[codeId] : undefined; + let type = code ? code.type : undefined; + let kind = code ? code.kind : undefined; + if (!this.filter(type, kind)) continue; + + // Match other instances of the same function (e.g. unoptimised, various + // different optimised versions). 
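+      // codeEquals() is called with allowDifferentKinds=true, so optimized
+      // ("Opt") and unoptimized ("Unopt") code objects belonging to the same
+      // function are all matched here.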
+ if (codeEquals(code, functionCode, true)) { + functionPosInStack = filteredI; + stackCode = code; + break; + } + filteredI++; + } + + if (functionPosInStack >= 0) { + let stackKind = resolveCodeKindAndVmState(stackCode, vmState); + + let codeIsTopOfStack = (functionPosInStack == 0); + + if (this.currentBlock !== null) { + this.currentBlock.end = timestamp; + + if (codeIsTopOfStack === this.currentBlock.topOfStack + && stackKind === this.currentBlock.kind) { + // If we haven't changed the stack top or the function kind, then + // we're happy just extending the current block and not starting + // a new one. + return; + } + } + + // Start a new block at the current timestamp. + this.currentBlock = { + start: timestamp, + end: timestamp, + code: stackCode, + kind: stackKind, + topOfStack: codeIsTopOfStack + }; + this.blocks.push(this.currentBlock); + } else { + this.currentBlock = null; + } + } +} + +// Generates a tree out of a ticks sequence. +// {file} is the JSON files with the ticks and code objects. +// {startTime}, {endTime} is the interval. +// {tree} is the processor of stacks. +function generateTree( + file, startTime, endTime, tree) { + let ticks = file.ticks; + let i = 0; + while (i < ticks.length && ticks[i].tm < startTime) { + i++; + } + + let tickCount = 0; + while (i < ticks.length && ticks[i].tm < endTime) { + tree.addStack(file, i); + i++; + tickCount++; + } + + return tickCount; +} + +function computeOptimizationStats(file, + timeStart = -Infinity, timeEnd = Infinity) { + function newCollection() { + return { count : 0, functions : [], functionTable : [] }; + } + function addToCollection(collection, code) { + collection.count++; + let funcData = collection.functionTable[code.func]; + if (!funcData) { + funcData = { f : file.functions[code.func], instances : [] }; + collection.functionTable[code.func] = funcData; + collection.functions.push(funcData); + } + funcData.instances.push(code); + } + + let functionCount = 0; + let optimizedFunctionCount = 0; + let deoptimizedFunctionCount = 0; + let optimizations = newCollection(); + let eagerDeoptimizations = newCollection(); + let softDeoptimizations = newCollection(); + let lazyDeoptimizations = newCollection(); + + for (let i = 0; i < file.functions.length; i++) { + let f = file.functions[i]; + + // Skip special SFIs that do not correspond to JS functions. 
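+    // (i.e. functions with no code objects at all, or whose first code
+    // object is not of type "JS"; see the two checks below.)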
+ if (f.codes.length === 0) continue; + if (file.code[f.codes[0]].type !== "JS") continue; + + functionCount++; + let optimized = false; + let deoptimized = false; + + for (let j = 0; j < f.codes.length; j++) { + let code = file.code[f.codes[j]]; + console.assert(code.type === "JS"); + if (code.kind === "Opt") { + optimized = true; + if (code.tm >= timeStart && code.tm <= timeEnd) { + addToCollection(optimizations, code); + } + } + if (code.deopt) { + deoptimized = true; + if (code.deopt.tm >= timeStart && code.deopt.tm <= timeEnd) { + switch (code.deopt.bailoutType) { + case "lazy": + addToCollection(lazyDeoptimizations, code); + break; + case "eager": + addToCollection(eagerDeoptimizations, code); + break; + case "soft": + addToCollection(softDeoptimizations, code); + break; + } + } + } + } + if (optimized) { + optimizedFunctionCount++; + } + if (deoptimized) { + deoptimizedFunctionCount++; + } + } + + function sortCollection(collection) { + collection.functions.sort( + (a, b) => a.instances.length - b.instances.length); + } + + sortCollection(eagerDeoptimizations); + sortCollection(lazyDeoptimizations); + sortCollection(softDeoptimizations); + sortCollection(optimizations); + + return { + functionCount, + optimizedFunctionCount, + deoptimizedFunctionCount, + optimizations, + eagerDeoptimizations, + lazyDeoptimizations, + softDeoptimizations, + }; +} diff --git a/deps/v8/tools/profview/profview.css b/deps/v8/tools/profview/profview.css new file mode 100644 index 00000000000000..106bfe288502a2 --- /dev/null +++ b/deps/v8/tools/profview/profview.css @@ -0,0 +1,62 @@ +table.calltree { + width : 100%; +} + +.numeric { + width : 12ex; +} + +.numeric-hidden { + display : none; +} + +body { + font-family: 'Roboto', sans-serif; +} + +div.code-type-chip { + display : inline-block; + padding : 0.0em; +} + +span.code-type-chip { + border-radius : 1em; + display : inline-block; + padding : 0.1em; + background-color : #4040c0; + color: #ffffff; + font-size : small; + box-shadow: 0 2px 5px 0 rgba(0, 0, 0, 0.16), 0 2px 10px 0 rgba(0, 0, 0, 0.12); +} + +span.code-type-chip-space { + width : 0.5ex; + display : inline-block; +} + +span.codeid-link { + text-decoration: underline; + cursor: pointer; +} + +div.mode-button { + padding: 1em 3em; + display: inline-block; + background-color: #6070ff; + color : #ffffff; + margin: 0 0.2em 2em 0; + box-shadow: 3px 3px 2px #d0d0ff; +} + +div.mode-button:hover { + background-color: #4858ff; +} + +div.active-mode-button { + background-color: #0000ff; + box-shadow: 3px 3px 2px #a0a0ff; +} + +div.active-mode-button:hover { + background-color: #0000ff; +} diff --git a/deps/v8/tools/profview/profview.js b/deps/v8/tools/profview/profview.js new file mode 100644 index 00000000000000..033d0f385e9df2 --- /dev/null +++ b/deps/v8/tools/profview/profview.js @@ -0,0 +1,1240 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
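+
+// profview.js implements the viewer UI: `main` keeps the current view state
+// (loaded file, mode, selected time interval and call-tree options) and asks
+// every registered view component to re-render whenever that state changes.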
+ +"use strict" + +function $(id) { + return document.getElementById(id); +} + +let components = []; + +function createViews() { + components.push(new CallTreeView()); + components.push(new TimelineView()); + components.push(new HelpView()); + components.push(new SummaryView()); + components.push(new ModeBarView()); + + main.setMode("summary"); +} + +function emptyState() { + return { + file : null, + mode : "none", + currentCodeId : null, + start : 0, + end : Infinity, + timeLine : { + width : 100, + height : 100 + }, + callTree : { + attribution : "js-exclude-bc", + categories : "code-type", + sort : "time" + } + }; +} + +function setCallTreeState(state, callTreeState) { + state = Object.assign({}, state); + state.callTree = callTreeState; + return state; +} + +let main = { + currentState : emptyState(), + + setMode(mode) { + if (mode != main.currentState.mode) { + + function setCallTreeModifiers(attribution, categories, sort) { + let callTreeState = Object.assign({}, main.currentState.callTree); + callTreeState.attribution = attribution; + callTreeState.categories = categories; + callTreeState.sort = sort; + return callTreeState; + } + + let state = Object.assign({}, main.currentState); + + switch (mode) { + case "bottom-up": + state.callTree = + setCallTreeModifiers("js-exclude-bc", "code-type", "time"); + break; + case "top-down": + state.callTree = + setCallTreeModifiers("js-exclude-bc", "none", "time"); + break; + case "function-list": + state.callTree = + setCallTreeModifiers("js-exclude-bc", "code-type", "own-time"); + break; + } + + state.mode = mode; + + main.currentState = state; + main.delayRender(); + } + }, + + setCallTreeAttribution(attribution) { + if (attribution != main.currentState.attribution) { + let callTreeState = Object.assign({}, main.currentState.callTree); + callTreeState.attribution = attribution; + main.currentState = setCallTreeState(main.currentState, callTreeState); + main.delayRender(); + } + }, + + setCallTreeSort(sort) { + if (sort != main.currentState.sort) { + let callTreeState = Object.assign({}, main.currentState.callTree); + callTreeState.sort = sort; + main.currentState = setCallTreeState(main.currentState, callTreeState); + main.delayRender(); + } + }, + + setCallTreeCategories(categories) { + if (categories != main.currentState.categories) { + let callTreeState = Object.assign({}, main.currentState.callTree); + callTreeState.categories = categories; + main.currentState = setCallTreeState(main.currentState, callTreeState); + main.delayRender(); + } + }, + + setViewInterval(start, end) { + if (start != main.currentState.start || + end != main.currentState.end) { + main.currentState = Object.assign({}, main.currentState); + main.currentState.start = start; + main.currentState.end = end; + main.delayRender(); + } + }, + + setTimeLineDimensions(width, height) { + if (width != main.currentState.timeLine.width || + height != main.currentState.timeLine.height) { + let timeLine = Object.assign({}, main.currentState.timeLine); + timeLine.width = width; + timeLine.height = height; + main.currentState = Object.assign({}, main.currentState); + main.currentState.timeLine = timeLine; + main.delayRender(); + } + }, + + setFile(file) { + if (file != main.currentState.file) { + main.currentState = Object.assign({}, main.currentState); + main.currentState.file = file; + main.delayRender(); + } + }, + + setCurrentCode(codeId) { + if (codeId != main.currentState.currentCodeId) { + main.currentState = Object.assign({}, main.currentState); + 
main.currentState.currentCodeId = codeId; + main.delayRender(); + } + }, + + onResize() { + main.setTimeLineDimensions( + Math.round(window.innerWidth - 20), + Math.round(window.innerHeight / 5)); + }, + + onLoad() { + function loadHandler(evt) { + let f = evt.target.files[0]; + if (f) { + let reader = new FileReader(); + reader.onload = function(event) { + let profData = JSON.parse(event.target.result); + main.setViewInterval(0, Infinity); + main.setFile(profData); + }; + reader.onerror = function(event) { + console.error( + "File could not be read! Code " + event.target.error.code); + }; + reader.readAsText(f); + } else { + main.setFile(null); + } + } + $("fileinput").addEventListener( + "change", loadHandler, false); + createViews(); + main.onResize(); + }, + + delayRender() { + Promise.resolve().then(() => { + for (let c of components) { + c.render(main.currentState); + } + }); + } +}; + +let bucketDescriptors = + [ { kinds : [ "JSOPT" ], + color : "#00ff00", + backgroundColor : "#c0ffc0", + text : "JS Optimized" }, + { kinds : [ "JSUNOPT", "BC" ], + color : "#ffb000", + backgroundColor : "#ffe0c0", + text : "JS Unoptimized" }, + { kinds : [ "IC" ], + color : "#ffff00", + backgroundColor : "#ffffc0", + text : "IC" }, + { kinds : [ "STUB", "BUILTIN", "REGEXP" ], + color : "#ffb0b0", + backgroundColor : "#fff0f0", + text : "Other generated" }, + { kinds : [ "CPP", "LIB" ], + color : "#0000ff", + backgroundColor : "#c0c0ff", + text : "C++" }, + { kinds : [ "CPPEXT" ], + color : "#8080ff", + backgroundColor : "#e0e0ff", + text : "C++/external" }, + { kinds : [ "CPPCOMP" ], + color : "#00ffff", + backgroundColor : "#c0ffff", + text : "C++/Compiler" }, + { kinds : [ "CPPGC" ], + color : "#ff00ff", + backgroundColor : "#ffc0ff", + text : "C++/GC" }, + { kinds : [ "UNKNOWN" ], + color : "#f0f0f0", + backgroundColor : "#e0e0e0", + text : "Unknown" } + ]; + +let kindToBucketDescriptor = {} +for (let i = 0; i < bucketDescriptors.length; i++) { + let bucket = bucketDescriptors[i]; + for (let j = 0; j < bucket.kinds.length; j++) { + kindToBucketDescriptor[bucket.kinds[j]] = bucket; + } +} + +function bucketFromKind(kind) { + for (let i = 0; i < bucketDescriptors.length; i++) { + let bucket = bucketDescriptors[i]; + for (let j = 0; j < bucket.kinds.length; j++) { + if (bucket.kinds[j] === kind) { + return bucket; + } + } + } + return null; +} + +function codeTypeToText(type) { + switch (type) { + case "UNKNOWN": + return "Unknown"; + case "CPPCOMP": + return "C++ (compiler)"; + case "CPPGC": + return "C++"; + case "CPPEXT": + return "C++ External"; + case "CPP": + return "C++"; + case "LIB": + return "Library"; + case "IC": + return "IC"; + case "BC": + return "Bytecode"; + case "STUB": + return "Stub"; + case "BUILTIN": + return "Builtin"; + case "REGEXP": + return "RegExp"; + case "JSOPT": + return "JS opt"; + case "JSUNOPT": + return "JS unopt"; + } + console.error("Unknown type: " + type); +} + +function createTypeDiv(type) { + if (type === "CAT") { + return document.createTextNode(""); + } + let div = document.createElement("div"); + div.classList.add("code-type-chip"); + + let span = document.createElement("span"); + span.classList.add("code-type-chip"); + span.textContent = codeTypeToText(type); + div.appendChild(span); + + span = document.createElement("span"); + span.classList.add("code-type-chip-space"); + div.appendChild(span); + + return div; +} + +function isBytecodeHandler(kind) { + return kind === "BytecodeHandler"; +} + +function filterFromFilterId(id) { + switch (id) { + case 
"full-tree": + return (type, kind) => true; + case "js-funs": + return (type, kind) => type !== 'CODE'; + case "js-exclude-bc": + return (type, kind) => + type !== 'CODE' || !isBytecodeHandler(kind); + } +} + +function createTableExpander(indent) { + let div = document.createElement("div"); + div.style.width = (indent + 0.5) + "em"; + div.style.display = "inline-block"; + div.style.textAlign = "right"; + return div; +} + +function createFunctionNode(name, codeId) { + if (codeId == -1) { + return document.createTextNode(name); + } + let nameElement = document.createElement("span"); + nameElement.classList.add("codeid-link") + nameElement.onclick = function() { + main.setCurrentCode(codeId); + }; + nameElement.appendChild(document.createTextNode(name)); + return nameElement; +} + +class CallTreeView { + constructor() { + this.element = $("calltree"); + this.treeElement = $("calltree-table"); + this.selectAttribution = $("calltree-attribution"); + this.selectCategories = $("calltree-categories"); + this.selectSort = $("calltree-sort"); + + this.selectAttribution.onchange = () => { + main.setCallTreeAttribution(this.selectAttribution.value); + }; + + this.selectCategories.onchange = () => { + main.setCallTreeCategories(this.selectCategories.value); + }; + + this.selectSort.onchange = () => { + main.setCallTreeSort(this.selectSort.value); + }; + + this.currentState = null; + } + + sortFromId(id) { + switch (id) { + case "time": + return (c1, c2) => { + if (c1.ticks < c2.ticks) return 1; + else if (c1.ticks > c2.ticks) return -1; + return c2.ownTicks - c1.ownTicks; + } + case "own-time": + return (c1, c2) => { + if (c1.ownTicks < c2.ownTicks) return 1; + else if (c1.ownTicks > c2.ownTicks) return -1; + return c2.ticks - c1.ticks; + } + case "category-time": + return (c1, c2) => { + if (c1.type === c2.type) return c2.ticks - c1.ticks; + if (c1.type < c2.type) return 1; + return -1; + }; + case "category-own-time": + return (c1, c2) => { + if (c1.type === c2.type) return c2.ownTicks - c1.ownTicks; + if (c1.type < c2.type) return 1; + return -1; + }; + } + } + + expandTree(tree, indent) { + let that = this; + let index = 0; + let id = "R/"; + let row = tree.row; + let expander = tree.expander; + + if (row) { + index = row.rowIndex; + id = row.id; + + // Make sure we collapse the children when the row is clicked + // again. + expander.textContent = "\u25BE"; + let expandHandler = expander.onclick; + expander.onclick = () => { + that.collapseRow(tree, expander, expandHandler); + } + } + + // Collect the children, and sort them by ticks. + let children = []; + let filter = + filterFromFilterId(this.currentState.callTree.attribution); + for (let childId in tree.children) { + let child = tree.children[childId]; + if (child.ticks > 0) { + children.push(child); + if (child.delayedExpansion) { + expandTreeNode(this.currentState.file, child, filter); + } + } + } + children.sort(this.sortFromId(this.currentState.callTree.sort)); + + for (let i = 0; i < children.length; i++) { + let node = children[i]; + let row = this.rows.insertRow(index); + row.id = id + i + "/"; + + if (node.type != "CAT") { + row.style.backgroundColor = bucketFromKind(node.type).backgroundColor; + } + + // Inclusive time % cell. + let c = row.insertCell(); + c.textContent = (node.ticks * 100 / this.tickCount).toFixed(2) + "%"; + c.style.textAlign = "right"; + // Percent-of-parent cell. + c = row.insertCell(); + c.textContent = (node.ticks * 100 / tree.ticks).toFixed(2) + "%"; + c.style.textAlign = "right"; + // Exclusive time % cell. 
+ if (this.currentState.mode !== "bottom-up") { + c = row.insertCell(-1); + c.textContent = (node.ownTicks * 100 / this.tickCount).toFixed(2) + "%"; + c.style.textAlign = "right"; + } + + // Create the name cell. + let nameCell = row.insertCell(); + let expander = createTableExpander(indent + 1); + nameCell.appendChild(expander); + nameCell.appendChild(createTypeDiv(node.type)); + nameCell.appendChild(createFunctionNode(node.name, node.codeId)); + + // Inclusive ticks cell. + c = row.insertCell(); + c.textContent = node.ticks; + c.style.textAlign = "right"; + if (this.currentState.mode !== "bottom-up") { + // Exclusive ticks cell. + c = row.insertCell(-1); + c.textContent = node.ownTicks; + c.style.textAlign = "right"; + } + if (node.children.length > 0) { + expander.textContent = "\u25B8"; + expander.onclick = () => { that.expandTree(node, indent + 1); }; + } + + node.row = row; + node.expander = expander; + + index++; + } + } + + collapseRow(tree, expander, expandHandler) { + let row = tree.row; + let id = row.id; + let index = row.rowIndex; + while (row.rowIndex < this.rows.rows.length && + this.rows.rows[index].id.startsWith(id)) { + this.rows.deleteRow(index); + } + + expander.textContent = "\u25B8"; + expander.onclick = expandHandler; + } + + fillSelects(mode, calltree) { + function addOptions(e, values, current) { + while (e.options.length > 0) { + e.remove(0); + } + for (let i = 0; i < values.length; i++) { + let option = document.createElement("option"); + option.value = values[i].value; + option.textContent = values[i].text; + e.appendChild(option); + } + e.value = current; + } + + let attributions = [ + { value : "js-exclude-bc", + text : "Attribute bytecode handlers to caller" }, + { value : "full-tree", + text : "Count each code object separately" }, + { value : "js-funs", + text : "Attribute non-functions to JS functions" } + ]; + + switch (mode) { + case "bottom-up": + addOptions(this.selectAttribution, attributions, calltree.attribution); + addOptions(this.selectCategories, [ + { value : "code-type", text : "Code type" }, + { value : "none", text : "None" } + ], calltree.categories); + addOptions(this.selectSort, [ + { value : "time", text : "Time (including children)" }, + { value : "category-time", text : "Code category, time" }, + ], calltree.sort); + return; + case "top-down": + addOptions(this.selectAttribution, attributions, calltree.attribution); + addOptions(this.selectCategories, [ + { value : "none", text : "None" }, + { value : "rt-entry", text : "Runtime entries" } + ], calltree.categories); + addOptions(this.selectSort, [ + { value : "time", text : "Time (including children)" }, + { value : "own-time", text : "Own time" }, + { value : "category-time", text : "Code category, time" }, + { value : "category-own-time", text : "Code category, own time"} + ], calltree.sort); + return; + case "function-list": + addOptions(this.selectAttribution, attributions, calltree.attribution); + addOptions(this.selectCategories, [ + { value : "code-type", text : "Code type" }, + { value : "none", text : "None" } + ], calltree.categories); + addOptions(this.selectSort, [ + { value : "own-time", text : "Own time" }, + { value : "time", text : "Time (including children)" }, + { value : "category-own-time", text : "Code category, own time"}, + { value : "category-time", text : "Code category, time" }, + ], calltree.sort); + return; + } + console.error("Unexpected mode"); + } + + static isCallTreeMode(mode) { + switch (mode) { + case "bottom-up": + case "top-down": + case 
"function-list": + return true; + default: + return false; + } + } + + render(newState) { + let oldState = this.currentState; + if (!newState.file || !CallTreeView.isCallTreeMode(newState.mode)) { + this.element.style.display = "none"; + this.currentState = null; + return; + } + + this.currentState = newState; + if (oldState) { + if (newState.file === oldState.file && + newState.start === oldState.start && + newState.end === oldState.end && + newState.mode === oldState.mode && + newState.callTree.attribution === oldState.callTree.attribution && + newState.callTree.categories === oldState.callTree.categories && + newState.callTree.sort === oldState.callTree.sort) { + // No change => just return. + return; + } + } + + this.element.style.display = "inherit"; + + let mode = this.currentState.mode; + if (!oldState || mode !== oldState.mode) { + // Technically, we should also call this if attribution, categories or + // sort change, but the selection is already highlighted by the combobox + // itself, so we do need to do anything here. + this.fillSelects(newState.mode, newState.callTree); + } + + let ownTimeClass = (mode === "bottom-up") ? "numeric-hidden" : "numeric"; + let ownTimeTh = $(this.treeElement.id + "-own-time-header"); + ownTimeTh.classList = ownTimeClass; + let ownTicksTh = $(this.treeElement.id + "-own-ticks-header"); + ownTicksTh.classList = ownTimeClass; + + // Build the tree. + let stackProcessor; + let filter = filterFromFilterId(this.currentState.callTree.attribution); + if (mode === "top-down") { + if (this.currentState.callTree.categories === "rt-entry") { + stackProcessor = + new RuntimeCallTreeProcessor(); + } else { + stackProcessor = + new PlainCallTreeProcessor(filter, false); + } + } else if (mode === "function-list") { + stackProcessor = new FunctionListTree( + filter, this.currentState.callTree.categories === "code-type"); + + } else { + console.assert(mode === "bottom-up"); + if (this.currentState.callTree.categories == "none") { + stackProcessor = + new PlainCallTreeProcessor(filter, true); + } else { + console.assert(this.currentState.callTree.categories === "code-type"); + stackProcessor = + new CategorizedCallTreeProcessor(filter, true); + } + } + this.tickCount = + generateTree(this.currentState.file, + this.currentState.start, + this.currentState.end, + stackProcessor); + // TODO(jarin) Handle the case when tick count is negative. + + this.tree = stackProcessor.tree; + + // Remove old content of the table, replace with new one. + let oldRows = this.treeElement.getElementsByTagName("tbody"); + let newRows = document.createElement("tbody"); + this.rows = newRows; + + // Populate the table. + this.expandTree(this.tree, 0); + + // Swap in the new rows. 
+ this.treeElement.replaceChild(newRows, oldRows[0]); + } +} + +class TimelineView { + constructor() { + this.element = $("timeline"); + this.canvas = $("timeline-canvas"); + this.legend = $("timeline-legend"); + this.currentCode = $("timeline-currentCode"); + + this.canvas.onmousedown = this.onMouseDown.bind(this); + this.canvas.onmouseup = this.onMouseUp.bind(this); + this.canvas.onmousemove = this.onMouseMove.bind(this); + + this.selectionStart = null; + this.selectionEnd = null; + this.selecting = false; + + this.fontSize = 12; + this.imageOffset = Math.round(this.fontSize * 1.2); + this.functionTimelineHeight = 24; + this.functionTimelineTickHeight = 16; + + this.currentState = null; + } + + onMouseDown(e) { + this.selectionStart = + e.clientX - this.canvas.getBoundingClientRect().left; + this.selectionEnd = this.selectionStart + 1; + this.selecting = true; + } + + onMouseMove(e) { + if (this.selecting) { + this.selectionEnd = + e.clientX - this.canvas.getBoundingClientRect().left; + this.drawSelection(); + } + } + + onMouseUp(e) { + if (this.selectionStart !== null) { + let x = e.clientX - this.canvas.getBoundingClientRect().left; + if (Math.abs(x - this.selectionStart) < 10) { + this.selectionStart = null; + this.selectionEnd = null; + let ctx = this.canvas.getContext("2d"); + ctx.drawImage(this.buffer, 0, this.imageOffset); + } else { + this.selectionEnd = x; + this.drawSelection(); + } + let file = this.currentState.file; + if (file) { + let start = this.selectionStart === null ? 0 : this.selectionStart; + let end = this.selectionEnd === null ? Infinity : this.selectionEnd; + let firstTime = file.ticks[0].tm; + let lastTime = file.ticks[file.ticks.length - 1].tm; + + let width = this.buffer.width; + + start = (start / width) * (lastTime - firstTime) + firstTime; + end = (end / width) * (lastTime - firstTime) + firstTime; + + if (end < start) { + let temp = start; + start = end; + end = temp; + } + + main.setViewInterval(start, end); + } + } + this.selecting = false; + } + + drawSelection() { + let ctx = this.canvas.getContext("2d"); + + // Draw the timeline image. + ctx.drawImage(this.buffer, 0, this.imageOffset); + + // Draw the current interval highlight. + let left; + let right; + if (this.selectionStart !== null && this.selectionEnd !== null) { + ctx.fillStyle = "rgba(0, 0, 0, 0.3)"; + left = Math.min(this.selectionStart, this.selectionEnd); + right = Math.max(this.selectionStart, this.selectionEnd); + let height = this.buffer.height - this.functionTimelineHeight; + ctx.fillRect(0, this.imageOffset, left, height); + ctx.fillRect(right, this.imageOffset, this.buffer.width - right, height); + } else { + left = 0; + right = this.buffer.width; + } + + // Draw the scale text. 
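+    // (The selected pixel range is mapped back to timestamps and printed, in
+    // seconds, above the left and right edges of the selection.)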
+ let file = this.currentState.file; + ctx.fillStyle = "white"; + ctx.fillRect(0, 0, this.canvas.width, this.imageOffset); + if (file && file.ticks.length > 0) { + let firstTime = file.ticks[0].tm; + let lastTime = file.ticks[file.ticks.length - 1].tm; + + let leftTime = + firstTime + left / this.canvas.width * (lastTime - firstTime); + let rightTime = + firstTime + right / this.canvas.width * (lastTime - firstTime); + + let leftText = (leftTime / 1000000).toFixed(3) + "s"; + let rightText = (rightTime / 1000000).toFixed(3) + "s"; + + ctx.textBaseline = 'top'; + ctx.font = this.fontSize + "px Arial"; + ctx.fillStyle = "black"; + + let leftWidth = ctx.measureText(leftText).width; + let rightWidth = ctx.measureText(rightText).width; + + let leftStart = left - leftWidth / 2; + let rightStart = right - rightWidth / 2; + + if (leftStart < 0) leftStart = 0; + if (rightStart + rightWidth > this.canvas.width) { + rightStart = this.canvas.width - rightWidth; + } + if (leftStart + leftWidth > rightStart) { + if (leftStart > this.canvas.width - (rightStart - rightWidth)) { + rightStart = leftStart + leftWidth; + + } else { + leftStart = rightStart - leftWidth; + } + } + + ctx.fillText(leftText, leftStart, 0); + ctx.fillText(rightText, rightStart, 0); + } + } + + render(newState) { + let oldState = this.currentState; + + if (!newState.file) { + this.element.style.display = "none"; + return; + } + + this.currentState = newState; + if (oldState) { + if (newState.timeLine.width === oldState.timeLine.width && + newState.timeLine.height === oldState.timeLine.height && + newState.file === oldState.file && + newState.currentCodeId === oldState.currentCodeId && + newState.start === oldState.start && + newState.end === oldState.end) { + // No change, nothing to do. + return; + } + } + + this.element.style.display = "inherit"; + + // Make sure the canvas has the right dimensions. + let width = this.currentState.timeLine.width; + let height = this.currentState.timeLine.height; + this.canvas.width = width; + this.canvas.height = height; + + // Make space for the selection text. + height -= this.imageOffset; + + let file = this.currentState.file; + if (!file) return; + + let currentCodeId = this.currentState.currentCodeId; + + let firstTime = file.ticks[0].tm; + let lastTime = file.ticks[file.ticks.length - 1].tm; + let start = Math.max(this.currentState.start, firstTime); + let end = Math.min(this.currentState.end, lastTime); + + this.selectionStart = (start - firstTime) / (lastTime - firstTime) * width; + this.selectionEnd = (end - firstTime) / (lastTime - firstTime) * width; + + let tickCount = file.ticks.length; + + let minBucketPixels = 10; + let minBucketSamples = 30; + let bucketCount = Math.min(width / minBucketPixels, + tickCount / minBucketSamples); + + let stackProcessor = new CategorySampler(file, bucketCount); + generateTree(file, 0, Infinity, stackProcessor); + let codeIdProcessor = new FunctionTimelineProcessor( + currentCodeId, + filterFromFilterId(this.currentState.callTree.attribution)); + generateTree(file, 0, Infinity, codeIdProcessor); + + let buffer = document.createElement("canvas"); + + buffer.width = width; + buffer.height = height; + + // Calculate the bar heights for each bucket. 
+ let graphHeight = height - this.functionTimelineHeight; + let buckets = stackProcessor.buckets; + let bucketsGraph = []; + for (let i = 0; i < buckets.length; i++) { + let sum = 0; + let bucketData = []; + let total = buckets[i].total; + for (let j = 0; j < bucketDescriptors.length; j++) { + let desc = bucketDescriptors[j]; + for (let k = 0; k < desc.kinds.length; k++) { + sum += buckets[i][desc.kinds[k]]; + } + bucketData.push(Math.round(graphHeight * sum / total)); + } + bucketsGraph.push(bucketData); + } + + // Draw the category graph into the buffer. + let bucketWidth = width / bucketsGraph.length; + let ctx = buffer.getContext('2d'); + for (let i = 0; i < bucketsGraph.length - 1; i++) { + let bucketData = bucketsGraph[i]; + let nextBucketData = bucketsGraph[i + 1]; + for (let j = 0; j < bucketData.length; j++) { + let x1 = Math.round(i * bucketWidth); + let x2 = Math.round((i + 1) * bucketWidth); + ctx.beginPath(); + ctx.moveTo(x1, j && bucketData[j - 1]); + ctx.lineTo(x2, j && nextBucketData[j - 1]); + ctx.lineTo(x2, nextBucketData[j]); + ctx.lineTo(x1, bucketData[j]); + ctx.closePath(); + ctx.fillStyle = bucketDescriptors[j].color; + ctx.fill(); + } + } + + // Draw the function ticks. + let functionTimelineYOffset = graphHeight; + let functionTimelineTickHeight = this.functionTimelineTickHeight; + let functionTimelineHalfHeight = + Math.round(functionTimelineTickHeight / 2); + let timestampScaler = width / (lastTime - firstTime); + let timestampToX = (t) => Math.round((t - firstTime) * timestampScaler); + ctx.fillStyle = "white"; + ctx.fillRect( + 0, + functionTimelineYOffset, + buffer.width, + this.functionTimelineHeight); + for (let i = 0; i < codeIdProcessor.blocks.length; i++) { + let block = codeIdProcessor.blocks[i]; + let bucket = kindToBucketDescriptor[block.kind]; + ctx.fillStyle = bucket.color; + ctx.fillRect( + timestampToX(block.start), + functionTimelineYOffset, + Math.max(1, Math.round((block.end - block.start) * timestampScaler)), + block.topOfStack ? + functionTimelineTickHeight : functionTimelineHalfHeight); + } + ctx.strokeStyle = "black"; + ctx.lineWidth = "1"; + ctx.beginPath(); + ctx.moveTo(0, functionTimelineYOffset + 0.5); + ctx.lineTo(buffer.width, functionTimelineYOffset + 0.5); + ctx.stroke(); + ctx.strokeStyle = "rgba(0,0,0,0.2)"; + ctx.lineWidth = "1"; + ctx.beginPath(); + ctx.moveTo(0, functionTimelineYOffset + functionTimelineHalfHeight - 0.5); + ctx.lineTo(buffer.width, + functionTimelineYOffset + functionTimelineHalfHeight - 0.5); + ctx.stroke(); + + // Draw marks for optimizations and deoptimizations in the function + // timeline. + if (currentCodeId && currentCodeId >= 0 && + file.code[currentCodeId].func) { + let y = Math.round(functionTimelineYOffset + functionTimelineTickHeight + + (this.functionTimelineHeight - functionTimelineTickHeight) / 2); + let func = file.functions[file.code[currentCodeId].func]; + for (let i = 0; i < func.codes.length; i++) { + let code = file.code[func.codes[i]]; + if (code.kind === "Opt") { + if (code.deopt) { + // Draw deoptimization mark. + let x = timestampToX(code.deopt.tm); + ctx.lineWidth = 0.7; + ctx.strokeStyle = "red"; + ctx.beginPath(); + ctx.moveTo(x - 3, y - 3); + ctx.lineTo(x + 3, y + 3); + ctx.stroke(); + ctx.beginPath(); + ctx.moveTo(x - 3, y + 3); + ctx.lineTo(x + 3, y - 3); + ctx.stroke(); + } + // Draw optimization mark. 
+ let x = timestampToX(code.tm); + ctx.lineWidth = 0.7; + ctx.strokeStyle = "blue"; + ctx.beginPath(); + ctx.moveTo(x - 3, y - 3); + ctx.lineTo(x, y); + ctx.stroke(); + ctx.beginPath(); + ctx.moveTo(x - 3, y + 3); + ctx.lineTo(x, y); + ctx.stroke(); + } else { + // Draw code creation mark. + let x = Math.round(timestampToX(code.tm)); + ctx.beginPath(); + ctx.fillStyle = "black"; + ctx.arc(x, y, 3, 0, 2 * Math.PI); + ctx.fill(); + } + } + } + + // Remember stuff for later. + this.buffer = buffer; + + // Draw the buffer. + this.drawSelection(); + + // (Re-)Populate the graph legend. + while (this.legend.cells.length > 0) { + this.legend.deleteCell(0); + } + let cell = this.legend.insertCell(-1); + cell.textContent = "Legend: "; + cell.style.padding = "1ex"; + for (let i = 0; i < bucketDescriptors.length; i++) { + let cell = this.legend.insertCell(-1); + cell.style.padding = "1ex"; + let desc = bucketDescriptors[i]; + let div = document.createElement("div"); + div.style.display = "inline-block"; + div.style.width = "0.6em"; + div.style.height = "1.2ex"; + div.style.backgroundColor = desc.color; + div.style.borderStyle = "solid"; + div.style.borderWidth = "1px"; + div.style.borderColor = "Black"; + cell.appendChild(div); + cell.appendChild(document.createTextNode(" " + desc.text)); + } + + while (this.currentCode.firstChild) { + this.currentCode.removeChild(this.currentCode.firstChild); + } + if (currentCodeId) { + let currentCode = file.code[currentCodeId]; + this.currentCode.appendChild(document.createTextNode(currentCode.name)); + } else { + this.currentCode.appendChild(document.createTextNode("")); + } + } +} + +class ModeBarView { + constructor() { + let modeBar = this.element = $("mode-bar"); + + function addMode(id, text, active) { + let div = document.createElement("div"); + div.classList = "mode-button" + (active ? " active-mode-button" : ""); + div.id = "mode-" + id; + div.textContent = text; + div.onclick = () => { + if (main.currentState.mode === id) return; + let old = $("mode-" + main.currentState.mode); + old.classList = "mode-button"; + div.classList = "mode-button active-mode-button"; + main.setMode(id); + }; + modeBar.appendChild(div); + } + + addMode("summary", "Summary", true); + addMode("bottom-up", "Bottom up"); + addMode("top-down", "Top down"); + addMode("function-list", "Functions"); + } + + render(newState) { + if (!newState.file) { + this.element.style.display = "none"; + return; + } + + this.element.style.display = "inherit"; + } +} + +class SummaryView { + constructor() { + this.element = $("summary"); + this.currentState = null; + } + + render(newState) { + let oldState = this.currentState; + + if (!newState.file || newState.mode !== "summary") { + this.element.style.display = "none"; + this.currentState = null; + return; + } + + this.currentState = newState; + if (oldState) { + if (newState.file === oldState.file && + newState.start === oldState.start && + newState.end === oldState.end) { + // No change, nothing to do. 
+ return; + } + } + + this.element.style.display = "inherit"; + + while (this.element.firstChild) { + this.element.removeChild(this.element.firstChild); + } + + let stats = computeOptimizationStats( + this.currentState.file, newState.start, newState.end); + + let table = document.createElement("table"); + let rows = document.createElement("tbody"); + + function addRow(text, number, indent) { + let row = rows.insertRow(-1); + let textCell = row.insertCell(-1); + textCell.textContent = text; + let numberCell = row.insertCell(-1); + numberCell.textContent = number; + if (indent) { + textCell.style.textIndent = indent + "em"; + numberCell.style.textIndent = indent + "em"; + } + return row; + } + + function makeCollapsible(row, expander) { + expander.textContent = "\u25BE"; + let expandHandler = expander.onclick; + expander.onclick = () => { + let id = row.id; + let index = row.rowIndex + 1; + while (index < rows.rows.length && + rows.rows[index].id.startsWith(id)) { + rows.deleteRow(index); + } + expander.textContent = "\u25B8"; + expander.onclick = expandHandler; + } + } + + function expandDeoptInstances(row, expander, instances, indent, kind) { + let index = row.rowIndex; + for (let i = 0; i < instances.length; i++) { + let childRow = rows.insertRow(index + 1); + childRow.id = row.id + i + "/"; + + let deopt = instances[i].deopt; + + let textCell = childRow.insertCell(-1); + textCell.appendChild(document.createTextNode(deopt.posText)); + textCell.style.textIndent = indent + "em"; + let reasonCell = childRow.insertCell(-1); + reasonCell.appendChild( + document.createTextNode("Reason: " + deopt.reason)); + reasonCell.style.textIndent = indent + "em"; + } + makeCollapsible(row, expander); + } + + function expandDeoptFunctionList(row, expander, list, indent, kind) { + let index = row.rowIndex; + for (let i = 0; i < list.length; i++) { + let childRow = rows.insertRow(index + 1); + childRow.id = row.id + i + "/"; + + let textCell = childRow.insertCell(-1); + let expander = createTableExpander(indent); + textCell.appendChild(expander); + textCell.appendChild( + createFunctionNode(list[i].f.name, list[i].f.codes[0])); + + let numberCell = childRow.insertCell(-1); + numberCell.textContent = list[i].instances.length; + numberCell.style.textIndent = indent + "em"; + + expander.textContent = "\u25B8"; + expander.onclick = () => { + expandDeoptInstances( + childRow, expander, list[i].instances, indent + 1); + }; + } + makeCollapsible(row, expander); + } + + function expandOptimizedFunctionList(row, expander, list, indent, kind) { + let index = row.rowIndex; + for (let i = 0; i < list.length; i++) { + let childRow = rows.insertRow(index + 1); + childRow.id = row.id + i + "/"; + + let textCell = childRow.insertCell(-1); + textCell.appendChild( + createFunctionNode(list[i].f.name, list[i].f.codes[0])); + textCell.style.textIndent = indent + "em"; + + let numberCell = childRow.insertCell(-1); + numberCell.textContent = list[i].instances.length; + numberCell.style.textIndent = indent + "em"; + } + makeCollapsible(row, expander); + } + + function addExpandableRow(text, list, indent, kind) { + let row = rows.insertRow(-1); + + row.id = "opt-table/" + kind + "/"; + + let textCell = row.insertCell(-1); + let expander = createTableExpander(indent); + textCell.appendChild(expander); + textCell.appendChild(document.createTextNode(text)); + + let numberCell = row.insertCell(-1); + numberCell.textContent = list.count; + if (indent) { + numberCell.style.textIndent = indent + "em"; + } + + if (list.count > 0) { + 
expander.textContent = "\u25B8"; + if (kind === "opt") { + expander.onclick = () => { + expandOptimizedFunctionList( + row, expander, list.functions, indent + 1, kind); + }; + } else { + expander.onclick = () => { + expandDeoptFunctionList( + row, expander, list.functions, indent + 1, kind); + }; + } + } + return row; + } + + addRow("Total function count:", stats.functionCount); + addRow("Optimized function count:", stats.optimizedFunctionCount, 1); + addRow("Deoptimized function count:", stats.deoptimizedFunctionCount, 2); + + addExpandableRow("Optimization count:", stats.optimizations, 0, "opt"); + let deoptCount = stats.eagerDeoptimizations.count + + stats.softDeoptimizations.count + stats.lazyDeoptimizations.count; + addRow("Deoptimization count:", deoptCount); + addExpandableRow("Eager:", stats.eagerDeoptimizations, 1, "eager"); + addExpandableRow("Lazy:", stats.lazyDeoptimizations, 1, "lazy"); + addExpandableRow("Soft:", stats.softDeoptimizations, 1, "soft"); + + table.appendChild(rows); + this.element.appendChild(table); + } +} + +class HelpView { + constructor() { + this.element = $("help"); + } + + render(newState) { + this.element.style.display = newState.file ? "none" : "inherit"; + } +} diff --git a/deps/v8/tools/release/PRESUBMIT.py b/deps/v8/tools/release/PRESUBMIT.py new file mode 100644 index 00000000000000..dd831f6721eeb3 --- /dev/null +++ b/deps/v8/tools/release/PRESUBMIT.py @@ -0,0 +1,8 @@ +# Copyright 2017 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +def CheckChangeOnCommit(input_api, output_api): + tests = input_api.canned_checks.GetUnitTestsInDirectory( + input_api, output_api, '.', whitelist=['test_scripts.py$']) + return input_api.RunTests(tests) diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py index d1a3f48cf8c36d..da4cc7efea427a 100755 --- a/deps/v8/tools/release/auto_roll.py +++ b/deps/v8/tools/release/auto_roll.py @@ -20,7 +20,7 @@ https://v8-roll.appspot.com/ This only works with a Google account. -CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel""") +CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel;master.tryserver.chromium.android:android_optional_gpu_tests_rel""") class Preparation(Step): MESSAGE = "Preparation." diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py index 5c03236223779b..8ef77c25ab34be 100644 --- a/deps/v8/tools/release/common_includes.py +++ b/deps/v8/tools/release/common_includes.py @@ -611,7 +611,7 @@ def WaitForLGTM(self): def WaitForResolvingConflicts(self, patch_file): print("Applying the patch \"%s\" failed. 
Either type \"ABORT\", " "or resolve the conflicts, stage *all* touched files with " - "'git add', and type \"RESOLVED\"") + "'git add', and type \"RESOLVED\"" % (patch_file)) self.DieNoManualMode() answer = "" while answer != "RESOLVED": @@ -767,7 +767,7 @@ def RunStep(self): reviewer = self.ReadLine() self.GitUpload(reviewer, self._options.author, self._options.force_upload, bypass_hooks=self._options.bypass_upload_hooks, - cc=self._options.cc) + cc=self._options.cc, use_gerrit=not self._options.rietveld) def MakeStep(step_class=Step, number=0, state=None, config=None, @@ -820,6 +820,8 @@ def MakeOptions(self, args=None): help="File to write results summary to.") parser.add_argument("-r", "--reviewer", default="", help="The account name to be used for reviews.") + parser.add_argument("--rietveld", default=False, action="store_true", + help="Whether to use rietveld instead of gerrit.") parser.add_argument("-s", "--step", help="Specify the step where to start work. Default: 0.", default=0, type=int) diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py index b4c9de6d6cb338..e688ecb95353c4 100644 --- a/deps/v8/tools/release/git_recipes.py +++ b/deps/v8/tools/release/git_recipes.py @@ -206,7 +206,7 @@ def GitApplyPatch(self, patch_file, reverse=False, **kwargs): self.Git(MakeArgs(args), **kwargs) def GitUpload(self, reviewer="", author="", force=False, cq=False, - bypass_hooks=False, cc="", **kwargs): + bypass_hooks=False, cc="", use_gerrit=False, **kwargs): args = ["cl upload --send-mail"] if author: args += ["--email", Quoted(author)] @@ -220,6 +220,8 @@ def GitUpload(self, reviewer="", author="", force=False, cq=False, args.append("--bypass-hooks") if cc: args += ["--cc", Quoted(cc)] + if use_gerrit: + args += ["--gerrit"] # TODO(machenbach): Check output in forced mode. Verify that all required # base files were uploaded, if not retry. self.Git(MakeArgs(args), pipe=False, **kwargs) @@ -242,10 +244,6 @@ def GitCLLand(self, **kwargs): self.Git( "cl land -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs) - def GitCLAddComment(self, message, **kwargs): - args = ["cl", "comments", "-a", Quoted(message)] - self.Git(MakeArgs(args), **kwargs) - def GitDiff(self, loc1, loc2, **kwargs): return self.Git(MakeArgs(["diff", loc1, loc2]), **kwargs) diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py index bdc94ebd09164a..802409436e551e 100755 --- a/deps/v8/tools/release/merge_to_branch.py +++ b/deps/v8/tools/release/merge_to_branch.py @@ -166,17 +166,6 @@ def RunStep(self): TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE")) self.GitCommit(file_name=self.Config("COMMITMSG_FILE")) -class AddInformationalComment(Step): - MESSAGE = 'Show additional information.' - - def RunStep(self): - message = ("NOTE: This script will no longer automatically " - "update include/v8-version.h " - "and create a tag. This is done automatically by the autotag bot. " - "Please call the merge_to_branch.py with --help for more information.") - - self.GitCLAddComment(message) - class CommitRepository(Step): MESSAGE = "Commit to the repository." 
@@ -262,7 +251,6 @@ def _Steps(self): ApplyPatches, CommitLocal, UploadStep, - AddInformationalComment, CommitRepository, CleanUp, ] diff --git a/deps/v8/tools/release/mergeinfo.py b/deps/v8/tools/release/mergeinfo.py index 7f8b9cbaf4cca1..1e29ece90906bf 100755 --- a/deps/v8/tools/release/mergeinfo.py +++ b/deps/v8/tools/release/mergeinfo.py @@ -6,6 +6,7 @@ import argparse import os import sys +import re from search_related_commits import git_execute @@ -25,10 +26,9 @@ def describe_commit(git_working_dir, hash_to_search, one_line=False): def get_followup_commits(git_working_dir, hash_to_search): - return git_execute(git_working_dir, ['log', - '--grep=' + hash_to_search, - GIT_OPTION_HASH_ONLY, - 'master']).strip().splitlines() + cmd = ['log', '--grep=' + hash_to_search, GIT_OPTION_HASH_ONLY, + 'remotes/origin/master']; + return git_execute(git_working_dir, cmd).strip().splitlines() def get_merge_commits(git_working_dir, hash_to_search): merges = get_related_commits_not_on_master(git_working_dir, hash_to_search) @@ -45,7 +45,7 @@ def get_related_commits_not_on_master(git_working_dir, grep_command): GIT_OPTION_ONELINE, '--decorate', '--not', - 'master', + 'remotes/origin/master', GIT_OPTION_HASH_ONLY]) return commits.splitlines() @@ -57,12 +57,10 @@ def get_branches_for_commit(git_working_dir, hash_to_search): branches = branches.splitlines() return map(str.strip, branches) -def is_lkgr(git_working_dir, hash_to_search): - branches = get_branches_for_commit(git_working_dir, hash_to_search) +def is_lkgr(branches): return 'remotes/origin/lkgr' in branches -def get_first_canary(git_working_dir, hash_to_search): - branches = get_branches_for_commit(git_working_dir, hash_to_search) +def get_first_canary(branches): canaries = ([currentBranch for currentBranch in branches if currentBranch.startswith('remotes/origin/chromium/')]) canaries.sort() @@ -70,15 +68,25 @@ def get_first_canary(git_working_dir, hash_to_search): return 'No Canary coverage' return canaries[0].split('/')[-1] +def get_first_v8_version(branches): + version_re = re.compile("remotes/origin/[0-9]+\.[0-9]+\.[0-9]+") + versions = filter(lambda branch: version_re.match(branch), branches) + if len(versions) == 0: + return "--" + version = versions[0].split("/")[-1] + return version + def print_analysis(git_working_dir, hash_to_search): print '1.) Searching for "' + hash_to_search + '"' print '=====================ORIGINAL COMMIT START===================' print describe_commit(git_working_dir, hash_to_search) print '=====================ORIGINAL COMMIT END=====================' print '2.) General information:' - print 'Is LKGR: ' + str(is_lkgr(git_working_dir, hash_to_search)) - print 'Is on Canary: ' + ( - str(get_first_canary(git_working_dir, hash_to_search))) + branches = get_branches_for_commit(git_working_dir, hash_to_search) + print 'Is LKGR: ' + str(is_lkgr(branches)) + print 'Is on Canary: ' + str(get_first_canary(branches)) + print 'First V8 branch: ' + str(get_first_v8_version(branches)) + \ + ' (Might not be the rolled version)' print '3.) 
Found follow-up commits, reverts and ports:' followups = get_followup_commits(git_working_dir, hash_to_search) for followup in followups: diff --git a/deps/v8/tools/release/script_test.py b/deps/v8/tools/release/script_test.py index cbb2134f6d92f9..b9a17e97fa3baf 100755 --- a/deps/v8/tools/release/script_test.py +++ b/deps/v8/tools/release/script_test.py @@ -43,7 +43,6 @@ def Main(argv): alltests = map(unittest.TestLoader().loadTestsFromTestCase, [ test_scripts.ToplevelTest, test_scripts.ScriptTest, - test_scripts.SystemTest, ]) unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(alltests)) cov.stop() diff --git a/deps/v8/tools/release/test_mergeinfo.py b/deps/v8/tools/release/test_mergeinfo.py index d455fa23748011..f8619bb2fdbd5a 100755 --- a/deps/v8/tools/release/test_mergeinfo.py +++ b/deps/v8/tools/release/test_mergeinfo.py @@ -30,12 +30,19 @@ def _execute_git(self, git_args): raise Exception(err) return output + def _update_origin(self): + # Fetch from origin to get/update the origin/master branch + self._execute_git(['fetch', 'origin']) + def setUp(self): if path.exists(self.base_dir): shutil.rmtree(self.base_dir) check_call(["git", "init", self.base_dir]) + # Add fake remote with name 'origin' + self._execute_git(['remote', 'add', 'origin', self.base_dir]) + # Initial commit message = '''Initial commit''' @@ -66,8 +73,12 @@ def _get_commits(self): ["log", "--format=%H", "--reverse"]).splitlines() return commits + def _get_branches(self, hash): + return mergeinfo.get_branches_for_commit(self.base_dir, hash) + def _make_empty_commit(self, message): self._execute_git(["commit", "--allow-empty", "-m", message]) + self._update_origin() return self._get_commits()[-1] def testCanDescribeCommit(self): @@ -158,23 +169,41 @@ def testIsLkgr(self): self._execute_git(['branch', 'remotes/origin/lkgr']) hash_of_not_lkgr = self._make_empty_commit('This one is not yet lkgr') - self.assertTrue(mergeinfo.is_lkgr( - self.base_dir, hash_of_first_commit)) - self.assertFalse(mergeinfo.is_lkgr( - self.base_dir, hash_of_not_lkgr)) + branches = self._get_branches(hash_of_first_commit); + self.assertTrue(mergeinfo.is_lkgr(branches)) + branches = self._get_branches(hash_of_not_lkgr); + self.assertFalse(mergeinfo.is_lkgr(branches)) def testShowFirstCanary(self): commits = self._get_commits() hash_of_first_commit = commits[0] - self.assertEqual(mergeinfo.get_first_canary( - self.base_dir, hash_of_first_commit), 'No Canary coverage') + branches = self._get_branches(hash_of_first_commit); + self.assertEqual(mergeinfo.get_first_canary(branches), 'No Canary coverage') self._execute_git(['branch', 'remotes/origin/chromium/2345']) self._execute_git(['branch', 'remotes/origin/chromium/2346']) - self.assertEqual(mergeinfo.get_first_canary( - self.base_dir, hash_of_first_commit), '2345') + branches = self._get_branches(hash_of_first_commit); + self.assertEqual(mergeinfo.get_first_canary(branches), '2345') + + def testFirstV8Version(self): + commits = self._get_commits() + hash_of_first_commit = commits[0] + + self._execute_git(['branch', 'remotes/origin/chromium/2345']) + self._execute_git(['branch', 'remotes/origin/chromium/2346']) + branches = self._get_branches(hash_of_first_commit); + self.assertEqual(mergeinfo.get_first_v8_version(branches), '--') + + self._execute_git(['branch', 'remotes/origin/5.7.1']) + self._execute_git(['branch', 'remotes/origin/5.8.1']) + branches = self._get_branches(hash_of_first_commit); + self.assertEqual(mergeinfo.get_first_v8_version(branches), '5.7.1') + + 
self._execute_git(['branch', 'remotes/origin/5.6.1']) + branches = self._get_branches(hash_of_first_commit); + self.assertEqual(mergeinfo.get_first_v8_version(branches), '5.6.1') if __name__ == "__main__": unittest.main() diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py old mode 100644 new mode 100755 index 0cf1affca5b81b..bfc68dc818a587 --- a/deps/v8/tools/release/test_scripts.py +++ b/deps/v8/tools/release/test_scripts.py @@ -1037,7 +1037,7 @@ def CheckVersionCommit(): https://v8-roll.appspot.com/ This only works with a Google account. -CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel +CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel;master.tryserver.chromium.android:android_optional_gpu_tests_rel TBR=reviewer@chromium.org""" @@ -1271,7 +1271,7 @@ def VerifyLand(): Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""), RL("reviewer@chromium.org"), # V8 reviewer. Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" " - "--bypass-hooks --cc \"ulan@chromium.org\"", ""), + "--bypass-hooks --cc \"ulan@chromium.org\" --gerrit", ""), Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""), RL("LGTM"), # Enter LGTM for V8 CL. Cmd("git cl presubmit", "Presubmit successfull\n"), @@ -1653,8 +1653,7 @@ def VerifyLand(): Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""), RL("reviewer@chromium.org"), # V8 reviewer. Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" " - "--bypass-hooks --cc \"ulan@chromium.org\"", ""), - Cmd("git cl comments -a \"%s\"" % info_msg, ""), + "--bypass-hooks --cc \"ulan@chromium.org\" --gerrit", ""), Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""), RL("LGTM"), # Enter LGTM for V8 CL. Cmd("git cl presubmit", "Presubmit successfull\n"), @@ -1922,30 +1921,5 @@ def ResetVersion(major, minor, build, patch=0): } self.assertEquals(expected_json, json.loads(FileToText(json_output))) - - - -class SystemTest(unittest.TestCase): - def testReload(self): - options = ScriptsBase( - TEST_CONFIG, DEFAULT_SIDE_EFFECT_HANDLER, {}).MakeOptions([]) - step = MakeStep(step_class=PrepareChangeLog, number=0, state={}, config={}, - options=options, - side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER) - body = step.Reload( -"""------------------------------------------------------------------------ -r17997 | machenbach@chromium.org | 2013-11-22 11:04:04 +0100 (...) | 6 lines - -Prepare push to trunk. Now working on version 3.23.11. - -R=danno@chromium.org - -Review URL: https://codereview.chromium.org/83173002 - -------------------------------------------------------------------------""") - self.assertEquals( -"""Prepare push to trunk. Now working on version 3.23.11. - -R=danno@chromium.org - -Committed: https://code.google.com/p/v8/source/detail?r=17997""", body) +if __name__ == '__main__': + unittest.main() diff --git a/deps/v8/tools/release/test_update_node.py b/deps/v8/tools/release/test_update_node.py new file mode 100755 index 00000000000000..bff3d08c2f0612 --- /dev/null +++ b/deps/v8/tools/release/test_update_node.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +# Copyright 2017 the V8 project authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import os +import shutil +import subprocess +import sys +import tempfile +import unittest + +import update_node + +# Base paths. +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +TEST_DATA = os.path.join(BASE_DIR, 'testdata') + +# Expectations. +EXPECTED_GITIGNORE = """ +/testing/gtest/* +!/testing/gtest/include +/testing/gtest/include/* +!/testing/gtest/include/gtest +/testing/gtest/include/gtest/* +!/testing/gtest/include/gtest/gtest_prod.h +!/third_party/jinja2 +!/third_party/markupsafe +/unrelated +""" + +EXPECTED_GIT_DIFF = """ + create mode 100644 deps/v8/base/trace_event/common/common + rename deps/v8/baz/{delete_me => v8_new} (100%) + rename deps/v8/{delete_me => new/v8_new} (100%) + create mode 100644 deps/v8/third_party/jinja2/jinja2 + create mode 100644 deps/v8/third_party/markupsafe/markupsafe + create mode 100644 deps/v8/v8_new +""" + +ADDED_FILES = [ + 'v8_new', + 'new/v8_new', + 'baz/v8_new', + 'testing/gtest/gtest_new', + 'testing/gtest/new/gtest_new', + 'testing/gtest/baz/gtest_new', + 'third_party/jinja2/jinja2', + 'third_party/markupsafe/markupsafe' +] + +REMOVED_FILES = [ + 'delete_me', + 'baz/delete_me', + 'testing/gtest/delete_me', + 'testing/gtest/baz/delete_me', +] + +def gitify(path): + files = os.listdir(path) + subprocess.check_call(['git', 'init'], cwd=path) + subprocess.check_call(['git', 'add'] + files, cwd=path) + subprocess.check_call(['git', 'commit', '-m', 'Initial'], cwd=path) + + +class TestUpdateNode(unittest.TestCase): + def setUp(self): + self.workdir = tempfile.mkdtemp(prefix='tmp_test_node_') + + def tearDown(self): + shutil.rmtree(self.workdir) + + def testUpdate(self): + v8_cwd = os.path.join(self.workdir, 'v8') + node_cwd = os.path.join(self.workdir, 'node') + + # Set up V8 test fixture. + shutil.copytree(src=os.path.join(TEST_DATA, 'v8'), dst=v8_cwd) + gitify(v8_cwd) + for repository in update_node.SUB_REPOSITORIES: + gitify(os.path.join(v8_cwd, *repository)) + + # Set up node test fixture. + shutil.copytree(src=os.path.join(TEST_DATA, 'node'), dst=node_cwd) + gitify(os.path.join(node_cwd)) + + # Add a patch. + with open(os.path.join(v8_cwd, 'v8_foo'), 'w') as f: + f.write('zonk') + subprocess.check_call(['git', 'add', 'v8_foo'], cwd=v8_cwd) + + # Run update script. + update_node.Main([v8_cwd, node_cwd, "--commit", "--with-patch"]) + + # Check expectations. + with open(os.path.join(node_cwd, 'deps', 'v8', '.gitignore')) as f: + actual_gitignore = f.read() + self.assertEquals(EXPECTED_GITIGNORE.strip(), actual_gitignore.strip()) + for f in ADDED_FILES: + added_file = os.path.join(node_cwd, 'deps', 'v8', *f.split('/')) + self.assertTrue(os.path.exists(added_file)) + for f in REMOVED_FILES: + removed_file = os.path.join(node_cwd, 'deps', 'v8', *f.split('/')) + self.assertFalse(os.path.exists(removed_file)) + gitlog = subprocess.check_output( + ['git', 'diff', 'master', '--summary'], + cwd=node_cwd, + ) + self.assertEquals(EXPECTED_GIT_DIFF.strip(), gitlog.strip()) + + # Check patch. 
+ gitlog = subprocess.check_output( + ['git', 'diff', 'master', '--cached', '--', 'deps/v8/v8_foo'], + cwd=node_cwd, + ) + self.assertIn('+zonk', gitlog.strip()) + +if __name__ == "__main__": + unittest.main() diff --git a/deps/v8/tools/release/testdata/v8/.gitignore b/deps/v8/tools/release/testdata/v8/.gitignore new file mode 100644 index 00000000000000..855286229f8dea --- /dev/null +++ b/deps/v8/tools/release/testdata/v8/.gitignore @@ -0,0 +1,4 @@ +/unrelated +/testing/gtest +/third_party/jinja2 +/third_party/markupsafe \ No newline at end of file diff --git a/deps/v8/tools/release/testdata/v8/base/trace_event/common/common b/deps/v8/tools/release/testdata/v8/base/trace_event/common/common new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/deps/v8/tools/release/testdata/v8/baz/v8_foo b/deps/v8/tools/release/testdata/v8/baz/v8_foo new file mode 100644 index 00000000000000..eb1ae458f8ee6e --- /dev/null +++ b/deps/v8/tools/release/testdata/v8/baz/v8_foo @@ -0,0 +1 @@ +... diff --git a/deps/v8/tools/release/testdata/v8/baz/v8_new b/deps/v8/tools/release/testdata/v8/baz/v8_new new file mode 100644 index 00000000000000..eb1ae458f8ee6e --- /dev/null +++ b/deps/v8/tools/release/testdata/v8/baz/v8_new @@ -0,0 +1 @@ +... diff --git a/deps/v8/tools/release/testdata/v8/new/v8_new b/deps/v8/tools/release/testdata/v8/new/v8_new new file mode 100644 index 00000000000000..eb1ae458f8ee6e --- /dev/null +++ b/deps/v8/tools/release/testdata/v8/new/v8_new @@ -0,0 +1 @@ +... diff --git a/deps/v8/tools/release/testdata/v8/v8_foo b/deps/v8/tools/release/testdata/v8/v8_foo new file mode 100644 index 00000000000000..eb1ae458f8ee6e --- /dev/null +++ b/deps/v8/tools/release/testdata/v8/v8_foo @@ -0,0 +1 @@ +... diff --git a/deps/v8/tools/release/testdata/v8/v8_new b/deps/v8/tools/release/testdata/v8/v8_new new file mode 100644 index 00000000000000..eb1ae458f8ee6e --- /dev/null +++ b/deps/v8/tools/release/testdata/v8/v8_new @@ -0,0 +1 @@ +... diff --git a/deps/v8/tools/release/update_node.py b/deps/v8/tools/release/update_node.py new file mode 100755 index 00000000000000..e05f71234da4e3 --- /dev/null +++ b/deps/v8/tools/release/update_node.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +# Copyright 2017 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import argparse +import os +import shutil +import subprocess +import sys + +TARGET_SUBDIR = os.path.join("deps", "v8") + +SUB_REPOSITORIES = [ ["base", "trace_event", "common"], + ["testing", "gtest"], + ["third_party", "jinja2"], + ["third_party", "markupsafe"] ] + +DELETE_FROM_GITIGNORE = [ "/base", + "/testing/gtest", + "/third_party/jinja2", + "/third_party/markupsafe" ] + +# Node.js requires only a single header file from gtest to build V8. +# Both jinja2 and markupsafe are required to generate part of the inspector. 
+ADD_TO_GITIGNORE = [ "/testing/gtest/*", + "!/testing/gtest/include", + "/testing/gtest/include/*", + "!/testing/gtest/include/gtest", + "/testing/gtest/include/gtest/*", + "!/testing/gtest/include/gtest/gtest_prod.h", + "!/third_party/jinja2", + "!/third_party/markupsafe" ] + +def RunGclient(path): + assert os.path.isdir(path) + print ">> Running gclient sync" + subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path) + +def UninitGit(path): + target = os.path.join(path, ".git") + if os.path.isdir(target): + print ">> Cleaning up %s" % path + shutil.rmtree(target) + +def CommitPatch(options): + """Makes a dummy commit for the changes in the index. + + On trybots, bot_update applies the patch to the index. We commit it to make + the fake git clone fetch it into node.js. We can leave the commit, as + bot_update will ensure a clean state on each run. + """ + print ">> Committing patch" + subprocess.check_call( + ["git", "commit", "--allow-empty", "-m", "placeholder-commit"], + cwd=options.v8_path, + ) + +def UpdateTarget(repository, options): + source = os.path.join(options.v8_path, *repository) + target = os.path.join(options.node_path, TARGET_SUBDIR, *repository) + print ">> Updating target directory %s" % target + print ">> from active branch at %s" % source + if not os.path.exists(target): + os.makedirs(target) + # Remove possible remnants of previous incomplete runs. + UninitGit(target) + + git_commands = [ + ["git", "init"], # initialize target repo + ["git", "remote", "add", "origin", source], # point to the source repo + ["git", "fetch", "origin", "HEAD"], # sync to the current branch + ["git", "reset", "--hard", "FETCH_HEAD"], # reset to the current branch + ["git", "clean", "-fd"], # delete removed files + ] + try: + for command in git_commands: + subprocess.check_call(command, cwd=target) + except: + raise + finally: + UninitGit(target) + +def UpdateGitIgnore(options): + file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore") + assert os.path.isfile(file_name) + print ">> Updating .gitignore with lines" + with open(file_name) as gitignore: + content = gitignore.readlines() + content = [x.strip() for x in content] + for x in DELETE_FROM_GITIGNORE: + if x in content: + print "- %s" % x + content.remove(x) + for x in ADD_TO_GITIGNORE: + if x not in content: + print "+ %s" % x + content.append(x) + content.sort(key=lambda x: x[1:] if x.startswith("!") else x) + with open(file_name, "w") as gitignore: + for x in content: + gitignore.write("%s\n" % x) + +def CreateCommit(options): + print ">> Creating commit." + # Find git hash from source. + githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], + cwd=options.v8_path).strip() + # Create commit at target.
+ git_commands = [ + ["git", "checkout", "-b", "update_v8_to_%s" % githash], # new branch + ["git", "add", "."], # add files + ["git", "commit", "-m", "Update V8 to %s" % githash] # new commit + ] + for command in git_commands: + subprocess.check_call(command, cwd=options.node_path) + +def ParseOptions(args): + parser = argparse.ArgumentParser(description="Update V8 in Node.js") + parser.add_argument("v8_path", help="Path to V8 checkout") + parser.add_argument("node_path", help="Path to Node.js checkout") + parser.add_argument("--gclient", action="store_true", help="Run gclient sync") + parser.add_argument("--commit", action="store_true", help="Create commit") + parser.add_argument("--with-patch", action="store_true", + help="Apply also staged files") + options = parser.parse_args(args) + assert os.path.isdir(options.v8_path) + options.v8_path = os.path.abspath(options.v8_path) + assert os.path.isdir(options.node_path) + options.node_path = os.path.abspath(options.node_path) + return options + +def Main(args): + options = ParseOptions(args) + if options.gclient: + RunGclient(options.v8_path) + # Commit patch on trybots to main V8 repository. + if options.with_patch: + CommitPatch(options) + # Update main V8 repository. + UpdateTarget([""], options) + # Patch .gitignore before updating sub-repositories. + UpdateGitIgnore(options) + for repo in SUB_REPOSITORIES: + UpdateTarget(repo, options) + if options.commit: + CreateCommit(options) + +if __name__ == "__main__": + Main(sys.argv[1:]) diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py index 55e33f9a920218..0b1675b75e6ced 100755 --- a/deps/v8/tools/run-tests.py +++ b/deps/v8/tools/run-tests.py @@ -105,12 +105,12 @@ TIMEOUT_DEFAULT = 60 # Variants ordered by expected runtime (slowest first). -VARIANTS = ["ignition_staging", "default", "turbofan"] +VARIANTS = ["default", "noturbofan"] MORE_VARIANTS = [ "stress", - "turbofan_opt", - "ignition", + "noturbofan_stress", + "nooptimization", "asm_wasm", "wasm_traps", ] @@ -123,7 +123,7 @@ # Additional variants, run on all bots. "more": MORE_VARIANTS, # Additional variants, run on a subset of bots. - "extra": ["nocrankshaft"], + "extra": ["fullcode"], } DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination", @@ -263,9 +263,6 @@ def BuildOptions(): result.add_option("--download-data-only", help="Deprecated", default=False, action="store_true") - result.add_option("--enable-inspector", - help="Indicates a build with inspector support", - default=False, action="store_true") result.add_option("--extra-flags", help="Additional flags to pass to each test command", default="") @@ -323,6 +320,8 @@ def BuildOptions(): default=False, action="store_true") result.add_option("--json-test-results", help="Path to a file for storing json results.") + result.add_option("--flakiness-results", + help="Path to a file for storing flakiness json.") result.add_option("--rerun-failures-count", help=("Number of times to rerun each failing test case. " "Very slow tests will be rerun only once."), @@ -404,7 +403,7 @@ def SetupEnvironment(options): ) if options.asan: - asan_options = [symbolizer] + asan_options = [symbolizer, "allow_user_segv_handler=1"] if not utils.GuessOS() == 'macos': # LSAN is not available on mac. 
asan_options.append('detect_leaks=1') @@ -494,7 +493,6 @@ def ProcessOptions(options): options.arch = 'ia32' options.asan = build_config["is_asan"] options.dcheck_always_on = build_config["dcheck_always_on"] - options.enable_inspector = build_config["v8_enable_inspector"] options.mode = 'debug' if build_config["is_debug"] else 'release' options.msan = build_config["is_msan"] options.no_i18n = not build_config["v8_enable_i18n_support"] @@ -621,13 +619,6 @@ def CheckTestMode(name, option): if options.no_i18n: TEST_MAP["bot_default"].remove("intl") TEST_MAP["default"].remove("intl") - if not options.enable_inspector: - TEST_MAP["default"].remove("inspector") - TEST_MAP["bot_default"].remove("inspector") - TEST_MAP["optimize_for_size"].remove("inspector") - TEST_MAP["default"].remove("debugger") - TEST_MAP["bot_default"].remove("debugger") - TEST_MAP["optimize_for_size"].remove("debugger") return True @@ -878,6 +869,9 @@ def iter_seed_flags(): progress_indicator.Register(progress.JsonTestProgressIndicator( options.json_test_results, arch, MODES[mode]["execution_mode"], ctx.random_seed)) + if options.flakiness_results: + progress_indicator.Register(progress.FlakinessTestProgressIndicator( + options.flakiness_results)) run_networked = not options.no_network if not run_networked: diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py index 9e93d41cd46745..59669c6bbcacd0 100755 --- a/deps/v8/tools/run_perf.py +++ b/deps/v8/tools/run_perf.py @@ -732,7 +732,8 @@ def PreExecution(self): def PostExecution(self): perf = perf_control.PerfControl(self.device) perf.SetDefaultPerfMode() - self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR]) + self.device.RemovePath( + AndroidPlatform.DEVICE_DIR, force=True, recursive=True) def _PushFile(self, host_dir, file_name, target_rel=".", skip_if_missing=False): @@ -784,7 +785,7 @@ def _PushExecutable(self, shell_dir, target_dir, binary): ) self._PushFile( shell_dir, - "snapshot_blob_ignition.bin", + "icudtl.dat", target_dir, skip_if_missing=True, ) @@ -830,6 +831,7 @@ def _Run(self, runnable, count, no_patch=False): output = self.device.RunShellCommand( cmd, cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel), + check_return=True, timeout=runnable.timeout, retries=0, ) @@ -1039,7 +1041,8 @@ def Main(args): if options.outdir_no_patch: print "specify either binary-override-path or outdir-no-patch" return 1 - options.shell_dir = os.path.dirname(options.binary_override_path) + options.shell_dir = os.path.abspath( + os.path.dirname(options.binary_override_path)) default_binary_name = os.path.basename(options.binary_override_path) if options.outdir_no_patch: @@ -1048,6 +1051,17 @@ def Main(args): else: options.shell_dir_no_patch = None + if options.json_test_results: + options.json_test_results = os.path.abspath(options.json_test_results) + + if options.json_test_results_no_patch: + options.json_test_results_no_patch = os.path.abspath( + options.json_test_results_no_patch) + + # Ensure all arguments have absolute path before we start changing current + # directory. + args = map(os.path.abspath, args) + prev_aslr = None prev_cpu_gov = None platform = Platform.GetPlatform(options) @@ -1057,8 +1071,6 @@ def Main(args): with CustomMachineConfiguration(governor = options.cpu_governor, disable_aslr = options.noaslr) as conf: for path in args: - path = os.path.abspath(path) - if not os.path.exists(path): # pragma: no cover results.errors.append("Configuration file %s does not exist." 
% path) continue diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py index 94b892c34e1997..b2dc74e4d49083 100644 --- a/deps/v8/tools/testrunner/local/commands.py +++ b/deps/v8/tools/testrunner/local/commands.py @@ -50,7 +50,7 @@ def Win32SetErrorMode(mode): return prev_error_mode -def RunProcess(verbose, timeout, args, **rest): +def RunProcess(verbose, timeout, args, additional_env, **rest): if verbose: print "#", " ".join(args) popen_args = args prev_error_mode = SEM_INVALID_VALUE @@ -64,6 +64,7 @@ def RunProcess(verbose, timeout, args, **rest): Win32SetErrorMode(error_mode | prev_error_mode) env = os.environ.copy() + env.update(additional_env) # GTest shard information is read by the V8 tests runner. Make sure it # doesn't leak into the execution of gtests we're wrapping. Those might # otherwise apply a second level of sharding and as a result skip tests. @@ -126,6 +127,6 @@ def kill_process(process, timeout_result): ) -def Execute(args, verbose=False, timeout=None): +def Execute(args, verbose=False, timeout=None, env=None): args = [ c for c in args if c != "" ] - return RunProcess(verbose, timeout, args=args) + return RunProcess(verbose, timeout, args, env or {}) diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py index 6adfd09b91b61f..d5b519aadbdc50 100644 --- a/deps/v8/tools/testrunner/local/execution.py +++ b/deps/v8/tools/testrunner/local/execution.py @@ -49,11 +49,12 @@ class Instructions(object): - def __init__(self, command, test_id, timeout, verbose): + def __init__(self, command, test_id, timeout, verbose, env): self.command = command self.id = test_id self.timeout = timeout self.verbose = verbose + self.env = env # Structure that keeps global information per worker process. @@ -111,7 +112,7 @@ def _GetInstructions(test, context): # the like. 
if statusfile.IsSlow(test.outcomes or [statusfile.PASS]): timeout *= 2 - return Instructions(command, test.id, timeout, context.verbose) + return Instructions(command, test.id, timeout, context.verbose, test.env) class Job(object): @@ -178,7 +179,8 @@ def Run(self, process_context): return SetupProblem(e, self.test) start_time = time.time() - output = commands.Execute(instr.command, instr.verbose, instr.timeout) + output = commands.Execute(instr.command, instr.verbose, instr.timeout, + instr.env) self._rename_coverage_data(output, process_context.context) return (instr.id, output, time.time() - start_time) diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py index 33e27e154b31b2..6321cadece8b59 100644 --- a/deps/v8/tools/testrunner/local/progress.py +++ b/deps/v8/tools/testrunner/local/progress.py @@ -380,6 +380,58 @@ def HasRun(self, test, has_unexpected_output): }) +class FlakinessTestProgressIndicator(ProgressIndicator): + + def __init__(self, json_test_results): + self.json_test_results = json_test_results + self.results = {} + self.summary = { + "PASS": 0, + "FAIL": 0, + "CRASH": 0, + "TIMEOUT": 0, + } + self.seconds_since_epoch = time.time() + + def Done(self): + with open(self.json_test_results, "w") as f: + json.dump({ + "interrupted": False, + "num_failures_by_type": self.summary, + "path_delimiter": "/", + "seconds_since_epoch": self.seconds_since_epoch, + "tests": self.results, + "version": 3, + }, f) + + def HasRun(self, test, has_unexpected_output): + key = "/".join( + sorted(flag.lstrip("-") + for flag in self.runner.context.extra_flags + test.flags) + + ["test", test.GetLabel()], + ) + outcome = test.suite.GetOutcome(test) + assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"] + if test.run == 1: + # First run of this test. + expected_outcomes = ([ + expected + for expected in (test.outcomes or ["PASS"]) + if expected in ["PASS", "FAIL", "CRASH", "TIMEOUT"] + ] or ["PASS"]) + self.results[key] = { + "actual": outcome, + "expected": " ".join(expected_outcomes), + "times": [test.duration], + } + self.summary[outcome] = self.summary[outcome] + 1 + else: + # This is a rerun and a previous result exists. + result = self.results[key] + result["actual"] = "%s %s" % (result["actual"], outcome) + result["times"].append(test.duration) + + PROGRESS_INDICATORS = { 'verbose': VerboseProgressIndicator, 'dots': DotsProgressIndicator, diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py index 4d1c6a3c94b9b2..2ad00cff2ab154 100644 --- a/deps/v8/tools/testrunner/local/variants.py +++ b/deps/v8/tools/testrunner/local/variants.py @@ -8,12 +8,14 @@ "stress": [["--stress-opt", "--always-opt"]], "turbofan": [["--turbo"]], "turbofan_opt": [["--turbo", "--always-opt"]], - "nocrankshaft": [["--nocrankshaft"]], - "ignition": [["--ignition"]], - "ignition_staging": [["--ignition-staging"]], - "ignition_turbofan": [["--ignition-staging", "--turbo"]], - "asm_wasm": [["--validate-asm"]], - "wasm_traps": [["--wasm_guard_pages", "--invoke-weak-callbacks"]], + "noturbofan": [["--no-turbo"]], + "noturbofan_stress": [["--no-turbo", "--stress-opt", "--always-opt"]], + "fullcode": [["--nocrankshaft", "--no-turbo"]], + # No optimization actually means no profile guided optimization - + # %OptimizeFunctionOnNextCall still works. 
+ "nooptimization": [["--nocrankshaft"]], + "asm_wasm": [["--validate-asm", "--fast-validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]], + "wasm_traps": [["--wasm_guard_pages", "--wasm_trap_handler", "--invoke-weak-callbacks"]], } # FAST_VARIANTS implies no --always-opt. @@ -21,14 +23,16 @@ "default": [[]], "stress": [["--stress-opt"]], "turbofan": [["--turbo"]], - "nocrankshaft": [["--nocrankshaft"]], - "ignition": [["--ignition"]], - "ignition_staging": [["--ignition-staging"]], - "ignition_turbofan": [["--ignition-staging", "--turbo"]], - "asm_wasm": [["--validate-asm"]], - "wasm_traps": [["--wasm_guard_pages", "--invoke-weak-callbacks"]], + "noturbofan": [["--no-turbo"]], + "noturbofan_stress": [["--no-turbo", "--stress-opt"]], + "fullcode": [["--nocrankshaft", "--no-turbo"]], + # No optimization actually means no profile guided optimization - + # %OptimizeFunctionOnNextCall still works. + "nooptimization": [["--nocrankshaft"]], + "asm_wasm": [["--validate-asm", "--fast-validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]], + "wasm_traps": [["--wasm_guard_pages", "--wasm_trap_handler", "--invoke-weak-callbacks"]], } ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt", - "nocrankshaft", "ignition", "ignition_staging", - "ignition_turbofan", "asm_wasm", "wasm_traps"]) + "noturbofan", "noturbofan_stress", "fullcode", + "nooptimization", "asm_wasm", "wasm_traps"]) diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py index 00722d768b3b44..37e3cb4ec2bd14 100644 --- a/deps/v8/tools/testrunner/objects/testcase.py +++ b/deps/v8/tools/testrunner/objects/testcase.py @@ -41,11 +41,13 @@ def __init__(self, suite, path, variant=None, flags=None, self.id = None # int, used to map result back to TestCase instance self.duration = None # assigned during execution self.run = 1 # The nth time this test is executed. 
+ self.env = {} def CopyAddingFlags(self, variant, flags): copy = TestCase(self.suite, self.path, variant, self.flags + flags, self.override_shell) copy.outcomes = self.outcomes + copy.env = self.env return copy def PackTask(self): @@ -56,7 +58,7 @@ def PackTask(self): assert self.id is not None return [self.suitename(), self.path, self.variant, self.flags, self.override_shell, list(self.outcomes or []), - self.id] + self.id, self.env] @staticmethod def UnpackTask(task): @@ -66,6 +68,7 @@ def UnpackTask(task): test.outcomes = frozenset(task[5]) test.id = task[6] test.run = 1 + test.env = task[7] return test def SetSuiteObject(self, suites): diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html index a785a6e3c86996..3cb4a0b2c362dd 100644 --- a/deps/v8/tools/tick-processor.html +++ b/deps/v8/tools/tick-processor.html @@ -86,7 +86,7 @@ stateFilter: null, callGraphSize: 5, ignoreUnknown: false, - separateIc: false, + separateIc: true, targetRootFS: '', nm: 'nm' }; diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js index be374c9b184040..58844c127ee24e 100644 --- a/deps/v8/tools/tickprocessor-driver.js +++ b/deps/v8/tools/tickprocessor-driver.js @@ -64,6 +64,9 @@ if (params.sourceMap) { var tickProcessor = new TickProcessor( new (entriesProviders[params.platform])(params.nm, params.targetRootFS), params.separateIc, + params.separateBytecodes, + params.separateBuiltins, + params.separateStubs, params.callGraphSize, params.ignoreUnknown, params.stateFilter, @@ -73,6 +76,7 @@ var tickProcessor = new TickProcessor( params.timedRange, params.pairwiseTimedRange, params.onlySummary, - params.runtimeTimerFilter); + params.runtimeTimerFilter, + params.preprocessJson); tickProcessor.processLogFile(params.logFileName); tickProcessor.printStatistics(); diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js index 51b5ae6fea2bb4..91b5436eb5432f 100644 --- a/deps/v8/tools/tickprocessor.js +++ b/deps/v8/tools/tickprocessor.js @@ -30,17 +30,31 @@ function inherits(childCtor, parentCtor) { }; -function V8Profile(separateIc) { +function V8Profile(separateIc, separateBytecodes, separateBuiltins, + separateStubs) { Profile.call(this); - if (!separateIc) { - this.skipThisFunction = function(name) { return V8Profile.IC_RE.test(name); }; + var regexps = []; + if (!separateIc) regexps.push(V8Profile.IC_RE); + if (!separateBytecodes) regexps.push(V8Profile.BYTECODES_RE); + if (!separateBuiltins) regexps.push(V8Profile.BUILTINS_RE); + if (!separateStubs) regexps.push(V8Profile.STUBS_RE); + if (regexps.length > 0) { + this.skipThisFunction = function(name) { + for (var i=0; i"; - view.divNode.innerHTML = newHtml; - } else { - var newHtml = - "
" + sourceText + "
"; - view.divNode.innerHTML = newHtml; + var codePre = document.createElement("pre"); + codePre.classList.add("prettyprint"); + view.divNode.innerHTML = ""; + view.divNode.appendChild(codePre); + if (sourceText != "") { + codePre.classList.add("linenums"); + codePre.textContent = sourceText; try { // Wrap in try to work when offline. view.PR.prettyPrint(); diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py index a294b0b4bc4624..d43291abe82709 100644 --- a/deps/v8/tools/v8heapconst.py +++ b/deps/v8/tools/v8heapconst.py @@ -1,133 +1,109 @@ -# Copyright 2013 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# This file is automatically generated from the V8 source and should not -# be modified manually, run 'make grokdump' instead to update this file. +# Copyright 2017 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can +# be found in the LICENSE file. # List of known V8 instance types. 
INSTANCE_TYPES = { 0: "INTERNALIZED_STRING_TYPE", 2: "EXTERNAL_INTERNALIZED_STRING_TYPE", - 4: "ONE_BYTE_INTERNALIZED_STRING_TYPE", - 6: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE", - 10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE", - 18: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE", - 22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE", - 26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE", + 8: "ONE_BYTE_INTERNALIZED_STRING_TYPE", + 10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE", + 18: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE", + 34: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE", + 42: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE", + 50: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE", 64: "STRING_TYPE", 65: "CONS_STRING_TYPE", 66: "EXTERNAL_STRING_TYPE", 67: "SLICED_STRING_TYPE", - 68: "ONE_BYTE_STRING_TYPE", - 69: "CONS_ONE_BYTE_STRING_TYPE", - 70: "EXTERNAL_ONE_BYTE_STRING_TYPE", - 71: "SLICED_ONE_BYTE_STRING_TYPE", - 74: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE", - 82: "SHORT_EXTERNAL_STRING_TYPE", - 86: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE", - 90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE", + 69: "THIN_STRING_TYPE", + 72: "ONE_BYTE_STRING_TYPE", + 73: "CONS_ONE_BYTE_STRING_TYPE", + 74: "EXTERNAL_ONE_BYTE_STRING_TYPE", + 75: "SLICED_ONE_BYTE_STRING_TYPE", + 77: "THIN_ONE_BYTE_STRING_TYPE", + 82: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE", + 98: "SHORT_EXTERNAL_STRING_TYPE", + 106: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE", + 114: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE", 128: "SYMBOL_TYPE", 129: "HEAP_NUMBER_TYPE", - 130: "SIMD128_VALUE_TYPE", - 131: "ODDBALL_TYPE", - 132: "MAP_TYPE", - 133: "CODE_TYPE", - 134: "MUTABLE_HEAP_NUMBER_TYPE", - 135: "FOREIGN_TYPE", - 136: "BYTE_ARRAY_TYPE", - 137: "BYTECODE_ARRAY_TYPE", - 138: "FREE_SPACE_TYPE", - 139: "FIXED_INT8_ARRAY_TYPE", - 140: "FIXED_UINT8_ARRAY_TYPE", - 141: "FIXED_INT16_ARRAY_TYPE", - 142: "FIXED_UINT16_ARRAY_TYPE", - 143: "FIXED_INT32_ARRAY_TYPE", - 144: "FIXED_UINT32_ARRAY_TYPE", - 145: "FIXED_FLOAT32_ARRAY_TYPE", - 146: "FIXED_FLOAT64_ARRAY_TYPE", - 147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE", - 148: "FIXED_DOUBLE_ARRAY_TYPE", - 149: "FILLER_TYPE", - 150: "ACCESSOR_INFO_TYPE", - 151: "ACCESSOR_PAIR_TYPE", - 152: "ACCESS_CHECK_INFO_TYPE", - 153: "INTERCEPTOR_INFO_TYPE", - 154: "CALL_HANDLER_INFO_TYPE", - 155: "FUNCTION_TEMPLATE_INFO_TYPE", - 156: "OBJECT_TEMPLATE_INFO_TYPE", - 157: "ALLOCATION_SITE_TYPE", - 158: "ALLOCATION_MEMENTO_TYPE", - 159: "SCRIPT_TYPE", - 160: "TYPE_FEEDBACK_INFO_TYPE", - 161: "ALIASED_ARGUMENTS_ENTRY_TYPE", - 162: "BOX_TYPE", - 163: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE", - 164: "PROMISE_REACTION_JOB_INFO_TYPE", - 165: "DEBUG_INFO_TYPE", - 166: "BREAK_POINT_INFO_TYPE", - 167: "PROTOTYPE_INFO_TYPE", - 168: "TUPLE2_TYPE", - 169: "TUPLE3_TYPE", - 170: "CONTEXT_EXTENSION_TYPE", - 171: "CONSTANT_ELEMENTS_PAIR_TYPE", - 172: "MODULE_TYPE", - 173: "MODULE_INFO_ENTRY_TYPE", - 174: "FIXED_ARRAY_TYPE", - 175: "TRANSITION_ARRAY_TYPE", - 176: "SHARED_FUNCTION_INFO_TYPE", - 177: "CELL_TYPE", - 178: "WEAK_CELL_TYPE", - 179: "PROPERTY_CELL_TYPE", - 180: "JS_PROXY_TYPE", - 181: "JS_GLOBAL_OBJECT_TYPE", - 182: "JS_GLOBAL_PROXY_TYPE", - 183: "JS_SPECIAL_API_OBJECT_TYPE", - 184: "JS_VALUE_TYPE", - 185: "JS_MESSAGE_OBJECT_TYPE", - 186: "JS_DATE_TYPE", - 187: "JS_API_OBJECT_TYPE", - 188: "JS_OBJECT_TYPE", - 189: "JS_ARGUMENTS_TYPE", - 190: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", - 191: "JS_GENERATOR_OBJECT_TYPE", - 192: "JS_MODULE_NAMESPACE_TYPE", - 193: "JS_ARRAY_TYPE", 
- 194: "JS_ARRAY_BUFFER_TYPE", - 195: "JS_TYPED_ARRAY_TYPE", - 196: "JS_DATA_VIEW_TYPE", - 197: "JS_SET_TYPE", - 198: "JS_MAP_TYPE", - 199: "JS_SET_ITERATOR_TYPE", - 200: "JS_MAP_ITERATOR_TYPE", - 201: "JS_WEAK_MAP_TYPE", - 202: "JS_WEAK_SET_TYPE", - 203: "JS_PROMISE_TYPE", - 204: "JS_REGEXP_TYPE", - 205: "JS_ERROR_TYPE", + 130: "ODDBALL_TYPE", + 131: "MAP_TYPE", + 132: "CODE_TYPE", + 133: "MUTABLE_HEAP_NUMBER_TYPE", + 134: "FOREIGN_TYPE", + 135: "BYTE_ARRAY_TYPE", + 136: "BYTECODE_ARRAY_TYPE", + 137: "FREE_SPACE_TYPE", + 138: "FIXED_INT8_ARRAY_TYPE", + 139: "FIXED_UINT8_ARRAY_TYPE", + 140: "FIXED_INT16_ARRAY_TYPE", + 141: "FIXED_UINT16_ARRAY_TYPE", + 142: "FIXED_INT32_ARRAY_TYPE", + 143: "FIXED_UINT32_ARRAY_TYPE", + 144: "FIXED_FLOAT32_ARRAY_TYPE", + 145: "FIXED_FLOAT64_ARRAY_TYPE", + 146: "FIXED_UINT8_CLAMPED_ARRAY_TYPE", + 147: "FIXED_DOUBLE_ARRAY_TYPE", + 148: "FILLER_TYPE", + 149: "ACCESSOR_INFO_TYPE", + 150: "ACCESSOR_PAIR_TYPE", + 151: "ACCESS_CHECK_INFO_TYPE", + 152: "INTERCEPTOR_INFO_TYPE", + 153: "CALL_HANDLER_INFO_TYPE", + 154: "FUNCTION_TEMPLATE_INFO_TYPE", + 155: "OBJECT_TEMPLATE_INFO_TYPE", + 156: "ALLOCATION_SITE_TYPE", + 157: "ALLOCATION_MEMENTO_TYPE", + 158: "SCRIPT_TYPE", + 159: "TYPE_FEEDBACK_INFO_TYPE", + 160: "ALIASED_ARGUMENTS_ENTRY_TYPE", + 161: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE", + 162: "PROMISE_REACTION_JOB_INFO_TYPE", + 163: "DEBUG_INFO_TYPE", + 164: "BREAK_POINT_INFO_TYPE", + 165: "PROTOTYPE_INFO_TYPE", + 166: "TUPLE2_TYPE", + 167: "TUPLE3_TYPE", + 168: "CONTEXT_EXTENSION_TYPE", + 169: "CONSTANT_ELEMENTS_PAIR_TYPE", + 170: "MODULE_TYPE", + 171: "MODULE_INFO_ENTRY_TYPE", + 172: "FIXED_ARRAY_TYPE", + 173: "TRANSITION_ARRAY_TYPE", + 174: "SHARED_FUNCTION_INFO_TYPE", + 175: "CELL_TYPE", + 176: "WEAK_CELL_TYPE", + 177: "PROPERTY_CELL_TYPE", + 178: "JS_PROXY_TYPE", + 179: "JS_GLOBAL_OBJECT_TYPE", + 180: "JS_GLOBAL_PROXY_TYPE", + 181: "JS_SPECIAL_API_OBJECT_TYPE", + 182: "JS_VALUE_TYPE", + 183: "JS_MESSAGE_OBJECT_TYPE", + 184: "JS_DATE_TYPE", + 185: "JS_API_OBJECT_TYPE", + 186: "JS_OBJECT_TYPE", + 187: "JS_ARGUMENTS_TYPE", + 188: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", + 189: "JS_GENERATOR_OBJECT_TYPE", + 190: "JS_MODULE_NAMESPACE_TYPE", + 191: "JS_ARRAY_TYPE", + 192: "JS_ARRAY_BUFFER_TYPE", + 193: "JS_TYPED_ARRAY_TYPE", + 194: "JS_DATA_VIEW_TYPE", + 195: "JS_SET_TYPE", + 196: "JS_MAP_TYPE", + 197: "JS_SET_ITERATOR_TYPE", + 198: "JS_MAP_ITERATOR_TYPE", + 199: "JS_WEAK_MAP_TYPE", + 200: "JS_WEAK_SET_TYPE", + 201: "JS_PROMISE_CAPABILITY_TYPE", + 202: "JS_PROMISE_TYPE", + 203: "JS_REGEXP_TYPE", + 204: "JS_ERROR_TYPE", + 205: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE", 206: "JS_STRING_ITERATOR_TYPE", 207: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE", 208: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE", @@ -170,185 +146,197 @@ # List of known V8 maps. 
KNOWN_MAPS = { - 0x84101: (138, "FreeSpaceMap"), - 0x8412d: (132, "MetaMap"), - 0x84159: (131, "NullMap"), - 0x84185: (174, "FixedArrayMap"), - 0x841b1: (4, "OneByteInternalizedStringMap"), - 0x841dd: (149, "OnePointerFillerMap"), - 0x84209: (149, "TwoPointerFillerMap"), - 0x84235: (131, "UninitializedMap"), - 0x84261: (131, "UndefinedMap"), - 0x8428d: (129, "HeapNumberMap"), - 0x842b9: (131, "TheHoleMap"), - 0x842e5: (131, "BooleanMap"), - 0x84311: (136, "ByteArrayMap"), - 0x8433d: (174, "FixedCOWArrayMap"), - 0x84369: (174, "HashTableMap"), - 0x84395: (128, "SymbolMap"), - 0x843c1: (68, "OneByteStringMap"), - 0x843ed: (174, "ScopeInfoMap"), - 0x84419: (176, "SharedFunctionInfoMap"), - 0x84445: (133, "CodeMap"), - 0x84471: (174, "FunctionContextMap"), - 0x8449d: (177, "CellMap"), - 0x844c9: (178, "WeakCellMap"), - 0x844f5: (179, "GlobalPropertyCellMap"), - 0x84521: (135, "ForeignMap"), - 0x8454d: (175, "TransitionArrayMap"), - 0x84579: (131, "NoInterceptorResultSentinelMap"), - 0x845a5: (131, "ArgumentsMarkerMap"), - 0x845d1: (174, "NativeContextMap"), - 0x845fd: (174, "ModuleContextMap"), - 0x84629: (174, "ScriptContextMap"), - 0x84655: (174, "BlockContextMap"), - 0x84681: (174, "CatchContextMap"), - 0x846ad: (174, "WithContextMap"), - 0x846d9: (148, "FixedDoubleArrayMap"), - 0x84705: (134, "MutableHeapNumberMap"), - 0x84731: (174, "OrderedHashTableMap"), - 0x8475d: (174, "SloppyArgumentsElementsMap"), - 0x84789: (185, "JSMessageObjectMap"), - 0x847b5: (137, "BytecodeArrayMap"), - 0x847e1: (174, "ModuleInfoMap"), - 0x8480d: (64, "StringMap"), - 0x84839: (69, "ConsOneByteStringMap"), - 0x84865: (65, "ConsStringMap"), - 0x84891: (67, "SlicedStringMap"), - 0x848bd: (71, "SlicedOneByteStringMap"), - 0x848e9: (66, "ExternalStringMap"), - 0x84915: (74, "ExternalStringWithOneByteDataMap"), - 0x84941: (70, "ExternalOneByteStringMap"), - 0x8496d: (82, "ShortExternalStringMap"), - 0x84999: (90, "ShortExternalStringWithOneByteDataMap"), - 0x849c5: (0, "InternalizedStringMap"), - 0x849f1: (2, "ExternalInternalizedStringMap"), - 0x84a1d: (10, "ExternalInternalizedStringWithOneByteDataMap"), - 0x84a49: (6, "ExternalOneByteInternalizedStringMap"), - 0x84a75: (18, "ShortExternalInternalizedStringMap"), - 0x84aa1: (26, "ShortExternalInternalizedStringWithOneByteDataMap"), - 0x84acd: (22, "ShortExternalOneByteInternalizedStringMap"), - 0x84af9: (86, "ShortExternalOneByteStringMap"), - 0x84b25: (130, "Float32x4Map"), - 0x84b51: (130, "Int32x4Map"), - 0x84b7d: (130, "Uint32x4Map"), - 0x84ba9: (130, "Bool32x4Map"), - 0x84bd5: (130, "Int16x8Map"), - 0x84c01: (130, "Uint16x8Map"), - 0x84c2d: (130, "Bool16x8Map"), - 0x84c59: (130, "Int8x16Map"), - 0x84c85: (130, "Uint8x16Map"), - 0x84cb1: (130, "Bool8x16Map"), - 0x84cdd: (131, "ExceptionMap"), - 0x84d09: (131, "TerminationExceptionMap"), - 0x84d35: (131, "OptimizedOutMap"), - 0x84d61: (131, "StaleRegisterMap"), - 0x84d8d: (174, "DebugEvaluateContextMap"), - 0x84db9: (174, "ScriptContextTableMap"), - 0x84de5: (174, "UnseededNumberDictionaryMap"), - 0x84e11: (188, "ExternalMap"), - 0x84e3d: (86, "NativeSourceStringMap"), - 0x84e69: (140, "FixedUint8ArrayMap"), - 0x84e95: (139, "FixedInt8ArrayMap"), - 0x84ec1: (142, "FixedUint16ArrayMap"), - 0x84eed: (141, "FixedInt16ArrayMap"), - 0x84f19: (144, "FixedUint32ArrayMap"), - 0x84f45: (143, "FixedInt32ArrayMap"), - 0x84f71: (145, "FixedFloat32ArrayMap"), - 0x84f9d: (146, "FixedFloat64ArrayMap"), - 0x84fc9: (147, "FixedUint8ClampedArrayMap"), - 0x84ff5: (159, "ScriptMap"), - 0x85021: (157, "AllocationSiteMap"), - 
0x8504d: (158, "AllocationMementoMap"), - 0x85079: (150, "AccessorInfoMap"), - 0x850a5: (155, "FunctionTemplateInfoMap"), - 0x850d1: (168, "Tuple2Map"), - 0x850fd: (167, "PrototypeInfoMap"), - 0x85129: (151, "AccessorPairMap"), - 0x85155: (152, "AccessCheckInfoMap"), - 0x85181: (153, "InterceptorInfoMap"), - 0x851ad: (154, "CallHandlerInfoMap"), - 0x851d9: (156, "ObjectTemplateInfoMap"), - 0x85205: (160, "TypeFeedbackInfoMap"), - 0x85231: (161, "AliasedArgumentsEntryMap"), - 0x8525d: (162, "BoxMap"), - 0x85289: (163, "PromiseResolveThenableJobInfoMap"), - 0x852b5: (164, "PromiseReactionJobInfoMap"), - 0x852e1: (165, "DebugInfoMap"), - 0x8530d: (166, "BreakPointInfoMap"), - 0x85339: (169, "Tuple3Map"), - 0x85365: (170, "ContextExtensionMap"), - 0x85391: (171, "ConstantElementsPairMap"), - 0x853bd: (172, "ModuleMap"), - 0x853e9: (173, "ModuleInfoEntryMap"), + 0x02201: (137, "FreeSpaceMap"), + 0x02259: (131, "MetaMap"), + 0x022b1: (130, "NullMap"), + 0x02309: (172, "FixedArrayMap"), + 0x02361: (8, "OneByteInternalizedStringMap"), + 0x023b9: (148, "OnePointerFillerMap"), + 0x02411: (148, "TwoPointerFillerMap"), + 0x02469: (130, "UninitializedMap"), + 0x024c1: (130, "UndefinedMap"), + 0x02519: (129, "HeapNumberMap"), + 0x02571: (130, "TheHoleMap"), + 0x025c9: (130, "BooleanMap"), + 0x02621: (135, "ByteArrayMap"), + 0x02679: (172, "FixedCOWArrayMap"), + 0x026d1: (172, "HashTableMap"), + 0x02729: (128, "SymbolMap"), + 0x02781: (72, "OneByteStringMap"), + 0x027d9: (172, "ScopeInfoMap"), + 0x02831: (174, "SharedFunctionInfoMap"), + 0x02889: (132, "CodeMap"), + 0x028e1: (172, "FunctionContextMap"), + 0x02939: (175, "CellMap"), + 0x02991: (176, "WeakCellMap"), + 0x029e9: (177, "GlobalPropertyCellMap"), + 0x02a41: (134, "ForeignMap"), + 0x02a99: (173, "TransitionArrayMap"), + 0x02af1: (130, "ArgumentsMarkerMap"), + 0x02b49: (172, "NativeContextMap"), + 0x02ba1: (172, "ModuleContextMap"), + 0x02bf9: (172, "EvalContextMap"), + 0x02c51: (172, "ScriptContextMap"), + 0x02ca9: (172, "BlockContextMap"), + 0x02d01: (172, "CatchContextMap"), + 0x02d59: (172, "WithContextMap"), + 0x02db1: (147, "FixedDoubleArrayMap"), + 0x02e09: (133, "MutableHeapNumberMap"), + 0x02e61: (172, "OrderedHashTableMap"), + 0x02eb9: (172, "SloppyArgumentsElementsMap"), + 0x02f11: (183, "JSMessageObjectMap"), + 0x02f69: (136, "BytecodeArrayMap"), + 0x02fc1: (172, "ModuleInfoMap"), + 0x03019: (175, "NoClosuresCellMap"), + 0x03071: (175, "OneClosureCellMap"), + 0x030c9: (175, "ManyClosuresCellMap"), + 0x03121: (64, "StringMap"), + 0x03179: (73, "ConsOneByteStringMap"), + 0x031d1: (65, "ConsStringMap"), + 0x03229: (77, "ThinOneByteStringMap"), + 0x03281: (69, "ThinStringMap"), + 0x032d9: (67, "SlicedStringMap"), + 0x03331: (75, "SlicedOneByteStringMap"), + 0x03389: (66, "ExternalStringMap"), + 0x033e1: (82, "ExternalStringWithOneByteDataMap"), + 0x03439: (74, "ExternalOneByteStringMap"), + 0x03491: (98, "ShortExternalStringMap"), + 0x034e9: (114, "ShortExternalStringWithOneByteDataMap"), + 0x03541: (0, "InternalizedStringMap"), + 0x03599: (2, "ExternalInternalizedStringMap"), + 0x035f1: (18, "ExternalInternalizedStringWithOneByteDataMap"), + 0x03649: (10, "ExternalOneByteInternalizedStringMap"), + 0x036a1: (34, "ShortExternalInternalizedStringMap"), + 0x036f9: (50, "ShortExternalInternalizedStringWithOneByteDataMap"), + 0x03751: (42, "ShortExternalOneByteInternalizedStringMap"), + 0x037a9: (106, "ShortExternalOneByteStringMap"), + 0x03801: (172, "FeedbackVectorMap"), + 0x03859: (130, "ExceptionMap"), + 0x038b1: (130, 
"TerminationExceptionMap"), + 0x03909: (130, "OptimizedOutMap"), + 0x03961: (130, "StaleRegisterMap"), + 0x039b9: (172, "DebugEvaluateContextMap"), + 0x03a11: (172, "ScriptContextTableMap"), + 0x03a69: (172, "UnseededNumberDictionaryMap"), + 0x03ac1: (186, "ExternalMap"), + 0x03b19: (106, "NativeSourceStringMap"), + 0x03b71: (139, "FixedUint8ArrayMap"), + 0x03bc9: (138, "FixedInt8ArrayMap"), + 0x03c21: (141, "FixedUint16ArrayMap"), + 0x03c79: (140, "FixedInt16ArrayMap"), + 0x03cd1: (143, "FixedUint32ArrayMap"), + 0x03d29: (142, "FixedInt32ArrayMap"), + 0x03d81: (144, "FixedFloat32ArrayMap"), + 0x03dd9: (145, "FixedFloat64ArrayMap"), + 0x03e31: (146, "FixedUint8ClampedArrayMap"), + 0x03e89: (158, "ScriptMap"), + 0x03ee1: (152, "InterceptorInfoMap"), + 0x03f39: (201, "JSPromiseCapabilityMap"), + 0x03f91: (149, "AccessorInfoMap"), + 0x03fe9: (150, "AccessorPairMap"), + 0x04041: (151, "AccessCheckInfoMap"), + 0x04099: (153, "CallHandlerInfoMap"), + 0x040f1: (154, "FunctionTemplateInfoMap"), + 0x04149: (155, "ObjectTemplateInfoMap"), + 0x041a1: (156, "AllocationSiteMap"), + 0x041f9: (157, "AllocationMementoMap"), + 0x04251: (159, "TypeFeedbackInfoMap"), + 0x042a9: (160, "AliasedArgumentsEntryMap"), + 0x04301: (161, "PromiseResolveThenableJobInfoMap"), + 0x04359: (162, "PromiseReactionJobInfoMap"), + 0x043b1: (163, "DebugInfoMap"), + 0x04409: (164, "BreakPointInfoMap"), + 0x04461: (165, "PrototypeInfoMap"), + 0x044b9: (166, "Tuple2Map"), + 0x04511: (167, "Tuple3Map"), + 0x04569: (168, "ContextExtensionMap"), + 0x045c1: (169, "ConstantElementsPairMap"), + 0x04619: (170, "ModuleMap"), + 0x04671: (171, "ModuleInfoEntryMap"), } # List of known V8 objects. KNOWN_OBJECTS = { - ("OLD_SPACE", 0x84101): "NullValue", - ("OLD_SPACE", 0x8411d): "EmptyDescriptorArray", - ("OLD_SPACE", 0x84125): "EmptyFixedArray", - ("OLD_SPACE", 0x84151): "UninitializedValue", - ("OLD_SPACE", 0x841a1): "UndefinedValue", - ("OLD_SPACE", 0x841bd): "NanValue", - ("OLD_SPACE", 0x841cd): "TheHoleValue", - ("OLD_SPACE", 0x841fd): "HoleNanValue", - ("OLD_SPACE", 0x84209): "TrueValue", - ("OLD_SPACE", 0x84249): "FalseValue", - ("OLD_SPACE", 0x84279): "empty_string", - ("OLD_SPACE", 0x84285): "NoInterceptorResultSentinel", - ("OLD_SPACE", 0x842cd): "ArgumentsMarker", - ("OLD_SPACE", 0x84305): "EmptyByteArray", - ("OLD_SPACE", 0x8430d): "EmptyWeakCell", - ("OLD_SPACE", 0x8431d): "InfinityValue", - ("OLD_SPACE", 0x8432d): "MinusZeroValue", - ("OLD_SPACE", 0x8433d): "MinusInfinityValue", - ("OLD_SPACE", 0x85939): "EmptyLiteralsArray", - ("OLD_SPACE", 0x85945): "EmptyTypeFeedbackVector", - ("OLD_SPACE", 0x85955): "EmptyScopeInfo", - ("OLD_SPACE", 0x8595d): "Exception", - ("OLD_SPACE", 0x85991): "TerminationException", - ("OLD_SPACE", 0x859d1): "OptimizedOut", - ("OLD_SPACE", 0x85a09): "StaleRegister", - ("OLD_SPACE", 0x85a41): "EmptyFixedUint8Array", - ("OLD_SPACE", 0x85a51): "EmptyFixedInt8Array", - ("OLD_SPACE", 0x85a61): "EmptyFixedUint16Array", - ("OLD_SPACE", 0x85a71): "EmptyFixedInt16Array", - ("OLD_SPACE", 0x85a81): "EmptyFixedUint32Array", - ("OLD_SPACE", 0x85a91): "EmptyFixedInt32Array", - ("OLD_SPACE", 0x85aa1): "EmptyFixedFloat32Array", - ("OLD_SPACE", 0x85ab1): "EmptyFixedFloat64Array", - ("OLD_SPACE", 0x85ac1): "EmptyFixedUint8ClampedArray", - ("OLD_SPACE", 0x85ad1): "EmptyScript", - ("OLD_SPACE", 0x85b11): "UndefinedCell", - ("OLD_SPACE", 0x85b19): "EmptySloppyArgumentsElements", - ("OLD_SPACE", 0x85b29): "EmptySlowElementDictionary", - ("OLD_SPACE", 0x85b75): "DummyVector", - ("OLD_SPACE", 0x85bb9): "EmptyPropertyCell", - 
("OLD_SPACE", 0x85bc9): "ArrayProtector", - ("OLD_SPACE", 0x85bd9): "IsConcatSpreadableProtector", - ("OLD_SPACE", 0x85be1): "HasInstanceProtector", - ("OLD_SPACE", 0x85bf1): "SpeciesProtector", - ("OLD_SPACE", 0x85bf9): "StringLengthProtector", - ("OLD_SPACE", 0x85c09): "FastArrayIterationProtector", - ("OLD_SPACE", 0x85c11): "ArrayIteratorProtector", - ("OLD_SPACE", 0x85c19): "ArrayBufferNeuteringProtector", - ("OLD_SPACE", 0x85c29): "NumberStringCache", - ("OLD_SPACE", 0x86431): "SingleCharacterStringCache", - ("OLD_SPACE", 0x86859): "StringSplitCache", - ("OLD_SPACE", 0x86c61): "RegExpMultipleCache", - ("OLD_SPACE", 0x87069): "NativesSourceCache", - ("OLD_SPACE", 0x871d1): "ExperimentalNativesSourceCache", - ("OLD_SPACE", 0x871ed): "ExtraNativesSourceCache", - ("OLD_SPACE", 0x87209): "ExperimentalExtraNativesSourceCache", - ("OLD_SPACE", 0x87215): "EmptyPropertiesDictionary", - ("OLD_SPACE", 0x87261): "ScriptList", - ("OLD_SPACE", 0x9ab99): "CodeStubs", - ("OLD_SPACE", 0xa2bd5): "WeakObjectToCodeTable", - ("OLD_SPACE", 0xa2ce9): "WeakNewSpaceObjectToCodeList", - ("OLD_SPACE", 0xa2d31): "NoScriptSharedFunctionInfos", - ("OLD_SPACE", 0xb26e9): "MessageListeners", - ("OLD_SPACE", 0xb6d75): "StringTable", - ("CODE_SPACE", 0x1aa01): "JsConstructEntryCode", - ("CODE_SPACE", 0x29ba1): "JsEntryCode", + ("OLD_SPACE", 0x02201): "NullValue", + ("OLD_SPACE", 0x02231): "EmptyDescriptorArray", + ("OLD_SPACE", 0x02241): "EmptyFixedArray", + ("OLD_SPACE", 0x02291): "UninitializedValue", + ("OLD_SPACE", 0x02311): "UndefinedValue", + ("OLD_SPACE", 0x02341): "NanValue", + ("OLD_SPACE", 0x02351): "TheHoleValue", + ("OLD_SPACE", 0x023a1): "HoleNanValue", + ("OLD_SPACE", 0x023b1): "TrueValue", + ("OLD_SPACE", 0x02421): "FalseValue", + ("OLD_SPACE", 0x02471): "empty_string", + ("OLD_SPACE", 0x02489): "ArgumentsMarker", + ("OLD_SPACE", 0x024e1): "EmptyByteArray", + ("OLD_SPACE", 0x024f1): "EmptyWeakCell", + ("OLD_SPACE", 0x02509): "InfinityValue", + ("OLD_SPACE", 0x02519): "MinusZeroValue", + ("OLD_SPACE", 0x02529): "MinusInfinityValue", + ("OLD_SPACE", 0x04979): "EmptyScopeInfo", + ("OLD_SPACE", 0x04989): "Exception", + ("OLD_SPACE", 0x049e1): "TerminationException", + ("OLD_SPACE", 0x04a41): "OptimizedOut", + ("OLD_SPACE", 0x04a99): "StaleRegister", + ("OLD_SPACE", 0x04af1): "EmptyFixedUint8Array", + ("OLD_SPACE", 0x04b11): "EmptyFixedInt8Array", + ("OLD_SPACE", 0x04b31): "EmptyFixedUint16Array", + ("OLD_SPACE", 0x04b51): "EmptyFixedInt16Array", + ("OLD_SPACE", 0x04b71): "EmptyFixedUint32Array", + ("OLD_SPACE", 0x04b91): "EmptyFixedInt32Array", + ("OLD_SPACE", 0x04bb1): "EmptyFixedFloat32Array", + ("OLD_SPACE", 0x04bd1): "EmptyFixedFloat64Array", + ("OLD_SPACE", 0x04bf1): "EmptyFixedUint8ClampedArray", + ("OLD_SPACE", 0x04c11): "EmptyScript", + ("OLD_SPACE", 0x04c99): "UndefinedCell", + ("OLD_SPACE", 0x04ca9): "EmptySloppyArgumentsElements", + ("OLD_SPACE", 0x04cc9): "EmptySlowElementDictionary", + ("OLD_SPACE", 0x04d19): "EmptyPropertyCell", + ("OLD_SPACE", 0x04d39): "ArrayProtector", + ("OLD_SPACE", 0x04d59): "IsConcatSpreadableProtector", + ("OLD_SPACE", 0x04d69): "SpeciesProtector", + ("OLD_SPACE", 0x04d79): "StringLengthProtector", + ("OLD_SPACE", 0x04d99): "FastArrayIterationProtector", + ("OLD_SPACE", 0x04da9): "ArrayIteratorProtector", + ("OLD_SPACE", 0x04dc9): "ArrayBufferNeuteringProtector", + ("OLD_SPACE", 0x04de9): "NumberStringCache", + ("OLD_SPACE", 0x05df9): "SingleCharacterStringCache", + ("OLD_SPACE", 0x06669): "StringSplitCache", + ("OLD_SPACE", 0x06e79): "RegExpMultipleCache", + 
("OLD_SPACE", 0x07689): "NativesSourceCache", + ("OLD_SPACE", 0x07931): "ExtraNativesSourceCache", + ("OLD_SPACE", 0x07969): "ExperimentalExtraNativesSourceCache", + ("OLD_SPACE", 0x07981): "EmptyPropertiesDictionary", + ("OLD_SPACE", 0x079d1): "ScriptList", + ("OLD_SPACE", 0x22019): "CodeStubs", + ("OLD_SPACE", 0x2f199): "WeakObjectToCodeTable", + ("OLD_SPACE", 0x2f3c1): "WeakNewSpaceObjectToCodeList", + ("OLD_SPACE", 0x2f451): "NoScriptSharedFunctionInfos", + ("OLD_SPACE", 0x4abd9): "MessageListeners", + ("OLD_SPACE", 0x4abf9): "NoOpInterceptorInfo", + ("OLD_SPACE", 0x531d1): "StringTable", + ("CODE_SPACE", 0x2cde1): "JsEntryCode", + ("CODE_SPACE", 0x31241): "JsConstructEntryCode", } + +# List of known V8 Frame Markers. +FRAME_MARKERS = ( + "ENTRY", + "ENTRY_CONSTRUCT", + "EXIT", + "JAVA_SCRIPT", + "OPTIMIZED", + "WASM_COMPILED", + "WASM_TO_JS", + "JS_TO_WASM", + "WASM_INTERPRETER_ENTRY", + "INTERPRETED", + "STUB", + "STUB_FAILURE_TRAMPOLINE", + "INTERNAL", + "CONSTRUCT", + "ARGUMENTS_ADAPTOR", + "BUILTIN", + "BUILTIN_EXIT", +) diff --git a/deps/v8/tools/verify_source_deps.py b/deps/v8/tools/verify_source_deps.py index a3fdb2ec7cf0be..6f804040cc6499 100755 --- a/deps/v8/tools/verify_source_deps.py +++ b/deps/v8/tools/verify_source_deps.py @@ -53,6 +53,7 @@ GYP_UNSUPPORTED_FEATURES = [ 'gcmole', + 'setup-isolate-deserialize.cc', ] GN_FILES = [ diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt index 062a517c634f63..0c3c0d7feb1ff8 100644 --- a/deps/v8/tools/whitespace.txt +++ b/deps/v8/tools/whitespace.txt @@ -6,5 +6,6 @@ A Smi balks into a war and says: "I'm so deoptimized today!" The doubles heard this and started to unbox. The Smi looked at them when a crazy v8-autoroll account showed up...... -The autoroller bought a round of Himbeerbrause. Suddenly ...... +The autoroller bought a round of Himbeerbrause. Suddenly ..... +The bartender starts to shake the bottles..... . From 613c81e408de92c707efc1b1e9a57d0abda62343 Mon Sep 17 00:00:00 2001 From: Michael Dawson Date: Mon, 9 May 2016 18:44:42 -0400 Subject: [PATCH 41/64] deps: limit regress/regress-crbug-514081 v8 test regress/regress-crbug-514081 allocates a 2G block of memory and if there are multiple variants running at the same time this can lead to crashes, OOM kills or the OS failing to allocate memory. 
This patch limits us to running a single variant of the test.

Fixes: https://github.com/nodejs/node/issues/6340
PR-URL: https://github.com/nodejs/node/pull/6678
Reviewed-By: Ben Noordhuis
Reviewed-By: James M Snell
Reviewed-By: Fedor Indutny
---
 deps/v8/test/mjsunit/mjsunit.status | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index bd5307de03a8de..66ecfcfcb4b9de 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -692,4 +692,13 @@
   'whitespaces': [SKIP],
 }],  # variant == wasm_traps

+##############################################################################
+# This test allocates a 2G block of memory and if there are multiple
+# variants this leads to kills by the OOM killer, crashes or messages
+# indicating the OS cannot allocate memory, exclude for Node.js runs
+# re-evaluate when we move up to v8 5.1
+[ALWAYS, {
+'regress/regress-crbug-514081': [PASS, NO_VARIANTS],
+}],  # ALWAYS
+
 ]

From fae03e6e5b6b0b94f6abe64e1e96f05cee49624f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Tue, 6 Jun 2017 10:33:45 +0200
Subject: [PATCH 42/64] deps: run memory hungry V8 test in exclusive mode

es6/typedarray-construct-offset-not-smi allocates a 2G block of memory
and if there are multiple variants running at the same time this can
lead to crashes, OOM kills or the OS failing to allocate memory.

This patch limits us to running a single variant of the test.

Refs: https://github.com/nodejs/node/pull/6678
PR-URL: https://github.com/nodejs/node/pull/13263
Reviewed-By: Gibson Fahnestock
Reviewed-By: Ben Noordhuis
Reviewed-By: Franziska Hinkelmann
Reviewed-By: Myles Borins
---
 deps/v8/test/mjsunit/mjsunit.status | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 66ecfcfcb4b9de..c8c3c4e9273270 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -148,7 +148,7 @@
   # Slow tests.
   'copy-on-write-assert': [PASS, SLOW],
   'es6/tail-call-megatest*': [PASS, SLOW, FAST_VARIANTS, ['tsan', SKIP]],
-  'es6/typedarray-construct-offset-not-smi': [PASS, SLOW],
+  'es6/typedarray-construct-offset-not-smi': [PASS, SLOW, NO_VARIANTS],
   'harmony/regexp-property-script-extensions': [PASS, SLOW],
   'numops-fuzz-part*': [PASS, ['mode == debug', SLOW]],
   'readonly': [PASS, SLOW],

From ad928c070b3fe9b8a8183a10f5d9b5ca4248b37f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Mon, 22 May 2017 10:40:57 +0200
Subject: [PATCH 43/64] deps: add missing include to V8 i18n.cc

This is required for ICU 59.1.

PR-URL: https://github.com/nodejs/node/pull/13263
Reviewed-By: Gibson Fahnestock
Reviewed-By: Ben Noordhuis
Reviewed-By: Franziska Hinkelmann
Reviewed-By: Myles Borins
---
 deps/v8/src/i18n.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 79a70daf623cdc..d96fd45b2aa8af 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -32,6 +32,7 @@
 #include "unicode/ucol.h"
 #include "unicode/ucurr.h"
 #include "unicode/unum.h"
+#include "unicode/ustring.h"
 #include "unicode/uvernum.h"
 #include "unicode/uversion.h"

From 5fb7a0bceb50d0a347d82a4e4d7f96bf8d35f9a4 Mon Sep 17 00:00:00 2001
From: Bartosz Sosnowski
Date: Tue, 23 May 2017 18:25:03 +0200
Subject: [PATCH 44/64] deps: fix addons compilation with VS2013

VS2013 does not support defaulting move constructor and
assignment operator.
This adds explicit definitions of those methods for two classes. This fix is required because we still support building addons with VS2013 and the incompatibility is in v8.h. Fixes: https://github.com/nodejs/node-v8/issues/4 PR-URL: https://github.com/nodejs/node/pull/13263 Reviewed-By: Gibson Fahnestock Reviewed-By: Ben Noordhuis Reviewed-By: Franziska Hinkelmann Reviewed-By: Myles Borins --- deps/v8/include/v8.h | 12 ++++++++---- deps/v8/src/api.cc | 15 +++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 1dd4ace540dd24..ea4119abed29de 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -4025,10 +4025,12 @@ class V8_EXPORT WasmCompiledModule : public Object { // supports move semantics, and does not support copy semantics. class TransferrableModule final { public: - TransferrableModule(TransferrableModule&& src) = default; + TransferrableModule(TransferrableModule&& src) + : compiled_code(std::move(src.compiled_code)), + wire_bytes(std::move(src.wire_bytes)) {} TransferrableModule(const TransferrableModule& src) = delete; - TransferrableModule& operator=(TransferrableModule&& src) = default; + TransferrableModule& operator=(TransferrableModule&& src); TransferrableModule& operator=(const TransferrableModule& src) = delete; private: @@ -4101,9 +4103,11 @@ class V8_EXPORT WasmModuleObjectBuilder final { // Disable copy semantics *in this implementation*. We can choose to // relax this, albeit it's not clear why. WasmModuleObjectBuilder(const WasmModuleObjectBuilder&) = delete; - WasmModuleObjectBuilder(WasmModuleObjectBuilder&&) = default; + WasmModuleObjectBuilder(WasmModuleObjectBuilder&& src) + : received_buffers_(std::move(src.received_buffers_)), + total_size_(src.total_size_) {} WasmModuleObjectBuilder& operator=(const WasmModuleObjectBuilder&) = delete; - WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&) = default; + WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&); std::vector received_buffers_; size_t total_size_ = 0; diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 58094c1f68d786..8e0b0a8a1e6727 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -7592,6 +7592,14 @@ Local WasmCompiledModule::GetWasmWireBytes() { return Local::Cast(Utils::ToLocal(wire_bytes)); } +WasmCompiledModule::TransferrableModule& +WasmCompiledModule::TransferrableModule::operator=( + TransferrableModule&& src) { + compiled_code = std::move(src.compiled_code); + wire_bytes = std::move(src.wire_bytes); + return *this; +} + // Currently, wasm modules are bound, both to Isolate and to // the Context they were created in. 
The currently-supported means to // decontextualize and then re-contextualize a module is via @@ -7706,6 +7714,13 @@ MaybeLocal WasmModuleObjectBuilder::Finish() { return WasmCompiledModule::Compile(isolate_, wire_bytes.get(), total_size_); } +WasmModuleObjectBuilder& +WasmModuleObjectBuilder::operator=(WasmModuleObjectBuilder&& src) { + received_buffers_ = std::move(src.received_buffers_); + total_size_ = src.total_size_; + return *this; +} + // static v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() { return new ArrayBufferAllocator(); From 1906077ee8fb84380eef1a7c6164bce6a05059ad Mon Sep 17 00:00:00 2001 From: Ben Noordhuis Date: Mon, 17 Apr 2017 12:20:05 +0200 Subject: [PATCH 45/64] v8: fix stack overflow in recursive method HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock() used to self-recurse before this commit, causing stack overflows on systems with small stack sizes. Make it non-recursive by storing intermediate results in a heap-allocated list. Fixes: https://github.com/nodejs/node/issues/11991 PR-URL: https://github.com/nodejs/node/pull/12460 Reviewed-By: James M Snell Reviewed-By: Yang Guo --- deps/v8/src/crankshaft/hydrogen-gvn.cc | 28 ++++++++++++++++---------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/deps/v8/src/crankshaft/hydrogen-gvn.cc b/deps/v8/src/crankshaft/hydrogen-gvn.cc index 70320052b08253..e586f4778f8968 100644 --- a/deps/v8/src/crankshaft/hydrogen-gvn.cc +++ b/deps/v8/src/crankshaft/hydrogen-gvn.cc @@ -5,6 +5,8 @@ #include "src/crankshaft/hydrogen-gvn.h" #include "src/crankshaft/hydrogen.h" +#include "src/list.h" +#include "src/list-inl.h" #include "src/objects-inl.h" #include "src/v8.h" @@ -651,19 +653,23 @@ SideEffects HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock( HBasicBlock* dominator, HBasicBlock* dominated) { SideEffects side_effects; - for (int i = 0; i < dominated->predecessors()->length(); ++i) { - HBasicBlock* block = dominated->predecessors()->at(i); - if (dominator->block_id() < block->block_id() && - block->block_id() < dominated->block_id() && - !visited_on_paths_.Contains(block->block_id())) { - visited_on_paths_.Add(block->block_id()); - side_effects.Add(block_side_effects_[block->block_id()]); - if (block->IsLoopHeader()) { - side_effects.Add(loop_side_effects_[block->block_id()]); + List blocks; + for (;;) { + for (int i = 0; i < dominated->predecessors()->length(); ++i) { + HBasicBlock* block = dominated->predecessors()->at(i); + if (dominator->block_id() < block->block_id() && + block->block_id() < dominated->block_id() && + !visited_on_paths_.Contains(block->block_id())) { + visited_on_paths_.Add(block->block_id()); + side_effects.Add(block_side_effects_[block->block_id()]); + if (block->IsLoopHeader()) { + side_effects.Add(loop_side_effects_[block->block_id()]); + } + blocks.Add(block); } - side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock( - dominator, block)); } + if (blocks.is_empty()) break; + dominated = blocks.RemoveLast(); } return side_effects; } From 0be4d17cd42ff6f6f4ba0f242b03fcde4f7ef9ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Wed, 7 Jun 2017 15:24:50 +0200 Subject: [PATCH 46/64] v8: fix gcc 7 build errors Porting https://github.com/nodejs/node/pull/12392 to V8 5.9 Ref: https://github.com/nodejs/node/pull/12392 Fixes: https://github.com/nodejs/node/issues/10388 PR-URL: https://github.com/nodejs/node/pull/13515 Reviewed-By: James M Snell Reviewed-By: Anna Henningsen --- 
deps/v8/src/objects-body-descriptors.h | 2 +- deps/v8/src/objects-inl.h | 21 +++++++++++++++++++++ deps/v8/src/objects/hash-table.h | 20 ++++---------------- 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects-body-descriptors.h index 9f080eb7551bd6..b201c20fbb0294 100644 --- a/deps/v8/src/objects-body-descriptors.h +++ b/deps/v8/src/objects-body-descriptors.h @@ -99,7 +99,7 @@ class FixedBodyDescriptor final : public BodyDescriptorBase { template static inline void IterateBody(HeapObject* obj, int object_size) { - IterateBody(obj); + IterateBody(obj); } }; diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 4b819d43f44f45..fd1f1e84982898 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -46,6 +46,27 @@ namespace v8 { namespace internal { +template +uint32_t HashTable::Hash(Key key) { + if (Shape::UsesSeed) { + return Shape::SeededHash(key, GetHeap()->HashSeed()); + } else { + return Shape::Hash(key); + } +} + + +template +uint32_t HashTable::HashForObject(Key key, + Object* object) { + if (Shape::UsesSeed) { + return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object); + } else { + return Shape::HashForObject(key, object); + } +} + + PropertyDetails::PropertyDetails(Smi* smi) { value_ = smi->value(); } diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h index 221598b711b568..bc2d95d4b9d698 100644 --- a/deps/v8/src/objects/hash-table.h +++ b/deps/v8/src/objects/hash-table.h @@ -135,22 +135,10 @@ class HashTable : public HashTableBase { public: typedef Shape ShapeT; - // Wrapper methods - inline uint32_t Hash(Key key) { - if (Shape::UsesSeed) { - return Shape::SeededHash(key, GetHeap()->HashSeed()); - } else { - return Shape::Hash(key); - } - } - - inline uint32_t HashForObject(Key key, Object* object) { - if (Shape::UsesSeed) { - return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object); - } else { - return Shape::HashForObject(key, object); - } - } + // Wrapper methods. Defined in src/objects-inl.h + // to break a cycle with src/heap/heap.h. + inline uint32_t Hash(Key key); + inline uint32_t HashForObject(Key key, Object* object); // Returns a new HashTable object. MUST_USE_RESULT static Handle New( From 6204fadc19f06687165db1602103cea8960128e4 Mon Sep 17 00:00:00 2001 From: Ben Noordhuis Date: Fri, 28 Apr 2017 13:39:55 +0200 Subject: [PATCH 47/64] deps: cherry-pick bfae9db from upstream v8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Original commit message: Update postmortem metadata generator. Add PropertyDetails::AttributesField + PropertyDetails::LocationField. 
Review-Url: https://codereview.chromium.org/2842843004 Cr-Commit-Position: refs/heads/master@{#44889} PR-URL: https://github.com/nodejs/node/pull/12722 Refs: https://github.com/nodejs/llnode/issues/81 Reviewed-By: James M Snell Reviewed-By: Michael Dawson Reviewed-By: Michaël Zasso --- deps/v8/tools/gen-postmortem-metadata.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py index 2e903d71b1599b..0e3d088fbb6bb7 100644 --- a/deps/v8/tools/gen-postmortem-metadata.py +++ b/deps/v8/tools/gen-postmortem-metadata.py @@ -99,6 +99,22 @@ 'value': 'kAccessor' }, { 'name': 'prop_kind_mask', 'value': 'PropertyDetails::KindField::kMask' }, + { 'name': 'prop_location_Descriptor', + 'value': 'kDescriptor' }, + { 'name': 'prop_location_Field', + 'value': 'kField' }, + { 'name': 'prop_location_mask', + 'value': 'PropertyDetails::LocationField::kMask' }, + { 'name': 'prop_location_shift', + 'value': 'PropertyDetails::LocationField::kShift' }, + { 'name': 'prop_attributes_NONE', 'value': 'NONE' }, + { 'name': 'prop_attributes_READ_ONLY', 'value': 'READ_ONLY' }, + { 'name': 'prop_attributes_DONT_ENUM', 'value': 'DONT_ENUM' }, + { 'name': 'prop_attributes_DONT_DELETE', 'value': 'DONT_DELETE' }, + { 'name': 'prop_attributes_mask', + 'value': 'PropertyDetails::AttributesField::kMask' }, + { 'name': 'prop_attributes_shift', + 'value': 'PropertyDetails::AttributesField::kShift' }, { 'name': 'prop_index_mask', 'value': 'PropertyDetails::FieldIndexField::kMask' }, { 'name': 'prop_index_shift', From 4c4f647420f7dc4651deb03b458082851af95fef Mon Sep 17 00:00:00 2001 From: "daniel.bevenius" Date: Tue, 2 May 2017 03:28:17 -0700 Subject: [PATCH 48/64] deps: cherry-pick f5fad6d from upstream v8 Original commit message: This commit adds a getter for the private is_verbose_ member. The use case for this comes from Node.js where the ability to avoid calling FatalException if the TryCatch is verbose would be nice to have. BUG= Review-Url: https://codereview.chromium.org/2840803002 Cr-Commit-Position: refs/heads/master@{#45018} PR-URL: https://github.com/nodejs/node/pull/12826 Reviewed-By: Anna Henningsen Reviewed-By: Ben Noordhuis --- deps/v8/include/v8.h | 5 +++++ deps/v8/src/api.cc | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index ea4119abed29de..ce7741a08c0364 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -8168,6 +8168,11 @@ class V8_EXPORT TryCatch { */ void SetVerbose(bool value); + /** + * Returns true if verbosity is enabled. + */ + bool IsVerbose() const; + /** * Set whether or not this TryCatch should capture a Message object * which holds source information about where the exception diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 8e0b0a8a1e6727..cacf469d0dd555 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -2710,6 +2710,10 @@ void v8::TryCatch::SetVerbose(bool value) { is_verbose_ = value; } +bool v8::TryCatch::IsVerbose() const { + return is_verbose_; +} + void v8::TryCatch::SetCaptureMessage(bool value) { capture_message_ = value; From 65956e6e84c5b453a4cd47bcfa1914168b09092c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Sun, 28 May 2017 12:14:23 +0200 Subject: [PATCH 49/64] deps: cherry-pick 6d38f89 from upstream V8 Original commit message: [turbofan] Boost performance of Array.prototype.shift by 4x. 
For small arrays, it's way faster to just move the elements instead of doing the fairly complex and heavy-weight left-trimming. Crankshaft has had this optimization for small arrays already; this CL more or less ports this functionality to TurboFan, which yields a 4x speed-up when using shift on small arrays (with up to 16 elements). This should recover some of the regressions reported in the Node.js issues https://github.com/nodejs/node/issues/12657 and discovered for the syncthrough module using https://github.com/mcollina/syncthrough/blob/master/benchmarks/basic.js as benchmark. R=jarin@chromium.org BUG=v8:6376 Review-Url: https://codereview.chromium.org/2874453002 Cr-Commit-Position: refs/heads/master@{#45216} PR-URL: https://github.com/nodejs/node/pull/13263 Reviewed-By: Gibson Fahnestock Reviewed-By: Ben Noordhuis Reviewed-By: Franziska Hinkelmann Reviewed-By: Myles Borins --- deps/v8/src/compiler/graph.h | 11 ++ deps/v8/src/compiler/js-builtin-reducer.cc | 176 +++++++++++++++++++++ deps/v8/src/compiler/js-builtin-reducer.h | 1 + deps/v8/src/crankshaft/hydrogen.cc | 2 +- deps/v8/src/objects.h | 3 + deps/v8/test/mjsunit/array-shift5.js | 50 ++++++ 6 files changed, 242 insertions(+), 1 deletion(-) create mode 100644 deps/v8/test/mjsunit/array-shift5.js diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h index 1e861c7b151e1b..60af4789bcd33f 100644 --- a/deps/v8/src/compiler/graph.h +++ b/deps/v8/src/compiler/graph.h @@ -104,6 +104,17 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) { Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9}; return NewNode(op, arraysize(nodes), nodes); } + Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, + Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10) { + Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10}; + return NewNode(op, arraysize(nodes), nodes); + } + Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, + Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10, + Node* n11) { + Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11}; + return NewNode(op, arraysize(nodes), nodes); + } // Clone the {node}, and assign a new node id to the copy. Node* CloneNode(const Node* node); diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc index a1c83ce1b639b7..bea8f18b63953f 100644 --- a/deps/v8/src/compiler/js-builtin-reducer.cc +++ b/deps/v8/src/compiler/js-builtin-reducer.cc @@ -5,6 +5,7 @@ #include "src/compiler/js-builtin-reducer.h" #include "src/base/bits.h" +#include "src/builtins/builtins-utils.h" #include "src/code-factory.h" #include "src/compilation-dependencies.h" #include "src/compiler/access-builder.h" @@ -1005,6 +1006,179 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) { return NoChange(); } +// ES6 section 22.1.3.22 Array.prototype.shift ( ) +Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) { + Node* target = NodeProperties::GetValueInput(node, 0); + Node* receiver = NodeProperties::GetValueInput(node, 1); + Node* context = NodeProperties::GetContextInput(node); + Node* frame_state = NodeProperties::GetFrameStateInput(node); + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + + // TODO(turbofan): Extend this to also handle fast holey double elements + // once we got the hole NaN mess sorted out in TurboFan/V8. 
+ Handle receiver_map; + if (GetMapWitness(node).ToHandle(&receiver_map) && + CanInlineArrayResizeOperation(receiver_map) && + receiver_map->elements_kind() != FAST_HOLEY_DOUBLE_ELEMENTS) { + // Install code dependencies on the {receiver} prototype maps and the + // global array protector cell. + dependencies()->AssumePropertyCell(factory()->array_protector()); + dependencies()->AssumePrototypeMapsStable(receiver_map); + + // Load length of the {receiver}. + Node* length = effect = graph()->NewNode( + simplified()->LoadField( + AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())), + receiver, effect, control); + + // Return undefined if {receiver} has no elements. + Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length, + jsgraph()->ZeroConstant()); + Node* branch0 = + graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control); + + Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0); + Node* etrue0 = effect; + Node* vtrue0 = jsgraph()->UndefinedConstant(); + + Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0); + Node* efalse0 = effect; + Node* vfalse0; + { + // Check if we should take the fast-path. + Node* check1 = + graph()->NewNode(simplified()->NumberLessThanOrEqual(), length, + jsgraph()->Constant(JSArray::kMaxCopyElements)); + Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue), + check1, if_false0); + + Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1); + Node* etrue1 = efalse0; + Node* vtrue1; + { + Node* elements = etrue1 = graph()->NewNode( + simplified()->LoadField(AccessBuilder::ForJSObjectElements()), + receiver, etrue1, if_true1); + + // Load the first element here, which we return below. + vtrue1 = etrue1 = graph()->NewNode( + simplified()->LoadElement(AccessBuilder::ForFixedArrayElement( + receiver_map->elements_kind())), + elements, jsgraph()->ZeroConstant(), etrue1, if_true1); + + // Ensure that we aren't shifting a copy-on-write backing store. + if (IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) { + elements = etrue1 = + graph()->NewNode(simplified()->EnsureWritableFastElements(), + receiver, elements, etrue1, if_true1); + } + + // Shift the remaining {elements} by one towards the start. + Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1); + Node* eloop = + graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop); + Node* index = graph()->NewNode( + common()->Phi(MachineRepresentation::kTagged, 2), + jsgraph()->OneConstant(), + jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop); + + { + Node* check2 = + graph()->NewNode(simplified()->NumberLessThan(), index, length); + Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop); + + if_true1 = graph()->NewNode(common()->IfFalse(), branch2); + etrue1 = eloop; + + Node* control = graph()->NewNode(common()->IfTrue(), branch2); + Node* effect = etrue1; + + ElementAccess const access = AccessBuilder::ForFixedArrayElement( + receiver_map->elements_kind()); + Node* value = effect = + graph()->NewNode(simplified()->LoadElement(access), elements, + index, effect, control); + effect = graph()->NewNode( + simplified()->StoreElement(access), elements, + graph()->NewNode(simplified()->NumberSubtract(), index, + jsgraph()->OneConstant()), + value, effect, control); + + loop->ReplaceInput(1, control); + eloop->ReplaceInput(1, effect); + index->ReplaceInput(1, + graph()->NewNode(simplified()->NumberAdd(), index, + jsgraph()->OneConstant())); + } + + // Compute the new {length}. 
+ length = graph()->NewNode(simplified()->NumberSubtract(), length, + jsgraph()->OneConstant()); + + // Store the new {length} to the {receiver}. + etrue1 = graph()->NewNode( + simplified()->StoreField( + AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())), + receiver, length, etrue1, if_true1); + + // Store a hole to the element we just removed from the {receiver}. + etrue1 = graph()->NewNode( + simplified()->StoreElement(AccessBuilder::ForFixedArrayElement( + GetHoleyElementsKind(receiver_map->elements_kind()))), + elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1); + } + + Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1); + Node* efalse1 = efalse0; + Node* vfalse1; + { + // Call the generic C++ implementation. + const int builtin_index = Builtins::kArrayShift; + CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor( + graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver, + Builtins::name(builtin_index), node->op()->properties(), + CallDescriptor::kNeedsFrameState); + Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, + kArgvOnStack, true); + Address builtin_entry = Builtins::CppEntryOf(builtin_index); + Node* entry = jsgraph()->ExternalConstant( + ExternalReference(builtin_entry, isolate())); + Node* argc = + jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver); + if_false1 = efalse1 = vfalse1 = + graph()->NewNode(common()->Call(desc), stub_code, receiver, argc, + target, jsgraph()->UndefinedConstant(), entry, + argc, context, frame_state, efalse1, if_false1); + } + + if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1); + efalse0 = + graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0); + vfalse0 = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue1, vfalse1, if_false0); + } + + control = graph()->NewNode(common()->Merge(2), if_true0, if_false0); + effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control); + Node* value = + graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), + vtrue0, vfalse0, control); + + // Convert the hole to undefined. Do this last, so that we can optimize + // conversion operator via some smart strength reduction in many cases. 
+ if (IsFastHoleyElementsKind(receiver_map->elements_kind())) { + value = + graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value); + } + + ReplaceWithValue(node, value, effect, control); + return Replace(value); + } + return NoChange(); +} + namespace { bool HasInstanceTypeWitness(Node* receiver, Node* effect, @@ -2125,6 +2299,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) { return ReduceArrayPop(node); case kArrayPush: return ReduceArrayPush(node); + case kArrayShift: + return ReduceArrayShift(node); case kDateNow: return ReduceDateNow(node); case kDateGetTime: diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h index e792ad3c3afeb7..736ece34e4f5db 100644 --- a/deps/v8/src/compiler/js-builtin-reducer.h +++ b/deps/v8/src/compiler/js-builtin-reducer.h @@ -58,6 +58,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final Reduction ReduceArrayIsArray(Node* node); Reduction ReduceArrayPop(Node* node); Reduction ReduceArrayPush(Node* node); + Reduction ReduceArrayShift(Node* node); Reduction ReduceDateNow(Node* node); Reduction ReduceDateGetTime(Node* node); Reduction ReduceGlobalIsFinite(Node* node); diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc index d9dc41221e509f..e3794e33ffb60b 100644 --- a/deps/v8/src/crankshaft/hydrogen.cc +++ b/deps/v8/src/crankshaft/hydrogen.cc @@ -8821,7 +8821,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( Handle::null(), true); // Threshold for fast inlined Array.shift(). - HConstant* inline_threshold = Add(static_cast(16)); + HConstant* inline_threshold = Add(JSArray::kMaxCopyElements); Drop(args_count_no_receiver); HValue* result; diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 118fab96de7973..46697b55cc8e14 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -9608,6 +9608,9 @@ class JSArray: public JSObject { static const int kLengthOffset = JSObject::kHeaderSize; static const int kSize = kLengthOffset + kPointerSize; + // Max. number of elements being copied in Array builtins. + static const int kMaxCopyElements = 16; + static const int kInitialMaxFastElementArray = (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) / diff --git a/deps/v8/test/mjsunit/array-shift5.js b/deps/v8/test/mjsunit/array-shift5.js new file mode 100644 index 00000000000000..c8fb90d830fba9 --- /dev/null +++ b/deps/v8/test/mjsunit/array-shift5.js @@ -0,0 +1,50 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// Flags: --allow-natives-syntax + +(function() { + function doShift(a) { return a.shift(); } + + function test() { + var a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]; + assertEquals(0, doShift(a)); + assertEquals([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16], a); + } + + test(); + test(); + %OptimizeFunctionOnNextCall(doShift); + test(); +})(); + +(function() { + function doShift(a) { return a.shift(); } + + function test() { + var a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.1]; + assertEquals(0, doShift(a)); + assertEquals([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.1], a); + } + + test(); + test(); + %OptimizeFunctionOnNextCall(doShift); + test(); +})(); + +(function() { + function doShift(a) { return a.shift(); } + + function test() { + var a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,"16"]; + assertEquals(0, doShift(a)); + assertEquals([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,"16"], a); + } + + test(); + test(); + %OptimizeFunctionOnNextCall(doShift); + test(); +})(); From 83636a4cbee84474ba1c3920a6195843d9104657 Mon Sep 17 00:00:00 2001 From: Jochen Eisinger Date: Fri, 21 Apr 2017 18:07:04 +0200 Subject: [PATCH 50/64] deps: backport 4fdf9fd4813 from upstream v8 Original commit message: Add documentation for FunctionCallbackInfo R=verwaest@chromium.org,haraken@chromium.org,yukishiino@chromium.org BUG= Change-Id: I273f5ce305f80b2aa5e9c8c42a6e8e5afc51a0a7 Reviewed-on: https://chromium-review.googlesource.com/484422 Reviewed-by: Kentaro Hara Reviewed-by: Toon Verwaest Commit-Queue: Jochen Eisinger Cr-Commit-Position: refs/heads/master@{#44927} Ref: https://github.com/v8/v8/commit/4fdf9fd4813 PR-URL: https://github.com/nodejs/node/pull/12875 Reviewed-By: James M Snell --- deps/v8/include/v8.h | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index ce7741a08c0364..fe124d44c644c5 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -3578,16 +3578,34 @@ class ReturnValue { template class FunctionCallbackInfo { public: + /** The number of available arguments. */ V8_INLINE int Length() const; + /** Accessor for the available arguments. */ V8_INLINE Local operator[](int i) const; V8_INLINE V8_DEPRECATED("Use Data() to explicitly pass Callee instead", Local Callee() const); + /** Returns the receiver. This corresponds to the "this" value. */ V8_INLINE Local This() const; + /** + * If the callback was created without a Signature, this is the same + * value as This(). If there is a signature, and the signature didn't match + * This() but one of its hidden prototypes, this will be the respective + * hidden prototype. + * + * Note that this is not the prototype of This() on which the accessor + * referencing this callback was found (which in V8 internally is often + * referred to as holder [sic]). + */ V8_INLINE Local Holder() const; + /** For construct calls, this returns the "new.target" value. */ V8_INLINE Local NewTarget() const; + /** Indicates whether this is a regular call or a construct call. */ V8_INLINE bool IsConstructCall() const; + /** The data argument specified when creating the callback. */ V8_INLINE Local Data() const; + /** The current Isolate. */ V8_INLINE Isolate* GetIsolate() const; + /** The ReturnValue for the call. */ V8_INLINE ReturnValue GetReturnValue() const; // This shouldn't be public, but the arm compiler needs it. 
static const int kArgsLength = 8; @@ -5745,9 +5763,13 @@ class V8_EXPORT ObjectTemplate : public Template { friend class FunctionTemplate; }; - /** * A Signature specifies which receiver is valid for a function. + * + * A receiver matches a given signature if the receiver (or any of its + * hidden prototypes) was created from the signature's FunctionTemplate, or + * from a FunctionTemplate that inherits directly or indirectly from the + * signature's FunctionTemplate. */ class V8_EXPORT Signature : public Data { public: From 1e93589e8ce4ea9cc4039826957853715d01754a Mon Sep 17 00:00:00 2001 From: Anna Henningsen Date: Sat, 6 May 2017 21:19:18 +0200 Subject: [PATCH 51/64] v8: do not test v8 with -Werror PR-URL: https://github.com/nodejs/node/pull/12875 Reviewed-By: James M Snell --- deps/v8/gypfiles/standalone.gypi | 2 +- deps/v8/gypfiles/toolchain.gypi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deps/v8/gypfiles/standalone.gypi b/deps/v8/gypfiles/standalone.gypi index 986aaaaebb224c..27e19d3c531784 100644 --- a/deps/v8/gypfiles/standalone.gypi +++ b/deps/v8/gypfiles/standalone.gypi @@ -190,7 +190,7 @@ 'host_clang%': '<(host_clang)', 'target_arch%': '<(target_arch)', 'v8_target_arch%': '<(v8_target_arch)', - 'werror%': '-Werror', + 'werror%': '', 'use_goma%': '<(use_goma)', 'gomadir%': '<(gomadir)', 'asan%': '<(asan)', diff --git a/deps/v8/gypfiles/toolchain.gypi b/deps/v8/gypfiles/toolchain.gypi index 815070a508379c..6672d752a75bc8 100644 --- a/deps/v8/gypfiles/toolchain.gypi +++ b/deps/v8/gypfiles/toolchain.gypi @@ -81,7 +81,7 @@ 'v8_toolset_for_shell%': 'target', 'host_os%': '<(OS)', - 'werror%': '-Werror', + 'werror%': '', # For a shared library build, results in "libv8-<(soname_version).so". 'soname_version%': '', From da35ac3bb946b6a6f4b8e6201bddfe76a0632161 Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Wed, 24 May 2017 15:44:34 +0200 Subject: [PATCH 52/64] v8: backport bd59e7452be from upstream v8 Original commit message: [PATCH] Merged: Make Object::GetOwnPropertyDescriptor() take a Name, not a String. Revision: b5e610c19208ef854755eec67011ca7aff008bf4 NOTRY=true NOPRESUBMIT=true NOTREECHECKS=true TBR=vogelheim@chromium.org Bug: Change-Id: I396b559b28aab6afa138db747711e50cd0da3da7 Reviewed-on: https://chromium-review.googlesource.com/513927 Reviewed-by: Michael Achenbach Cr-Commit-Position: refs/branch-heads/6.0@{#5} Cr-Branched-From: 97dbf624a5eeffb3a8df36d24cdb2a883137385f-refs/heads/6.0.286@{#1} Cr-Branched-From: 12e6f1cb5cd9616da7b9d4a7655c088778a6d415-refs/heads/master@{#45439} PR-URL: https://github.com/nodejs/node/pull/13217 Reviewed-By: Franziska Hinkelmann Reviewed-By: Ben Noordhuis Reviewed-By: Anna Henningsen Reviewed-By: James M Snell --- deps/v8/include/v8.h | 6 +++--- deps/v8/src/api.cc | 8 +++----- deps/v8/test/cctest/test-api.cc | 21 +++++++++++++++------ 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index fe124d44c644c5..f51efbdf503671 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -3070,12 +3070,12 @@ class V8_EXPORT Object : public Value { Local context, Local key); /** - * Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3. + * Returns Object.getOwnPropertyDescriptor as per ES2016 section 19.1.2.6. 
*/ V8_DEPRECATED("Use maybe version", - Local GetOwnPropertyDescriptor(Local key)); + Local GetOwnPropertyDescriptor(Local key)); V8_WARN_UNUSED_RESULT MaybeLocal GetOwnPropertyDescriptor( - Local context, Local key); + Local context, Local key); V8_DEPRECATE_SOON("Use maybe version", bool Has(Local key)); /** diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index cacf469d0dd555..1b8bafacc29919 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -4530,12 +4530,11 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Local key) { .FromMaybe(static_cast(i::NONE)); } - MaybeLocal v8::Object::GetOwnPropertyDescriptor(Local context, - Local key) { + Local key) { PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyDescriptor, Value); i::Handle obj = Utils::OpenHandle(this); - i::Handle key_name = Utils::OpenHandle(*key); + i::Handle key_name = Utils::OpenHandle(*key); i::PropertyDescriptor desc; Maybe found = @@ -4548,8 +4547,7 @@ MaybeLocal v8::Object::GetOwnPropertyDescriptor(Local context, RETURN_ESCAPED(Utils::ToLocal(desc.ToObject(isolate))); } - -Local v8::Object::GetOwnPropertyDescriptor(Local key) { +Local v8::Object::GetOwnPropertyDescriptor(Local key) { auto context = ContextFromHeapObject(Utils::OpenHandle(this)); RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyDescriptor(context, key), Value); } diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index f2cbc7dabfe90f..81d2ff1b1ef4a8 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -24362,12 +24362,13 @@ TEST(GetOwnPropertyDescriptor) { v8::Isolate* isolate = env->GetIsolate(); v8::HandleScope scope(isolate); CompileRun( - "var x = { value : 13};" - "Object.defineProperty(x, 'p0', {value : 12});" - "Object.defineProperty(x, 'p1', {" - " set : function(value) { this.value = value; }," - " get : function() { return this.value; }," - "});"); + "var x = { value : 13};" + "Object.defineProperty(x, 'p0', {value : 12});" + "Object.defineProperty(x, Symbol.toStringTag, {value: 'foo'});" + "Object.defineProperty(x, 'p1', {" + " set : function(value) { this.value = value; }," + " get : function() { return this.value; }," + "});"); Local x = Local::Cast( env->Global()->Get(env.local(), v8_str("x")).ToLocalChecked()); Local desc = @@ -24401,6 +24402,14 @@ TEST(GetOwnPropertyDescriptor) { ->Equals(env.local(), get->Call(env.local(), x, 0, NULL).ToLocalChecked()) .FromJust()); + desc = + x->GetOwnPropertyDescriptor(env.local(), Symbol::GetToStringTag(isolate)) + .ToLocalChecked(); + CHECK(v8_str("foo") + ->Equals(env.local(), Local::Cast(desc) + ->Get(env.local(), v8_str("value")) + .ToLocalChecked()) + .FromJust()); } From e3c1119174cc668a16291de2f16cd155179b5e7d Mon Sep 17 00:00:00 2001 From: Peter Marshall Date: Thu, 25 May 2017 14:07:16 +0200 Subject: [PATCH 53/64] v8: backport a9e56f4f36d from upstream v8 Because 5.8 still had other uses of the removed flag, there are some extra changes in Heap::ConfigureHeap and api.cc:SetResourceConstraints. Original commit message: [heap] Remove max_executable_size resource constraint. 
BUG=chromium:716032 Review-Url: https://codereview.chromium.org/2890603007 Cr-Commit-Position: refs/heads/master@{#45400} PR-URL: https://github.com/nodejs/node/pull/13217 Reviewed-By: Franziska Hinkelmann Reviewed-By: Ben Noordhuis Reviewed-By: Anna Henningsen Reviewed-By: James M Snell --- deps/v8/include/v8.h | 8 ++++++-- deps/v8/src/api.cc | 11 ++--------- deps/v8/src/flag-definitions.h | 1 - deps/v8/src/heap/heap.cc | 3 --- deps/v8/src/heap/heap.h | 10 ---------- 5 files changed, 8 insertions(+), 25 deletions(-) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index f51efbdf503671..a096a7b2fbd110 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -5900,8 +5900,12 @@ class V8_EXPORT ResourceConstraints { void set_max_old_space_size(int limit_in_mb) { max_old_space_size_ = limit_in_mb; } - int max_executable_size() const { return max_executable_size_; } - void set_max_executable_size(int limit_in_mb) { + V8_DEPRECATE_SOON("max_executable_size_ is subsumed by max_old_space_size_", + int max_executable_size() const) { + return max_executable_size_; + } + V8_DEPRECATE_SOON("max_executable_size_ is subsumed by max_old_space_size_", + void set_max_executable_size(int limit_in_mb)) { max_executable_size_ = limit_in_mb; } uint32_t* stack_limit() const { return stack_limit_; } diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 1b8bafacc29919..74d8768d4d5fe8 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -810,7 +810,6 @@ Extension::Extension(const char* name, ResourceConstraints::ResourceConstraints() : max_semi_space_size_(0), max_old_space_size_(0), - max_executable_size_(0), stack_limit_(NULL), code_range_size_(0), max_zone_pool_size_(0) {} @@ -832,24 +831,20 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory, if (physical_memory <= low_limit) { set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeLowMemoryDevice); set_max_old_space_size(i::Heap::kMaxOldSpaceSizeLowMemoryDevice); - set_max_executable_size(i::Heap::kMaxExecutableSizeLowMemoryDevice); set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSizeLowMemoryDevice); } else if (physical_memory <= medium_limit) { set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeMediumMemoryDevice); set_max_old_space_size(i::Heap::kMaxOldSpaceSizeMediumMemoryDevice); - set_max_executable_size(i::Heap::kMaxExecutableSizeMediumMemoryDevice); set_max_zone_pool_size( i::AccountingAllocator::kMaxPoolSizeMediumMemoryDevice); } else if (physical_memory <= high_limit) { set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHighMemoryDevice); set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHighMemoryDevice); - set_max_executable_size(i::Heap::kMaxExecutableSizeHighMemoryDevice); set_max_zone_pool_size( i::AccountingAllocator::kMaxPoolSizeHighMemoryDevice); } else { set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHugeMemoryDevice); set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHugeMemoryDevice); - set_max_executable_size(i::Heap::kMaxExecutableSizeHugeMemoryDevice); set_max_zone_pool_size( i::AccountingAllocator::kMaxPoolSizeHugeMemoryDevice); } @@ -868,13 +863,11 @@ void SetResourceConstraints(i::Isolate* isolate, const ResourceConstraints& constraints) { int semi_space_size = constraints.max_semi_space_size(); int old_space_size = constraints.max_old_space_size(); - int max_executable_size = constraints.max_executable_size(); size_t code_range_size = constraints.code_range_size(); size_t max_pool_size = constraints.max_zone_pool_size(); - if (semi_space_size != 0 || old_space_size != 0 || - 
max_executable_size != 0 || code_range_size != 0) { + if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) { isolate->heap()->ConfigureHeap(semi_space_size, old_space_size, - max_executable_size, code_range_size); + 0 /*max_executable_size*/, code_range_size); } isolate->allocator()->ConfigureSegmentPool(max_pool_size); diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 8f21ddecd69f36..e40d5bedeaa9d2 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -625,7 +625,6 @@ DEFINE_BOOL(experimental_new_space_growth_heuristic, false, "of their absolute value.") DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)") DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)") -DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)") DEFINE_BOOL(gc_global, false, "always perform global GCs") DEFINE_INT(gc_interval, -1, "garbage collect after allocations") DEFINE_INT(retain_maps_for_n_gc, 2, diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc index 028a793fd1df02..49334173594d9f 100644 --- a/deps/v8/src/heap/heap.cc +++ b/deps/v8/src/heap/heap.cc @@ -5085,9 +5085,6 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size, max_old_generation_size_ = static_cast(FLAG_max_old_space_size) * MB; } - if (FLAG_max_executable_size > 0) { - max_executable_size_ = static_cast(FLAG_max_executable_size) * MB; - } if (Page::kPageSize > MB) { max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize); diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h index 819055b6a933ad..2be558999593c5 100644 --- a/deps/v8/src/heap/heap.h +++ b/deps/v8/src/heap/heap.h @@ -609,16 +609,6 @@ class Heap { static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier; - // The executable size has to be a multiple of Page::kPageSize. - // Sizes are in MB. - static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; - static const int kMaxExecutableSizeMediumMemoryDevice = - 192 * kPointerMultiplier; - static const int kMaxExecutableSizeHighMemoryDevice = - 256 * kPointerMultiplier; - static const int kMaxExecutableSizeHugeMemoryDevice = - 256 * kPointerMultiplier; - static const int kTraceRingBufferSize = 512; static const int kStacktraceBufferSize = 512; From 61a1f9cfe2cf5e29e9649ff94114865d5ef558cb Mon Sep 17 00:00:00 2001 From: hpayer Date: Mon, 8 May 2017 11:09:39 -0700 Subject: [PATCH 54/64] v8: backport 4f82f1d948c from upstream v8 Original commit message: [Api] Add an idle time garbage collection callback flag to GCCallbackFlags. BUG=chromium:718484 Review-Url: https://codereview.chromium.org/2867073002 Cr-Commit-Position: refs/heads/master@{#45167} PR-URL: https://github.com/nodejs/node/pull/13217 Reviewed-By: Franziska Hinkelmann Reviewed-By: Ben Noordhuis Reviewed-By: Anna Henningsen Reviewed-By: James M Snell --- deps/v8/include/v8.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index a096a7b2fbd110..8637e060b51ae9 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -6180,6 +6180,8 @@ enum GCType { * - kGCCallbackFlagCollectAllAvailableGarbage: The GC callback is called * in a phase where V8 is trying to collect all available garbage * (e.g., handling a low memory notification). 
+ * - kGCCallbackScheduleIdleCollectGarbage: The GC callback is called to + * trigger an idle garbage collection. */ enum GCCallbackFlags { kNoGCCallbackFlags = 0, @@ -6188,6 +6190,7 @@ enum GCCallbackFlags { kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3, kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4, kGCCallbackFlagCollectAllExternalMemory = 1 << 5, + kGCCallbackScheduleIdleCollectGarbage = 1 << 6, }; typedef void (*GCCallback)(GCType type, GCCallbackFlags flags); From 9c7af15a8c5e1cd817e5bc062ac7ece72a60aabc Mon Sep 17 00:00:00 2001 From: hpayer Date: Mon, 8 May 2017 13:36:17 -0700 Subject: [PATCH 55/64] v8: backport 4f82f1d948c from upstream v8 Original commit message: [PATCH] Rename idle garbage collection callback flag. TBR=mlippautz@chromium.org Review-Url: https://codereview.chromium.org/2867863002 Cr-Commit-Position: refs/heads/master@{#45173} PR-URL: https://github.com/nodejs/node/pull/13217 Reviewed-By: Franziska Hinkelmann Reviewed-By: Ben Noordhuis Reviewed-By: Anna Henningsen Reviewed-By: James M Snell --- deps/v8/include/v8.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 8637e060b51ae9..7988b58361e419 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -6180,7 +6180,7 @@ enum GCType { * - kGCCallbackFlagCollectAllAvailableGarbage: The GC callback is called * in a phase where V8 is trying to collect all available garbage * (e.g., handling a low memory notification). - * - kGCCallbackScheduleIdleCollectGarbage: The GC callback is called to + * - kGCCallbackScheduleIdleGarbageCollection: The GC callback is called to * trigger an idle garbage collection. */ enum GCCallbackFlags { @@ -6190,7 +6190,7 @@ enum GCCallbackFlags { kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3, kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4, kGCCallbackFlagCollectAllExternalMemory = 1 << 5, - kGCCallbackScheduleIdleCollectGarbage = 1 << 6, + kGCCallbackScheduleIdleGarbageCollection = 1 << 6, }; typedef void (*GCCallback)(GCType type, GCCallbackFlags flags); From e3fcdeffcbeeb178093d7b5587c824f6a86767ec Mon Sep 17 00:00:00 2001 From: Peter Marshall Date: Fri, 26 May 2017 14:08:06 +0200 Subject: [PATCH 56/64] v8: backport pieces from 18a26cfe174 from upstream v8 Backport new virtual methods from 18a26cfe174 ("Add memory protection API to ArrayBuffer::Allocator") PR-URL: https://github.com/nodejs/node/pull/13217 Reviewed-By: Franziska Hinkelmann Reviewed-By: Ben Noordhuis Reviewed-By: Anna Henningsen Reviewed-By: James M Snell --- deps/v8/include/v8.h | 8 ++++++++ deps/v8/src/api.cc | 12 ++++++++++++ 2 files changed, 20 insertions(+) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 7988b58361e419..6257939bd93a2c 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -4176,12 +4176,20 @@ class V8_EXPORT ArrayBuffer : public Object { */ virtual void* AllocateUninitialized(size_t length) = 0; + virtual void* Reserve(size_t length); + /** * Free the memory block of size |length|, pointed to by |data|. * That memory is guaranteed to be previously allocated by |Allocate|. */ virtual void Free(void* data, size_t length) = 0; + enum class AllocationMode { kNormal, kReservation }; + virtual void Free(void* data, size_t length, AllocationMode mode); + enum class Protection { kNoAccess, kReadWrite }; + virtual void SetProtection(void* data, size_t length, + Protection protection); + /** * malloc/free based convenience allocator. 
* diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 74d8768d4d5fe8..b4a9d177a7b36c 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -437,6 +437,18 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) { i::V8::SetSnapshotBlob(snapshot_blob); } +void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); } + +void v8::ArrayBuffer::Allocator::Free(void* data, size_t length, + AllocationMode mode) { + UNIMPLEMENTED(); +} + +void v8::ArrayBuffer::Allocator::SetProtection( + void* data, size_t length, + v8::ArrayBuffer::Allocator::Protection protection) { + UNIMPLEMENTED(); +} namespace { From 8e967299e71feac7f8ed5ddd028f9520e14de493 Mon Sep 17 00:00:00 2001 From: jbroman Date: Thu, 27 Apr 2017 08:14:41 -0700 Subject: [PATCH 57/64] deps: cherry-pick a16c3c9 from upstream V8 Original commit message: Expose the ValueSerializer data format version as a compile-time constant. BUG=chromium:704293 Review-Url: https://codereview.chromium.org/2804643006 Cr-Commit-Position: refs/heads/master@{#44945} PR-URL: https://github.com/nodejs/node/pull/13515 Reviewed-By: James M Snell Reviewed-By: Anna Henningsen --- deps/v8/BUILD.gn | 1 + deps/v8/include/v8-value-serializer-version.h | 24 +++++++++++++++++++ deps/v8/include/v8.h | 2 -- deps/v8/src/api.cc | 5 ---- deps/v8/src/v8.gyp | 1 + deps/v8/src/value-serializer.cc | 8 +++---- deps/v8/src/value-serializer.h | 2 -- 7 files changed, 29 insertions(+), 14 deletions(-) create mode 100644 deps/v8/include/v8-value-serializer-version.h diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 80ff7340b1e87c..36cb97b4d35877 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -1019,6 +1019,7 @@ v8_header_set("v8_version") { configs = [ ":internal_config" ] sources = [ + "include/v8-value-serializer-version.h", "include/v8-version-string.h", "include/v8-version.h", ] diff --git a/deps/v8/include/v8-value-serializer-version.h b/deps/v8/include/v8-value-serializer-version.h new file mode 100644 index 00000000000000..c72911c64dc571 --- /dev/null +++ b/deps/v8/include/v8-value-serializer-version.h @@ -0,0 +1,24 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * Compile-time constants. + * + * This header provides access to information about the value serializer at + * compile time, without declaring or defining any symbols that require linking + * to V8. 
+ */ + +#ifndef INCLUDE_V8_VALUE_SERIALIZER_VERSION_H_ +#define INCLUDE_V8_VALUE_SERIALIZER_VERSION_H_ + +#include + +namespace v8 { + +constexpr uint32_t CurrentValueSerializerFormatVersion() { return 13; } + +} // namespace v8 + +#endif // INCLUDE_V8_VALUE_SERIALIZER_VERSION_H_ diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 6257939bd93a2c..5d80acc17d8636 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -1789,8 +1789,6 @@ class V8_EXPORT ValueSerializer { virtual void FreeBufferMemory(void* buffer); }; - static uint32_t GetCurrentDataFormatVersion(); - explicit ValueSerializer(Isolate* isolate); ValueSerializer(Isolate* isolate, Delegate* delegate); ~ValueSerializer(); diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index b4a9d177a7b36c..ed173c175fe6ce 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -3225,11 +3225,6 @@ struct ValueSerializer::PrivateData { i::ValueSerializer serializer; }; -// static -uint32_t ValueSerializer::GetCurrentDataFormatVersion() { - return i::ValueSerializer::GetCurrentDataFormatVersion(); -} - ValueSerializer::ValueSerializer(Isolate* isolate) : ValueSerializer(isolate, nullptr) {} diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp index 1f94a0680a5da5..bb5df28de641e5 100644 --- a/deps/v8/src/v8.gyp +++ b/deps/v8/src/v8.gyp @@ -389,6 +389,7 @@ '../include/v8-profiler.h', '../include/v8-testing.h', '../include/v8-util.h', + '../include/v8-value-serializer-version.h', '../include/v8-version-string.h', '../include/v8-version.h', '../include/v8.h', diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc index 44cb8dc1887720..1aa2525d99f560 100644 --- a/deps/v8/src/value-serializer.cc +++ b/deps/v8/src/value-serializer.cc @@ -6,6 +6,7 @@ #include +#include "include/v8-value-serializer-version.h" #include "src/base/logging.h" #include "src/conversions.h" #include "src/factory.h" @@ -30,6 +31,8 @@ namespace internal { // Version 13: host objects have an explicit tag (rather than handling all // unknown tags) static const uint32_t kLatestVersion = 13; +static_assert(kLatestVersion == v8::CurrentValueSerializerFormatVersion(), + "Exported format version must match latest version."); static const int kPretenureThreshold = 100 * KB; @@ -154,11 +157,6 @@ enum class WasmEncodingTag : uint8_t { } // namespace -// static -uint32_t ValueSerializer::GetCurrentDataFormatVersion() { - return kLatestVersion; -} - ValueSerializer::ValueSerializer(Isolate* isolate, v8::ValueSerializer::Delegate* delegate) : isolate_(isolate), diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h index 47a072283547d6..ef424698d0e59d 100644 --- a/deps/v8/src/value-serializer.h +++ b/deps/v8/src/value-serializer.h @@ -43,8 +43,6 @@ enum class SerializationTag : uint8_t; */ class ValueSerializer { public: - static uint32_t GetCurrentDataFormatVersion(); - ValueSerializer(Isolate* isolate, v8::ValueSerializer::Delegate* delegate); ~ValueSerializer(); From 06d419f8a4598be49512c11a6fda912b64e37d58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Mon, 12 Jun 2017 10:48:46 +0200 Subject: [PATCH 58/64] deps: cherry-pick 866ee63 from upstream V8 Original commit message: [string] Re-enable result caching for String.p.split Runtime::kStringSplit's result caching is only enabled when limit equals kMaxUInt32. 
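In user-level terms, the condition being restored here looks like this (a minimal sketch; the cache itself lives in Runtime::kStringSplit and is not observable from JavaScript except as a speed-up):

```js
// When no limit is passed, the spec default of 2**32 - 1 (kMaxUInt32) applies,
// and that is the only limit value for which the runtime caches split results.
const cached = 'a,b,c'.split(',');       // limit defaults to kMaxUInt32 -> cacheable
const uncached = 'a,b,c'.split(',', 2);  // explicit limit -> bypasses the cache
console.log(cached, uncached);           // [ 'a', 'b', 'c' ] [ 'a', 'b' ]
```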
BUG=v8:6463 Review-Url: https://codereview.chromium.org/2923183002 Cr-Commit-Position: refs/heads/master@{#45724} Fixes: https://github.com/nodejs/node/issues/13445 PR-URL: https://github.com/nodejs/node/pull/13515 Reviewed-By: James M Snell Reviewed-By: Anna Henningsen --- deps/v8/src/builtins/builtins-string-gen.cc | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index 25bf14ce2bd83d..ed559eadfdf911 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -1190,14 +1190,11 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) { }); // String and integer conversions. - // TODO(jgruber): The old implementation used Uint32Max instead of SmiMax - - // but AFAIK there should not be a difference since arrays are capped at Smi - // lengths. Callable tostring_callable = CodeFactory::ToString(isolate()); Node* const subject_string = CallStub(tostring_callable, context, receiver); Node* const limit_number = - Select(IsUndefined(limit), [=]() { return SmiConstant(Smi::kMaxValue); }, + Select(IsUndefined(limit), [=]() { return NumberConstant(kMaxUInt32); }, [=]() { return ToUint32(context, limit); }, MachineRepresentation::kTagged); Node* const separator_string = From 55a1231d819d58916e8d5b792c128e185598757b Mon Sep 17 00:00:00 2001 From: Bartosz Sosnowski Date: Mon, 12 Jun 2017 11:40:52 +0200 Subject: [PATCH 59/64] v8: fix debug builds on Windows Adds missing return which fixes debug builds on Windows Fixes: https://github.com/nodejs/node/issues/13392 Ref: https://codereview.chromium.org/2929993003/ PR-URL: https://github.com/nodejs/node/pull/13634 Reviewed-By: Refael Ackermann Reviewed-By: Anna Henningsen --- deps/v8/src/api.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index ed173c175fe6ce..0e77faff7ee141 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -437,7 +437,10 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) { i::V8::SetSnapshotBlob(snapshot_blob); } -void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); } +void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { + UNIMPLEMENTED(); + return nullptr; +} void v8::ArrayBuffer::Allocator::Free(void* data, size_t length, AllocationMode mode) { From da9304607cb9853d57d14d391e9c9bb9ad48162d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Mon, 19 Jun 2017 11:18:55 +0200 Subject: [PATCH 60/64] deps: update V8 to 5.9.211.37 PR-URL: https://github.com/nodejs/node/pull/13790 Reviewed-By: Ben Noordhuis Reviewed-By: Anna Henningsen --- deps/v8/include/v8-version.h | 2 +- deps/v8/src/compiler/escape-analysis.cc | 7 +++++ .../mjsunit/compiler/escape-analysis-17.js | 27 +++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 deps/v8/test/mjsunit/compiler/escape-analysis-17.js diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 277f1d929f384d..0d920efac46d52 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -11,7 +11,7 @@ #define V8_MAJOR_VERSION 5 #define V8_MINOR_VERSION 9 #define V8_BUILD_NUMBER 211 -#define V8_PATCH_LEVEL 35 +#define V8_PATCH_LEVEL 37 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) 
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc index 2e0adc6f85a0ea..75a73ffce9be8e 100644 --- a/deps/v8/src/compiler/escape-analysis.cc +++ b/deps/v8/src/compiler/escape-analysis.cc @@ -853,6 +853,13 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep, case IrOpcode::kObjectIsString: case IrOpcode::kObjectIsSymbol: case IrOpcode::kObjectIsUndetectable: + case IrOpcode::kNumberLessThan: + case IrOpcode::kNumberLessThanOrEqual: + case IrOpcode::kNumberEqual: +#define CASE(opcode) case IrOpcode::k##opcode: + SIMPLIFIED_NUMBER_BINOP_LIST(CASE) + SIMPLIFIED_NUMBER_UNOP_LIST(CASE) +#undef CASE if (SetEscaped(rep)) { TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n", rep->id(), rep->op()->mnemonic(), use->id(), diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-17.js b/deps/v8/test/mjsunit/compiler/escape-analysis-17.js new file mode 100644 index 00000000000000..5709d47129ebd7 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/escape-analysis-17.js @@ -0,0 +1,27 @@ +// Copyright 2017 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax --turbo-escape + +function foo() { + var a = {x:1}; + var b = {x:1.5, y: 1}; + var x = 0; + for (var i = 0; i < 1; i = {}) { + // The second iteration of this loop is dead code, leading to a + // contradiction between dynamic and static information. + x += a.x + 0.5; + x += a.x % 0.5; + x += Math.abs(a.x); + x += a.x < 6; + x += a.x === 7; + x += a.x <= 8; + a = b; + } + return x; +} +foo(); +foo(); +%OptimizeFunctionOnNextCall(foo); +foo(); From da1913c5e7f7f22b090c9ba8f5792ab3a7169add Mon Sep 17 00:00:00 2001 From: ochang Date: Fri, 5 May 2017 09:00:27 -0700 Subject: [PATCH 61/64] deps: cherry-pick 3f4536894ac from V8 upstream Original commit message: d8: Make in process stack dumping optional Adds a flag (--disable-in-process-stack-traces) to not install signal handlers so that e.g. ASan signal handlers will work. This flag mirrors chromium's one. R=jochen@chromium.org BUG=chromium:716235 Review-Url: https://codereview.chromium.org/2854173002 Cr-Commit-Position: refs/heads/master@{#45142} PR-URL: https://github.com/nodejs/node/pull/13985 Reviewed-By: Ben Noordhuis Reviewed-By: Colin Ihrig Reviewed-By: James M Snell --- deps/v8/include/libplatform/libplatform.h | 5 ++++- deps/v8/src/d8.cc | 12 +++++++++++- deps/v8/src/d8.h | 4 +++- deps/v8/src/libplatform/default-platform.cc | 9 ++++++--- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h index 55a10204ee2c11..f77742f0f6b00d 100644 --- a/deps/v8/include/libplatform/libplatform.h +++ b/deps/v8/include/libplatform/libplatform.h @@ -13,6 +13,7 @@ namespace v8 { namespace platform { enum class IdleTaskSupport { kDisabled, kEnabled }; +enum class InProcessStackDumping { kDisabled, kEnabled }; /** * Returns a new instance of the default v8::Platform implementation. @@ -27,7 +28,9 @@ enum class IdleTaskSupport { kDisabled, kEnabled }; */ V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform( int thread_pool_size = 0, - IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled); + IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled, + InProcessStackDumping in_process_stack_dumping = + InProcessStackDumping::kEnabled); /** * Pumps the message loop for the given isolate. 
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index cb29f4fd303726..62a6518d683e22 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -2575,6 +2575,9 @@ bool Shell::SetOptions(int argc, char* argv[]) { } else if (strncmp(argv[i], "--lcov=", 7) == 0) { options.lcov_file = argv[i] + 7; argv[i] = NULL; + } else if (strcmp(argv[i], "--disable-in-process-stack-traces") == 0) { + options.disable_in_process_stack_traces = true; + argv[i] = NULL; } } @@ -2998,10 +3001,17 @@ int Shell::Main(int argc, char* argv[]) { #endif // defined(_WIN32) || defined(_WIN64) if (!SetOptions(argc, argv)) return 1; v8::V8::InitializeICUDefaultLocation(argv[0], options.icu_data_file); + + v8::platform::InProcessStackDumping in_process_stack_dumping = + options.disable_in_process_stack_traces + ? v8::platform::InProcessStackDumping::kDisabled + : v8::platform::InProcessStackDumping::kEnabled; + g_platform = i::FLAG_verify_predictable ? new PredictablePlatform() : v8::platform::CreateDefaultPlatform( - 0, v8::platform::IdleTaskSupport::kEnabled); + 0, v8::platform::IdleTaskSupport::kEnabled, + in_process_stack_dumping); platform::tracing::TracingController* tracing_controller; if (options.trace_enabled) { diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index 8e0a8f615f3efd..d885817f7ec046 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -304,7 +304,8 @@ class ShellOptions { snapshot_blob(NULL), trace_enabled(false), trace_config(NULL), - lcov_file(NULL) {} + lcov_file(NULL), + disable_in_process_stack_traces(false) {} ~ShellOptions() { delete[] isolate_sources; @@ -336,6 +337,7 @@ class ShellOptions { bool trace_enabled; const char* trace_config; const char* lcov_file; + bool disable_in_process_stack_traces; }; class Shell : public i::AllStatic { diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc index 5cd80ad316dd2f..93dff69709b9e4 100644 --- a/deps/v8/src/libplatform/default-platform.cc +++ b/deps/v8/src/libplatform/default-platform.cc @@ -29,9 +29,12 @@ void PrintStackTrace() { } // namespace -v8::Platform* CreateDefaultPlatform(int thread_pool_size, - IdleTaskSupport idle_task_support) { - v8::base::debug::EnableInProcessStackDumping(); +v8::Platform* CreateDefaultPlatform( + int thread_pool_size, IdleTaskSupport idle_task_support, + InProcessStackDumping in_process_stack_dumping) { + if (in_process_stack_dumping == InProcessStackDumping::kEnabled) { + v8::base::debug::EnableInProcessStackDumping(); + } DefaultPlatform* platform = new DefaultPlatform(idle_task_support); platform->SetThreadPoolSize(thread_pool_size); platform->EnsureInitialized(); From 89961baf70590d0747444f84ae9f665b631479cf Mon Sep 17 00:00:00 2001 From: Anna Henningsen Date: Thu, 29 Jun 2017 13:33:54 +0200 Subject: [PATCH 62/64] src: fix process.abort() interaction with V8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since V8 5.9 V8 installs a default signal handler for some signals when creating a default platform instance that prints a stack trace. However, Node already does the same thing, so it would seem like the two different stack traces would be printed; also, the V8 handler would lead to a `SIGSEGV` under some circumstances, rather than letting the abort continue normally. Resolve this by disabling V8’s signal handler by default. 
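The user-visible contract, roughly what the new test added below asserts, can be sketched as:

```js
// Minimal sketch: aborting a child process now terminates it with SIGABRT on
// POSIX (exit code 3 on Windows) and prints a single Node.js stack dump,
// instead of racing with V8's own signal handler.
const { spawnSync } = require('child_process');
const result = spawnSync(process.execPath, ['-e', 'process.abort()']);
console.log(result.signal);  // 'SIGABRT' on POSIX; on Windows result.status === 3
```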
PR-URL: https://github.com/nodejs/node/pull/13985 Fixes: https://github.com/nodejs/node/issues/13865 Reviewed-By: Ben Noordhuis Reviewed-By: Colin Ihrig Reviewed-By: James M Snell --- src/node.cc | 5 ++++- test/abort/test-process-abort-exitcode.js | 24 +++++++++++++++++++++++ test/async-hooks/async-hooks.status | 21 -------------------- 3 files changed, 28 insertions(+), 22 deletions(-) create mode 100644 test/abort/test-process-abort-exitcode.js delete mode 100644 test/async-hooks/async-hooks.status diff --git a/src/node.cc b/src/node.cc index 3f0f6116139c34..741b5dea82b7ba 100644 --- a/src/node.cc +++ b/src/node.cc @@ -247,7 +247,10 @@ node::DebugOptions debug_options; static struct { #if NODE_USE_V8_PLATFORM void Initialize(int thread_pool_size) { - platform_ = v8::platform::CreateDefaultPlatform(thread_pool_size); + platform_ = v8::platform::CreateDefaultPlatform( + thread_pool_size, + v8::platform::IdleTaskSupport::kDisabled, + v8::platform::InProcessStackDumping::kDisabled); V8::InitializePlatform(platform_); tracing::TraceEventHelper::SetCurrentPlatform(platform_); } diff --git a/test/abort/test-process-abort-exitcode.js b/test/abort/test-process-abort-exitcode.js new file mode 100644 index 00000000000000..b29f9dc75cd202 --- /dev/null +++ b/test/abort/test-process-abort-exitcode.js @@ -0,0 +1,24 @@ +'use strict'; +const common = require('../common'); +const assert = require('assert'); + +// This test makes sure that an aborted node process +// exits with code 3 on Windows, and SIGABRT on POSIX. +// Spawn a child, force an abort, and then check the +// exit code in the parent. + +const spawn = require('child_process').spawn; +if (process.argv[2] === 'child') { + process.abort(); +} else { + const child = spawn(process.execPath, [__filename, 'child']); + child.on('exit', common.mustCall((code, signal) => { + if (common.isWindows) { + assert.strictEqual(code, 3); + assert.strictEqual(signal, null); + } else { + assert.strictEqual(code, null); + assert.strictEqual(signal, 'SIGABRT'); + } + })); +} diff --git a/test/async-hooks/async-hooks.status b/test/async-hooks/async-hooks.status deleted file mode 100644 index 500d5c3a76c2a9..00000000000000 --- a/test/async-hooks/async-hooks.status +++ /dev/null @@ -1,21 +0,0 @@ -prefix async-hooks - -# To mark a test as flaky, list the test name in the appropriate section -# below, without ".js", followed by ": PASS,FLAKY". Example: -# sample-test : PASS,FLAKY - -[true] # This section applies to all platforms - -[$system==win32] - -[$system==linux] -test-callback-error : PASS,FLAKY - -[$system==macos] -test-callback-error : PASS,FLAKY - -[$arch==arm || $arch==arm64] - -[$system==solaris] # Also applies to SmartOS - -[$system==freebsd] From 577b4f75f6532de275d134ecfe3f1abdfc2f0cd3 Mon Sep 17 00:00:00 2001 From: Igor Sheludko Date: Wed, 5 Jul 2017 20:29:13 +0200 Subject: [PATCH 63/64] deps: cherry-pick 6cb999b97b from V8 upstream MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Original commit message: Properly handle loads from global interceptor via prototype chain. ... when receiver is in dictionary mode. 
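For readability, the JavaScript that the new cctest below embeds boils down to the following pattern; the named-property interceptor on the global object only exists because the test installs one through the C++ API, so this is an illustration of the shape of the regression rather than a standalone reproduction:

```js
// A dictionary-mode receiver whose prototype chain ends at a global object
// that carries a named-property interceptor.
function read(obj) { return obj.foo; }            // 'foo' is answered by the interceptor
const obj = { __proto__: global, _str_foo: 42 };  // prototype is the (intercepted) global
for (let i = 0; i < 1500; i++) obj['p' + i] = 0;  // push obj into dictionary mode
for (let i = 0; i < 3; i++) read(obj);            // warm up the LoadIC
console.log(read(obj));                           // 42 in the test environment
```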
Bug: v8:6490 Change-Id: Ic5a8d214adcc4efd4cb163cbc6b351c4e6b596af Reviewed-on: https://chromium-review.googlesource.com/559548 Reviewed-by: Camillo Bruni Commit-Queue: Igor Sheludko Cr-Commit-Position: refs/heads/master@{#46428} Ref: https://chromium.googlesource.com/v8/v8.git/+/6cb999b97b7953ebfd4aabf2e1f62bf405f21c69 Fixes: https://github.com/nodejs/node/issues/13804 PR-URL: https://github.com/nodejs/node/pull/14188 Reviewed-By: Ben Noordhuis Reviewed-By: Michaël Zasso Reviewed-By: James M Snell --- deps/v8/include/v8-version.h | 2 +- deps/v8/src/ic/handler-configuration-inl.h | 5 +++ deps/v8/src/ic/handler-configuration.h | 3 ++ deps/v8/src/ic/ic.cc | 21 +++++++++--- deps/v8/test/cctest/test-api-interceptors.cc | 35 ++++++++++++++++++++ 5 files changed, 60 insertions(+), 6 deletions(-) diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 0d920efac46d52..f1b394a8c4a1ab 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -11,7 +11,7 @@ #define V8_MAJOR_VERSION 5 #define V8_MINOR_VERSION 9 #define V8_BUILD_NUMBER 211 -#define V8_PATCH_LEVEL 37 +#define V8_PATCH_LEVEL 38 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h index 2b9dc04b5a9395..5f31d15d46fcc3 100644 --- a/deps/v8/src/ic/handler-configuration-inl.h +++ b/deps/v8/src/ic/handler-configuration-inl.h @@ -13,6 +13,11 @@ namespace v8 { namespace internal { +// Decodes kind from Smi-handler. +LoadHandler::Kind LoadHandler::GetHandlerKind(Smi* smi_handler) { + return KindBits::decode(smi_handler->value()); +} + Handle LoadHandler::LoadNormal(Isolate* isolate) { int config = KindBits::encode(kNormal); return handle(Smi::FromInt(config), isolate); diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h index ab117d5c9bd5cb..eed548b4d5edeb 100644 --- a/deps/v8/src/ic/handler-configuration.h +++ b/deps/v8/src/ic/handler-configuration.h @@ -90,6 +90,9 @@ class LoadHandler { static const int kHolderCellIndex = 2; static const int kFirstPrototypeIndex = 3; + // Decodes kind from Smi-handler. + static inline Kind GetHandlerKind(Smi* smi_handler); + // Creates a Smi-handler for loading a property from a slow object. static inline Handle LoadNormal(Isolate* isolate); diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc index b3b0eb4c849e52..ca3f70df2ab41d 100644 --- a/deps/v8/src/ic/ic.cc +++ b/deps/v8/src/ic/ic.cc @@ -868,10 +868,15 @@ int GetPrototypeCheckCount(Isolate* isolate, Handle receiver_map, Handle(), 0); } +enum class HolderCellRequest { + kGlobalPropertyCell, + kHolder, +}; + Handle HolderCell(Isolate* isolate, Handle holder, - Handle name, Handle smi_handler) { - if (holder->IsJSGlobalObject() && - *smi_handler != *LoadHandler::LoadInterceptor(isolate)) { + Handle name, HolderCellRequest request) { + if (request == HolderCellRequest::kGlobalPropertyCell) { + DCHECK(holder->IsJSGlobalObject()); Handle global = Handle::cast(holder); GlobalDictionary* dict = global->global_dictionary(); int number = dict->FindEntry(name); @@ -908,8 +913,14 @@ Handle LoadIC::LoadFromPrototype(Handle receiver_map, Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate()); DCHECK(!validity_cell.is_null()); - Handle holder_cell = - HolderCell(isolate(), holder, name, smi_handler); + // LoadIC dispatcher expects PropertyCell as a "holder" in case of kGlobal + // handler kind. 
+ HolderCellRequest request = + LoadHandler::GetHandlerKind(*smi_handler) == LoadHandler::kGlobal + ? HolderCellRequest::kGlobalPropertyCell + : HolderCellRequest::kHolder; + + Handle holder_cell = HolderCell(isolate(), holder, name, request); if (checks_count == 0) { return isolate()->factory()->NewTuple3(holder_cell, smi_handler, diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc index 9680b4241b8521..9e739687b4c3c5 100644 --- a/deps/v8/test/cctest/test-api-interceptors.cc +++ b/deps/v8/test/cctest/test-api-interceptors.cc @@ -1383,6 +1383,41 @@ THREADED_TEST(InterceptorLoadGlobalICGlobalWithInterceptor) { CHECK(value->BooleanValue(context.local()).FromJust()); } +// Test load of a non-existing global through prototype chain when a global +// object has an interceptor. +THREADED_TEST(InterceptorLoadICGlobalWithInterceptor) { + i::FLAG_allow_natives_syntax = true; + v8::Isolate* isolate = CcTest::isolate(); + v8::HandleScope scope(isolate); + v8::Local templ_global = v8::ObjectTemplate::New(isolate); + templ_global->SetHandler(v8::NamedPropertyHandlerConfiguration( + GenericInterceptorGetter, GenericInterceptorSetter)); + + LocalContext context(nullptr, templ_global); + i::Handle global_proxy = + v8::Utils::OpenHandle(context->Global()); + CHECK(global_proxy->IsJSGlobalProxy()); + i::Handle global( + i::JSGlobalObject::cast(global_proxy->map()->prototype())); + CHECK(global->map()->has_named_interceptor()); + + ExpectInt32( + "(function() {" + " var f = function(obj) { " + " return obj.foo;" + " };" + " var obj = { __proto__: this, _str_foo: 42 };" + " for (var i = 0; i < 1500; i++) obj['p' + i] = 0;" + " /* Ensure that |obj| is in dictionary mode. */" + " if (%HasFastProperties(obj)) return -1;" + " for (var i = 0; i < 3; i++) {" + " f(obj);" + " };" + " return f(obj);" + "})();", + 42); +} + static void InterceptorLoadICGetter0( Local name, const v8::PropertyCallbackInfo& info) { ApiTestFuzzer::Fuzz(); From d4c2406f56e5c9220badd048dc09c9d52f6297fb Mon Sep 17 00:00:00 2001 From: Anna Henningsen Date: Mon, 24 Jul 2017 18:04:35 +0200 Subject: [PATCH 64/64] 2017-07-??, Version 8.3.0 (Current) Notable changes * **V8** * The V8 engine has been upgraded to version 5.9, which has a significantly changed performance profile. [#13515](https://github.com/nodejs/node/pull/13515) --- CHANGELOG.md | 3 +- doc/changelogs/CHANGELOG_V8.md | 77 ++++++++++++++++++++++++++++++++++ src/node_version.h | 6 +-- 3 files changed, 82 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab0fd9875432ee..ba634b10e3f358 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,7 +27,8 @@ release. -8.2.1
+8.3.0
+8.2.1
8.2.0
8.1.4
8.1.3
diff --git a/doc/changelogs/CHANGELOG_V8.md b/doc/changelogs/CHANGELOG_V8.md index c1f6b7f9c300a3..754c9e836a82ab 100644 --- a/doc/changelogs/CHANGELOG_V8.md +++ b/doc/changelogs/CHANGELOG_V8.md @@ -6,6 +6,7 @@ +8.3.0
8.2.1
8.2.0
8.1.4
@@ -28,6 +29,82 @@ * [io.js](CHANGELOG_IOJS.md) * [Archive](CHANGELOG_ARCHIVE.md) + +## 2017-07-??, Version 8.3.0 (Current), @addaleax + +### Notable changes + +* **V8** + * The V8 engine has been upgraded to version 5.9, which has a significantly + changed performance profile. + [#13515](https://github.com/nodejs/node/pull/13515) + +### Commits + +* [[`9b104b4ea8`](https://github.com/nodejs/node/commit/9b104b4ea8)] - **benchmark**: add assert map and set benchmarks (Ruben Bridgewater) [#14258](https://github.com/nodejs/node/pull/14258) +* [[`0ebb4dff17`](https://github.com/nodejs/node/commit/0ebb4dff17)] - **build**: codesign tarball binary on macOS (Evan Lucas) [#14179](https://github.com/nodejs/node/pull/14179) +* [[`5ab4471d72`](https://github.com/nodejs/node/commit/5ab4471d72)] - **build,tools**: do not force codesign prefix (Evan Lucas) [#14179](https://github.com/nodejs/node/pull/14179) +* [[`577b4f75f6`](https://github.com/nodejs/node/commit/577b4f75f6)] - **deps**: cherry-pick 6cb999b97b from V8 upstream (Igor Sheludko) [#14188](https://github.com/nodejs/node/pull/14188) +* [[`da1913c5e7`](https://github.com/nodejs/node/commit/da1913c5e7)] - **deps**: cherry-pick 3f4536894ac from V8 upstream (ochang) [#13985](https://github.com/nodejs/node/pull/13985) +* [[`da9304607c`](https://github.com/nodejs/node/commit/da9304607c)] - **deps**: update V8 to 5.9.211.37 (Michaël Zasso) [#13790](https://github.com/nodejs/node/pull/13790) +* [[`06d419f8a4`](https://github.com/nodejs/node/commit/06d419f8a4)] - **(SEMVER-MINOR)** **deps**: cherry-pick 866ee63 from upstream V8 (Michaël Zasso) [#13515](https://github.com/nodejs/node/pull/13515) +* [[`8e967299e7`](https://github.com/nodejs/node/commit/8e967299e7)] - **(SEMVER-MINOR)** **deps**: cherry-pick a16c3c9 from upstream V8 (jbroman) [#13515](https://github.com/nodejs/node/pull/13515) +* [[`83636a4cbe`](https://github.com/nodejs/node/commit/83636a4cbe)] - **deps**: backport 4fdf9fd4813 from upstream v8 (Jochen Eisinger) [#12875](https://github.com/nodejs/node/pull/12875) +* [[`65956e6e84`](https://github.com/nodejs/node/commit/65956e6e84)] - **(SEMVER-MINOR)** **deps**: cherry-pick 6d38f89 from upstream V8 (Michaël Zasso) [#13263](https://github.com/nodejs/node/pull/13263) +* [[`4c4f647420`](https://github.com/nodejs/node/commit/4c4f647420)] - **deps**: cherry-pick f5fad6d from upstream v8 (daniel.bevenius) [#12826](https://github.com/nodejs/node/pull/12826) +* [[`6204fadc19`](https://github.com/nodejs/node/commit/6204fadc19)] - **deps**: cherry-pick bfae9db from upstream v8 (Ben Noordhuis) [#12722](https://github.com/nodejs/node/pull/12722) +* [[`5fb7a0bceb`](https://github.com/nodejs/node/commit/5fb7a0bceb)] - **(SEMVER-MINOR)** **deps**: fix addons compilation with VS2013 (Bartosz Sosnowski) [#13263](https://github.com/nodejs/node/pull/13263) +* [[`ad928c070b`](https://github.com/nodejs/node/commit/ad928c070b)] - **(SEMVER-MINOR)** **deps**: add missing include to V8 i18n.cc (Michaël Zasso) [#13263](https://github.com/nodejs/node/pull/13263) +* [[`fae03e6e5b`](https://github.com/nodejs/node/commit/fae03e6e5b)] - **(SEMVER-MINOR)** **deps**: run memory hungry V8 test in exclusive mode (Michaël Zasso) [#13263](https://github.com/nodejs/node/pull/13263) +* [[`613c81e408`](https://github.com/nodejs/node/commit/613c81e408)] - **deps**: limit regress/regress-crbug-514081 v8 test (Michael Dawson) [#6678](https://github.com/nodejs/node/pull/6678) +* [[`272f4945cc`](https://github.com/nodejs/node/commit/272f4945cc)] - **(SEMVER-MINOR)** **deps**: 
update V8 to 5.9.211.35 (Michaël Zasso) [#13515](https://github.com/nodejs/node/pull/13515) +* [[`d9273ed5ed`](https://github.com/nodejs/node/commit/d9273ed5ed)] - **deps**: cherry-pick 18ea996 from c-ares upstream (Anna Henningsen) [#13883](https://github.com/nodejs/node/pull/13883) +* [[`6c6da38518`](https://github.com/nodejs/node/commit/6c6da38518)] - **doc**: fix some links (Vse Mozhet Byt) [#14400](https://github.com/nodejs/node/pull/14400) +* [[`83c8e5c517`](https://github.com/nodejs/node/commit/83c8e5c517)] - **doc**: describe labelling process for backports (Anna Henningsen) [#12431](https://github.com/nodejs/node/pull/12431) +* [[`592787ef4d`](https://github.com/nodejs/node/commit/592787ef4d)] - **doc**: error message are still major (Refael Ackermann) [#14375](https://github.com/nodejs/node/pull/14375) +* [[`f1b09c0a44`](https://github.com/nodejs/node/commit/f1b09c0a44)] - **doc**: fix typo in stream.md (Marc Hernández Cabot) [#14364](https://github.com/nodejs/node/pull/14364) +* [[`4be373bc4b`](https://github.com/nodejs/node/commit/4be373bc4b)] - **doc**: fixes default shell in child_process.md (Henry) [#14203](https://github.com/nodejs/node/pull/14203) +* [[`b12924d894`](https://github.com/nodejs/node/commit/b12924d894)] - **doc**: add XadillaX to collaborators (XadillaX) [#14388](https://github.com/nodejs/node/pull/14388) +* [[`dc0a26f254`](https://github.com/nodejs/node/commit/dc0a26f254)] - **doc**: replace dead link in v8 module (Devin Boyer) [#14372](https://github.com/nodejs/node/pull/14372) +* [[`d2121ab768`](https://github.com/nodejs/node/commit/d2121ab768)] - **doc**: fix minor typo in cluster.md (Lance Ball) [#14353](https://github.com/nodejs/node/pull/14353) +* [[`53ad91c3b1`](https://github.com/nodejs/node/commit/53ad91c3b1)] - **doc,stream**: \_transform happens one at a time (Matteo Collina) [#14321](https://github.com/nodejs/node/pull/14321) +* [[`f6a03439d8`](https://github.com/nodejs/node/commit/f6a03439d8)] - **docs**: add note about fs.rmdir() (Oleksandr Kushchak) [#14323](https://github.com/nodejs/node/pull/14323) +* [[`28f0693796`](https://github.com/nodejs/node/commit/28f0693796)] - **lib**: include cached modules in module.children (Ben Noordhuis) [#14132](https://github.com/nodejs/node/pull/14132) +* [[`fa134dd60c`](https://github.com/nodejs/node/commit/fa134dd60c)] - **n-api**: add fast paths for integer getters (Anna Henningsen) [#14393](https://github.com/nodejs/node/pull/14393) +* [[`e54f75b831`](https://github.com/nodejs/node/commit/e54f75b831)] - **readline**: remove the caching variable (Lyall Sun) [#14275](https://github.com/nodejs/node/pull/14275) +* [[`34821f6400`](https://github.com/nodejs/node/commit/34821f6400)] - **repl**: don't terminate on null thrown (Benjamin Gruenbaum) [#14306](https://github.com/nodejs/node/pull/14306) +* [[`32ba8aea0b`](https://github.com/nodejs/node/commit/32ba8aea0b)] - **repl**: fix old history error handling (Ruben Bridgewater) [#13733](https://github.com/nodejs/node/pull/13733) +* [[`89961baf70`](https://github.com/nodejs/node/commit/89961baf70)] - **src**: fix process.abort() interaction with V8 (Anna Henningsen) [#13985](https://github.com/nodejs/node/pull/13985) +* [[`552d2be625`](https://github.com/nodejs/node/commit/552d2be625)] - **test**: improve test-util-inspect (Peter Marshall) [#14003](https://github.com/nodejs/node/pull/14003) +* [[`0418a70d7c`](https://github.com/nodejs/node/commit/0418a70d7c)] - **test**: add non-internet resolveAny tests (Anna Henningsen) 
[#13883](https://github.com/nodejs/node/pull/13883) +* [[`265f159881`](https://github.com/nodejs/node/commit/265f159881)] - **test**: replace concatenation with template literals (Song, Bintao Garfield) [#14295](https://github.com/nodejs/node/pull/14295) +* [[`3414e42127`](https://github.com/nodejs/node/commit/3414e42127)] - **test**: replace concatenation with template literals (Zongmin Lei) [#14298](https://github.com/nodejs/node/pull/14298) +* [[`953736cdde`](https://github.com/nodejs/node/commit/953736cdde)] - **test**: move timing-dependent tests to sequential (Alexey Orlenko) [#14377](https://github.com/nodejs/node/pull/14377) +* [[`9b22acc29e`](https://github.com/nodejs/node/commit/9b22acc29e)] - **test**: fix flaky test-net-write-after-close (Rich Trott) [#14361](https://github.com/nodejs/node/pull/14361) +* [[`11ae8c33bd`](https://github.com/nodejs/node/commit/11ae8c33bd)] - **test**: delete obsolete test-sendfd.js (decareano) [#14334](https://github.com/nodejs/node/pull/14334) +* [[`99104e1b58`](https://github.com/nodejs/node/commit/99104e1b58)] - **test**: improve fs.exists coverage (jkzing) [#14301](https://github.com/nodejs/node/pull/14301) +* [[`e237720537`](https://github.com/nodejs/node/commit/e237720537)] - **test**: replace string concatenation with template (ziyun) [#14286](https://github.com/nodejs/node/pull/14286) +* [[`3c92b787d7`](https://github.com/nodejs/node/commit/3c92b787d7)] - **test**: use path.join in async-hooks/test-tlswrap.js (Vincent Xue) [#14319](https://github.com/nodejs/node/pull/14319) +* [[`0197ba00a5`](https://github.com/nodejs/node/commit/0197ba00a5)] - **test**: add comments for whatwg-url tests (Gautam Arora) [#14355](https://github.com/nodejs/node/pull/14355) +* [[`956a473107`](https://github.com/nodejs/node/commit/956a473107)] - **test**: move test-fs-largefile to pummel (Rich Trott) [#14338](https://github.com/nodejs/node/pull/14338) +* [[`c866c9078b`](https://github.com/nodejs/node/commit/c866c9078b)] - **test**: use path.join for long path concatenation (zzz) [#14280](https://github.com/nodejs/node/pull/14280) +* [[`94c7331277`](https://github.com/nodejs/node/commit/94c7331277)] - **test**: replace string concatenation with path.join (jkzing) [#14272](https://github.com/nodejs/node/pull/14272) +* [[`def98c6959`](https://github.com/nodejs/node/commit/def98c6959)] - **test**: replace string concatenation with template (Nathan Jiang) [#14342](https://github.com/nodejs/node/pull/14342) +* [[`3bc7d2a5ea`](https://github.com/nodejs/node/commit/3bc7d2a5ea)] - **test**: replace string concat in test-fs-watchfile.js (Helianthus21) [#14287](https://github.com/nodejs/node/pull/14287) +* [[`72febfd3b6`](https://github.com/nodejs/node/commit/72febfd3b6)] - **test**: replace concatenation with template literals (SkyAo) [#14296](https://github.com/nodejs/node/pull/14296) +* [[`b5d0a03a9e`](https://github.com/nodejs/node/commit/b5d0a03a9e)] - **test**: fix error handling test-http-full-response (Rich Trott) [#14252](https://github.com/nodejs/node/pull/14252) +* [[`ebb90900af`](https://github.com/nodejs/node/commit/ebb90900af)] - **tools**: skip workaround for newer llvm (nanaya) [#14077](https://github.com/nodejs/node/pull/14077) +* [[`c0ea5d8ce5`](https://github.com/nodejs/node/commit/c0ea5d8ce5)] - **tools**: always include llvm_version in config (nanaya) [#14077](https://github.com/nodejs/node/pull/14077) +* [[`55a1231d81`](https://github.com/nodejs/node/commit/55a1231d81)] - **v8**: fix debug builds on Windows (Bartosz Sosnowski) 
[#13634](https://github.com/nodejs/node/pull/13634) +* [[`e3fcdeffcb`](https://github.com/nodejs/node/commit/e3fcdeffcb)] - **v8**: backport pieces from 18a26cfe174 from upstream v8 (Peter Marshall) [#13217](https://github.com/nodejs/node/pull/13217) +* [[`9c7af15a8c`](https://github.com/nodejs/node/commit/9c7af15a8c)] - **v8**: backport 4f82f1d948c from upstream v8 (hpayer) [#13217](https://github.com/nodejs/node/pull/13217) +* [[`61a1f9cfe2`](https://github.com/nodejs/node/commit/61a1f9cfe2)] - **v8**: backport 4f82f1d948c from upstream v8 (hpayer) [#13217](https://github.com/nodejs/node/pull/13217) +* [[`e3c1119174`](https://github.com/nodejs/node/commit/e3c1119174)] - **v8**: backport a9e56f4f36d from upstream v8 (Peter Marshall) [#13217](https://github.com/nodejs/node/pull/13217) +* [[`da35ac3bb9`](https://github.com/nodejs/node/commit/da35ac3bb9)] - **v8**: backport bd59e7452be from upstream v8 (Michael Achenbach) [#13217](https://github.com/nodejs/node/pull/13217) +* [[`1e93589e8c`](https://github.com/nodejs/node/commit/1e93589e8c)] - **v8**: do not test v8 with -Werror (Anna Henningsen) [#12875](https://github.com/nodejs/node/pull/12875) +* [[`0be4d17cd4`](https://github.com/nodejs/node/commit/0be4d17cd4)] - **(SEMVER-MINOR)** **v8**: fix gcc 7 build errors (Michaël Zasso) [#13515](https://github.com/nodejs/node/pull/13515) +* [[`1906077ee8`](https://github.com/nodejs/node/commit/1906077ee8)] - **v8**: fix stack overflow in recursive method (Ben Noordhuis) [#12460](https://github.com/nodejs/node/pull/12460) + ## 2017-07-20, Version 8.2.1 (Current), @fishrock123 diff --git a/src/node_version.h b/src/node_version.h index 4fbeac60e805f4..5f1b5fe4723e8e 100644 --- a/src/node_version.h +++ b/src/node_version.h @@ -23,10 +23,10 @@ #define SRC_NODE_VERSION_H_ #define NODE_MAJOR_VERSION 8 -#define NODE_MINOR_VERSION 2 -#define NODE_PATCH_VERSION 1 +#define NODE_MINOR_VERSION 3 +#define NODE_PATCH_VERSION 0 -#define NODE_VERSION_IS_RELEASE 0 +#define NODE_VERSION_IS_RELEASE 1 #ifndef NODE_STRINGIFY #define NODE_STRINGIFY(n) NODE_STRINGIFY_HELPER(n)