profile
viewpoint

bengl/beatsio 38

A web-based live-coding environment for CoffeeScript!

bengl/btags 15

ctags generated using babel's AST parser

bengl/copher 8

A desktop GUI gopher client powered by carlo

bengl/cli-profile 7

generate cpu profiles of cli tools

bengl/cpuprofile2stackcollapse 3

Convert .cpuprofile files to "folded/collapsed" stacks for flamegraph.pl

bengl/contribution-report 2

creates contribution reports for github repositories

bengl/abstract-blob-store 0

A test suite and interface you can use to implement streaming file (blob) storage modules for various storage backends and platforms

bengl/activesmash 0

Higher-level MongoDB ODM that smells like Mongoose

push eventDataDog/dd-trace-js

Bryan English

commit sha bf1633685e25525284a3907fc24e2f2adea69514

PR fixes

view details

push time in 34 minutes

push eventDataDog/dd-trace-js

Bryan English

commit sha 1927772a47f78bb2e2604627fbdea903558690f7

inline flush to avoid losing already-computed msgpack

view details

push time in an hour

push eventDataDog/dd-trace-js

Bryan English

commit sha 5053c54c42c87a876dbca517206af658044935f2

inline flush to avoid losing already-computed msgpack

view details

push time in 4 hours

CommitCommentEvent

push eventDataDog/dd-trace-js

Bryan English

commit sha 4d31a7e4a32efe179ecac23daded5bd154735306

custom msgpack encoder checks before going over MAX_SIZE

view details

push time in a day

Pull request review commentDataDog/dd-trace-js

Performance updates

 class Writer {       headers[key] = value     }   }++  _encode (trace) {+    const offset = this._offset++    try {+      this._offset = encode(this._buffer, this._offset, trace)+      this._count+++    } catch (e) {+      if (e.name.startsWith('RangeError')) {+        if (offset === 0) {+          return log.error('Dropping trace because its payload is too large.')+        }++        this._offset = offset++        this.flush()+        this._encode(trace)+      } else {+        log.error(e)+      }++      return+    }++    log.debug(() => [+      'Added encoded trace to buffer:',+      this._buffer.slice(offset, this._offset).toString('hex').match(/../g).join(' ')+    ].join(' '))+  }++  _reset () {+    this._buffer = Buffer.allocUnsafe(10 * 1024 * 1024) // 10mb

With the change above, we'll put in the 8MB hard limit.

bengl

comment created time in a day

Pull request review commentDataDog/dd-trace-js

Performance updates

 class Writer {       headers[key] = value     }   }++  _encode (trace) {+    const offset = this._offset++    try {+      this._offset = encode(this._buffer, this._offset, trace)+      this._count+++    } catch (e) {+      if (e.name.startsWith('RangeError')) {+        if (offset === 0) {+          return log.error('Dropping trace because its payload is too large.')

I'll add a TODO to investigate whether the RangeError is too expensive.

bengl

comment created time in a day

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++const { Int64BE, Uint64BE } = require('int64-buffer')+const util = require('./util')+const tokens = require('./tokens')+const cachedString = require('./cache')(1024)++const fields = getFields()++const {+  headerBuffer,+  traceIdOffset,+  spanIdOffset,+  startOffset,+  durationOffset,+  errorOffset+} = (() => {+  const buffer = Buffer.alloc(1024)+  let offset = 0++  offset += copy(buffer, offset, fields.trace_id)+  offset += copy(buffer, offset, tokens.uint64)+  const traceIdOffset = offset+  new Uint64BE(buffer, offset, 0) // eslint-disable-line no-new+  offset += 8++  offset += copy(buffer, offset, fields.span_id)+  offset += copy(buffer, offset, tokens.uint64)+  const spanIdOffset = offset+  new Uint64BE(buffer, offset, 0) // eslint-disable-line no-new+  offset += 8++  offset += copy(buffer, offset, fields.start)+  offset += copy(buffer, offset, tokens.int64)+  const startOffset = offset+  new Int64BE(buffer, offset, 0) // eslint-disable-line no-new+  offset += 8++  offset += copy(buffer, offset, fields.duration)+  offset += copy(buffer, offset, tokens.int64)+  const durationOffset = offset+  new Int64BE(buffer, offset, 0) // eslint-disable-line no-new+  offset += 8++  offset += copy(buffer, offset, fields.error)+  const errorOffset = offset+  offset += copy(buffer, offset, tokens.int[0])++  return {+    headerBuffer: buffer.slice(0, offset),+    traceIdOffset,+    spanIdOffset,+    startOffset,+    durationOffset,+    errorOffset+  }+})()++function encode (buffer, offset, trace) {+  offset = writeArrayPrefix(buffer, offset, trace)++  for (const span of trace) {+    let fieldCount = 9++    span.parent_id && fieldCount+++    span.type && fieldCount+++    span.metrics && fieldCount++++    offset += copy(buffer, offset, tokens.map[fieldCount])++    offset += copyHeader(buffer, offset, span)++    if (span.parent_id) {+      offset += copy(buffer, offset, fields.parent_id)+      offset += copy(buffer, offset, tokens.uint64)+      offset += copy(buffer, 
offset, span.parent_id.toBuffer())+    }++    offset += copy(buffer, offset, fields.name)+    offset += write(buffer, offset, span.name)++    offset += copy(buffer, offset, fields.resource)+    offset += write(buffer, offset, span.resource)++    offset += copy(buffer, offset, fields.service)+    offset += write(buffer, offset, span.service)++    if (span.type) {+      offset += copy(buffer, offset, fields.type)+      offset += write(buffer, offset, span.type)+    }++    offset += copy(buffer, offset, fields.meta)+    offset = writeMap(buffer, offset, span.meta)++    if (span.metrics) {+      offset += copy(buffer, offset, fields.metrics)+      offset = writeMap(buffer, offset, span.metrics)+    }+  }++  buffer.write('', offset) // throw if offset is out of bounds++  return offset+}++function copyHeader (buffer, offset, span) {+  copy(headerBuffer, traceIdOffset, span.trace_id.toBuffer())+  copy(headerBuffer, spanIdOffset, span.span_id.toBuffer())+  new Uint64BE(headerBuffer, startOffset, span.start) // eslint-disable-line no-new

I'll look into just using a Buffer and doing the appropriate bitwise operations.

bengl

comment created time in a day

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++/* eslint-disable no-console */++const { spawn, fork } = require('child_process')+const { promisify } = require('util')+const { stat } = require('fs')+const { get: _get } = require('http')+const path = require('path')+const mongoService = require('../../packages/dd-trace/test/setup/services/mongo')+const autocannon = require('autocannon')++function cd (dir) {+  console.log('> cd', dir)+  process.chdir(dir)+}++function delay (ms) {+  return new Promise((resolve, reject) => {+    setTimeout(resolve, ms)+  })+}++function sh (cmd) {+  return new Promise((resolve, reject) => {+    console.log('>', cmd)+    spawn(cmd, [], { stdio: 'inherit', shell: true })+      .on('error', reject)+      .on('close', resolve)+  })+}++function forkProcess (file, options = {}) {+  return new Promise((resolve, reject) => {+    console.log(`> node ${options.execArgv ? options.execArgv.join(' ') + ' ' : ''}${file}`)+    options.stdio = 'pipe'+    const subProcess = fork(file, options)+    console.log('>> PID', subProcess.pid)+    subProcess.on('message', message => {+      if (message.ready) {+        resolve({ subProcess })+      }+    })+  })+}++const statAsync = promisify(stat)+async function exists (filename) {+  try {+    const stats = await statAsync(filename)+    return stats.isDirectory() || stats.isFile()+  } catch (e) {+    return false+  }+}++function get (url) {+  return new Promise((resolve, reject) => {+    _get(url, res => {+      const chunks = []+      res.on('data', d => chunks.push(d))+      res.on('end', () => {+        resolve(Buffer.concat(chunks).toString())+      })+    })+  })+}++async function checkDb () {+  console.log('# checking that db is populated')+  cd('acmeair-nodejs')+  const { subProcess } = await forkProcess('./app.js', {+    execArgv: process.execArgv.concat(['--require', '../datadog.js'])+  })++  const customers = await get('http://localhost:9080/rest/api/config/countCustomers')++  if (parseInt(customers) < 10000) {+    console.log('# 
populating db')+    await get('http://localhost:9080/rest/api/loader/load?numCustomers=10000')+  }++  subProcess.kill()+  cd(__dirname)+}++async function ensureAppIsInstalled () {+  cd(__dirname)+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs')))) {+    await sh('git clone git@github.com:acmeair/acmeair-nodejs.git')+  }+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs', 'node_modules')))) {+    cd('acmeair-nodejs')+    await sh('npm install')+    cd(__dirname)+  }+}++function runTest (url, duration) {+  return autocannon({ url, duration })+}++async function testBoth (url, duration, prof) {+  cd(__dirname)+  const { subProcess: agentProcess } = await forkProcess('./fake-agent.js')+  cd('acmeair-nodejs')+  const execArgv = ['--require', '../datadog.js']+  if (prof) {+    execArgv.unshift('--prof')+  }+  const { subProcess: airProcess } = await forkProcess('./app.js', {+    execArgv: execArgv.concat(process.execArgv),+    env: Object.assign({}, process.env, { DD_ENABLE: '1' })+  })++  await delay(2000)++  const resultWithTracer = await runTest(url, duration)++  airProcess.kill()+  agentProcess.kill()++  const { subProcess: airProcess2 } = await forkProcess('./app.js', {+    execArgv: execArgv.concat(process.execArgv)+  })++  const resultWithoutTracer = await runTest(url, duration)

I'll add a TODO to make sure the different tests are runnable in isolation.

bengl

comment created time in a day

Pull request review commentDataDog/dd-trace-js

Performance updates

 describe('Writer', () => {       '../../platform': platform,       '../../log': log,       '../../format': format,-      '../../encode': encode,+      '../../encode/index': encode,

It needs to be stubbed directly like this. Proxyquire didn't do the job otherwise. 🤷‍♂

bengl

comment created time in a day

Pull request review commentDataDog/dd-trace-js

Performance updates

 class Writer {       headers[key] = value     }   }++  _encode (trace) {+    const offset = this._offset++    try {+      this._offset = encode(this._buffer, this._offset, trace)+      this._count+++    } catch (e) {+      if (e.name.startsWith('RangeError')) {+        if (offset === 0) {+          return log.error('Dropping trace because its payload is too large.')

So the logic you're looking for is to continue writing the trace, but check on each write if we go out of bounds on the buffer, and if we do, reset the offset to before the trace, flush, and start over?

I'm a little worried about the perf impact of checking offset + newContentLength > MAX_SIZE before every single write, especially when node is already doing that check for us when we do any writes on Buffer instances.

IIRC (but maybe I don't), exceptions aren't as costly as they used to be, especially if you're not accessing the stack. Stack serialization is usually the biggest hit.

bengl

comment created time in a day

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++/* eslint-disable no-console */++const { spawn, fork } = require('child_process')+const { promisify } = require('util')+const { stat } = require('fs')+const { get: _get } = require('http')+const path = require('path')+const mongoService = require('../../packages/dd-trace/test/setup/services/mongo')+const autocannon = require('autocannon')++function cd (dir) {+  console.log('> cd', dir)+  process.chdir(dir)+}++function delay (ms) {+  return new Promise((resolve, reject) => {+    setTimeout(resolve, ms)+  })+}++function sh (cmd) {+  return new Promise((resolve, reject) => {+    console.log('>', cmd)+    spawn(cmd, [], { stdio: 'inherit', shell: true })+      .on('error', reject)+      .on('close', resolve)+  })+}++function forkProcess (file, options = {}) {+  return new Promise((resolve, reject) => {+    console.log(`> node ${options.execArgv ? options.execArgv.join(' ') + ' ' : ''}${file}`)+    options.stdio = 'pipe'+    const subProcess = fork(file, options)+    console.log('>> PID', subProcess.pid)+    subProcess.on('message', message => {+      if (message.ready) {+        resolve({ subProcess })+      }+    })+  })+}++const statAsync = promisify(stat)+async function exists (filename) {+  try {+    const stats = await statAsync(filename)+    return stats.isDirectory() || stats.isFile()+  } catch (e) {+    return false+  }+}++function get (url) {+  return new Promise((resolve, reject) => {+    _get(url, res => {+      const chunks = []+      res.on('data', d => chunks.push(d))+      res.on('end', () => {+        resolve(Buffer.concat(chunks).toString())+      })+    })+  })+}++async function checkDb () {+  console.log('# checking that db is populated')+  cd('acmeair-nodejs')+  const { subProcess } = await forkProcess('./app.js', {+    execArgv: process.execArgv.concat(['--require', '../datadog.js'])+  })++  const customers = await get('http://localhost:9080/rest/api/config/countCustomers')++  if (parseInt(customers) < 10000) {+    console.log('# 
populating db')+    await get('http://localhost:9080/rest/api/loader/load?numCustomers=10000')+  }++  subProcess.kill()+  cd(__dirname)+}++async function ensureAppIsInstalled () {+  cd(__dirname)+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs')))) {+    await sh('git clone git@github.com:acmeair/acmeair-nodejs.git')+  }+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs', 'node_modules')))) {+    cd('acmeair-nodejs')+    await sh('npm install')+    cd(__dirname)+  }+}++function runTest (url, duration) {+  return autocannon({ url, duration })+}++async function testBoth (url, duration, prof) {+  cd(__dirname)+  const { subProcess: agentProcess } = await forkProcess('./fake-agent.js')+  cd('acmeair-nodejs')+  const execArgv = ['--require', '../datadog.js']+  if (prof) {+    execArgv.unshift('--prof')+  }+  const { subProcess: airProcess } = await forkProcess('./app.js', {+    execArgv: execArgv.concat(process.execArgv),+    env: Object.assign({}, process.env, { DD_ENABLE: '1' })+  })++  await delay(2000)++  const resultWithTracer = await runTest(url, duration)++  airProcess.kill()+  agentProcess.kill()++  const { subProcess: airProcess2 } = await forkProcess('./app.js', {+    execArgv: execArgv.concat(process.execArgv)+  })++  const resultWithoutTracer = await runTest(url, duration)

It is intended to be run locally.

If you parallelize it locally, the tests will interfere with each other.

bengl

comment created time in a day

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++/* eslint-disable no-console */++const { spawn, fork } = require('child_process')+const { promisify } = require('util')+const { stat } = require('fs')+const { get: _get } = require('http')+const path = require('path')+const mongoService = require('../../packages/dd-trace/test/setup/services/mongo')+const autocannon = require('autocannon')++function cd (dir) {+  console.log('> cd', dir)+  process.chdir(dir)+}++function delay (ms) {+  return new Promise((resolve, reject) => {+    setTimeout(resolve, ms)+  })+}++function sh (cmd) {+  return new Promise((resolve, reject) => {+    console.log('>', cmd)+    spawn(cmd, [], { stdio: 'inherit', shell: true })+      .on('error', reject)+      .on('close', resolve)+  })+}++function forkProcess (file, options = {}) {+  return new Promise((resolve, reject) => {+    console.log(`> node ${options.execArgv ? options.execArgv.join(' ') + ' ' : ''}${file}`)+    options.stdio = 'pipe'+    const subProcess = fork(file, options)+    console.log('>> PID', subProcess.pid)+    subProcess.on('message', message => {+      if (message.ready) {+        resolve({ subProcess })+      }+    })+  })+}++const statAsync = promisify(stat)+async function exists (filename) {+  try {+    const stats = await statAsync(filename)+    return stats.isDirectory() || stats.isFile()+  } catch (e) {+    return false+  }+}++function get (url) {+  return new Promise((resolve, reject) => {+    _get(url, res => {+      const chunks = []+      res.on('data', d => chunks.push(d))+      res.on('end', () => {+        resolve(Buffer.concat(chunks).toString())+      })+    })+  })+}++async function checkDb () {+  console.log('# checking that db is populated')+  cd('acmeair-nodejs')+  const { subProcess } = await forkProcess('./app.js', {+    execArgv: process.execArgv.concat(['--require', '../datadog.js'])+  })++  const customers = await get('http://localhost:9080/rest/api/config/countCustomers')++  if (parseInt(customers) < 10000) {

Do you want to drop the collection by calling the db directly ahead of time, then?

bengl

comment created time in a day

release DataDog/dd-trace-js

v0.18.0

released time in 4 days

created tagDataDog/dd-trace-js

tag v0.18.0

JavaScript APM Tracer

created time in 4 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 677dd9b4a3c07daccf1b53540c99031ecbdec62a

v0.18.0

view details

push time in 4 days

PR merged DataDog/dd-trace-js

v0.18.0

Release PR

+2 -2

0 comment

2 changed files

bengl

pr closed time in 4 days

PR opened DataDog/dd-trace-js

v0.18.0

Release PR

+2 -2

0 comment

2 changed files

pr created time in 4 days

create branch DataDog/dd-trace-js

branch : v0.18

created branch time in 4 days

push eventDataDog/dd-trace-js

Roch Devost

commit sha 05ebe795ddbc1e2e6bafb137b8a77283ec5e47f1

add install script to avoid rebuild on install (#869) * add install script to avoid rebuild on install * update prebuilds script to skip missing platforms

view details

push time in 4 days

PR merged DataDog/dd-trace-js

add install script to avoid rebuild on install dev/tooling

What does this PR do?

<!-- A brief description of the change being made with this pull request. -->

Add install script to avoid rebuild on install and update prebuilds script to skip missing platforms.

Motivation

<!-- What inspired you to submit this pull request? -->

Removing the install script causes yarn to always rebuild the project. This is a regression for #867 but it will take some investigation to fix properly.

While testing this, I also noticed that the prebuilds script requires that all platforms have been built. This is unnecessary since we already have that guarantee in the CircleCI build.

+14 -9

0 comment

2 changed files

rochdev

pr closed time in 4 days

Pull request review commentDataDog/dd-trace-js

add install script to avoid rebuild on install

   "browser": "browser.js",   "typings": "index.d.ts",   "scripts": {+    "install": "node-gyp-build",

What? Won't this always build?

rochdev

comment created time in 4 days

push eventDataDog/dd-trace-js

Bryan English

commit sha a0d75f6be7c524c72ce7bbc2948de6526ef77eff

PR fixes

view details

push time in 5 days

push eventDataDog/dd-trace-js

Bryan English

commit sha caa693e94525249a2ce4dbeabaed204e47a9a740

PR fixes

view details

push time in 5 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 78638b61128b0bf6daf1ef47f710d9b20260babb

PR feedback

view details

push time in 5 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 65c641d64d20cc22d60e29f2302b93d6fe66f888

no need for spans-to-dot

view details

Bryan English

commit sha 9d59d2ec21497cefa6d6cc3595b37b90a019fe96

PR feedback

view details

push time in 5 days

pull request commentDataDog/dd-trace-js

remove postinstall script and publish extracted native addon

Won't this bloat the package size? Can you run a npm pack before and after the change and compare file sizes?

rochdev

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

 describe('Platform', () => {       describe('prefix', () => {         it('should support fixarray', () => {           const length = 0xf-          const array = new Array(length)-          const prefixed = msgpack.prefix(array)+          const array = Buffer.allocUnsafe(5)

Ok

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

 const msgpack = require('msgpack-lite') const codec = msgpack.createCodec({ int64: true }) const id = require('../src/id')+const { Int64BE } = require('int64-buffer') // TODO: remove dependency

I'm not sure we do, but I wasn't trying to remove it with these changes since it didn't appear to be a bottleneck.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

 class Scope extends Base {      this._trackAsyncScope = config.trackAsyncScope     this._current = null-    this._spans = Object.create(null)-    this._types = Object.create(null)+    this._spans = new Map()

I was testing in Node 13. In newer versions Maps are generally faster for usage as, well, maps, because they're faster than objects with no hidden class. What we're doing with the object here basically prevents a hidden class from happening.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

 function wrapEnd (req) {   res.writeHead = wrapWriteHead(req)    let _end = req._datadog.end = res.end = function () {-    req._datadog.beforeEnd.forEach(beforeEnd => beforeEnd())+    for (const beforeEnd of req._datadog.beforeEnd) beforeEnd()

Ok

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

 function startSpan (tracer, config, req, res, name) { }  function configureDatadogObject (tracer, span, req, res) {-  req._datadog.tracer = tracer-  req._datadog.span = span-  req._datadog.res = res+  const ddObj = req._datadog

This was an "every little bit helps" change. I can revert it if you want, but I'm pretty sure it doesn't hurt?

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

 class Writer {       headers[key] = value     }   }++  _encode (trace) {+    const offset = this._offset++    try {+      this._offset = encode(this._buffer, this._offset, trace)+      this._count+++    } catch (e) {+      if (e.name.startsWith('RangeError')) {+        if (offset === 0) {+          return log.error('Dropping trace because its payload is too large.')+        }++        this._offset = offset++        this.flush()+        this._encode(trace)+      } else {+        log.error(e)+      }++      return+    }++    log.debug(() => [+      'Added encoded trace to buffer:',+      this._buffer.slice(offset, this._offset).toString('hex').match(/../g).join(' ')+    ].join(' '))+  }++  _reset () {+    this._buffer = Buffer.allocUnsafe(10 * 1024 * 1024) // 10mb

It should be less than 10mb since the agent allows 10mb max including other things. A safe value would be 8mb.

8MB is the soft limit. See https://github.com/DataDog/dd-trace-js/pull/858/files/7f490b33356fffdd28e5bc7f69c525e7d311ed92#diff-aa97d0a40b15e6cb805331a6bc6d6936R28

I set the buffer to be 2MB bigger than the soft limit so that if a trace comes in that crosses that boundary, we'll still let it through so long as it's smaller than 2MB.

Also, why not just do this once per instance? Just clearing the first 5 bytes and updating the offset should be enough. It doesn't matter if garbage is still in the buffer.

Good point! Since writes are synchronous, we can do that. Also I don't even think the first 5 bytes need to be cleared, since we're just setting them afterward anyway, along with the offset.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

 class Writer {       headers[key] = value     }   }++  _encode (trace) {+    const offset = this._offset++    try {+      this._offset = encode(this._buffer, this._offset, trace)+      this._count+++    } catch (e) {+      if (e.name.startsWith('RangeError')) {+        if (offset === 0) {+          return log.error('Dropping trace because its payload is too large.')

This should actually be pretty rare. This should only happen in the event that the size of the a single trace is greater than 2MB and our timed flushes haven't flushed it yet and that single huge trace is at the end of the buffer.

Normally, when the MAX_SIZE (8MB) has been hit, we do a flush and so we'd usually not ever hit this. (https://github.com/DataDog/dd-trace-js/pull/858/files/7f490b33356fffdd28e5bc7f69c525e7d311ed92#diff-aa97d0a40b15e6cb805331a6bc6d6936R28)

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

  const platform = require('../../platform') const log = require('../../log')-const encode = require('../../encode')+const encode = require('../../encode/index')

Ok

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++const { Int64BE, Uint64BE } = require('int64-buffer')+const util = require('./util')+const tokens = require('./tokens')+const cachedString = require('./cache')(1024)++const fields = getFields()++const {+  headerBuffer,+  traceIdOffset,+  spanIdOffset,+  startOffset,+  durationOffset,+  errorOffset+} = (() => {+  const buffer = Buffer.alloc(1024)+  let offset = 0++  offset += copy(buffer, offset, fields.trace_id)+  offset += copy(buffer, offset, tokens.uint64)+  const traceIdOffset = offset+  new Uint64BE(buffer, offset, 0) // eslint-disable-line no-new+  offset += 8++  offset += copy(buffer, offset, fields.span_id)+  offset += copy(buffer, offset, tokens.uint64)+  const spanIdOffset = offset+  new Uint64BE(buffer, offset, 0) // eslint-disable-line no-new+  offset += 8++  offset += copy(buffer, offset, fields.start)+  offset += copy(buffer, offset, tokens.int64)+  const startOffset = offset+  new Int64BE(buffer, offset, 0) // eslint-disable-line no-new+  offset += 8++  offset += copy(buffer, offset, fields.duration)+  offset += copy(buffer, offset, tokens.int64)+  const durationOffset = offset+  new Int64BE(buffer, offset, 0) // eslint-disable-line no-new+  offset += 8++  offset += copy(buffer, offset, fields.error)+  const errorOffset = offset+  offset += copy(buffer, offset, tokens.int[0])++  return {+    headerBuffer: buffer.slice(0, offset),+    traceIdOffset,+    spanIdOffset,+    startOffset,+    durationOffset,+    errorOffset+  }+})()++function encode (buffer, offset, trace) {+  offset = writeArrayPrefix(buffer, offset, trace)++  for (const span of trace) {+    let fieldCount = 9++    span.parent_id && fieldCount+++    span.type && fieldCount+++    span.metrics && fieldCount++++    offset += copy(buffer, offset, tokens.map[fieldCount])++    offset += copyHeader(buffer, offset, span)++    if (span.parent_id) {+      offset += copy(buffer, offset, fields.parent_id)+      offset += copy(buffer, offset, tokens.uint64)+      offset += copy(buffer, 
offset, span.parent_id.toBuffer())+    }++    offset += copy(buffer, offset, fields.name)+    offset += write(buffer, offset, span.name)++    offset += copy(buffer, offset, fields.resource)+    offset += write(buffer, offset, span.resource)++    offset += copy(buffer, offset, fields.service)+    offset += write(buffer, offset, span.service)++    if (span.type) {+      offset += copy(buffer, offset, fields.type)+      offset += write(buffer, offset, span.type)+    }++    offset += copy(buffer, offset, fields.meta)+    offset = writeMap(buffer, offset, span.meta)++    if (span.metrics) {+      offset += copy(buffer, offset, fields.metrics)+      offset = writeMap(buffer, offset, span.metrics)+    }+  }++  buffer.write('', offset) // throw if offset is out of bounds++  return offset+}++function copyHeader (buffer, offset, span) {+  copy(headerBuffer, traceIdOffset, span.trace_id.toBuffer())+  copy(headerBuffer, spanIdOffset, span.span_id.toBuffer())+  new Uint64BE(headerBuffer, startOffset, span.start) // eslint-disable-line no-new

At this point, span.start is a JS number. This seemed to be the easiest conversion.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++/* eslint-disable no-console */++const { spawn, fork } = require('child_process')+const { promisify } = require('util')+const { stat } = require('fs')+const { get: _get } = require('http')+const path = require('path')+const mongoService = require('../../packages/dd-trace/test/setup/services/mongo')+const autocannon = require('autocannon')++function cd (dir) {+  console.log('> cd', dir)+  process.chdir(dir)+}++function delay (ms) {+  return new Promise((resolve, reject) => {+    setTimeout(resolve, ms)+  })+}++function sh (cmd) {+  return new Promise((resolve, reject) => {+    console.log('>', cmd)+    spawn(cmd, [], { stdio: 'inherit', shell: true })+      .on('error', reject)+      .on('close', resolve)+  })+}++function forkProcess (file, options = {}) {+  return new Promise((resolve, reject) => {+    console.log(`> node ${options.execArgv ? options.execArgv.join(' ') + ' ' : ''}${file}`)+    options.stdio = 'pipe'+    const subProcess = fork(file, options)+    console.log('>> PID', subProcess.pid)+    subProcess.on('message', message => {+      if (message.ready) {+        resolve({ subProcess })+      }+    })+  })+}++const statAsync = promisify(stat)+async function exists (filename) {+  try {+    const stats = await statAsync(filename)+    return stats.isDirectory() || stats.isFile()+  } catch (e) {+    return false+  }+}++function get (url) {+  return new Promise((resolve, reject) => {+    _get(url, res => {+      const chunks = []+      res.on('data', d => chunks.push(d))+      res.on('end', () => {+        resolve(Buffer.concat(chunks).toString())+      })+    })+  })+}++async function checkDb () {+  console.log('# checking that db is populated')+  cd('acmeair-nodejs')+  const { subProcess } = await forkProcess('./app.js', {+    execArgv: process.execArgv.concat(['--require', '../datadog.js'])+  })++  const customers = await get('http://localhost:9080/rest/api/config/countCustomers')++  if (parseInt(customers) < 10000) {+    console.log('# 
populating db')+    await get('http://localhost:9080/rest/api/loader/load?numCustomers=10000')+  }++  subProcess.kill()+  cd(__dirname)+}++async function ensureAppIsInstalled () {+  cd(__dirname)+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs')))) {+    await sh('git clone git@github.com:acmeair/acmeair-nodejs.git')+  }+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs', 'node_modules')))) {+    cd('acmeair-nodejs')+    await sh('npm install')+    cd(__dirname)+  }+}++function runTest (url, duration) {+  return autocannon({ url, duration })+}++async function testBoth (url, duration, prof) {+  cd(__dirname)+  const { subProcess: agentProcess } = await forkProcess('./fake-agent.js')+  cd('acmeair-nodejs')+  const execArgv = ['--require', '../datadog.js']+  if (prof) {+    execArgv.unshift('--prof')+  }+  const { subProcess: airProcess } = await forkProcess('./app.js', {+    execArgv: execArgv.concat(process.execArgv),+    env: Object.assign({}, process.env, { DD_ENABLE: '1' })+  })++  await delay(2000)++  const resultWithTracer = await runTest(url, duration)++  airProcess.kill()+  agentProcess.kill()++  const { subProcess: airProcess2 } = await forkProcess('./app.js', {+    execArgv: execArgv.concat(process.execArgv)+  })++  const resultWithoutTracer = await runTest(url, duration)

I think I know what you mean, but could you clarify anyway?

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++/* eslint-disable no-console */++const { spawn, fork } = require('child_process')+const { promisify } = require('util')+const { stat } = require('fs')+const { get: _get } = require('http')+const path = require('path')+const mongoService = require('../../packages/dd-trace/test/setup/services/mongo')+const autocannon = require('autocannon')++function cd (dir) {+  console.log('> cd', dir)+  process.chdir(dir)+}++function delay (ms) {+  return new Promise((resolve, reject) => {+    setTimeout(resolve, ms)+  })+}++function sh (cmd) {+  return new Promise((resolve, reject) => {+    console.log('>', cmd)+    spawn(cmd, [], { stdio: 'inherit', shell: true })+      .on('error', reject)+      .on('close', resolve)+  })+}++function forkProcess (file, options = {}) {+  return new Promise((resolve, reject) => {+    console.log(`> node ${options.execArgv ? options.execArgv.join(' ') + ' ' : ''}${file}`)+    options.stdio = 'pipe'+    const subProcess = fork(file, options)+    console.log('>> PID', subProcess.pid)+    subProcess.on('message', message => {+      if (message.ready) {+        resolve({ subProcess })+      }+    })+  })+}++const statAsync = promisify(stat)+async function exists (filename) {+  try {+    const stats = await statAsync(filename)+    return stats.isDirectory() || stats.isFile()+  } catch (e) {+    return false+  }+}++function get (url) {+  return new Promise((resolve, reject) => {+    _get(url, res => {+      const chunks = []+      res.on('data', d => chunks.push(d))+      res.on('end', () => {+        resolve(Buffer.concat(chunks).toString())+      })+    })+  })+}++async function checkDb () {+  console.log('# checking that db is populated')+  cd('acmeair-nodejs')+  const { subProcess } = await forkProcess('./app.js', {+    execArgv: process.execArgv.concat(['--require', '../datadog.js'])+  })++  const customers = await get('http://localhost:9080/rest/api/config/countCustomers')++  if (parseInt(customers) < 10000) {+    console.log('# 
populating db')+    await get('http://localhost:9080/rest/api/loader/load?numCustomers=10000')+  }++  subProcess.kill()+  cd(__dirname)+}++async function ensureAppIsInstalled () {+  cd(__dirname)+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs')))) {+    await sh('git clone git@github.com:acmeair/acmeair-nodejs.git')+  }+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs', 'node_modules')))) {+    cd('acmeair-nodejs')+    await sh('npm install')+    cd(__dirname)+  }+}++function runTest (url, duration) {+  return autocannon({ url, duration })+}++async function testBoth (url, duration, prof) {+  cd(__dirname)+  const { subProcess: agentProcess } = await forkProcess('./fake-agent.js')+  cd('acmeair-nodejs')+  const execArgv = ['--require', '../datadog.js']+  if (prof) {+    execArgv.unshift('--prof')+  }+  const { subProcess: airProcess } = await forkProcess('./app.js', {+    execArgv: execArgv.concat(process.execArgv),+    env: Object.assign({}, process.env, { DD_ENABLE: '1' })+  })++  await delay(2000)

This probably dates back to before I was having the processes notify when they're ready. I'll see if I can remove it.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++/* eslint-disable no-console */++const { spawn, fork } = require('child_process')+const { promisify } = require('util')+const { stat } = require('fs')+const { get: _get } = require('http')+const path = require('path')+const mongoService = require('../../packages/dd-trace/test/setup/services/mongo')+const autocannon = require('autocannon')++function cd (dir) {+  console.log('> cd', dir)+  process.chdir(dir)+}++function delay (ms) {+  return new Promise((resolve, reject) => {+    setTimeout(resolve, ms)+  })+}++function sh (cmd) {+  return new Promise((resolve, reject) => {+    console.log('>', cmd)+    spawn(cmd, [], { stdio: 'inherit', shell: true })+      .on('error', reject)+      .on('close', resolve)+  })+}++function forkProcess (file, options = {}) {+  return new Promise((resolve, reject) => {+    console.log(`> node ${options.execArgv ? options.execArgv.join(' ') + ' ' : ''}${file}`)+    options.stdio = 'pipe'+    const subProcess = fork(file, options)+    console.log('>> PID', subProcess.pid)+    subProcess.on('message', message => {+      if (message.ready) {+        resolve({ subProcess })+      }+    })+  })+}++const statAsync = promisify(stat)+async function exists (filename) {+  try {+    const stats = await statAsync(filename)+    return stats.isDirectory() || stats.isFile()+  } catch (e) {+    return false+  }+}++function get (url) {+  return new Promise((resolve, reject) => {+    _get(url, res => {+      const chunks = []+      res.on('data', d => chunks.push(d))+      res.on('end', () => {+        resolve(Buffer.concat(chunks).toString())+      })+    })+  })+}++async function checkDb () {+  console.log('# checking that db is populated')+  cd('acmeair-nodejs')+  const { subProcess } = await forkProcess('./app.js', {+    execArgv: process.execArgv.concat(['--require', '../datadog.js'])+  })++  const customers = await get('http://localhost:9080/rest/api/config/countCustomers')++  if (parseInt(customers) < 10000) {+    console.log('# 
populating db')+    await get('http://localhost:9080/rest/api/loader/load?numCustomers=10000')+  }++  subProcess.kill()+  cd(__dirname)+}++async function ensureAppIsInstalled () {+  cd(__dirname)+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs')))) {+    await sh('git clone git@github.com:acmeair/acmeair-nodejs.git')+  }+  if (!(await exists(path.join(__dirname, 'acmeair-nodejs', 'node_modules')))) {+    cd('acmeair-nodejs')+    await sh('npm install')

I had no idea where this would be run and didn't want to force yarn on anyone (or any environment).

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'++/* eslint-disable no-console */++const { spawn, fork } = require('child_process')+const { promisify } = require('util')+const { stat } = require('fs')+const { get: _get } = require('http')+const path = require('path')+const mongoService = require('../../packages/dd-trace/test/setup/services/mongo')+const autocannon = require('autocannon')++function cd (dir) {+  console.log('> cd', dir)+  process.chdir(dir)+}++function delay (ms) {+  return new Promise((resolve, reject) => {+    setTimeout(resolve, ms)+  })+}++function sh (cmd) {+  return new Promise((resolve, reject) => {+    console.log('>', cmd)+    spawn(cmd, [], { stdio: 'inherit', shell: true })+      .on('error', reject)+      .on('close', resolve)+  })+}++function forkProcess (file, options = {}) {+  return new Promise((resolve, reject) => {+    console.log(`> node ${options.execArgv ? options.execArgv.join(' ') + ' ' : ''}${file}`)+    options.stdio = 'pipe'+    const subProcess = fork(file, options)+    console.log('>> PID', subProcess.pid)+    subProcess.on('message', message => {+      if (message.ready) {+        resolve({ subProcess })+      }+    })+  })+}++const statAsync = promisify(stat)+async function exists (filename) {+  try {+    const stats = await statAsync(filename)+    return stats.isDirectory() || stats.isFile()+  } catch (e) {+    return false+  }+}++function get (url) {+  return new Promise((resolve, reject) => {+    _get(url, res => {+      const chunks = []+      res.on('data', d => chunks.push(d))+      res.on('end', () => {+        resolve(Buffer.concat(chunks).toString())+      })+    })+  })+}++async function checkDb () {+  console.log('# checking that db is populated')+  cd('acmeair-nodejs')+  const { subProcess } = await forkProcess('./app.js', {+    execArgv: process.execArgv.concat(['--require', '../datadog.js'])+  })++  const customers = await get('http://localhost:9080/rest/api/config/countCustomers')++  if (parseInt(customers) < 10000) {

We'd have to delete the existing collection first, for which there's no handy endpoint. Otherwise it just adds new ones and grows the DB. This is easier.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'+const fs = require('fs')+const http = require('http')+// const msgpack = require('msgpack-lite')+// const codec = msgpack.createCodec({ int64: true })++const output = fs.createWriteStream(`./spans.nsjson`)++const agent = http.createServer((req, res) => {+  concatStream(req, body => {+    if (body.length === 0) return res.status(200).send()+    //    body = msgpack.decode(body, { codec })+    //    body.forEach(trace => {+    //      trace.forEach(span => {+    //        output.write(JSON.stringify(span) + '\n')+    //      })+    //    })+    res.statusCode = 200+    res.end(JSON.stringify({ rate_by_service: { 'service:,env:': 1 } }))+  })+})++agent.listen(8126, () => {

Ok

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'+const fs = require('fs')+const http = require('http')+// const msgpack = require('msgpack-lite')+// const codec = msgpack.createCodec({ int64: true })++const output = fs.createWriteStream(`./spans.nsjson`)

"newline-separated JSON"

Again, not really needed anymore.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'+const fs = require('fs')+const http = require('http')+// const msgpack = require('msgpack-lite')

Yeah I need to clean this file up. trace-cat removes the need for most of this anyway.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+'use strict'

Since it's also used for the async_hooks test, maybe preamble.js is better?

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+# End-to-End Benchmarking++The purpose of this folder is to be able to test the overhead of dd-trace on an+application. The primary focus here is on easily measurable metrics like+latency, RPS and throughput.++We're using a sample app called AcmeAir, which is used by Node.js to benchmark+itself (results are at <https://benchmarking.nodejs.org/>). Load is produced+with [autocannon](https://npm.im/autocannon), which also gives us results. We+test with and without the tracer to get a measure of overhead. We test using two+separate endpoints that are measured independently, to get a measure of worst+case (a static landing page) and a more realistic case (a DB call is done).++## Requirements++This test should work with all versions of Node.js supported by dd-trace. In+addition, the sample app uses MongoDB, so you'll have to have that running and+listening on the default port. If you're set up with the `docker-compose.yml` in+the root of this repo, you should be ready.++## Usage++To start the test, run `node test` in this directory. This will install AcmeAir+if it hasn't yet been installed, and populate MongoDB if that hasn't already+been done.++Next, it will run the test for 10 seconds each on each of the 2 endpoints, both+with and without the tracer loaded. That means 40 seconds of testing. Results+will appear on stdout.++You can change the duration of the tests by setting the `DURATION` environment+variable to the number of seconds to run. Keep in mind that this will be run 4+times, so if you set it to `60`, you'll have to wait 4 minutes before it's done.++### Profiling, Method 1++To profile the app, the easiest thing to do is set the `PROF` environment+variable to a truthy string. This adds `--prof` to the node processes, which+writes a file called `isolate-0x${SOMEHEX}-${PID}-v8.log` for each of the 4+tests. 
You can then use `node --prof-process` or a tool like+[pflames](https://npm.im/pflames) to view the profile data.++### Profiling, Method 2++You can run the app manually, using a tool like [0x](https://npm.im/0x) to get+profiling data. To do that, you'll need to run the fake agent (`node+fake-agent.js`) and run the app using `datadog.js` as a pre-require. You'll also+need to set `DD_ENABLE=1`, which is the switch used to turn on tracing for the

2 options:

  1. DD_BENCH_TRACE_ENABLE or something like that
  2. Re-use an existing option?
bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+# End-to-End Benchmarking++The purpose of this folder is to be able to test the overhead of dd-trace on an+application. The primary focus here is on easily measurable metrics like+latency, RPS and throughput.++We're using a sample app called AcmeAir, which is used by Node.js to benchmark+itself (results are at <https://benchmarking.nodejs.org/>). Load is produced+with [autocannon](https://npm.im/autocannon), which also gives us results. We+test with and without the tracer to get a measure of overhead. We test using two+separate endpoints that are measured independently, to get a measure of worst+case (a static landing page) and a more realistic case (a DB call is done).++## Requirements++This test should work with all versions of Node.js supported by dd-trace. In+addition, the sample app uses MongoDB, so you'll have to have that running and+listening on the default port. If you're set up with the `docker-compose.yml` in+the root of this repo, you should be ready.++## Usage++To start the test, run `node test` in this directory. This will install AcmeAir+if it hasn't yet been installed, and populate MongoDB if that hasn't already+been done.++Next, it will run the test for 10 seconds each on each of the 2 endpoints, both+with and without the tracer loaded. That means 40 seconds of testing. Results+will appear on stdout.++You can change the duration of the tests by setting the `DURATION` environment+variable to the number of seconds to run. Keep in mind that this will be run 4+times, so if you set it to `60`, you'll have to wait 4 minutes before it's done.++### Profiling, Method 1++To profile the app, the easiest thing to do is set the `PROF` environment+variable to a truthy string. This adds `--prof` to the node processes, which+writes a file called `isolate-0x${SOMEHEX}-${PID}-v8.log` for each of the 4

the $PID, which is outputted on each run. Unfortunately node doesn't output the file name for each run so we need to interpret based on PID, etc. At the moment, it's still not clear which test run is which, so I'll add some extra output to clarify.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+# End-to-End Benchmarking++The purpose of this folder is to be able to test the overhead of dd-trace on an+application. The primary focus here is on easily measurable metrics like+latency, RPS and throughput.++We're using a sample app called AcmeAir, which is used by Node.js to benchmark+itself (results are at <https://benchmarking.nodejs.org/>). Load is produced+with [autocannon](https://npm.im/autocannon), which also gives us results. We+test with and without the tracer to get a measure of overhead. We test using two+separate endpoints that are measured independently, to get a measure of worst+case (a static landing page) and a more realistic case (a DB call is done).++## Requirements++This test should work with all versions of Node.js supported by dd-trace. In+addition, the sample app uses MongoDB, so you'll have to have that running and+listening on the default port. If you're set up with the `docker-compose.yml` in+the root of this repo, you should be ready.++## Usage++To start the test, run `node test` in this directory. This will install AcmeAir+if it hasn't yet been installed, and populate MongoDB if that hasn't already+been done.++Next, it will run the test for 10 seconds each on each of the 2 endpoints, both+with and without the tracer loaded. That means 40 seconds of testing. Results+will appear on stdout.++You can change the duration of the tests by setting the `DURATION` environment+variable to the number of seconds to run. Keep in mind that this will be run 4+times, so if you set it to `60`, you'll have to wait 4 minutes before it's done.++### Profiling, Method 1++To profile the app, the easiest thing to do is set the `PROF` environment

DD_BENCH_PROF?

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+# End-to-End Benchmarking++The purpose of this folder is to be able to test the overhead of dd-trace on an+application. The primary focus here is on easily measurable metrics like+latency, RPS and throughput.++We're using a sample app called AcmeAir, which is used by Node.js to benchmark+itself (results are at <https://benchmarking.nodejs.org/>). Load is produced+with [autocannon](https://npm.im/autocannon), which also gives us results. We+test with and without the tracer to get a measure of overhead. We test using two+separate endpoints that are measured independently, to get a measure of worst+case (a static landing page) and a more realistic case (a DB call is done).++## Requirements++This test should work with all versions of Node.js supported by dd-trace. In+addition, the sample app uses MongoDB, so you'll have to have that running and+listening on the default port. If you're set up with the `docker-compose.yml` in+the root of this repo, you should be ready.++## Usage++To start the test, run `node test` in this directory. This will install AcmeAir+if it hasn't yet been installed, and populate MongoDB if that hasn't already+been done.++Next, it will run the test for 10 seconds each on each of the 2 endpoints, both+with and without the tracer loaded. That means 40 seconds of testing. Results+will appear on stdout.++You can change the duration of the tests by setting the `DURATION` environment

DD_BENCH_DURATION?

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+# End-to-End Benchmarking++The purpose of this folder is to be able to test the overhead of dd-trace on an+application. The primary focus here is on easily measurable metrics like+latency, RPS and throughput.++We're using a sample app called AcmeAir, which is used by Node.js to benchmark+itself (results are at <https://benchmarking.nodejs.org/>). Load is produced+with [autocannon](https://npm.im/autocannon), which also gives us results. We+test with and without the tracer to get a measure of overhead. We test using two+separate endpoints that are measured independently, to get a measure of worst+case (a static landing page) and a more realistic case (a DB call is done).++## Requirements++This test should work with all versions of Node.js supported by dd-trace. In+addition, the sample app uses MongoDB, so you'll have to have that running and+listening on the default port. If you're set up with the `docker-compose.yml` in+the root of this repo, you should be ready.++## Usage++To start the test, run `node test` in this directory. This will install AcmeAir+if it hasn't yet been installed, and populate MongoDB if that hasn't already+been done.++Next, it will run the test for 10 seconds each on each of the 2 endpoints, both

I can clarify (and as mentioned below it's actually 6 and 60 now), but yeah it's in series to $DURATION seconds (default 10) each, therefore a total run time of 6 x $DURATION + some insignificant time to calculate and format results.

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+# End-to-End Benchmarking++The purpose of this folder is to be able to test the overhead of dd-trace on an+application. The primary focus here is on easily measurable metrics like+latency, RPS and throughput.++We're using a sample app called AcmeAir, which is used by Node.js to benchmark+itself (results are at <https://benchmarking.nodejs.org/>). Load is produced+with [autocannon](https://npm.im/autocannon), which also gives us results. We+test with and without the tracer to get a measure of overhead. We test using two+separate endpoints that are measured independently, to get a measure of worst+case (a static landing page) and a more realistic case (a DB call is done).++## Requirements++This test should work with all versions of Node.js supported by dd-trace. In+addition, the sample app uses MongoDB, so you'll have to have that running and+listening on the default port. If you're set up with the `docker-compose.yml` in+the root of this repo, you should be ready.++## Usage++To start the test, run `node test` in this directory. This will install AcmeAir

How about node benchmark-run?

bengl

comment created time in 5 days

Pull request review commentDataDog/dd-trace-js

Performance updates

+# End-to-End Benchmarking++The purpose of this folder is to be able to test the overhead of dd-trace on an+application. The primary focus here is on easily measurable metrics like+latency, RPS and throughput.++We're using a sample app called AcmeAir, which is used by Node.js to benchmark+itself (results are at <https://benchmarking.nodejs.org/>). Load is produced+with [autocannon](https://npm.im/autocannon), which also gives us results. We+test with and without the tracer to get a measure of overhead. We test using two+separate endpoints that are measured independently, to get a measure of worst+case (a static landing page) and a more realistic case (a DB call is done).++## Requirements++This test should work with all versions of Node.js supported by dd-trace. In+addition, the sample app uses MongoDB, so you'll have to have that running and+listening on the default port. If you're set up with the `docker-compose.yml` in+the root of this repo, you should be ready.++## Usage++To start the test, run `node test` in this directory. This will install AcmeAir+if it hasn't yet been installed, and populate MongoDB if that hasn't already+been done.++Next, it will run the test for 10 seconds each on each of the 2 endpoints, both+with and without the tracer loaded. That means 40 seconds of testing. Results+will appear on stdout.++You can change the duration of the tests by setting the `DURATION` environment+variable to the number of seconds to run. Keep in mind that this will be run 4

It's actually 6, since I forgot to update the doc: 1 each of control (no tracer), async_hooks only, and with tracing enabled, all of that across two different endpoints.

bengl

comment created time in 5 days

push eventDataDog/dd-trace-js

Bryan English

commit sha e6779593e2a81e6e2df6e87255020b68a6a16bae

fix encode benchmark

view details

push time in 6 days

push eventDataDog/dd-trace-js

Bryan English

commit sha b139e44b257679a9150bdf0bb2540a2dfdf96252

Fix updated FS and testing on linux. (#866) * fix symlink tests on linux * properly instrument FileHandle#close

view details

push time in 6 days

PR merged DataDog/dd-trace-js

Fix updated FS and testing on linux. dev/testing

What does this PR do?

Fix some fs problems that arose from:

  1. Updating to latest Node.js.
  2. Testing in environments where we can't symlink from the source dir to $TMPDIR

Motivation

Tests were breaking in CI.

+44 -8

0 comment

2 changed files

bengl

pr closed time in 6 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 7f490b33356fffdd28e5bc7f69c525e7d311ed92

support metrics in encoding

view details

push time in 6 days

push eventDataDog/dd-trace-js

Bryan English

commit sha b9625c28dbfa7c2ee19a34f3ca9236af283ddd30

support metrics in encoding

view details

push time in 6 days

push eventDataDog/dd-trace-js

Bryan English

commit sha dc47bf18051c069d43e7e4768770c0867a7a7968

fix tests and lint

view details

push time in 6 days

push eventDataDog/dd-trace-js

Roch Devost

commit sha 2c96ccbeb1cb63d197c2360374eb48a3ffd4649f

fix context propagation for route handlers in fastify (#845)

view details

Roch Devost

commit sha cc27706994f3a84eff8385e10749e3aa67c18357

update browser exporter to use sendBeacon when available (#854)

view details

Roch Devost

commit sha 5348eaad6f1a86d57b87ffd3de6397feb1dcb417

add site config and update browser exporter to use it (#855)

view details

Roch Devost

commit sha 16da744a8652215f83fc058a3bf5fe978f282494

fix missing postgres build container password (#860)

view details

Roch Devost

commit sha 603d99e428670c96c8b783b82f9cf519f50214e2

add source tag in the browser exporter (#862)

view details

Roch Devost

commit sha 9507b4a261fc970dee9e817a1ea9722af054bb5e

update browser exporter to send env as a tag (#863)

view details

Bryan English

commit sha fe694bd05bd09916d77ed292c2b5ab4b4c65e764

Add profiling tooling See the README.md for details

view details

Bryan English

commit sha 915386551d96c623f8a123ed13943c206d7a2d23

more profiling tooling

view details

Bryan English

commit sha d2141451c4e1ba646038f0a68f12bdfefb4456f9

performance: use Maps for storage in async_hooks We were previously using prototype-less objects, but it turns out that when you start adding a ton of properties to these objects, they slow right down. This is the ideal use case where Map shines, and in this case shaves off a measurable amount of processing time.

view details

Bryan English

commit sha 0562cbd60c19200f92f48a3532b0490ecc131262

performance: microoptimizations in hot paths On these hot paths, we should be using `for of` loops since they're quite a bit faster. The gain isn't all that much, but every little bit helps.

view details

Bryan English

commit sha 90896da94b843ece30b3ed174f35a207c7372dfe

performance: use a cache for pathToRegexp in router plugin Just a simple object cache will suffice here since we expect a relatively small (~100s) number of cache keys over the process lifetime.

view details

Bryan English

commit sha e8f72f01ecd37e0785d5a7254d3f57fb29875341

performance: use hand-rolled msgpack encoder Most of the new code is from 3aa4d9b4bb43c90a49f234a57f87d6352af4792e and parents, with a few optimizations made here and there.

view details

Bryan English

commit sha e713e4e21137b486e360c73cc69a3da88bcc7d16

fix some tests due to perf changes

view details

Bryan English

commit sha 7c049142ae6f2f1383dadfbdcd498053da4aa5dc

fix tests and lint

view details

push time in 6 days

push eventDataDog/dd-trace-js

Bryan English

commit sha b0190de25570af7be6a017df1d87c0c5268e3c94

fix tests and lint

view details

push time in 6 days

PR opened DataDog/dd-trace-js

Fix updated FS and testing on linux.

What does this PR do?

Fix some fs problems that arose from:

  1. Updating to latest Node.js.
  2. Testing in environments where we can't symlink from the source dir to $TMPDIR

Motivation

Tests were breaking in CI.

+44 -8

0 comment

2 changed files

pr created time in 6 days

create branchDataDog/dd-trace-js

branch : bengl/fixupdatedfs

created branch time in 6 days

Pull request review commentDataDog/dd-trace-js

update browser exporter to send env as a tag

 class BrowserExporter {       this._flushing = false     })   }++  _traceMeta () {+    const meta = {+      '_dd.source': 'browser'+    }++    addTag(meta, 'env', this._env)

Why bother having this function? Why not just have:

if (this._env) {
  meta.env = this._env
}
rochdev

comment created time in 7 days

PR opened DataDog/dd-trace-js

[WIP] Performance updates

What does this PR do?

  • Adds a perf measuring tool.
  • Makes a few changes that reduce performance impact on instrumented applications.

Motivation

To reduce performance impact on instrumented applications.

Additional Notes

The Writer test is currently disabled since the changes have completely broken it. I'm still working on that, hence why this is a WIP.

+866 -86

0 comment

22 changed files

pr created time in 11 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 731be873d98d06f7547092fa9d9a16cdb604de97

fix some tests due to perf changes

view details

push time in 11 days

push eventDataDog/dd-trace-js

rochdev

commit sha 59a857bf22dfa9103abe352d2dbeb8afac963904

remove support for koa-router 8.x until properly supported

view details

Roch Devost

commit sha 26bcc877885ff053a379dccca85e3acfd2c6990b

remove support for koa-router 8.x until properly supported (#837)

view details

Roch Devost

commit sha ee4d9ef304f14349affda97b7fbe44557a7c7544

update http status code temporarily to always be a string (#838)

view details

Roch Devost

commit sha d0d2e9b5da5058e61b46c0f148711ea949e6a113

v0.17.0 (#839)

view details

Roch Devost

commit sha 2da2cebd9e71e5cc4ada7091fb9ecda7828df836

remove several unnecessary dependencies in the browser (#797)

view details

Roch Devost

commit sha 66470ed5e3b2c86264fa89615781d317e3a06838

fix runtime metrics having prefix in double (#843)

view details

Bryan English

commit sha 8c1c319f806a2a885a82e8d07c2dcdf87263965a

Add @google-cloud/pubsub integration (#834) * add defaults test helper, and optional span logging * add support for @google-cloud/pubsub

view details

Roch Devost

commit sha 6420f187a7c12bef5744694d95e68ec50359a387

fix wrong route for koa-router 8 (#844)

view details

Bryan English

commit sha 7de90b1b373ffafbb856750b2770ce0598863a72

fix pubsub tests (#849) Timeouts were happening regularly. The timeout is now set to one minute, and this is now reflected in `agent.use` so that the timer there can be set to the same number.

view details

Bryan English

commit sha 3f5ff809871ed85fedf4ef85c729015962bfc0f2

Add profiling tooling See the README.md for details

view details

Bryan English

commit sha df9e2567e251c625c7ec52692d73d646dba2f931

more profiling tooling

view details

Bryan English

commit sha 5a828fcba35501d69db6c8e0c41cc8b312e19a9b

performance: use Maps for storage in async_hooks We were previously using prototype-less objects, but it turns out that when you start adding a ton of properties to these objects, they slow right down. This is the ideal use case where Map shines, and in this case shaves off a measurable amount of processing time.

view details

Bryan English

commit sha 92b126cd8ccb0d5291158623a4dc95d645bd738e

performance: microoptimizations in hot paths On these hot paths, we should be using `for of` loops since they're quite a bit faster. The gain isn't all that much, but every little bit helps.

view details

Bryan English

commit sha 9614e9119525806006f44e5ebfbaffe4f86a4515

performance: use a cache for pathToRegexp in router plugin Just a simple object cache will suffice here since we expect a relatively small (~100s) number of cache keys over the process lifetime.

view details

Bryan English

commit sha 1a6f623d2bc7ab1e67e91e874bf2f41981760057

performance: use hand-rolled msgpack encoder Most of the new code is from 3aa4d9b4bb43c90a49f234a57f87d6352af4792e and parents, with a few optimizations made here and there.

view details

push time in 13 days

PR opened DataDog/dd-trace-js

disable fs for now

What does this PR do?

Disable fs plugin for now

Motivation

Traces are showing a lot of error spans because many normal usages of fs involve some error propagation. We'll disable it for now until we have a better solution.

+1 -0

0 comment

1 changed file

pr created time in 14 days

create branchDataDog/dd-trace-js

branch : bengl/disablefs

created branch time in 14 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 7de90b1b373ffafbb856750b2770ce0598863a72

fix pubsub tests (#849) Timeouts were happening regularly. The timeout is now set to one minute, and this is now reflected in `agent.use` so that the timer there can be set to the same number.

view details

push time in 21 days

PR merged DataDog/dd-trace-js

fix pubsub tests dev/testing

What does this PR do?

Timeouts were happening regularly with pubsub tests. The timeout is now set to one minute, and this is now reflected in agent.use so that the timer there can be set to the same number.

<!-- A brief description of the change being made with this pull request. -->

Motivation

Tests were failing in CI regularly and locally occasionally. <!-- What inspired you to submit this pull request? -->

+11 -6

0 comment

3 changed files

bengl

pr closed time in 21 days

Pull request review commentDataDog/dd-trace-js

fix pubsub tests

 module.exports = {   },    // Register a callback with expectations to be run on every agent call.-  use (callback) {+  use (callback, timeoutMs = 1000) {

Fixed in fixup commit. Using an options object instead, so that we won't run into this.

bengl

comment created time in 21 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 45731586c686f89b7e779bbad58aa34441453a06

fixup

view details

push time in 21 days

PR opened DataDog/dd-trace-js

fix pubsub tests

What does this PR do?

Timeouts were happening regularly with pubsub tests. The timeout is now set to one minute, and this is now reflected in agent.use so that the timer there can be set to the same number.

<!-- A brief description of the change being made with this pull request. -->

Motivation

Tests were failing in CI regularly and locally occasionally. <!-- What inspired you to submit this pull request? -->

+9 -6

0 comment

3 changed files

pr created time in 21 days

create branchDataDog/dd-trace-js

branch : bengl/fixpubsub

created branch time in 21 days

Pull request review commentDataDog/dd-trace-js

fix wrong route for koa-router 8

 function createWrapRegister (tracer, config) {     return function registerWithTrace (path, methods, middleware, opts) {       const route = register.apply(this, arguments) -      if (Array.isArray(path) || !route || !Array.isArray(route.stack)) return route+      if (!Array.isArray(path) && route && Array.isArray(route.stack)) {+        wrapStack(route)+      } -      route.stack = route.stack.map(middleware => {-        if (typeof middleware !== 'function') return middleware+      return route+    }+  }+} -        return function (ctx, next) {-          if (!ctx || !web.active(ctx.req)) return middleware.apply(this, arguments)+function createWrapRoutes (tracer, config) {+  return function wrapRoutes (routes) {+    return function routesWithTrace () {+      const dispatch = routes.apply(this, arguments)+      const dispatchWithTrace = function (ctx, next) {+        if (!ctx.router) {+          let router++          Object.defineProperty(ctx, 'router', {+            set (value) {+              router = value+              router.stack.forEach(layer => {+                wrapStack(layer)+              })+            },++            get () {+              return router+            }+          })+        } -          web.exitRoute(ctx.req)-          web.enterRoute(ctx.req, route.path)+        return dispatch.apply(this, arguments)+      } -          return wrapMiddleware(middleware).apply(this, arguments)-        }-      })+      dispatchWithTrace.router = dispatch.router -      return route+      return dispatchWithTrace     }   } } +function wrapStack (layer) {+  layer.stack = layer.stack.map(middleware => {+    if (typeof middleware !== 'function') return middleware++    return function (ctx, next) {+      if (!ctx || !web.active(ctx.req)) return middleware.apply(this, arguments)++      web.exitRoute(ctx.req)+      web.enterRoute(ctx.req, layer.path)++      return wrapMiddleware(middleware).apply(this, arguments)

Could this wrapMiddleware() be done outside of this function (i.e. around line 105)?

rochdev

comment created time in 25 days

Pull request review commentDataDog/dd-trace-js

fix wrong route for koa-router 8

 function createWrapRegister (tracer, config) {     return function registerWithTrace (path, methods, middleware, opts) {       const route = register.apply(this, arguments) -      if (Array.isArray(path) || !route || !Array.isArray(route.stack)) return route+      if (!Array.isArray(path) && route && Array.isArray(route.stack)) {+        wrapStack(route)+      } -      route.stack = route.stack.map(middleware => {-        if (typeof middleware !== 'function') return middleware+      return route+    }+  }+} -        return function (ctx, next) {-          if (!ctx || !web.active(ctx.req)) return middleware.apply(this, arguments)+function createWrapRoutes (tracer, config) {+  return function wrapRoutes (routes) {+    return function routesWithTrace () {+      const dispatch = routes.apply(this, arguments)+      const dispatchWithTrace = function (ctx, next) {+        if (!ctx.router) {+          let router++          Object.defineProperty(ctx, 'router', {+            set (value) {+              router = value+              router.stack.forEach(layer => {

This whole thing could be for (const layer of router.stack) wrapStack(layer);. Since this is a hot code path (I think?), we should probably do that here instead of a forEach. (Feel free to ignore if you're sure it's not a hot path.)

rochdev

comment created time in 25 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 8c1c319f806a2a885a82e8d07c2dcdf87263965a

Add @google-cloud/pubsub integration (#834) * add defaults test helper, and optional span logging * add support for @google-cloud/pubsub

view details

push time in 25 days

PR merged DataDog/dd-trace-js

Add @google-cloud/pubsub integration enhancement integrations

What does this PR do?

Adds support for @google-cloud/pubsub

Motivation

User-requested.

Plugin Checklist

<!-- Fill this section if adding or updating a plugin. Remove otherwise. -->

Additional Notes

The base branch here is bengl/fs, which has not yet been merged. The fs PR (#814) must be merged first, and then the base branch here needs to be changed to master.

+416 -3

0 comment

11 changed files

bengl

pr closed time in 25 days

push eventDataDog/dd-trace-js

Bryan English

commit sha b70f534e850e12b896095b76d6728db1b22bb1e8

Add profiling tooling See the README.md for details

view details

push time in 25 days

push eventDataDog/dd-trace-js

Bryan English

commit sha 50172d79ba191678b32e91dcfc9405f00d08a330

init3

view details

Bryan English

commit sha d7d984577aee59394a6310b9bb0c869c8dc2e309

init4

view details

push time in a month

push eventDataDog/dd-trace-js

Bryan English

commit sha 60e06ce1c99d72cef685273859ab214e86cc44ea

PR fixes

view details

push time in a month

issue openedDataDog/dd-trace-js

grpc doesn't instrument server pushes

Describe the bug In attempting to make sure we were doing the right thing with @google-cloud/pubsub, I noticed that there was no grpc span happening prior to the pubsub receive. This seems incorrect, like we're missing the HTTP/2 server pushes or something.

created time in a month

push eventDataDog/dd-trace-js

Bryan English

commit sha ec5e4b1dd3b1743eda2a9f7f33992d5bc4eefbde

span.kind === producer only in publish, for gpubsub

view details

push time in a month

create branch DataDog/dd-trace-js

branch : bengl/profiling

created branch time in a month

push eventDataDog/dd-trace-js

Bryan English

commit sha 9c1037c230457e9c82feb802ec597a347c3f2335

PR fixes

view details

push time in a month

push eventDataDog/dd-trace-js

Bryan English

commit sha b4d06f5293b6e0b578e8680f387621ac723201e8

fix fs readFile when options is string (#835)

view details

push time in a month

PR merged DataDog/dd-trace-js

fix fs readFile when options is string

What does this PR do?

Fix the case where a string is passed in place of the options object, which Node supports.

Motivation

It wasn't working, because the code lacked an appropriate safety check before accessing the options as an object.

+13 -1

0 comment

2 changed files

bengl

pr closed time in a month

push eventDataDog/dd-trace-js

Bryan English

commit sha eafd087dc3d0c83afbdbd4aa2c82f4e918d2357b

fix tsconfig for current versions of @types/node (#836)

view details

push time in a month

PR merged DataDog/dd-trace-js

fix tsconfig for current versions of @types/node

What does this PR do?

Sets the ECMAScript version in docs/tsconfig.json so that @types/node doesn't fail.

Motivation

Builds were all breaking because docs wouldn't generate.

+1 -0

1 comment

1 changed file

bengl

pr closed time in a month

Pull request review commentDataDog/dd-trace-js

fix fs readFile when options is string

 function makeFSFlagTags (resourceName, path, options, defaultFlag, config, trace }  function makeFSTags (resourceName, path, options, config, tracer) {-  path = options && 'fd' in options ? options.fd : path+  path = options && typeof options === 'object' && 'fd' in options ? options.fd : path

It's not quite the same meaning here, because I do explicitly only want this code path for 'object'. For anything that's not an object but still truthy, we just want the value, regardless if someone has attached a weird fd property to the wrapper prototype.

bengl

comment created time in a month

pull request commentDataDog/dd-trace-js

fix tsconfig for current versions of @types/node

Based on https://github.com/DefinitelyTyped/DefinitelyTyped/issues/29172

bengl

comment created time in a month

PR opened DataDog/dd-trace-js

fix tsconfig for current versions of @types/node

What does this PR do?

Sets the ECMAScript version in docs/tsconfig.json so that @types/node doesn't fail.

Motivation

Builds were all breaking because docs wouldn't generate.

+1 -0

0 comment

1 changed file

pr created time in a month

create branch DataDog/dd-trace-js

branch : bengl/fixdocstsconfig

created branch time in a month

push eventDataDog/dd-trace-js

Bryan English

commit sha d3c76e949eff97bc1a85961845d0dbf46f66bda7

fix fs readFile when options is string

view details

push time in a month

create branch DataDog/dd-trace-js

branch : bengl/testingwhat

created branch time in a month

PR opened DataDog/dd-trace-js

fix fs readFile when options is string

What does this PR do?

Fix the case where a string is passed in place of the options object, which Node supports.

Motivation

It wasn't working, because the code lacked an appropriate safety check before accessing the options as an object.

+13 -1

0 comment

2 changed files

pr created time in a month

create branch DataDog/dd-trace-js

branch : bengl/fixfsstring

created branch time in a month

Pull request review commentDataDog/dd-trace-js

Add @google-cloud/pubsub integration

 function assertWorkspace () { }  function install () {-  exec('yarn', { cwd: folder() })+  exec('yarn --ignore-engines', { cwd: folder() })

The dropped support is in a version of semver that's a dependency of @google-cloud/pubsub (transitively via google-gax).

bengl

comment created time in a month

push eventDataDog/dd-trace-js

Roch Devost

commit sha a46479ebcbdce3a3a9716a55d3318d6ee149ce2a

fix cassandra-driver plugin not working for >=4.4 (#825)

view details

Eric Mustin

commit sha 1f1356eadd0f3a48f0be4140cc869b5f0127b5a9

fix log injection removing symbols from log records (#819) * account for Symbol keys for winston splat handling * use for of loop and check for undefined or null records * remove local changes * remove typos on testing local changes * use correct syntax for for of loop * linting fix * simplify for loop * add specs to verify splat formatting is handled correctly * remove duplicated test

view details

Eric Mustin

commit sha 271c74021dbb87274820441a158c575efa9d107f

update bluebird instrumentation to account for getNewLibraryCopy (#813) * handle usage of getNewLibraryCopy by sequelize * [wip] add promise spec for getNewLibraryCopy need to determine min version still * fix unwrap typo, add temp fix and todos for version requirements for getNetLibraryCopy for 2.11.0 and 3.4.1 * add getNewLibraryCopy patching only for versions where it exists * handle unwrapping copies _then by maintaining reference to copies on original promise library * make hidden property non enumerable * [wip] add specs for patching and unpatching behavior , clean up hiddenproperty settings, use promise constructor instead of prototype * add specs for mechanics of tracking wrapped bluebird library copies for unpatching * remove duplicate patching specs * add version filter for bluebird version specific instrumentation * generalize unpatching test for all promise libraries * handle edge case when non native promise libraries default to native promises * only instrument promisejs when it doesnt export global promise, move semver check to promise util specs, clean up bluebird spec syntax * only instrument promise-js on versions it isnt used native promises, add promise-js version range filtering in specs * simplify version checking and test pre / post patching and unpatching behavior * es6 syntax improvement * fix beforeEach handling of promises * add test for copy of copy being unwrapped

view details

Jeremy

commit sha 78e4f8464c92ea8e0ce061415967e98f69797b94

Dual License (#832)

view details

Eric Mustin

commit sha 147520bbff427ca16499a60bee6d3aa8576719e8

add support for WHATWG URL in http client (#833) * add support for WHATWG by removing Object assign usage * linting and move spec to >10 bc it was breaking on ode8 image * use async format for unit test * add await to spec and make url normalization cleaner

view details

Bryan English

commit sha 34beab047f6b4b64f459c698e6c17a02af8db4d4

initial fs support (#814) * initial fs support * allow `yarn lint --fix` * support large payloads in test agent * don't trace fs operations unless there's a parent span

view details

Bryan English

commit sha 577dfa0ae097cd4a0b20678a5e73fe41d884db55

add defaults test helper, and optional span logging

view details

Bryan English

commit sha 9c3be6ffc9b1a24e09977ae98e348f7907dd21d8

add support for @google-cloud/pubsub

view details

Bryan English

commit sha e6303c221dbea2e707646a3da30e4514bc3c98e5

plugin PR checklist items

view details

Bryan English

commit sha 2212f1679860ff8481e6e031de38b4fd47d36b7a

PR review fixes

view details

Bryan English

commit sha 8e15ca17a1d2f8f0918b66bc629c006a981a3ed2

@google-cloud/pubsub test errors

view details

Bryan English

commit sha dd882dd81d8c61397fd3a4d48682313a5da9329c

cleanup

view details

Bryan English

commit sha 6282897f2955d0374431085a4b6377f5d683c222

@google-cloud/pubsub test context propagation

view details

Bryan English

commit sha aec00c163c498c15246545755eb49dbf164cd924

PR fixes

view details

push time in a month

push eventDataDog/dd-trace-js

Bryan English

commit sha 34beab047f6b4b64f459c698e6c17a02af8db4d4

initial fs support (#814) * initial fs support * allow `yarn lint --fix` * support large payloads in test agent * don't trace fs operations unless there's a parent span

view details

push time in a month

PR merged DataDog/dd-trace-js

initial fs support enhancement integrations

What does this PR do?

Adds support for Node's builtin fs module

Motivation

Operations in fs ought to be traced, since they can take non-trivial time.

Plugin Checklist

<!-- Fill this section if adding or updating a plugin. Remove otherwise. -->

+2572 -7

0 comment

15 changed files

bengl

pr closed time in a month

more