diff --git a/.clang-format b/.clang-format
new file mode 100644
index 00000000..1acba5a7
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,2 @@
+BasedOnStyle: Google
+SortIncludes: false
diff --git a/.eslintrc b/.eslintrc
index 9d5a9935..85ab771b 100644
--- a/.eslintrc
+++ b/.eslintrc
@@ -50,6 +50,11 @@
         "ignoreConsecutiveComments": true
       }
     ],
+    "curly": [
+      "error",
+      "multi-line",
+      "consistent"
+    ],
     "import/order": [
       "error",
       {
@@ -137,6 +142,10 @@
         "format": ["PascalCase"],
         "trailingUnderscore": "allowSingleOrDouble"
       },
+      {
+        "selector": "enumMember",
+        "format": ["PascalCase", "UPPER_CASE"]
+      },
       {
         "selector": "objectLiteralProperty",
         "format": null
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 788657f1..4f687dde 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -65,13 +65,17 @@ check:test:
     - >
       nix-shell --run '
       npm run build --verbose;
-      npm test -- --ci;
+      npm test -- --ci --coverage;
       '
   artifacts:
     when: always
     reports:
       junit:
-        - ./tmp/junit.xml
+        - ./tmp/junit/junit.xml
+      coverage_report:
+        coverage_format: cobertura
+        path: ./tmp/coverage/cobertura-coverage.xml
+  coverage: '/All files[^|]*\|[^|]*\s+([\d\.]+)/'
   rules:
     # Runs on staging commits and ignores version commits
     - if: $CI_COMMIT_BRANCH =~ /^feature.*$/ && $CI_COMMIT_TITLE !~ /^[0-9]+\.[0-9]+\.[0-9]+(?:-.*[0-9]+)?$/
@@ -114,17 +118,21 @@ build:linux:
     - >
       nix-shell --run '
       npm run build --verbose;
-      npm test -- --ci;
+      npm test -- --ci --coverage;
       '
   artifacts:
     when: always
     reports:
       junit:
-        - ./tmp/junit.xml
+        - ./tmp/junit/junit.xml
+      coverage_report:
+        coverage_format: cobertura
+        path: ./tmp/coverage/cobertura-coverage.xml
     paths:
       - ./prebuilds/
       # Only the build:linux preserves the dist
       - ./dist
+  coverage: '/All files[^|]*\|[^|]*\s+([\d\.]+)/'
   rules:
     # Runs on staging commits and ignores version commits
     - if: $CI_COMMIT_BRANCH == 'staging' && $CI_COMMIT_TITLE !~ /^[0-9]+\.[0-9]+\.[0-9]+(?:-.*[0-9]+)?$/
@@ -150,7 +158,7 @@ build:windows:
     when: always
     reports:
       junit:
-        - ./tmp/junit.xml
+        - ./tmp/junit/junit.xml
     paths:
       - ./prebuilds/
   rules:
@@ -169,6 +177,7 @@ build:macos:
     HOMEBREW_NO_INSTALL_UPGRADE: "true"
     HOMEBREW_NO_INSTALL_CLEANUP: "true"
   before_script:
+    - eval "$(brew shellenv)"
    - brew install node@16
    - brew link --overwrite node@16
    - brew install python@3.9
@@ -184,7 +193,7 @@
     when: always
     reports:
       junit:
-        - ./tmp/junit.xml
+        - ./tmp/junit/junit.xml
     paths:
       - ./prebuilds/
   rules:
@@ -209,6 +218,26 @@ build:prerelease:
       nix-shell --run '
       npm publish --tag prerelease --access public;
       '
+    - >
+      for d in prebuilds/*; do
+        tar \
+          --create \
+          --verbose \
+          --file="prebuilds/$(basename $d).tar" \
+          --directory=prebuilds \
+          "$(basename $d)";
+      done
+    - >
+      nix-shell -I nixpkgs=./pkgs.nix --packages gitAndTools.gh --run '
+      gh release \
+        create "$CI_COMMIT_TAG" \
+        prebuilds/*.tar \
+        --title "${CI_COMMIT_TAG}-$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
+        --notes "" \
+        --prerelease \
+        --target staging \
+        --repo "$GH_PROJECT_PATH";
+      '
   after_script:
     - rm -f ./.npmrc
   rules:
@@ -272,6 +301,25 @@ release:distribution:
       nix-shell --run '
       npm publish --access public;
       '
+    - >
+      for d in prebuilds/*; do
+        tar \
+          --create \
+          --verbose \
+          --file="prebuilds/$(basename $d).tar" \
+          --directory=prebuilds \
+          "$(basename $d)";
+      done
+    - >
+      nix-shell -I nixpkgs=./pkgs.nix --packages gitAndTools.gh --run '
+      gh release \
+        create "$CI_COMMIT_TAG" \
+        prebuilds/*.tar \
+        --title "${CI_COMMIT_TAG}-$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
+        --notes "" \
+        --target master \
+        --repo "$GH_PROJECT_PATH";
+      '
   after_script:
     - rm -f ./.npmrc
   rules:
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000..9ed6060c
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "deps/snappy/snappy"]
+  path = deps/snappy/snappy
+  url = https://github.com/google/snappy.git
+[submodule "deps/rocksdb/rocksdb"]
+  path = deps/rocksdb/rocksdb
+  url = https://github.com/facebook/rocksdb.git
diff --git a/README.md b/README.md
index f9db2633..068d408c 100644
--- a/README.md
+++ b/README.md
@@ -5,12 +5,76 @@ master: [![pipeline status](https://gitlab.com/MatrixAI/open-source/js-db/badges
 
 DB is library managing key value state for MatrixAI's JavaScript/TypeScript applications.
 
+This forks classic-level's C++ binding code around LevelDB 1.20. Differences from classic-level:
+
+* Uses TypeScript from ground-up
+* Supports Snapshot-Isolation based transactions via `DBTransaction`
+* API supports "key paths" which can be used to manipulate "levels" of nested keys
+* Value encryption (key-encryption is not supported yet) - requires additional work with block-encryption
+
 ## Installation
 
 ```sh
 npm install --save @matrixai/db
 ```
 
+## Usage
+
+```ts
+import { DB } from '@matrixai/db';
+
+async function main () {
+
+  const key = Buffer.from([
+    0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03,
+    0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03,
+  ]);
+
+  const encrypt = async (
+    key: ArrayBuffer,
+    plainText: ArrayBuffer
+  ): Promise<ArrayBuffer> => {
+    return plainText;
+  };
+
+  const decrypt = async (
+    key: ArrayBuffer,
+    cipherText: ArrayBuffer
+  ): Promise<ArrayBuffer> => {
+    return cipherText;
+  };
+
+  const db = await DB.createDB({
+    dbPath: './tmp/db',
+    crypto: {
+      key,
+      ops: { encrypt, decrypt },
+    },
+    fresh: true,
+  });
+
+  await db.put(['level', Buffer.from([0x30, 0x30]), 'a'], 'value');
+  await db.put(['level', Buffer.from([0x30, 0x31]), 'b'], 'value');
+  await db.put(['level', Buffer.from([0x30, 0x32]), 'c'], 'value');
+  await db.put(['level', Buffer.from([0x30, 0x33]), 'c'], 'value');
+
+  console.log(await db.get(['level', Buffer.from([0x30, 0x32]), 'c']));
+
+  await db.del(['level', Buffer.from([0x30, 0x32]), 'c']);
+
+  for await (const [kP, v] of db.iterator({
+    lt: [Buffer.from([0x30, 0x32]), ''],
+  }, ['level'])) {
+    console.log(kP, v);
+  }
+
+  await db.stop();
+}
+
+main();
+```
+
 ## Development
 
 Run `nix-shell`, and once you're inside, you can use:
diff --git a/benches/DB1KiB.ts b/benches/DB1KiB.ts
index ee328a1f..2a1e8eea 100644
--- a/benches/DB1KiB.ts
+++ b/benches/DB1KiB.ts
@@ -58,9 +58,7 @@
 }
 
 if (require.main === module) {
-  (async () => {
-    await main();
-  })();
+  void main();
 }
 
 export default main;
diff --git a/benches/DB1MiB.ts b/benches/DB1MiB.ts
index 40afd435..a4628cfd 100644
--- a/benches/DB1MiB.ts
+++ b/benches/DB1MiB.ts
@@ -20,13 +20,13 @@ async function main() {
   const summary = await b.suite(
     'DB1MiB',
     b.add('get 1 MiB of data', async () => {
-      await db.put('1kib', data1MiB, true);
+      await db.put('1mib', data1MiB, true);
       return async () => {
-        await db.get('1kib', true);
+        await db.get('1mib', true);
       };
     }),
     b.add('put 1 MiB of data', async () => {
-      await db.put('1kib', data1MiB, true);
+      await db.put('1mib', data1MiB, true);
     }),
     b.add('put zero data', async () => {
       await db.put('0', data0, true);
@@ -58,9 +58,7 @@
 }
 
 if (require.main === module) {
-  (async () => {
-    await main();
-  })();
+  void main();
 }
 
 export default main;
diff --git a/benches/index.ts b/benches/index.ts
index 1232e30a..f8a7cce0 100644
--- a/benches/index.ts
+++ b/benches/index.ts
@@ -20,9 +20,9 @@ async function main(): Promise<void> {
 }
 
 if (require.main === module) {
-  (async () => {
-    await main();
-  })();
+  void main();
 }
 
 export default main;
diff --git a/benches/results/DB1KiB.chart.html b/benches/results/DB1KiB.chart.html
index 30e2f123..5d48f5aa 100644
--- a/benches/results/DB1KiB.chart.html
+++ b/benches/results/DB1KiB.chart.html
@@ -16,7 +16,7 @@
-[minified chart HTML line lost in extraction]
+[minified chart HTML line lost in extraction]
diff --git a/docs/classes/DB.html b/docs/classes/DB.html
[index header and regenerated minified TypeDoc HTML lost in extraction. The rendered before/after text shows the DB class page updated as follows: the raw _db: LevelDB<string | Buffer, Buffer> handle, transactionCounter, and setupDb() are gone; new members _iteratorRefs: Set<DBIterator<any, any>> (references to iterators), _transactionRefs: Set<DBTransaction> (references to transactions), and _lockBox: LockBox<RWLockWriter> appear, along with a lockBox accessor and canaryCheck(); batch(ops, sync?), put(keyPath, value, raw?, sync?), and del(keyPath, sync?) gain an optional sync flag; iterator() now returns the new DBIterator type; start() and createDB() accept DBOptions.]
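For orientation, here is a minimal sketch of the updated write API that the regenerated DB page describes. It is a sketch against the rendered signatures only: the DBOp object shape is inferred from batch(ops: readonly DBOp[], sync?: boolean), and reading sync as a durability (synchronous write) flag is an assumption, not something this diff confirms.

```ts
import { DB } from '@matrixai/db';

async function writeExample(db: DB): Promise<void> {
  // Batches operations together atomically; the trailing `sync` flag is
  // assumed to request a synchronous (flushed) write.
  await db.batch(
    [
      { type: 'put' as const, keyPath: ['level', 'a'], value: { n: 1 } },
      { type: 'del' as const, keyPath: ['level', 'b'] },
    ],
    true, // sync
  );
  // put/del carry the same trailing flag individually:
  // put(keyPath, value, raw?, sync?) and del(keyPath, sync?).
  await db.put(['level', 'c'], 'value', false, true);
  await db.del(['level', 'a'], true);
}
```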
\ No newline at end of file
diff --git a/docs/classes/DBIterator.html b/docs/classes/DBIterator.html
new file mode 100644
index 00000000..8e6e1c49
--- /dev/null
+++ b/docs/classes/DBIterator.html
@@ -0,0 +1 @@
+[generated TypeDoc page for the new DBIterator<K, V> class (extends CreateDestroy); minified HTML omitted. Rendered members: _iterator: RocksDBIterator<Buffer, Buffer>, _options (buffer key/value encodings with gt/gte/lt/lte bounds), optional _transaction, cache/cachePos, finished, first, levelPath, lock, logger; methods [asyncIterator](), next(), _next(), seek(keyPath), destroy(), processEntry(entry).]
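A short sketch of driving the new DBIterator using only the members listed above: async iteration, plus manual seek/next with an explicit destroy. That seek positions at the first entry at or after the target key is an assumption from LevelDB/RocksDB convention, not stated in the rendered page.

```ts
import { DB } from '@matrixai/db';

async function iterateLevel(db: DB): Promise<void> {
  // DBIterator implements [asyncIterator], so it can be consumed directly;
  // entries under the ['level'] level come back as [keyPath, value] pairs.
  for await (const [keyPath, value] of db.iterator({}, ['level'])) {
    console.log(keyPath, value);
  }
  // Manual driving; destroy() releases the underlying RocksDB iterator
  // when the iterator is abandoned before exhaustion.
  const it = db.iterator({}, ['level']);
  try {
    it.seek('c'); // assumed: position at the first entry >= 'c'
    console.log(await it.next()); // [keyPath, value] or undefined
  } finally {
    await it.destroy();
  }
}
```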
\ No newline at end of file
diff --git a/docs/classes/DBTransaction.html b/docs/classes/DBTransaction.html
index 8b618783..fa6a9508 100644
--- a/docs/classes/DBTransaction.html
+++ b/docs/classes/DBTransaction.html
@@ -1,5 +1,39 @@
[regenerated TypeDoc page for DBTransaction; minified HTML omitted. The updated page documents the new concurrency-control API:
* getForUpdate(keyPath, raw?): reads like get(), but is used to address write skews.
* lock(...requests): locks a sequence of lock requests; a request that does not specify a type defaults to RWLockWriter with write type; keys are locked in string-sorted order and, by convention, should correspond to keys in the database; locking the same key is idempotent, so lock re-entrancy is enabled; keys are automatically unlocked in reverse-sorted order when the transaction is destroyed; there is no lock upgrading or downgrading, and no deadlock detection.
* unlock(...keys): unlocks in the order given; a transaction may only unlock keys it previously locked (keys not in this._locks are ignored); unlocking the same keys is idempotent.
* setSnapshot(): sets the snapshot manually so that consistent reads and writes start after this call; idempotent; normally snapshots are set lazily upon the first transaction DB operation.]
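A hedged sketch that combines the documented pieces: withTransactionF from the DB page with lock and getForUpdate from this page, in the write-skew situation getForUpdate is documented to address. The account keys and balance logic are hypothetical.

```ts
import { DB } from '@matrixai/db';

async function transfer(db: DB): Promise<void> {
  await db.withTransactionF(async (tran) => {
    // Requests are locked in string-sorted order and re-locking the same
    // key is idempotent, per the documented lock() semantics.
    await tran.lock('accounts/a', 'accounts/b');
    // getForUpdate reads like get(), but guards these reads against
    // write skew between concurrent transactions.
    const a = (await tran.getForUpdate<number>(['accounts', 'a'])) ?? 0;
    const b = (await tran.getForUpdate<number>(['accounts', 'b'])) ?? 0;
    await tran.put(['accounts', 'a'], a - 1);
    await tran.put(['accounts', 'b'], b + 1);
    // When the callback resolves the transaction commits; locks are
    // released in reverse-sorted order as the transaction is destroyed.
  });
}
```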
\ No newline at end of file
diff --git a/docs/classes/errors.ErrorDB.html b/docs/classes/errors.ErrorDB.html
index fae75b95..f3cf60df 100644
--- a/docs/classes/errors.ErrorDB.html
+++ b/docs/classes/errors.ErrorDB.html
@@ -1,16 +1,16 @@
[regenerated TypeDoc page for ErrorDB<T> (description: 'DB error'); minified HTML omitted. The page documents cause (causation of the exception), data (arbitrary data), a timestamp in milliseconds (possibly with microseconds in the fractional part, guaranteed weakly monotonic), toJSON() (JSON POJO encoding whose replacer deletes undefined values in objects and replaces undefined with null in arrays), and the static fromJSON() for runtime decoding back to an exception instance.]
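A small sketch of the documented toJSON/fromJSON round trip for these error classes, e.g. for shipping an exception across a worker boundary. It assumes the errors namespace is re-exported from the package root, as the docs/classes/errors.* paths suggest.

```ts
import { errors } from '@matrixai/db';

// The constructor takes an optional message and { cause, data, timestamp }.
const e = new errors.ErrorDB('write failed', {
  data: { keyPath: ['level', 'a'] },
});
// toJSON() encodes to a JSON POJO; the documented replacer deletes
// undefined values in objects and replaces undefined with null in arrays.
const pojo = e.toJSON();
// fromJSON() is the documented static runtime decoder back to an instance.
const decoded = errors.ErrorDB.fromJSON(pojo);
console.log(decoded instanceof errors.ErrorDB); // true
console.log(decoded.data, decoded.timestamp);
```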
\ No newline at end of file
diff --git a/docs/classes/errors.ErrorDBCreate.html b/docs/classes/errors.ErrorDBCreate.html
index cfe1b38d..5b5be548 100644
--- a/docs/classes/errors.ErrorDBCreate.html
+++ b/docs/classes/errors.ErrorDBCreate.html
@@ -1,16 +1,16 @@
[regenerated TypeDoc page for ErrorDBCreate<T> (description: 'DB cannot be created'); same structure as the ErrorDB page, only regenerated boilerplate changed; minified HTML omitted.]
\ No newline at end of file
diff --git a/docs/classes/errors.ErrorDBDecrypt.html b/docs/classes/errors.ErrorDBDecrypt.html
index 9c8f2739..ba576452 100644
--- a/docs/classes/errors.ErrorDBDecrypt.html
+++ b/docs/classes/errors.ErrorDBDecrypt.html
@@ -1,16 +1,16 @@
[regenerated TypeDoc page for ErrorDBDecrypt<T> (description: 'DB failed decryption'); same structure as the ErrorDB page, only regenerated boilerplate changed; minified HTML omitted.]
\ No newline at end of file
diff --git a/docs/classes/errors.ErrorDBDelete.html b/docs/classes/errors.ErrorDBDelete.html
index d4baa3dc..342e0808 100644
--- a/docs/classes/errors.ErrorDBDelete.html
+++ b/docs/classes/errors.ErrorDBDelete.html
@@ -1,16 +1,16 @@
[regenerated TypeDoc page for ErrorDBDelete<T> (description: 'DB cannot be deleted'); same structure as the ErrorDB page, only regenerated boilerplate changed; minified HTML omitted.]
\ No newline at end of file
diff --git a/docs/classes/errors.ErrorDBDestroyed.html b/docs/classes/errors.ErrorDBDestroyed.html
index 8fec879b..8de75b7c 100644
--- a/docs/classes/errors.ErrorDBDestroyed.html
+++ b/docs/classes/errors.ErrorDBDestroyed.html
@@ -1,16 +1,16 @@
[regenerated TypeDoc page for ErrorDBDestroyed<T> (description: 'DB is destroyed'); same structure as the ErrorDB page, only regenerated boilerplate changed; minified HTML omitted.]
\ No newline at end of file
diff --git a/docs/classes/errors.ErrorDBIterator.html b/docs/classes/errors.ErrorDBIterator.html
new file mode 100644
index 00000000..7e3b8377
--- /dev/null
+++ b/docs/classes/errors.ErrorDBIterator.html
@@ -0,0 +1,29 @@
+[generated TypeDoc page for the new ErrorDBIterator<T> class (description: 'DBIterator error'); same structure as the ErrorDB page; minified HTML omitted.]
\ No newline at end of file
diff --git a/docs/classes/errors.ErrorDBIteratorBusy.html b/docs/classes/errors.ErrorDBIteratorBusy.html
new file mode 100644
index 00000000..23d92f8c
--- /dev/null
+++ b/docs/classes/errors.ErrorDBIteratorBusy.html
@@ -0,0 +1,29 @@
+[generated TypeDoc page for the new ErrorDBIteratorBusy<T> class (description: 'DBIterator is busy'); same structure as the ErrorDB page; minified HTML omitted.]
\ No newline at end of file
diff --git a/docs/classes/errors.ErrorDBIteratorDestroyed.html b/docs/classes/errors.ErrorDBIteratorDestroyed.html
new file mode 100644
index 00000000..cfcf9e88
--- /dev/null
+++ b/docs/classes/errors.ErrorDBIteratorDestroyed.html
@@ -0,0 +1,29 @@
+[generated TypeDoc page for the new ErrorDBIteratorDestroyed<T> class (description: 'DBIterator is destroyed'); same structure as the ErrorDB page; minified HTML omitted.]
\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBKey.html b/docs/classes/errors.ErrorDBKey.html index 2ce6197c..89955e65 100644 --- a/docs/classes/errors.ErrorDBKey.html +++ b/docs/classes/errors.ErrorDBKey.html @@ -1,16 +1,16 @@ -ErrorDBKey | @matrixai/db
Class ErrorDBKey<T>

Type parameters

  • T

Hierarchy

Index

Constructors

  • new ErrorDBKey<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBKey<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBKey<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DB key is incorrect'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBNotRunning.html b/docs/classes/errors.ErrorDBNotRunning.html index 0aaebc23..64d289e4 100644 --- a/docs/classes/errors.ErrorDBNotRunning.html +++ b/docs/classes/errors.ErrorDBNotRunning.html @@ -1,16 +1,16 @@ -ErrorDBNotRunning | @matrixai/db
Class ErrorDBNotRunning<T>

Type parameters

  • T

Hierarchy

Index

Constructors

  • new ErrorDBNotRunning<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBNotRunning<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBNotRunning<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DB is not running'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBParseKey.html b/docs/classes/errors.ErrorDBParseKey.html index 392fa4b8..9b744c76 100644 --- a/docs/classes/errors.ErrorDBParseKey.html +++ b/docs/classes/errors.ErrorDBParseKey.html @@ -1,16 +1,16 @@ -ErrorDBParseKey | @matrixai/db
Class ErrorDBParseKey<T>

Type parameters

  • T

Hierarchy

Index

Constructors

  • new ErrorDBParseKey<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBParseKey<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBParseKey<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DB key parsing failed'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBParseValue.html b/docs/classes/errors.ErrorDBParseValue.html index 3b002c33..f888cb8b 100644 --- a/docs/classes/errors.ErrorDBParseValue.html +++ b/docs/classes/errors.ErrorDBParseValue.html @@ -1,16 +1,16 @@ -ErrorDBParseValue | @matrixai/db
Class ErrorDBParseValue<T>

Type parameters

  • T

Hierarchy

Index

Constructors

  • new ErrorDBParseValue<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBParseValue<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBParseValue<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DB value parsing failed'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBRunning.html b/docs/classes/errors.ErrorDBRunning.html index 3986adf0..1b250112 100644 --- a/docs/classes/errors.ErrorDBRunning.html +++ b/docs/classes/errors.ErrorDBRunning.html @@ -1,16 +1,16 @@ -ErrorDBRunning | @matrixai/db
Class ErrorDBRunning<T>

Type parameters

  • T

Hierarchy

Index

Constructors

  • new ErrorDBRunning<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBRunning<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBRunning<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DB is running'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBTransaction.html b/docs/classes/errors.ErrorDBTransaction.html new file mode 100644 index 00000000..02218142 --- /dev/null +++ b/docs/classes/errors.ErrorDBTransaction.html @@ -0,0 +1,29 @@ +ErrorDBTransaction | @matrixai/db

Class ErrorDBTransaction<T>

Type parameters

  • T

Hierarchy

Index

Constructors

  • new ErrorDBTransaction<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBTransaction<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBTransaction<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DBTransaction error'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>


\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBTransactionCommitted.html b/docs/classes/errors.ErrorDBTransactionCommitted.html index d7845bde..3b8c6272 100644 --- a/docs/classes/errors.ErrorDBTransactionCommitted.html +++ b/docs/classes/errors.ErrorDBTransactionCommitted.html @@ -1,16 +1,16 @@ -ErrorDBTransactionCommitted | @matrixai/db
Class ErrorDBTransactionCommitted<T>

Type parameters

  • T

Hierarchy

  • ErrorDB<T>
    • ErrorDBTransactionCommitted

Index

Constructors

  • new ErrorDBTransactionCommitted<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBTransactionCommitted<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBTransactionCommitted<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DBTransaction is committed'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBTransactionConflict.html b/docs/classes/errors.ErrorDBTransactionConflict.html new file mode 100644 index 00000000..c35e2a4e --- /dev/null +++ b/docs/classes/errors.ErrorDBTransactionConflict.html @@ -0,0 +1,29 @@ +ErrorDBTransactionConflict | @matrixai/db

Class ErrorDBTransactionConflict<T>

Type parameters

  • T

Hierarchy

Index

Constructors

  • new ErrorDBTransactionConflict<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBTransactionConflict<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBTransactionConflict<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DBTransaction cannot commit due to conflicting writes'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>
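
    Since conflicts only surface at commit time under snapshot isolation, callers typically catch this error and retry the whole transaction. A minimal sketch, assuming a withTransactionF-style helper and get/put key-path methods on the transaction (names are illustrative, not prescriptive):

    import type { DB } from '@matrixai/db';
    import { errors } from '@matrixai/db';

    async function incrementWithRetry(db: DB, retries: number = 3): Promise<void> {
      for (let i = 0; i < retries; i++) {
        try {
          await db.withTransactionF(async (tran) => {
            const count = (await tran.get<number>(['counter'])) ?? 0;
            await tran.put(['counter'], count + 1);
          });
          return;
        } catch (e) {
          // First-committer-wins: a concurrent writer won the race; retry
          if (e instanceof errors.ErrorDBTransactionConflict && i < retries - 1) continue;
          throw e;
        }
      }
    }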


\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBTransactionDestroyed.html b/docs/classes/errors.ErrorDBTransactionDestroyed.html index e9365f1f..b015d52f 100644 --- a/docs/classes/errors.ErrorDBTransactionDestroyed.html +++ b/docs/classes/errors.ErrorDBTransactionDestroyed.html @@ -1,16 +1,16 @@ -ErrorDBTransactionDestroyed | @matrixai/db
Class ErrorDBTransactionDestroyed<T>

Type parameters

  • T

Hierarchy

  • ErrorDB<T>
    • ErrorDBTransactionDestroyed

Index

Constructors

  • new ErrorDBTransactionDestroyed<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBTransactionDestroyed<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBTransactionDestroyed<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DBTransaction is destroyed'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBTransactionLockType.html b/docs/classes/errors.ErrorDBTransactionLockType.html new file mode 100644 index 00000000..d91a0cf4 --- /dev/null +++ b/docs/classes/errors.ErrorDBTransactionLockType.html @@ -0,0 +1,29 @@ +ErrorDBTransactionLockType | @matrixai/db

Class ErrorDBTransactionLockType<T>

Type parameters

  • T

Hierarchy

Index

Constructors

  • new ErrorDBTransactionLockType<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBTransactionLockType<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBTransactionLockType<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DBTransaction does not support upgrading or downgrading the lock type'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>
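
    In practice this means acquiring the strongest lock needed up front rather than re-locking the same key with a different type mid-transaction. A sketch only; the tran.lock call and its 'read'/'write' parameters are assumed here for illustration and may not match the actual signature:

    await db.withTransactionF(async (tran) => {
      await tran.lock(['counter', 'write']); // hypothetical lock acquisition
      const count = (await tran.get<number>(['counter'])) ?? 0;
      await tran.put(['counter'], count + 1);
      // Re-locking ['counter'] as 'read' here would throw
      // ErrorDBTransactionLockType: downgrading is not supported
    });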


\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBTransactionNotCommitted.html b/docs/classes/errors.ErrorDBTransactionNotCommitted.html index 9902d849..dbc05acd 100644 --- a/docs/classes/errors.ErrorDBTransactionNotCommitted.html +++ b/docs/classes/errors.ErrorDBTransactionNotCommitted.html @@ -1,16 +1,16 @@ -ErrorDBTransactionNotCommitted | @matrixai/db
Class ErrorDBTransactionNotCommitted<T>

Type parameters

  • T

Hierarchy

  • ErrorDB<T>
    • ErrorDBTransactionNotCommitted

Index

Constructors

  • new ErrorDBTransactionNotCommitted<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBTransactionNotCommitted<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBTransactionNotCommitted<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DBTransaction is not committed'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBTransactionNotCommittedNorRollbacked.html b/docs/classes/errors.ErrorDBTransactionNotCommittedNorRollbacked.html new file mode 100644 index 00000000..61c5ae53 --- /dev/null +++ b/docs/classes/errors.ErrorDBTransactionNotCommittedNorRollbacked.html @@ -0,0 +1,29 @@ +ErrorDBTransactionNotCommittedNorRollbacked | @matrixai/db

Class ErrorDBTransactionNotCommittedNorRollbacked<T>

Type parameters

  • T

Hierarchy

Index

Constructors

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DBTransaction is not committed nor rollbacked'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>


\ No newline at end of file diff --git a/docs/classes/errors.ErrorDBTransactionRollbacked.html b/docs/classes/errors.ErrorDBTransactionRollbacked.html index 644ff5e8..74080675 100644 --- a/docs/classes/errors.ErrorDBTransactionRollbacked.html +++ b/docs/classes/errors.ErrorDBTransactionRollbacked.html @@ -1,16 +1,16 @@ -ErrorDBTransactionRollbacked | @matrixai/db
Class ErrorDBTransactionRollbacked<T>

Type parameters

  • T

Hierarchy

  • ErrorDB<T>
    • ErrorDBTransactionRollbacked

Index

Constructors

  • new ErrorDBTransactionRollbacked<T>(message?: string, options?: { cause?: T; data?: POJO; timestamp?: Date }): ErrorDBTransactionRollbacked<T>
  • Type parameters

    • T

    Parameters

    • Optional message: string
    • Optional options: { cause?: T; data?: POJO; timestamp?: Date }
      • Optional cause?: T
      • Optional data?: POJO
      • Optional timestamp?: Date

    Returns ErrorDBTransactionRollbacked<T>

Properties

cause: T

Causation of the exception Can be used to know what caused this exception

data: POJO

Arbitrary data

message: string
name: string
stack?: string
timestamp: Date

Timestamp when exception was constructed in milliseconds May contain microseconds in the fractional part Guaranteed to be weakly monotonic

description: string = 'DBTransaction is rollbacked'
prepareStackTrace?: (err: Error, stackTraces: CallSite[]) => any

Type declaration

stackTraceLimit: number

Accessors

  • get description(): string
  • Returns string

Methods

  • toJSON(): any
  • Encoding to JSON pojo When overriding this, you can use super.toJSON The replacer will:

    • delete undefined values in objects
    • replace undefined values for null in arrays

    Returns any

  • captureStackTrace(targetObject: object, constructorOpt?: Function): void
  • Create .stack property on a target object

    Parameters

    • targetObject: object
    • Optional constructorOpt: Function

    Returns void

  • fromJSON<T>(this: T, json: any): InstanceType<T>
  • Runtime decoding of JSON POJO to exception instance When overriding this, you cannot use super.fromJSON You must write it fully, and use the same type-hacks to support polymorphic this in static methods https://github.com/microsoft/TypeScript/issues/5863

    Type parameters

    • T: Class<any>

    Parameters

    • this: T
    • json: any

    Returns InstanceType<T>

\ No newline at end of file diff --git a/docs/index.html b/docs/index.html index 53539e41..dab33aad 100644 --- a/docs/index.html +++ b/docs/index.html @@ -5,6 +5,13 @@

js-db

staging: pipeline status master: pipeline status

DB is a library managing key value state for MatrixAI's JavaScript/TypeScript applications.

This forks classic-level's C++ binding code around LevelDB 1.20. Differences from classic-level:

  • Uses TypeScript from ground-up
  • Supports Snapshot-Isolation based transactions via DBTransaction
  • API supports "key paths" which can be used to manipulate "levels" of nested keys
  • Value encryption (key-encryption is not supported yet) - requires additional work with block-encryption

Installation

npm install --save @matrixai/db

Usage
import { DB } from '@matrixai/db';

async function main () {
  const key = Buffer.from([
    0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03,
    0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x02, 0x03,
  ]);

  const encrypt = async (
    key: ArrayBuffer,
    plainText: ArrayBuffer
  ): Promise<ArrayBuffer> => {
    return plainText;
  };

  const decrypt = async (
    key: ArrayBuffer,
    cipherText: ArrayBuffer
  ): Promise<ArrayBuffer | undefined> => {
    return cipherText;
  };

  const db = await DB.createDB({
    dbPath: './tmp/db',
    crypto: {
      key,
      ops: { encrypt, decrypt },
    },
    fresh: true,
  });

  await db.put(['level', Buffer.from([0x30, 0x30]), 'a'], 'value');
  await db.put(['level', Buffer.from([0x30, 0x31]), 'b'], 'value');
  await db.put(['level', Buffer.from([0x30, 0x32]), 'c'], 'value');
  await db.put(['level', Buffer.from([0x30, 0x33]), 'c'], 'value');

  console.log(await db.get(['level', Buffer.from([0x30, 0x32]), 'c']));

  await db.del(['level', Buffer.from([0x30, 0x32]), 'c']);

  for await (const [kP, v] of db.iterator({
    lt: [Buffer.from([0x30, 0x32]), ''],
  }, ['level'])) {
    console.log(kP, v);
  }

  await db.stop();
}

main();

Development

Run nix-shell, and once you're inside, you can use:

# install (or reinstall packages from package.json)
npm install
# build the dist
npm run build
# run the repl (this allows you to import from ./src)
npm run ts-node
# run the tests
npm run test
# lint the source code
npm run lint
# automatically fix the source
npm run lintfix
@@ -36,6 +49,6 @@

Docs Generation

Publishing

# npm login
npm version patch # major/minor/patch
npm run build
npm publish --access public
git push
git push --tags


\ No newline at end of file diff --git a/docs/interfaces/FileSystem.html b/docs/interfaces/FileSystem.html index b7661540..7a56735f 100644 --- a/docs/interfaces/FileSystem.html +++ b/docs/interfaces/FileSystem.html @@ -1,4 +1,4 @@ -FileSystem | @matrixai/db
Interface FileSystem

Hierarchy

  • FileSystem

Index

Properties

Properties

promises: { mkdir: { (path: PathLike, options: MakeDirectoryOptions & { recursive: true }): Promise<string | undefined>; (path: PathLike, options?: null | Mode | (MakeDirectoryOptions & { recursive?: false })): Promise<void>; (path: PathLike, options?: null | MakeDirectoryOptions | Mode): Promise<string | undefined> }; rm: (path: PathLike, options?: RmOptions) => Promise<void> }

Type declaration

  • mkdir: { (path: PathLike, options: MakeDirectoryOptions & { recursive: true }): Promise<string | undefined>; (path: PathLike, options?: null | Mode | (MakeDirectoryOptions & { recursive?: false })): Promise<void>; (path: PathLike, options?: null | MakeDirectoryOptions | Mode): Promise<string | undefined> }
      • (path: PathLike, options: MakeDirectoryOptions & { recursive: true }): Promise<string | undefined>
      • (path: PathLike, options?: null | Mode | (MakeDirectoryOptions & { recursive?: false })): Promise<void>
      • (path: PathLike, options?: null | MakeDirectoryOptions | Mode): Promise<string | undefined>
      • Asynchronously creates a directory.

        The optional options argument can be an integer specifying mode (permission and sticky bits), or an object with a mode property and a recursive property indicating whether parent directories should be created. Calling fsPromises.mkdir() when path is a directory

@@ -24,4 +24,4 @@

        Removes files and directories (modeled on the standard POSIX rm utility).

        since

        v14.14.0

        Parameters

        • path: PathLike
        • Optional options: RmOptions

        Returns Promise<void>

        Fulfills with undefined upon success.
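
Node's built-in fs module already provides promises.mkdir and promises.rm with these shapes, so it structurally satisfies this interface (assuming FileSystem is imported as a type from @matrixai/db):

import fs from 'fs';
import type { FileSystem } from '@matrixai/db';

// fs can be passed wherever a FileSystem is expected
const fileSystem: FileSystem = fs;
await fileSystem.promises.mkdir('./tmp/db', { recursive: true });
await fileSystem.promises.rm('./tmp/db', { recursive: true, force: true });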

\ No newline at end of file diff --git a/docs/interfaces/ToString.html b/docs/interfaces/ToString.html new file mode 100644 index 00000000..11bb5eb8 --- /dev/null +++ b/docs/interfaces/ToString.html @@ -0,0 +1,3 @@ +ToString | @matrixai/db

Interface ToString

Any type that can be turned into a string

Hierarchy

  • ToString

Index

Methods

Methods

  • toString(): string


\ No newline at end of file diff --git a/docs/interfaces/rocksdb.RocksDB.html b/docs/interfaces/rocksdb.RocksDB.html new file mode 100644 index 00000000..4409a790 --- /dev/null +++ b/docs/interfaces/rocksdb.RocksDB.html @@ -0,0 +1 @@ +RocksDB | @matrixai/db

Interface RocksDB

Hierarchy

  • RocksDB

Index

Methods

  • batchPut(batch: RocksDBBatch, key: string | Buffer, value: string | Buffer): void
  • dbApproximateSize(database: RocksDBDatabase, start: string | Buffer, end: string | Buffer, callback: Callback<[number], void, Error>): void
  • dbCompactRange(database: RocksDBDatabase, start: string | Buffer, end: string | Buffer, callback: Callback<[], void, Error>): void
  • destroyDb(location: string, callback: Callback<[], void, Error>): void
  • iteratorClose(iterator: RocksDBIterator<string | Buffer, string | Buffer>, callback: Callback<[], void, Error>): void
  • iteratorNextv<K, V>(iterator: RocksDBIterator<K, V>, size: number, callback: Callback<[[K, V][], boolean], void, Error>): void
  • iteratorSeek<K>(iterator: RocksDBIterator<K, string | Buffer>, target: K): void
  • repairDb(location: string, callback: Callback<[], void, Error>): void
  • transactionPut(transaction: RocksDBTransaction, key: string | Buffer, value: string | Buffer, callback: Callback<[], void, Error>): void


\ No newline at end of file diff --git a/docs/interfaces/rocksdb.RocksDBP.html b/docs/interfaces/rocksdb.RocksDBP.html new file mode 100644 index 00000000..f6c06986 --- /dev/null +++ b/docs/interfaces/rocksdb.RocksDBP.html @@ -0,0 +1 @@ +RocksDBP | @matrixai/db

Interface RocksDBP

Hierarchy

  • RocksDBP

Index

Methods

  • batchPut(batch: RocksDBBatch, key: string | Buffer, value: string | Buffer): void
  • dbApproximateSize(database: RocksDBDatabase, start: string | Buffer, end: string | Buffer): Promise<number>
  • dbCompactRange(database: RocksDBDatabase, start: string | Buffer, end: string | Buffer): Promise<void>
  • destroyDb(location: string): Promise<void>
  • iteratorClose(iterator: RocksDBIterator<string | Buffer, string | Buffer>): Promise<void>
  • iteratorNextv<K, V>(iterator: RocksDBIterator<K, V>, size: number): Promise<[[K, V][], boolean]>
  • iteratorSeek<K>(iterator: RocksDBIterator<K, string | Buffer>, target: K): void
  • repairDb(location: string): Promise<void>
  • transactionPut(transaction: RocksDBTransaction, key: string | Buffer, value: string | Buffer): Promise<void>
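
To contrast the two interfaces, a hedged sketch using the documented destroyDb signatures; the import path and database location are assumptions, not something this page specifies:

  import { rocksdb, rocksdbP } from './rocksdb'; // hypothetical import path

  // Callback style (RocksDB): the error, if any, is the first callback argument
  rocksdb.destroyDb('/tmp/mydb', (e) => {
    if (e != null) console.error(e);
  });

  // Promisified style (RocksDBP): the same operation returned as a Promise
  async function destroy(): Promise<void> {
    await rocksdbP.destroyDb('/tmp/mydb');
  }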


\ No newline at end of file diff --git a/docs/modules.html b/docs/modules.html index 01b2e39f..1d048f9c 100644 --- a/docs/modules.html +++ b/docs/modules.html @@ -1,21 +1,29 @@ -@matrixai/db

@matrixai/db

Index

Type aliases

Crypto: { decrypt: any; encrypt: any }
+@matrixai/db

@matrixai/db

Index

Type aliases

Callback<P, R, E>: { (e: E, ...params: Partial<P>): R; (e?: null, ...params: P): R }

Type parameters

  • P: any[] = []

  • R = any

  • E: Error = Error

Type declaration

    • (e: E, ...params: Partial<P>): R
    • (e?: null, ...params: P): R
    • +

      Generic callback

      +

      Parameters

      • e: E
      • Rest ...params: Partial<P>

      Returns R

    • +

      Generic callback

      +

      Parameters

      • Optional e: null
      • Rest ...params: P

      Returns R
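
A small sketch of a function conforming to this Callback shape (the readValue name and behaviour are illustrative only):

  type Callback<P extends any[] = [], R = any, E extends Error = Error> = {
    (e: E, ...params: Partial<P>): R;
    (e?: null, ...params: P): R;
  };

  // A callback-style read: either an error, or a value in the success position
  function readValue(key: string, callback: Callback<[Buffer]>): void {
    if (key.length === 0) {
      callback(new Error('key must be non-empty'));
    } else {
      callback(null, Buffer.from('hello'));
    }
  }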

Crypto: { decrypt: any; encrypt: any }

Crypto utility object. Remember every Node Buffer is an ArrayBuffer.

-

Type declaration

  • decrypt:function
    • decrypt(key: ArrayBuffer, cipherText: ArrayBuffer): Promise<undefined | ArrayBuffer>
    • Parameters

      • key: ArrayBuffer
      • cipherText: ArrayBuffer

      Returns Promise<undefined | ArrayBuffer>

  • encrypt:function
    • encrypt(key: ArrayBuffer, plainText: ArrayBuffer): Promise<ArrayBuffer>
    • Parameters

      • key: ArrayBuffer
      • plainText: ArrayBuffer

      Returns Promise<ArrayBuffer>
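
A minimal Crypto object sketch, assuming AES-256-GCM via Node's WebCrypto with the random IV prepended to the ciphertext; both choices are made here for illustration and are not prescribed by this package:

  import { webcrypto } from 'crypto';

  const crypto = {
    async encrypt(key: ArrayBuffer, plainText: ArrayBuffer): Promise<ArrayBuffer> {
      const k = await webcrypto.subtle.importKey('raw', key, 'AES-GCM', false, ['encrypt']);
      const iv = webcrypto.getRandomValues(new Uint8Array(12));
      const cipherText = await webcrypto.subtle.encrypt({ name: 'AES-GCM', iv }, k, plainText);
      // Prepend the IV so decrypt can recover it (an illustrative convention)
      const out = new Uint8Array(iv.byteLength + cipherText.byteLength);
      out.set(iv);
      out.set(new Uint8Array(cipherText), iv.byteLength);
      return out.buffer;
    },
    async decrypt(key: ArrayBuffer, cipherText: ArrayBuffer): Promise<ArrayBuffer | undefined> {
      const k = await webcrypto.subtle.importKey('raw', key, 'AES-GCM', false, ['decrypt']);
      const data = new Uint8Array(cipherText);
      try {
        return await webcrypto.subtle.decrypt(
          { name: 'AES-GCM', iv: data.subarray(0, 12) },
          k,
          data.subarray(12),
        );
      } catch {
        // Authentication failure maps to undefined, matching the declared type
        return undefined;
      }
    },
  };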

DBBatch: AbstractBatch
DBIterator<K, V>: { [asyncIterator]: any; end: any; next: any; seek: any }
-

Iterator

-

Type parameters

Type declaration

  • [asyncIterator]:function
    • [asyncIterator](): AsyncGenerator<[K, V], any, unknown>
  • end:function
    • end(): Promise<void>
  • next:function
    • next(): Promise<undefined | [K, V]>
  • seek:function
    • seek(k: string | Buffer | readonly (string | Buffer)[]): void
    • Parameters

      • k: string | Buffer | readonly (string | Buffer)[]

      Returns void

DBIteratorOptions: { gt?: KeyPath | Buffer | string; gte?: KeyPath | Buffer | string; keyAsBuffer?: boolean; keys?: boolean; limit?: number; lt?: KeyPath | Buffer | string; lte?: KeyPath | Buffer | string; reverse?: boolean; valueAsBuffer?: boolean; values?: boolean }
+

Type declaration

  • decrypt:function
    • decrypt(key: ArrayBuffer, cipherText: ArrayBuffer): Promise<undefined | ArrayBuffer>
    • Parameters

      • key: ArrayBuffer
      • cipherText: ArrayBuffer

      Returns Promise<undefined | ArrayBuffer>

  • encrypt:function
    • encrypt(key: ArrayBuffer, plainText: ArrayBuffer): Promise<ArrayBuffer>
    • Parameters

      • key: ArrayBuffer
      • plainText: ArrayBuffer

      Returns Promise<ArrayBuffer>

DBClearOptions<S>: Merge<RocksDBClearOptions<S>, { gt?: KeyPath | Buffer | string; gte?: KeyPath | Buffer | string; lt?: KeyPath | Buffer | string; lte?: KeyPath | Buffer | string }>

Type parameters

DBCountOptions<S>: Merge<RocksDBCountOptions<S>, { gt?: KeyPath | Buffer | string; gte?: KeyPath | Buffer | string; lt?: KeyPath | Buffer | string; lte?: KeyPath | Buffer | string }>

Type parameters

DBIteratorOptions<S>: Merge<Omit<RocksDBIteratorOptions<S>, "keyEncoding" | "valueEncoding">, { gt?: KeyPath | Buffer | string; gte?: KeyPath | Buffer | string; keyAsBuffer?: boolean; lt?: KeyPath | Buffer | string; lte?: KeyPath | Buffer | string; valueAsBuffer?: boolean }>

Iterator options. The keyAsBuffer property controls whether DBIterator returns KeyPath as buffers or as strings; it should be considered to default to true. The valueAsBuffer property controls the value type; it should also be considered to default to true.

-

Type declaration

  • Optional gt?: KeyPath | Buffer | string
  • Optional gte?: KeyPath | Buffer | string
  • Optional keyAsBuffer?: boolean
  • Optional keys?: boolean
  • Optional limit?: number
  • Optional lt?: KeyPath | Buffer | string
  • Optional lte?: KeyPath | Buffer | string
  • Optional reverse?: boolean
  • Optional valueAsBuffer?: boolean
  • Optional values?: boolean
DBOp: ({ type: "put" } & DBOp_) | ({ type: "del" } & Omit<DBOp_, "value" | "raw">)
DBOps: DBOp[]
DBWorkerManagerInterface: WorkerManagerInterface<Crypto>
KeyPath: Readonly<(string | Buffer)[]>
+

Type parameters

DBOp: ({ type: "put" } & DBOp_) | ({ type: "del" } & Omit<DBOp_, "value" | "raw">)
DBOps: DBOp[]
DBOptions: Omit<RocksDBDatabaseOptions, "createIfMissing" | "errorIfExists">
DBWorkerManagerInterface: WorkerManagerInterface<Crypto>
KeyPath: Readonly<(string | Buffer)[]>

Path to a key. This must be a non-empty array.

-
LevelPath: Readonly<(string | Buffer)[]>
+
LevelPath: Readonly<(string | Buffer)[]>

Path to a DB level. An empty level path refers to the root level.

-
POJO: {}
+
Merge<A, B>: { [ K in keyof A & B]: K extends keyof B ? B[K] : K extends keyof A ? A[K] : never }
+

Merge A's property types with B's property types, where B's property types override A's property types.

+

Type parameters

  • A

  • B
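
For example, a usage sketch (reproducing the mapped type above with explicit parentheses):

  type Merge<A, B> = {
    [K in keyof (A & B)]: K extends keyof B ? B[K] : K extends keyof A ? A[K] : never;
  };

  type Defaults = { keyAsBuffer: boolean; limit: number };
  type Overrides = { keyAsBuffer: false };

  // Merged['keyAsBuffer'] is false (B overrides A); Merged['limit'] stays number
  type Merged = Merge<Defaults, Overrides>;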

MultiLockRequest: [key: ToString, lockingParams: Parameters<RWLockWriter["lock"]>]
Opaque<K, T>: T & { [brand]: K }
+

Opaque types are wrappers of existing types that require smart constructors.

+

Type parameters

  • K

  • T
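
A sketch of the smart-constructor pattern this enables (the brand symbol and LevelName type are illustrative):

  declare const brand: unique symbol;
  type Opaque<K, T> = T & { readonly [brand]: K };

  type LevelName = Opaque<'LevelName', string>;

  // The smart constructor is the only sanctioned way to produce a LevelName
  function levelName(s: string): LevelName {
    if (s.includes('\0')) throw new RangeError('level names cannot contain NUL');
    return s as LevelName;
  }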

POJO: {}

Plain data dictionary

-

Type declaration

  • [key: string]: any


\ No newline at end of file +

Type declaration

  • [key: string]: any


\ No newline at end of file diff --git a/docs/modules/errors.html b/docs/modules/errors.html index c3b4062e..bddb605f 100644 --- a/docs/modules/errors.html +++ b/docs/modules/errors.html @@ -1 +1 @@ -errors | @matrixai/db

Namespace errors


\ No newline at end of file +errors | @matrixai/db

Namespace errors


\ No newline at end of file diff --git a/docs/modules/rocksdb.html b/docs/modules/rocksdb.html new file mode 100644 index 00000000..98e1e054 --- /dev/null +++ b/docs/modules/rocksdb.html @@ -0,0 +1,51 @@ +rocksdb | @matrixai/db

Namespace rocksdb

Index

Type aliases

RocksDBBatch: Opaque<"RocksDBBatch", object>
+

RocksDBBatch object. A napi_external type.

+
RocksDBBatchDelOperation: { key: string | Buffer; type: "del" }

Type declaration

  • key: string | Buffer
  • type: "del"
RocksDBBatchOptions: RocksDBPutOptions
+

Batch options

+
RocksDBBatchPutOperation: { key: string | Buffer; type: "put"; value: string | Buffer }

Type declaration

  • key: string | Buffer
  • type: "put"
  • value: string | Buffer
RocksDBClearOptions<S>: Omit<RocksDBRangeOptions, "reverse"> & { snapshot?: S; sync?: S extends RocksDBSnapshot ? boolean : void }
+

Clear options

+

Type parameters

RocksDBCountOptions<S>: Omit<RocksDBRangeOptions, "reverse"> & { snapshot?: S }
+

Count options

+

Type parameters

RocksDBDatabase: Opaque<"RocksDBDatabase", object>
+

RocksDBDatabase object. A napi_external type.

+
RocksDBDatabaseOptions: { blockRestartInterval?: number; blockSize?: number; cacheSize?: number; compression?: boolean; createIfMissing?: boolean; errorIfExists?: boolean; infoLogLevel?: "debug" | "info" | "warn" | "error" | "fatal" | "header"; maxFileSize?: number; maxOpenFiles?: number; writeBufferSize?: number }
+

RocksDB database options

+

Type declaration

  • Optional blockRestartInterval?: number
  • Optional blockSize?: number
  • Optional cacheSize?: number
  • Optional compression?: boolean
  • Optional createIfMissing?: boolean
  • Optional errorIfExists?: boolean
  • Optional infoLogLevel?: "debug" | "info" | "warn" | "error" | "fatal" | "header"
  • Optional maxFileSize?: number
  • Optional maxOpenFiles?: number
  • Optional writeBufferSize?: number
RocksDBDelOptions: RocksDBPutOptions
+

Del options

+
RocksDBGetOptions<S>: { fillCache?: boolean; snapshot?: S; valueEncoding?: "utf8" | "buffer" }
+

Get options

+

Type parameters

Type declaration

  • Optional fillCache?: boolean
  • Optional snapshot?: S
  • Optional valueEncoding?: "utf8" | "buffer"
RocksDBIterator<K, V>: Opaque<"RocksDBIterator", object> & { [brandRocksDBIteratorK]: K; [brandRocksDBIteratorV]: V }
+

RocksDBIterator object. A napi_external type. If keys or values is set to false, then K and V will be an empty buffer. If both keys and values are set to false, the iterator will give back an empty array as entries.

+

Type parameters

  • K: string | Buffer = string | Buffer

  • V: string | Buffer = string | Buffer

RocksDBIteratorOptions<S>: RocksDBGetOptions<S> & RocksDBRangeOptions & { highWaterMarkBytes?: number; keyEncoding?: "utf8" | "buffer"; keys?: boolean; values?: boolean }
+

Iterator options

+

Type parameters

RocksDBPutOptions: { sync?: boolean }
+

Put options

+

Type declaration

  • Optional sync?: boolean
    +

    If true, rocksdb will perform fsync() before completing the operation. It is still asynchronous relative to Node.js. If the operating system crashes, writes may be lost. Prefer to flip this to true when a transaction batch is written; this will amortize the cost of fsync() across the entire transaction.

    +
RocksDBRangeOptions: { gt?: string | Buffer; gte?: string | Buffer; limit?: number; lt?: string | Buffer; lte?: string | Buffer; reverse?: boolean }
+

Range options

+

Type declaration

  • Optional gt?: string | Buffer
  • Optional gte?: string | Buffer
  • Optional limit?: number
  • Optional lt?: string | Buffer
  • Optional lte?: string | Buffer
  • Optional reverse?: boolean
RocksDBSnapshot: Opaque<"RocksDBSnapshot", object>
+

RocksDBSnapshot object. A napi_external type.

+
RocksDBTransaction: Opaque<"RocksDBTransaction", object>
+

RocksDBTransaction object. A napi_external type.

+
RocksDBTransactionOptions: RocksDBPutOptions
+

Transaction options

+
RocksDBTransactionSnapshot: Opaque<"RocksDBTransactionSnapshot", object>
+

RocksDBTransactionSnapshot object. A napi_external type.

+

Variables

rocksdb: RocksDB = ...
rocksdbP: RocksDBP = ...
+

Promisified version of RocksDB

+


\ No newline at end of file diff --git a/docs/modules/utils.html b/docs/modules/utils.html index 3d4d1c0d..cb7f9a33 100644 --- a/docs/modules/utils.html +++ b/docs/modules/utils.html @@ -1,25 +1,29 @@ -utils | @matrixai/db

Namespace utils

Index

Variables

sep: Buffer = ...
+utils | @matrixai/db

Namespace utils

Index

Variables

sep: Buffer = ...

Separator is a single null byte. This special symbol must not appear in the encoded parts.

-

Functions

  • decodePart(data: Buffer): Buffer

Functions

  • decodePart(data: Buffer): Buffer
  • Decode level or key part from base 128. The special empty symbol is decoded as an empty buffer.

    -

    Parameters

    • data: Buffer

    Returns Buffer

  • deserialize<T>(value_: Buffer): T
  • encodePart(part: Buffer): Buffer
  • deserialize<T>(value_: Buffer): T
  • encodePart(part: Buffer): Buffer
  • Encode level or key part using base 128 encoding. Empty parts are encoded with the special empty symbol.

    -

    Parameters

    • part: Buffer

    Returns Buffer

  • fromArrayBuffer(b: ArrayBuffer, offset?: number, length?: number): Buffer
  • filterUndefined(o: object): void
  • +

    Native addons expect strict optional properties. Properties that have the value undefined may be misinterpreted. Apply this to options objects before passing them to the native addon.

    +

    Parameters

    • o: object

    Returns void
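
    A plausible implementation of the described behaviour (a sketch, not necessarily the package's exact code):

      function filterUndefined(o: object): void {
        // Deletes own enumerable properties whose value is undefined, in place
        for (const key of Object.keys(o)) {
          if ((o as Record<string, unknown>)[key] === undefined) {
            delete (o as Record<string, unknown>)[key];
          }
        }
      }

      const options = { sync: true, fillCache: undefined };
      filterUndefined(options); // options is now { sync: true }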

  • fromArrayBuffer(b: ArrayBuffer, offset?: number, length?: number): Buffer
  • Wraps ArrayBuffer in Node Buffer with zero copy

    -

    Parameters

    • b: ArrayBuffer
    • Optional offset: number
    • Optional length: number

    Returns Buffer

  • keyPathToKey(keyPath: readonly (string | Buffer)[]): Buffer
  • +

    Parameters

    • b: ArrayBuffer
    • Optional offset: number
    • Optional length: number

    Returns Buffer

  • iterationOptions<O>(options: O, levelPath: readonly (string | Buffer)[]): Merge<O, { gt?: Buffer; gte?: Buffer; keyEncoding: "buffer"; lt?: Buffer; lte?: Buffer; valueEncoding: "buffer" }>
  • Type parameters

    • O: { gt?: string | Buffer | readonly (string | Buffer)[]; gte?: string | Buffer | readonly (string | Buffer)[]; lt?: string | Buffer | readonly (string | Buffer)[]; lte?: string | Buffer | readonly (string | Buffer)[] }

    Parameters

    • options: O
    • levelPath: readonly (string | Buffer)[]

    Returns Merge<O, { gt?: Buffer; gte?: Buffer; keyEncoding: "buffer"; lt?: Buffer; lte?: Buffer; valueEncoding: "buffer" }>

  • keyPathToKey(keyPath: readonly (string | Buffer)[]): Buffer
  • Converts KeyPath to key buffer, e.g. ['A', 'B'] => !A!B (where ! is the sep). An empty key path is converted to ['']. Level parts are allowed to contain the separator. The key actual part is allowed to contain the separator.

    -

    Parameters

    • keyPath: readonly (string | Buffer)[]

    Returns Buffer

  • levelPathToKey(levelPath: readonly (string | Buffer)[]): Buffer
  • +

    Parameters

    • keyPath: readonly (string | Buffer)[]

    Returns Buffer

  • levelPathToKey(levelPath: readonly (string | Buffer)[]): Buffer
  • Converts LevelPath to key buffer, e.g. ['A', 'B'] => !A!!B! (where ! is the sep). Level parts are allowed to contain the separator before encoding.

    -

    Parameters

    • levelPath: readonly (string | Buffer)[]

    Returns Buffer

  • +

    Parameters

    • levelPath: readonly (string | Buffer)[]

    Returns Buffer

  • Converts key buffer back into KeyPath, e.g. !A!!B!C => ['A', 'B', 'C'] (where ! is the sep). Returned parts are always buffers.

    @@ -29,11 +33,20 @@
    level => sep .?:l sep -> l
    sep => 0x00
    keyActual => .:k -> [k]

    -

    Parameters

    • key: Buffer

    Returns KeyPath
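
    Putting the two conversions together, a simplified sketch of the encoding rules described above; it ignores the base-128 escaping that encodePart applies to each part:

      const sep = Buffer.from([0x00]); // the single null byte separator

      const toBuf = (p: string | Buffer): Buffer =>
        typeof p === 'string' ? Buffer.from(p) : p;

      function levelPathToKey(levelPath: ReadonlyArray<string | Buffer>): Buffer {
        // Each level part is wrapped in separators: ['A', 'B'] => !A!!B!
        return Buffer.concat(
          levelPath.map((p) => Buffer.concat([sep, toBuf(p), sep])),
        );
      }

      function keyPathToKey(keyPath: ReadonlyArray<string | Buffer>): Buffer {
        // Leading parts are levels; the final part is the key actual:
        // ['A', 'B'] => !A!B
        const key = keyPath.length > 0 ? keyPath[keyPath.length - 1] : '';
        return Buffer.concat([levelPathToKey(keyPath.slice(0, -1)), toBuf(key)]);
      }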

  • sepExists(data: string | Buffer): boolean
  • promisify<T, P, R>(f: (...args: [...params: P[], callback: Callback<T, any, Error>]) => unknown): (...params: P) => Promise<R>
  • +

    Convert callback-style to promise-style. If this is applied to an overloaded function, it will only choose one of the function signatures to use.

    +

    Type parameters

    • T: unknown[]

    • P: unknown[]

    • R: unknown[]

    Parameters

    • f: (...args: [...params: P[], callback: Callback<T, any, Error>]) => unknown
        • (...args: [...params: P[], callback: Callback<T, any, Error>]): unknown
        • Parameters

          • Rest ...args: [...params: P[], callback: Callback<T, any, Error>]

          Returns unknown

    Returns (...params: P) => Promise<R>

      • (...params: P): Promise<R>
      • +

        Convert callback-style to promise-style. If this is applied to an overloaded function, it will only choose one of the function signatures to use.

        +

        Parameters

        • Rest ...params: P

        Returns Promise<R>
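
    A simplified sketch of this conversion, dropping the precise generic typing shown above:

      function promisify<R>(
        f: (...args: any[]) => unknown,
      ): (...params: any[]) => Promise<R> {
        return (...params: any[]) =>
          new Promise<R>((resolve, reject) => {
            // Append a Node-style callback as the final argument
            f(...params, (e: Error | null, result: R) => {
              if (e != null) {
                reject(e);
              } else {
                resolve(result);
              }
            });
          });
      }

      // Usage sketch: const destroyDbP = promisify<void>(rocksdb.destroyDb);
      // await destroyDbP('/tmp/mydb');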

  • sepExists(data: string | Buffer): boolean
  • Checks if the separator exists in a string or buffer. This only needs to be applied to the LevelPath, not the final key.

    -

    Parameters

    • data: string | Buffer

    Returns boolean

  • serialize<T>(value: T): Buffer
  • toArrayBuffer(b: Buffer): ArrayBuffer
  • serialize<T>(value: T): Buffer
  • toArrayBuffer(b: Buffer): ArrayBuffer
  • Slice-copies the Node Buffer to a new ArrayBuffer

    -

    Parameters

    • b: Buffer

    Returns ArrayBuffer

  • toKeyPath(keyPath: string | Buffer | readonly (string | Buffer)[]): KeyPath
  • -

    Used to convert a possible KeyPath into a legal KeyPath

    -

    Parameters

    • keyPath: string | Buffer | readonly (string | Buffer)[]

    Returns KeyPath


\ No newline at end of file +

Parameters

  • b: Buffer

Returns ArrayBuffer

  • toKeyPath(keyPath: string | Buffer | readonly (string | Buffer)[]): KeyPath
  • +

    Used to convert a possible KeyPath into a legal KeyPath. Returns a copy which can be mutated.

    +

    Parameters

    • keyPath: string | Buffer | readonly (string | Buffer)[]

    Returns KeyPath


\ No newline at end of file
diff --git a/jest.config.js b/jest.config.js
index d3988358..b1344b5d 100644
--- a/jest.config.js
+++ b/jest.config.js
@@ -2,10 +2,9 @@
 const path = require('path');
 const { pathsToModuleNameMapper } = require('ts-jest');
 const { compilerOptions } = require('./tsconfig');
-const moduleNameMapper = pathsToModuleNameMapper(
-  compilerOptions.paths,
-  { prefix: "<rootDir>/src/" }
-);
+const moduleNameMapper = pathsToModuleNameMapper(compilerOptions.paths, {
+  prefix: '<rootDir>/src/',
+});

 // Global variables that are shared across the jest worker pool
 // These variables must be static and serializable
@@ -26,41 +25,34 @@ const globals = {
 // Use `process.env` to set variables

 module.exports = {
-  testEnvironment: "node",
-  cacheDirectory: '/tmp/jest',
+  testEnvironment: 'node',
   verbose: true,
-  roots: [
-    "<rootDir>/tests"
-  ],
-  testMatch: [
-    "**/?(*.)+(spec|test|unit.test).+(ts|tsx|js)"
-  ],
+  collectCoverage: false,
+  cacheDirectory: '/tmp/jest',
+  coverageDirectory: '<rootDir>/tmp/coverage',
+  roots: ['<rootDir>/tests'],
+  testMatch: ['**/?(*.)+(spec|test|unit.test).+(ts|tsx|js|jsx)'],
   transform: {
-    "^.+\\.tsx?$": "ts-jest",
-    "^.+\\.jsx?$": "babel-jest"
+    '^.+\\.tsx?$': 'ts-jest',
+    '^.+\\.jsx?$': 'babel-jest',
   },
   reporters: [
-    "default",
-    [
-      "jest-junit",
-      { outputDirectory: "./tmp" }
-    ]
+    'default',
+    ['jest-junit', { outputDirectory: '<rootDir>/tmp/junit' }],
   ],
+  collectCoverageFrom: ['src/**/*.{ts,tsx,js,jsx}', '!src/**/*.d.ts'],
+  coverageReporters: ['text', 'cobertura'],
   globals,
   // Global setup script executed once before all test files
-  globalSetup: "<rootDir>/tests/globalSetup.ts",
+  globalSetup: '<rootDir>/tests/globalSetup.ts',
   // Global teardown script executed once after all test files
-  globalTeardown: "<rootDir>/tests/globalTeardown.ts",
+  globalTeardown: '<rootDir>/tests/globalTeardown.ts',
   // Setup files are executed before each test file
   // Can access globals
-  setupFiles: [
-    "<rootDir>/tests/setup.ts"
-  ],
+  setupFiles: ['<rootDir>/tests/setup.ts'],
   // Setup files after env are executed before each test file
   // after the jest test environment is installed
   // Can access globals
-  setupFilesAfterEnv: [
-    "<rootDir>/tests/setupAfterEnv.ts"
-  ],
-  moduleNameMapper: moduleNameMapper
+  setupFilesAfterEnv: ['<rootDir>/tests/setupAfterEnv.ts'],
+  moduleNameMapper: moduleNameMapper,
 };
diff --git a/package-lock.json b/package-lock.json
index 652ce990..91df1dcd 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -7,20 +7,20 @@
     "": {
       "name": "@matrixai/db",
       "version": "4.0.5",
+      "hasInstallScript": true,
       "license": "Apache-2.0",
       "dependencies": {
-        "@matrixai/async-init": "^1.7.3",
-        "@matrixai/errors": "^1.1.1",
-        "@matrixai/logger": "^2.1.1",
+        "@matrixai/async-init": "^1.8.1",
+        "@matrixai/async-locks": "^3.0.0",
+        "@matrixai/errors": "^1.1.2",
+        "@matrixai/logger": "^2.3.0",
         "@matrixai/resources": "^1.1.3",
         "@matrixai/workers": "^1.3.3",
-        "@types/abstract-leveldown": "^7.2.0",
-        "level": "7.0.1",
+        "node-gyp-build": "4.4.0",
         "threads": "^1.6.5"
       },
       "devDependencies": {
         "@types/jest": "^27.0.2",
-        "@types/level": "^6.0.0",
         "@types/node": "^16.11.7",
         "@types/node-forge": "^0.10.4",
         "@typescript-eslint/eslint-plugin": "^5.23.0",
@@ -33,7 +33,10 @@
         "jest": "^27.2.5",
         "jest-junit": "^13.2.0",
         "lexicographic-integer": "^1.1.0",
+        "napi-macros": "^2.0.0",
         "node-forge": "^1.3.1",
+        "node-gyp": "9.0.0",
+        "prebuildify": "^5.0.0",
         "prettier": "^2.6.2",
         "rimraf": "^3.0.2",
         "systeminformation": "^5.8.9",
@@ -973,6 +976,12 @@
         "js-yaml": "bin/js-yaml.js"
       }
     },
+    "node_modules/@gar/promisify": {
+      "version": "1.1.3",
+      "resolved":
"https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", + "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", + "dev": true + }, "node_modules/@humanwhocodes/config-array": { "version": "0.9.5", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.9.5.tgz", @@ -1321,18 +1330,28 @@ } }, "node_modules/@matrixai/async-init": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@matrixai/async-init/-/async-init-1.7.3.tgz", - "integrity": "sha512-Sf3q5ODhVJqrYiAdGXmwj606956lgEMKGM9LMFU5scIOh13WokHo3GthjB1yh/umCV75NYvHJn60R9gnudVZ3Q==", + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@matrixai/async-init/-/async-init-1.8.1.tgz", + "integrity": "sha512-ZAS1yd/PC+r3NwvT9fEz3OtAm68A8mKXXGdZRcYQF1ajl43jsV8/B4aDwr2oLFlV+RYZgWl7UwjZj4rtoZSycQ==", "dependencies": { - "@matrixai/async-locks": "^2.2.4", + "@matrixai/async-locks": "^2.3.1", "@matrixai/errors": "^1.1.1" } }, + "node_modules/@matrixai/async-init/node_modules/@matrixai/async-locks": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-2.3.1.tgz", + "integrity": "sha512-STz8VyiIXleaa72zMsq01x/ZO1gPzukUgMe25+uqMWn/nPrC9EtJOR7e3CW0DODfYDZ0748z196GeOjS3jh+4g==", + "dependencies": { + "@matrixai/errors": "^1.1.1", + "@matrixai/resources": "^1.1.3", + "async-mutex": "^0.3.2" + } + }, "node_modules/@matrixai/async-locks": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-2.2.4.tgz", - "integrity": "sha512-AEGQMM7zw8Mkcc0hbNpOCNKa6DW+04rVIwyZgUnPWawPqwUt5HSGaQwdXI3dXO+35G/vjJppggv+JJZsGfEjvA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-3.0.0.tgz", + "integrity": "sha512-m4gjr28fMejlgEwXntTMo427ExzgPV2LDxQ9i9sGcrxhvepfYpxJlRsTpTh0s1AkWcfNpz5fPbkgzw/CQzotuw==", "dependencies": { "@matrixai/errors": "^1.1.1", "@matrixai/resources": "^1.1.3", @@ -1348,9 +1367,9 @@ } }, "node_modules/@matrixai/logger": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@matrixai/logger/-/logger-2.1.1.tgz", - "integrity": "sha512-79KM0PyJTpfkALf9DK2xGniU+9gngsb5O8hcdUviWz+zR2W0hnTQq/g7tJW0YnIEhmDe/GkJf0Bnbs+gWfj3BA==" + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@matrixai/logger/-/logger-2.3.0.tgz", + "integrity": "sha512-DbsUv9eBubB2WxA8aGygnY/A2Ggm9a+ZnnnL2hIWWnE+sid92FK96gubW1a+u8OrXWx559HqUTBkcPDs83zV/A==" }, "node_modules/@matrixai/resources": { "version": "1.1.3", @@ -1403,6 +1422,32 @@ "node": ">= 8" } }, + "node_modules/@npmcli/fs": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.0.tgz", + "integrity": "sha512-DmfBvNXGaetMxj9LTp8NAN9vEidXURrf5ZTslQzEAi/6GbW+4yjaLFQc6Tue5cpZ9Frlk4OBo/Snf1Bh/S7qTQ==", + "dev": true, + "dependencies": { + "@gar/promisify": "^1.1.3", + "semver": "^7.3.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@npmcli/move-file": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@npmcli/move-file/-/move-file-2.0.0.tgz", + "integrity": "sha512-UR6D5f4KEGWJV6BGPH3Qb2EtgH+t+1XQ1Tt85c7qicN6cezzuHPdZwwAxqZr4JLtnQu0LZsTza/5gmNmSl8XLg==", + "dev": true, + "dependencies": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, "node_modules/@sinonjs/commons": { "version": "1.8.3", "dev": true, @@ -1447,10 +1492,6 @@ "dev": true, "license": "MIT" }, - 
"node_modules/@types/abstract-leveldown": { - "version": "7.2.0", - "license": "MIT" - }, "node_modules/@types/babel__core": { "version": "7.1.19", "dev": true, @@ -1488,15 +1529,6 @@ "@babel/types": "^7.3.0" } }, - "node_modules/@types/encoding-down": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/abstract-leveldown": "*", - "@types/level-codec": "*" - } - }, "node_modules/@types/graceful-fs": { "version": "4.1.5", "dev": true, @@ -1547,36 +1579,6 @@ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", "dev": true }, - "node_modules/@types/level": { - "version": "6.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/abstract-leveldown": "*", - "@types/encoding-down": "*", - "@types/levelup": "*" - } - }, - "node_modules/@types/level-codec": { - "version": "9.0.1", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/level-errors": { - "version": "3.0.0", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/levelup": { - "version": "5.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/abstract-leveldown": "*", - "@types/level-errors": "*", - "@types/node": "*" - } - }, "node_modules/@types/node": { "version": "16.11.36", "resolved": "https://registry.npmjs.org/@types/node/-/node-16.11.36.tgz", @@ -1803,6 +1805,12 @@ "dev": true, "license": "BSD-3-Clause" }, + "node_modules/abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "dev": true + }, "node_modules/acorn": { "version": "7.4.1", "dev": true, @@ -1851,6 +1859,33 @@ "node": ">= 6.0.0" } }, + "node_modules/agentkeepalive": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.2.1.tgz", + "integrity": "sha512-Zn4cw2NEqd+9fiSVWMscnjyQ1a8Yfoc5oBajLeo5w+YBHgDUcEBY2hS4YpTz6iN5f/2zQiktcuM6tS8x1p9dpA==", + "dev": true, + "dependencies": { + "debug": "^4.1.0", + "depd": "^1.1.2", + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dev": true, + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -1927,6 +1962,25 @@ "node": ">= 8" } }, + "node_modules/aproba": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz", + "integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==", + "dev": true + }, + "node_modules/are-we-there-yet": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-3.0.0.tgz", + "integrity": "sha512-0GWpv50YSOcLXaN6/FAKY3vfRbllXWV2xvfA/oKJF8pzFhWXPV+yjhJXDBbjscDYowv7Yw1A3uigpzn5iEGTyw==", + "dev": true, + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16" + } + }, "node_modules/arg": { "version": "4.1.3", "dev": true, @@ -1993,14 +2047,16 @@ }, "node_modules/async-mutex": { "version": "0.3.2", - 
"license": "MIT", + "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.2.tgz", + "integrity": "sha512-HuTK7E7MT7jZEh1P9GtRW9+aTWiDWWi9InbZ5hjxrnRa39KS4BW04+xLBhYNS2aXhHUIKZSw3gj4Pn1pj+qGAA==", "dependencies": { "tslib": "^2.3.1" } }, "node_modules/async-mutex/node_modules/tslib": { "version": "2.4.0", - "license": "0BSD" + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", + "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" }, "node_modules/asynckit": { "version": "0.4.0", @@ -2114,6 +2170,7 @@ }, "node_modules/base64-js": { "version": "1.5.1", + "dev": true, "funding": [ { "type": "github", @@ -2164,6 +2221,41 @@ "node": ">=6" } }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/bl/node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, "node_modules/brace-expansion": { "version": "1.1.11", "dev": true, @@ -2236,33 +2328,89 @@ "node-int64": "^0.4.0" } }, - "node_modules/buffer": { - "version": "6.0.3", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, "node_modules/buffer-from": { "version": "1.1.2", "dev": true, "license": "MIT" }, + "node_modules/cacache": { + "version": "16.1.0", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.0.tgz", + "integrity": "sha512-Pk4aQkwCW82A4jGKFvcGkQFqZcMspfP9YWq9Pr87/ldDvlWf718zeI6KWCdKt/jeihu6BytHRUicJPB1K2k8EQ==", + "dev": true, + "dependencies": { + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": "^1.1.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/cacache/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/cacache/node_modules/glob": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.0.3.tgz", + 
"integrity": "sha512-ull455NHSHI/Y1FqGaaYFaLGkNMMJbavMrEGFXG/PGrg6y7sutWHUHrz6gy6WEBH6akM1M414dWKCNs+IhKdiQ==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/cacache/node_modules/lru-cache": { + "version": "7.10.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.10.1.tgz", + "integrity": "sha512-BQuhQxPuRl79J5zSXRP+uNzPOyZw2oFI9JLRQ80XswSvg21KMKNtQza9eF42rfI/3Z40RvzBdXgziEkudzjo8A==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/cacache/node_modules/minimatch": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.0.tgz", + "integrity": "sha512-9TPBGGak4nHfGZsPBohm9AWg6NoT7QTCehS3BIJABslyZbzxfV78QM2Y6+i741OPZIafFAaiiEMh5OyIrJPgtg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/call-bind": { "version": "1.0.2", "dev": true, @@ -2305,13 +2453,6 @@ ], "license": "CC-BY-4.0" }, - "node_modules/catering": { - "version": "2.0.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/chalk": { "version": "4.1.2", "dev": true, @@ -2335,6 +2476,15 @@ "node": ">=10" } }, + "node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "dev": true, + "engines": { + "node": ">=10" + } + }, "node_modules/ci-info": { "version": "3.3.0", "dev": true, @@ -2345,6 +2495,15 @@ "dev": true, "license": "MIT" }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/cli-cursor": { "version": "3.1.0", "dev": true, @@ -2412,6 +2571,15 @@ "dev": true, "license": "MIT" }, + "node_modules/color-support": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "dev": true, + "bin": { + "color-support": "bin.js" + } + }, "node_modules/combined-stream": { "version": "1.0.8", "dev": true, @@ -2436,6 +2604,12 @@ "dev": true, "license": "MIT" }, + "node_modules/console-control-strings": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=", + "dev": true + }, "node_modules/convert-source-map": { "version": "1.8.0", "dev": true, @@ -2559,6 +2733,21 @@ "node": ">=0.4.0" } }, + "node_modules/delegates": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", + "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", + "dev": true + }, + "node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", + "dev": true, + "engines": { + "node": ">= 0.6" + } + }, "node_modules/detect-newline": { "version": "3.1.0", "dev": true, @@ -2646,6 +2835,53 @@ "dev": true, "license": "MIT" 
}, + "node_modules/encoding": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", + "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", + "dev": true, + "optional": true, + "dependencies": { + "iconv-lite": "^0.6.2" + } + }, + "node_modules/encoding/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "optional": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dev": true, + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/err-code": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", + "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", + "dev": true + }, "node_modules/error-ex": { "version": "1.3.2", "dev": true, @@ -3253,6 +3489,15 @@ "url": "https://github.com/sindresorhus/execa?sponsor=1" } }, + "node_modules/execspawn": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/execspawn/-/execspawn-1.0.1.tgz", + "integrity": "sha1-gob53efOzeeQX73ATiTzaPI/jaY=", + "dev": true, + "dependencies": { + "util-extend": "^1.0.1" + } + }, "node_modules/exit": { "version": "0.1.2", "dev": true, @@ -3403,6 +3648,12 @@ "node": ">= 6" } }, + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "dev": true + }, "node_modules/fs-extra": { "version": "9.1.0", "dev": true, @@ -3425,6 +3676,18 @@ "node": ">= 10.0.0" } }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dev": true, + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/fs.realpath": { "version": "1.0.0", "dev": true, @@ -3440,6 +3703,25 @@ "dev": true, "license": "MIT" }, + "node_modules/gauge": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/gauge/-/gauge-4.0.4.tgz", + "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", + "dev": true, + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.3", + "console-control-strings": "^1.1.0", + "has-unicode": "^2.0.1", + "signal-exit": "^3.0.7", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, "node_modules/gensync": { 
"version": "1.0.0-beta.2", "dev": true, @@ -3626,6 +3908,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/has-unicode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=", + "dev": true + }, "node_modules/html-encoding-sniffer": { "version": "2.0.1", "dev": true, @@ -3642,6 +3930,12 @@ "dev": true, "license": "MIT" }, + "node_modules/http-cache-semantics": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", + "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==", + "dev": true + }, "node_modules/http-proxy-agent": { "version": "4.0.1", "dev": true, @@ -3675,6 +3969,15 @@ "node": ">=10.17.0" } }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=", + "dev": true, + "dependencies": { + "ms": "^2.0.0" + } + }, "node_modules/iconv-lite": { "version": "0.4.24", "dev": true, @@ -3688,6 +3991,7 @@ }, "node_modules/ieee754": { "version": "1.2.1", + "dev": true, "funding": [ { "type": "github", @@ -3754,6 +4058,21 @@ "node": ">=0.8.19" } }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/infer-owner": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", + "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", + "dev": true + }, "node_modules/inflight": { "version": "1.0.6", "dev": true, @@ -3765,6 +4084,7 @@ }, "node_modules/inherits": { "version": "2.0.4", + "dev": true, "license": "ISC" }, "node_modules/internal-slot": { @@ -3780,6 +4100,12 @@ "node": ">= 0.4" } }, + "node_modules/ip": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.8.tgz", + "integrity": "sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==", + "dev": true + }, "node_modules/is-arrayish": { "version": "0.2.1", "dev": true, @@ -3811,27 +4137,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-buffer": { - "version": "2.0.5", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/is-callable": { "version": "1.2.4", "dev": true, @@ -3905,6 +4210,12 @@ "node": ">=0.10.0" } }, + "node_modules/is-lambda": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-lambda/-/is-lambda-1.0.1.tgz", + "integrity": "sha1-PZh3iZ5qU+/AFgUEzeFfgubwYdU=", + "dev": true + }, "node_modules/is-negative-zero": { "version": "2.0.2", "dev": true, @@ -5019,197 +5330,6 @@ "node": ">=6" } }, - "node_modules/level": { - "version": "7.0.1", - "license": "MIT", - "dependencies": { - "level-js": "^6.1.0", - "level-packager": "^6.0.1", - "leveldown": "^6.1.0" - }, - "engines": { - "node": ">=10.12.0" - }, - "funding": { - "type": "opencollective", - 
"url": "https://opencollective.com/level" - } - }, - "node_modules/level-concat-iterator": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/level-js": { - "version": "6.1.0", - "license": "MIT", - "dependencies": { - "abstract-leveldown": "^7.2.0", - "buffer": "^6.0.3", - "inherits": "^2.0.3", - "ltgt": "^2.1.2", - "run-parallel-limit": "^1.1.0" - } - }, - "node_modules/level-js/node_modules/abstract-leveldown": { - "version": "7.2.0", - "license": "MIT", - "dependencies": { - "buffer": "^6.0.3", - "catering": "^2.0.0", - "is-buffer": "^2.0.5", - "level-concat-iterator": "^3.0.0", - "level-supports": "^2.0.1", - "queue-microtask": "^1.2.3" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/level-js/node_modules/level-supports": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager": { - "version": "6.0.1", - "license": "MIT", - "dependencies": { - "encoding-down": "^7.1.0", - "levelup": "^5.1.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager/node_modules/abstract-leveldown": { - "version": "7.2.0", - "license": "MIT", - "dependencies": { - "buffer": "^6.0.3", - "catering": "^2.0.0", - "is-buffer": "^2.0.5", - "level-concat-iterator": "^3.0.0", - "level-supports": "^2.0.1", - "queue-microtask": "^1.2.3" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager/node_modules/deferred-leveldown": { - "version": "7.0.0", - "license": "MIT", - "dependencies": { - "abstract-leveldown": "^7.2.0", - "inherits": "^2.0.3" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager/node_modules/encoding-down": { - "version": "7.1.0", - "license": "MIT", - "dependencies": { - "abstract-leveldown": "^7.2.0", - "inherits": "^2.0.3", - "level-codec": "^10.0.0", - "level-errors": "^3.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager/node_modules/level-codec": { - "version": "10.0.0", - "license": "MIT", - "dependencies": { - "buffer": "^6.0.3" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager/node_modules/level-errors": { - "version": "3.0.1", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager/node_modules/level-iterator-stream": { - "version": "5.0.0", - "license": "MIT", - "dependencies": { - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager/node_modules/level-supports": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, - "node_modules/level-packager/node_modules/levelup": { - "version": "5.1.1", - "license": "MIT", - "dependencies": { - "catering": "^2.0.0", - "deferred-leveldown": "^7.0.0", - "level-errors": "^3.0.1", - "level-iterator-stream": "^5.0.0", - "level-supports": "^2.0.1", - "queue-microtask": "^1.2.3" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/leveldown": { - "version": "6.1.0", - "hasInstallScript": true, - "license": "MIT", - "dependencies": { - "abstract-leveldown": "^7.2.0", - "napi-macros": "~2.0.0", - "node-gyp-build": "^4.3.0" - }, - "engines": { - "node": ">=10.12.0" - } - }, - "node_modules/leveldown/node_modules/abstract-leveldown": { - "version": "7.2.0", - "license": "MIT", - "dependencies": { - "buffer": "^6.0.3", - "catering": "^2.0.0", - "is-buffer": "^2.0.5", - "level-concat-iterator": "^3.0.0", - "level-supports": "^2.0.1", - "queue-microtask": "^1.2.3" - }, 
- "engines": { - "node": ">=10" - } - }, - "node_modules/leveldown/node_modules/level-supports": { - "version": "2.1.0", - "license": "MIT", - "engines": { - "node": ">=10" - } - }, "node_modules/leven": { "version": "3.1.0", "dev": true, @@ -5299,10 +5419,6 @@ "node": ">=10" } }, - "node_modules/ltgt": { - "version": "2.2.1", - "license": "MIT" - }, "node_modules/lunr": { "version": "2.3.9", "dev": true, @@ -5335,6 +5451,65 @@ "dev": true, "license": "ISC" }, + "node_modules/make-fetch-happen": { + "version": "10.1.5", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.1.5.tgz", + "integrity": "sha512-mucOj2H0Jn/ax7H9K9T1bf0p1nn/mBFa551Os7ed9xRfLEx20aZhZeLslmRYfAaAqXZUGipcs+m5KOKvOH0XKA==", + "dev": true, + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^6.1.1", + "ssri": "^9.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "engines": { + "node": ">= 10" + } + }, + "node_modules/make-fetch-happen/node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/make-fetch-happen/node_modules/lru-cache": { + "version": "7.10.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.10.1.tgz", + "integrity": "sha512-BQuhQxPuRl79J5zSXRP+uNzPOyZw2oFI9JLRQ80XswSvg21KMKNtQza9eF42rfI/3Z40RvzBdXgziEkudzjo8A==", + "dev": true, + "engines": { + "node": ">=12" + } + }, "node_modules/makeerror": { "version": "1.0.12", "dev": true, @@ -5425,6 +5600,96 @@ "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", "dev": true }, + "node_modules/minipass": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", + "integrity": "sha512-rty5kpw9/z8SX9dmxblFA6edItUmwJgMeYDZRrwlIVN27i8gysGbznJwUggw2V/FVqFSDdWy040ZPS811DYAqQ==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-collect": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", + "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", + "dev": true, + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minipass-fetch": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.0.tgz", + "integrity": 
"sha512-H9U4UVBGXEyyWJnqYDCLp1PwD8XIkJ4akNHp1aGVI+2Ym7wQMlxDKi4IB4JbmyU+pl9pEs/cVrK6cOuvmbK4Sg==", + "dev": true, + "dependencies": { + "minipass": "^3.1.6", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/minipass-flush": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", + "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", + "dev": true, + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minipass-pipeline": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", + "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", + "dev": true, + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-sized": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", + "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", + "dev": true, + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dev": true, + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/mkdirp": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", @@ -5437,19 +5702,48 @@ "node": ">=10" } }, + "node_modules/mkdirp-classic": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", + "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "dev": true + }, "node_modules/ms": { "version": "2.1.2", "license": "MIT" }, "node_modules/napi-macros": { "version": "2.0.0", - "license": "MIT" + "resolved": "https://registry.npmjs.org/napi-macros/-/napi-macros-2.0.0.tgz", + "integrity": "sha512-A0xLykHtARfueITVDernsAWdtIMbOJgKgcluwENp3AlsKN/PloyO10HtmoqnFAQAcxPkgZN7wdfPfEd0zNGxbg==", + "dev": true }, "node_modules/natural-compare": { "version": "1.4.0", "dev": true, "license": "MIT" }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "dev": true, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/node-abi": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.22.0.tgz", + "integrity": "sha512-u4uAs/4Zzmp/jjsD9cyFYDXeISfUWaAVWshPmDZOFOv4Xl4SbzTXm53I04C2uRueYJ+0t5PEtLH/owbn2Npf/w==", + "dev": true, + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/node-forge": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", @@ -5459,6 +5753,30 @@ "node": ">= 6.13.0" } }, + "node_modules/node-gyp": { + "version": "9.0.0", + "resolved": 
"https://registry.npmjs.org/node-gyp/-/node-gyp-9.0.0.tgz", + "integrity": "sha512-Ma6p4s+XCTPxCuAMrOA/IJRmVy16R8Sdhtwl4PrCr7IBlj4cPawF0vg/l7nOT1jPbuNS7lIRJpBSvVsXwEZuzw==", + "dev": true, + "dependencies": { + "env-paths": "^2.2.0", + "glob": "^7.1.4", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^10.0.3", + "nopt": "^5.0.0", + "npmlog": "^6.0.0", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.2", + "which": "^2.0.2" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": "^12.22 || ^14.13 || >=16" + } + }, "node_modules/node-gyp-build": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.4.0.tgz", @@ -5479,6 +5797,21 @@ "dev": true, "license": "MIT" }, + "node_modules/nopt": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", + "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "dev": true, + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/normalize-path": { "version": "3.0.0", "dev": true, @@ -5498,6 +5831,21 @@ "node": ">=8" } }, + "node_modules/npmlog": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-6.0.2.tgz", + "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", + "dev": true, + "dependencies": { + "are-we-there-yet": "^3.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^4.0.3", + "set-blocking": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, "node_modules/nwsapi": { "version": "2.2.0", "dev": true, @@ -5619,6 +5967,21 @@ "node": ">=8" } }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dev": true, + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/p-try": { "version": "2.2.0", "dev": true, @@ -5739,6 +6102,36 @@ "dev": true, "license": "MIT" }, + "node_modules/prebuildify": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/prebuildify/-/prebuildify-5.0.0.tgz", + "integrity": "sha512-XhuFIeZx8Tk8e8yn3h5e+CE572pecpdKPrVubUIW0HctP3fpzh4eSWoHR1eOoQNTtxBUt1ixPLHPLbOTYi6STw==", + "dev": true, + "dependencies": { + "execspawn": "^1.0.1", + "minimist": "^1.2.5", + "mkdirp-classic": "^0.5.3", + "node-abi": "^3.3.0", + "npm-run-path": "^3.1.0", + "pump": "^3.0.0", + "tar-fs": "^2.1.0" + }, + "bin": { + "prebuildify": "bin.js" + } + }, + "node_modules/prebuildify/node_modules/npm-run-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-3.1.0.tgz", + "integrity": "sha512-Dbl4A/VfiVGLgQv29URL9xshU8XDY1GeLy+fsaZ1AA8JDSfjvr5P5+pzRbWqRSBxk6/DW7MIh8lTM/PaGnP2kg==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/prelude-ls": { "version": "1.2.1", "dev": true, @@ -5797,6 +6190,25 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/promise-inflight": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=", + "dev": true + 
}, + "node_modules/promise-retry": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", + "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", + "dev": true, + "dependencies": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/prompts": { "version": "2.4.2", "dev": true, @@ -5814,6 +6226,16 @@ "dev": true, "license": "MIT" }, + "node_modules/pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, "node_modules/punycode": { "version": "2.1.1", "dev": true, @@ -5824,6 +6246,7 @@ }, "node_modules/queue-microtask": { "version": "1.2.3", + "dev": true, "funding": [ { "type": "github", @@ -5847,6 +6270,7 @@ }, "node_modules/readable-stream": { "version": "3.6.0", + "dev": true, "license": "MIT", "dependencies": { "inherits": "^2.0.3", @@ -5941,6 +6365,15 @@ "node": ">=8" } }, + "node_modules/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=", + "dev": true, + "engines": { + "node": ">= 4" + } + }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -5988,29 +6421,9 @@ "queue-microtask": "^1.2.2" } }, - "node_modules/run-parallel-limit": { - "version": "1.1.0", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, "node_modules/safe-buffer": { "version": "5.2.1", + "dev": true, "funding": [ { "type": "github", @@ -6058,6 +6471,12 @@ "node": ">=10" } }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, "node_modules/shebang-command": { "version": "2.0.0", "dev": true, @@ -6135,6 +6554,44 @@ "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "dev": true, + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.6.2.tgz", + "integrity": "sha512-zDZhHhZRY9PxRruRMR7kMhnf3I8hDs4S3f9RecfnGxvcBHQcKcIH/oUcEWffsfl1XxdYlA7nnlGbbTvPz9D8gA==", + "dev": true, + "dependencies": { + "ip": "^1.1.5", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.13.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-6.2.0.tgz", + "integrity": "sha512-wWqJhjb32Q6GsrUqzuFkukxb/zzide5quXYcMVpIjxalDBBYy2nqKCFQ/9+Ie4dvOYSQdOk3hUlZSdzZOd3zMQ==", + "dev": true, + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" 
+ }, + "engines": { + "node": ">= 10" + } + }, "node_modules/source-map": { "version": "0.6.1", "dev": true, @@ -6157,6 +6614,18 @@ "dev": true, "license": "BSD-3-Clause" }, + "node_modules/ssri": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz", + "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", + "dev": true, + "dependencies": { + "minipass": "^3.1.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, "node_modules/stack-utils": { "version": "2.0.5", "dev": true, @@ -6183,6 +6652,7 @@ }, "node_modules/string_decoder": { "version": "1.3.0", + "dev": true, "license": "MIT", "dependencies": { "safe-buffer": "~5.2.0" @@ -6341,6 +6811,57 @@ "url": "https://www.buymeacoffee.com/systeminfo" } }, + "node_modules/tar": { + "version": "6.1.11", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", + "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", + "dev": true, + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^3.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/tar-fs": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", + "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "dev": true, + "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.1.4" + } + }, + "node_modules/tar-fs/node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "dev": true + }, + "node_modules/tar-stream": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", + "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", + "dev": true, + "dependencies": { + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/terminal-link": { "version": "2.1.1", "dev": true, @@ -6745,6 +7266,24 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/unique-filename": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", + "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", + "dev": true, + "dependencies": { + "unique-slug": "^2.0.0" + } + }, + "node_modules/unique-slug": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", + "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4" + } + }, "node_modules/universalify": { "version": "0.1.2", "dev": true, @@ -6764,8 +7303,15 @@ }, "node_modules/util-deprecate": { "version": "1.0.2", + "dev": true, "license": "MIT" }, + "node_modules/util-extend": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/util-extend/-/util-extend-1.0.3.tgz", + "integrity": "sha1-p8IW0mdUUWljeztu3GypEZ4v+T8=", + "dev": true + }, 
"node_modules/uuid": { "version": "8.3.2", "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", @@ -6901,6 +7447,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/wide-align": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", + "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "dev": true, + "dependencies": { + "string-width": "^1.0.2 || 2 || 3 || 4" + } + }, "node_modules/word-wrap": { "version": "1.2.3", "dev": true, @@ -7643,6 +8198,12 @@ } } }, + "@gar/promisify": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@gar/promisify/-/promisify-1.1.3.tgz", + "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", + "dev": true + }, "@humanwhocodes/config-array": { "version": "0.9.5", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.9.5.tgz", @@ -7909,18 +8470,30 @@ } }, "@matrixai/async-init": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/@matrixai/async-init/-/async-init-1.7.3.tgz", - "integrity": "sha512-Sf3q5ODhVJqrYiAdGXmwj606956lgEMKGM9LMFU5scIOh13WokHo3GthjB1yh/umCV75NYvHJn60R9gnudVZ3Q==", + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/@matrixai/async-init/-/async-init-1.8.1.tgz", + "integrity": "sha512-ZAS1yd/PC+r3NwvT9fEz3OtAm68A8mKXXGdZRcYQF1ajl43jsV8/B4aDwr2oLFlV+RYZgWl7UwjZj4rtoZSycQ==", "requires": { - "@matrixai/async-locks": "^2.2.4", + "@matrixai/async-locks": "^2.3.1", "@matrixai/errors": "^1.1.1" + }, + "dependencies": { + "@matrixai/async-locks": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-2.3.1.tgz", + "integrity": "sha512-STz8VyiIXleaa72zMsq01x/ZO1gPzukUgMe25+uqMWn/nPrC9EtJOR7e3CW0DODfYDZ0748z196GeOjS3jh+4g==", + "requires": { + "@matrixai/errors": "^1.1.1", + "@matrixai/resources": "^1.1.3", + "async-mutex": "^0.3.2" + } + } } }, "@matrixai/async-locks": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-2.2.4.tgz", - "integrity": "sha512-AEGQMM7zw8Mkcc0hbNpOCNKa6DW+04rVIwyZgUnPWawPqwUt5HSGaQwdXI3dXO+35G/vjJppggv+JJZsGfEjvA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-3.0.0.tgz", + "integrity": "sha512-m4gjr28fMejlgEwXntTMo427ExzgPV2LDxQ9i9sGcrxhvepfYpxJlRsTpTh0s1AkWcfNpz5fPbkgzw/CQzotuw==", "requires": { "@matrixai/errors": "^1.1.1", "@matrixai/resources": "^1.1.3", @@ -7936,9 +8509,9 @@ } }, "@matrixai/logger": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@matrixai/logger/-/logger-2.1.1.tgz", - "integrity": "sha512-79KM0PyJTpfkALf9DK2xGniU+9gngsb5O8hcdUviWz+zR2W0hnTQq/g7tJW0YnIEhmDe/GkJf0Bnbs+gWfj3BA==" + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@matrixai/logger/-/logger-2.3.0.tgz", + "integrity": "sha512-DbsUv9eBubB2WxA8aGygnY/A2Ggm9a+ZnnnL2hIWWnE+sid92FK96gubW1a+u8OrXWx559HqUTBkcPDs83zV/A==" }, "@matrixai/resources": { "version": "1.1.3", @@ -7982,6 +8555,26 @@ "fastq": "^1.6.0" } }, + "@npmcli/fs": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-2.1.0.tgz", + "integrity": "sha512-DmfBvNXGaetMxj9LTp8NAN9vEidXURrf5ZTslQzEAi/6GbW+4yjaLFQc6Tue5cpZ9Frlk4OBo/Snf1Bh/S7qTQ==", + "dev": true, + "requires": { + "@gar/promisify": "^1.1.3", + "semver": "^7.3.5" + } + }, + "@npmcli/move-file": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/@npmcli/move-file/-/move-file-2.0.0.tgz", + "integrity": "sha512-UR6D5f4KEGWJV6BGPH3Qb2EtgH+t+1XQ1Tt85c7qicN6cezzuHPdZwwAxqZr4JLtnQu0LZsTza/5gmNmSl8XLg==", + "dev": true, + "requires": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + } + }, "@sinonjs/commons": { "version": "1.8.3", "dev": true, @@ -8016,9 +8609,6 @@ "version": "1.0.2", "dev": true }, - "@types/abstract-leveldown": { - "version": "7.2.0" - }, "@types/babel__core": { "version": "7.1.19", "dev": true, @@ -8052,14 +8642,6 @@ "@babel/types": "^7.3.0" } }, - "@types/encoding-down": { - "version": "5.0.0", - "dev": true, - "requires": { - "@types/abstract-leveldown": "*", - "@types/level-codec": "*" - } - }, "@types/graceful-fs": { "version": "4.1.5", "dev": true, @@ -8105,32 +8687,6 @@ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", "dev": true }, - "@types/level": { - "version": "6.0.0", - "dev": true, - "requires": { - "@types/abstract-leveldown": "*", - "@types/encoding-down": "*", - "@types/levelup": "*" - } - }, - "@types/level-codec": { - "version": "9.0.1", - "dev": true - }, - "@types/level-errors": { - "version": "3.0.0", - "dev": true - }, - "@types/levelup": { - "version": "5.1.0", - "dev": true, - "requires": { - "@types/abstract-leveldown": "*", - "@types/level-errors": "*", - "@types/node": "*" - } - }, "@types/node": { "version": "16.11.36", "resolved": "https://registry.npmjs.org/@types/node/-/node-16.11.36.tgz", @@ -8262,6 +8818,12 @@ "version": "2.0.6", "dev": true }, + "abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "dev": true + }, "acorn": { "version": "7.4.1", "dev": true @@ -8292,6 +8854,27 @@ "debug": "4" } }, + "agentkeepalive": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.2.1.tgz", + "integrity": "sha512-Zn4cw2NEqd+9fiSVWMscnjyQ1a8Yfoc5oBajLeo5w+YBHgDUcEBY2hS4YpTz6iN5f/2zQiktcuM6tS8x1p9dpA==", + "dev": true, + "requires": { + "debug": "^4.1.0", + "depd": "^1.1.2", + "humanize-ms": "^1.2.1" + } + }, + "aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dev": true, + "requires": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + } + }, "ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -8338,6 +8921,22 @@ "picomatch": "^2.0.4" } }, + "aproba": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz", + "integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==", + "dev": true + }, + "are-we-there-yet": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-3.0.0.tgz", + "integrity": "sha512-0GWpv50YSOcLXaN6/FAKY3vfRbllXWV2xvfA/oKJF8pzFhWXPV+yjhJXDBbjscDYowv7Yw1A3uigpzn5iEGTyw==", + "dev": true, + "requires": { + "delegates": "^1.0.0", + "readable-stream": "^3.6.0" + } + }, "arg": { "version": "4.1.3", "dev": true @@ -8381,12 +8980,16 @@ }, "async-mutex": { "version": "0.3.2", + "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.2.tgz", + "integrity": 
"sha512-HuTK7E7MT7jZEh1P9GtRW9+aTWiDWWi9InbZ5hjxrnRa39KS4BW04+xLBhYNS2aXhHUIKZSw3gj4Pn1pj+qGAA==", "requires": { "tslib": "^2.3.1" }, "dependencies": { "tslib": { - "version": "2.4.0" + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz", + "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==" } } }, @@ -8470,7 +9073,8 @@ "dev": true }, "base64-js": { - "version": "1.5.1" + "version": "1.5.1", + "dev": true }, "benchmark": { "version": "2.1.4", @@ -8502,6 +9106,29 @@ } } }, + "bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "requires": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + }, + "dependencies": { + "buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "requires": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + } + } + }, "brace-expansion": { "version": "1.1.11", "dev": true, @@ -8546,17 +9173,75 @@ "node-int64": "^0.4.0" } }, - "buffer": { - "version": "6.0.3", - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, "buffer-from": { "version": "1.1.2", "dev": true }, + "cacache": { + "version": "16.1.0", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-16.1.0.tgz", + "integrity": "sha512-Pk4aQkwCW82A4jGKFvcGkQFqZcMspfP9YWq9Pr87/ldDvlWf718zeI6KWCdKt/jeihu6BytHRUicJPB1K2k8EQ==", + "dev": true, + "requires": { + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": "^1.1.1" + }, + "dependencies": { + "brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0" + } + }, + "glob": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.0.3.tgz", + "integrity": "sha512-ull455NHSHI/Y1FqGaaYFaLGkNMMJbavMrEGFXG/PGrg6y7sutWHUHrz6gy6WEBH6akM1M414dWKCNs+IhKdiQ==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + } + }, + "lru-cache": { + "version": "7.10.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.10.1.tgz", + "integrity": "sha512-BQuhQxPuRl79J5zSXRP+uNzPOyZw2oFI9JLRQ80XswSvg21KMKNtQza9eF42rfI/3Z40RvzBdXgziEkudzjo8A==", + "dev": true + }, + "minimatch": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.0.tgz", + "integrity": "sha512-9TPBGGak4nHfGZsPBohm9AWg6NoT7QTCehS3BIJABslyZbzxfV78QM2Y6+i741OPZIafFAaiiEMh5OyIrJPgtg==", + "dev": true, + "requires": { + "brace-expansion": "^2.0.1" + } + } + } + }, "call-bind": { "version": "1.0.2", "dev": true, @@ -8576,9 +9261,6 @@ 
"version": "1.0.30001335", "dev": true }, - "catering": { - "version": "2.0.0" - }, "chalk": { "version": "4.1.2", "dev": true, @@ -8591,6 +9273,12 @@ "version": "1.0.2", "dev": true }, + "chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "dev": true + }, "ci-info": { "version": "3.3.0", "dev": true @@ -8599,6 +9287,12 @@ "version": "1.2.2", "dev": true }, + "clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true + }, "cli-cursor": { "version": "3.1.0", "dev": true, @@ -8645,6 +9339,12 @@ "version": "1.1.4", "dev": true }, + "color-support": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "dev": true + }, "combined-stream": { "version": "1.0.8", "dev": true, @@ -8660,6 +9360,12 @@ "version": "0.0.1", "dev": true }, + "console-control-strings": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=", + "dev": true + }, "convert-source-map": { "version": "1.8.0", "dev": true, @@ -8747,6 +9453,18 @@ "version": "1.0.0", "dev": true }, + "delegates": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", + "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", + "dev": true + }, + "depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", + "dev": true + }, "detect-newline": { "version": "3.1.0", "dev": true @@ -8800,6 +9518,49 @@ "version": "8.0.0", "dev": true }, + "encoding": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", + "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", + "dev": true, + "optional": true, + "requires": { + "iconv-lite": "^0.6.2" + }, + "dependencies": { + "iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "optional": true, + "requires": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + } + } + } + }, + "end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dev": true, + "requires": { + "once": "^1.4.0" + } + }, + "env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "dev": true + }, + "err-code": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", + "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", + "dev": true + }, "error-ex": { "version": 
"1.3.2", "dev": true, @@ -9233,6 +9994,15 @@ "strip-final-newline": "^2.0.0" } }, + "execspawn": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/execspawn/-/execspawn-1.0.1.tgz", + "integrity": "sha1-gob53efOzeeQX73ATiTzaPI/jaY=", + "dev": true, + "requires": { + "util-extend": "^1.0.1" + } + }, "exit": { "version": "0.1.2", "dev": true @@ -9346,6 +10116,12 @@ "mime-types": "^2.1.12" } }, + "fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "dev": true + }, "fs-extra": { "version": "9.1.0", "dev": true, @@ -9362,6 +10138,15 @@ } } }, + "fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dev": true, + "requires": { + "minipass": "^3.0.0" + } + }, "fs.realpath": { "version": "1.0.0", "dev": true @@ -9374,6 +10159,22 @@ "version": "1.0.1", "dev": true }, + "gauge": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/gauge/-/gauge-4.0.4.tgz", + "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", + "dev": true, + "requires": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.3", + "console-control-strings": "^1.1.0", + "has-unicode": "^2.0.1", + "signal-exit": "^3.0.7", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.5" + } + }, "gensync": { "version": "1.0.0-beta.2", "dev": true @@ -9481,6 +10282,12 @@ "has-symbols": "^1.0.2" } }, + "has-unicode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=", + "dev": true + }, "html-encoding-sniffer": { "version": "2.0.1", "dev": true, @@ -9492,6 +10299,12 @@ "version": "2.0.2", "dev": true }, + "http-cache-semantics": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", + "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==", + "dev": true + }, "http-proxy-agent": { "version": "4.0.1", "dev": true, @@ -9513,6 +10326,15 @@ "version": "2.1.0", "dev": true }, + "humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha1-xG4xWaKT9riW2ikxbYtv6Lt5u+0=", + "dev": true, + "requires": { + "ms": "^2.0.0" + } + }, "iconv-lite": { "version": "0.4.24", "dev": true, @@ -9521,7 +10343,8 @@ } }, "ieee754": { - "version": "1.2.1" + "version": "1.2.1", + "dev": true }, "ignore": { "version": "5.2.0", @@ -9549,6 +10372,18 @@ "version": "0.1.4", "dev": true }, + "indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true + }, + "infer-owner": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz", + "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", + "dev": true + }, "inflight": { "version": "1.0.6", "dev": true, @@ -9558,7 +10393,8 @@ } }, "inherits": { - "version": "2.0.4" + 
"version": "2.0.4", + "dev": true }, "internal-slot": { "version": "1.0.3", @@ -9569,6 +10405,12 @@ "side-channel": "^1.0.4" } }, + "ip": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.8.tgz", + "integrity": "sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==", + "dev": true + }, "is-arrayish": { "version": "0.2.1", "dev": true @@ -9588,9 +10430,6 @@ "has-tostringtag": "^1.0.0" } }, - "is-buffer": { - "version": "2.0.5" - }, "is-callable": { "version": "1.2.4", "dev": true @@ -9632,6 +10471,12 @@ "is-extglob": "^2.1.1" } }, + "is-lambda": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-lambda/-/is-lambda-1.0.1.tgz", + "integrity": "sha1-PZh3iZ5qU+/AFgUEzeFfgubwYdU=", + "dev": true + }, "is-negative-zero": { "version": "2.0.2", "dev": true @@ -10410,133 +11255,6 @@ "version": "3.0.3", "dev": true }, - "level": { - "version": "7.0.1", - "requires": { - "level-js": "^6.1.0", - "level-packager": "^6.0.1", - "leveldown": "^6.1.0" - } - }, - "level-concat-iterator": { - "version": "3.0.0" - }, - "level-js": { - "version": "6.1.0", - "requires": { - "abstract-leveldown": "^7.2.0", - "buffer": "^6.0.3", - "inherits": "^2.0.3", - "ltgt": "^2.1.2", - "run-parallel-limit": "^1.1.0" - }, - "dependencies": { - "abstract-leveldown": { - "version": "7.2.0", - "requires": { - "buffer": "^6.0.3", - "catering": "^2.0.0", - "is-buffer": "^2.0.5", - "level-concat-iterator": "^3.0.0", - "level-supports": "^2.0.1", - "queue-microtask": "^1.2.3" - } - }, - "level-supports": { - "version": "2.1.0" - } - } - }, - "level-packager": { - "version": "6.0.1", - "requires": { - "encoding-down": "^7.1.0", - "levelup": "^5.1.1" - }, - "dependencies": { - "abstract-leveldown": { - "version": "7.2.0", - "requires": { - "buffer": "^6.0.3", - "catering": "^2.0.0", - "is-buffer": "^2.0.5", - "level-concat-iterator": "^3.0.0", - "level-supports": "^2.0.1", - "queue-microtask": "^1.2.3" - } - }, - "deferred-leveldown": { - "version": "7.0.0", - "requires": { - "abstract-leveldown": "^7.2.0", - "inherits": "^2.0.3" - } - }, - "encoding-down": { - "version": "7.1.0", - "requires": { - "abstract-leveldown": "^7.2.0", - "inherits": "^2.0.3", - "level-codec": "^10.0.0", - "level-errors": "^3.0.0" - } - }, - "level-codec": { - "version": "10.0.0", - "requires": { - "buffer": "^6.0.3" - } - }, - "level-errors": { - "version": "3.0.1" - }, - "level-iterator-stream": { - "version": "5.0.0", - "requires": { - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "level-supports": { - "version": "2.1.0" - }, - "levelup": { - "version": "5.1.1", - "requires": { - "catering": "^2.0.0", - "deferred-leveldown": "^7.0.0", - "level-errors": "^3.0.1", - "level-iterator-stream": "^5.0.0", - "level-supports": "^2.0.1", - "queue-microtask": "^1.2.3" - } - } - } - }, - "leveldown": { - "version": "6.1.0", - "requires": { - "abstract-leveldown": "^7.2.0", - "napi-macros": "~2.0.0", - "node-gyp-build": "^4.3.0" - }, - "dependencies": { - "abstract-leveldown": { - "version": "7.2.0", - "requires": { - "buffer": "^6.0.3", - "catering": "^2.0.0", - "is-buffer": "^2.0.5", - "level-concat-iterator": "^3.0.0", - "level-supports": "^2.0.1", - "queue-microtask": "^1.2.3" - } - }, - "level-supports": { - "version": "2.1.0" - } - } - }, "leven": { "version": "3.1.0", "dev": true @@ -10597,9 +11315,6 @@ "yallist": "^4.0.0" } }, - "ltgt": { - "version": "2.2.1" - }, "lunr": { "version": "2.3.9", "dev": true @@ -10621,6 +11336,55 @@ "version": "1.3.6", "dev": 
true }, + "make-fetch-happen": { + "version": "10.1.5", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-10.1.5.tgz", + "integrity": "sha512-mucOj2H0Jn/ax7H9K9T1bf0p1nn/mBFa551Os7ed9xRfLEx20aZhZeLslmRYfAaAqXZUGipcs+m5KOKvOH0XKA==", + "dev": true, + "requires": { + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^6.1.1", + "ssri": "^9.0.0" + }, + "dependencies": { + "@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true + }, + "http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "requires": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + } + }, + "lru-cache": { + "version": "7.10.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.10.1.tgz", + "integrity": "sha512-BQuhQxPuRl79J5zSXRP+uNzPOyZw2oFI9JLRQ80XswSvg21KMKNtQza9eF42rfI/3Z40RvzBdXgziEkudzjo8A==", + "dev": true + } + } + }, "makeerror": { "version": "1.0.12", "dev": true, @@ -10680,28 +11444,137 @@ "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", "dev": true }, + "minipass": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", + "integrity": "sha512-rty5kpw9/z8SX9dmxblFA6edItUmwJgMeYDZRrwlIVN27i8gysGbznJwUggw2V/FVqFSDdWy040ZPS811DYAqQ==", + "dev": true, + "requires": { + "yallist": "^4.0.0" + } + }, + "minipass-collect": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", + "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", + "dev": true, + "requires": { + "minipass": "^3.0.0" + } + }, + "minipass-fetch": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-2.1.0.tgz", + "integrity": "sha512-H9U4UVBGXEyyWJnqYDCLp1PwD8XIkJ4akNHp1aGVI+2Ym7wQMlxDKi4IB4JbmyU+pl9pEs/cVrK6cOuvmbK4Sg==", + "dev": true, + "requires": { + "encoding": "^0.1.13", + "minipass": "^3.1.6", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + } + }, + "minipass-flush": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", + "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", + "dev": true, + "requires": { + "minipass": "^3.0.0" + } + }, + "minipass-pipeline": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", + "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", + "dev": true, + "requires": { + "minipass": "^3.0.0" + } + }, + "minipass-sized": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", + "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", + "dev": true, + "requires": { + "minipass": "^3.0.0" + } + }, + "minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dev": true, + "requires": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + } + }, "mkdirp": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", "dev": true }, + "mkdirp-classic": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", + "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "dev": true + }, "ms": { "version": "2.1.2" }, "napi-macros": { - "version": "2.0.0" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/napi-macros/-/napi-macros-2.0.0.tgz", + "integrity": "sha512-A0xLykHtARfueITVDernsAWdtIMbOJgKgcluwENp3AlsKN/PloyO10HtmoqnFAQAcxPkgZN7wdfPfEd0zNGxbg==", + "dev": true }, "natural-compare": { "version": "1.4.0", "dev": true }, + "negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "dev": true + }, + "node-abi": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.22.0.tgz", + "integrity": "sha512-u4uAs/4Zzmp/jjsD9cyFYDXeISfUWaAVWshPmDZOFOv4Xl4SbzTXm53I04C2uRueYJ+0t5PEtLH/owbn2Npf/w==", + "dev": true, + "requires": { + "semver": "^7.3.5" + } + }, "node-forge": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", "dev": true }, + "node-gyp": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-9.0.0.tgz", + "integrity": "sha512-Ma6p4s+XCTPxCuAMrOA/IJRmVy16R8Sdhtwl4PrCr7IBlj4cPawF0vg/l7nOT1jPbuNS7lIRJpBSvVsXwEZuzw==", + "dev": true, + "requires": { + "env-paths": "^2.2.0", + "glob": "^7.1.4", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^10.0.3", + "nopt": "^5.0.0", + "npmlog": "^6.0.0", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.2", + "which": "^2.0.2" + } + }, "node-gyp-build": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.4.0.tgz", @@ -10715,6 +11588,15 @@ "version": "2.0.4", "dev": true }, + "nopt": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", + "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "dev": true, + "requires": { + "abbrev": "1" + } + }, "normalize-path": { "version": "3.0.0", "dev": true @@ -10726,6 +11608,18 @@ "path-key": "^3.0.0" } }, + "npmlog": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-6.0.2.tgz", + "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", + "dev": true, + "requires": { + "are-we-there-yet": "^3.0.0", + "console-control-strings": 
"^1.1.0", + "gauge": "^4.0.3", + "set-blocking": "^2.0.0" + } + }, "nwsapi": { "version": "2.2.0", "dev": true @@ -10800,6 +11694,15 @@ "p-limit": "^2.2.0" } }, + "p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dev": true, + "requires": { + "aggregate-error": "^3.0.0" + } + }, "p-try": { "version": "2.2.0", "dev": true @@ -10872,6 +11775,32 @@ "version": "1.3.6", "dev": true }, + "prebuildify": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/prebuildify/-/prebuildify-5.0.0.tgz", + "integrity": "sha512-XhuFIeZx8Tk8e8yn3h5e+CE572pecpdKPrVubUIW0HctP3fpzh4eSWoHR1eOoQNTtxBUt1ixPLHPLbOTYi6STw==", + "dev": true, + "requires": { + "execspawn": "^1.0.1", + "minimist": "^1.2.5", + "mkdirp-classic": "^0.5.3", + "node-abi": "^3.3.0", + "npm-run-path": "^3.1.0", + "pump": "^3.0.0", + "tar-fs": "^2.1.0" + }, + "dependencies": { + "npm-run-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-3.1.0.tgz", + "integrity": "sha512-Dbl4A/VfiVGLgQv29URL9xshU8XDY1GeLy+fsaZ1AA8JDSfjvr5P5+pzRbWqRSBxk6/DW7MIh8lTM/PaGnP2kg==", + "dev": true, + "requires": { + "path-key": "^3.0.0" + } + } + } + }, "prelude-ls": { "version": "1.2.1", "dev": true @@ -10904,6 +11833,22 @@ } } }, + "promise-inflight": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=", + "dev": true + }, + "promise-retry": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", + "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", + "dev": true, + "requires": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + } + }, "prompts": { "version": "2.4.2", "dev": true, @@ -10916,12 +11861,23 @@ "version": "1.8.0", "dev": true }, + "pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dev": true, + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, "punycode": { "version": "2.1.1", "dev": true }, "queue-microtask": { - "version": "1.2.3" + "version": "1.2.3", + "dev": true }, "react-is": { "version": "17.0.2", @@ -10929,6 +11885,7 @@ }, "readable-stream": { "version": "3.6.0", + "dev": true, "requires": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", @@ -10985,6 +11942,12 @@ "signal-exit": "^3.0.2" } }, + "retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", + "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=", + "dev": true + }, "reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -11007,14 +11970,9 @@ "queue-microtask": "^1.2.2" } }, - "run-parallel-limit": { - "version": "1.1.0", - "requires": { - "queue-microtask": "^1.2.2" - } - }, "safe-buffer": { - "version": "5.2.1" + "version": "5.2.1", + "dev": true }, "safer-buffer": { "version": "2.1.2", @@ -11036,6 +11994,12 @@ "lru-cache": "^6.0.0" } }, + "set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, "shebang-command": { "version": "2.0.0", 
"dev": true, @@ -11088,6 +12052,33 @@ "is-fullwidth-code-point": "^3.0.0" } }, + "smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "dev": true + }, + "socks": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.6.2.tgz", + "integrity": "sha512-zDZhHhZRY9PxRruRMR7kMhnf3I8hDs4S3f9RecfnGxvcBHQcKcIH/oUcEWffsfl1XxdYlA7nnlGbbTvPz9D8gA==", + "dev": true, + "requires": { + "ip": "^1.1.5", + "smart-buffer": "^4.2.0" + } + }, + "socks-proxy-agent": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-6.2.0.tgz", + "integrity": "sha512-wWqJhjb32Q6GsrUqzuFkukxb/zzide5quXYcMVpIjxalDBBYy2nqKCFQ/9+Ie4dvOYSQdOk3hUlZSdzZOd3zMQ==", + "dev": true, + "requires": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + } + }, "source-map": { "version": "0.6.1", "dev": true @@ -11104,6 +12095,15 @@ "version": "1.0.3", "dev": true }, + "ssri": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-9.0.1.tgz", + "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", + "dev": true, + "requires": { + "minipass": "^3.1.1" + } + }, "stack-utils": { "version": "2.0.5", "dev": true, @@ -11123,6 +12123,7 @@ }, "string_decoder": { "version": "1.3.0", + "dev": true, "requires": { "safe-buffer": "~5.2.0" } @@ -11212,6 +12213,53 @@ "version": "5.8.9", "dev": true }, + "tar": { + "version": "6.1.11", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", + "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", + "dev": true, + "requires": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^3.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + } + }, + "tar-fs": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", + "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "dev": true, + "requires": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.1.4" + }, + "dependencies": { + "chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "dev": true + } + } + }, + "tar-stream": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", + "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", + "dev": true, + "requires": { + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + } + }, "terminal-link": { "version": "2.1.1", "dev": true, @@ -11456,6 +12504,24 @@ "which-boxed-primitive": "^1.0.2" } }, + "unique-filename": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", + "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", + "dev": true, + "requires": { + "unique-slug": "^2.0.0" + } + }, + "unique-slug": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", + "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4" + } + }, "universalify": { "version": "0.1.2", "dev": true @@ -11470,7 +12536,14 @@ } }, "util-deprecate": { - "version": "1.0.2" + "version": "1.0.2", + "dev": true + }, + "util-extend": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/util-extend/-/util-extend-1.0.3.tgz", + "integrity": "sha1-p8IW0mdUUWljeztu3GypEZ4v+T8=", + "dev": true }, "uuid": { "version": "8.3.2", @@ -11568,6 +12641,15 @@ "is-symbol": "^1.0.3" } }, + "wide-align": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", + "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "dev": true, + "requires": { + "string-width": "^1.0.2 || 2 || 3 || 4" + } + }, "word-wrap": { "version": "1.2.3", "dev": true diff --git a/package.json b/package.json index 42e3952d..21419381 100644 --- a/package.json +++ b/package.json @@ -10,30 +10,34 @@ }, "main": "dist/index.js", "types": "dist/index.d.ts", + "gypfile": true, "scripts": { "prepare": "tsc -p ./tsconfig.build.json", + "install": "node-gyp-build", + "prebuild": "prebuildify --napi --strip --target=16.14.2", "build": "rimraf ./dist && tsc -p ./tsconfig.build.json", "postversion": "npm install --package-lock-only --ignore-scripts --silent", "ts-node": "ts-node -r tsconfig-paths/register", "test": "jest", "lint": "eslint '{src,tests,benches}/**/*.{js,ts}'", "lintfix": "eslint '{src,tests,benches}/**/*.{js,ts}' --fix", + "lint-native": "find ./src -type f -regextype posix-extended -regex '.*\\.(c|cc|cpp|h|hh|hpp)' -exec clang-format --dry-run -Werror {} +", + "lintfix-native": "find ./src -type f -regextype posix-extended -regex '.*\\.(c|cc|cpp|h|hh|hpp)' -exec clang-format -i {} +", "docs": "rimraf ./docs && typedoc --gitRevision master --tsconfig ./tsconfig.build.json --out ./docs src", "bench": "rimraf ./benches/results && ts-node -r tsconfig-paths/register ./benches" }, "dependencies": { - "@matrixai/async-init": "^1.7.3", - "@matrixai/errors": "^1.1.1", - "@matrixai/logger": "^2.1.1", + "@matrixai/async-init": "^1.8.1", + "@matrixai/async-locks": "^3.0.0", + "@matrixai/errors": "^1.1.2", + "@matrixai/logger": "^2.3.0", "@matrixai/resources": "^1.1.3", "@matrixai/workers": "^1.3.3", - "@types/abstract-leveldown": "^7.2.0", - "level": "7.0.1", + "node-gyp-build": "4.4.0", "threads": "^1.6.5" }, "devDependencies": { "@types/jest": "^27.0.2", - "@types/level": "^6.0.0", "@types/node": "^16.11.7", "@types/node-forge": "^0.10.4", "@typescript-eslint/eslint-plugin": "^5.23.0", @@ -46,7 +50,10 @@ "jest": "^27.2.5", "jest-junit": "^13.2.0", "lexicographic-integer": "^1.1.0", + "napi-macros": "^2.0.0", "node-forge": "^1.3.1", + "node-gyp": "9.0.0", + "prebuildify": "^5.0.0", "prettier": "^2.6.2", "rimraf": "^3.0.2", "systeminformation": "^5.8.9", diff --git a/shell.nix b/shell.nix index 06b9204e..63d31be7 100644 --- a/shell.nix +++ b/shell.nix @@ -4,7 +4,12 @@ with pkgs; mkShell { nativeBuildInputs = [ nodejs + nodejs.python + clang-tools ]; + # Don't set rpath for native addons + NIX_DONT_SET_RPATH = true; + NIX_NO_SELF_RPATH = true; shellHook = '' echo 'Entering js-db' set -o allexport @@ -20,6 +25,15 @@ mkShell { # Enables npm link export npm_config_prefix=~/.npm + # Path to headers used by node-gyp for native addons + export 
npm_config_nodedir="${nodejs}" + + # Use all cores during node-gyp compilation + export npm_config_jobs=max + + # Verbose logging of the Nix compiler wrappers + export NIX_DEBUG=1 + npm install --ignore-scripts set +v diff --git a/src/DB.ts b/src/DB.ts index 0dffed7d..74903efa 100644 --- a/src/DB.ts +++ b/src/DB.ts @@ -1,17 +1,19 @@ -import type { LevelDB } from 'level'; import type { ResourceAcquire } from '@matrixai/resources'; +import type { RWLockWriter } from '@matrixai/async-locks'; import type { KeyPath, LevelPath, FileSystem, Crypto, DBWorkerManagerInterface, - DBIteratorOptions, - DBIterator, DBBatch, DBOps, + DBOptions, + DBIteratorOptions, + DBClearOptions, + DBCountOptions, } from './types'; -import level from 'level'; +import type { RocksDBDatabase, RocksDBDatabaseOptions } from './rocksdb'; import { Transfer } from 'threads'; import Logger from '@matrixai/logger'; import { withF, withG } from '@matrixai/resources'; @@ -19,7 +21,10 @@ import { CreateDestroyStartStop, ready, } from '@matrixai/async-init/dist/CreateDestroyStartStop'; +import { LockBox } from '@matrixai/async-locks'; +import DBIterator from './DBIterator'; import DBTransaction from './DBTransaction'; +import { rocksdbP } from './rocksdb'; import * as utils from './utils'; import * as errors from './errors'; @@ -35,6 +40,7 @@ class DB { fs = require('fs'), logger = new Logger(this.name), fresh = false, + ...dbOptions }: { dbPath: string; crypto?: { @@ -44,21 +50,20 @@ fs?: FileSystem; logger?: Logger; fresh?: boolean; - }): Promise<DB> { + } & DBOptions): Promise<DB> { logger.info(`Creating ${this.name}`); - const db = new DB({ + const db = new this({ dbPath, crypto, fs, logger, }); - await db.start({ fresh }); + await db.start({ fresh, ...dbOptions }); logger.info(`Created ${this.name}`); return db; } public readonly dbPath: string; - protected crypto?: { key: Buffer; ops: Crypto; @@ -66,8 +71,38 @@ protected fs: FileSystem; protected logger: Logger; protected workerManager?: DBWorkerManagerInterface; - protected _db: LevelDB<string | Buffer, Buffer>; - protected transactionCounter: number = 0; + protected _lockBox: LockBox<RWLockWriter> = new LockBox(); + protected _db: RocksDBDatabase; + /** + * References to iterators + */ + protected _iteratorRefs: Set<DBIterator<any, any>> = new Set(); + /** + * References to transactions + */ + protected _transactionRefs: Set<DBTransaction> = new Set(); + + get db(): Readonly<RocksDBDatabase> { + return this._db; + } + + /** + * @internal + */ + get iteratorRefs(): Readonly<Set<DBIterator<any, any>>> { + return this._iteratorRefs; + } + + /** + * @internal + */ + get transactionRefs(): Readonly<Set<DBTransaction>> { + return this._transactionRefs; + } + + get lockBox(): Readonly<LockBox<RWLockWriter>> { + return this._lockBox; + } constructor({ dbPath, @@ -89,15 +124,12 @@ this.fs = fs; } - get db(): Readonly<LevelDB<string | Buffer, Buffer>> { - return this._db; - } - public async start({ fresh = false, + ...dbOptions }: { fresh?: boolean; - } = {}) { + } & DBOptions = {}) { this.logger.info(`Starting ${this.constructor.name}`); this.logger.info(`Setting DB path to ${this.dbPath}`); if (fresh) { @@ -110,7 +142,11 @@ throw new errors.ErrorDBDelete(e.message, { cause: e }); } } - const db = await this.setupDb(this.dbPath); + const db = await this.setupDb(this.dbPath, { + ...dbOptions, + createIfMissing: true, + errorIfExists: false, + }); this._db = db; try { // Only run these after this._db is assigned @@ -119,8 +155,8 @@ await this.canaryCheck(); } } catch (e) { - // LevelDB must be closed otherwise its lock will persist - await this._db.close(); + // RocksDB must be closed otherwise its lock will persist + await 
rocksdbP.dbClose(db); + throw e; } this.logger.info(`Started ${this.constructor.name}`); @@ -128,7 +164,13 @@ public async stop(): Promise<void> { this.logger.info(`Stopping ${this.constructor.name}`); - await this._db.close(); + for (const iterator of this._iteratorRefs) { + await iterator.destroy(); + } + for (const transaction of this._transactionRefs) { + await transaction.rollback(); + } + await rocksdbP.dbClose(this._db); this.logger.info(`Stopped ${this.constructor.name}`); } @@ -156,23 +198,16 @@ @ready(new errors.ErrorDBNotRunning()) public transaction(): ResourceAcquire<DBTransaction> { return async () => { - const transactionId = this.transactionCounter++; - const tran = await DBTransaction.createTransaction({ + const tran = new DBTransaction({ db: this, - transactionId, + lockBox: this._lockBox, logger: this.logger, }); return [ async (e?: Error) => { try { if (e == null) { - try { - await tran.commit(); - } catch (e) { - await tran.rollback(e); - throw e; - } - await tran.finalize(); + await tran.commit(); } else { await tran.rollback(e); } @@ -235,9 +270,9 @@ let data; try { const key = utils.keyPathToKey(keyPath); - data = await this._db.get(key); + data = await rocksdbP.dbGet(this._db, key, { valueEncoding: 'buffer' }); } catch (e) { - if (e.notFound) { + if (e.code === 'NOT_FOUND') { return undefined; } throw e; @@ -253,64 +288,88 @@ keyPath: KeyPath | string | Buffer, value: any, raw?: false, + sync?: boolean, ): Promise<void>; public async put( keyPath: KeyPath | string | Buffer, value: Buffer, raw: true, + sync?: boolean, ): Promise<void>; @ready(new errors.ErrorDBNotRunning()) public async put( keyPath: KeyPath | string | Buffer, value: any, raw: boolean = false, + sync: boolean = false, ): Promise<void> { keyPath = utils.toKeyPath(keyPath); keyPath = ['data', ...keyPath]; - return this._put(keyPath, value, raw as any); + return this._put(keyPath, value, raw as any, sync); } /** * Put from root level * @internal */ - public async _put(keyPath: KeyPath, value: any, raw?: false): Promise<void>; + public async _put( + keyPath: KeyPath, + value: any, + raw?: false, + sync?: boolean, + ): Promise<void>; /** * @internal */ - public async _put(keyPath: KeyPath, value: Buffer, raw: true): Promise<void>; + public async _put( + keyPath: KeyPath, + value: Buffer, + raw: true, + sync?: boolean, + ): Promise<void>; public async _put( + keyPath: KeyPath, value: any, raw: boolean = false, + sync: boolean = false, ): Promise<void> { const data = await this.serializeEncrypt(value, raw as any); - return this._db.put(utils.keyPathToKey(keyPath), data); + const key = utils.keyPathToKey(keyPath); + await rocksdbP.dbPut(this._db, key, data, { sync }); + return; } /** * Deletes a key from the DB */ @ready(new errors.ErrorDBNotRunning()) - public async del(keyPath: KeyPath | string | Buffer): Promise<void> { + public async del( + keyPath: KeyPath | string | Buffer, + sync: boolean = false, + ): Promise<void> { keyPath = utils.toKeyPath(keyPath); keyPath = ['data', ...keyPath]; - return this._del(keyPath); + return this._del(keyPath, sync); } /** * Delete from root level * @internal */ - public async _del(keyPath: KeyPath): Promise<void> { - return this._db.del(utils.keyPathToKey(keyPath)); + public async _del(keyPath: KeyPath, sync: boolean = false): Promise<void> { + const key = utils.keyPathToKey(keyPath); + await rocksdbP.dbDel(this._db, key, { sync }); + return; } /** * Batches operations together atomically */ @ready(new errors.ErrorDBNotRunning()) - public async batch(ops: Readonly<DBOps>): Promise<void> { + public async batch( + ops: Readonly<DBOps>, 
sync: boolean = false, + ): Promise<void> { const opsP: Array<Promise<DBBatch> | DBBatch> = []; for (const op of ops) { op.keyPath = utils.toKeyPath(op.keyPath); @@ -333,14 +392,18 @@ } } const opsB = await Promise.all(opsP); - return this._db.batch(opsB); + await rocksdbP.batchDo(this._db, opsB, { sync }); + return; } /** * Batch from root level * @internal */ - public async _batch(ops: Readonly<DBOps>): Promise<void> { + public async _batch( + ops: Readonly<DBOps>, + sync: boolean = false, + ): Promise<void> { const opsP: Array<Promise<DBBatch> | DBBatch> = []; for (const op of ops) { if (!Array.isArray(op.keyPath)) { @@ -364,7 +427,8 @@ } } const opsB = await Promise.all(opsP); - return this._db.batch(opsB); + await rocksdbP.batchDo(this._db, opsB, { sync }); + return; } /** @@ -373,36 +437,39 @@ * You must have at least one of them being true or undefined */ public iterator( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { keys: false; values: false }, - levelPath?: LevelPath, ): DBIterator<undefined, undefined>; public iterator<V>( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { keys: false; valueAsBuffer: false }, - levelPath?: LevelPath, ): DBIterator<undefined, V>; public iterator( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { keys: false }, - levelPath?: LevelPath, ): DBIterator<undefined, Buffer>; public iterator( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { values: false }, - levelPath?: LevelPath, ): DBIterator<KeyPath, undefined>; public iterator<V>( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { valueAsBuffer: false }, - levelPath?: LevelPath, ): DBIterator<KeyPath, V>; public iterator( - options?: DBIteratorOptions, levelPath?: LevelPath, + options?: DBIteratorOptions, ): DBIterator<KeyPath, Buffer>; @ready(new errors.ErrorDBNotRunning()) public iterator( - options?: DBIteratorOptions & { keyAsBuffer?: any; valueAsBuffer?: any }, levelPath: LevelPath = [], + options: DBIteratorOptions & { + keyAsBuffer?: any; + valueAsBuffer?: any; + } = {}, ): DBIterator<any, any> { levelPath = ['data', ...levelPath]; - return this._iterator(options, levelPath); + return this._iterator(levelPath, options); } /** @@ -410,132 +477,54 @@ * @internal */ public _iterator( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { keys: false; values: false }, - levelPath?: LevelPath, ): DBIterator<undefined, undefined>; /** * @internal */ public _iterator<V>( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { keys: false; valueAsBuffer: false }, - levelPath?: LevelPath, ): DBIterator<undefined, V>; /** * @internal */ public _iterator( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { keys: false }, - levelPath?: LevelPath, ): DBIterator<undefined, Buffer>; /** * @internal */ public _iterator( + levelPath: LevelPath | undefined, options: DBIteratorOptions & { values: false }, - levelPath?: LevelPath, ): DBIterator<KeyPath, undefined>; /** * @internal */ public _iterator<V>( + levelPath: LevelPath | undefined, options?: DBIteratorOptions & { valueAsBuffer: false }, - levelPath?: LevelPath, ): DBIterator<KeyPath, V>; /** * @internal */ public _iterator( + levelPath?: LevelPath | undefined, options?: DBIteratorOptions, - levelPath?: LevelPath, ): DBIterator<KeyPath, Buffer>; public _iterator( - options?: DBIteratorOptions, levelPath: LevelPath = [], + options: DBIteratorOptions = {}, ): DBIterator<any, any> { - const options_ = { - ...(options ?? 
{}), - // Internally we always use the buffer - keyAsBuffer: true, - valueAsBuffer: true, - }; - if (options_.gt != null) { - options_.gt = utils.keyPathToKey( - levelPath.concat(utils.toKeyPath(options_.gt)), - ); - } - if (options_.gte != null) { - options_.gte = utils.keyPathToKey( - levelPath.concat(utils.toKeyPath(options_.gte)), - ); - } - if (options_.gt == null && options_.gte == null) { - options_.gte = utils.levelPathToKey(levelPath); - } - if (options_.lt != null) { - options_.lt = utils.keyPathToKey( - levelPath.concat(utils.toKeyPath(options_.lt)), - ); - } - if (options_.lte != null) { - options_.lte = utils.keyPathToKey( - levelPath.concat(utils.toKeyPath(options_.lte)), - ); - } - if (options_.lt == null && options_.lte == null) { - const levelKeyEnd = utils.levelPathToKey(levelPath); - levelKeyEnd[levelKeyEnd.length - 1] += 1; - options_.lt = levelKeyEnd; - } - const iterator_ = this._db.iterator(options_); - const iterator = { - seek: (keyPath: KeyPath | Buffer | string): void => { - iterator_.seek( - utils.keyPathToKey(levelPath.concat(utils.toKeyPath(keyPath))), - ); - }, - end: async () => { - // @ts-ignore AbstractIterator type is outdated - // eslint-disable-next-line @typescript-eslint/await-thenable - await iterator_.end(); - }, - next: async () => { - // @ts-ignore AbstractIterator type is outdated - // eslint-disable-next-line @typescript-eslint/await-thenable - const kv = (await iterator_.next()) as any; - // If kv is undefined, we have reached the end of iteration - if (kv == null) return kv; - // Handle keys: false - if (kv[0] != null) { - // Truncate level path so the returned key is relative to the level path - const keyPath = utils.parseKey(kv[0]).slice(levelPath.length); - if (options?.keyAsBuffer === false) { - kv[0] = keyPath.map((k) => k.toString('utf-8')); - } else { - kv[0] = keyPath; - } - } - // Handle values: false - if (kv[1] != null) { - if (options?.valueAsBuffer === false) { - kv[1] = await this.deserializeDecrypt(kv[1], false); - } else { - kv[1] = await this.deserializeDecrypt(kv[1], true); - } - } - return kv; - }, - [Symbol.asyncIterator]: async function* () { - try { - let kv: [KeyPath | undefined, any] | undefined; - while ((kv = await iterator.next()) !== undefined) { - yield kv; - } - } finally { - if (!iterator_._ended) await iterator.end(); - } - }, - }; - return iterator; + return new DBIterator({ + db: this, + levelPath, + logger: this.logger.getChild(DBIterator.name), + ...options, + }); } /** @@ -543,31 +532,45 @@ * This is not atomic, it will iterate over a snapshot of the DB */ @ready(new errors.ErrorDBNotRunning()) - public async clear(levelPath: LevelPath = []): Promise<void> { + public async clear( + levelPath: LevelPath = [], + options: DBClearOptions = {}, + ): Promise<void> { levelPath = ['data', ...levelPath]; - await this._clear(levelPath); + await this._clear(levelPath, options); } /** * Clear from root level * @internal */ - public async _clear(levelPath: LevelPath = []): Promise<void> { - for await (const [keyPath] of this._iterator( - { values: false }, - levelPath, - )) { - await this._del(levelPath.concat(keyPath)); - } + public async _clear( + levelPath: LevelPath = [], + options: DBClearOptions = {}, + ): Promise<void> { + const options_ = utils.iterationOptions(options, levelPath); + return rocksdbP.dbClear(this._db, options_); } @ready(new errors.ErrorDBNotRunning()) - public async count(levelPath: LevelPath = []): Promise<number> { - let count = 0; - for await (const _ of this.iterator({ values: false }, levelPath)) { - count++; - 
} - return count; + public async count( + levelPath: LevelPath = [], + options: DBCountOptions = {}, + ): Promise { + levelPath = ['data', ...levelPath]; + return this._count(levelPath, options); + } + + /** + * Count from root level + * @internal + */ + public async _count( + levelPath: LevelPath = [], + options: DBCountOptions = {}, + ): Promise { + const options_ = utils.iterationOptions(options, levelPath); + return rocksdbP.dbCount(this._db, options_); } /** @@ -597,13 +600,10 @@ class DB { levelPath = ['data', ...levelPath]; } const records: Array<[KeyPath, any]> = []; - for await (const [keyPath, v] of this._iterator( - { - keyAsBuffer: raw, - valueAsBuffer: raw, - }, - levelPath, - )) { + for await (const [keyPath, v] of this._iterator(levelPath, { + keyAsBuffer: raw, + valueAsBuffer: raw, + })) { records.push([keyPath, v]); } return records; @@ -686,7 +686,8 @@ class DB { protected async setupDb( dbPath: string, - ): Promise> { + options: RocksDBDatabaseOptions = {}, + ): Promise { try { await this.fs.promises.mkdir(dbPath); } catch (e) { @@ -694,26 +695,11 @@ class DB { throw new errors.ErrorDBCreate(e.message, { cause: e }); } } - let db: LevelDB; + const db = rocksdbP.dbInit(); + // Mutates options object which is copied from this.start + utils.filterUndefined(options); try { - db = await new Promise>( - (resolve, reject) => { - const db = level( - dbPath, - { - keyEncoding: 'binary', - valueEncoding: 'binary', - }, - (e) => { - if (e) { - reject(e); - } else { - resolve(db); - } - }, - ); - }, - ); + await rocksdbP.dbOpen(db, dbPath, options); } catch (e) { throw new errors.ErrorDBCreate(e.message, { cause: e }); } @@ -721,8 +707,7 @@ class DB { } protected async setupRootLevels(): Promise { - // Clear any dirty state in transactions - await this._clear(['transactions']); + // Nothing to do yet } protected async canaryCheck(): Promise { diff --git a/src/DBIterator.ts b/src/DBIterator.ts new file mode 100644 index 00000000..c24b255e --- /dev/null +++ b/src/DBIterator.ts @@ -0,0 +1,216 @@ +import type DB from './DB'; +import type DBTransaction from './DBTransaction'; +import type { Merge, KeyPath, LevelPath, DBIteratorOptions } from './types'; +import type { + RocksDBIterator, + RocksDBIteratorOptions, + RocksDBSnapshot, + RocksDBTransactionSnapshot, +} from './rocksdb'; +import Logger from '@matrixai/logger'; +import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; +import { Lock } from '@matrixai/async-locks'; +import { rocksdbP } from './rocksdb'; +import * as errors from './errors'; +import * as utils from './utils'; + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +interface DBIterator extends CreateDestroy {} +@CreateDestroy() +class DBIterator { + protected logger: Logger; + protected levelPath: LevelPath; + protected _db: DB; + protected _transaction?: DBTransaction; + protected _options: Merge< + DBIteratorOptions, + { + gt?: Buffer; + gte?: Buffer; + lt?: Buffer; + lte?: Buffer; + keyEncoding: 'buffer'; + valueEncoding: 'buffer'; + } + >; + protected _iterator: RocksDBIterator; + protected first: boolean = true; + protected finished: boolean = false; + protected cache: Array<[Buffer, Buffer]> = []; + protected cachePos: number = 0; + protected lock: Lock = new Lock(); + + public constructor( + options: { + db: DB; + levelPath: LevelPath; + logger?: Logger; + } & DBIteratorOptions, + ); + public constructor( + options: { + db: DB; + transaction: DBTransaction; + levelPath: LevelPath; + logger?: Logger; + } & DBIteratorOptions, + 
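/* Editor's note (annotation, not part of the diff): `DBIterator` instances
   come from `DB.iterator`/`DBTransaction.iterator` rather than being
   constructed directly. A sketch of the intended call shape, assuming
   `DB.iterator` takes the same `(levelPath, options)` order as the
   `_iterator` and `DBTransaction.iterator` signatures shown in this diff:

   const it = db.iterator(['users'], { keyAsBuffer: false, valueAsBuffer: false });
   try {
     it.seek('alice'); // reposition within the 'users' level before reading
     const entry = await it.next();
   } finally {
     await it.destroy(); // also destroyed automatically when iteration completes
   }
*/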
);
+  public constructor({
+    db,
+    transaction,
+    levelPath,
+    logger,
+    ...options
+  }: {
+    db: DB;
+    transaction?: DBTransaction;
+    levelPath: LevelPath;
+    logger?: Logger;
+  } & DBIteratorOptions) {
+    logger = logger ?? new Logger(this.constructor.name);
+    logger.debug(`Constructing ${this.constructor.name}`);
+    this.logger = logger;
+    this.levelPath = levelPath;
+    const options_ = utils.iterationOptions<
+      Merge<
+        DBIteratorOptions,
+        {
+          gt?: Buffer;
+          gte?: Buffer;
+          lt?: Buffer;
+          lte?: Buffer;
+          keyEncoding: 'buffer';
+          valueEncoding: 'buffer';
+        }
+      >
+    >(options, levelPath);
+    this._options = options_;
+    this._db = db;
+    if (transaction != null) {
+      this._transaction = transaction;
+      this._iterator = rocksdbP.transactionIteratorInit(
+        transaction.transaction,
+        options_ as RocksDBIteratorOptions & {
+          keyEncoding: 'buffer';
+          valueEncoding: 'buffer';
+        },
+      );
+      transaction.iteratorRefs.add(this);
+    } else {
+      this._iterator = rocksdbP.iteratorInit(
+        db.db,
+        options_ as RocksDBIteratorOptions & {
+          keyEncoding: 'buffer';
+          valueEncoding: 'buffer';
+        },
+      );
+      db.iteratorRefs.add(this);
+    }
+    logger.debug(`Constructed ${this.constructor.name}`);
+  }
+
+  get db(): Readonly<DB> {
+    return this._db;
+  }
+
+  get transaction(): Readonly<DBTransaction> | undefined {
+    return this._transaction;
+  }
+
+  get iterator(): Readonly<RocksDBIterator<Buffer, Buffer>> {
+    return this._iterator;
+  }
+
+  get options(): Readonly<
+    Merge<
+      DBIteratorOptions,
+      {
+        gt?: Buffer;
+        gte?: Buffer;
+        lt?: Buffer;
+        lte?: Buffer;
+        keyEncoding: 'buffer';
+        valueEncoding: 'buffer';
+      }
+    >
+  > {
+    return this._options;
+  }
+
+  public async destroy(): Promise<void> {
+    this.logger.debug(`Destroying ${this.constructor.name}`);
+    this.cache = [];
+    await rocksdbP.iteratorClose(this._iterator);
+    if (this._transaction != null) {
+      this._transaction.iteratorRefs.delete(this);
+    } else {
+      this._db.iteratorRefs.delete(this);
+    }
+    this.logger.debug(`Destroyed ${this.constructor.name}`);
+  }
+
+  @ready(new errors.ErrorDBIteratorDestroyed())
+  public seek(keyPath: KeyPath | string | Buffer): void {
+    if (this.lock.isLocked()) {
+      throw new errors.ErrorDBIteratorBusy();
+    }
+    rocksdbP.iteratorSeek(
+      this._iterator,
+      utils.keyPathToKey(this.levelPath.concat(utils.toKeyPath(keyPath))),
+    );
+    this.first = true;
+    this.finished = false;
+    this.cache = [];
+    this.cachePos = 0;
+  }
+
+  @ready(new errors.ErrorDBIteratorDestroyed(), true)
+  public async next(): Promise<[K, V] | undefined> {
+    return this.lock.withF(this._next.bind(this));
+  }
+
+  protected async _next(): Promise<[K, V] | undefined> {
+    if (this.cachePos < this.cache.length) {
+      const entry = this.cache[this.cachePos];
+      const result = this.processEntry(entry);
+      this.cachePos += 1;
+      return result;
+    } else if (this.finished) {
+      return;
+    }
+    let entries: Array<[Buffer, Buffer]>, finished: boolean;
+    if (this.first) {
+      [entries, finished] = await rocksdbP.iteratorNextv(this._iterator, 1);
+      this.first = false;
+    } else {
+      [entries, finished] = await rocksdbP.iteratorNextv(this._iterator, 1000);
+    }
+    this.cachePos = 0;
+    this.cache = entries;
+    this.finished = finished;
+    // If the entries are empty and finished is false
+    // then this will enter a retry loop
+    // until entries is filled or finished is true
+    return this._next();
+  }
+
+  public async *[Symbol.asyncIterator](): AsyncGenerator<[K, V], void, void> {
+    try {
+      let entry: [K, V] | undefined;
+      while ((entry = await this.next()) !== undefined) {
+        yield entry;
+      }
+    } finally {
+      // Once entry is undefined, then it is finished
+      // therefore we can perform an idempotent destroy
+      await this.destroy();
+    }
+  }
+
+  protected async processEntry(entry: [Buffer, Buffer]): Promise<[K, V]> {
+    let keyPath: KeyPath | undefined;
+    let value: Buffer | V | undefined;
+    // If keys were false, leveldb returns empty buffer
+    if (this._options.keys === false) {
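/* Editor's note (annotation, not part of the diff): `_next` above prefetches
   from the native iterator in chunks: one entry on the first call (keeping
   point lookups cheap), then batches of 1000, serving `next()` from the
   `cache`/`cachePos` buffer until it drains. Consumers only see the async
   iteration protocol:

   for await (const [keyPath, value] of db.iterator(['users'])) {
     // keyPath is relative to the 'users' level; values are Buffers by default
   }
*/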
keyPath = undefined; + } else { + // Truncate level path so the returned key is relative to the level path + keyPath = utils.parseKey(entry[0]).slice(this.levelPath.length); + if (this._options.keyAsBuffer === false) { + keyPath = keyPath.map((k) => k.toString('utf-8')); + } + } + // If values were false, leveldb returns empty buffer + if (this._options.values === false) { + value = undefined; + } else { + if (this._options.valueAsBuffer === false) { + value = await this._db.deserializeDecrypt(entry[1], false); + } else { + value = await this._db.deserializeDecrypt(entry[1], true); + } + } + return [keyPath, value] as [K, V]; + } +} + +export default DBIterator; diff --git a/src/DBTransaction.ts b/src/DBTransaction.ts index 39042373..ac28b10d 100644 --- a/src/DBTransaction.ts +++ b/src/DBTransaction.ts @@ -1,67 +1,51 @@ +import type { ResourceRelease } from '@matrixai/resources'; +import type { + LockBox, + MultiLockRequest as AsyncLocksMultiLockRequest, +} from '@matrixai/async-locks'; import type DB from './DB'; import type { + ToString, KeyPath, LevelPath, - DBIterator, - DBOps, DBIteratorOptions, + DBClearOptions, + DBCountOptions, + MultiLockRequest, } from './types'; +import type { + RocksDBTransaction, + RocksDBTransactionOptions, + RocksDBTransactionSnapshot, +} from './rocksdb/types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; +import { RWLockWriter } from '@matrixai/async-locks'; +import DBIterator from './DBIterator'; +import { rocksdbP } from './rocksdb'; import * as utils from './utils'; import * as errors from './errors'; -/** - * Minimal read-committed transaction system - * - * Properties: - * - No dirty reads - cannot read uncommitted writes from other transactions - * - Non-repeatable reads - multiple reads on the same key may read - * different values due to other committed - * transactions - * - Phantom reads - can read entries that are added or deleted by other - * transactions - * - Lost updates - can lose writes if 2 transactions commit writes to the - * same key - * - * To prevent non-repeatable reads, phantom-reads or lost-updates, it is up to the - * user to use advisory read/write locking on relevant keys or ranges of keys. 
- * - * This does not use LevelDB snapshots provided by the `iterator` method - * which would provide "repeatable-read" isolation level by default - * - * See: https://en.wikipedia.org/wiki/Isolation_(database_systems) - */ interface DBTransaction extends CreateDestroy {} @CreateDestroy() class DBTransaction { - public static async createTransaction({ - db, - transactionId, - logger = new Logger(this.name), - }: { - db: DB; - transactionId: number; - logger?: Logger; - }): Promise { - logger.debug(`Creating ${this.name} ${transactionId}`); - const tran = new this({ - db, - transactionId, - logger, - }); - logger.debug(`Created ${this.name} ${transactionId}`); - return tran; - } + public readonly id: number; - public readonly transactionId: number; - public readonly transactionPath: LevelPath; - public readonly transactionDataPath: LevelPath; - public readonly transactionTombstonePath: LevelPath; - - protected db: DB; + protected _db: DB; protected logger: Logger; - protected _ops: DBOps = []; + protected lockBox: LockBox; + protected _locks: Map< + string, + { + lock: RWLockWriter; + type: 'read' | 'write'; + release: ResourceRelease; + } + > = new Map(); + protected _options: RocksDBTransactionOptions; + protected _transaction: RocksDBTransaction; + protected _snapshot: RocksDBTransactionSnapshot; + protected _iteratorRefs: Set> = new Set(); protected _callbacksSuccess: Array<() => any> = []; protected _callbacksFailure: Array<(e?: Error) => any> = []; protected _callbacksFinally: Array<(e?: Error) => any> = []; @@ -70,38 +54,54 @@ class DBTransaction { public constructor({ db, - transactionId, + lockBox, logger, + ...options }: { db: DB; - transactionId: number; - logger: Logger; - }) { + lockBox: LockBox; + logger?: Logger; + } & RocksDBTransactionOptions) { + logger = logger ?? 
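/* Editor's note (annotation, not part of the diff): a lifecycle sketch for
   the rewritten transaction, assuming a started `DB` instance `db` and a
   shared `LockBox` of `RWLockWriter` locks named `lockBox` (the generic
   parameters are elided in this diff):

   const tran = new DBTransaction({ db, lockBox });
   await tran.put('balance', 100);
   await tran.commit(); // on success this also destroys the transaction
*/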
new Logger(this.constructor.name); + logger.debug(`Constructing ${this.constructor.name}`); this.logger = logger; - this.db = db; - this.transactionId = transactionId; - this.transactionPath = ['transactions', this.transactionId.toString()]; - // Data path contains the COW overlay - this.transactionDataPath = [...this.transactionPath, 'data']; - // Tombstone path tracks whether key has been deleted - // If `undefined`, it has not been deleted - // If `true`, then it has been deleted - // When deleted, the COW overlay entry must also be deleted - this.transactionTombstonePath = [...this.transactionPath, 'tombstone']; + this._db = db; + this.lockBox = lockBox; + const options_ = { + ...options, + // Transactions should be synchronous + sync: true, + }; + utils.filterUndefined(options_); + this._options = options_; + this._transaction = rocksdbP.transactionInit(db.db, options_); + db.transactionRefs.add(this); + this.id = rocksdbP.transactionId(this._transaction); + logger.debug(`Constructed ${this.constructor.name} ${this.id}`); } + /** + * Destroy the transaction + * This cannot be called until the transaction is committed or rollbacked + */ public async destroy() { - this.logger.debug( - `Destroying ${this.constructor.name} ${this.transactionId}`, - ); - await this.db._clear(this.transactionPath), - this.logger.debug( - `Destroyed ${this.constructor.name} ${this.transactionId}`, - ); + this.logger.debug(`Destroying ${this.constructor.name} ${this.id}`); + if (!this._committed && !this._rollbacked) { + throw new errors.ErrorDBTransactionNotCommittedNorRollbacked(); + } + this._db.transactionRefs.delete(this); + // Unlock all locked keys in reverse + const lockedKeys = [...this._locks.keys()].reverse(); + await this.unlock(...lockedKeys); + this.logger.debug(`Destroyed ${this.constructor.name} ${this.id}`); + } + + get db(): Readonly { + return this._db; } - get ops(): Readonly { - return this._ops; + get transaction(): Readonly { + return this._transaction; } get callbacksSuccess(): Readonly any>> { @@ -112,6 +112,10 @@ class DBTransaction { return this._callbacksFailure; } + get callbacksFinally(): Readonly any>> { + return this._callbacksFinally; + } + get committed(): boolean { return this._committed; } @@ -120,6 +124,98 @@ class DBTransaction { return this._rollbacked; } + get locks(): ReadonlyMap< + string, + { + lock: RWLockWriter; + type: 'read' | 'write'; + release: ResourceRelease; + } + > { + return this._locks; + } + + /** + * @internal + */ + get iteratorRefs(): Readonly>> { + return this._iteratorRefs; + } + + /** + * Lock a sequence of lock requests + * If the lock request doesn't specify, it + * defaults to using `RWLockWriter` with `write` type + * Keys are locked in string sorted order + * Even though keys can be arbitrary strings, by convention, you should use + * keys that correspond to keys in the database + * Locking with the same key is idempotent therefore lock re-entrancy is enabled + * Keys are automatically unlocked in reverse sorted order + * when the transaction is destroyed + * There is no support for lock upgrading or downgrading + * There is no deadlock detection + */ + public async lock( + ...requests: Array + ): Promise { + const requests_: Array> = []; + for (const request of requests) { + if (Array.isArray(request)) { + const [key, ...lockingParams] = request; + const key_ = key.toString(); + const lock = this._locks.get(key_); + // Default the lock type to `write` + const lockType = (lockingParams[0] = lockingParams[0] ?? 
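/* Editor's note (annotation, not part of the diff): a locking sketch. A bare
   key takes a `write` lock; a tuple may request `read`. Keys are acquired in
   sorted order, re-locking a held key is a no-op, and anything still held is
   released in reverse order when the transaction is destroyed:

   await tran.lock('counter');                    // write lock
   await tran.lock(['config', 'read'], 'audit');  // a read lock and a write lock
   await tran.unlock('audit');                    // early release is optional
*/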
'write');
+        if (lock == null) {
+          requests_.push([key_, RWLockWriter, ...lockingParams]);
+        } else if (lock.type !== lockType) {
+          throw new errors.ErrorDBTransactionLockType();
+        }
+      } else {
+        const key_ = request.toString();
+        const lock = this._locks.get(key_);
+        if (lock == null) {
+          // Default to using `RWLockWriter` write lock for just string keys
+          requests_.push([key_, RWLockWriter, 'write']);
+        } else if (lock.type !== 'write') {
+          throw new errors.ErrorDBTransactionLockType();
+        }
+      }
+    }
+    if (requests_.length > 0) {
+      // Duplicates are eliminated, and the returned acquisitions are sorted
+      const lockAcquires = this.lockBox.lockMulti(...requests_);
+      for (const [key, lockAcquire, ...lockingParams] of lockAcquires) {
+        const [lockRelease, lock] = await lockAcquire();
+        // The `Map` will maintain insertion order
+        // these must be unlocked in reverse order
+        // when the transaction is destroyed
+        this._locks.set(key as string, {
+          lock: lock!,
+          type: lockingParams[0]!, // The `type` is defaulted to `write`
+          release: lockRelease,
+        });
+      }
+    }
+  }
+
+  /**
+   * Unlock a sequence of lock keys
+   * Unlocking will be done in the order of the keys
+   * A transaction instance is only allowed to unlock keys that it previously
+   * locked; all keys that are not part of `this._locks` are ignored
+   * Unlocking the same keys is idempotent
+   */
+  public async unlock(...keys: Array<ToString>): Promise<void> {
+    for (const key of keys) {
+      const key_ = key.toString();
+      const lock = this._locks.get(key_);
+      if (lock == null) continue;
+      this._locks.delete(key_);
+      await lock.release();
+    }
+  }
+
   public async get<T>(
     keyPath: KeyPath | string | Buffer,
     raw?: false,
   ): Promise<T | undefined>;
   public async get(
     keyPath: KeyPath | string | Buffer,
     raw: true,
   ): Promise<Buffer | undefined>;
   @ready(new errors.ErrorDBTransactionDestroyed())
   public async get<T>(
     keyPath: KeyPath | string | Buffer,
     raw: boolean = false,
   ): Promise<T | Buffer | undefined> {
     keyPath = utils.toKeyPath(keyPath);
-    let value = await this.db._get(
-      [...this.transactionDataPath, ...keyPath],
-      raw as any,
-    );
-    if (value === undefined) {
-      if (
-        (await this.db._get([
-          ...this.transactionTombstonePath,
-          ...keyPath,
-        ])) !== true
-      ) {
-        value = await this.db.get(keyPath, raw as any);
+    keyPath = ['data', ...keyPath];
+    let data: Buffer;
+    try {
+      const key = utils.keyPathToKey(keyPath);
+      data = await rocksdbP.transactionGet(this._transaction, key, {
+        valueEncoding: 'buffer',
+        snapshot: this.setupSnapshot(),
+      });
+    } catch (e) {
+      if (e.code === 'NOT_FOUND') {
+        return undefined;
+      }
+      throw e;
+    }
+    return this._db.deserializeDecrypt(data, raw as any);
+  }
+
+  /**
+   * Use this to address write skews
+   */
+  public async getForUpdate<T>(
+    keyPath: KeyPath | string | Buffer,
+    raw?: false,
+  ): Promise<T | undefined>;
+  public async getForUpdate(
+    keyPath: KeyPath | string | Buffer,
+    raw: true,
+  ): Promise<Buffer | undefined>;
+  @ready(new errors.ErrorDBTransactionDestroyed())
+  public async getForUpdate<T>(
+    keyPath: KeyPath | string | Buffer,
+    raw: boolean = false,
+  ): Promise<T | Buffer | undefined> {
+    keyPath = utils.toKeyPath(keyPath);
+    keyPath = ['data', ...keyPath];
+    let data: Buffer;
+    try {
+      const key = utils.keyPathToKey(keyPath);
+      data = await rocksdbP.transactionGetForUpdate(this._transaction, key, {
+        valueEncoding: 'buffer',
+        snapshot: this.setupSnapshot(),
+      });
+    } catch (e) {
+      if (e.code === 'NOT_FOUND') {
+        return undefined;
       }
-      // Don't set it in the transaction DB
-      // Because this is not a repeatable-read "snapshot"
+      throw e;
     }
-    return value;
+    return this._db.deserializeDecrypt(data, raw as any);
   }
 
   public async put(
@@ -169,218 +297,96 @@ class DBTransaction {
     value: any,
     raw: boolean = false,
   ): Promise<void> {
+    this.setupSnapshot();
     keyPath = utils.toKeyPath(keyPath);
-
await this.db._put( - [...this.transactionDataPath, ...keyPath], - value, - raw as any, - ); - await this.db._del([...this.transactionTombstonePath, ...keyPath]); - this._ops.push({ - type: 'put', - keyPath, - value, - raw, - }); + keyPath = ['data', ...keyPath]; + const key = utils.keyPathToKey(keyPath); + const data = await this._db.serializeEncrypt(value, raw as any); + return rocksdbP.transactionPut(this._transaction, key, data); } @ready(new errors.ErrorDBTransactionDestroyed()) public async del(keyPath: KeyPath | string | Buffer): Promise { + this.setupSnapshot(); keyPath = utils.toKeyPath(keyPath); - await this.db._del([...this.transactionDataPath, ...keyPath]); - await this.db._put([...this.transactionTombstonePath, ...keyPath], true); - this._ops.push({ - type: 'del', - keyPath, - }); + keyPath = ['data', ...keyPath]; + const key = utils.keyPathToKey(keyPath); + return rocksdbP.transactionDel(this._transaction, key); } public iterator( - options: DBIteratorOptions & { values: false }, - levelPath?: LevelPath, - ): DBIterator; + levelPath: LevelPath | undefined, + options: DBIteratorOptions & { + keys: false; + values: false; + }, + ): DBIterator; + public iterator( + levelPath: LevelPath | undefined, + options: DBIteratorOptions & { + keys: false; + valueAsBuffer: false; + }, + ): DBIterator; public iterator( - options?: DBIteratorOptions & { valueAsBuffer?: true }, - levelPath?: LevelPath, - ): DBIterator; + levelPath: LevelPath | undefined, + options: DBIteratorOptions & { keys: false }, + ): DBIterator; + public iterator( + levelPath: LevelPath | undefined, + options: DBIteratorOptions & { values: false }, + ): DBIterator; public iterator( - options?: DBIteratorOptions & { valueAsBuffer: false }, - levelPath?: LevelPath, + levelPath: LevelPath | undefined, + options: DBIteratorOptions & { + valueAsBuffer: false; + }, ): DBIterator; + public iterator( + levelPath?: LevelPath | undefined, + options?: DBIteratorOptions, + ): DBIterator; @ready(new errors.ErrorDBTransactionDestroyed()) public iterator( - options?: DBIteratorOptions, levelPath: LevelPath = [], - ): DBIterator { - const dataIterator = this.db._iterator( - { - ...options, - keys: true, - keyAsBuffer: true, - }, - ['data', ...levelPath], - ); - const tranIterator = this.db._iterator( - { - ...options, - keys: true, - keyAsBuffer: true, - }, - [...this.transactionDataPath, ...levelPath], - ); - const order = options?.reverse ? 
'desc' : 'asc'; - const processKV = ( - kv: [KeyPath, Buffer | V | undefined], - ): [KeyPath, Buffer | V | undefined] => { - if (options?.keyAsBuffer === false) { - kv[0] = kv[0].map((k) => k.toString('utf-8')); - } - return kv; - }; - const iterator = { - _ended: false, - _nexting: false, - seek: (keyPath: KeyPath | Buffer | string): void => { - if (iterator._ended) { - throw new Error('cannot call seek() after end()'); - } - if (iterator._nexting) { - throw new Error('cannot call seek() before next() has completed'); - } - dataIterator.seek(keyPath); - tranIterator.seek(keyPath); - }, - end: async () => { - if (iterator._ended) { - throw new Error('end() already called on iterator'); - } - iterator._ended = true; - await dataIterator.end(); - await tranIterator.end(); - }, - next: async () => { - if (iterator._ended) { - throw new Error('cannot call next() after end()'); - } - if (iterator._nexting) { - throw new Error( - 'cannot call next() before previous next() has completed', - ); - } - iterator._nexting = true; - try { - while (true) { - const tranKV = (await tranIterator.next()) as - | [KeyPath, Buffer | undefined] - | undefined; - const dataKV = (await dataIterator.next()) as - | [KeyPath, Buffer | undefined] - | undefined; - // If both are finished, iterator is finished - if (tranKV == null && dataKV == null) { - return undefined; - } - // If tranIterator is not finished but dataIterator is finished - // continue with tranIterator - if (tranKV != null && dataKV == null) { - return processKV(tranKV); - } - // If tranIterator is finished but dataIterator is not finished - // continue with the dataIterator - if (tranKV == null && dataKV != null) { - // If the dataKey is entombed, skip iteration - if ( - (await this.db._get( - this.transactionTombstonePath.concat(levelPath, dataKV[0]), - )) === true - ) { - continue; - } - return processKV(dataKV); - } - const [tranKeyPath, tranData] = tranKV as [ - KeyPath, - Buffer | V | undefined, - ]; - const [dataKeyPath, dataData] = dataKV as [ - KeyPath, - Buffer | V | undefined, - ]; - const keyCompare = Buffer.compare( - utils.keyPathToKey(tranKeyPath), - utils.keyPathToKey(dataKeyPath), - ); - if (keyCompare < 0) { - if (order === 'asc') { - dataIterator.seek(tranKeyPath); - return processKV([tranKeyPath, tranData]); - } else if (order === 'desc') { - tranIterator.seek(dataKeyPath); - // If the dataKey is entombed, skip iteration - if ( - (await this.db._get( - this.transactionTombstonePath.concat( - levelPath, - dataKeyPath, - ), - )) === true - ) { - continue; - } - return processKV([dataKeyPath, dataData]); - } - } else if (keyCompare > 0) { - if (order === 'asc') { - tranIterator.seek(dataKeyPath); - // If the dataKey is entombed, skip iteration - if ( - (await this.db._get( - this.transactionTombstonePath.concat( - levelPath, - dataKeyPath, - ), - )) === true - ) { - continue; - } - return processKV([dataKeyPath, dataData]); - } else if (order === 'desc') { - dataIterator.seek(tranKeyPath); - return processKV([tranKeyPath, tranData]); - } - } else { - return processKV([tranKeyPath, tranData]); - } - } - } finally { - iterator._nexting = false; - } - }, - [Symbol.asyncIterator]: async function* () { - try { - let kv: [KeyPath, any] | undefined; - while ((kv = await iterator.next()) !== undefined) { - yield kv; - } - } finally { - if (!iterator._ended) await iterator.end(); - } - }, - }; - return iterator; + options: DBIteratorOptions = {}, + ): DBIterator { + levelPath = ['data', ...levelPath]; + return new DBIterator({ + ...options, + 
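/* Editor's note (annotation, not part of the diff): transactional iterators
   are pinned to the transaction's snapshot (set up lazily via
   `setupSnapshot` below), so a long scan is not perturbed by concurrent
   commits. A sketch:

   tran.setSnapshot(); // optional: fix the read point explicitly
   for await (const [keyPath, value] of tran.iterator(['users'])) {
     // reads here are consistent with the snapshot taken above
   }
*/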
db: this._db, + transaction: this, + levelPath, + logger: this.logger.getChild(DBIterator.name), + snapshot: this.setupSnapshot(), + }); } @ready(new errors.ErrorDBTransactionDestroyed()) - public async clear(levelPath: LevelPath = []): Promise { - for await (const [keyPath] of this.iterator({ values: false }, levelPath)) { - await this.del(levelPath.concat(keyPath)); - } + public async clear( + levelPath: LevelPath = [], + options: DBClearOptions = {}, + ): Promise { + levelPath = ['data', ...levelPath]; + const options_ = utils.iterationOptions(options, levelPath); + return rocksdbP.transactionClear(this._transaction, { + ...options_, + snapshot: this.setupSnapshot(), + }); } @ready(new errors.ErrorDBTransactionDestroyed()) - public async count(levelPath: LevelPath = []): Promise { + public async count( + levelPath: LevelPath = [], + options: DBCountOptions = {}, + ): Promise { + const options_ = { + ...options, + keys: true, + values: false, + }; let count = 0; - for await (const _ of this.iterator({ values: false }, levelPath)) { + for await (const _ of this.iterator(levelPath, options_)) { count++; } return count; @@ -404,11 +410,14 @@ class DBTransaction { levelPath: LevelPath = [], raw: boolean = false, ): Promise> { - return await this.db.dump( - this.transactionPath.concat(levelPath), - raw as any, - true, - ); + const records: Array<[KeyPath, any]> = []; + for await (const [keyPath, v] of this.iterator(levelPath, { + keyAsBuffer: raw, + valueAsBuffer: raw, + })) { + records.push([keyPath, v]); + } + return records; } @ready(new errors.ErrorDBTransactionDestroyed()) @@ -434,19 +443,41 @@ class DBTransaction { if (this._committed) { return; } - this.logger.debug( - `Committing ${this.constructor.name} ${this.transactionId}`, - ); + this.logger.debug(`Committing ${this.constructor.name} ${this.id}`); + for (const iterator of this._iteratorRefs) { + await iterator.destroy(); + } this._committed = true; try { - await this.db.batch(this._ops); - } catch (e) { - this._committed = false; - throw e; + try { + // If this fails, the `DBTransaction` is still considered committed + // it must be destroyed, it cannot be reused + await rocksdbP.transactionCommit(this._transaction); + } catch (e) { + if (e.code === 'TRANSACTION_CONFLICT') { + this.logger.debug( + `Failed Committing ${this.constructor.name} ${this.id} due to ${errors.ErrorDBTransactionConflict.name}`, + ); + throw new errors.ErrorDBTransactionConflict(undefined, { + cause: e, + }); + } else { + this.logger.debug( + `Failed Committing ${this.constructor.name} ${this.id} due to ${e.message}`, + ); + throw e; + } + } + for (const f of this._callbacksSuccess) { + await f(); + } + } finally { + for (const f of this._callbacksFinally) { + await f(); + } } - this.logger.debug( - `Committed ${this.constructor.name} ${this.transactionId}`, - ); + await this.destroy(); + this.logger.debug(`Committed ${this.constructor.name} ${this.id}`); } @ready(new errors.ErrorDBTransactionDestroyed()) @@ -457,41 +488,50 @@ class DBTransaction { if (this._rollbacked) { return; } - this.logger.debug( - `Rollbacking ${this.constructor.name} ${this.transactionId}`, - ); - this._rollbacked = true; - for (const f of this._callbacksFailure) { - await f(e); + this.logger.debug(`Rollbacking ${this.constructor.name} ${this.id}`); + for (const iterator of this._iteratorRefs) { + await iterator.destroy(); } - for (const f of this._callbacksFinally) { - await f(e); + this._rollbacked = true; + try { + // If this fails, the `DBTransaction` is still considered 
rollbacked
+      // it must be destroyed, it cannot be reused
+      await rocksdbP.transactionRollback(this._transaction);
+      for (const f of this._callbacksFailure) {
+        await f(e);
+      }
+    } finally {
+      for (const f of this._callbacksFinally) {
+        await f(e);
+      }
+    }
-    this.logger.debug(
-      `Rollbacked ${this.constructor.name} ${this.transactionId}`,
-    );
+    await this.destroy();
+    this.logger.debug(`Rollbacked ${this.constructor.name} ${this.id}`);
   }
 
+  /**
+   * Set the snapshot manually
+   * This ensures that consistent reads and writes start
+   * after this method is executed
+   * This is idempotent
+   * Note that normally snapshots are set lazily upon the first
+   * transaction db operation
+   */
   @ready(new errors.ErrorDBTransactionDestroyed())
-  public async finalize(): Promise<void> {
-    if (this._rollbacked) {
-      throw new errors.ErrorDBTransactionRollbacked();
-    }
-    if (!this._committed) {
-      throw new errors.ErrorDBTransactionNotCommitted();
-    }
-    this.logger.debug(
-      `Finalize ${this.constructor.name} ${this.transactionId}`,
-    );
-    for (const f of this._callbacksSuccess) {
-      await f();
-    }
-    for (const f of this._callbacksFinally) {
-      await f();
+  public setSnapshot(): void {
+    this.setupSnapshot();
+  }
+
+  /**
+   * Sets up the snapshot
+   * This is executed lazily, not at this construction,
+   * but at the first transactional operation
+   */
+  protected setupSnapshot(): RocksDBTransactionSnapshot {
+    if (this._snapshot == null) {
+      this._snapshot = rocksdbP.transactionSnapshot(this._transaction);
     }
-    this.logger.debug(
-      `Finalized ${this.constructor.name} ${this.transactionId}`,
-    );
+    return this._snapshot;
   }
 }
diff --git a/src/errors.ts b/src/errors.ts
index 2a44ac43..52f3a8aa 100644
--- a/src/errors.ts
+++ b/src/errors.ts
@@ -40,22 +40,53 @@
 class ErrorDBParseValue<T> extends ErrorDB<T> {
   static description = 'DB value parsing failed';
 }
 
-class ErrorDBTransactionDestroyed<T> extends ErrorDB<T> {
+class ErrorDBIterator<T> extends ErrorDB<T> {
+  static description = 'DBIterator error';
+}
+
+class ErrorDBIteratorDestroyed<T> extends ErrorDBIterator<T> {
+  static description = 'DBIterator is destroyed';
+}
+
+class ErrorDBIteratorBusy<T> extends ErrorDBIterator<T> {
+  static description = 'DBIterator is busy';
+}
+
+class ErrorDBTransaction<T> extends ErrorDB<T> {
+  static description = 'DBTransaction error';
+}
+
+class ErrorDBTransactionDestroyed<T> extends ErrorDBTransaction<T> {
   static description = 'DBTransaction is destroyed';
 }
 
-class ErrorDBTransactionCommitted<T> extends ErrorDB<T> {
+class ErrorDBTransactionCommitted<T> extends ErrorDBTransaction<T> {
   static description = 'DBTransaction is committed';
 }
 
-class ErrorDBTransactionNotCommitted<T> extends ErrorDB<T> {
+class ErrorDBTransactionNotCommitted<T> extends ErrorDBTransaction<T> {
   static description = 'DBTransaction is not comitted';
 }
 
-class ErrorDBTransactionRollbacked<T> extends ErrorDB<T> {
+class ErrorDBTransactionRollbacked<T> extends ErrorDBTransaction<T> {
   static description = 'DBTransaction is rollbacked';
 }
 
+class ErrorDBTransactionNotCommittedNorRollbacked<
+  T,
+> extends ErrorDBTransaction<T> {
+  static description = 'DBTransaction is not committed nor rollbacked';
+}
+
+class ErrorDBTransactionConflict<T> extends ErrorDBTransaction<T> {
+  static description = 'DBTransaction cannot commit due to conflicting writes';
+}
+
+class ErrorDBTransactionLockType<T> extends ErrorDBTransaction<T> {
+  static description =
+    'DBTransaction does not support upgrading or downgrading the lock type';
+}
+
 export {
   ErrorDB,
   ErrorDBRunning,
@@ -67,8 +98,15 @@
   ErrorDBDecrypt,
   ErrorDBParseKey,
   ErrorDBParseValue,
+  ErrorDBIterator,
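/* Editor's note (annotation, not part of the diff): optimistic transactions
   report write-write conflicts at commit time as
   `ErrorDBTransactionConflict`. A retry sketch, where `makeTran` is a
   hypothetical factory producing a fresh `DBTransaction` per attempt and
   `errors` is this module's error namespace:

   for (let attempt = 0; attempt < 3; attempt++) {
     const tran = makeTran(); // hypothetical factory
     try {
       const n = (await tran.getForUpdate<number>(['counter'])) ?? 0;
       await tran.put(['counter'], n + 1);
       await tran.commit(); // destroys the transaction on success
       break;
     } catch (e) {
       if (!(e instanceof errors.ErrorDBTransactionConflict)) throw e;
       await tran.destroy(); // a conflicted commit cannot be reused
     }
   }
*/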
ErrorDBIteratorDestroyed, + ErrorDBIteratorBusy, + ErrorDBTransaction, ErrorDBTransactionDestroyed, ErrorDBTransactionCommitted, ErrorDBTransactionNotCommitted, ErrorDBTransactionRollbacked, + ErrorDBTransactionNotCommittedNorRollbacked, + ErrorDBTransactionConflict, + ErrorDBTransactionLockType, }; diff --git a/src/index.ts b/src/index.ts index 897e4303..791148a6 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,5 +1,7 @@ export { default as DB } from './DB'; export { default as DBTransaction } from './DBTransaction'; +export { default as DBIterator } from './DBIterator'; export * as utils from './utils'; export * as errors from './errors'; +export * as rocksdb from './rocksdb'; export * from './types'; diff --git a/src/rocksdb/index.ts b/src/rocksdb/index.ts new file mode 100644 index 00000000..c95d7a8a --- /dev/null +++ b/src/rocksdb/index.ts @@ -0,0 +1,5 @@ +export { default as rocksdb } from './rocksdb'; +export { default as rocksdbP } from './rocksdbP'; +export type { RocksDB } from './rocksdb'; +export type { RocksDBP } from './rocksdbP'; +export * from './types'; diff --git a/src/rocksdb/napi/batch.cpp b/src/rocksdb/napi/batch.cpp new file mode 100644 index 00000000..ccfcb3da --- /dev/null +++ b/src/rocksdb/napi/batch.cpp @@ -0,0 +1,44 @@ +#define NAPI_VERSION 3 + +#include "batch.h" + +#include +#include +#include +#include + +#include "debug.h" +#include "database.h" + +Batch::Batch(Database* database) + : database_(database), batch_(new rocksdb::WriteBatch()), hasData_(false) { + LOG_DEBUG("Batch:Constructing Batch\n"); + LOG_DEBUG("Batch:Constructed Batch\n"); +} + +Batch::~Batch() { + LOG_DEBUG("Batch:Destroying Batch\n"); + delete batch_; + LOG_DEBUG("Batch:Destroyed Batch\n"); +} + +void Batch::Put(rocksdb::Slice key, rocksdb::Slice value) { + batch_->Put(key, value); + hasData_ = true; +} + +void Batch::Del(rocksdb::Slice key) { + batch_->Delete(key); + hasData_ = true; +} + +void Batch::Clear() { + batch_->Clear(); + hasData_ = false; +} + +rocksdb::Status Batch::Write(bool sync) { + rocksdb::WriteOptions options; + options.sync = sync; + return database_->WriteBatch(options, batch_); +} diff --git a/src/rocksdb/napi/batch.h b/src/rocksdb/napi/batch.h new file mode 100644 index 00000000..de97808d --- /dev/null +++ b/src/rocksdb/napi/batch.h @@ -0,0 +1,32 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include +#include + +#include "database.h" + +/** + * Owns a WriteBatch. 
+ */ +struct Batch { + Batch(Database* database); + + ~Batch(); + + void Put(rocksdb::Slice key, rocksdb::Slice value); + + void Del(rocksdb::Slice key); + + void Clear(); + + rocksdb::Status Write(bool sync); + + Database* database_; + rocksdb::WriteBatch* batch_; + bool hasData_; +}; diff --git a/src/rocksdb/napi/database.cpp b/src/rocksdb/napi/database.cpp new file mode 100644 index 00000000..cb9dcc2f --- /dev/null +++ b/src/rocksdb/napi/database.cpp @@ -0,0 +1,187 @@ +#define NAPI_VERSION 3 + +#include "database.h" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "worker.h" + +Database::Database() + : db_(nullptr), + isClosing_(false), + hasClosed_(false), + currentIteratorId_(0), + currentTransactionId_(0), + closeWorker_(nullptr), + ref_(nullptr), + pendingWork_(0) { + LOG_DEBUG("Database:Constructing Database\n"); + LOG_DEBUG("Database:Constructed Database\n"); +} + +Database::~Database() { + LOG_DEBUG("Database:Destroying Database\n"); + assert(hasClosed_); + delete db_; + LOG_DEBUG("Database:Destroyed Database\n"); +} + +void Database::Attach(napi_env env, napi_value database_ref) { + if (ref_ != nullptr) return; + NAPI_STATUS_THROWS_VOID(napi_create_reference(env, database_ref, 0, &ref_)); +} + +void Database::Detach(napi_env env) { + if (ref_ == nullptr) return; + NAPI_STATUS_THROWS_VOID(napi_delete_reference(env, ref_)); + ref_ = nullptr; +} + +rocksdb::Status Database::Open(const rocksdb::Options& options, + const char* location) { + return rocksdb::OptimisticTransactionDB::Open(options, location, &db_); +} + +void Database::Close() { + LOG_DEBUG("Database:Calling %s\n", __func__); + if (hasClosed_) return; + hasClosed_ = true; + delete db_; + db_ = nullptr; + LOG_DEBUG("Database:Called %s\n", __func__); +} + +rocksdb::Status Database::Put(const rocksdb::WriteOptions& options, + rocksdb::Slice key, rocksdb::Slice value) { + assert(!hasClosed_); + return db_->Put(options, key, value); +} + +rocksdb::Status Database::Get(const rocksdb::ReadOptions& options, + rocksdb::Slice key, std::string& value) { + assert(!hasClosed_); + return db_->Get(options, key, &value); +} + +std::vector Database::MultiGet( + const rocksdb::ReadOptions& options, + const std::vector& keys, std::vector& values) { + assert(!hasClosed_); + return db_->MultiGet(options, keys, &values); +} + +rocksdb::Status Database::Del(const rocksdb::WriteOptions& options, + rocksdb::Slice key) { + assert(!hasClosed_); + return db_->Delete(options, key); +} + +rocksdb::Status Database::WriteBatch(const rocksdb::WriteOptions& options, + rocksdb::WriteBatch* batch) { + assert(!hasClosed_); + return db_->Write(options, batch); +} + +uint64_t Database::ApproximateSize(const rocksdb::Range* range) { + assert(!hasClosed_); + uint64_t size = 0; + db_->GetApproximateSizes(range, 1, &size); + return size; +} + +void Database::CompactRange(const rocksdb::Slice* start, + const rocksdb::Slice* end) { + assert(!hasClosed_); + rocksdb::CompactRangeOptions options; + db_->CompactRange(options, start, end); +} + +void Database::GetProperty(const rocksdb::Slice& property, std::string* value) { + assert(!hasClosed_); + db_->GetProperty(property, value); +} + +const rocksdb::Snapshot* Database::NewSnapshot() { + assert(!hasClosed_); + return db_->GetSnapshot(); +} + +rocksdb::Iterator* Database::NewIterator(rocksdb::ReadOptions& options) { + assert(!hasClosed_); + return db_->NewIterator(options); +} + +rocksdb::Transaction* 
Database::NewTransaction(rocksdb::WriteOptions& options) { + assert(!hasClosed_); + return db_->BeginTransaction(options); +} + +void Database::ReleaseSnapshot(const rocksdb::Snapshot* snapshot) { + assert(!hasClosed_); + return db_->ReleaseSnapshot(snapshot); +} + +void Database::AttachSnapshot(napi_env env, uint32_t id, Snapshot* snapshot) { + assert(!hasClosed_); + snapshots_[id] = snapshot; + IncrementPendingWork(env); +} + +void Database::DetachSnapshot(napi_env env, uint32_t id) { + snapshots_.erase(id); + DecrementPendingWork(env); +} + +void Database::AttachIterator(napi_env env, uint32_t id, Iterator* iterator) { + assert(!hasClosed_); + iterators_[id] = iterator; + IncrementPendingWork(env); +} + +void Database::DetachIterator(napi_env env, uint32_t id) { + iterators_.erase(id); + DecrementPendingWork(env); +} + +void Database::AttachTransaction(napi_env env, uint32_t id, + Transaction* transaction) { + assert(!hasClosed_); + transactions_[id] = transaction; + IncrementPendingWork(env); +} + +void Database::DetachTransaction(napi_env env, uint32_t id) { + transactions_.erase(id); + DecrementPendingWork(env); +} + +void Database::IncrementPendingWork(napi_env env) { + assert(!hasClosed_); + napi_reference_ref(env, ref_, &pendingWork_); +} + +void Database::DecrementPendingWork(napi_env env) { + napi_reference_unref(env, ref_, &pendingWork_); + // If the `closeWorker_` is set, then the closing operation + // is waiting until all pending work is completed + if (closeWorker_ != nullptr && pendingWork_ == 0) { + closeWorker_->Queue(env); + closeWorker_ = nullptr; + } +} + +bool Database::HasPendingWork() const { + // Initial JS reference count starts at 0 + return pendingWork_ > 0; +} diff --git a/src/rocksdb/napi/database.h b/src/rocksdb/napi/database.h new file mode 100644 index 00000000..baada6fa --- /dev/null +++ b/src/rocksdb/napi/database.h @@ -0,0 +1,126 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/** + * Forward declarations + */ +struct Iterator; +struct Transaction; +struct Snapshot; +struct BaseWorker; + +/** + * Owns the RocksDB storage, cache, filter policy and iterators. 
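 * Editor's note (annotation, not part of the diff): the handle held here is
 * an OptimisticTransactionDB, so transactions opened through NewTransaction
 * use optimistic concurrency control: write conflicts are detected at commit
 * time (surfaced to JS as TRANSACTION_CONFLICT) rather than by locking keys
 * at write time.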
+ */
+struct Database {
+  /**
+   * Constructs database
+   */
+  Database();
+
+  /**
+   * Destroys the database
+   * Call `Database::Close()` beforehand
+   */
+  ~Database();
+
+  /**
+   * Creates JS reference count at 0
+   * This is a weak reference, it will be GCed once the
+   * db reference is no longer live
+   * Repeating this call is idempotent
+   */
+  void Attach(napi_env env, napi_value database_ref);
+
+  /**
+   * Deletes JS reference count to allow GC of this object
+   * Even though this object starts out as a weak reference,
+   * this should still be called when the object is GCed
+   * Repeating this call is idempotent
+   */
+  void Detach(napi_env env);
+
+  rocksdb::Status Open(const rocksdb::Options& options, const char* location);
+
+  /**
+   * Close the database
+   * Repeating this call is idempotent
+   */
+  void Close();
+
+  rocksdb::Status Put(const rocksdb::WriteOptions& options, rocksdb::Slice key,
+                      rocksdb::Slice value);
+
+  rocksdb::Status Get(const rocksdb::ReadOptions& options, rocksdb::Slice key,
+                      std::string& value);
+
+  std::vector<rocksdb::Status> MultiGet(const rocksdb::ReadOptions& options,
+                                        const std::vector<rocksdb::Slice>& keys,
+                                        std::vector<std::string>& values);
+
+  rocksdb::Status Del(const rocksdb::WriteOptions& options, rocksdb::Slice key);
+
+  rocksdb::Status WriteBatch(const rocksdb::WriteOptions& options,
+                             rocksdb::WriteBatch* batch);
+
+  uint64_t ApproximateSize(const rocksdb::Range* range);
+
+  void CompactRange(const rocksdb::Slice* start, const rocksdb::Slice* end);
+
+  void GetProperty(const rocksdb::Slice& property, std::string* value);
+
+  const rocksdb::Snapshot* NewSnapshot();
+
+  rocksdb::Iterator* NewIterator(rocksdb::ReadOptions& options);
+
+  rocksdb::Transaction* NewTransaction(rocksdb::WriteOptions& options);
+
+  void ReleaseSnapshot(const rocksdb::Snapshot* snapshot);
+
+  void AttachIterator(napi_env env, uint32_t id, Iterator* iterator);
+
+  void DetachIterator(napi_env env, uint32_t id);
+
+  void AttachTransaction(napi_env env, uint32_t id, Transaction* transaction);
+
+  void DetachTransaction(napi_env env, uint32_t id);
+
+  void AttachSnapshot(napi_env env, uint32_t id, Snapshot* snapshot);
+
+  void DetachSnapshot(napi_env env, uint32_t id);
+
+  void IncrementPendingWork(napi_env env);
+
+  void DecrementPendingWork(napi_env env);
+
+  bool HasPendingWork() const;
+
+  rocksdb::OptimisticTransactionDB* db_;
+  bool isClosing_;
+  bool hasClosed_;
+  uint32_t currentIteratorId_;
+  uint32_t currentTransactionId_;
+  uint32_t currentSnapshotId_;
+  std::map<uint32_t, Iterator*> iterators_;
+  std::map<uint32_t, Transaction*> transactions_;
+  std::map<uint32_t, Snapshot*> snapshots_;
+  BaseWorker* closeWorker_;
+  napi_ref ref_;
+
+ private:
+  uint32_t pendingWork_;
+};
diff --git a/src/rocksdb/napi/debug.cpp b/src/rocksdb/napi/debug.cpp
new file mode 100644
index 00000000..8dbcf3df
--- /dev/null
+++ b/src/rocksdb/napi/debug.cpp
@@ -0,0 +1,26 @@
+#include "debug.h"
+
+#include
+#include
+#include
+
+#include
+#include
+
+bool is_log_debug_enabled = false;
+
+void CheckNodeDebugNative() {
+  const char* node_debug_native_env = getenv("NODE_DEBUG_NATIVE");
+  if (node_debug_native_env != nullptr) {
+    std::string node_debug_native(node_debug_native_env);
+    std::stringstream ss(node_debug_native);
+    while (ss.good()) {
+      std::string module;
+      getline(ss, module, ',');
+      if (module == "*" || module == "rocksdb") {
+        is_log_debug_enabled = true;
+        break;
+      }
+    }
+  }
+}
diff --git a/src/rocksdb/napi/debug.h b/src/rocksdb/napi/debug.h
new file mode 100644
index 00000000..5e197fcb
--- /dev/null
+++ b/src/rocksdb/napi/debug.h
@@ -0,0 +1,14 @@
+#pragma once
+
+#include
+
+#define LOG_DEBUG(...)               \
+  do {                               \
+    if (is_log_debug_enabled) {      \
+      fprintf(stderr, __VA_ARGS__);  \
+    }                                \
+  } while (0)
+
+extern bool is_log_debug_enabled;
+
+void CheckNodeDebugNative();
diff --git a/src/rocksdb/napi/index.cpp b/src/rocksdb/napi/index.cpp
new file mode 100644
index 00000000..c632add8
--- /dev/null
+++ b/src/rocksdb/napi/index.cpp
@@ -0,0 +1,1147 @@
+#define NAPI_VERSION 3
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "debug.h"
+#include "database.h"
+#include "batch.h"
+#include "iterator.h"
+#include "transaction.h"
+#include "snapshot.h"
+#include "utils.h"
+#include "workers/database_workers.h"
+#include "workers/batch_workers.h"
+#include "workers/iterator_workers.h"
+#include "workers/transaction_workers.h"
+#include "workers/snapshot_workers.h"
+
+/**
+ * Hook for when napi environment exits
+ * The `napi_env` cannot be accessed at this point in time
+ * All napi references are already automatically deleted
+ * It is guaranteed that already-scheduled `napi_async_work` items are finished
+ * Cleanup operations here have to be synchronous
+ * When the napi environment exits, the GC callbacks of live references will
+ * run after this hook is called
+ */
+static void env_cleanup_hook(void* arg) {
+  LOG_DEBUG("Cleaning NAPI Environment\n");
+  auto database = static_cast<Database*>(arg);
+  // Do everything that `dbClose` does but synchronously
+  // This may execute when the database hasn't been opened
+  // or when the database hasn't been closed
+  // If it hasn't been opened, it means only `dbInit` was called
+  // If it hasn't been closed, then `GCDatabase` did not yet run
+  // Therefore this must also check if the `db_` is still set
+  if (!database->hasClosed_ && database->db_ != nullptr) {
+    std::map<uint32_t, Iterator*> iterators = database->iterators_;
+    std::map<uint32_t, Iterator*>::iterator iterator_it;
+    for (iterator_it = iterators.begin(); iterator_it != iterators.end();
+         ++iterator_it) {
+      auto iterator = iterator_it->second;
+      iterator->Close();
+    }
+    std::map<uint32_t, Transaction*> transactions = database->transactions_;
+    std::map<uint32_t, Transaction*>::iterator transaction_it;
+    for (transaction_it = transactions.begin();
+         transaction_it != transactions.end(); ++transaction_it) {
+      auto transaction = transaction_it->second;
+      // Close transaction iterators too
+      std::map<uint32_t, Iterator*> iterators = transaction->iterators_;
+      std::map<uint32_t, Iterator*>::iterator iterator_it;
+      for (iterator_it = iterators.begin(); iterator_it != iterators.end();
+           ++iterator_it) {
+        auto iterator = iterator_it->second;
+        iterator->Close();
+      }
+      transaction->Rollback();
+    }
+    std::map<uint32_t, Snapshot*> snapshots = database->snapshots_;
+    std::map<uint32_t, Snapshot*>::iterator snapshot_it;
+    for (snapshot_it = snapshots.begin(); snapshot_it != snapshots.end();
+         ++snapshot_it) {
+      auto snapshot = snapshot_it->second;
+      snapshot->Release();
+    }
+    database->Close();
+  }
+  LOG_DEBUG("Cleaned NAPI Environment\n");
+}
+
+/**
+ * Used by:
+ * - `iteratorClose`
+ * - `dbClose`
+ * - `transactionCommit`
+ * - `TransactionRollbackDo`
+ */
+static void IteratorCloseDo(napi_env env, Iterator* iterator, napi_value cb) {
+  LOG_DEBUG("%s:Calling %s\n", __func__, __func__);
+  IteratorCloseWorker* worker = new IteratorCloseWorker(env, iterator, cb);
+  iterator->isClosing_ = true;
+  // The only pending work for iterator is the `IteratorNextWorker`
+  if (!iterator->nexting_) {
+    LOG_DEBUG("%s:Queuing IteratorCloseWorker\n", __func__);
+    worker->Queue(env);
+    LOG_DEBUG("%s:Called %s\n", __func__, __func__);
+    return;
+  }
+  LOG_DEBUG("%s:Delayed IteratorCloseWorker\n", __func__);
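/* Editor's note (annotation, not part of the diff): the close worker is
   parked on the iterator below instead of being queued; by the same pattern
   as Database::DecrementPendingWork above, it is presumably queued once the
   in-flight next operation completes, so closing never races an outstanding
   read. */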
iterator->closeWorker_ = worker; + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Used by `transactionRollback` and `dbClose` + */ +static void TransactionRollbackDo(napi_env env, Transaction* transaction, + napi_value cb) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + TransactionRollbackWorker* worker = + new TransactionRollbackWorker(env, transaction, cb); + transaction->isRollbacking_ = true; + if (!transaction->HasPendingWork()) { + LOG_DEBUG("%s:Queuing TransactionRollbackWorker\n", __func__); + worker->Queue(env); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + return; + } + LOG_DEBUG("%s:Delayed TransactionRollbackWorker\n", __func__); + transaction->closeWorker_ = worker; + napi_value noop; + napi_create_function(env, NULL, 0, noop_callback, NULL, &noop); + std::map iterators = transaction->iterators_; + std::map::iterator iterator_it; + for (iterator_it = iterators.begin(); iterator_it != iterators.end(); + ++iterator_it) { + Iterator* iterator = iterator_it->second; + if (iterator->isClosing_ || iterator->hasClosed_) { + continue; + } + LOG_DEBUG("%s:Closing Iterator %d\n", __func__, iterator->id_); + IteratorCloseDo(env, iterator, noop); + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Used by `snapshotRelease` and `dbClose` + */ +static void SnapshotReleaseDo(napi_env env, Snapshot* snapshot, napi_value cb) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + SnapshotReleaseWorker* worker = new SnapshotReleaseWorker(env, snapshot, cb); + snapshot->isReleasing_ = true; + LOG_DEBUG("%s:Queuing SnapshotReleaseWorker\n", __func__); + worker->Queue(env); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Garbage collection `Database` + * Only occurs when the object falls out of scope + * with no references and no concurrent workers + */ +static void GCDatabase(napi_env env, void* data, void* hint) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + if (data != nullptr) { + auto database = static_cast(data); + napi_remove_env_cleanup_hook(env, env_cleanup_hook, database); + if (!database->isClosing_ && !database->hasClosed_) { + database->Close(); + database->Detach(env); + } + delete database; + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Garbage collection `Batch` + * Only occurs when the object falls out of scope + * with no references and no concurrent workers + */ +static void GCBatch(napi_env env, void* data, void* hint) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + if (data) { + auto batch = static_cast(data); + delete batch; + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Garbage collection `Iterator` + * Only occurs when the object falls out of scope + * with no references and no concurrent workers + */ +static void GCIterator(napi_env env, void* data, void* hint) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + if (data != nullptr) { + auto iterator = static_cast(data); + if (!iterator->isClosing_ && !iterator->hasClosed_) { + iterator->Close(); + iterator->Detach(env); + } + delete iterator; + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Garbage collect `Transaction` + * Only occurs when the object falls out of scope + * with no references and no concurrent workers + */ +static void GCTransaction(napi_env env, void* data, void* hint) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + if (data != nullptr) { + auto transaction = static_cast(data); + if (!transaction->isCommitting_ && !transaction->hasCommitted_ && + 
!transaction->isRollbacking_ && !transaction->hasRollbacked_) { + transaction->Rollback(); + transaction->Detach(env); + } + delete transaction; + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Garbage collect `Snapshot` + * Only occurs when the object falls out of scope + * with no references and no concurrent workers + */ +static void GCSnapshot(napi_env env, void* data, void* hint) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + if (data != nullptr) { + auto snapshot = static_cast(data); + if (!snapshot->isReleasing_ && !snapshot->hasReleased_) { + snapshot->Release(); + snapshot->Detach(env); + } + delete snapshot; + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Garbage collect `TransactionSnapshot` + * Only occurs when the object falls out of scope + * with no references and no concurrent workers + */ +static void GCTransactionSnapshot(napi_env env, void* data, void* hint) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + if (data) { + auto snapshot = static_cast(data); + delete snapshot; + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); +} + +/** + * Creates the Database object + */ +NAPI_METHOD(dbInit) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + Database* database = new Database(); + napi_add_env_cleanup_hook(env, env_cleanup_hook, database); + napi_value database_ref; + NAPI_STATUS_THROWS( + napi_create_external(env, database, GCDatabase, nullptr, &database_ref)); + database->Attach(env, database_ref); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + return database_ref; +} + +/** + * Open a database + */ +NAPI_METHOD(dbOpen) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(4); + NAPI_DB_CONTEXT(); + NAPI_ARGV_UTF8_NEW(location, 1); + + napi_value options = argv[2]; + const bool createIfMissing = + BooleanProperty(env, options, "createIfMissing", true); + const bool errorIfExists = + BooleanProperty(env, options, "errorIfExists", false); + const bool compression = BooleanProperty(env, options, "compression", true); + + const std::string infoLogLevel = StringProperty(env, options, "infoLogLevel"); + + const uint32_t cacheSize = Uint32Property(env, options, "cacheSize", 8 << 20); + const uint32_t writeBufferSize = + Uint32Property(env, options, "writeBufferSize", 4 << 20); + const uint32_t blockSize = Uint32Property(env, options, "blockSize", 4096); + const uint32_t maxOpenFiles = + Uint32Property(env, options, "maxOpenFiles", 1000); + const uint32_t blockRestartInterval = + Uint32Property(env, options, "blockRestartInterval", 16); + const uint32_t maxFileSize = + Uint32Property(env, options, "maxFileSize", 2 << 20); + + napi_value callback = argv[3]; + + rocksdb::InfoLogLevel log_level; + rocksdb::Logger* logger; + if (infoLogLevel.size() > 0) { + if (infoLogLevel == "debug") + log_level = rocksdb::InfoLogLevel::DEBUG_LEVEL; + else if (infoLogLevel == "info") + log_level = rocksdb::InfoLogLevel::INFO_LEVEL; + else if (infoLogLevel == "warn") + log_level = rocksdb::InfoLogLevel::WARN_LEVEL; + else if (infoLogLevel == "error") + log_level = rocksdb::InfoLogLevel::ERROR_LEVEL; + else if (infoLogLevel == "fatal") + log_level = rocksdb::InfoLogLevel::FATAL_LEVEL; + else if (infoLogLevel == "header") + log_level = rocksdb::InfoLogLevel::HEADER_LEVEL; + else { + napi_value callback_error = + CreateCodeError(env, "DB_OPEN", "Invalid log level"); + NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &callback_error)); + NAPI_RETURN_UNDEFINED(); + } + logger = nullptr; + } else { + // In some places 
RocksDB checks this option to see if it should prepare + // debug information (ahead of logging), so set it to the highest level. + log_level = rocksdb::InfoLogLevel::HEADER_LEVEL; + logger = new NullLogger(); + } + + OpenWorker* worker = new OpenWorker( + env, database, callback, location, createIfMissing, errorIfExists, + compression, writeBufferSize, blockSize, maxOpenFiles, + blockRestartInterval, maxFileSize, cacheSize, log_level, logger); + LOG_DEBUG("%s:Queuing OpenWorker\n", __func__); + worker->Queue(env); + delete[] location; + + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Close a database + * This is asynchronous + */ +NAPI_METHOD(dbClose) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(2); + NAPI_DB_CONTEXT(); + napi_value callback = argv[1]; + CloseWorker* worker = new CloseWorker(env, database, callback); + database->isClosing_ = true; + if (!database->HasPendingWork()) { + LOG_DEBUG("%s:Queuing CloseWorker\n", __func__); + worker->Queue(env); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + NAPI_RETURN_UNDEFINED(); + } + LOG_DEBUG("%s:Delayed CloseWorker\n", __func__); + database->closeWorker_ = worker; + napi_value noop; + napi_create_function(env, NULL, 0, noop_callback, NULL, &noop); + std::map iterators = database->iterators_; + std::map::iterator iterator_it; + for (iterator_it = iterators.begin(); iterator_it != iterators.end(); + ++iterator_it) { + auto iterator = iterator_it->second; + if (iterator->isClosing_ || iterator->hasClosed_) { + continue; + } + LOG_DEBUG("%s:Closing Iterator %d\n", __func__, iterator->id_); + IteratorCloseDo(env, iterator, noop); + } + std::map transactions = database->transactions_; + std::map::iterator transaction_it; + for (transaction_it = transactions.begin(); + transaction_it != transactions.end(); ++transaction_it) { + auto transaction = transaction_it->second; + if (transaction->isCommitting_ || transaction->hasCommitted_ || + transaction->isRollbacking_ || transaction->hasRollbacked_) { + continue; + } + LOG_DEBUG("%s:Rollbacking Transaction %d\n", __func__, transaction->id_); + TransactionRollbackDo(env, transaction, noop); + } + std::map snapshots = database->snapshots_; + std::map::iterator snapshot_it; + for (snapshot_it = snapshots.begin(); snapshot_it != snapshots.end(); + ++snapshot_it) { + auto snapshot = snapshot_it->second; + if (snapshot->isReleasing_ || snapshot->hasReleased_) { + continue; + } + LOG_DEBUG("%s:Releasing Snapshot %d\n", __func__, snapshot->id_); + SnapshotReleaseDo(env, snapshot, noop); + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Gets a value from a database. + */ +NAPI_METHOD(dbGet) { + NAPI_ARGV(4); + NAPI_DB_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + napi_value options = argv[2]; + const bool asBuffer = EncodingIsBuffer(env, options, "valueEncoding"); + const bool fillCache = BooleanProperty(env, options, "fillCache", true); + const Snapshot* snapshot = SnapshotProperty(env, options, "snapshot"); + napi_value callback = argv[3]; + GetWorker* worker = new GetWorker(env, database, callback, key, asBuffer, + fillCache, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Gets many values from a database. 
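 * Editor's note (annotation, not part of the diff): backed by MultiGetWorker
 * and Database::MultiGet, so all keys are fetched in a single native batch;
 * the call honors the same `fillCache` and `snapshot` options as dbGet.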
+ */ +NAPI_METHOD(dbMultiGet) { + NAPI_ARGV(4); + NAPI_DB_CONTEXT(); + const std::vector<rocksdb::Slice>* keys = KeyArray(env, argv[1]); + napi_value options = argv[2]; + const bool asBuffer = EncodingIsBuffer(env, options, "valueEncoding"); + const bool fillCache = BooleanProperty(env, options, "fillCache", true); + const Snapshot* snapshot = SnapshotProperty(env, options, "snapshot"); + napi_value callback = argv[3]; + MultiGetWorker* worker = new MultiGetWorker(env, database, keys, callback, + asBuffer, fillCache, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Puts a key and a value to a database. + */ +NAPI_METHOD(dbPut) { + NAPI_ARGV(5); + NAPI_DB_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + rocksdb::Slice value = ToSlice(env, argv[2]); + bool sync = BooleanProperty(env, argv[3], "sync", false); + napi_value callback = argv[4]; + PutWorker* worker = new PutWorker(env, database, callback, key, value, sync); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Delete a value from a database. + */ +NAPI_METHOD(dbDel) { + NAPI_ARGV(4); + NAPI_DB_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + bool sync = BooleanProperty(env, argv[2], "sync", false); + napi_value callback = argv[3]; + DelWorker* worker = new DelWorker(env, database, callback, key, sync); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Delete a range from a database. + */ +NAPI_METHOD(dbClear) { + NAPI_ARGV(3); + NAPI_DB_CONTEXT(); + napi_value options = argv[1]; + napi_value callback = argv[2]; + const int limit = Int32Property(env, options, "limit", -1); + std::string* lt = RangeOption(env, options, "lt"); + std::string* lte = RangeOption(env, options, "lte"); + std::string* gt = RangeOption(env, options, "gt"); + std::string* gte = RangeOption(env, options, "gte"); + const Snapshot* snapshot = SnapshotProperty(env, options, "snapshot"); + const bool sync = BooleanProperty(env, options, "sync", false); + IteratorClearWorker* worker = new IteratorClearWorker( + env, database, callback, limit, lt, lte, gt, gte, sync, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Count a range from a database. + */ +NAPI_METHOD(dbCount) { + NAPI_ARGV(3); + NAPI_DB_CONTEXT(); + napi_value options = argv[1]; + napi_value callback = argv[2]; + const int limit = Int32Property(env, options, "limit", -1); + std::string* lt = RangeOption(env, options, "lt"); + std::string* lte = RangeOption(env, options, "lte"); + std::string* gt = RangeOption(env, options, "gt"); + std::string* gte = RangeOption(env, options, "gte"); + const Snapshot* snapshot = SnapshotProperty(env, options, "snapshot"); + IteratorCountWorker* worker = new IteratorCountWorker( + env, database, callback, limit, lt, lte, gt, gte, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Calculates the approximate size of a range in a database. + */ +NAPI_METHOD(dbApproximateSize) { + NAPI_ARGV(4); + NAPI_DB_CONTEXT(); + rocksdb::Slice start = ToSlice(env, argv[1]); + rocksdb::Slice end = ToSlice(env, argv[2]); + napi_value callback = argv[3]; + ApproximateSizeWorker* worker = + new ApproximateSizeWorker(env, database, callback, start, end); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Compacts a range in a database.
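+ *
+ * Takes a start key and an end key bounding the range to compact, plus a
+ * callback; the compaction itself runs asynchronously on a
+ * `CompactRangeWorker`.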
+ */ +NAPI_METHOD(dbCompactRange) { + NAPI_ARGV(4); + NAPI_DB_CONTEXT(); + rocksdb::Slice start = ToSlice(env, argv[1]); + rocksdb::Slice end = ToSlice(env, argv[2]); + napi_value callback = argv[3]; + CompactRangeWorker* worker = + new CompactRangeWorker(env, database, callback, start, end); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Get a property from a database. + */ +NAPI_METHOD(dbGetProperty) { + NAPI_ARGV(2); + NAPI_DB_CONTEXT(); + rocksdb::Slice property = ToSlice(env, argv[1]); + std::string value; + database->GetProperty(property, &value); + napi_value result; + napi_create_string_utf8(env, value.data(), value.size(), &result); + DisposeSliceBuffer(property); + return result; +} + +/** + * Gets a snapshot from the database + */ +NAPI_METHOD(snapshotInit) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(1); + NAPI_DB_CONTEXT(); + const uint32_t id = database->currentSnapshotId_++; + Snapshot* snapshot = new Snapshot(database, id); + // Opaque JS value acting as a reference to `rocksdb::Snapshot` + napi_value snapshot_ref; + NAPI_STATUS_THROWS( + napi_create_external(env, snapshot, GCSnapshot, nullptr, &snapshot_ref)); + snapshot->Attach(env, snapshot_ref); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + return snapshot_ref; +} + +/** + * Releases a snapshot from the database + */ +NAPI_METHOD(snapshotRelease) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(2); + NAPI_SNAPSHOT_CONTEXT(); + napi_value callback = argv[1]; + if (snapshot->isReleasing_ || snapshot->hasReleased_) { + napi_value callback_error; + napi_get_null(env, &callback_error); + NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &callback_error)); + NAPI_RETURN_UNDEFINED(); + } + SnapshotReleaseDo(env, snapshot, callback); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Destroys a database. + */ +NAPI_METHOD(destroyDb) { + NAPI_ARGV(2); + NAPI_ARGV_UTF8_NEW(location, 0); + napi_value callback = argv[1]; + DestroyWorker* worker = new DestroyWorker(env, location, callback); + worker->Queue(env); + delete[] location; + NAPI_RETURN_UNDEFINED(); +} + +/** + * Repairs a database. + */ +NAPI_METHOD(repairDb) { + NAPI_ARGV(2); + NAPI_ARGV_UTF8_NEW(location, 0); + napi_value callback = argv[1]; + RepairWorker* worker = new RepairWorker(env, location, callback); + worker->Queue(env); + delete[] location; + NAPI_RETURN_UNDEFINED(); +} + +/** + * Create an iterator.
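+ *
+ * All options are read from the second argument, with these defaults:
+ * reverse (false), keys (true), values (true), fillCache (false),
+ * limit (-1, unlimited), highWaterMarkBytes (16 KiB), plus optional
+ * lt/lte/gt/gte range bounds, keyEncoding/valueEncoding and snapshot.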
+ */ +NAPI_METHOD(iteratorInit) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(2); + NAPI_DB_CONTEXT(); + napi_value options = argv[1]; + const bool reverse = BooleanProperty(env, options, "reverse", false); + const bool keys = BooleanProperty(env, options, "keys", true); + const bool values = BooleanProperty(env, options, "values", true); + const bool fillCache = BooleanProperty(env, options, "fillCache", false); + const bool keyAsBuffer = EncodingIsBuffer(env, options, "keyEncoding"); + const bool valueAsBuffer = EncodingIsBuffer(env, options, "valueEncoding"); + const int limit = Int32Property(env, options, "limit", -1); + const uint32_t highWaterMarkBytes = + Uint32Property(env, options, "highWaterMarkBytes", 16 * 1024); + std::string* lt = RangeOption(env, options, "lt"); + std::string* lte = RangeOption(env, options, "lte"); + std::string* gt = RangeOption(env, options, "gt"); + std::string* gte = RangeOption(env, options, "gte"); + const Snapshot* snapshot = SnapshotProperty(env, options, "snapshot"); + const uint32_t id = database->currentIteratorId_++; + Iterator* iterator = new Iterator( + database, id, reverse, keys, values, limit, lt, lte, gt, gte, fillCache, + keyAsBuffer, valueAsBuffer, highWaterMarkBytes, snapshot); + napi_value iterator_ref; + NAPI_STATUS_THROWS( + napi_create_external(env, iterator, GCIterator, NULL, &iterator_ref)); + iterator->Attach(env, iterator_ref); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + return iterator_ref; +} + +/** + * Seeks an iterator. + */ +NAPI_METHOD(iteratorSeek) { + NAPI_ARGV(2); + NAPI_ITERATOR_CONTEXT(); + if (iterator->isClosing_ || iterator->hasClosed_) { + NAPI_RETURN_UNDEFINED(); + } + rocksdb::Slice target = ToSlice(env, argv[1]); + iterator->first_ = true; + iterator->Seek(target); + DisposeSliceBuffer(target); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Closes an iterator. + */ +NAPI_METHOD(iteratorClose) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(2); + NAPI_ITERATOR_CONTEXT(); + napi_value callback = argv[1]; + if (iterator->isClosing_ || iterator->hasClosed_) { + napi_value callback_error; + napi_get_null(env, &callback_error); + NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &callback_error)); + NAPI_RETURN_UNDEFINED(); + } + IteratorCloseDo(env, iterator, callback); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Advance repeatedly and get multiple entries at once. + */ +NAPI_METHOD(iteratorNextv) { + NAPI_ARGV(3); + NAPI_ITERATOR_CONTEXT(); + uint32_t size; + NAPI_STATUS_THROWS(napi_get_value_uint32(env, argv[1], &size)); + if (size == 0) size = 1; + napi_value callback = argv[2]; + if (iterator->isClosing_ || iterator->hasClosed_) { + napi_value argv = + CreateCodeError(env, "ITERATOR_NOT_OPEN", "Iterator is not open"); + NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &argv)); + NAPI_RETURN_UNDEFINED(); + } + IteratorNextWorker* worker = + new IteratorNextWorker(env, iterator, size, callback); + iterator->nexting_ = true; + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Does a batch write operation on a database.
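+ *
+ * The operations array is a list of plain objects; a sketch of the shape
+ * this function expects (inferred from the property reads below):
+ *
+ *   [{ type: 'put', key: 'foo', value: 'bar' },
+ *    { type: 'del', key: 'baz' }]
+ *
+ * Elements that are not objects, or that lack the required properties,
+ * are silently skipped rather than rejected.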
+ */ +NAPI_METHOD(batchDo) { + NAPI_ARGV(4); + NAPI_DB_CONTEXT(); + napi_value array = argv[1]; + const bool sync = BooleanProperty(env, argv[2], "sync", false); + napi_value callback = argv[3]; + uint32_t length; + napi_get_array_length(env, array, &length); + rocksdb::WriteBatch* batch = new rocksdb::WriteBatch(); + bool hasData = false; + for (uint32_t i = 0; i < length; i++) { + napi_value element; + napi_get_element(env, array, i, &element); + if (!IsObject(env, element)) continue; + std::string type = StringProperty(env, element, "type"); + if (type == "del") { + if (!HasProperty(env, element, "key")) continue; + rocksdb::Slice key = ToSlice(env, GetProperty(env, element, "key")); + batch->Delete(key); + if (!hasData) hasData = true; + DisposeSliceBuffer(key); + } else if (type == "put") { + if (!HasProperty(env, element, "key")) continue; + if (!HasProperty(env, element, "value")) continue; + rocksdb::Slice key = ToSlice(env, GetProperty(env, element, "key")); + rocksdb::Slice value = ToSlice(env, GetProperty(env, element, "value")); + batch->Put(key, value); + if (!hasData) hasData = true; + DisposeSliceBuffer(key); + DisposeSliceBuffer(value); + } + } + BatchWorker* worker = + new BatchWorker(env, database, callback, batch, sync, hasData); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Return a batch object. + */ +NAPI_METHOD(batchInit) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(1); + NAPI_DB_CONTEXT(); + Batch* batch = new Batch(database); + napi_value result; + NAPI_STATUS_THROWS(napi_create_external(env, batch, GCBatch, NULL, &result)); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + return result; +} + +/** + * Adds a put instruction to a batch object. + */ +NAPI_METHOD(batchPut) { + NAPI_ARGV(3); + NAPI_BATCH_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + rocksdb::Slice value = ToSlice(env, argv[2]); + batch->Put(key, value); + DisposeSliceBuffer(key); + DisposeSliceBuffer(value); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Adds a delete instruction to a batch object. + */ +NAPI_METHOD(batchDel) { + NAPI_ARGV(2); + NAPI_BATCH_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + batch->Del(key); + DisposeSliceBuffer(key); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Clears a batch object. + */ +NAPI_METHOD(batchClear) { + NAPI_ARGV(1); + NAPI_BATCH_CONTEXT(); + batch->Clear(); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Writes a batch object. 
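+ *
+ * Completes the chained batch lifecycle (a sketch, using the functions
+ * defined above):
+ *
+ *   batch = batchInit(db)
+ *   batchPut(batch, key, value) / batchDel(batch, key)
+ *   batchWrite(batch, { sync: false }, callback)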
+ */ +NAPI_METHOD(batchWrite) { + NAPI_ARGV(3); + NAPI_BATCH_CONTEXT(); + napi_value options = argv[1]; + const bool sync = BooleanProperty(env, options, "sync", false); + napi_value callback = argv[2]; + BatchWriteWorker* worker = + new BatchWriteWorker(env, argv[0], batch, callback, sync); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Creates a transaction + * + * @returns {napi_value} A `napi_external` that references `Transaction` + */ +NAPI_METHOD(transactionInit) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(2); + NAPI_DB_CONTEXT(); + napi_value options = argv[1]; + const bool sync = BooleanProperty(env, options, "sync", false); + const uint32_t id = database->currentTransactionId_++; + Transaction* transaction = new Transaction(database, id, sync); + // Opaque JS value acting as a reference to `Transaction` + napi_value transaction_ref; + NAPI_STATUS_THROWS(napi_create_external(env, transaction, GCTransaction, NULL, + &transaction_ref)); + transaction->Attach(env, transaction_ref); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + return transaction_ref; +} + +/** + * Gets the id of a transaction + */ +NAPI_METHOD(transactionId) { + NAPI_ARGV(1); + NAPI_TRANSACTION_CONTEXT(); + ASSERT_TRANSACTION_READY(env, transaction); + // This uses our own id instead of `Transaction::GetID()` and + // `Transaction::GetId()` + const uint32_t id = transaction->id_; + NAPI_RETURN_UINT32(id); +} + +/** + * Commit transaction + */ +NAPI_METHOD(transactionCommit) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(2); + NAPI_TRANSACTION_CONTEXT(); + assert(!transaction->isRollbacking_ && !transaction->hasRollbacked_); + napi_value callback = argv[1]; + if (transaction->isCommitting_ || transaction->hasCommitted_) { + napi_value callback_error; + napi_get_null(env, &callback_error); + NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &callback_error)); + NAPI_RETURN_UNDEFINED(); + } + TransactionCommitWorker* worker = + new TransactionCommitWorker(env, transaction, callback); + transaction->isCommitting_ = true; + if (!transaction->HasPendingWork()) { + LOG_DEBUG("%s:Queuing TransactionCommitWorker\n", __func__); + worker->Queue(env); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + NAPI_RETURN_UNDEFINED(); + } + LOG_DEBUG("%s:Delayed TransactionCommitWorker\n", __func__); + transaction->closeWorker_ = worker; + napi_value noop; + napi_create_function(env, NULL, 0, noop_callback, NULL, &noop); + // Close transactional iterators + std::map<uint32_t, Iterator*> iterators = transaction->iterators_; + std::map<uint32_t, Iterator*>::iterator iterator_it; + for (iterator_it = iterators.begin(); iterator_it != iterators.end(); + ++iterator_it) { + Iterator* iterator = iterator_it->second; + if (iterator->isClosing_ || iterator->hasClosed_) { + continue; + } + LOG_DEBUG("%s:Closing Iterator %d\n", __func__, iterator->id_); + IteratorCloseDo(env, iterator, noop); + } + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Rollback transaction + */ +NAPI_METHOD(transactionRollback) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(2); + NAPI_TRANSACTION_CONTEXT(); + assert(!transaction->isCommitting_ && !transaction->hasCommitted_); + napi_value callback = argv[1]; + if (transaction->isRollbacking_ || transaction->hasRollbacked_) { + napi_value callback_error; + napi_get_null(env, &callback_error); + NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &callback_error)); + NAPI_RETURN_UNDEFINED(); + } + TransactionRollbackDo(env, transaction, callback); + LOG_DEBUG("%s:Called %s\n",
__func__, __func__); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Gets a value from a transaction + */ +NAPI_METHOD(transactionGet) { + NAPI_ARGV(4); + NAPI_TRANSACTION_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + napi_value options = argv[2]; + const bool asBuffer = EncodingIsBuffer(env, options, "valueEncoding"); + const bool fillCache = BooleanProperty(env, options, "fillCache", true); + const TransactionSnapshot* snapshot = + TransactionSnapshotProperty(env, options, "snapshot"); + napi_value callback = argv[3]; + ASSERT_TRANSACTION_READY_CB(env, transaction, callback); + TransactionGetWorker* worker = new TransactionGetWorker( + env, transaction, callback, key, asBuffer, fillCache, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Gets a value for update from a transaction + */ +NAPI_METHOD(transactionGetForUpdate) { + NAPI_ARGV(4); + NAPI_TRANSACTION_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + napi_value options = argv[2]; + const bool asBuffer = EncodingIsBuffer(env, options, "valueEncoding"); + const bool fillCache = BooleanProperty(env, options, "fillCache", true); + const TransactionSnapshot* snapshot = + TransactionSnapshotProperty(env, options, "snapshot"); + napi_value callback = argv[3]; + ASSERT_TRANSACTION_READY_CB(env, transaction, callback); + TransactionGetForUpdateWorker* worker = new TransactionGetForUpdateWorker( + env, transaction, callback, key, asBuffer, fillCache, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Gets many values from a transaction + */ +NAPI_METHOD(transactionMultiGet) { + NAPI_ARGV(4); + NAPI_TRANSACTION_CONTEXT(); + const std::vector<rocksdb::Slice>* keys = KeyArray(env, argv[1]); + napi_value options = argv[2]; + const bool asBuffer = EncodingIsBuffer(env, options, "valueEncoding"); + const bool fillCache = BooleanProperty(env, options, "fillCache", true); + const TransactionSnapshot* snapshot = + TransactionSnapshotProperty(env, options, "snapshot"); + napi_value callback = argv[3]; + TransactionMultiGetWorker* worker = new TransactionMultiGetWorker( + env, transaction, keys, callback, asBuffer, fillCache, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Gets many values for update from a transaction + */ +NAPI_METHOD(transactionMultiGetForUpdate) { + NAPI_ARGV(4); + NAPI_TRANSACTION_CONTEXT(); + const std::vector<rocksdb::Slice>* keys = KeyArray(env, argv[1]); + napi_value options = argv[2]; + const bool asBuffer = EncodingIsBuffer(env, options, "valueEncoding"); + const bool fillCache = BooleanProperty(env, options, "fillCache", true); + const TransactionSnapshot* snapshot = + TransactionSnapshotProperty(env, options, "snapshot"); + napi_value callback = argv[3]; + TransactionMultiGetForUpdateWorker* worker = + new TransactionMultiGetForUpdateWorker(env, transaction, keys, callback, + asBuffer, fillCache, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Puts a key and a value to a transaction + */ +NAPI_METHOD(transactionPut) { + NAPI_ARGV(4); + NAPI_TRANSACTION_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + rocksdb::Slice value = ToSlice(env, argv[2]); + napi_value callback = argv[3]; + ASSERT_TRANSACTION_READY_CB(env, transaction, callback); + TransactionPutWorker* worker = + new TransactionPutWorker(env, transaction, callback, key, value); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Delete a value from a transaction.
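+ * Like the other transactional mutators, this refuses to run once the
+ * transaction has committed or rolled back; `ASSERT_TRANSACTION_READY_CB`
+ * below reports that case through the callback.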
+ */ +NAPI_METHOD(transactionDel) { + NAPI_ARGV(3); + NAPI_TRANSACTION_CONTEXT(); + rocksdb::Slice key = ToSlice(env, argv[1]); + napi_value callback = argv[2]; + ASSERT_TRANSACTION_READY_CB(env, transaction, callback); + TransactionDelWorker* worker = + new TransactionDelWorker(env, transaction, callback, key); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Gets a snapshot from a transaction + */ +NAPI_METHOD(transactionSnapshot) { + NAPI_ARGV(1); + NAPI_TRANSACTION_CONTEXT(); + ASSERT_TRANSACTION_READY(env, transaction); + TransactionSnapshot* snapshot = new TransactionSnapshot(transaction); + // Opaque JS value acting as a reference to `rocksdb::Snapshot` + napi_value snapshot_ref; + NAPI_STATUS_THROWS(napi_create_external(env, snapshot, GCTransactionSnapshot, + nullptr, &snapshot_ref)); + return snapshot_ref; +} + +/** + * Creates an iterator from a transaction + */ +NAPI_METHOD(transactionIteratorInit) { + LOG_DEBUG("%s:Calling %s\n", __func__, __func__); + NAPI_ARGV(2); + NAPI_TRANSACTION_CONTEXT(); + ASSERT_TRANSACTION_READY(env, transaction); + napi_value options = argv[1]; + const bool reverse = BooleanProperty(env, options, "reverse", false); + const bool keys = BooleanProperty(env, options, "keys", true); + const bool values = BooleanProperty(env, options, "values", true); + const bool fillCache = BooleanProperty(env, options, "fillCache", false); + const bool keyAsBuffer = EncodingIsBuffer(env, options, "keyEncoding"); + const bool valueAsBuffer = EncodingIsBuffer(env, options, "valueEncoding"); + const int limit = Int32Property(env, options, "limit", -1); + const uint32_t highWaterMarkBytes = + Uint32Property(env, options, "highWaterMarkBytes", 16 * 1024); + std::string* lt = RangeOption(env, options, "lt"); + std::string* lte = RangeOption(env, options, "lte"); + std::string* gt = RangeOption(env, options, "gt"); + std::string* gte = RangeOption(env, options, "gte"); + const TransactionSnapshot* snapshot = + TransactionSnapshotProperty(env, options, "snapshot"); + const uint32_t id = transaction->currentIteratorId_++; + Iterator* iterator = new Iterator( + transaction, id, reverse, keys, values, limit, lt, lte, gt, gte, + fillCache, keyAsBuffer, valueAsBuffer, highWaterMarkBytes, snapshot); + napi_value iterator_ref; + NAPI_STATUS_THROWS( + napi_create_external(env, iterator, GCIterator, NULL, &iterator_ref)); + iterator->Attach(env, iterator_ref); + LOG_DEBUG("%s:Called %s\n", __func__, __func__); + return iterator_ref; +} + +/** + * Delete a range from a transaction + */ +NAPI_METHOD(transactionClear) { + NAPI_ARGV(3); + NAPI_TRANSACTION_CONTEXT(); + ASSERT_TRANSACTION_READY(env, transaction); + napi_value options = argv[1]; + napi_value callback = argv[2]; + const int limit = Int32Property(env, options, "limit", -1); + std::string* lt = RangeOption(env, options, "lt"); + std::string* lte = RangeOption(env, options, "lte"); + std::string* gt = RangeOption(env, options, "gt"); + std::string* gte = RangeOption(env, options, "gte"); + const TransactionSnapshot* snapshot = + TransactionSnapshotProperty(env, options, "snapshot"); + IteratorClearWorker* worker = new IteratorClearWorker( + env, transaction, callback, limit, lt, lte, gt, gte, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * Count a range from a transaction + */ +NAPI_METHOD(transactionCount) { + NAPI_ARGV(3); + NAPI_TRANSACTION_CONTEXT(); + ASSERT_TRANSACTION_READY(env, transaction); + napi_value options = argv[1]; + napi_value callback = argv[2]; + const int limit = Int32Property(env, options, "limit", -1); + std::string* lt = RangeOption(env, options, "lt"); + std::string* lte = RangeOption(env, options, "lte"); + std::string* gt = RangeOption(env, options, "gt"); +
std::string* gte = RangeOption(env, options, "gte"); + const TransactionSnapshot* snapshot = + TransactionSnapshotProperty(env, options, "snapshot"); + IteratorCountWorker* worker = new IteratorCountWorker( + env, transaction, callback, limit, lt, lte, gt, gte, snapshot); + worker->Queue(env); + NAPI_RETURN_UNDEFINED(); +} + +/** + * All exported functions. + */ +NAPI_INIT() { + // Check `NODE_DEBUG_NATIVE` environment variable + CheckNodeDebugNative(); + + NAPI_EXPORT_FUNCTION(dbInit); + NAPI_EXPORT_FUNCTION(dbOpen); + NAPI_EXPORT_FUNCTION(dbClose); + NAPI_EXPORT_FUNCTION(dbGet); + NAPI_EXPORT_FUNCTION(dbMultiGet); + NAPI_EXPORT_FUNCTION(dbPut); + NAPI_EXPORT_FUNCTION(dbDel); + NAPI_EXPORT_FUNCTION(dbClear); + NAPI_EXPORT_FUNCTION(dbCount); + NAPI_EXPORT_FUNCTION(dbApproximateSize); + NAPI_EXPORT_FUNCTION(dbCompactRange); + NAPI_EXPORT_FUNCTION(dbGetProperty); + + NAPI_EXPORT_FUNCTION(snapshotInit); + NAPI_EXPORT_FUNCTION(snapshotRelease); + + NAPI_EXPORT_FUNCTION(destroyDb); + NAPI_EXPORT_FUNCTION(repairDb); + + NAPI_EXPORT_FUNCTION(iteratorInit); + NAPI_EXPORT_FUNCTION(iteratorSeek); + NAPI_EXPORT_FUNCTION(iteratorNextv); + NAPI_EXPORT_FUNCTION(iteratorClose); + + NAPI_EXPORT_FUNCTION(batchDo); + NAPI_EXPORT_FUNCTION(batchInit); + NAPI_EXPORT_FUNCTION(batchPut); + NAPI_EXPORT_FUNCTION(batchDel); + NAPI_EXPORT_FUNCTION(batchClear); + NAPI_EXPORT_FUNCTION(batchWrite); + + NAPI_EXPORT_FUNCTION(transactionInit); + NAPI_EXPORT_FUNCTION(transactionId); + NAPI_EXPORT_FUNCTION(transactionCommit); + NAPI_EXPORT_FUNCTION(transactionRollback); + NAPI_EXPORT_FUNCTION(transactionGet); + NAPI_EXPORT_FUNCTION(transactionGetForUpdate); + NAPI_EXPORT_FUNCTION(transactionMultiGet); + NAPI_EXPORT_FUNCTION(transactionMultiGetForUpdate); + NAPI_EXPORT_FUNCTION(transactionPut); + NAPI_EXPORT_FUNCTION(transactionDel); + NAPI_EXPORT_FUNCTION(transactionSnapshot); + NAPI_EXPORT_FUNCTION(transactionIteratorInit); + NAPI_EXPORT_FUNCTION(transactionClear); + NAPI_EXPORT_FUNCTION(transactionCount); +} diff --git a/src/rocksdb/napi/iterator.cpp b/src/rocksdb/napi/iterator.cpp new file mode 100644 index 00000000..11782e4d --- /dev/null +++ b/src/rocksdb/napi/iterator.cpp @@ -0,0 +1,386 @@ +#define NAPI_VERSION 3 + +#include "iterator.h" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "debug.h" +#include "database.h" +#include "transaction.h" +#include "snapshot.h" + +Entry::Entry(const rocksdb::Slice* key, const rocksdb::Slice* value) + : key_(key->data(), key->size()), value_(value->data(), value->size()) {} + +void Entry::ConvertByMode(napi_env env, Mode mode, const bool keyAsBuffer, + const bool valueAsBuffer, napi_value* result) { + if (mode == Mode::entries) { + napi_create_array_with_length(env, 2, result); + + napi_value keyElement; + napi_value valueElement; + + Convert(env, &key_, keyAsBuffer, &keyElement); + Convert(env, &value_, valueAsBuffer, &valueElement); + + napi_set_element(env, *result, 0, keyElement); + napi_set_element(env, *result, 1, valueElement); + } else if (mode == Mode::keys) { + Convert(env, &key_, keyAsBuffer, result); + } else { + Convert(env, &value_, valueAsBuffer, result); + } +} + +void Entry::Convert(napi_env env, const std::string* s, const bool asBuffer, + napi_value* result) { + if (s == NULL) { + napi_get_undefined(env, result); + } else if (asBuffer) { + napi_create_buffer_copy(env, s->size(), s->data(), NULL, result); + } else { + napi_create_string_utf8(env, s->data(), s->size(), result); + } +} + 
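+
+// A worked sketch of `Entry::ConvertByMode` (illustrative only): with
+// `Mode::entries` and both encodings set to buffers, an entry for the pair
+// ("a", "1") becomes the JS array [Buffer('a'), Buffer('1')]; with
+// `Mode::keys` it becomes just Buffer('a'), and with `Mode::values` just
+// Buffer('1').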
+BaseIterator::BaseIterator(Database* database, const bool reverse, + std::string* lt, std::string* lte, std::string* gt, + std::string* gte, const int limit, + const bool fillCache, const Snapshot* snapshot) + : database_(database), + transaction_(nullptr), + hasClosed_(false), + didSeek_(false), + reverse_(reverse), + lt_(lt), + lte_(lte), + gt_(gt), + gte_(gte), + limit_(limit), + count_(0) { + LOG_DEBUG("BaseIterator:Constructing BaseIterator from Database\n"); + options_ = new rocksdb::ReadOptions(); + options_->fill_cache = fillCache; + options_->verify_checksums = false; + if (snapshot != nullptr) options_->snapshot = snapshot->snapshot(); + iter_ = database->NewIterator(*options_); + LOG_DEBUG("BaseIterator:Constructed BaseIterator from Database\n"); +} + +BaseIterator::BaseIterator(Transaction* transaction, const bool reverse, + std::string* lt, std::string* lte, std::string* gt, + std::string* gte, const int limit, + const bool fillCache, + const TransactionSnapshot* snapshot) + : database_(nullptr), + transaction_(transaction), + hasClosed_(false), + didSeek_(false), + reverse_(reverse), + lt_(lt), + lte_(lte), + gt_(gt), + gte_(gte), + limit_(limit), + count_(0) { + options_ = new rocksdb::ReadOptions(); + options_->fill_cache = fillCache; + options_->verify_checksums = false; + if (snapshot != nullptr) options_->snapshot = snapshot->snapshot(); + iter_ = transaction->GetIterator(*options_); +} + +BaseIterator::~BaseIterator() { + assert(hasClosed_); + delete iter_; + delete options_; + if (lt_ != nullptr) delete lt_; + if (gt_ != nullptr) delete gt_; + if (lte_ != nullptr) delete lte_; + if (gte_ != nullptr) delete gte_; +} + +void BaseIterator::Close() { + if (hasClosed_) return; + hasClosed_ = true; + delete iter_; + iter_ = nullptr; + delete options_; + options_ = nullptr; + if (lt_ != nullptr) { + delete lt_; + lt_ = nullptr; + } + if (gt_ != nullptr) { + delete gt_; + gt_ = nullptr; + } + if (lte_ != nullptr) { + delete lte_; + lte_ = nullptr; + } + if (gte_ != nullptr) { + delete gte_; + gte_ = nullptr; + } +} + +bool BaseIterator::DidSeek() const { return didSeek_; } + +void BaseIterator::SeekToRange() { + assert(!hasClosed_); + + didSeek_ = true; + + if (!reverse_ && gte_ != NULL) { + iter_->Seek(*gte_); + } else if (!reverse_ && gt_ != NULL) { + iter_->Seek(*gt_); + + if (iter_->Valid() && iter_->key().compare(*gt_) == 0) { + iter_->Next(); + } + } else if (reverse_ && lte_ != NULL) { + iter_->Seek(*lte_); + + if (!iter_->Valid()) { + iter_->SeekToLast(); + } else if (iter_->key().compare(*lte_) > 0) { + iter_->Prev(); + } + } else if (reverse_ && lt_ != NULL) { + iter_->Seek(*lt_); + + if (!iter_->Valid()) { + iter_->SeekToLast(); + } else if (iter_->key().compare(*lt_) >= 0) { + iter_->Prev(); + } + } else if (reverse_) { + iter_->SeekToLast(); + } else { + iter_->SeekToFirst(); + } +} + +void BaseIterator::Seek(rocksdb::Slice& target) { + assert(!hasClosed_); + didSeek_ = true; + if (OutOfRange(target)) { + return SeekToEnd(); + } + iter_->Seek(target); + if (iter_->Valid()) { + int cmp = iter_->key().compare(target); + if (reverse_ ? cmp > 0 : cmp < 0) { + Next(); + } + } else { + SeekToFirst(); + if (iter_->Valid()) { + int cmp = iter_->key().compare(target); + if (reverse_ ? 
cmp > 0 : cmp < 0) { + SeekToEnd(); + } + } + } +} + +bool BaseIterator::Valid() const { + assert(!hasClosed_); + return iter_->Valid() && !OutOfRange(iter_->key()); +} + +bool BaseIterator::Increment() { + assert(!hasClosed_); + return limit_ < 0 || ++count_ <= limit_; +} + +void BaseIterator::Next() { + assert(!hasClosed_); + if (reverse_) { + iter_->Prev(); + } else { + iter_->Next(); + } +} + +void BaseIterator::SeekToFirst() { + assert(!hasClosed_); + if (reverse_) { + iter_->SeekToLast(); + } else { + iter_->SeekToFirst(); + } +} + +void BaseIterator::SeekToLast() { + assert(!hasClosed_); + if (reverse_) { + iter_->SeekToFirst(); + } else { + iter_->SeekToLast(); + } +} + +void BaseIterator::SeekToEnd() { + SeekToLast(); + Next(); +} + +rocksdb::Slice BaseIterator::CurrentKey() const { return iter_->key(); } + +rocksdb::Slice BaseIterator::CurrentValue() const { return iter_->value(); } + +rocksdb::Status BaseIterator::Status() const { return iter_->status(); } + +bool BaseIterator::OutOfRange(const rocksdb::Slice& target) const { + // The lte and gte options take precedence over lt and gt respectively + if (lte_ != NULL) { + if (target.compare(*lte_) > 0) return true; + } else if (lt_ != NULL) { + if (target.compare(*lt_) >= 0) return true; + } + if (gte_ != NULL) { + if (target.compare(*gte_) < 0) return true; + } else if (gt_ != NULL) { + if (target.compare(*gt_) <= 0) return true; + } + return false; +} + +Iterator::Iterator(Database* database, const uint32_t id, const bool reverse, + const bool keys, const bool values, const int limit, + std::string* lt, std::string* lte, std::string* gt, + std::string* gte, const bool fillCache, + const bool keyAsBuffer, const bool valueAsBuffer, + const uint32_t highWaterMarkBytes, const Snapshot* snapshot) + : BaseIterator(database, reverse, lt, lte, gt, gte, limit, fillCache, + snapshot), + id_(id), + keys_(keys), + values_(values), + keyAsBuffer_(keyAsBuffer), + valueAsBuffer_(valueAsBuffer), + highWaterMarkBytes_(highWaterMarkBytes), + first_(true), + nexting_(false), + isClosing_(false), + closeWorker_(nullptr), + ref_(nullptr) { + LOG_DEBUG("Iterator %d:Constructing from Database\n", id_); + LOG_DEBUG("Iterator %d:Constructed from Database\n", id_); +} + +Iterator::Iterator(Transaction* transaction, const uint32_t id, + const bool reverse, const bool keys, const bool values, + const int limit, std::string* lt, std::string* lte, + std::string* gt, std::string* gte, const bool fillCache, + const bool keyAsBuffer, const bool valueAsBuffer, + const uint32_t highWaterMarkBytes, + const TransactionSnapshot* snapshot) + : BaseIterator(transaction, reverse, lt, lte, gt, gte, limit, fillCache, + snapshot), + id_(id), + keys_(keys), + values_(values), + keyAsBuffer_(keyAsBuffer), + valueAsBuffer_(valueAsBuffer), + highWaterMarkBytes_(highWaterMarkBytes), + first_(true), + nexting_(false), + isClosing_(false), + closeWorker_(nullptr), + ref_(nullptr) { + LOG_DEBUG("Iterator %d:Constructing from Transaction %d\n", id_, + transaction->id_); + LOG_DEBUG("Iterator %d:Constructed from Transaction %d\n", id_, + transaction->id_); +} + +Iterator::~Iterator() { + LOG_DEBUG("Iterator %d:Destroying\n", id_); + // The `BaseIterator` destructor runs automatically after this body; + // calling it explicitly here would destroy the base twice + LOG_DEBUG("Iterator %d:Destroyed\n", id_); +} + +void Iterator::Attach(napi_env env, napi_value iterator_ref) { + LOG_DEBUG("Iterator %d:Calling %s\n", id_, __func__); + assert(database_ != nullptr || transaction_ != nullptr); + if (ref_ != nullptr) { + LOG_DEBUG("Iterator %d:Called %s\n", id_, __func__); + return; + }
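+ // Pin this object with a strong JS reference (count 1) so the external
+ // cannot be garbage collected while attached, then register it with the
+ // owning Database or Transaction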
+ NAPI_STATUS_THROWS_VOID(napi_create_reference(env, iterator_ref, 1, &ref_)); + if (database_ != nullptr) { + database_->AttachIterator(env, id_, this); + } else if (transaction_ != nullptr) { + transaction_->AttachIterator(env, id_, this); + } + LOG_DEBUG("Iterator %d:Called %s\n", id_, __func__); +} + +void Iterator::Detach(napi_env env) { + LOG_DEBUG("Iterator %d:Calling %s\n", id_, __func__); + assert(database_ != nullptr || transaction_ != nullptr); + if (ref_ == nullptr) { + LOG_DEBUG("Iterator %d:Called %s\n", id_, __func__); + return; + } + if (database_ != nullptr) { + database_->DetachIterator(env, id_); + } else if (transaction_ != nullptr) { + transaction_->DetachIterator(env, id_); + } + NAPI_STATUS_THROWS_VOID(napi_delete_reference(env, ref_)); + ref_ = nullptr; + LOG_DEBUG("Iterator %d:Called %s\n", id_, __func__); +} + +void Iterator::Close() { + LOG_DEBUG("Iterator %d:Calling %s\n", id_, __func__); + BaseIterator::Close(); + LOG_DEBUG("Iterator %d:Called %s\n", id_, __func__); +} + +bool Iterator::ReadMany(uint32_t size) { + assert(!hasClosed_); + cache_.clear(); + cache_.reserve(size); + size_t bytesRead = 0; + rocksdb::Slice empty; + while (true) { + if (!first_) { + Next(); + } else { + first_ = false; + } + if (!Valid() || !Increment()) break; + if (keys_ && values_) { + rocksdb::Slice k = CurrentKey(); + rocksdb::Slice v = CurrentValue(); + cache_.emplace_back(&k, &v); + bytesRead += k.size() + v.size(); + } else if (keys_) { + rocksdb::Slice k = CurrentKey(); + cache_.emplace_back(&k, &empty); + bytesRead += k.size(); + } else if (values_) { + rocksdb::Slice v = CurrentValue(); + cache_.emplace_back(&empty, &v); + bytesRead += v.size(); + } + if (bytesRead > highWaterMarkBytes_ || cache_.size() >= size) { + return true; + } + } + return false; +} diff --git a/src/rocksdb/napi/iterator.h b/src/rocksdb/napi/iterator.h new file mode 100644 index 00000000..583efe38 --- /dev/null +++ b/src/rocksdb/napi/iterator.h @@ -0,0 +1,193 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include + +#include +#include +#include +#include +#include + +#include "database.h" +#include "transaction.h" +#include "snapshot.h" +#include "worker.h" + +/** + * Whether to yield entries, keys or values. + */ +enum Mode { entries, keys, values }; + +/** + * Helper struct for caching and converting a key-value pair to napi_values.
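+ * Note that the constructor copies both slices into owned std::strings;
+ * this matters because a rocksdb::Slice returned by an iterator is only
+ * valid until the iterator is moved or closed.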
+ */ +struct Entry { + Entry(const rocksdb::Slice* key, const rocksdb::Slice* value); + + void ConvertByMode(napi_env env, Mode mode, const bool keyAsBuffer, + const bool valueAsBuffer, napi_value* result); + + static void Convert(napi_env env, const std::string* s, const bool asBuffer, + napi_value* result); + + private: + std::string key_; + std::string value_; +}; + +/** + * Iterator wrapper used internally + * Lifecycle controlled manually in C++ + */ +struct BaseIterator { + /** + * Constructs iterator from database + */ + BaseIterator(Database* database, const bool reverse, std::string* lt, + std::string* lte, std::string* gt, std::string* gte, + const int limit, const bool fillCache, + const Snapshot* snapshot = nullptr); + + /** + * Constructs iterator from transaction + */ + BaseIterator(Transaction* transaction, const bool reverse, std::string* lt, + std::string* lte, std::string* gt, std::string* gte, + const int limit, const bool fillCache, + const TransactionSnapshot* snapshot = nullptr); + + /** + * Destroy iterator + * Call `BaseIterator::Close` beforehand + */ + virtual ~BaseIterator(); + + /** + * Closes the iterator + * Repeating this call is idempotent + */ + virtual void Close(); + + bool DidSeek() const; + + /** + * Seek to the first relevant key based on range options. + */ + void SeekToRange(); + + /** + * Seek manually (during iteration). + */ + void Seek(rocksdb::Slice& target); + + bool Valid() const; + + bool Increment(); + + void Next(); + + void SeekToFirst(); + + void SeekToLast(); + + void SeekToEnd(); + + rocksdb::Slice CurrentKey() const; + + rocksdb::Slice CurrentValue() const; + + rocksdb::Status Status() const; + + bool OutOfRange(const rocksdb::Slice& target) const; + + Database* database_; + Transaction* transaction_; + bool hasClosed_; + + private: + rocksdb::Iterator* iter_; + bool didSeek_; + const bool reverse_; + std::string* lt_; + std::string* lte_; + std::string* gt_; + std::string* gte_; + const int limit_; + int count_; + rocksdb::ReadOptions* options_; +}; + +/** + * Iterator object managed from JS + * Lifecycle controlled by JS + */ +struct Iterator final : public BaseIterator { + /** + * Constructs iterator from database + * Call `Iterator::Attach` afterwards + */ + Iterator(Database* database, const uint32_t id, const bool reverse, + const bool keys, const bool values, const int limit, std::string* lt, + std::string* lte, std::string* gt, std::string* gte, + const bool fillCache, const bool keyAsBuffer, + const bool valueAsBuffer, const uint32_t highWaterMarkBytes, + const Snapshot* snapshot = nullptr); + + /** + * Constructs iterator from transaction + * Call `Iterator::Attach` afterwards + */ + Iterator(Transaction* transaction, const uint32_t id, const bool reverse, + const bool keys, const bool values, const int limit, std::string* lt, + std::string* lte, std::string* gt, std::string* gte, + const bool fillCache, const bool keyAsBuffer, + const bool valueAsBuffer, const uint32_t highWaterMarkBytes, + const TransactionSnapshot* snapshot = nullptr); + + ~Iterator() override; + + /** + * Creates JS reference count at 1 to prevent GC of this object + * Attaches this `Iterator` to the `Database` or `Transaction` + * Repeating this call is idempotent + * Call this after `Iterator::Iterator` + */ + void Attach(napi_env env, napi_value iterator_ref); + + /** + * Deletes JS reference count to allow GC of this object + * Detaches this `Iterator` from the `Database` or `Transaction` + * Repeating this call is idempotent + * Call this after
`BaseIterator::Close` but before + * `BaseIterator::~BaseIterator` + */ + void Detach(napi_env env); + + void Close() override; + + bool ReadMany(uint32_t size); + + const uint32_t id_; + const bool keys_; + const bool values_; + const bool keyAsBuffer_; + const bool valueAsBuffer_; + const uint32_t highWaterMarkBytes_; + bool first_; + bool nexting_; + /** + * This is managed by workers + * It is used to indicate whether close is asynchronously scheduled + */ + bool isClosing_; + BaseWorker* closeWorker_; + std::vector<Entry> cache_; + + private: + napi_ref ref_; +}; diff --git a/src/rocksdb/napi/snapshot.cpp b/src/rocksdb/napi/snapshot.cpp new file mode 100644 index 00000000..3acff211 --- /dev/null +++ b/src/rocksdb/napi/snapshot.cpp @@ -0,0 +1,91 @@ +#define NAPI_VERSION 3 + +#include "snapshot.h" + +#include +#include + +#include +#include +#include + +#include "database.h" +#include "transaction.h" +#include "debug.h" + +Snapshot::Snapshot(Database* database, const uint32_t id) + : database_(database), + id_(id), + isReleasing_(false), + hasReleased_(false), + ref_(NULL) { + LOG_DEBUG("Snapshot %d:Constructing Snapshot from Database\n", id_); + snap_ = database->NewSnapshot(); + LOG_DEBUG("Snapshot %d:Constructed Snapshot from Database\n", id_); +} + +Snapshot::~Snapshot() { + LOG_DEBUG("Snapshot %d:Destroying\n", id_); + assert(hasReleased_); + // Cannot delete `snap_` because it is already deleted by `ReleaseSnapshot` + LOG_DEBUG("Snapshot %d:Destroyed\n", id_); +} + +void Snapshot::Attach(napi_env env, napi_value snapshot_ref) { + LOG_DEBUG("Snapshot %d:Calling %s\n", id_, __func__); + if (ref_ != nullptr) { + LOG_DEBUG("Snapshot %d:Called %s\n", id_, __func__); + return; + } + NAPI_STATUS_THROWS_VOID(napi_create_reference(env, snapshot_ref, 1, &ref_)); + database_->AttachSnapshot(env, id_, this); + LOG_DEBUG("Snapshot %d:Called %s\n", id_, __func__); +} + +void Snapshot::Detach(napi_env env) { + LOG_DEBUG("Snapshot %d:Calling %s\n", id_, __func__); + if (ref_ == nullptr) { + LOG_DEBUG("Snapshot %d:Called %s\n", id_, __func__); + return; + } + database_->DetachSnapshot(env, id_); + NAPI_STATUS_THROWS_VOID(napi_delete_reference(env, ref_)); + ref_ = nullptr; + LOG_DEBUG("Snapshot %d:Called %s\n", id_, __func__); +} + +void Snapshot::Release() { + LOG_DEBUG("Snapshot %d:Calling %s\n", id_, __func__); + if (hasReleased_) { + LOG_DEBUG("Snapshot %d:Called %s\n", id_, __func__); + return; + } + hasReleased_ = true; + // This also deletes the `rocksdb::Snapshot` + database_->ReleaseSnapshot(snap_); + LOG_DEBUG("Snapshot %d:Called %s\n", id_, __func__); +} + +const rocksdb::Snapshot* Snapshot::snapshot() const { return snap_; } + +TransactionSnapshot::TransactionSnapshot(Transaction* transaction) { + LOG_DEBUG( + "TransactionSnapshot:Constructing TransactionSnapshot from Transaction " + "%d\n", + transaction->id_); + // This ensures that the transaction has consistent writes + transaction->SetSnapshot(); + // Use this snapshot to get consistent reads + snap_ = transaction->GetSnapshot(); + LOG_DEBUG( + "TransactionSnapshot:Constructed TransactionSnapshot from Transaction " + "%d\n", + transaction->id_); +} + +TransactionSnapshot::~TransactionSnapshot() { + LOG_DEBUG("TransactionSnapshot:Destroying\n"); + LOG_DEBUG("TransactionSnapshot:Destroyed\n"); +} + +const rocksdb::Snapshot* TransactionSnapshot::snapshot() const { return snap_; } diff --git a/src/rocksdb/napi/snapshot.h b/src/rocksdb/napi/snapshot.h new file mode 100644 index 00000000..b4326858 --- /dev/null +++
b/src/rocksdb/napi/snapshot.h @@ -0,0 +1,86 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include + +#include +#include + +#include "database.h" +#include "transaction.h" + +/** + * Snapshot object managed from JS + */ +struct Snapshot final { + /** + * Constructs snapshot from database + * Call `Snapshot::Attach` afterwards + */ + Snapshot(Database* database, const uint32_t id); + + /** + * Destroys snapshot + * Call `Snapshot::Release()` then `Snapshot::Detach` beforehand + */ + virtual ~Snapshot(); + + /** + * Creates JS reference count at 1 to prevent GC of this object + * Attaches this `Snapshot` to the `Database` + * Repeating this call is idempotent + */ + void Attach(napi_env env, napi_value snapshot_ref); + + /** + * Deletes JS reference count to allow GC of this object + * Detaches this `Snapshot` from the `Database` + * Repeating this call is idempotent + */ + void Detach(napi_env env); + + /** + * Release the snapshot + * Repeating this call is idempotent + */ + void Release(); + + const rocksdb::Snapshot* snapshot() const; + + Database* database_; + const uint32_t id_; + /** + * This is managed by workers + * It is used to indicate whether release is asynchronously scheduled + */ + bool isReleasing_; + bool hasReleased_; + + private: + const rocksdb::Snapshot* snap_; + napi_ref ref_; +}; + +/** + * Snapshot to be used from JS land + * This is only for transactions + * These snapshots must not be manually released + * because transactions will automatically release snapshots + */ +struct TransactionSnapshot final { + /** + * Constructs a snapshot for a transaction + * This sets then gets the snapshot for the transaction + */ + TransactionSnapshot(Transaction* transaction); + + virtual ~TransactionSnapshot(); + + const rocksdb::Snapshot* snapshot() const; + + private: + const rocksdb::Snapshot* snap_; +}; diff --git a/src/rocksdb/napi/transaction.cpp b/src/rocksdb/napi/transaction.cpp new file mode 100644 index 00000000..84a04014 --- /dev/null +++ b/src/rocksdb/napi/transaction.cpp @@ -0,0 +1,197 @@ +#define NAPI_VERSION 3 + +#include "transaction.h" + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "database.h" +#include "iterator.h" + +Transaction::Transaction(Database* database, const uint32_t id, const bool sync) + : database_(database), + id_(id), + isCommitting_(false), + hasCommitted_(false), + isRollbacking_(false), + hasRollbacked_(false), + currentIteratorId_(0), + closeWorker_(nullptr), + pendingWork_(0), + ref_(nullptr) { + LOG_DEBUG("Transaction %d:Constructing from Database\n", id_); + options_ = new rocksdb::WriteOptions(); + options_->sync = sync; + tran_ = database->NewTransaction(*options_); + LOG_DEBUG("Transaction %d:Constructed from Database\n", id_); +} + +Transaction::~Transaction() { + LOG_DEBUG("Transaction %d:Destroying\n", id_); + assert(hasCommitted_ || hasRollbacked_); + delete tran_; + delete options_; + LOG_DEBUG("Transaction %d:Destroyed\n", id_); +} + +void Transaction::Attach(napi_env env, napi_value transaction_ref) { + LOG_DEBUG("Transaction %d:Calling %s\n", id_, __func__); + if (ref_ != nullptr) { + LOG_DEBUG("Transaction %d:Called %s\n", id_, __func__); + return; + } + NAPI_STATUS_THROWS_VOID( + napi_create_reference(env, transaction_ref, 1, &ref_)); + database_->AttachTransaction(env, id_, this); + LOG_DEBUG("Transaction %d:Called %s\n", id_, __func__); +} + +void Transaction::Detach(napi_env env) { + LOG_DEBUG("Transaction %d:Calling %s\n", 
id_, __func__); + if (ref_ == nullptr) { + LOG_DEBUG("Transaction %d:Called %s\n", id_, __func__); + return; + } + database_->DetachTransaction(env, id_); + NAPI_STATUS_THROWS_VOID(napi_delete_reference(env, ref_)); + ref_ = nullptr; + LOG_DEBUG("Transaction %d:Called %s\n", id_, __func__); +} + +rocksdb::Status Transaction::Commit() { + LOG_DEBUG("Transaction %d:Calling %s\n", id_, __func__); + assert(!hasRollbacked_); + if (hasCommitted_) { + LOG_DEBUG("Transaction %d:Called %s\n", id_, __func__); + return rocksdb::Status::OK(); + } + hasCommitted_ = true; + rocksdb::Status status = tran_->Commit(); + // If the commit failed, this object is still considered committed + // this means this object cannot be used anymore + // Early deletion + delete tran_; + tran_ = nullptr; + delete options_; + options_ = nullptr; + LOG_DEBUG("Transaction %d:Called %s\n", id_, __func__); + return status; +} + +rocksdb::Status Transaction::Rollback() { + LOG_DEBUG("Transaction %d:Calling %s\n", id_, __func__); + assert(!hasCommitted_); + if (hasRollbacked_) { + LOG_DEBUG("Transaction %d:Called %s\n", id_, __func__); + return rocksdb::Status::OK(); + } + hasRollbacked_ = true; + rocksdb::Status status = tran_->Rollback(); + // If the rollback failed, this object is still considered rollbacked + // this means this object cannot be used anymore + // Early deletion + delete tran_; + tran_ = nullptr; + delete options_; + options_ = nullptr; + LOG_DEBUG("Transaction %d:Called %s\n", id_, __func__); + return status; +} + +void Transaction::SetSnapshot() { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->SetSnapshot(); +} + +const rocksdb::Snapshot* Transaction::GetSnapshot() { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->GetSnapshot(); +} + +rocksdb::Iterator* Transaction::GetIterator( + const rocksdb::ReadOptions& options) { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->GetIterator(options); +} + +rocksdb::Status Transaction::Get(const rocksdb::ReadOptions& options, + rocksdb::Slice key, std::string& value) { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->Get(options, key, &value); +} + +rocksdb::Status Transaction::GetForUpdate(const rocksdb::ReadOptions& options, + rocksdb::Slice key, + std::string& value, bool exclusive) { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->GetForUpdate(options, key, &value, exclusive); +} + +rocksdb::Status Transaction::Put(rocksdb::Slice key, rocksdb::Slice value) { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->Put(key, value); +} + +rocksdb::Status Transaction::Del(rocksdb::Slice key) { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->Delete(key); +} + +std::vector<rocksdb::Status> Transaction::MultiGet( + const rocksdb::ReadOptions& options, + const std::vector<rocksdb::Slice>& keys, std::vector<std::string>& values) { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->MultiGet(options, keys, &values); +} + +std::vector<rocksdb::Status> Transaction::MultiGetForUpdate( + const rocksdb::ReadOptions& options, + const std::vector<rocksdb::Slice>& keys, std::vector<std::string>& values) { + assert(!hasCommitted_ && !hasRollbacked_); + return tran_->MultiGetForUpdate(options, keys, &values); +} + +void Transaction::AttachIterator(napi_env env, uint32_t id, + Iterator* iterator) { + assert(!hasCommitted_ && !hasRollbacked_); + iterators_[id] = iterator; + IncrementPendingWork(env); +} + +void Transaction::DetachIterator(napi_env env, uint32_t id) { + iterators_.erase(id); + DecrementPendingWork(env); +} + +void Transaction::IncrementPendingWork(napi_env env) { +
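+ // Piggybacks on the N-API reference count: each unit of pending work
+ // holds one extra strong reference to the JS external, which blocks GC
+ // while the work is in flight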
assert(!hasCommitted_ && !hasRollbacked_); + // The initial JS reference count starts at 1 + // therefore the `pendingWork_` will start at 1 + napi_reference_ref(env, ref_, &pendingWork_); +} + +void Transaction::DecrementPendingWork(napi_env env) { + napi_reference_unref(env, ref_, &pendingWork_); + // If the `closeWorker_` is set, then the closing operation + // is waiting until all pending work is completed + // Remember that the `pendingWork_` starts at 1 + // so when there's no pending work, `pendingWork_` will be 1 + if (closeWorker_ != nullptr && pendingWork_ == 1) { + closeWorker_->Queue(env); + closeWorker_ = nullptr; + } +} + +bool Transaction::HasPendingWork() const { + // Remember that the `pendingWork_` starts at 1 + // so when there's no pending work, `pendingWork_` will be 1 + return pendingWork_ > 1; +} diff --git a/src/rocksdb/napi/transaction.h b/src/rocksdb/napi/transaction.h new file mode 100644 index 00000000..e36f4fbf --- /dev/null +++ b/src/rocksdb/napi/transaction.h @@ -0,0 +1,201 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "database.h" + +/** + * Forward declarations + */ +struct Iterator; +struct BaseWorker; + +/** + * Transaction object managed from JS + */ +struct Transaction final { + /** + * Constructs transaction from database + * Call `Transaction::Attach` afterwards + */ + Transaction(Database* database, const uint32_t id, const bool sync); + + /** + * Destroys transaction + * Call `Transaction::Rollback()` or `Transaction::Commit()` + * then `Transaction::Detach` beforehand + */ + ~Transaction(); + + /** + * Creates JS reference count at 1 to prevent GC of this object + * Attaches this `Transaction` to the `Database` + * Repeating this call is idempotent + */ + void Attach(napi_env env, napi_value transaction_ref); + + /** + * Deletes JS reference count to allow GC of this object + * Detaches this `Transaction` from the `Database` + * Repeating this call is idempotent + */ + void Detach(napi_env env); + + /** + * Commit the transaction + * Repeating this call is idempotent + */ + rocksdb::Status Commit(); + + /** + * Rollback the transaction + * Repeating this call is idempotent + */ + rocksdb::Status Rollback(); + + /** + * Set the snapshot for the transaction + * This only affects write consistency + * It does not affect whether reads are consistent + */ + void SetSnapshot(); + + /** + * Get the snapshot that was set for the transaction + * If you don't set the snapshot beforehand, this will return `nullptr` + * This snapshot must not be manually released, it will + * be automatically released when this `Transaction` is deleted + */ + const rocksdb::Snapshot* GetSnapshot(); + + /** + * Get an iterator for this transaction + * The caller is responsible for deleting the iterator + * By default it will read any value set in the transaction overlay + * before defaulting to the underlying DB value, this includes deleted values + * Setting a read snapshot only affects what is read from the DB + */ + rocksdb::Iterator* GetIterator(const rocksdb::ReadOptions& options); + + /** + * Get a value + * This will read from the transaction overlay and default to the underlying + * db. Use a snapshot for consistent reads + */ + rocksdb::Status Get(const rocksdb::ReadOptions& options, rocksdb::Slice key, + std::string& value); + + /** + * Get a value for update + * This will read from the transaction overlay and default to the underlying
+ * db. Use this to solve write skews and read-write conflicts. Use a + * snapshot for consistent reads + */ + rocksdb::Status GetForUpdate(const rocksdb::ReadOptions& options, + rocksdb::Slice key, std::string& value, + bool exclusive = true); + + /** + * Get multiple values + */ + std::vector<rocksdb::Status> MultiGet(const rocksdb::ReadOptions& options, + const std::vector<rocksdb::Slice>& keys, + std::vector<std::string>& values); + + /** + * Get multiple values for update + */ + std::vector<rocksdb::Status> MultiGetForUpdate( + const rocksdb::ReadOptions& options, + const std::vector<rocksdb::Slice>& keys, + std::vector<std::string>& values); + + /** + * Put a key value + * This will write to the transaction overlay + * Writing to the same key after this put operation will cause a conflict + * If a snapshot is applied to the transaction, writes made after the + * snapshot is set to keys that this transaction also writes will cause a + * conflict + */ + rocksdb::Status Put(rocksdb::Slice key, rocksdb::Slice value); + + /** + * Delete a key value + * This will write to the transaction overlay + * Writing to the same key after this delete operation will cause a conflict + * If a snapshot is applied to the transaction, writes made after the + * snapshot is set to keys that this transaction also writes will cause a + * conflict + */ + rocksdb::Status Del(rocksdb::Slice key); + + /** + * Attach `Iterator` to be managed by this `Transaction` + * Iterators attached will be closed automatically if not detached + */ + void AttachIterator(napi_env env, uint32_t id, Iterator* iterator); + + /** + * Detach `Iterator` from this `Transaction` + * It is assumed the caller will have closed or will be closing the iterator + */ + void DetachIterator(napi_env env, uint32_t id); + + /** + * Increment pending work count to delay concurrent close operation + * This also increments the JS reference count which prevents GC + * Pending work can be priority asynchronous operations + * or sub-objects like iterators + */ + void IncrementPendingWork(napi_env env); + + /** + * Decrement pending work count + * When no pending work remains, it will run the `closeWorker_` if it is set + */ + void DecrementPendingWork(napi_env env); + + /** + * Check if it has any pending work + */ + bool HasPendingWork() const; + + Database* database_; + const uint32_t id_; + /** + * This is managed by workers + * It is used to indicate whether commit is asynchronously scheduled + */ + bool isCommitting_; + bool hasCommitted_; + /** + * This is managed by workers + * It is used to indicate whether rollback is asynchronously scheduled + */ + bool isRollbacking_; + bool hasRollbacked_; + uint32_t currentIteratorId_; + std::map<uint32_t, Iterator*> iterators_; + BaseWorker* closeWorker_; + + private: + rocksdb::WriteOptions* options_; + rocksdb::Transaction* tran_; + uint32_t pendingWork_; + napi_ref ref_; +}; diff --git a/src/rocksdb/napi/utils.cpp b/src/rocksdb/napi/utils.cpp new file mode 100644 index 00000000..3d20c9b3 --- /dev/null +++ b/src/rocksdb/napi/utils.cpp @@ -0,0 +1,237 @@ +#define NAPI_VERSION 3 + +#include "utils.h" + +#include <cstring> + +void NullLogger::Logv(const char* format, va_list ap) {} + +size_t NullLogger::GetLogFileSize() const { return 0; } + +bool IsString(napi_env env, napi_value value) { + napi_valuetype type; + napi_typeof(env, value, &type); + return type == napi_string; +} + +bool IsBuffer(napi_env env, napi_value value) { + bool isBuffer; + napi_is_buffer(env, value, &isBuffer); + return isBuffer; +} + +bool IsObject(napi_env env, napi_value value) { + napi_valuetype type; + napi_typeof(env, value,
&type); + return type == napi_object; +} + +bool IsUndefined(napi_env env, napi_value value) { + napi_valuetype type; + napi_typeof(env, value, &type); + return type == napi_undefined; +} + +bool IsNull(napi_env env, napi_value value) { + napi_valuetype type; + napi_typeof(env, value, &type); + return type == napi_null; +} + +bool IsExternal(napi_env env, napi_value value) { + napi_valuetype type; + napi_typeof(env, value, &type); + return type == napi_external; +} + +napi_value CreateError(napi_env env, const char* str) { + napi_value msg; + napi_create_string_utf8(env, str, strlen(str), &msg); + napi_value error; + napi_create_error(env, NULL, msg, &error); + return error; +} + +napi_value CreateCodeError(napi_env env, const char* code, const char* msg) { + napi_value codeValue; + napi_create_string_utf8(env, code, strlen(code), &codeValue); + napi_value msgValue; + napi_create_string_utf8(env, msg, strlen(msg), &msgValue); + napi_value error; + napi_create_error(env, codeValue, msgValue, &error); + return error; +} + +bool HasProperty(napi_env env, napi_value obj, const char* key) { + bool has = false; + napi_has_named_property(env, obj, key, &has); + return has; +} + +napi_value GetProperty(napi_env env, napi_value obj, const char* key) { + napi_value value; + napi_get_named_property(env, obj, key, &value); + return value; +} + +bool BooleanProperty(napi_env env, napi_value obj, const char* key, + bool DEFAULT) { + if (HasProperty(env, obj, key)) { + napi_value value = GetProperty(env, obj, key); + bool result; + napi_get_value_bool(env, value, &result); + return result; + } + + return DEFAULT; +} + +bool EncodingIsBuffer(napi_env env, napi_value options, const char* option) { + napi_value value; + size_t size; + + if (napi_get_named_property(env, options, option, &value) == napi_ok && + napi_get_value_string_utf8(env, value, NULL, 0, &size) == napi_ok) { + // Value is either "buffer" or "utf8" so we can tell them apart just by size + return size == 6; + } + + return false; +} + +uint32_t Uint32Property(napi_env env, napi_value obj, const char* key, + uint32_t DEFAULT) { + if (HasProperty(env, obj, key)) { + napi_value value = GetProperty(env, obj, key); + uint32_t result; + napi_get_value_uint32(env, value, &result); + return result; + } + + return DEFAULT; +} + +int Int32Property(napi_env env, napi_value obj, const char* key, int DEFAULT) { + if (HasProperty(env, obj, key)) { + napi_value value = GetProperty(env, obj, key); + int result; + napi_get_value_int32(env, value, &result); + return result; + } + + return DEFAULT; +} + +std::string StringProperty(napi_env env, napi_value obj, const char* key) { + if (HasProperty(env, obj, key)) { + napi_value value = GetProperty(env, obj, key); + if (IsString(env, value)) { + size_t size = 0; + napi_get_value_string_utf8(env, value, NULL, 0, &size); + + char* buf = new char[size + 1]; + napi_get_value_string_utf8(env, value, buf, size + 1, &size); + buf[size] = '\0'; + + std::string result = buf; + delete[] buf; + return result; + } + } + + return ""; +} + +const Snapshot* SnapshotProperty(napi_env env, napi_value obj, + const char* key) { + if (!HasProperty(env, obj, key)) { + return nullptr; + } + napi_value value = GetProperty(env, obj, key); + if (!IsExternal(env, value)) { + return nullptr; + } + Snapshot* snapshot = NULL; + NAPI_STATUS_THROWS(napi_get_value_external(env, value, (void**)&snapshot)); + if (!dynamic_cast<Snapshot*>(snapshot)) { + return nullptr; + } + return snapshot; +} + +const TransactionSnapshot*
TransactionSnapshotProperty(napi_env env, + napi_value obj, + const char* key) { + if (!HasProperty(env, obj, key)) { + return nullptr; + } + napi_value value = GetProperty(env, obj, key); + if (!IsExternal(env, value)) { + return nullptr; + } + TransactionSnapshot* snapshot = NULL; + NAPI_STATUS_THROWS(napi_get_value_external(env, value, (void**)&snapshot)); + if (!dynamic_cast(snapshot)) { + return nullptr; + } + return snapshot; +} + +void DisposeSliceBuffer(rocksdb::Slice slice) { + if (!slice.empty()) delete[] slice.data(); +} + +rocksdb::Slice ToSlice(napi_env env, napi_value from) { + LD_STRING_OR_BUFFER_TO_COPY(env, from, to); + return rocksdb::Slice(toCh_, toSz_); +} + +size_t StringOrBufferLength(napi_env env, napi_value value) { + size_t size = 0; + + if (IsString(env, value)) { + napi_get_value_string_utf8(env, value, NULL, 0, &size); + } else if (IsBuffer(env, value)) { + char* buf; + napi_get_buffer_info(env, value, (void**)&buf, &size); + } + + return size; +} + +std::string* RangeOption(napi_env env, napi_value opts, const char* name) { + if (HasProperty(env, opts, name)) { + napi_value value = GetProperty(env, opts, name); + LD_STRING_OR_BUFFER_TO_COPY(env, value, to); + std::string* result = new std::string(toCh_, toSz_); + delete[] toCh_; + return result; + } + + return NULL; +} + +std::vector* KeyArray(napi_env env, napi_value arr) { + uint32_t length; + std::vector* result = new std::vector(); + if (napi_get_array_length(env, arr, &length) == napi_ok) { + result->reserve(length); + for (uint32_t i = 0; i < length; i++) { + napi_value element; + if (napi_get_element(env, arr, i, &element) == napi_ok) { + rocksdb::Slice slice = ToSlice(env, element); + result->emplace_back(slice); + } + } + } + return result; +} + +napi_status CallFunction(napi_env env, napi_value callback, const int argc, + napi_value* argv) { + napi_value global; + napi_get_global(env, &global); + return napi_call_function(env, global, callback, argc, argv, NULL); +} + +napi_value noop_callback(napi_env env, napi_callback_info info) { return 0; } diff --git a/src/rocksdb/napi/utils.h b/src/rocksdb/napi/utils.h new file mode 100644 index 00000000..b393ce9a --- /dev/null +++ b/src/rocksdb/napi/utils.h @@ -0,0 +1,248 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include + +#include +#include +#include +#include + +#include "database.h" +#include "iterator.h" +#include "transaction.h" +#include "batch.h" +#include "snapshot.h" + +/** + * Macros + */ + +#define NAPI_DB_CONTEXT() \ + Database* database = NULL; \ + NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], (void**)&database)); + +#define NAPI_ITERATOR_CONTEXT() \ + Iterator* iterator = NULL; \ + NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], (void**)&iterator)); + +#define NAPI_TRANSACTION_CONTEXT() \ + Transaction* transaction = NULL; \ + NAPI_STATUS_THROWS( \ + napi_get_value_external(env, argv[0], (void**)&transaction)); + +#define NAPI_BATCH_CONTEXT() \ + Batch* batch = NULL; \ + NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], (void**)&batch)); + +#define NAPI_SNAPSHOT_CONTEXT() \ + Snapshot* snapshot = NULL; \ + NAPI_STATUS_THROWS(napi_get_value_external(env, argv[0], (void**)&snapshot)); + +#define NAPI_RETURN_UNDEFINED() return 0; + +#define NAPI_UTF8_NEW(name, val) \ + size_t name##_size = 0; \ + NAPI_STATUS_THROWS( \ + napi_get_value_string_utf8(env, val, NULL, 0, &name##_size)) \ + char* name = new char[name##_size + 1]; \ + NAPI_STATUS_THROWS(napi_get_value_string_utf8( \ + 
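+// Ownership note (editor sketch): ToSlice above copies the JS string/buffer
+// into a `new[]` allocation via LD_STRING_OR_BUFFER_TO_COPY, so the slice
+// outlives the JS value but must be released with DisposeSliceBuffer:
+//
+//   rocksdb::Slice key = ToSlice(env, jsKey);
+//   rocksdb::Status s = db->Get(readOptions, key, value);
+//   DisposeSliceBuffer(key);  // frees the copied bytes (no-op when empty)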
env, val, name, name##_size + 1, &name##_size)) \ + name[name##_size] = '\0'; + +#define NAPI_ARGV_UTF8_NEW(name, i) NAPI_UTF8_NEW(name, argv[i]) + +#define LD_STRING_OR_BUFFER_TO_COPY(env, from, to) \ + char* to##Ch_ = 0; \ + size_t to##Sz_ = 0; \ + if (IsString(env, from)) { \ + napi_get_value_string_utf8(env, from, NULL, 0, &to##Sz_); \ + to##Ch_ = new char[to##Sz_ + 1]; \ + napi_get_value_string_utf8(env, from, to##Ch_, to##Sz_ + 1, &to##Sz_); \ + to##Ch_[to##Sz_] = '\0'; \ + } else if (IsBuffer(env, from)) { \ + char* buf = 0; \ + napi_get_buffer_info(env, from, (void**)&buf, &to##Sz_); \ + to##Ch_ = new char[to##Sz_]; \ + memcpy(to##Ch_, buf, to##Sz_); \ + } + +#define ASSERT_TRANSACTION_READY_CB(env, transaction, callback) \ + if (transaction->isCommitting_ || transaction->hasCommitted_) { \ + napi_value callback_error = CreateCodeError( \ + env, "TRANSACTION_COMMITTED", "Transaction is already committed"); \ + NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &callback_error)); \ + NAPI_RETURN_UNDEFINED(); \ + } \ + if (transaction->isRollbacking_ || transaction->hasRollbacked_) { \ + napi_value callback_error = CreateCodeError( \ + env, "TRANSACTION_ROLLBACKED", "Transaction is already rollbacked"); \ + NAPI_STATUS_THROWS(CallFunction(env, callback, 1, &callback_error)); \ + NAPI_RETURN_UNDEFINED(); \ + } + +#define ASSERT_TRANSACTION_READY(env, transaction) \ + if (transaction->isCommitting_ || transaction->hasCommitted_) { \ + napi_throw_error(env, "TRANSACTION_COMMITTED", \ + "Transaction is already committed"); \ + NAPI_RETURN_UNDEFINED(); \ + } \ + if (transaction->isRollbacking_ || transaction->hasRollbacked_) { \ + napi_throw_error(env, "TRANSACTION_ROLLBACKED", \ + "Transaction is already rollbacked"); \ + NAPI_RETURN_UNDEFINED(); \ + } + +/** + * NAPI_EXPORT_FUNCTION does not export the name of the function + * To ensure that this overrides napi-macros.h, make sure to include this + * header after you include + */ +#undef NAPI_EXPORT_FUNCTION +#define NAPI_EXPORT_FUNCTION(name) \ + { \ + napi_value name##_fn; \ + NAPI_STATUS_THROWS_VOID(napi_create_function(env, #name, NAPI_AUTO_LENGTH, \ + name, NULL, &name##_fn)) \ + NAPI_STATUS_THROWS_VOID( \ + napi_set_named_property(env, exports, #name, name##_fn)) \ + } + +class NullLogger : public rocksdb::Logger { + public: + using rocksdb::Logger::Logv; + virtual void Logv(const char* format, va_list ap) override; + virtual size_t GetLogFileSize() const override; +}; + +/** + * Helper functions + */ + +/** + * Returns true if 'value' is a string. + */ +bool IsString(napi_env env, napi_value value); + +/** + * Returns true if 'value' is a buffer. + */ +bool IsBuffer(napi_env env, napi_value value); + +/** + * Returns true if 'value' is an object. + */ +bool IsObject(napi_env env, napi_value value); + +/** + * Returns true if 'value' is an undefined. + */ +bool IsUndefined(napi_env env, napi_value value); + +/** + * Returns true if 'value' is an null. + */ +bool IsNull(napi_env env, napi_value value); + +/** + * Returns true if 'value' is an external. + */ +bool IsExternal(napi_env env, napi_value value); + +/** + * Create an error object. + */ +napi_value CreateError(napi_env env, const char* str); + +napi_value CreateCodeError(napi_env env, const char* code, const char* msg); + +/** + * Returns true if 'obj' has a property 'key'. + */ +bool HasProperty(napi_env env, napi_value obj, const char* key); + +/** + * Returns a property in napi_value form. 
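+// Export sketch (illustrative; NAPI_METHOD/NAPI_ARGV/NAPI_INIT are assumed
+// from the napi-macros header that the NAPI_EXPORT_FUNCTION override above
+// replaces): a binding is defined and registered so its JS name matches the
+// C function name:
+//
+//   NAPI_METHOD(dbExample) {
+//     NAPI_ARGV(1);
+//     NAPI_DB_CONTEXT();
+//     NAPI_RETURN_UNDEFINED();
+//   }
+//   NAPI_INIT() { NAPI_EXPORT_FUNCTION(dbExample); }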
+ */ +napi_value GetProperty(napi_env env, napi_value obj, const char* key); + +/** + * Returns a boolean property 'key' from 'obj'. + * Returns 'DEFAULT' if the property doesn't exist. + */ +bool BooleanProperty(napi_env env, napi_value obj, const char* key, + bool DEFAULT); + +/** + * Returns true if the options object contains an encoding option that is + * "buffer" + */ +bool EncodingIsBuffer(napi_env env, napi_value options, const char* option); + +/** + * Returns a uint32 property 'key' from 'obj'. + * Returns 'DEFAULT' if the property doesn't exist. + */ +uint32_t Uint32Property(napi_env env, napi_value obj, const char* key, + uint32_t DEFAULT); + +/** + * Returns a int32 property 'key' from 'obj'. + * Returns 'DEFAULT' if the property doesn't exist. + */ +int Int32Property(napi_env env, napi_value obj, const char* key, int DEFAULT); + +/** + * Returns a string property 'key' from 'obj'. + * Returns empty string if the property doesn't exist. + */ +std::string StringProperty(napi_env env, napi_value obj, const char* key); + +/** + * Returns a snapshot property 'key' from 'obj'. + * Returns `nullptr` if the property doesn't exist. + */ +const Snapshot* SnapshotProperty(napi_env env, napi_value obj, const char* key); + +/** + * Returns a transaction snapshot property 'key' from 'obj'. + * Returns `nullptr` if the property doesn't exist. + */ +const TransactionSnapshot* TransactionSnapshotProperty(napi_env env, + napi_value obj, + const char* key); + +void DisposeSliceBuffer(rocksdb::Slice slice); + +/** + * Convert a napi_value to a rocksdb::Slice. + */ +rocksdb::Slice ToSlice(napi_env env, napi_value from); + +/** + * Returns length of string or buffer + */ +size_t StringOrBufferLength(napi_env env, napi_value value); + +/** + * Takes a Buffer or string property 'name' from 'opts'. + * Returns null if the property does not exist or is zero-length. + */ +std::string* RangeOption(napi_env env, napi_value opts, const char* name); + +/** + * Converts an array containing Buffer or string keys to a vector. + */ +std::vector* KeyArray(napi_env env, napi_value arr); + +/** + * Calls a function. 
+ */ +napi_status CallFunction(napi_env env, napi_value callback, const int argc, + napi_value* argv); + +napi_value noop_callback(napi_env env, napi_callback_info info); diff --git a/src/rocksdb/napi/worker.cpp b/src/rocksdb/napi/worker.cpp new file mode 100644 index 00000000..52e6e0a1 --- /dev/null +++ b/src/rocksdb/napi/worker.cpp @@ -0,0 +1,146 @@ +#define NAPI_VERSION 3 + +#include "worker.h" + +#include +#include +#include + +#include "database.h" +#include "utils.h" + +BaseWorker::BaseWorker(napi_env env, Database* database, napi_value callback, + const char* resourceName) + : database_(database), transaction_(nullptr), errMsg_(nullptr) { + NAPI_STATUS_THROWS_VOID( + napi_create_reference(env, callback, 1, &callbackRef_)); + napi_value asyncResourceName; + NAPI_STATUS_THROWS_VOID(napi_create_string_utf8( + env, resourceName, NAPI_AUTO_LENGTH, &asyncResourceName)); + NAPI_STATUS_THROWS_VOID(napi_create_async_work( + env, callback, asyncResourceName, BaseWorker::Execute, + BaseWorker::Complete, this, &asyncWork_)); +} + +BaseWorker::BaseWorker(napi_env env, Transaction* transaction, + napi_value callback, const char* resourceName) + : database_(nullptr), transaction_(transaction), errMsg_(nullptr) { + NAPI_STATUS_THROWS_VOID( + napi_create_reference(env, callback, 1, &callbackRef_)); + napi_value asyncResourceName; + NAPI_STATUS_THROWS_VOID(napi_create_string_utf8( + env, resourceName, NAPI_AUTO_LENGTH, &asyncResourceName)); + NAPI_STATUS_THROWS_VOID(napi_create_async_work( + env, callback, asyncResourceName, BaseWorker::Execute, + BaseWorker::Complete, this, &asyncWork_)); +} + +BaseWorker::~BaseWorker() { delete[] errMsg_; } + +void BaseWorker::Execute(napi_env env, void* data) { + BaseWorker* self = (BaseWorker*)data; + // Don't pass env to DoExecute() because use of Node-API + // methods should generally be avoided in async work. 
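+// A minimal subclass sketch (illustrative, not in this diff): inputs are
+// captured on the JS thread in the constructor, DoExecute runs on a libuv
+// pool thread without calling into N-API, and DoFinally deletes the worker:
+//
+//   struct ExampleWorker final : public BaseWorker {
+//     ExampleWorker(napi_env env, Database* database, napi_value callback)
+//         : BaseWorker(env, database, callback, "rocksdb.example") {}
+//     void DoExecute() override { SetStatus(rocksdb::Status::OK()); }
+//   };
+//   // Schedule it with: (new ExampleWorker(env, database, callback))->Queue(env);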
+ self->DoExecute(); +} + +bool BaseWorker::SetStatus(rocksdb::Status status) { + status_ = status; + if (!status.ok()) { + SetErrorMessage(status.ToString().c_str()); + return false; + } + return true; +} + +void BaseWorker::SetErrorMessage(const char* msg) { + delete[] errMsg_; + size_t size = strlen(msg) + 1; + errMsg_ = new char[size]; + memcpy(errMsg_, msg, size); +} + +void BaseWorker::Complete(napi_env env, napi_status status, void* data) { + BaseWorker* self = (BaseWorker*)data; + + self->DoComplete(env); + self->DoFinally(env); +} + +void BaseWorker::DoComplete(napi_env env) { + napi_value callback; + napi_get_reference_value(env, callbackRef_, &callback); + + if (status_.ok()) { + HandleOKCallback(env, callback); + } else { + HandleErrorCallback(env, callback); + } +} + +void BaseWorker::HandleOKCallback(napi_env env, napi_value callback) { + napi_value argv; + napi_get_null(env, &argv); + CallFunction(env, callback, 1, &argv); +} + +void BaseWorker::HandleErrorCallback(napi_env env, napi_value callback) { + napi_value argv; + + if (status_.IsNotFound()) { + argv = CreateCodeError(env, "NOT_FOUND", errMsg_); + } else if (status_.IsCorruption()) { + argv = CreateCodeError(env, "CORRUPTION", errMsg_); + } else if (status_.IsIOError()) { + if (strlen(errMsg_) > 15 && + strncmp("IO error: lock ", errMsg_, 15) == 0) { // fs_posix.cc + argv = CreateCodeError(env, "LOCKED", errMsg_); + } else if (strlen(errMsg_) > 32 && + strncmp("IO error: Failed to create lock ", errMsg_, 32) == + 0) { // env_win.cc + argv = CreateCodeError(env, "LOCKED", errMsg_); + } else { + argv = CreateCodeError(env, "IO_ERROR", errMsg_); + } + } else if (status_.IsBusy()) { + argv = CreateCodeError(env, "TRANSACTION_CONFLICT", errMsg_); + } else { + argv = CreateError(env, errMsg_); + } + + CallFunction(env, callback, 1, &argv); +} + +void BaseWorker::DoFinally(napi_env env) { + napi_delete_reference(env, callbackRef_); + napi_delete_async_work(env, asyncWork_); + // Because the worker is executed asynchronously + // cleanup must be done by itself + delete this; +} + +void BaseWorker::Queue(napi_env env) { napi_queue_async_work(env, asyncWork_); } + +PriorityWorker::PriorityWorker(napi_env env, Database* database, + napi_value callback, const char* resourceName) + : BaseWorker(env, database, callback, resourceName) { + database_->IncrementPendingWork(env); +} + +PriorityWorker::PriorityWorker(napi_env env, Transaction* transaction, + napi_value callback, const char* resourceName) + : BaseWorker(env, transaction, callback, resourceName) { + transaction_->IncrementPendingWork(env); +} + +PriorityWorker::~PriorityWorker() = default; + +void PriorityWorker::DoFinally(napi_env env) { + assert(database_ != nullptr || transaction_ != nullptr); + if (database_ != nullptr) { + database_->DecrementPendingWork(env); + } else if (transaction_ != nullptr) { + transaction_->DecrementPendingWork(env); + } + BaseWorker::DoFinally(env); +} diff --git a/src/rocksdb/napi/worker.h b/src/rocksdb/napi/worker.h new file mode 100644 index 00000000..80346783 --- /dev/null +++ b/src/rocksdb/napi/worker.h @@ -0,0 +1,81 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include + +#include "database.h" +#include "transaction.h" + +/** + * Asynchronous worker queues operations into the Node.js libuv thread pool + * Use this to make synchronous operations asynchronous so you don't block + * the main Node.js thread + * + * Derived classes can override the following virtual methods + * (listed in the order 
in which they're called): + * + * - DoExecute (abstract, worker pool thread): main work + * - HandleOKCallback (main thread): call JS callback on success + * - HandleErrorCallback (main thread): call JS callback on error + * - DoFinally (main thread): do cleanup regardless of success + * + * Note: storing env is discouraged as we'd end up using it in unsafe places. + */ +struct BaseWorker { + BaseWorker(napi_env env, Database* database, napi_value callback, + const char* resourceName); + + BaseWorker(napi_env env, Transaction* transaction, napi_value callback, + const char* resourceName); + + virtual ~BaseWorker(); + + static void Execute(napi_env env, void* data); + + bool SetStatus(rocksdb::Status status); + + void SetErrorMessage(const char* msg); + + virtual void DoExecute() = 0; + + static void Complete(napi_env env, napi_status status, void* data); + + void DoComplete(napi_env env); + + virtual void HandleOKCallback(napi_env env, napi_value callback); + + virtual void HandleErrorCallback(napi_env env, napi_value callback); + + virtual void DoFinally(napi_env env); + + void Queue(napi_env env); + + Database* database_; + Transaction* transaction_; + + private: + napi_ref callbackRef_; + napi_async_work asyncWork_; + rocksdb::Status status_; + char* errMsg_; +}; + +/** + * Priority worker represents asynchronous operations that delays concurrent + * close operations + */ +struct PriorityWorker : public BaseWorker { + PriorityWorker(napi_env env, Database* database, napi_value callback, + const char* resourceName); + + PriorityWorker(napi_env env, Transaction* transaction, napi_value callback, + const char* resourceName); + + virtual ~PriorityWorker(); + + void DoFinally(napi_env env) override; +}; diff --git a/src/rocksdb/napi/workers/batch_workers.cpp b/src/rocksdb/napi/workers/batch_workers.cpp new file mode 100644 index 00000000..7fd2a084 --- /dev/null +++ b/src/rocksdb/napi/workers/batch_workers.cpp @@ -0,0 +1,51 @@ +#define NAPI_VERSION 3 + +#include "batch_workers.h" + +#include +#include + +#include "../worker.h" +#include "../database.h" +#include "../batch.h" +#include "../utils.h" + +BatchWorker::BatchWorker(napi_env env, Database* database, napi_value callback, + rocksdb::WriteBatch* batch, const bool sync, + const bool hasData) + : PriorityWorker(env, database, callback, "rocksdb.batch.do"), + batch_(batch), + hasData_(hasData) { + options_.sync = sync; +} + +BatchWorker::~BatchWorker() { delete batch_; } + +void BatchWorker::DoExecute() { + if (hasData_) { + SetStatus(database_->WriteBatch(options_, batch_)); + } +} + +BatchWriteWorker::BatchWriteWorker(napi_env env, napi_value context, + Batch* batch, napi_value callback, + const bool sync) + : PriorityWorker(env, batch->database_, callback, "rocksdb.batch.write"), + batch_(batch), + sync_(sync) { + // Prevent GC of batch object before we execute + NAPI_STATUS_THROWS_VOID(napi_create_reference(env, context, 1, &contextRef_)); +} + +BatchWriteWorker::~BatchWriteWorker() {} + +void BatchWriteWorker::DoExecute() { + if (batch_->hasData_) { + SetStatus(batch_->Write(sync_)); + } +} + +void BatchWriteWorker::DoFinally(napi_env env) { + napi_delete_reference(env, contextRef_); + PriorityWorker::DoFinally(env); +} diff --git a/src/rocksdb/napi/workers/batch_workers.h b/src/rocksdb/napi/workers/batch_workers.h new file mode 100644 index 00000000..b56cb3e8 --- /dev/null +++ b/src/rocksdb/napi/workers/batch_workers.h @@ -0,0 +1,49 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include 
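+// Pattern note (editor sketch): BatchWriteWorker pins its JS wrapper with a
+// napi_ref so the Batch cannot be garbage collected while the libuv thread
+// still reads it; the ref is dropped on the main thread in DoFinally:
+//
+//   napi_ref ref;
+//   napi_create_reference(env, jsBatchObject, 1, &ref);  // pin before Queue()
+//   // ... DoExecute() runs on the thread pool ...
+//   napi_delete_reference(env, ref);                     // unpin in DoFinally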
+#include + +#include "../worker.h" +#include "../database.h" +#include "../batch.h" + +/** + * Worker class for batch write operation. + */ +struct BatchWorker final : public PriorityWorker { + BatchWorker(napi_env env, Database* database, napi_value callback, + rocksdb::WriteBatch* batch, const bool sync, const bool hasData); + + ~BatchWorker(); + + void DoExecute() override; + + private: + rocksdb::WriteOptions options_; + rocksdb::WriteBatch* batch_; + const bool hasData_; +}; + +/** + * Worker class for batch write operation. + */ +struct BatchWriteWorker final : public PriorityWorker { + BatchWriteWorker(napi_env env, napi_value context, Batch* batch, + napi_value callback, const bool sync); + + ~BatchWriteWorker(); + + void DoExecute() override; + + void DoFinally(napi_env env) override; + + private: + Batch* batch_; + const bool sync_; + napi_ref contextRef_; +}; diff --git a/src/rocksdb/napi/workers/database_workers.cpp b/src/rocksdb/napi/workers/database_workers.cpp new file mode 100644 index 00000000..677ddfa1 --- /dev/null +++ b/src/rocksdb/napi/workers/database_workers.cpp @@ -0,0 +1,263 @@ +#define NAPI_VERSION 3 + +#include "database_workers.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../worker.h" +#include "../database.h" +#include "../snapshot.h" +#include "../utils.h" + +OpenWorker::OpenWorker(napi_env env, Database* database, napi_value callback, + const std::string& location, const bool createIfMissing, + const bool errorIfExists, const bool compression, + const uint32_t writeBufferSize, const uint32_t blockSize, + const uint32_t maxOpenFiles, + const uint32_t blockRestartInterval, + const uint32_t maxFileSize, const uint32_t cacheSize, + const rocksdb::InfoLogLevel log_level, + rocksdb::Logger* logger) + : BaseWorker(env, database, callback, "rocksdb.db.open"), + location_(location) { + options_.create_if_missing = createIfMissing; + options_.error_if_exists = errorIfExists; + options_.compression = + compression ? 
rocksdb::kSnappyCompression : rocksdb::kNoCompression; + options_.write_buffer_size = writeBufferSize; + options_.max_open_files = maxOpenFiles; + options_.max_log_file_size = maxFileSize; + options_.paranoid_checks = false; + options_.info_log_level = log_level; + if (logger) { + options_.info_log.reset(logger); + } + + rocksdb::BlockBasedTableOptions tableOptions; + + if (cacheSize) { + tableOptions.block_cache = rocksdb::NewLRUCache(cacheSize); + } else { + tableOptions.no_block_cache = true; + } + + tableOptions.block_size = blockSize; + tableOptions.block_restart_interval = blockRestartInterval; + tableOptions.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10)); + + options_.table_factory.reset( + rocksdb::NewBlockBasedTableFactory(tableOptions)); +} + +OpenWorker::~OpenWorker() {} + +void OpenWorker::DoExecute() { + SetStatus(database_->Open(options_, location_.c_str())); +} + +CloseWorker::CloseWorker(napi_env env, Database* database, napi_value callback) + : BaseWorker(env, database, callback, "rocksdb.db.close") {} + +CloseWorker::~CloseWorker() {} + +void CloseWorker::DoExecute() { database_->Close(); } + +void CloseWorker::DoFinally(napi_env env) { + database_->Detach(env); + BaseWorker::DoFinally(env); +} + +GetWorker::GetWorker(napi_env env, Database* database, napi_value callback, + rocksdb::Slice key, const bool asBuffer, + const bool fillCache, const Snapshot* snapshot) + : PriorityWorker(env, database, callback, "rocksdb.db.get"), + key_(key), + asBuffer_(asBuffer) { + options_.fill_cache = fillCache; + if (snapshot) options_.snapshot = snapshot->snapshot(); +} + +GetWorker::~GetWorker() { DisposeSliceBuffer(key_); } + +void GetWorker::DoExecute() { + SetStatus(database_->Get(options_, key_, value_)); +} + +void GetWorker::HandleOKCallback(napi_env env, napi_value callback) { + napi_value argv[2]; + napi_get_null(env, &argv[0]); + Entry::Convert(env, &value_, asBuffer_, &argv[1]); + CallFunction(env, callback, 2, argv); +} + +MultiGetWorker::MultiGetWorker(napi_env env, Database* database, + const std::vector* keys, + napi_value callback, const bool valueAsBuffer, + const bool fillCache, const Snapshot* snapshot) + : PriorityWorker(env, database, callback, "rocksdb.db.multiget"), + keys_(keys), + valueAsBuffer_(valueAsBuffer) { + options_.fill_cache = fillCache; + if (snapshot) options_.snapshot = snapshot->snapshot(); +} + +MultiGetWorker::~MultiGetWorker() { delete keys_; } + +void MultiGetWorker::DoExecute() { + // NAPI requires a vector of string pointers + // the nullptr can be used to represent `undefined` + values_.reserve(keys_->size()); + // RocksDB requires just a vector of strings + // these will be automatically deallocated + std::vector values(keys_->size()); + std::vector statuses = + database_->MultiGet(options_, *keys_, values); + for (size_t i = 0; i != statuses.size(); i++) { + if (statuses[i].ok()) { + std::string* value = new std::string(values[i]); + values_.push_back(value); + } else if (statuses[i].IsNotFound()) { + values_.push_back(nullptr); + } else { + for (const std::string* value : values_) { + if (value != NULL) delete value; + } + SetStatus(statuses[i]); + break; + } + } +} + +void MultiGetWorker::HandleOKCallback(napi_env env, napi_value callback) { + size_t size = values_.size(); + napi_value array; + napi_create_array_with_length(env, size, &array); + + for (size_t idx = 0; idx < size; idx++) { + std::string* value = values_[idx]; + napi_value element; + Entry::Convert(env, value, valueAsBuffer_, &element); + napi_set_element(env, 
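+// Convention note (sketch): DoExecute stores found values as heap-allocated
+// strings and missing keys as nullptr; Entry::Convert turns nullptr into JS
+// `undefined`, so from JS a multi-get reads as:
+//
+//   rocksdb.dbMultiGet(db, ['present', 'missing'], {}, (e, values) => {
+//     // values: ['someValue', undefined]
+//   });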
array, static_cast(idx), element); + if (value != nullptr) delete value; + } + + napi_value argv[2]; + napi_get_null(env, &argv[0]); + argv[1] = array; + CallFunction(env, callback, 2, argv); +} + +PutWorker::PutWorker(napi_env env, Database* database, napi_value callback, + rocksdb::Slice key, rocksdb::Slice value, bool sync) + : PriorityWorker(env, database, callback, "rocksdb.db.put"), + key_(key), + value_(value) { + options_.sync = sync; +} + +PutWorker::~PutWorker() { + DisposeSliceBuffer(key_); + DisposeSliceBuffer(value_); +} + +void PutWorker::DoExecute() { + SetStatus(database_->Put(options_, key_, value_)); +} + +DelWorker::DelWorker(napi_env env, Database* database, napi_value callback, + rocksdb::Slice key, bool sync) + : PriorityWorker(env, database, callback, "rocksdb.db.del"), key_(key) { + options_.sync = sync; +} + +DelWorker::~DelWorker() { DisposeSliceBuffer(key_); } + +void DelWorker::DoExecute() { SetStatus(database_->Del(options_, key_)); } + +ApproximateSizeWorker::ApproximateSizeWorker(napi_env env, Database* database, + napi_value callback, + rocksdb::Slice start, + rocksdb::Slice end) + : PriorityWorker(env, database, callback, "rocksdb.db.approximate_size"), + start_(start), + end_(end) {} + +ApproximateSizeWorker::~ApproximateSizeWorker() { + DisposeSliceBuffer(start_); + DisposeSliceBuffer(end_); +} + +void ApproximateSizeWorker::DoExecute() { + rocksdb::Range range(start_, end_); + size_ = database_->ApproximateSize(&range); +} + +void ApproximateSizeWorker::HandleOKCallback(napi_env env, + napi_value callback) { + napi_value argv[2]; + napi_get_null(env, &argv[0]); + napi_create_int64(env, (uint64_t)size_, &argv[1]); + CallFunction(env, callback, 2, argv); +} + +CompactRangeWorker::CompactRangeWorker(napi_env env, Database* database, + napi_value callback, + rocksdb::Slice start, rocksdb::Slice end) + : PriorityWorker(env, database, callback, "rocksdb.db.compact_range"), + start_(start), + end_(end) {} + +CompactRangeWorker::~CompactRangeWorker() { + DisposeSliceBuffer(start_); + DisposeSliceBuffer(end_); +} + +void CompactRangeWorker::DoExecute() { + database_->CompactRange(&start_, &end_); +} + +DestroyWorker::DestroyWorker(napi_env env, const std::string& location, + napi_value callback) + : BaseWorker(env, (Database*)nullptr, callback, "rocksdb.destroyDb"), + location_(location) {} + +DestroyWorker::~DestroyWorker() {} + +void DestroyWorker::DoExecute() { + rocksdb::Options options; + + // TODO: support overriding infoLogLevel the same as db.open(options) + options.info_log_level = rocksdb::InfoLogLevel::HEADER_LEVEL; + options.info_log.reset(new NullLogger()); + + SetStatus(rocksdb::DestroyDB(location_, options)); +} + +RepairWorker::RepairWorker(napi_env env, const std::string& location, + napi_value callback) + : BaseWorker(env, (Database*)nullptr, callback, "rocksdb.repairDb"), + location_(location) {} + +RepairWorker::~RepairWorker() {} + +void RepairWorker::DoExecute() { + rocksdb::Options options; + + // TODO: support overriding infoLogLevel the same as db.open(options) + options.info_log_level = rocksdb::InfoLogLevel::HEADER_LEVEL; + options.info_log.reset(new NullLogger()); + + SetStatus(rocksdb::RepairDB(location_, options)); +} diff --git a/src/rocksdb/napi/workers/database_workers.h b/src/rocksdb/napi/workers/database_workers.h new file mode 100644 index 00000000..7d6c3184 --- /dev/null +++ b/src/rocksdb/napi/workers/database_workers.h @@ -0,0 +1,184 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include 
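+// Design note (editor sketch): operations that must not race a concurrent
+// dbClose derive from PriorityWorker, whose constructor and DoFinally bracket
+// the work with IncrementPendingWork/DecrementPendingWork; plain BaseWorker
+// (OpenWorker, CloseWorker, DestroyWorker, RepairWorker) skips that guard,
+// which is what the TODO on OpenWorker below questions. For example:
+//
+//   struct ExampleSizeWorker final : public PriorityWorker {
+//     using PriorityWorker::PriorityWorker;  // inherits the pending-work guard
+//     void DoExecute() override { /* close is held off until DoFinally */ }
+//   };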
+#include + +#include +#include +#include +#include + +#include "../worker.h" +#include "../database.h" +#include "../snapshot.h" + +/** + * Worker class for opening a database. + * TODO: shouldn't this be a PriorityWorker? + */ +struct OpenWorker final : public BaseWorker { + OpenWorker(napi_env env, Database* database, napi_value callback, + const std::string& location, const bool createIfMissing, + const bool errorIfExists, const bool compression, + const uint32_t writeBufferSize, const uint32_t blockSize, + const uint32_t maxOpenFiles, const uint32_t blockRestartInterval, + const uint32_t maxFileSize, const uint32_t cacheSize, + const rocksdb::InfoLogLevel log_level, rocksdb::Logger* logger); + + ~OpenWorker(); + + void DoExecute() override; + + rocksdb::Options options_; + std::string location_; +}; + +/** + * Worker class for closing a database + */ +struct CloseWorker final : public BaseWorker { + CloseWorker(napi_env env, Database* database, napi_value callback); + + ~CloseWorker(); + + void DoExecute() override; + + void DoFinally(napi_env env) override; +}; + +/** + * Worker class for getting a value from a database. + */ +struct GetWorker final : public PriorityWorker { + GetWorker(napi_env env, Database* database, napi_value callback, + rocksdb::Slice key, const bool asBuffer, const bool fillCache, + const Snapshot* snapshot = nullptr); + + ~GetWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + private: + rocksdb::ReadOptions options_; + rocksdb::Slice key_; + std::string value_; + const bool asBuffer_; +}; + +/** + * Worker class for getting many values. + */ +struct MultiGetWorker final : public PriorityWorker { + MultiGetWorker(napi_env env, Database* database, + const std::vector* keys, napi_value callback, + const bool valueAsBuffer, const bool fillCache, + const Snapshot* snapshot = nullptr); + + ~MultiGetWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + private: + rocksdb::ReadOptions options_; + const std::vector* keys_; + std::vector values_; + const bool valueAsBuffer_; +}; + +/** + * Worker class for putting key/value to the database + */ +struct PutWorker final : public PriorityWorker { + PutWorker(napi_env env, Database* database, napi_value callback, + rocksdb::Slice key, rocksdb::Slice value, bool sync); + + ~PutWorker(); + + void DoExecute() override; + + rocksdb::WriteOptions options_; + rocksdb::Slice key_; + rocksdb::Slice value_; +}; + +/** + * Worker class for deleting a value from a database. + */ +struct DelWorker final : public PriorityWorker { + DelWorker(napi_env env, Database* database, napi_value callback, + rocksdb::Slice key, bool sync); + + ~DelWorker(); + + void DoExecute() override; + + rocksdb::WriteOptions options_; + rocksdb::Slice key_; +}; + +/** + * Worker class for calculating the size of a range. + */ +struct ApproximateSizeWorker final : public PriorityWorker { + ApproximateSizeWorker(napi_env env, Database* database, napi_value callback, + rocksdb::Slice start, rocksdb::Slice end); + + ~ApproximateSizeWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + rocksdb::Slice start_; + rocksdb::Slice end_; + uint64_t size_; +}; + +/** + * Worker class for compacting a range in a database. 
+ */ +struct CompactRangeWorker final : public PriorityWorker { + CompactRangeWorker(napi_env env, Database* database, napi_value callback, + rocksdb::Slice start, rocksdb::Slice end); + + ~CompactRangeWorker(); + + void DoExecute() override; + + rocksdb::Slice start_; + rocksdb::Slice end_; +}; + +/** + * Worker class for destroying a database. + */ +struct DestroyWorker final : public BaseWorker { + DestroyWorker(napi_env env, const std::string& location, napi_value callback); + + ~DestroyWorker(); + + void DoExecute() override; + + std::string location_; +}; + +/** + * Worker class for repairing a database. + */ +struct RepairWorker final : public BaseWorker { + RepairWorker(napi_env env, const std::string& location, napi_value callback); + + ~RepairWorker(); + + void DoExecute() override; + + std::string location_; +}; diff --git a/src/rocksdb/napi/workers/iterator_workers.cpp b/src/rocksdb/napi/workers/iterator_workers.cpp new file mode 100644 index 00000000..adf14ed7 --- /dev/null +++ b/src/rocksdb/napi/workers/iterator_workers.cpp @@ -0,0 +1,215 @@ +#define NAPI_VERSION 3 + +#include "iterator_workers.h" + +#include +#include +#include + +#include + +#include "../worker.h" +#include "../iterator.h" +#include "../utils.h" + +IteratorCloseWorker::IteratorCloseWorker(napi_env env, Iterator* iterator, + napi_value callback) + : BaseWorker(env, iterator->database_, callback, "rocksdb.iterator.close"), + iterator_(iterator) {} + +IteratorCloseWorker::~IteratorCloseWorker() {} + +void IteratorCloseWorker::DoExecute() { iterator_->Close(); } + +void IteratorCloseWorker::DoFinally(napi_env env) { + iterator_->Detach(env); + BaseWorker::DoFinally(env); +} + +IteratorNextWorker::IteratorNextWorker(napi_env env, Iterator* iterator, + uint32_t size, napi_value callback) + : BaseWorker(env, iterator->database_, callback, "rocksdb.iterator.next"), + iterator_(iterator), + size_(size), + ok_() {} + +IteratorNextWorker::~IteratorNextWorker() {} + +void IteratorNextWorker::DoExecute() { + if (!iterator_->DidSeek()) { + iterator_->SeekToRange(); + } + + ok_ = iterator_->ReadMany(size_); + + if (!ok_) { + SetStatus(iterator_->Status()); + } +} + +void IteratorNextWorker::HandleOKCallback(napi_env env, napi_value callback) { + size_t size = iterator_->cache_.size(); + napi_value jsArray; + napi_create_array_with_length(env, size, &jsArray); + + const bool kab = iterator_->keyAsBuffer_; + const bool vab = iterator_->valueAsBuffer_; + + for (uint32_t idx = 0; idx < size; idx++) { + napi_value element; + iterator_->cache_[idx].ConvertByMode(env, Mode::entries, kab, vab, + &element); + napi_set_element(env, jsArray, idx, element); + } + + napi_value argv[3]; + napi_get_null(env, &argv[0]); + argv[1] = jsArray; + napi_get_boolean(env, !ok_, &argv[2]); + CallFunction(env, callback, 3, argv); +} + +void IteratorNextWorker::DoFinally(napi_env env) { + // clean up & handle the next/close state + iterator_->nexting_ = false; + + if (iterator_->closeWorker_ != NULL) { + iterator_->closeWorker_->Queue(env); + iterator_->closeWorker_ = NULL; + } + + BaseWorker::DoFinally(env); +} + +IteratorClearWorker::IteratorClearWorker(napi_env env, Database* database, + napi_value callback, const int limit, + std::string* lt, std::string* lte, + std::string* gt, std::string* gte, + const bool sync, + const Snapshot* snapshot) + : PriorityWorker(env, database, callback, "rocksdb.iterator.clear") { + iterator_ = new BaseIterator(database, false, lt, lte, gt, gte, limit, false, + snapshot); + writeOptions_ = new 
rocksdb::WriteOptions(); + writeOptions_->sync = sync; +} + +IteratorClearWorker::IteratorClearWorker(napi_env env, Transaction* transaction, + napi_value callback, const int limit, + std::string* lt, std::string* lte, + std::string* gt, std::string* gte, + const TransactionSnapshot* snapshot) + : PriorityWorker(env, transaction, callback, "rocksdb.iterator.clear") { + iterator_ = new BaseIterator(transaction, false, lt, lte, gt, gte, limit, + false, snapshot); + writeOptions_ = nullptr; +} + +IteratorClearWorker::~IteratorClearWorker() { + delete iterator_; + delete writeOptions_; +} + +void IteratorClearWorker::DoExecute() { + assert(database_ != nullptr || transaction_ != nullptr); + iterator_->SeekToRange(); + uint32_t hwm = 16 * 1024; + if (database_ != nullptr) { + rocksdb::WriteBatch batch; + while (true) { + size_t bytesRead = 0; + while (bytesRead <= hwm && iterator_->Valid() && iterator_->Increment()) { + rocksdb::Slice key = iterator_->CurrentKey(); + // If this fails, we return + if (!SetStatus(batch.Delete(key))) return; + bytesRead += key.size(); + iterator_->Next(); + } + if (!SetStatus(iterator_->Status()) || bytesRead == 0) { + break; + } + if (!SetStatus(database_->WriteBatch(*writeOptions_, &batch))) { + break; + } + batch.Clear(); + } + } else if (transaction_ != nullptr) { + while (true) { + size_t bytesRead = 0; + while (bytesRead <= hwm && iterator_->Valid() && iterator_->Increment()) { + rocksdb::Slice key = iterator_->CurrentKey(); + // If this fails, we return + if (!SetStatus(transaction_->Del(key))) return; + bytesRead += key.size(); + iterator_->Next(); + } + if (!SetStatus(iterator_->Status()) || bytesRead == 0) { + break; + } + } + } + iterator_->Close(); +} + +IteratorCountWorker::IteratorCountWorker(napi_env env, Database* database, + napi_value callback, const int limit, + std::string* lt, std::string* lte, + std::string* gt, std::string* gte, + const Snapshot* snapshot) + : PriorityWorker(env, database, callback, "rocksdb.iterator.count") { + iterator_ = new BaseIterator(database, false, lt, lte, gt, gte, limit, false, + snapshot); +} + +IteratorCountWorker::IteratorCountWorker(napi_env env, Transaction* transaction, + napi_value callback, const int limit, + std::string* lt, std::string* lte, + std::string* gt, std::string* gte, + const TransactionSnapshot* snapshot) + : PriorityWorker(env, transaction, callback, "rocksdb.iterator.close") { + iterator_ = new BaseIterator(transaction, false, lt, lte, gt, gte, limit, + false, snapshot); +} + +IteratorCountWorker::~IteratorCountWorker() { delete iterator_; } + +void IteratorCountWorker::DoExecute() { + assert(database_ != nullptr || transaction_ != nullptr); + iterator_->SeekToRange(); + uint32_t hwm = 16 * 1024; + if (database_ != nullptr) { + while (true) { + size_t bytesRead = 0; + while (bytesRead <= hwm && iterator_->Valid() && iterator_->Increment()) { + rocksdb::Slice key = iterator_->CurrentKey(); + count_++; + bytesRead += key.size(); + iterator_->Next(); + } + if (!SetStatus(iterator_->Status()) || bytesRead == 0) { + break; + } + } + } else if (transaction_ != nullptr) { + while (true) { + size_t bytesRead = 0; + while (bytesRead <= hwm && iterator_->Valid() && iterator_->Increment()) { + rocksdb::Slice key = iterator_->CurrentKey(); + count_++; + bytesRead += key.size(); + iterator_->Next(); + } + if (!SetStatus(iterator_->Status()) || bytesRead == 0) { + break; + } + } + } + iterator_->Close(); +} + +void IteratorCountWorker::HandleOKCallback(napi_env env, napi_value callback) { + napi_value 
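+// Design note (sketch of the loop shape above): both the clear and count
+// scans chunk their work at a ~16 KiB high-water mark of accumulated key
+// bytes, re-checking iterator status between chunks; on the database path
+// this also bounds WriteBatch memory. Roughly:
+//
+//   while (true) {  // one chunk per iteration
+//     // read keys until ~16 KiB accumulated, deleting/counting as we go
+//     // if status is not ok or nothing was read: break
+//     // clear path only: write out and reset the batch
+//   }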
argv[2]; + napi_get_null(env, &argv[0]); + napi_create_uint32(env, count_, &argv[1]); + CallFunction(env, callback, 2, argv); +} diff --git a/src/rocksdb/napi/workers/iterator_workers.h b/src/rocksdb/napi/workers/iterator_workers.h new file mode 100644 index 00000000..878654bf --- /dev/null +++ b/src/rocksdb/napi/workers/iterator_workers.h @@ -0,0 +1,99 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include + +#include +#include + +#include "../worker.h" +#include "../database.h" +#include "../iterator.h" +#include "../transaction.h" +#include "../snapshot.h" + +/** + * Worker class for closing an iterator + */ +struct IteratorCloseWorker final : public BaseWorker { + IteratorCloseWorker(napi_env env, Iterator* iterator, napi_value callback); + + ~IteratorCloseWorker(); + + void DoExecute() override; + + void DoFinally(napi_env env) override; + + private: + Iterator* iterator_; +}; + +/** + * Worker class for nexting an iterator. + */ +struct IteratorNextWorker final : public BaseWorker { + IteratorNextWorker(napi_env env, Iterator* iterator, uint32_t size, + napi_value callback); + + ~IteratorNextWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + void DoFinally(napi_env env) override; + + private: + Iterator* iterator_; + uint32_t size_; + bool ok_; +}; + +/** + * Worker class for deleting a range from a database. + */ +struct IteratorClearWorker final : public PriorityWorker { + IteratorClearWorker(napi_env env, Database* database, napi_value callback, + const int limit, std::string* lt, std::string* lte, + std::string* gt, std::string* gte, const bool sync, + const Snapshot* snapshot = nullptr); + + IteratorClearWorker(napi_env env, Transaction* transaction, + napi_value callback, const int limit, std::string* lt, + std::string* lte, std::string* gt, std::string* gte, + const TransactionSnapshot* snapshot = nullptr); + + ~IteratorClearWorker(); + + void DoExecute() override; + + private: + BaseIterator* iterator_; + rocksdb::WriteOptions* writeOptions_; +}; + +struct IteratorCountWorker final : public PriorityWorker { + IteratorCountWorker(napi_env env, Database* database, napi_value callback, + const int limit, std::string* lt, std::string* lte, + std::string* gt, std::string* gte, + const Snapshot* snapshot = nullptr); + + IteratorCountWorker(napi_env env, Transaction* transaction, + napi_value callback, const int limit, std::string* lt, + std::string* lte, std::string* gt, std::string* gte, + const TransactionSnapshot* snapshot = nullptr); + + ~IteratorCountWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + private: + BaseIterator* iterator_; + uint32_t count_ = 0; +}; diff --git a/src/rocksdb/napi/workers/snapshot_workers.cpp b/src/rocksdb/napi/workers/snapshot_workers.cpp new file mode 100644 index 00000000..9e867826 --- /dev/null +++ b/src/rocksdb/napi/workers/snapshot_workers.cpp @@ -0,0 +1,23 @@ +#define NAPI_VERSION 3 + +#include "snapshot_workers.h" + +#include + +#include "../worker.h" +#include "../snapshot.h" + +SnapshotReleaseWorker::SnapshotReleaseWorker(napi_env env, Snapshot* snapshot, + napi_value callback) + : PriorityWorker(env, snapshot->database_, callback, + "rocksdb.snapshot.release"), + snapshot_(snapshot) {} + +SnapshotReleaseWorker::~SnapshotReleaseWorker() = default; + +void SnapshotReleaseWorker::DoExecute() { snapshot_->Release(); }; + +void SnapshotReleaseWorker::DoFinally(napi_env env) { 
+ snapshot_->Detach(env); + PriorityWorker::DoFinally(env); +}; diff --git a/src/rocksdb/napi/workers/snapshot_workers.h b/src/rocksdb/napi/workers/snapshot_workers.h new file mode 100644 index 00000000..05c3a352 --- /dev/null +++ b/src/rocksdb/napi/workers/snapshot_workers.h @@ -0,0 +1,26 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include + +#include "../worker.h" +#include "../snapshot.h" + +/** + * Worker class for closing a snapshot + */ +struct SnapshotReleaseWorker final : public PriorityWorker { + SnapshotReleaseWorker(napi_env env, Snapshot* snapshot, napi_value callback); + + ~SnapshotReleaseWorker(); + + void DoExecute() override; + + void DoFinally(napi_env env) override; + + private: + Snapshot* snapshot_; +}; diff --git a/src/rocksdb/napi/workers/transaction_workers.cpp b/src/rocksdb/napi/workers/transaction_workers.cpp new file mode 100644 index 00000000..8820371d --- /dev/null +++ b/src/rocksdb/napi/workers/transaction_workers.cpp @@ -0,0 +1,280 @@ +#define NAPI_VERSION 3 + +#include "transaction_workers.h" + +#include +#include + +#include +#include +#include + +#include "../worker.h" +#include "../transaction.h" +#include "../iterator.h" +#include "../snapshot.h" +#include "../utils.h" + +/** + * Transaction commit + */ + +TransactionCommitWorker::TransactionCommitWorker(napi_env env, + Transaction* tran, + napi_value callback) + : BaseWorker(env, tran, callback, "rocksdb.transaction.commit") {} + +TransactionCommitWorker::~TransactionCommitWorker() = default; + +void TransactionCommitWorker::DoExecute() { SetStatus(transaction_->Commit()); } + +void TransactionCommitWorker::DoFinally(napi_env env) { + transaction_->Detach(env); + BaseWorker::DoFinally(env); +} + +/** + * Transaction rollback + */ + +TransactionRollbackWorker::TransactionRollbackWorker(napi_env env, + Transaction* tran, + napi_value callback) + : BaseWorker(env, tran, callback, "rocksdb.transaction.rollback") {} + +TransactionRollbackWorker::~TransactionRollbackWorker() = default; + +void TransactionRollbackWorker::DoExecute() { + SetStatus(transaction_->Rollback()); +} + +void TransactionRollbackWorker::DoFinally(napi_env env) { + transaction_->Detach(env); + BaseWorker::DoFinally(env); +} + +/** + * Transaction get + */ + +TransactionGetWorker::TransactionGetWorker(napi_env env, Transaction* tran, + napi_value callback, + rocksdb::Slice key, + const bool asBuffer, + const bool fillCache, + const TransactionSnapshot* snapshot) + : PriorityWorker(env, tran, callback, "rocksdb.transaction.get"), + key_(key), + asBuffer_(asBuffer) { + options_.fill_cache = fillCache; + if (snapshot != nullptr) options_.snapshot = snapshot->snapshot(); +} + +TransactionGetWorker::~TransactionGetWorker() { DisposeSliceBuffer(key_); } + +void TransactionGetWorker::DoExecute() { + SetStatus(transaction_->Get(options_, key_, value_)); +} + +void TransactionGetWorker::HandleOKCallback(napi_env env, napi_value callback) { + napi_value argv[2]; + napi_get_null(env, &argv[0]); + Entry::Convert(env, &value_, asBuffer_, &argv[1]); + CallFunction(env, callback, 2, argv); +} + +/** + * Transaction get for update + */ + +TransactionGetForUpdateWorker::TransactionGetForUpdateWorker( + napi_env env, Transaction* tran, napi_value callback, rocksdb::Slice key, + const bool asBuffer, const bool fillCache, + const TransactionSnapshot* snapshot) + : PriorityWorker(env, tran, callback, "rocksdb.transaction.get_for_update"), + key_(key), + asBuffer_(asBuffer) { + options_.fill_cache = fillCache; + if (snapshot != 
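+// Usage sketch (assumed option key `snapshot`, matching the *SnapshotProperty
+// helpers): a TransactionSnapshot taken early makes later reads inside the
+// transaction repeatable, while writes stay conflict-checked against it:
+//
+//   const snap = rocksdb.transactionSnapshot(tran);
+//   rocksdb.transactionGet(tran, key, { snapshot: snap }, callback);
+//   rocksdb.transactionGetForUpdate(tran, key, { snapshot: snap }, callback);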
nullptr) options_.snapshot = snapshot->snapshot(); +} + +TransactionGetForUpdateWorker::~TransactionGetForUpdateWorker() { + DisposeSliceBuffer(key_); +} + +void TransactionGetForUpdateWorker::DoExecute() { + SetStatus(transaction_->GetForUpdate(options_, key_, value_)); +} + +void TransactionGetForUpdateWorker::HandleOKCallback(napi_env env, + napi_value callback) { + napi_value argv[2]; + napi_get_null(env, &argv[0]); + Entry::Convert(env, &value_, asBuffer_, &argv[1]); + CallFunction(env, callback, 2, argv); +} + +/** + * Transaction multi get + */ + +TransactionMultiGetWorker::TransactionMultiGetWorker( + napi_env env, Transaction* transaction, + const std::vector* keys, napi_value callback, + const bool valueAsBuffer, const bool fillCache, + const TransactionSnapshot* snapshot) + : PriorityWorker(env, transaction, callback, + "rocksdb.transaction.multiget"), + keys_(keys), + valueAsBuffer_(valueAsBuffer) { + options_.fill_cache = fillCache; + if (snapshot) options_.snapshot = snapshot->snapshot(); +} + +TransactionMultiGetWorker::~TransactionMultiGetWorker() { delete keys_; } + +void TransactionMultiGetWorker::DoExecute() { + // NAPI requires a vector of string pointers + // the nullptr can be used to represent `undefined` + values_.reserve(keys_->size()); + // RocksDB requires just a vector of strings + // these will be automatically deallocated + std::vector values(keys_->size()); + std::vector statuses = + transaction_->MultiGet(options_, *keys_, values); + for (size_t i = 0; i != statuses.size(); i++) { + if (statuses[i].ok()) { + std::string* value = new std::string(values[i]); + values_.push_back(value); + } else if (statuses[i].IsNotFound()) { + values_.push_back(nullptr); + } else { + for (const std::string* value : values_) { + if (value != NULL) delete value; + } + SetStatus(statuses[i]); + break; + } + } +} + +void TransactionMultiGetWorker::HandleOKCallback(napi_env env, + napi_value callback) { + size_t size = values_.size(); + napi_value array; + napi_create_array_with_length(env, size, &array); + + for (size_t idx = 0; idx < size; idx++) { + std::string* value = values_[idx]; + napi_value element; + Entry::Convert(env, value, valueAsBuffer_, &element); + napi_set_element(env, array, static_cast(idx), element); + if (value != nullptr) delete value; + } + + napi_value argv[2]; + napi_get_null(env, &argv[0]); + argv[1] = array; + CallFunction(env, callback, 2, argv); +} + +/** + * Transaction multi get for update + */ + +TransactionMultiGetForUpdateWorker::TransactionMultiGetForUpdateWorker( + napi_env env, Transaction* transaction, + const std::vector* keys, napi_value callback, + const bool valueAsBuffer, const bool fillCache, + const TransactionSnapshot* snapshot) + : PriorityWorker(env, transaction, callback, + "rocksdb.transaction.multiget_for_update"), + keys_(keys), + valueAsBuffer_(valueAsBuffer) { + options_.fill_cache = fillCache; + if (snapshot) options_.snapshot = snapshot->snapshot(); +} + +TransactionMultiGetForUpdateWorker::~TransactionMultiGetForUpdateWorker() { + delete keys_; +} + +void TransactionMultiGetForUpdateWorker::DoExecute() { + // NAPI requires a vector of string pointers + // the nullptr can be used to represent `undefined` + values_.reserve(keys_->size()); + // RocksDB requires just a vector of strings + // these will be automatically deallocated + std::vector values(keys_->size()); + std::vector statuses = + transaction_->MultiGetForUpdate(options_, *keys_, values); + for (size_t i = 0; i != statuses.size(); i++) { + if (statuses[i].ok()) { + 
std::string* value = new std::string(values[i]); + values_.push_back(value); + } else if (statuses[i].IsNotFound()) { + values_.push_back(nullptr); + } else { + for (const std::string* value : values_) { + if (value != NULL) delete value; + } + SetStatus(statuses[i]); + break; + } + } +} + +void TransactionMultiGetForUpdateWorker::HandleOKCallback(napi_env env, + napi_value callback) { + size_t size = values_.size(); + napi_value array; + napi_create_array_with_length(env, size, &array); + + for (size_t idx = 0; idx < size; idx++) { + std::string* value = values_[idx]; + napi_value element; + Entry::Convert(env, value, valueAsBuffer_, &element); + napi_set_element(env, array, static_cast(idx), element); + if (value != nullptr) delete value; + } + + napi_value argv[2]; + napi_get_null(env, &argv[0]); + argv[1] = array; + CallFunction(env, callback, 2, argv); +} + +/** + * Transaction put + */ + +TransactionPutWorker::TransactionPutWorker(napi_env env, Transaction* tran, + napi_value callback, + rocksdb::Slice key, + rocksdb::Slice value) + : PriorityWorker(env, tran, callback, "rocksdb.transaction.put"), + key_(key), + value_(value) {} + +TransactionPutWorker::~TransactionPutWorker() { + DisposeSliceBuffer(key_); + DisposeSliceBuffer(value_); +} + +void TransactionPutWorker::DoExecute() { + SetStatus(transaction_->Put(key_, value_)); +} + +/** + * Transaction del + */ + +TransactionDelWorker::TransactionDelWorker(napi_env env, Transaction* tran, + napi_value callback, + rocksdb::Slice key) + : PriorityWorker(env, tran, callback, "rocksdb.transaction.del"), + key_(key) {} + +TransactionDelWorker::~TransactionDelWorker() { DisposeSliceBuffer(key_); } + +void TransactionDelWorker::DoExecute() { SetStatus(transaction_->Del(key_)); } diff --git a/src/rocksdb/napi/workers/transaction_workers.h b/src/rocksdb/napi/workers/transaction_workers.h new file mode 100644 index 00000000..9a87ae65 --- /dev/null +++ b/src/rocksdb/napi/workers/transaction_workers.h @@ -0,0 +1,159 @@ +#pragma once + +#ifndef NAPI_VERSION +#define NAPI_VERSION 3 +#endif + +#include +#include + +#include +#include +#include +#include + +#include "../worker.h" +#include "../transaction.h" +#include "../snapshot.h" + +/** + * Transaction commit worker + */ +struct TransactionCommitWorker final : public BaseWorker { + TransactionCommitWorker(napi_env env, Transaction* tran, napi_value callback); + + ~TransactionCommitWorker(); + + void DoExecute() override; + + void DoFinally(napi_env env) override; +}; + +/** + * Rollback commit worker + */ +struct TransactionRollbackWorker final : public BaseWorker { + TransactionRollbackWorker(napi_env env, Transaction* tran, + napi_value callback); + + ~TransactionRollbackWorker(); + + void DoExecute() override; + + void DoFinally(napi_env env) override; +}; + +/** + * Worker for transaction get + */ +struct TransactionGetWorker final : public PriorityWorker { + TransactionGetWorker(napi_env env, Transaction* tran, napi_value callback, + rocksdb::Slice key, const bool asBuffer, + const bool fillCache, + const TransactionSnapshot* snapshot = nullptr); + + ~TransactionGetWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + private: + rocksdb::ReadOptions options_; + rocksdb::Slice key_; + std::string value_; + const bool asBuffer_; +}; + +/** + * Worker for transaction get for update + */ +struct TransactionGetForUpdateWorker final : public PriorityWorker { + TransactionGetForUpdateWorker(napi_env env, Transaction* tran, + napi_value 
callback, rocksdb::Slice key, + const bool asBuffer, const bool fillCache, + const TransactionSnapshot* snapshot = nullptr); + + ~TransactionGetForUpdateWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + private: + rocksdb::ReadOptions options_; + rocksdb::Slice key_; + std::string value_; + const bool asBuffer_; +}; + +struct TransactionMultiGetWorker final : public PriorityWorker { + TransactionMultiGetWorker(napi_env env, Transaction* transaction, + const std::vector* keys, + napi_value callback, const bool valueAsBuffer, + const bool fillCache, + const TransactionSnapshot* snapshot = nullptr); + + ~TransactionMultiGetWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + private: + rocksdb::ReadOptions options_; + const std::vector* keys_; + std::vector values_; + const bool valueAsBuffer_; +}; + +struct TransactionMultiGetForUpdateWorker final : public PriorityWorker { + TransactionMultiGetForUpdateWorker( + napi_env env, Transaction* transaction, + const std::vector* keys, napi_value callback, + const bool valueAsBuffer, const bool fillCache, + const TransactionSnapshot* snapshot = nullptr); + + ~TransactionMultiGetForUpdateWorker(); + + void DoExecute() override; + + void HandleOKCallback(napi_env env, napi_value callback) override; + + private: + rocksdb::ReadOptions options_; + const std::vector* keys_; + std::vector values_; + const bool valueAsBuffer_; +}; + +/** + * Worker for transaction put + */ +struct TransactionPutWorker final : public PriorityWorker { + TransactionPutWorker(napi_env env, Transaction* tran, napi_value callback, + rocksdb::Slice key, rocksdb::Slice value); + + ~TransactionPutWorker(); + + void DoExecute() override; + + private: + rocksdb::Slice key_; + rocksdb::Slice value_; +}; + +/** + * Worker for transaction del + */ +struct TransactionDelWorker final : public PriorityWorker { + TransactionDelWorker(napi_env env, Transaction* tran, napi_value callback, + rocksdb::Slice key); + + ~TransactionDelWorker(); + + void DoExecute() override; + + private: + rocksdb::Slice key_; +}; diff --git a/src/rocksdb/rocksdb.ts b/src/rocksdb/rocksdb.ts new file mode 100644 index 00000000..f97265c7 --- /dev/null +++ b/src/rocksdb/rocksdb.ts @@ -0,0 +1,278 @@ +import type { Callback } from '../types'; +import type { + RocksDBDatabase, + RocksDBIterator, + RocksDBTransaction, + RocksDBSnapshot, + RocksDBTransactionSnapshot, + RocksDBBatch, + RocksDBDatabaseOptions, + RocksDBGetOptions, + RocksDBPutOptions, + RocksDBDelOptions, + RocksDBClearOptions, + RocksDBIteratorOptions, + RocksDBTransactionOptions, + RocksDBBatchOptions, + RocksDBBatchDelOperation, + RocksDBBatchPutOperation, + RocksDBCountOptions, +} from './types'; +import path from 'path'; +import nodeGypBuild from 'node-gyp-build'; + +interface RocksDB { + dbInit(): RocksDBDatabase; + dbOpen( + database: RocksDBDatabase, + location: string, + options: RocksDBDatabaseOptions, + callback: Callback<[], void>, + ): void; + dbClose(database: RocksDBDatabase, callback: Callback<[], void>): void; + dbGet( + database: RocksDBDatabase, + key: string | Buffer, + options: RocksDBGetOptions & { valueEncoding?: 'utf8' }, + callback: Callback<[string], void>, + ): void; + dbGet( + database: RocksDBDatabase, + key: string | Buffer, + options: RocksDBGetOptions & { valueEncoding: 'buffer' }, + callback: Callback<[Buffer], void>, + ): void; + dbMultiGet( + database: RocksDBDatabase, + keys: Array, + options: 
RocksDBGetOptions & { valueEncoding?: 'utf8' }, + callback: Callback<[Array], void>, + ): void; + dbMultiGet( + database: RocksDBDatabase, + keys: Array, + options: RocksDBGetOptions & { valueEncoding: 'buffer' }, + callback: Callback<[Array], void>, + ): void; + dbPut( + database: RocksDBDatabase, + key: string | Buffer, + value: string | Buffer, + options: RocksDBPutOptions, + callback: Callback<[], void>, + ): void; + dbDel( + database: RocksDBDatabase, + key: string | Buffer, + options: RocksDBDelOptions, + callback: Callback<[], void>, + ): void; + dbClear( + database: RocksDBDatabase, + options: RocksDBClearOptions, + callback: Callback<[], void>, + ): void; + dbCount( + database: RocksDBDatabase, + options: RocksDBCountOptions, + callback: Callback<[number], void>, + ): void; + dbApproximateSize( + database: RocksDBDatabase, + start: string | Buffer, + end: string | Buffer, + callback: Callback<[number], void>, + ): void; + dbCompactRange( + database: RocksDBDatabase, + start: string | Buffer, + end: string | Buffer, + callback: Callback<[], void>, + ): void; + dbGetProperty(database: RocksDBDatabase, property: string): string; + snapshotInit(database: RocksDBDatabase): RocksDBSnapshot; + snapshotRelease( + snapshot: RocksDBSnapshot, + callback: Callback<[], void>, + ): void; + destroyDb(location: string, callback: Callback<[], void>): void; + repairDb(location: string, callback: Callback<[], void>): void; + iteratorInit( + database: RocksDBDatabase, + options: RocksDBIteratorOptions & { + keyEncoding: 'buffer'; + valueEncoding: 'buffer'; + }, + ): RocksDBIterator; + iteratorInit( + database: RocksDBDatabase, + options: RocksDBIteratorOptions & { keyEncoding: 'buffer' }, + ): RocksDBIterator; + iteratorInit( + database: RocksDBDatabase, + options: RocksDBIteratorOptions & { valueEncoding: 'buffer' }, + ): RocksDBIterator; + iteratorInit( + database: RocksDBDatabase, + options: RocksDBIteratorOptions, + ): RocksDBIterator; + iteratorSeek( + iterator: RocksDBIterator, + target: K, + ): void; + iteratorClose(iterator: RocksDBIterator, callback: Callback<[], void>): void; + iteratorNextv( + iterator: RocksDBIterator, + size: number, + callback: Callback<[Array<[K, V]>, boolean], void>, + ): void; + batchDo( + database: RocksDBDatabase, + operations: Array, + options: RocksDBBatchOptions, + callback: Callback<[], void>, + ): void; + batchInit(database: RocksDBDatabase): RocksDBBatch; + batchPut( + batch: RocksDBBatch, + key: string | Buffer, + value: string | Buffer, + ): void; + batchDel(batch: RocksDBBatch, key: string | Buffer): void; + batchClear(batch: RocksDBBatch): void; + batchWrite( + batch: RocksDBBatch, + options: RocksDBBatchOptions, + callback: Callback<[], void>, + ): void; + transactionInit( + database: RocksDBDatabase, + options: RocksDBTransactionOptions, + ): RocksDBTransaction; + transactionId(transaction: RocksDBTransaction): number; + transactionCommit( + transaction: RocksDBTransaction, + callback: Callback<[], void>, + ): void; + transactionRollback( + transaction: RocksDBTransaction, + callback: Callback<[], void>, + ): void; + transactionGet( + transaction: RocksDBTransaction, + key: string | Buffer, + options: RocksDBGetOptions & { + valueEncoding?: 'utf8'; + }, + callback: Callback<[string], void>, + ): void; + transactionGet( + transaction: RocksDBTransaction, + key: string | Buffer, + options: RocksDBGetOptions & { + valueEncoding: 'buffer'; + }, + callback: Callback<[Buffer], void>, + ): void; + transactionGetForUpdate( + transaction: RocksDBTransaction, + 
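+// Usage sketch (illustrative, not part of this diff): the interface above is
+// a thin callback-style mirror of the native addon loaded below via
+// node-gyp-build:
+//
+//   const db = rocksdb.dbInit();
+//   rocksdb.dbOpen(db, '/tmp/example-db', {}, (e) => {
+//     if (e != null) throw e;
+//     rocksdb.dbPut(db, 'key', 'value', {}, (e) => {
+//       rocksdb.dbGet(db, 'key', {}, (e, value) => console.log(value));
+//     });
+//   });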
key: string | Buffer, + options: RocksDBGetOptions & { + valueEncoding?: 'utf8'; + }, + callback: Callback<[string], void>, + ): void; + transactionGetForUpdate( + transaction: RocksDBTransaction, + key: string | Buffer, + options: RocksDBGetOptions & { + valueEncoding: 'buffer'; + }, + callback: Callback<[Buffer], void>, + ): void; + transactionMultiGet( + transaction: RocksDBTransaction, + keys: Array, + options: RocksDBGetOptions & { + valueEncoding?: 'utf8'; + }, + callback: Callback<[Array], void>, + ): void; + transactionMultiGet( + transaction: RocksDBTransaction, + keys: Array, + options: RocksDBGetOptions & { + valueEncoding: 'buffer'; + }, + callback: Callback<[Array], void>, + ): void; + transactionMultiGetForUpdate( + transaction: RocksDBTransaction, + keys: Array, + options: RocksDBGetOptions & { + valueEncoding?: 'utf8'; + }, + callback: Callback<[Array], void>, + ): void; + transactionMultiGetForUpdate( + transaction: RocksDBTransaction, + keys: Array, + options: RocksDBGetOptions & { + valueEncoding: 'buffer'; + }, + callback: Callback<[Array], void>, + ): void; + transactionPut( + transaction: RocksDBTransaction, + key: string | Buffer, + value: string | Buffer, + callback: Callback<[], void>, + ): void; + transactionDel( + transaction: RocksDBTransaction, + key: string | Buffer, + callback: Callback<[], void>, + ): void; + transactionSnapshot( + transaction: RocksDBTransaction, + ): RocksDBTransactionSnapshot; + transactionIteratorInit( + transaction: RocksDBTransaction, + options: RocksDBIteratorOptions & { + keyEncoding: 'buffer'; + valueEncoding: 'buffer'; + }, + ): RocksDBIterator; + transactionIteratorInit( + transaction: RocksDBTransaction, + options: RocksDBIteratorOptions & { + keyEncoding: 'buffer'; + }, + ): RocksDBIterator; + transactionIteratorInit( + transaction: RocksDBTransaction, + options: RocksDBIteratorOptions & { + valueEncoding: 'buffer'; + }, + ): RocksDBIterator; + transactionIteratorInit( + transaction: RocksDBTransaction, + options: RocksDBIteratorOptions, + ): RocksDBIterator; + transactionClear( + transaction: RocksDBTransaction, + options: RocksDBClearOptions, + callback: Callback<[], void>, + ): void; + transactionCount( + transaction: RocksDBTransaction, + options: RocksDBCountOptions, + callback: Callback<[number], void>, + ): void; +} + +const rocksdb: RocksDB = nodeGypBuild(path.join(__dirname, '../../')); + +export default rocksdb; + +export type { RocksDB }; diff --git a/src/rocksdb/rocksdbP.ts b/src/rocksdb/rocksdbP.ts new file mode 100644 index 00000000..6c3070c3 --- /dev/null +++ b/src/rocksdb/rocksdbP.ts @@ -0,0 +1,291 @@ +import type { + RocksDBDatabase, + RocksDBIterator, + RocksDBTransaction, + RocksDBSnapshot, + RocksDBTransactionSnapshot, + RocksDBBatch, + RocksDBDatabaseOptions, + RocksDBGetOptions, + RocksDBPutOptions, + RocksDBDelOptions, + RocksDBClearOptions, + RocksDBCountOptions, + RocksDBIteratorOptions, + RocksDBTransactionOptions, + RocksDBBatchOptions, + RocksDBBatchDelOperation, + RocksDBBatchPutOperation, +} from './types'; +import rocksdb from './rocksdb'; +import * as utils from '../utils'; + +interface RocksDBP { + dbInit(): RocksDBDatabase; + dbOpen( + database: RocksDBDatabase, + location: string, + options: RocksDBDatabaseOptions, + ): Promise; + dbClose(database: RocksDBDatabase): Promise; + dbGet( + database: RocksDBDatabase, + key: string | Buffer, + options: RocksDBGetOptions & { valueEncoding?: 'utf8' }, + ): Promise; + dbGet( + database: RocksDBDatabase, + key: string | Buffer, + options: 
RocksDBGetOptions & { valueEncoding: 'buffer' }, + ): Promise; + dbMultiGet( + database: RocksDBDatabase, + keys: Array, + options: RocksDBGetOptions & { valueEncoding?: 'utf8' }, + ): Promise>; + dbMultiGet( + database: RocksDBDatabase, + keys: Array, + options: RocksDBGetOptions & { valueEncoding: 'buffer' }, + ): Promise>; + dbPut( + database: RocksDBDatabase, + key: string | Buffer, + value: string | Buffer, + options: RocksDBPutOptions, + ): Promise; + dbDel( + database: RocksDBDatabase, + key: string | Buffer, + options: RocksDBDelOptions, + ): Promise; + dbClear( + database: RocksDBDatabase, + options: RocksDBClearOptions, + ): Promise; + dbCount( + database: RocksDBDatabase, + options: RocksDBCountOptions, + ): Promise; + dbApproximateSize( + database: RocksDBDatabase, + start: string | Buffer, + end: string | Buffer, + ): Promise; + dbCompactRange( + database: RocksDBDatabase, + start: string | Buffer, + end: string | Buffer, + ): Promise; + dbGetProperty(database: RocksDBDatabase, property: string): string; + snapshotInit(database: RocksDBDatabase): RocksDBSnapshot; + snapshotRelease(snapshot: RocksDBSnapshot): Promise; + destroyDb(location: string): Promise; + repairDb(location: string): Promise; + iteratorInit( + database: RocksDBDatabase, + options: RocksDBIteratorOptions & { + keyEncoding: 'buffer'; + valueEncoding: 'buffer'; + }, + ): RocksDBIterator; + iteratorInit( + database: RocksDBDatabase, + options: RocksDBIteratorOptions & { keyEncoding: 'buffer' }, + ): RocksDBIterator; + iteratorInit( + database: RocksDBDatabase, + options: RocksDBIteratorOptions & { valueEncoding: 'buffer' }, + ): RocksDBIterator; + iteratorInit( + database: RocksDBDatabase, + options: RocksDBIteratorOptions, + ): RocksDBIterator; + iteratorSeek( + iterator: RocksDBIterator, + target: K, + ): void; + iteratorClose(iterator: RocksDBIterator): Promise; + iteratorNextv( + iterator: RocksDBIterator, + size: number, + ): Promise<[Array<[K, V]>, boolean]>; + batchDo( + database: RocksDBDatabase, + operations: Array, + options: RocksDBBatchOptions, + ): Promise; + batchInit(database: RocksDBDatabase): RocksDBBatch; + batchPut( + batch: RocksDBBatch, + key: string | Buffer, + value: string | Buffer, + ): void; + batchDel(batch: RocksDBBatch, key: string | Buffer): void; + batchClear(batch: RocksDBBatch): void; + batchWrite(batch: RocksDBBatch, options: RocksDBBatchOptions): Promise; + transactionInit( + database: RocksDBDatabase, + options: RocksDBTransactionOptions, + ): RocksDBTransaction; + transactionId(transaction: RocksDBTransaction): number; + transactionCommit(transaction: RocksDBTransaction): Promise; + transactionRollback(transaction: RocksDBTransaction): Promise; + transactionGet( + transaction: RocksDBTransaction, + key: string | Buffer, + options: RocksDBGetOptions & { + valueEncoding?: 'utf8'; + }, + ): Promise; + transactionGet( + transaction: RocksDBTransaction, + key: string | Buffer, + options: RocksDBGetOptions & { + valueEncoding: 'buffer'; + }, + ): Promise; + transactionGetForUpdate( + transaction: RocksDBTransaction, + key: string | Buffer, + options: RocksDBGetOptions & { + valueEncoding?: 'utf8'; + }, + ): Promise; + transactionGetForUpdate( + transaction: RocksDBTransaction, + key: string | Buffer, + options: RocksDBGetOptions & { + valueEncoding: 'buffer'; + }, + ): Promise; + transactionMultiGet( + transaction: RocksDBTransaction, + keys: Array, + options: RocksDBGetOptions & { + valueEncoding?: 'utf8'; + }, + ): Promise>; + transactionMultiGet( + transaction: 
RocksDBTransaction, + keys: Array, + options: RocksDBGetOptions & { + valueEncoding: 'buffer'; + }, + ): Promise>; + transactionMultiGetForUpdate( + transaction: RocksDBTransaction, + keys: Array, + options: RocksDBGetOptions & { + valueEncoding?: 'utf8'; + }, + ): Promise>; + transactionMultiGetForUpdate( + transaction: RocksDBTransaction, + keys: Array, + options: RocksDBGetOptions & { + valueEncoding: 'buffer'; + }, + ): Promise>; + transactionPut( + transaction: RocksDBTransaction, + key: string | Buffer, + value: string | Buffer, + ): Promise; + transactionDel( + transaction: RocksDBTransaction, + key: string | Buffer, + ): Promise; + transactionSnapshot( + transaction: RocksDBTransaction, + ): RocksDBTransactionSnapshot; + transactionIteratorInit( + transaction: RocksDBTransaction, + options: RocksDBIteratorOptions & { + keyEncoding: 'buffer'; + valueEncoding: 'buffer'; + }, + ): RocksDBIterator; + transactionIteratorInit( + transaction: RocksDBTransaction, + options: RocksDBIteratorOptions & { + keyEncoding: 'buffer'; + }, + ): RocksDBIterator; + transactionIteratorInit( + transaction: RocksDBTransaction, + options: RocksDBIteratorOptions & { + valueEncoding: 'buffer'; + }, + ): RocksDBIterator; + transactionIteratorInit( + database: RocksDBTransaction, + options: RocksDBIteratorOptions, + ): RocksDBIterator; + transactionClear( + transaction: RocksDBTransaction, + options: RocksDBClearOptions, + ): Promise; + transactionCount( + transaction: RocksDBTransaction, + options: RocksDBCountOptions, + ): Promise; +} + +/** + * Promisified version of RocksDB + */ +const rocksdbP: RocksDBP = { + dbInit: rocksdb.dbInit.bind(rocksdb), + dbOpen: utils.promisify(rocksdb.dbOpen).bind(rocksdb), + dbClose: utils.promisify(rocksdb.dbClose).bind(rocksdb), + dbGet: utils.promisify(rocksdb.dbGet).bind(rocksdb), + dbMultiGet: utils.promisify(rocksdb.dbMultiGet).bind(rocksdb), + dbPut: utils.promisify(rocksdb.dbPut).bind(rocksdb), + dbDel: utils.promisify(rocksdb.dbDel).bind(rocksdb), + dbClear: utils.promisify(rocksdb.dbClear).bind(rocksdb), + dbCount: utils.promisify(rocksdb.dbCount).bind(rocksdb), + dbApproximateSize: utils.promisify(rocksdb.dbApproximateSize).bind(rocksdb), + dbCompactRange: utils.promisify(rocksdb.dbCompactRange).bind(rocksdb), + dbGetProperty: rocksdb.dbGetProperty.bind(rocksdb), + snapshotInit: rocksdb.snapshotInit.bind(rocksdb), + snapshotRelease: utils.promisify(rocksdb.snapshotRelease).bind(rocksdb), + destroyDb: utils.promisify(rocksdb.destroyDb).bind(rocksdb), + repairDb: utils.promisify(rocksdb.repairDb).bind(rocksdb), + iteratorInit: rocksdb.iteratorInit.bind(rocksdb), + iteratorSeek: rocksdb.iteratorSeek.bind(rocksdb), + iteratorClose: utils.promisify(rocksdb.iteratorClose).bind(rocksdb), + iteratorNextv: utils.promisify(rocksdb.iteratorNextv).bind(rocksdb), + batchDo: utils.promisify(rocksdb.batchDo).bind(rocksdb), + batchInit: rocksdb.batchInit.bind(rocksdb), + batchPut: rocksdb.batchPut.bind(rocksdb), + batchDel: rocksdb.batchDel.bind(rocksdb), + batchClear: rocksdb.batchClear.bind(rocksdb), + batchWrite: rocksdb.batchWrite.bind(rocksdb), + transactionInit: rocksdb.transactionInit.bind(rocksdb), + transactionId: rocksdb.transactionId.bind(rocksdb), + transactionCommit: utils.promisify(rocksdb.transactionCommit).bind(rocksdb), + transactionRollback: utils + .promisify(rocksdb.transactionRollback) + .bind(rocksdb), + transactionGet: utils.promisify(rocksdb.transactionGet).bind(rocksdb), + transactionGetForUpdate: utils + .promisify(rocksdb.transactionGetForUpdate) + 
.bind(rocksdb), + transactionMultiGet: utils + .promisify(rocksdb.transactionMultiGet) + .bind(rocksdb), + transactionMultiGetForUpdate: utils + .promisify(rocksdb.transactionMultiGetForUpdate) + .bind(rocksdb), + transactionPut: utils.promisify(rocksdb.transactionPut).bind(rocksdb), + transactionDel: utils.promisify(rocksdb.transactionDel).bind(rocksdb), + transactionSnapshot: rocksdb.transactionSnapshot.bind(rocksdb), + transactionIteratorInit: rocksdb.transactionIteratorInit.bind(rocksdb), + transactionClear: utils.promisify(rocksdb.transactionClear).bind(rocksdb), + transactionCount: utils.promisify(rocksdb.transactionCount).bind(rocksdb), +}; + +export default rocksdbP; + +export type { RocksDBP }; diff --git a/src/rocksdb/types.ts b/src/rocksdb/types.ts new file mode 100644 index 00000000..5a7e326b --- /dev/null +++ b/src/rocksdb/types.ts @@ -0,0 +1,186 @@ +import type { Opaque } from '../types'; + +/** + * Note that `undefined` is not a valid value for these options + * If properties exist, they must have the correct type + */ + +/** + * RocksDBDatabase object + * A `napi_external` type + */ +type RocksDBDatabase = Opaque<'RocksDBDatabase', object>; + +/** + * RocksDBIterator object + * A `napi_external` type + * If `keys` or `values` is set to `false` then + * `K` and `V` will be an empty buffer + * If `keys` and `values` is set to `false`, the iterator will + * give back empty array as entries + */ +type RocksDBIterator< + K extends string | Buffer = string | Buffer, + V extends string | Buffer = string | Buffer, +> = Opaque<'RocksDBIterator', object> & { + readonly [brandRocksDBIteratorK]: K; + readonly [brandRocksDBIteratorV]: V; +}; +declare const brandRocksDBIteratorK: unique symbol; +declare const brandRocksDBIteratorV: unique symbol; + +/** + * RocksDBTransaction object + * A `napi_external` type + */ +type RocksDBTransaction = Opaque<'RocksDBTransaction', object>; + +/** + * RocksDBBatch object + * A `napi_external` type + */ +type RocksDBBatch = Opaque<'RocksDBBatch', object>; + +/** + * RocksDBSnapshot object + * A `napi_external` type + */ +type RocksDBSnapshot = Opaque<'RocksDBSnapshot', object>; + +/** + * RocksDBTransactionSnapshot object + * A `napi_external` type + */ +type RocksDBTransactionSnapshot = Opaque<'RocksDBTransactionSnapshot', object>; + +/** + * RocksDB database options + */ +type RocksDBDatabaseOptions = { + createIfMissing?: boolean; // Default true + errorIfExists?: boolean; // Default false + compression?: boolean; // Default true + infoLogLevel?: 'debug' | 'info' | 'warn' | 'error' | 'fatal' | 'header'; // Default undefined + cacheSize?: number; // Default 8 * 1024 * 1024 + writeBufferSize?: number; // Default 4 * 1024 * 1024 + blockSize?: number; // Default 4096 + maxOpenFiles?: number; // Default 1000 + blockRestartInterval?: number; // Default 16 + maxFileSize?: number; // Default 2 * 1024 * 1024 +}; + +/** + * Get options + */ +type RocksDBGetOptions< + S extends RocksDBSnapshot | RocksDBTransactionSnapshot = RocksDBSnapshot, +> = { + valueEncoding?: 'utf8' | 'buffer'; // Default 'utf8'; + fillCache?: boolean; // Default true + snapshot?: S; +}; + +/** + * Put options + */ +type RocksDBPutOptions = { + /** + * If `true`, rocksdb will perform `fsync()` before completing operation + * It is still asynchronous relative to Node.js + * If the operating system crashes, writes may be lost + * Prefer to flip this to be true when a transaction batch is written + * This will amortize the cost of `fsync()` across the entire transaction + */ + sync?: 
boolean; // Default false +}; + +/** + * Del options + */ +type RocksDBDelOptions = RocksDBPutOptions; + +/** + * Range options + */ +type RocksDBRangeOptions = { + gt?: string | Buffer; + gte?: string | Buffer; + lt?: string | Buffer; + lte?: string | Buffer; + reverse?: boolean; // Default false + limit?: number; // Default -1 +}; + +/** + * Clear options + */ +type RocksDBClearOptions< + S extends RocksDBSnapshot | RocksDBTransactionSnapshot = RocksDBSnapshot, +> = Omit & { + snapshot?: S; + sync?: S extends RocksDBSnapshot ? boolean : void; // Default false +}; + +/** + * Count options + */ +type RocksDBCountOptions< + S extends RocksDBSnapshot | RocksDBTransactionSnapshot = RocksDBSnapshot, +> = Omit & { + snapshot?: S; +}; + +/** + * Iterator options + */ +type RocksDBIteratorOptions< + S extends RocksDBSnapshot | RocksDBTransactionSnapshot = RocksDBSnapshot, +> = RocksDBGetOptions & + RocksDBRangeOptions & { + keys?: boolean; + values?: boolean; + keyEncoding?: 'utf8' | 'buffer'; // Default 'utf8' + highWaterMarkBytes?: number; // Default is 16 * 1024 + }; + +/** + * Transaction options + */ +type RocksDBTransactionOptions = RocksDBPutOptions; + +/** + * Batch options + */ +type RocksDBBatchOptions = RocksDBPutOptions; + +type RocksDBBatchPutOperation = { + type: 'put'; + key: string | Buffer; + value: string | Buffer; +}; + +type RocksDBBatchDelOperation = { + type: 'del'; + key: string | Buffer; +}; + +export type { + RocksDBDatabase, + RocksDBIterator, + RocksDBTransaction, + RocksDBBatch, + RocksDBSnapshot, + RocksDBTransactionSnapshot, + RocksDBDatabaseOptions, + RocksDBGetOptions, + RocksDBPutOptions, + RocksDBDelOptions, + RocksDBRangeOptions, + RocksDBClearOptions, + RocksDBCountOptions, + RocksDBIteratorOptions, + RocksDBTransactionOptions, + RocksDBBatchOptions, + RocksDBBatchDelOperation, + RocksDBBatchPutOperation, +}; diff --git a/src/types.ts b/src/types.ts index 9c2836eb..71f3dfd5 100644 --- a/src/types.ts +++ b/src/types.ts @@ -1,12 +1,56 @@ -import type { AbstractBatch } from 'abstract-leveldown'; import type fs from 'fs'; +import type { RWLockWriter } from '@matrixai/async-locks'; import type { WorkerManagerInterface } from '@matrixai/workers'; +import type { + RocksDBDatabaseOptions, + RocksDBIteratorOptions, + RocksDBBatchPutOperation, + RocksDBBatchDelOperation, + RocksDBClearOptions, + RocksDBCountOptions, + RocksDBSnapshot, + RocksDBTransactionSnapshot, +} from './rocksdb/types'; /** * Plain data dictionary */ type POJO = { [key: string]: any }; +/** + * Any type that can be turned into a string + */ +interface ToString { + toString(): string; +} + +/** + * Opaque types are wrappers of existing types + * that require smart constructors + */ +type Opaque = T & { readonly [brand]: K }; +declare const brand: unique symbol; + +/** + * Generic callback + */ +type Callback
<P extends Array<any> = [], R = any, E extends Error = Error> = { + (e: E, ...params: Partial<P>
): R; + (e?: null | undefined, ...params: P): R; +}; + +/** + * Merge A property types with B property types + * while B's property types override A's property types + */ +type Merge = { + [K in keyof (A & B)]: K extends keyof B + ? B[K] + : K extends keyof A + ? A[K] + : never; +}; + interface FileSystem { promises: { rm: typeof fs.promises.rm; @@ -40,6 +84,11 @@ type KeyPath = Readonly>; */ type LevelPath = Readonly>; +type DBOptions = Omit< + RocksDBDatabaseOptions, + 'createIfMissing' | 'errorIfExists' +>; + /** * Iterator options * The `keyAsBuffer` property controls @@ -48,30 +97,45 @@ type LevelPath = Readonly>; * The `valueAsBuffer` property controls value type * It should be considered to default to true */ -type DBIteratorOptions = { - gt?: KeyPath | Buffer | string; - gte?: KeyPath | Buffer | string; - lt?: KeyPath | Buffer | string; - lte?: KeyPath | Buffer | string; - limit?: number; - keys?: boolean; - values?: boolean; - keyAsBuffer?: boolean; - valueAsBuffer?: boolean; - reverse?: boolean; -}; +type DBIteratorOptions< + S extends RocksDBSnapshot | RocksDBTransactionSnapshot = RocksDBSnapshot, +> = Merge< + Omit, 'keyEncoding' | 'valueEncoding'>, + { + gt?: KeyPath | Buffer | string; + gte?: KeyPath | Buffer | string; + lt?: KeyPath | Buffer | string; + lte?: KeyPath | Buffer | string; + keyAsBuffer?: boolean; + valueAsBuffer?: boolean; + } +>; -/** - * Iterator - */ -type DBIterator = { - seek: (k: KeyPath | string | Buffer) => void; - end: () => Promise; - next: () => Promise<[K, V] | undefined>; - [Symbol.asyncIterator]: () => AsyncGenerator<[K, V]>; -}; +type DBClearOptions< + S extends RocksDBSnapshot | RocksDBTransactionSnapshot = RocksDBSnapshot, +> = Merge< + RocksDBClearOptions, + { + gt?: KeyPath | Buffer | string; + gte?: KeyPath | Buffer | string; + lt?: KeyPath | Buffer | string; + lte?: KeyPath | Buffer | string; + } +>; -type DBBatch = AbstractBatch; +type DBCountOptions< + S extends RocksDBSnapshot | RocksDBTransactionSnapshot = RocksDBSnapshot, +> = Merge< + RocksDBCountOptions, + { + gt?: KeyPath | Buffer | string; + gte?: KeyPath | Buffer | string; + lt?: KeyPath | Buffer | string; + lte?: KeyPath | Buffer | string; + } +>; + +type DBBatch = RocksDBBatchPutOperation | RocksDBBatchDelOperation; type DBOp_ = | { @@ -95,16 +159,28 @@ type DBOp = type DBOps = Array; +type MultiLockRequest = [ + key: ToString, + ...lockingParams: Parameters, +]; + export type { POJO, + ToString, + Opaque, + Callback, + Merge, FileSystem, Crypto, DBWorkerManagerInterface, KeyPath, LevelPath, + DBOptions, DBIteratorOptions, - DBIterator, + DBClearOptions, + DBCountOptions, DBBatch, DBOp, DBOps, + MultiLockRequest, }; diff --git a/src/utils.ts b/src/utils.ts index 1ffd2c5e..d87d0177 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -1,4 +1,4 @@ -import type { KeyPath, LevelPath } from './types'; +import type { Callback, Merge, KeyPath, LevelPath } from './types'; import * as errors from './errors'; /** @@ -90,6 +90,7 @@ function decodePart(data: Buffer): Buffer { /** * Used to convert possible KeyPath into legal KeyPath + * Returns a copy which can be mutated */ function toKeyPath(keyPath: KeyPath | string | Buffer): KeyPath { if (!Array.isArray(keyPath)) { @@ -274,6 +275,125 @@ function fromArrayBuffer( return Buffer.from(b, offset, length); } +/** + * Convert callback-style to promise-style + * If this is applied to overloaded function + * it will only choose one of the function signatures to use + */ +function promisify< + T extends Array, + P extends Array, + R extends T extends 
[] ? void : T extends [unknown] ? T[0] : T, +>( + f: (...args: [...params: P, callback: Callback]) => unknown, +): (...params: P) => Promise { + // Uses a regular function so that `this` can be bound + const g = function (...params: P): Promise { + return new Promise((resolve, reject) => { + const callback = (error, ...values) => { + if (error != null) { + return reject(error); + } + if (values.length === 0) { + (resolve as () => void)(); + } else if (values.length === 1) { + resolve(values[0] as R); + } else { + resolve(values as R); + } + return; + }; + params.push(callback); + f.apply(this, params); + }); + }; + Object.defineProperty(g, 'name', { value: f.name }); + return g; +} + +/** + * Native addons expect strict optional properties + * Properties that have the value undefined may be misinterpreted + * Apply these to options objects before passing them to the native addon + */ +function filterUndefined(o: object): void { + Object.keys(o).forEach((k) => { + if (o[k] === undefined) { + delete o[k]; + } + }); +} + +function iterationOptions< + O extends { + gt?: KeyPath | Buffer | string; + gte?: KeyPath | Buffer | string; + lt?: KeyPath | Buffer | string; + lte?: KeyPath | Buffer | string; + }, +>( + options: O, + levelPath: LevelPath, +): Merge< + O, + { + gt?: Buffer; + gte?: Buffer; + lt?: Buffer; + lte?: Buffer; + keyEncoding: 'buffer'; + valueEncoding: 'buffer'; + } +> { + const options_ = { + ...options, + // Internally we always use the buffer + keyEncoding: 'buffer' as const, + valueEncoding: 'buffer' as const, + } as Merge< + O, + { + gt?: Buffer; + gte?: Buffer; + lt?: Buffer; + lte?: Buffer; + keyEncoding: 'buffer'; + valueEncoding: 'buffer'; + } + >; + if (options?.gt != null) { + options_.gt = keyPathToKey(levelPath.concat(toKeyPath(options.gt))); + } + if (options?.gte != null) { + options_.gte = keyPathToKey(levelPath.concat(toKeyPath(options.gte))); + } + if (options?.gt == null && options?.gte == null) { + // If the level path is empty then all keys are allowed + if (levelPath.length > 0) { + options_.gt = levelPathToKey(levelPath); + } + } + if (options?.lt != null) { + options_.lt = keyPathToKey(levelPath.concat(toKeyPath(options.lt))); + } + if (options?.lte != null) { + options_.lte = keyPathToKey(levelPath.concat(toKeyPath(options.lte))); + } + if (options?.lt == null && options?.lte == null) { + // If the level path is empty then all keys are allowed + if (levelPath.length > 0) { + const levelKeyEnd = levelPathToKey(levelPath); + // This works because the separator byte is 0x00 + // Therefore we have `sep level sep` + // and we can acquire keys less than `sep level sep+1` + levelKeyEnd[levelKeyEnd.length - 1] += 1; + options_.lt = levelKeyEnd; + } + } + filterUndefined(options_); + return options_; +} + export { sep, encodePart, @@ -287,4 +407,7 @@ export { deserialize, toArrayBuffer, fromArrayBuffer, + promisify, + filterUndefined, + iterationOptions, }; diff --git a/tests/DB.test.ts b/tests/DB.test.ts index becb6a14..440b80ec 100644 --- a/tests/DB.test.ts +++ b/tests/DB.test.ts @@ -1,31 +1,27 @@ -import type { LevelDB } from 'level'; import type { KeyPath } from '@/types'; import type { DBWorkerModule } from './workers/dbWorkerModule'; import os from 'os'; import path from 'path'; import fs from 'fs'; import nodeCrypto from 'crypto'; -import nodeUtil from 'util'; -import lexi from 'lexicographic-integer'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { WorkerManager } from '@matrixai/workers'; import { withF } from 
'@matrixai/resources'; import { spawn, Worker } from 'threads'; -import level from 'level'; import DB from '@/DB'; import * as errors from '@/errors'; import * as utils from '@/utils'; -import * as testUtils from './utils'; +import * as testsUtils from './utils'; describe(DB.name, () => { const logger = new Logger(`${DB.name} Test`, LogLevel.WARN, [ new StreamHandler(), ]); const crypto = { - key: testUtils.generateKeySync(256), + key: testsUtils.generateKeySync(256), ops: { - encrypt: testUtils.encrypt, - decrypt: testUtils.decrypt, + encrypt: testsUtils.encrypt, + decrypt: testsUtils.decrypt, }, }; let dataDir: string; @@ -83,28 +79,13 @@ describe(DB.name, () => { expect(await db2.get('key')).toBeUndefined(); await db2.stop(); }); - test('start wipes dirty transaction state', async () => { - const dbPath = `${dataDir}/db`; - const db = await DB.createDB({ dbPath, crypto, logger }); - const data = await db.serializeEncrypt('bar', false); - // Put in dirty transaction state - await db.db.put(utils.keyPathToKey(['transactions', 'foo']), data); - expect(await db.dump(['transactions'], false, true)).toStrictEqual([ - [['foo'], 'bar'], - ]); - await db.stop(); - // Should wipe the transaction state - await db.start(); - expect(await db.dump(['transactions'], false, true)).toStrictEqual([]); - await db.stop(); - }); test('start performs canary check to validate key', async () => { const dbPath = `${dataDir}/db`; let db = await DB.createDB({ dbPath, crypto, logger }); await db.stop(); const crypto_ = { ...crypto, - key: testUtils.generateKeySync(256), + key: testsUtils.generateKeySync(256), }; await expect( DB.createDB({ dbPath, crypto: crypto_, logger }), @@ -150,8 +131,8 @@ describe(DB.name, () => { const dbPath = `${dataDir}/db`; const db = await DB.createDB({ dbPath, crypto, logger }); const keyPaths: Array = Array.from({ length: 1000 }, () => - Array.from({ length: testUtils.getRandomInt(0, 11) }, () => - nodeCrypto.randomBytes(testUtils.getRandomInt(0, 11)), + Array.from({ length: testsUtils.getRandomInt(0, 11) }, () => + nodeCrypto.randomBytes(testsUtils.getRandomInt(0, 11)), ), ); for (const kP of keyPaths) { @@ -199,7 +180,7 @@ describe(DB.name, () => { 'key', ]); const records: Array<[KeyPath, Buffer]> = []; - for await (const [kP, v] of db.iterator(undefined, [ + for await (const [kP, v] of db.iterator([ Buffer.concat([utils.sep, Buffer.from('level')]), ])) { records.push([kP, v]); @@ -331,309 +312,6 @@ describe(DB.name, () => { expect(await db.get(['level1', 'level2', 'a'])).toBeUndefined(); await db.stop(); }); - test('internal db lexicographic iteration order', async () => { - const dbPath = `${dataDir}/db`; - const db = await new Promise>( - (resolve, reject) => { - const db = level( - dbPath, - { - keyEncoding: 'binary', - valueEncoding: 'binary', - }, - (e) => { - if (e) { - reject(e); - } else { - resolve(db); - } - }, - ); - }, - ); - await db.put(Buffer.from([0x01]), Buffer.alloc(0)); - await db.put(Buffer.from([0x00, 0x00, 0x00]), Buffer.alloc(0)); - await db.put(Buffer.from([0x00, 0x00]), Buffer.alloc(0)); - // The empty key is not supported in leveldb - // However in this DB, empty keys are always put under root level of `data` - // therefore empty keys are supported - // await db_.put(Buffer.from([]), Buffer.alloc(0)); - const keys: Array = []; - // @ts-ignore Outdated types - for await (const [k] of db.iterator()) { - keys.push(k); - } - expect(keys).toStrictEqual([ - // Therefore `aa` is earlier than `aaa` - Buffer.from([0x00, 0x00]), - Buffer.from([0x00, 0x00, 0x00]), 
- // Therefore `aa` is earlier than `z` - Buffer.from([0x01]), - ]); - await db.close(); - }); - test('lexicographic iteration order', async () => { - const dbPath = `${dataDir}/db`; - const db = await DB.createDB({ dbPath, crypto, logger }); - await db.put(Buffer.from([0x01]), Buffer.alloc(0)); - await db.put(Buffer.from([0x00, 0x00, 0x00]), Buffer.alloc(0)); - await db.put(Buffer.from([0x00, 0x00]), Buffer.alloc(0)); - await db.put(Buffer.from([]), Buffer.alloc(0)); - const keyPaths: Array = []; - for await (const [kP] of db.iterator({ values: false })) { - keyPaths.push(kP); - } - expect(keyPaths).toStrictEqual([ - // Therefore empty buffer sorts first - [Buffer.from([])], - // Therefore `aa` is earlier than `aaa` - [Buffer.from([0x00, 0x00])], - [Buffer.from([0x00, 0x00, 0x00])], - // Therefore `aa` is earlier than `z` - [Buffer.from([0x01])], - ]); - // Check that this matches Buffer.compare order - const keyPaths_ = [...keyPaths]; - keyPaths_.sort((kP1: Array, kP2: Array) => { - // Only concatenate the key paths - const k1 = Buffer.concat(kP1); - const k2 = Buffer.concat(kP2); - return Buffer.compare(k1, k2); - }); - expect(keyPaths_).toStrictEqual(keyPaths); - await db.stop(); - }); - test('lexicographic iteration order fuzzing', async () => { - const dbPath = `${dataDir}/db`; - const db = await DB.createDB({ dbPath, crypto, logger }); - const keys: Array = Array.from({ length: 1000 }, () => - nodeCrypto.randomBytes(testUtils.getRandomInt(0, 101)), - ); - for (const k of keys) { - await db.put(k, 'value'); - } - const keyPaths: Array = []; - for await (const [kP] of db.iterator({ values: false })) { - keyPaths.push(kP); - } - // Check that this matches Buffer.compare order - const keyPaths_ = [...keyPaths]; - keyPaths_.sort((kP1: Array, kP2: Array) => { - // Only concatenate the key paths - const k1 = Buffer.concat(kP1); - const k2 = Buffer.concat(kP2); - return Buffer.compare(k1, k2); - }); - expect(keyPaths_).toStrictEqual(keyPaths); - await db.stop(); - }); - test('lexicographic integer iteration order', async () => { - // Using the lexicographic-integer encoding - const dbPath = `${dataDir}/db`; - const db = await DB.createDB({ dbPath, crypto, logger }); - // Sorted order should be [3, 4, 42, 100] - const keys = [100, 3, 4, 42]; - for (const k of keys) { - await db.put(Buffer.from(lexi.pack(k)), 'value'); - } - const keysIterated: Array = []; - for await (const [kP] of db.iterator({ values: false })) { - keysIterated.push(lexi.unpack([...kP[0]])); - } - expect(keys).not.toEqual(keysIterated); - // Numeric sort - expect(keys.sort((a, b) => a - b)).toEqual(keysIterated); - await db.stop(); - }); - test('lexicographic level iteration order', async () => { - const dbPath = `${dataDir}/db`; - const db = await DB.createDB({ dbPath, crypto, logger }); - // With levels and empty keys, the sorting is more complicated - await db.put([Buffer.from([0x01])], Buffer.alloc(0)); - await db.put( - [Buffer.from([0x00, 0x00]), Buffer.from([0x00, 0x00])], - Buffer.alloc(0), - ); - await db.put( - [Buffer.from([0x00, 0x00, 0x00]), Buffer.from([0x00])], - Buffer.alloc(0), - ); - await db.put( - [Buffer.from([0x00, 0x00]), Buffer.from([0x01])], - Buffer.alloc(0), - ); - await db.put( - [Buffer.from([0x00, 0x00, 0x00]), Buffer.from([0x01])], - Buffer.alloc(0), - ); - await db.put([Buffer.from([0x01]), Buffer.from([0x00])], Buffer.alloc(0)); - await db.put([Buffer.from([0x00]), Buffer.from([0x00])], Buffer.alloc(0)); - await db.put([Buffer.from([0x00, 0x00])], Buffer.alloc(0)); - await 
db.put([Buffer.from([0x00, 0x00]), ''], Buffer.alloc(0)); - await db.put([Buffer.from([0xff]), ''], Buffer.alloc(0)); - await db.put([Buffer.from([0x00]), ''], Buffer.alloc(0)); - await db.put([Buffer.from([])], Buffer.alloc(0)); - await db.put([Buffer.from([]), Buffer.from([])], Buffer.alloc(0)); - await db.put([Buffer.from([0x00])], Buffer.alloc(0)); - await db.put( - [Buffer.from([0x00, 0x00]), Buffer.from([0xff]), Buffer.from([])], - Buffer.alloc(0), - ); - await db.put( - [Buffer.from([0x00, 0x00]), Buffer.from([]), Buffer.from([])], - Buffer.alloc(0), - ); - const keyPaths: Array = []; - for await (const [kP] of db.iterator({ values: false })) { - keyPaths.push(kP); - } - /** - * Suppose that: - * - * * `[]` is a key path of degree 0 - * * `['a']` is a key path of degree 0 - * * `['a', 'b']` is a key path of degree 1 - * - * The sorting process goes through 3 steps in-order: - * - * 1. Level parts at each degree are sorted lexicographically - * 2. Key parts with the same level path are sorted lexicographically - * 3. Key parts with degree n are sorted in front of key parts with degree n -1 - */ - expect(keyPaths).toStrictEqual([ - /* Begin degree 1 */ - [Buffer.from([]), Buffer.from([])], - [Buffer.from([0x00]), Buffer.from([])], - [Buffer.from([0x00]), Buffer.from([0x00])], - /* Begin degree 2 */ - [Buffer.from([0x00, 0x00]), Buffer.from([]), Buffer.from([])], - [Buffer.from([0x00, 0x00]), Buffer.from([0xff]), Buffer.from([])], - /* End degree 2 */ - [Buffer.from([0x00, 0x00]), Buffer.from([])], - [Buffer.from([0x00, 0x00]), Buffer.from([0x00, 0x00])], - [Buffer.from([0x00, 0x00]), Buffer.from([0x01])], - [Buffer.from([0x00, 0x00, 0x00]), Buffer.from([0x00])], - [Buffer.from([0x00, 0x00, 0x00]), Buffer.from([0x01])], - [Buffer.from([0x01]), Buffer.from([0x00])], - [Buffer.from([0xff]), Buffer.from([])], - /* End degree 1*/ - /* Begin degree 0 */ - [Buffer.from([])], - [Buffer.from([0x00])], - [Buffer.from([0x00, 0x00])], - [Buffer.from([0x01])], - /* End degree 0 */ - ]); - await db.stop(); - }); - test('lexicographic level iteration order fuzzing', async () => { - const dbPath = `${dataDir}/db`; - const db = await DB.createDB({ dbPath, crypto, logger }); - const keyPathsInput: Array = Array.from({ length: 5000 }, () => - Array.from({ length: testUtils.getRandomInt(0, 11) }, () => - nodeCrypto.randomBytes(testUtils.getRandomInt(0, 11)), - ), - ); - for (const kP of keyPathsInput) { - await db.put(kP, 'value'); - } - const keyPathsOutput: Array = []; - for await (const [kP] of db.iterator({ values: false })) { - keyPathsOutput.push(kP); - } - // Copy the DB sorted key paths - const keyPathsOutput_ = [...keyPathsOutput]; - // Shuffle the DB sorted key paths - testUtils.arrayShuffle(keyPathsOutput_); - keyPathsOutput_.sort((kP1: Array, kP2: Array) => { - const lP1 = kP1.slice(0, kP1.length - 1); - const lP2 = kP2.slice(0, kP2.length - 1); - // Level parts at each degree are sorted lexicographically - for (let i = 0; i < Math.min(lP1.length, lP2.length); i++) { - const comp = Buffer.compare(lP1[i], lP2[i]); - if (comp !== 0) return comp; - // Continue to the next level part - } - // Key parts with the same level path are sorted lexicographically - if ( - lP1.length === lP2.length && - Buffer.concat(lP1).equals(Buffer.concat(lP2)) - ) { - return Buffer.compare(kP1[kP1.length - 1], kP2[kP2.length - 1]); - } - // Key parts with degree n are sorted in front of key parts with degree n -1 - if (kP1.length > kP2.length) { - return -1; - } else if (kP2.length > kP1.length) { - return 1; - } 
else { - // This cannot happen - throw new Error(); - } - }); - for (let i = 0; i < keyPathsOutput_.length; i++) { - try { - expect(keyPathsOutput_[i]).toStrictEqual(keyPathsOutput[i]); - } catch (e) { - // eslint-disable-next-line no-console - console.error( - 'mismatch: %s vs %s', - nodeUtil.inspect({ - sort: keyPathsOutput_[i], - sortBefore: keyPathsOutput_.slice(Math.max(0, i - 5), i), - sortAfter: keyPathsOutput_.slice(i + 1, i + 1 + 5), - }), - nodeUtil.inspect({ - db: keyPathsOutput[i], - dbBefore: keyPathsOutput.slice(Math.max(0, i - 5), i), - dbAfter: keyPathsOutput.slice(i + 1, i + 1 + 5), - }), - ); - throw e; - } - } - await db.stop(); - }); - test('iterating sublevels', async () => { - const dbPath = `${dataDir}/db`; - const db = await DB.createDB({ dbPath, crypto, logger }); - await db.put('a', 'value0'); - await db.put('b', 'value1'); - await db.put(['level1', 'a'], 'value0'); - await db.put(['level1', 'b'], 'value1'); - await db.put(['level1', 'level2', 'a'], 'value0'); - await db.put(['level1', 'level2', 'b'], 'value1'); - let results: Array<[KeyPath, string]>; - results = []; - for await (const [kP, v] of db.iterator({ - keyAsBuffer: false, - valueAsBuffer: false, - })) { - results.push([kP, v]); - } - expect(results).toStrictEqual([ - [['level1', 'level2', 'a'], 'value0'], - [['level1', 'level2', 'b'], 'value1'], - [['level1', 'a'], 'value0'], - [['level1', 'b'], 'value1'], - [['a'], 'value0'], - [['b'], 'value1'], - ]); - results = []; - for await (const [kP, v] of db.iterator( - { keyAsBuffer: false, valueAsBuffer: false }, - ['level1'], - )) { - results.push([kP, v]); - } - expect(results).toStrictEqual([ - [['level2', 'a'], 'value0'], - [['level2', 'b'], 'value1'], - [['a'], 'value0'], - [['b'], 'value1'], - ]); - await db.stop(); - }); test('counting sublevels', async () => { const dbPath = `${dataDir}/db`; const db = await DB.createDB({ dbPath, crypto, logger }); @@ -818,4 +496,77 @@ describe(DB.name, () => { expect(await db.get('d')).toBe('value3'); await db.stop(); }); + test('debug dumping', async () => { + const dbPath = `${dataDir}/db`; + const db = await DB.createDB({ dbPath, crypto, logger }); + await db.put('a', 'value0'); + await db.put('b', 'value1'); + await db.put('c', 'value2'); + await db.put('d', 'value3'); + expect(await db.dump()).toStrictEqual([ + [['a'], 'value0'], + [['b'], 'value1'], + [['c'], 'value2'], + [['d'], 'value3'], + ]); + // Remember non-raw data is always encoded with JSON + // So the raw dump is a buffer version of the JSON string + expect(await db.dump([], true)).toStrictEqual([ + [[Buffer.from('a')], Buffer.from(JSON.stringify('value0'))], + [[Buffer.from('b')], Buffer.from(JSON.stringify('value1'))], + [[Buffer.from('c')], Buffer.from(JSON.stringify('value2'))], + [[Buffer.from('d')], Buffer.from(JSON.stringify('value3'))], + ]); + // Raw dumping will acquire values from the `data` root level + // and also the canary key + expect(await db.dump([], true, true)).toStrictEqual([ + [ + [Buffer.from('data'), Buffer.from('a')], + Buffer.from(JSON.stringify('value0')), + ], + [ + [Buffer.from('data'), Buffer.from('b')], + Buffer.from(JSON.stringify('value1')), + ], + [ + [Buffer.from('data'), Buffer.from('c')], + Buffer.from(JSON.stringify('value2')), + ], + [ + [Buffer.from('data'), Buffer.from('d')], + Buffer.from(JSON.stringify('value3')), + ], + [[Buffer.from('canary')], Buffer.from(JSON.stringify('deadbeef'))], + ]); + // It is also possible to insert at root level + await db._put([], 'value0'); + await db._put(['a'], 'value1'); + 
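+ // Note: `_put` writes at the true root of the DB, bypassing the 'data' level that regular `put` calls nest under, which is why these keys appear without the 'data' prefix in the raw dump below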
await db._put(['a', 'b'], 'value2'); + expect(await db.dump([], true, true)).toStrictEqual([ + [ + [Buffer.from('a'), Buffer.from('b')], + Buffer.from(JSON.stringify('value2')), + ], + [ + [Buffer.from('data'), Buffer.from('a')], + Buffer.from(JSON.stringify('value0')), + ], + [ + [Buffer.from('data'), Buffer.from('b')], + Buffer.from(JSON.stringify('value1')), + ], + [ + [Buffer.from('data'), Buffer.from('c')], + Buffer.from(JSON.stringify('value2')), + ], + [ + [Buffer.from('data'), Buffer.from('d')], + Buffer.from(JSON.stringify('value3')), + ], + [[Buffer.from('')], Buffer.from(JSON.stringify('value0'))], + [[Buffer.from('a')], Buffer.from(JSON.stringify('value1'))], + [[Buffer.from('canary')], Buffer.from(JSON.stringify('deadbeef'))], + ]); + await db.stop(); + }); }); diff --git a/tests/DBIterator.test.ts b/tests/DBIterator.test.ts new file mode 100644 index 00000000..aa2066dd --- /dev/null +++ b/tests/DBIterator.test.ts @@ -0,0 +1,382 @@ +import type { KeyPath } from '@'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import nodeCrypto from 'crypto'; +import nodeUtil from 'util'; +import lexi from 'lexicographic-integer'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import DB from '@/DB'; +import DBIterator from '@/DBIterator'; +import rocksdbP from '@/rocksdb/rocksdbP'; +import * as testsUtils from './utils'; + +describe(DBIterator.name, () => { + const logger = new Logger(`${DBIterator.name} test`, LogLevel.WARN, [ + new StreamHandler(), + ]); + const crypto = { + key: testsUtils.generateKeySync(256), + ops: { + encrypt: testsUtils.encrypt, + decrypt: testsUtils.decrypt, + }, + }; + let dataDir: string; + let db: DB; + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'db-iter-test-'), + ); + const dbPath = `${dataDir}/db`; + db = await DB.createDB({ dbPath, crypto, logger }); + }); + afterEach(async () => { + await db.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + }); + test('internal db lexicographic iteration order', async () => { + const dbPath = `${dataDir}/leveldb`; + const db = rocksdbP.dbInit(); + await rocksdbP.dbOpen(db, dbPath, {}); + await rocksdbP.dbPut(db, Buffer.from([0x01]), Buffer.alloc(0), {}); + await rocksdbP.dbPut( + db, + Buffer.from([0x00, 0x00, 0x00]), + Buffer.alloc(0), + {}, + ); + await rocksdbP.dbPut(db, Buffer.from([0x00, 0x00]), Buffer.alloc(0), {}); + await rocksdbP.dbPut(db, Buffer.from([]), Buffer.alloc(0), {}); + const iterator = rocksdbP.iteratorInit(db, { + keyEncoding: 'buffer', + valueEncoding: 'buffer', + }); + const [entries] = await rocksdbP.iteratorNextv(iterator, 4); + await rocksdbP.iteratorClose(iterator); + const keys = entries.map((entry) => entry[0]); + expect(keys).toEqual([ + Buffer.from([]), + // Therefore `aa` is earlier than `aaa` + Buffer.from([0x00, 0x00]), + Buffer.from([0x00, 0x00, 0x00]), + // Therefore `aa` is earlier than `z` + Buffer.from([0x01]), + ]); + await rocksdbP.dbClose(db); + }); + test('lexicographic iteration order', async () => { + await db.put(Buffer.from([0x01]), Buffer.alloc(0)); + await db.put(Buffer.from([0x00, 0x00, 0x00]), Buffer.alloc(0)); + await db.put(Buffer.from([0x00, 0x00]), Buffer.alloc(0)); + await db.put(Buffer.from([]), Buffer.alloc(0)); + const keyPaths: Array = []; + for await (const [kP] of db.iterator([], { values: false })) { + keyPaths.push(kP); + } + expect(keyPaths).toEqual([ + // Therefore empty buffer sorts first + [Buffer.from([])], + // Therefore `aa` is 
earlier than `aaa` + [Buffer.from([0x00, 0x00])], + [Buffer.from([0x00, 0x00, 0x00])], + // Therefore `aa` is earlier than `z` + [Buffer.from([0x01])], + ]); + // Check that this matches Buffer.compare order + const keyPaths_ = [...keyPaths]; + keyPaths_.sort((kP1: Array, kP2: Array) => { + // Only concatenate the key paths + const k1 = Buffer.concat(kP1); + const k2 = Buffer.concat(kP2); + return Buffer.compare(k1, k2); + }); + expect(keyPaths_).toEqual(keyPaths); + }); + test('lexicographic iteration order fuzzing', async () => { + const keys: Array = Array.from({ length: 1000 }, () => + nodeCrypto.randomBytes(testsUtils.getRandomInt(0, 101)), + ); + for (const k of keys) { + await db.put(k, 'value'); + } + const keyPaths: Array = []; + for await (const [kP] of db.iterator([], { values: false })) { + keyPaths.push(kP); + } + // Check that this matches Buffer.compare order + const keyPaths_ = [...keyPaths]; + keyPaths_.sort((kP1: Array, kP2: Array) => { + // Only concatenate the key paths + const k1 = Buffer.concat(kP1); + const k2 = Buffer.concat(kP2); + return Buffer.compare(k1, k2); + }); + expect(keyPaths_).toEqual(keyPaths); + }); + test('lexicographic integer iteration order', async () => { + // Using the lexicographic-integer encoding + // Sorted order should be [3, 4, 42, 100] + const keys = [100, 3, 4, 42]; + for (const k of keys) { + await db.put(Buffer.from(lexi.pack(k)), 'value'); + } + const keysIterated: Array = []; + for await (const [kP] of db.iterator([], { values: false })) { + keysIterated.push(lexi.unpack([...kP[0]])); + } + expect(keys).not.toEqual(keysIterated); + // Numeric sort + expect(keys.sort((a, b) => a - b)).toEqual(keysIterated); + }); + test('lexicographic level iteration order', async () => { + // With levels and empty keys, the sorting is more complicated + await db.put([Buffer.from([0x01])], Buffer.alloc(0)); + await db.put( + [Buffer.from([0x00, 0x00]), Buffer.from([0x00, 0x00])], + Buffer.alloc(0), + ); + await db.put( + [Buffer.from([0x00, 0x00, 0x00]), Buffer.from([0x00])], + Buffer.alloc(0), + ); + await db.put( + [Buffer.from([0x00, 0x00]), Buffer.from([0x01])], + Buffer.alloc(0), + ); + await db.put( + [Buffer.from([0x00, 0x00, 0x00]), Buffer.from([0x01])], + Buffer.alloc(0), + ); + await db.put([Buffer.from([0x01]), Buffer.from([0x00])], Buffer.alloc(0)); + await db.put([Buffer.from([0x00]), Buffer.from([0x00])], Buffer.alloc(0)); + await db.put([Buffer.from([0x00, 0x00])], Buffer.alloc(0)); + await db.put([Buffer.from([0x00, 0x00]), ''], Buffer.alloc(0)); + await db.put([Buffer.from([0xff]), ''], Buffer.alloc(0)); + await db.put([Buffer.from([0x00]), ''], Buffer.alloc(0)); + await db.put([Buffer.from([])], Buffer.alloc(0)); + await db.put([Buffer.from([]), Buffer.from([])], Buffer.alloc(0)); + await db.put([Buffer.from([0x00])], Buffer.alloc(0)); + await db.put( + [Buffer.from([0x00, 0x00]), Buffer.from([0xff]), Buffer.from([])], + Buffer.alloc(0), + ); + await db.put( + [Buffer.from([0x00, 0x00]), Buffer.from([]), Buffer.from([])], + Buffer.alloc(0), + ); + const keyPaths: Array = []; + for await (const [kP] of db.iterator([], { values: false })) { + keyPaths.push(kP); + } + /** + * Suppose that: + * + * * `[]` is a key path of degree 0 + * * `['a']` is a key path of degree 0 + * * `['a', 'b']` is a key path of degree 1 + * + * The sorting process goes through 3 steps in-order: + * + * 1. Level parts at each degree are sorted lexicographically + * 2. Key parts with the same level path are sorted lexicographically + * 3. 
Key parts with degree n are sorted in front of key parts with degree n - 1 + */ + expect(keyPaths).toEqual([ + /* Begin degree 1 */ + [Buffer.from([]), Buffer.from([])], + [Buffer.from([0x00]), Buffer.from([])], + [Buffer.from([0x00]), Buffer.from([0x00])], + /* Begin degree 2 */ + [Buffer.from([0x00, 0x00]), Buffer.from([]), Buffer.from([])], + [Buffer.from([0x00, 0x00]), Buffer.from([0xff]), Buffer.from([])], + /* End degree 2 */ + [Buffer.from([0x00, 0x00]), Buffer.from([])], + [Buffer.from([0x00, 0x00]), Buffer.from([0x00, 0x00])], + [Buffer.from([0x00, 0x00]), Buffer.from([0x01])], + [Buffer.from([0x00, 0x00, 0x00]), Buffer.from([0x00])], + [Buffer.from([0x00, 0x00, 0x00]), Buffer.from([0x01])], + [Buffer.from([0x01]), Buffer.from([0x00])], + [Buffer.from([0xff]), Buffer.from([])], + /* End degree 1*/ + /* Begin degree 0 */ + [Buffer.from([])], + [Buffer.from([0x00])], + [Buffer.from([0x00, 0x00])], + [Buffer.from([0x01])], + /* End degree 0 */ + ]); + }); + test('lexicographic level iteration order fuzzing', async () => { + const keyPathsInput: Array = Array.from({ length: 5000 }, () => + Array.from({ length: testsUtils.getRandomInt(0, 11) }, () => + nodeCrypto.randomBytes(testsUtils.getRandomInt(0, 11)), + ), + ); + for (const kP of keyPathsInput) { + await db.put(kP, 'value'); + } + const keyPathsOutput: Array = []; + for await (const [kP] of db.iterator([], { values: false })) { + keyPathsOutput.push(kP); + } + // Copy the DB sorted key paths + const keyPathsOutput_ = [...keyPathsOutput]; + // Shuffle the DB sorted key paths + testsUtils.arrayShuffle(keyPathsOutput_); + keyPathsOutput_.sort((kP1: Array, kP2: Array) => { + const lP1 = kP1.slice(0, kP1.length - 1); + const lP2 = kP2.slice(0, kP2.length - 1); + // Level parts at each degree are sorted lexicographically + for (let i = 0; i < Math.min(lP1.length, lP2.length); i++) { + const comp = Buffer.compare(lP1[i], lP2[i]); + if (comp !== 0) return comp; + // Continue to the next level part + } + // Key parts with the same level path are sorted lexicographically + if ( + lP1.length === lP2.length && + Buffer.concat(lP1).equals(Buffer.concat(lP2)) + ) { + return Buffer.compare(kP1[kP1.length - 1], kP2[kP2.length - 1]); + } + // Key parts with degree n are sorted in front of key parts with degree n -1 + if (kP1.length > kP2.length) { + return -1; + } else if (kP2.length > kP1.length) { + return 1; + } else { + // This cannot happen + throw new Error(); + } + }); + for (let i = 0; i < keyPathsOutput_.length; i++) { + try { + expect(keyPathsOutput_[i]).toEqual(keyPathsOutput[i]); + } catch (e) { + // eslint-disable-next-line no-console + console.error( + 'mismatch: %s vs %s', + nodeUtil.inspect({ + sort: keyPathsOutput_[i], + sortBefore: keyPathsOutput_.slice(Math.max(0, i - 5), i), + sortAfter: keyPathsOutput_.slice(i + 1, i + 1 + 5), + }), + nodeUtil.inspect({ + db: keyPathsOutput[i], + dbBefore: keyPathsOutput.slice(Math.max(0, i - 5), i), + dbAfter: keyPathsOutput.slice(i + 1, i + 1 + 5), + }), + ); + throw e; + } + } + }); + test('iterating sublevels', async () => { + await db.put('a', 'value0'); + await db.put('b', 'value1'); + await db.put(['level1', 'a'], 'value0'); + await db.put(['level1', 'b'], 'value1'); + await db.put(['level1', 'level2', 'a'], 'value0'); + await db.put(['level1', 'level2', 'b'], 'value1'); + let results: Array<[KeyPath, string]>; + results = []; + for await (const [kP, v] of db.iterator([], { + keyAsBuffer: false, + valueAsBuffer: false, + })) { + results.push([kP, v]); + } + 
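+ // By rule 3 of the level sorting, deeper key paths iterate first: the 'level1/level2' entries, then the 'level1' entries, then the root entries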
expect(results).toStrictEqual([ + [['level1', 'level2', 'a'], 'value0'], + [['level1', 'level2', 'b'], 'value1'], + [['level1', 'a'], 'value0'], + [['level1', 'b'], 'value1'], + [['a'], 'value0'], + [['b'], 'value1'], + ]); + results = []; + for await (const [kP, v] of db.iterator(['level1'], { + keyAsBuffer: false, + valueAsBuffer: false, + })) { + results.push([kP, v]); + } + expect(results).toStrictEqual([ + [['level2', 'a'], 'value0'], + [['level2', 'b'], 'value1'], + [['a'], 'value0'], + [['b'], 'value1'], + ]); + }); + test('iterating sublevels with range', async () => { + // Note that `'a'` is `0x61` + await db.put(['level', Buffer.from([0x30, 0x34]), 'a'], 'value'); + await db.put(['level', Buffer.from([0x30, 0x35]), 'a', 'b'], 'value'); + // Suppose we only wanted these 2 entries + await db.put(['level', Buffer.from([0x30, 0x35]), ''], 'value'); + await db.put(['level', Buffer.from([0x30, 0x35]), 'a'], 'value'); + // And none of these entries + await db.put(['level', Buffer.from([0x30, 0x36]), 'a', 'b'], 'value'); + await db.put(['level', Buffer.from([0x30, 0x36]), 'a'], 'value'); + await db.put(['level', Buffer.from([0x30, 0x34])], 'value'); + let keyPaths: Array = []; + // Here we are iterating until the sublevel of `0x30 0x35` + // We must use a key path for the `lte` + // It cannot just be `Buffer.from([0x30, 0x35])` + // Notice that this will not cover the key of `0x30 0x34` + // That's because of rule 3 + // 3. Key parts with degree n are sorted in front of key parts with degree n - 1 + for await (const [kP] of db.iterator(['level'], { + lte: [Buffer.from([0x30, 0x35]), ''], + values: false, + })) { + keyPaths.push(kP); + } + expect(keyPaths).toStrictEqual([ + [Buffer.from([0x30, 0x34]), Buffer.from([0x61])], + [Buffer.from([0x30, 0x35]), Buffer.from([0x61]), Buffer.from([0x62])], + [Buffer.from([0x30, 0x35]), Buffer.from([])], + ]); + // If we only wanted entries under the sublevel of `0x30 0x35` + // this would not work because of rule 3 + // The deeper level is in front + keyPaths = []; + for await (const [kP] of db.iterator(['level'], { + gte: [Buffer.from([0x30, 0x35]), ''], + lt: [Buffer.from([0x30, 0x36]), ''], + })) { + keyPaths.push(kP); + } + expect(keyPaths).toStrictEqual([ + [Buffer.from([0x30, 0x35]), Buffer.from([])], + [Buffer.from([0x30, 0x35]), Buffer.from([0x61])], + [Buffer.from([0x30, 0x36]), Buffer.from([0x61]), Buffer.from([0x62])], + ]); + // To actually do it, we need to specify as part of the level path parameter + keyPaths = []; + for await (const [kP] of db.iterator([ + 'level', + Buffer.from([0x30, 0x35]), + ])) { + keyPaths.push(kP); + } + expect(keyPaths).toStrictEqual([ + [Buffer.from([0x61]), Buffer.from([0x62])], + [Buffer.from([])], + [Buffer.from([0x61])], + ]); + // However the deeper level is still there + // But because of rule 3, we can do this instead + keyPaths = []; + for await (const [kP] of db.iterator(['level', Buffer.from([0x30, 0x35])], { + gte: '', + })) { + keyPaths.push(kP); + } + expect(keyPaths).toStrictEqual([[Buffer.from([])], [Buffer.from([0x61])]]); + }); +}); diff --git a/tests/DBTransaction.test.ts b/tests/DBTransaction.test.ts index 190f8e7a..9108f129 100644 --- a/tests/DBTransaction.test.ts +++ b/tests/DBTransaction.test.ts @@ -1,22 +1,24 @@ -import type { KeyPath } from '@'; +import type { KeyPath } from '@/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { withF } from '@matrixai/resources'; +import { errors as 
locksErrors } from '@matrixai/async-locks'; import DB from '@/DB'; import DBTransaction from '@/DBTransaction'; -import * as testUtils from './utils'; +import * as errors from '@/errors'; +import * as testsUtils from './utils'; describe(DBTransaction.name, () => { const logger = new Logger(`${DBTransaction.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); const crypto = { - key: testUtils.generateKeySync(256), + key: testsUtils.generateKeySync(256), ops: { - encrypt: testUtils.encrypt, - decrypt: testUtils.decrypt, + encrypt: testsUtils.encrypt, + decrypt: testsUtils.decrypt, }, }; let dataDir: string; @@ -35,24 +37,6 @@ describe(DBTransaction.name, () => { recursive: true, }); }); - test('snapshot state is cleared after releasing transactions', async () => { - const acquireTran1 = db.transaction(); - const [releaseTran1, tran1] = await acquireTran1(); - await tran1!.put('hello', 'world'); - const acquireTran2 = db.transaction(); - const [releaseTran2, tran2] = await acquireTran2(); - await tran2!.put('hello', 'world'); - expect(await db.dump(['transactions'], false, true)).toStrictEqual([ - [['0', 'data', 'hello'], 'world'], - [['1', 'data', 'hello'], 'world'], - ]); - await releaseTran1(); - expect(await db.dump(['transactions'], false, true)).toStrictEqual([ - [['1', 'data', 'hello'], 'world'], - ]); - await releaseTran2(); - expect(await db.dump(['transactions'], false, true)).toStrictEqual([]); - }); test('get, put and del', async () => { const p = withF([db.transaction()], async ([tran]) => { expect(await tran.get('foo')).toBeUndefined(); @@ -63,15 +47,11 @@ describe(DBTransaction.name, () => { expect(await tran.get('foo')).toBe('bar'); expect(await tran.get('hello')).toBe('world'); expect(await tran.dump()).toStrictEqual([ - [['data', 'foo'], 'bar'], - [['data', 'hello'], 'world'], + [['foo'], 'bar'], + [['hello'], 'world'], ]); // Delete hello -> world await tran.del('hello'); - // Transaction state should be used - expect( - Object.entries(await db.dump(['transactions'], false, true)).length > 0, - ).toBe(true); }); // While the transaction is executed, there is no data expect(await db.dump(['data'], false, true)).toStrictEqual([]); @@ -80,8 +60,6 @@ describe(DBTransaction.name, () => { expect(await db.dump(['data'], false, true)).toStrictEqual([ [['foo'], 'bar'], ]); - // Transaction state is cleared - expect(await db.dump(['transactions'], false, true)).toStrictEqual([]); }); test('transactional clear', async () => { await db.put('1', '1'); @@ -117,6 +95,20 @@ describe(DBTransaction.name, () => { expect(await tran.count(['level1'])).toBe(2); }); }); + test('snapshot is lazily initiated on the first operation', async () => { + await db.put('foo', 'first'); + expect(await db.get('foo')).toBe('first'); + await withF([db.transaction()], async ([tran]) => { + await db.put('foo', 'second'); + expect(await tran.get('foo')).toBe('second'); + expect(await db.get('foo')).toBe('second'); + await db.put('foo', 'third'); + // Transaction still sees it as `second` + expect(await tran.get('foo')).toBe('second'); + // Database sees it as `third` + expect(await db.get('foo')).toBe('third'); + }); + }); test('no dirty reads', async () => { await withF([db.transaction()], async ([tran1]) => { expect(await tran1.get('hello')).toBeUndefined(); @@ -127,53 +119,67 @@ describe(DBTransaction.name, () => { }); }); await db.clear(); - await withF([db.transaction()], async ([tran1]) => { - expect(await tran1.get('hello')).toBeUndefined(); - await tran1.put('hello', 'foo'); - await 
withF([db.transaction()], async ([tran2]) => { - // `tran1` has not yet committed - expect(await tran2.get('hello')).toBeUndefined(); - await tran2.put('hello', 'bar'); - // `tran2` has not yet committed - expect(await tran1.get('hello')).toBe('foo'); - }); - }); + await expect( + withF([db.transaction()], async ([tran1]) => { + expect(await tran1.get('hello')).toBeUndefined(); + await tran1.put('hello', 'foo'); + // This transaction commits, but the outside transaction will fail + await withF([db.transaction()], async ([tran2]) => { + // `tran1` has not yet committed + expect(await tran2.get('hello')).toBeUndefined(); + // This will cause a conflict with the external transaction + await tran2.put('hello', 'bar'); + // `tran2` has not yet committed + expect(await tran1.get('hello')).toBe('foo'); + }); + }), + ).rejects.toThrow(errors.ErrorDBTransactionConflict); }); - test('non-repeatable reads', async () => { + test('repeatable reads', async () => { await withF([db.transaction()], async ([tran1]) => { + expect(await tran1.get('hello')).toBeUndefined(); + await db.put('hello', '?'); expect(await tran1.get('hello')).toBeUndefined(); await db.withTransactionF(async (tran2) => { await tran2.put('hello', 'world'); }); - // `tran2` is now committed - expect(await tran1.get('hello')).toBe('world'); - }); - await db.clear(); - await db.withTransactionF(async (tran1) => { + // Even though `tran2` is now committed + // the snapshot was taken when `hello` was still undefined expect(await tran1.get('hello')).toBeUndefined(); - await tran1.put('hello', 'foo'); - await withF([db.transaction()], async ([tran2]) => { - // `tran1` has not yet committed - expect(await tran2.get('hello')).toBeUndefined(); - await tran2.put('hello', 'bar'); - }); - // `tran2` is now committed - // however because `foo` has been written in tran1, it stays as `foo` - expect(await tran1.get('hello')).toBe('foo'); }); + expect(await db.get('hello')).toBe('world'); + await db.clear(); + await expect( + db.withTransactionF(async (tran1) => { + expect(await tran1.get('hello')).toBeUndefined(); + await tran1.put('hello', 'foo'); + await expect( + withF([db.transaction()], async ([tran2]) => { + // `tran1` has not yet committed + expect(await tran2.get('hello')).toBeUndefined(); + await tran2.put('hello', 'bar'); + }), + ).resolves.toBeUndefined(); + // `tran2` is now committed + // however because `foo` has been written in tran1, it stays as `foo` + expect(await tran1.get('hello')).toBe('foo'); + // `hello` -> `foo` conflicts with `hello` -> `bar` + }), + ).rejects.toThrow(errors.ErrorDBTransactionConflict); + expect(await db.get('hello')).toBe('bar'); }); - test('phantom reads', async () => { + test('no phantom reads', async () => { await db.put('1', '1'); await db.put('2', '2'); await db.put('3', '3'); let rows: Array<[string, string]>; await withF([db.transaction()], async ([tran1]) => { rows = []; - for await (const [k, v] of tran1.iterator({ + for await (const [kP, v] of tran1.iterator([], { keyAsBuffer: false, valueAsBuffer: false, })) { - rows.push([k.toString(), v]); + rows.push([kP.join(), v]); } expect(rows).toStrictEqual([ ['1', '1'], @@ -184,11 +190,11 @@ describe(DBTransaction.name, () => { await tran2.del('1'); await tran2.put('4', '4'); rows = []; - for await (const [k, v] of tran1.iterator({ + for await (const [kP, v] of tran1.iterator([], { keyAsBuffer: false, valueAsBuffer: false, })) { - rows.push([k.toString(), v]); + rows.push([kP.join(), v]); } expect(rows).toStrictEqual([ ['1', '1'], @@ -197,29 +203,49 @@ 
describe(DBTransaction.name, () => { ]); }); rows = []; - for await (const [k, v] of tran1.iterator({ + for await (const [kP, v] of tran1.iterator([], { keyAsBuffer: false, valueAsBuffer: false, })) { - rows.push([k.toString(), v]); + rows.push([kP.join(), v]); } + // This is the same as a repeatable read, + // but applied to different key-values expect(rows).toStrictEqual([ + ['1', '1'], ['2', '2'], ['3', '3'], - ['4', '4'], ]); }); + // Starting a new iterator shows the new results + rows = []; + for await (const [kP, v] of db.iterator([], { + keyAsBuffer: false, + valueAsBuffer: false, + })) { + rows.push([kP.join(), v]); + } + expect(rows).toStrictEqual([ + ['2', '2'], + ['3', '3'], + ['4', '4'], + ]); }); - test('lost updates', async () => { - await withF([db.transaction()], async ([tran1]) => { + test('no lost updates', async () => { + const p = withF([db.transaction()], async ([tran1]) => { await tran1.put('hello', 'foo'); await withF([db.transaction()], async ([tran2]) => { await tran2.put('hello', 'bar'); }); + // `tran1` sees `foo` expect(await tran1.get('hello')).toBe('foo'); + // However `db` sees `bar` as that's what is committed + expect(await db.get('hello')).toBe('bar'); }); - // `tran2` write is lost because `tran1` committed last - expect(await db.get('hello')).toBe('foo'); + // Even though `tran1` commits last, the `tran2` write is not lost; + // instead `tran1` results in a conflict + await expect(p).rejects.toThrow(errors.ErrorDBTransactionConflict); + expect(await db.get('hello')).toBe('bar'); }); test('get after delete consistency', async () => { await db.put('hello', 'world'); @@ -228,12 +254,98 @@ describe(DBTransaction.name, () => { await tran.put('hello', 'another'); expect(await tran.get('hello')).toBe('another'); await tran.del('hello'); - expect(await tran.dump()).toStrictEqual([[['tombstone', 'hello'], true]]); expect(await tran.get('hello')).toBeUndefined(); expect(await db.get('hello')).toBe('world'); }); expect(await db.get('hello')).toBeUndefined(); }); + test('getForUpdate addresses write-skew by promoting gets into same-value puts', async () => { + // Snapshot isolation allows write skew anomalies to occur + // A write skew means that 2 transactions concurrently read from overlapping keys, + // then make disjoint updates that break a consistency constraint on those keys + // For example: + // T1 reads from k1, k2, writes to k1 + // T2 reads from k1, k2, writes to k2 + // Where the constraint is k1 + k2 >= 0 + await db.put('balance1', '100'); + await db.put('balance2', '100'); + const t1 = withF([db.transaction()], async ([tran]) => { + let balance1 = parseInt((await tran.getForUpdate('balance1'))!); + const balance2 = parseInt((await tran.getForUpdate('balance2'))!); + balance1 -= 100; + expect(balance1 + balance2).toBeGreaterThanOrEqual(0); + await tran.put('balance1', balance1.toString()); + }); + const t2 = withF([db.transaction()], async ([tran]) => { + const balance1 = parseInt((await tran.getForUpdate('balance1'))!); + let balance2 = parseInt((await tran.getForUpdate('balance2'))!); + balance2 -= 100; + expect(balance1 + balance2).toBeGreaterThanOrEqual(0); + await tran.put('balance2', balance2.toString()); + }); + // By using getForUpdate, we promote the read to a write that writes back the same value; + // this causes a write-write conflict + const results = await Promise.allSettled([t1, t2]); + // One will succeed, one will fail + expect(results.some((result) => result.status === 'fulfilled')).toBe(true); + expect( + results.some((result)
=> { + return ( + result.status === 'rejected' && + result.reason instanceof errors.ErrorDBTransactionConflict + ); + }), + ).toBe(true); + }); + test('locking to prevent thrashing for racing counters', async () => { + await db.put('counter', '0'); + let t1 = withF([db.transaction()], async ([tran]) => { + // Can also use `getForUpdate`, but a conflict exists even for `get` + let counter = parseInt((await tran.get('counter'))!); + counter++; + await tran.put('counter', counter.toString()); + }); + let t2 = withF([db.transaction()], async ([tran]) => { + // Can also use `getForUpdate`, but a conflict exists even for `get` + let counter = parseInt((await tran.get('counter'))!); + counter++; + await tran.put('counter', counter.toString()); + }); + let results = await Promise.allSettled([t1, t2]); + expect(results.some((result) => result.status === 'fulfilled')).toBe(true); + expect( + results.some((result) => { + return ( + result.status === 'rejected' && + result.reason instanceof errors.ErrorDBTransactionConflict + ); + }), + ).toBe(true); + expect(await db.get('counter')).toBe('1'); + // In OCC, concurrent requests to update an atomic counter would result + // in race thrashing where only 1 request succeeds, and all other requests + // keep failing. The only way to prevent this thrashing is to use PCC locking + await db.put('counter', '0'); + t1 = withF([db.transaction()], async ([tran]) => { + // Enforces mutual exclusion + await tran.lock('counter'); + // Can also use `get`, no difference here + let counter = parseInt((await tran.getForUpdate('counter'))!); + counter++; + await tran.put('counter', counter.toString()); + }); + t2 = withF([db.transaction()], async ([tran]) => { + // Enforces mutual exclusion + await tran.lock('counter'); + // Can also use `get`, no difference here + let counter = parseInt((await tran.getForUpdate('counter'))!); + counter++; + await tran.put('counter', counter.toString()); + }); + results = await Promise.allSettled([t1, t2]); + expect(results.every((result) => result.status === 'fulfilled')).toBe(true); + expect(await db.get('counter')).toBe('2'); + }); test('iterator get after delete consistency', async () => { await db.put('hello', 'world'); let results: Array<[KeyPath, Buffer]> = []; @@ -266,10 +378,10 @@ describe(DBTransaction.name, () => { const results: Array<[string, string]> = []; await withF([db.transaction()], async ([tran]) => { await tran.del(['a', 'b']); - for await (const [kP, v] of tran.iterator( - { keyAsBuffer: false, valueAsBuffer: false }, - ['a'], - )) { + for await (const [kP, v] of tran.iterator(['a'], { + keyAsBuffer: false, + valueAsBuffer: false, + })) { results.push([kP[0] as string, v]); } }); @@ -277,7 +389,7 @@ describe(DBTransaction.name, () => { }); test('iterator with multiple entombed keys', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | X | | | b | b = b | | b = b | @@ -308,7 +420,7 @@ describe(DBTransaction.name, () => { await tran.del('h'); await tran.put('j', '10'); await tran.del('k'); - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, })) { @@ -322,7 +434,7 @@ describe(DBTransaction.name, () => { ['j', '10'], ]); results = []; - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, reverse: true, @@ -340,7 +452,7 @@ describe(DBTransaction.name, () => { }); test('iterator with
same largest key', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | a = 1 | a = 1 | | b | b = b | | b = b | @@ -368,7 +480,7 @@ describe(DBTransaction.name, () => { await tran.put('f', '6'); await tran.put('j', '10'); await tran.put('k', '11'); - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, })) { @@ -387,9 +499,9 @@ describe(DBTransaction.name, () => { ['k', '11'], ]); }); - test('iterator with same largest key in reverse', async () => { + test('iterator with same largest key reversed', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | a = 1 | a = 1 | | b | b = b | | b = b | @@ -417,7 +529,7 @@ describe(DBTransaction.name, () => { await tran.put('f', '6'); await tran.put('j', '10'); await tran.put('k', '11'); - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, reverse: true, @@ -439,9 +551,9 @@ describe(DBTransaction.name, () => { ].reverse(), ); }); - test('iterator with snapshot largest key', async () => { + test('iterator with largest key in transaction', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | a = 1 | a = 1 | | b | b = b | | b = b | @@ -466,7 +578,7 @@ describe(DBTransaction.name, () => { await tran.put('e', '5'); await tran.put('f', '6'); await tran.put('j', '10'); - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, })) { @@ -484,9 +596,9 @@ describe(DBTransaction.name, () => { ['j', '10'], ]); }); - test('iterator with snapshot largest key in reverse', async () => { + test('iterator with largest key in transaction reversed', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | a = 1 | a = 1 | | b | b = b | | b = b | @@ -511,7 +623,7 @@ describe(DBTransaction.name, () => { await tran.put('e', '5'); await tran.put('f', '6'); await tran.put('j', '10'); - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, reverse: true, @@ -532,9 +644,9 @@ describe(DBTransaction.name, () => { ].reverse(), ); }); - test('iterator with db largest key', async () => { + test('iterator with largest key in db', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | a = 1 | a = 1 | | b | b = b | | b = b | @@ -556,7 +668,7 @@ describe(DBTransaction.name, () => { await tran.put('c', '3'); await tran.put('e', '5'); await tran.put('f', '6'); - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, })) { @@ -573,7 +685,7 @@ describe(DBTransaction.name, () => { ['h', 'h'], ]); }); - test('iterator with db largest key in reverse', async () => { + test('iterator with largest key in db reversed', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | a = 1 | a = 1 | @@ -597,7 +709,7 @@ describe(DBTransaction.name, () => { await tran.put('c', '3'); await tran.put('e', '5'); await tran.put('f', '6'); - for await (const [kP, v] of tran.iterator({ + for await
(const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, reverse: true, @@ -619,7 +731,7 @@ describe(DBTransaction.name, () => { }); test('iterator with undefined values', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | a = 1 | a = 1 | | b | b = b | | b = b | @@ -647,7 +759,7 @@ describe(DBTransaction.name, () => { await tran.put('f', '6'); await tran.put('j', '10'); await tran.put('k', '11'); - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, values: false, })) { @@ -668,7 +780,7 @@ describe(DBTransaction.name, () => { }); test('iterator using seek and next', async () => { /* - | KEYS | DB | SNAPSHOT | RESULT | + | KEYS | DB | TRAN | RESULT | |------|-------|----------|--------| | a | a = a | a = 1 | a = 1 | | b | b = b | | b = b | @@ -724,7 +836,7 @@ describe(DBTransaction.name, () => { [Buffer.from('j')], Buffer.from('"10"'), ]); - await iterator.end(); + await iterator.destroy(); }); }); test('iterator with async generator yield', async () => { @@ -733,7 +845,7 @@ describe(DBTransaction.name, () => { const g = db.withTransactionG(async function* ( tran: DBTransaction, ): AsyncGenerator<[string, string]> { - for await (const [kP, v] of tran.iterator({ + for await (const [kP, v] of tran.iterator([], { keyAsBuffer: false, valueAsBuffer: false, })) { @@ -856,16 +968,167 @@ describe(DBTransaction.name, () => { await db.put('1', 'a'); await db.put('2', 'b'); const mockFailure = jest.fn(); + const mockFinally = jest.fn(); + const e = new Error('Oh no!'); await expect( db.withTransactionF(async (tran) => { await tran.put('1', '1'); await tran.put('2', '2'); tran.queueFailure(mockFailure); - throw new Error('Oh no!'); + tran.queueFinally(mockFinally); + throw e; }), - ).rejects.toThrow('Oh no!'); - expect(mockFailure).toBeCalled(); + ).rejects.toThrow(e); + expect(mockFailure).toBeCalledWith(e); + expect(mockFinally).toBeCalledWith(e); expect(await db.get('1')).toBe('a'); expect(await db.get('2')).toBe('b'); }); + test('locking and unlocking', async () => { + await db.withTransactionF(async (tran) => { + await tran.lock('foo'); + await tran.unlock('foo'); + expect(tran.locks.size).toBe(0); + }); + await db.withTransactionF(async (tran) => { + await tran.lock('foo', 'bar'); + await tran.unlock('bar'); + expect(tran.locks.size).toBe(1); + expect(tran.locks.has('foo')).toBe(true); + }); + await db.withTransactionF(async (tran) => { + await tran.lock('foo', 'bar'); + await tran.unlock('bar', 'foo'); + expect(tran.locks.size).toBe(0); + }); + await db.withTransactionF(async (tran) => { + await tran.lock('bar', 'foo'); + await tran.unlock('bar', 'foo'); + expect(tran.locks.size).toBe(0); + }); + // Duplicates are eliminated + await db.withTransactionF(async (tran) => { + await tran.lock('foo', 'foo'); + expect(tran.locks.size).toBe(1); + await tran.unlock('foo', 'foo'); + expect(tran.locks.size).toBe(0); + }); + }); + test('read and write locking', async () => { + await db.withTransactionF(async (tran1) => { + await tran1.lock(['foo', 'read']); + await tran1.lock(['bar', 'write']); + // There is no automatic lock upgrade or downgrade + await expect(tran1.lock(['foo', 'write'])).rejects.toThrow( + errors.ErrorDBTransactionLockType, + ); + await expect(tran1.lock(['bar', 'read'])).rejects.toThrow( + errors.ErrorDBTransactionLockType, + ); + await db.withTransactionF(async (tran2) => { + await tran2.lock(['foo', 'read']); + await 
expect(tran2.lock(['bar', 'write', 0])).rejects.toThrow( + locksErrors.ErrorAsyncLocksTimeout, + ); + expect(tran1.locks.size).toBe(2); + expect(tran1.locks.has('foo')).toBe(true); + expect(tran1.locks.get('foo')!.type).toBe('read'); + expect(tran2.locks.size).toBe(1); + expect(tran2.locks.has('foo')).toBe(true); + expect(tran2.locks.get('foo')!.type).toBe('read'); + }); + expect(tran1.locks.size).toBe(2); + await tran1.unlock('bar'); + await db.withTransactionF(async (tran2) => { + await tran2.lock(['foo', 'read']); + await tran2.lock(['bar', 'write']); + expect(tran1.locks.size).toBe(1); + expect(tran1.locks.has('foo')).toBe(true); + expect(tran1.locks.get('foo')!.type).toBe('read'); + expect(tran2.locks.size).toBe(2); + expect(tran2.locks.has('foo')).toBe(true); + expect(tran2.locks.get('foo')!.type).toBe('read'); + expect(tran2.locks.has('bar')).toBe(true); + expect(tran2.locks.get('bar')!.type).toBe('write'); + }); + }); + }); + test('locks are unlocked in reverse order', async () => { + const order: Array<string> = []; + let p1, p2; + await db.withTransactionF(async (tran) => { + // '1' and '2' are in sort order + await tran.lock('1'); + await tran.lock('2'); + p1 = db.withTransactionF(async (tran) => { + await tran.lock('1'); + order.push('1'); + }); + p2 = db.withTransactionF(async (tran) => { + await tran.lock('2'); + order.push('2'); + }); + }); + await Promise.all([p2, p1]); + expect(order).toStrictEqual(['2', '1']); + }); + test('lock re-entrancy', async () => { + await db.withTransactionF(async (tran) => { + // Locking with the same keys is idempotent + await tran.lock('key1', 'key2'); + await tran.lock('key1', 'key2'); + await tran.lock('key1'); + await tran.lock('key2'); + }); + }); + test('locks are isolated per transaction', async () => { + await db.withTransactionF(async (tran1) => { + await tran1.lock('key1', 'key2'); + expect(tran1.locks.size).toBe(2); + await db.withTransactionF(async (tran2) => { + // This is a noop, because `tran1` owns `key1` and `key2` + await tran2.unlock('key1', 'key2'); + // This fails because `key1` is still locked by `tran1` + await expect(tran2.lock(['key1', 'write', 0])).rejects.toThrow( + locksErrors.ErrorAsyncLocksTimeout, + ); + await tran1.unlock('key1'); + expect(tran1.locks.size).toBe(1); + // This succeeds because `key1` is now unlocked + await tran2.lock('key1'); + expect(tran2.locks.size).toBe(1); + // This is a noop, because `tran2` owns `key1` + await tran1.unlock('key1'); + expect(tran2.locks.has('key1')).toBe(true); + expect(tran1.locks.has('key1')).toBe(false); + await expect(tran1.lock(['key1', 'write', 0])).rejects.toThrow( + locksErrors.ErrorAsyncLocksTimeout, + ); + }); + await tran1.lock('key1'); + expect(tran1.locks.has('key1')).toBe(true); + expect(tran1.locks.has('key2')).toBe(true); + }); + }); + test('deadlock', async () => { + await db.withTransactionF(async (tran1) => { + await db.withTransactionF(async (tran2) => { + await tran1.lock('foo'); + await tran2.lock('bar'); + // Currently a deadlock can happen, and the only way to avoid it is to use timeouts + // In the future, we want to have `DBTransaction` detect deadlocks + // and automatically raise an `ErrorDBTransactionDeadlock` exception + const p1 = tran1.lock(['bar', 'write', 50]); + const p2 = tran2.lock(['foo', 'write', 50]); + const results = await Promise.allSettled([p1, p2]); + expect( + results.every( + (r) => + r.status === 'rejected' && + r.reason instanceof locksErrors.ErrorAsyncLocksTimeout, + ), + ).toBe(true); + }); + }); + }); }); diff --git
a/tests/rocksdb/rocksdbP.test.ts b/tests/rocksdb/rocksdbP.test.ts new file mode 100644 index 00000000..a82d8af8 --- /dev/null +++ b/tests/rocksdb/rocksdbP.test.ts @@ -0,0 +1,756 @@ +import type { RocksDBDatabase } from '@/rocksdb/types'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import rocksdbP from '@/rocksdb/rocksdbP'; + +describe('rocksdbP', () => { + let dataDir: string; + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'db-test-')); + }); + afterEach(async () => { + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + }); + test('dbOpen invalid log level option', async () => { + const dbPath = `${dataDir}/db`; + const db = rocksdbP.dbInit(); + await expect( + rocksdbP.dbOpen(db, dbPath, { + // @ts-ignore use incorrect value + infoLogLevel: 'incorrect', + }), + ).rejects.toHaveProperty('code', 'DB_OPEN'); + }); + test('dbClose is idempotent', async () => { + const dbPath = `${dataDir}/db`; + const db = rocksdbP.dbInit(); + await rocksdbP.dbOpen(db, dbPath, {}); + await expect(rocksdbP.dbClose(db)).resolves.toBeUndefined(); + await expect(rocksdbP.dbClose(db)).resolves.toBeUndefined(); + }); + test('dbClose auto-closes dangling snapshots, iterators and transactions', async () => { + const dbPath = `${dataDir}/db`; + const db = rocksdbP.dbInit(); + await rocksdbP.dbOpen(db, dbPath, {}); + const snap = rocksdbP.snapshotInit(db); + const iterator = rocksdbP.iteratorInit(db, {}); + const tran = rocksdbP.transactionInit(db, {}); + // This should auto-close them + await rocksdbP.dbClose(db); + // We can also attempt to close, which is idempotent + await rocksdbP.snapshotRelease(snap); + await rocksdbP.iteratorClose(iterator); + await rocksdbP.transactionRollback(tran); + }); + describe('database', () => { + let dbPath: string; + let db: RocksDBDatabase; + beforeEach(async () => { + dbPath = `${dataDir}/db`; + db = rocksdbP.dbInit(); + await rocksdbP.dbOpen(db, dbPath, {}); + }); + afterEach(async () => { + await rocksdbP.dbClose(db); + }); + test('dbMultiGet', async () => { + await rocksdbP.dbPut(db, 'foo', 'bar', {}); + await rocksdbP.dbPut(db, 'bar', 'foo', {}); + expect(await rocksdbP.dbMultiGet(db, ['foo', 'bar', 'abc'], {})).toEqual([ + 'bar', + 'foo', + undefined, + ]); + }); + test('dbGet and dbMultiGet with snapshots', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const snap = rocksdbP.snapshotInit(db); + await rocksdbP.dbPut(db, 'K1', '200', {}); + await rocksdbP.dbPut(db, 'K2', '200', {}); + expect(await rocksdbP.dbGet(db, 'K1', { snapshot: snap })).toBe('100'); + expect(await rocksdbP.dbGet(db, 'K2', { snapshot: snap })).toBe('100'); + expect( + await rocksdbP.dbMultiGet(db, ['K1', 'K2'], { + snapshot: snap, + }), + ).toEqual(['100', '100']); + expect(await rocksdbP.dbGet(db, 'K1', {})).toBe('200'); + expect(await rocksdbP.dbGet(db, 'K2', {})).toBe('200'); + expect(await rocksdbP.dbMultiGet(db, ['K1', 'K2'], {})).toEqual([ + '200', + '200', + ]); + await rocksdbP.snapshotRelease(snap); + }); + describe('iterators', () => { + test('iteratorClose is idempotent', async () => { + const it = rocksdbP.iteratorInit(db, {}); + await expect(rocksdbP.iteratorClose(it)).resolves.toBeUndefined(); + await expect(rocksdbP.iteratorClose(it)).resolves.toBeUndefined(); + }); + test('iteratorNextv signals when iterator is finished', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const iter1 =
rocksdbP.iteratorInit(db, {}); + expect(await rocksdbP.iteratorNextv(iter1, 2)).toEqual([ + [ + ['K1', '100'], + ['K2', '100'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter1); + const iter2 = rocksdbP.iteratorInit(db, {}); + expect(await rocksdbP.iteratorNextv(iter2, 3)).toEqual([ + [ + ['K1', '100'], + ['K2', '100'], + ], + true, + ]); + await rocksdbP.iteratorClose(iter2); + const iter3 = rocksdbP.iteratorInit(db, {}); + expect(await rocksdbP.iteratorNextv(iter3, 2)).toEqual([ + [ + ['K1', '100'], + ['K2', '100'], + ], + false, + ]); + expect(await rocksdbP.iteratorNextv(iter3, 1)).toEqual([[], true]); + await rocksdbP.iteratorClose(iter3); + }); + test('iteratorInit with implicit snapshot', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const iter = rocksdbP.iteratorInit(db, {}); + await rocksdbP.dbPut(db, 'K1', '200', {}); + await rocksdbP.dbPut(db, 'K2', '200', {}); + expect(await rocksdbP.iteratorNextv(iter, 2)).toEqual([ + [ + ['K1', '100'], + ['K2', '100'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter); + }); + test('iteratorInit with explicit snapshot', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const snap = rocksdbP.snapshotInit(db); + await rocksdbP.dbPut(db, 'K1', '200', {}); + await rocksdbP.dbPut(db, 'K2', '200', {}); + const iter = rocksdbP.iteratorInit(db, { + snapshot: snap, + }); + expect(await rocksdbP.iteratorNextv(iter, 2)).toEqual([ + [ + ['K1', '100'], + ['K2', '100'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter); + await rocksdbP.snapshotRelease(snap); + }); + test('iterators have consistent iteration', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const iter = rocksdbP.iteratorInit(db, {}); + expect(await rocksdbP.iteratorNextv(iter, 1)).toEqual([ + [['K1', '100']], + false, + ]); + await rocksdbP.dbPut(db, 'K2', '200', {}); + expect(await rocksdbP.iteratorNextv(iter, 1)).toEqual([ + [['K2', '100']], + false, + ]); + await rocksdbP.iteratorClose(iter); + }); + test('dbClear with implicit snapshot', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + await rocksdbP.dbClear(db, {}); + await expect(rocksdbP.dbGet(db, 'K1', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + await expect(rocksdbP.dbGet(db, 'K2', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + }); + test('dbClear with explicit snapshot', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const snap = rocksdbP.snapshotInit(db); + await rocksdbP.dbPut(db, 'K1', '200', {}); + await rocksdbP.dbPut(db, 'K2', '200', {}); + await rocksdbP.dbPut(db, 'K3', '200', {}); + await rocksdbP.dbPut(db, 'K4', '200', {}); + await rocksdbP.dbClear(db, { + snapshot: snap, + }); + await rocksdbP.snapshotRelease(snap); + await expect(rocksdbP.dbGet(db, 'K1', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + await expect(rocksdbP.dbGet(db, 'K2', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + expect(await rocksdbP.dbGet(db, 'K3', {})).toBe('200'); + expect(await rocksdbP.dbGet(db, 'K4', {})).toBe('200'); + }); + }); + describe('transactions', () => { + test('transactionCommit is idempotent', async () => { + const tran = rocksdbP.transactionInit(db, {}); + await expect(rocksdbP.transactionCommit(tran)).resolves.toBeUndefined(); + await 
expect(rocksdbP.transactionCommit(tran)).resolves.toBeUndefined(); + }); + test('transactionRollback is idempotent', async () => { + const tran = rocksdbP.transactionInit(db, {}); + await expect( + rocksdbP.transactionRollback(tran), + ).resolves.toBeUndefined(); + await expect( + rocksdbP.transactionRollback(tran), + ).resolves.toBeUndefined(); + }); + test('transactionGet, transactionPut, transactionDel', async () => { + const tran = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionPut(tran, 'foo', 'bar'); + await rocksdbP.transactionPut(tran, 'bar', 'foo'); + expect(await rocksdbP.transactionGet(tran, 'foo', {})).toBe('bar'); + await rocksdbP.transactionDel(tran, 'bar'); + await rocksdbP.transactionCommit(tran); + expect(await rocksdbP.dbGet(db, 'foo', {})).toBe('bar'); + await expect(rocksdbP.dbGet(db, 'bar', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + }); + test('transactionGetForUpdate addresses write skew by promoting gets into same-value puts', async () => { + // Snapshot isolation allows write skew anomalies to occur + // A write skew means that 2 transactions concurrently read from overlapping keys, + // then make disjoint updates that break a consistency constraint on those keys + // For example: + // T1 reads from k1, k2, writes to k1 + // T2 reads from k1, k2, writes to k2 + // Where the constraint is k1 + k2 >= 0 + await rocksdbP.dbPut(db, 'balance1', '100', {}); + await rocksdbP.dbPut(db, 'balance2', '100', {}); + const t1 = async () => { + const tran1 = rocksdbP.transactionInit(db, {}); + let balance1 = parseInt( + await rocksdbP.transactionGetForUpdate(tran1, 'balance1', {}), + ); + const balance2 = parseInt( + await rocksdbP.transactionGetForUpdate(tran1, 'balance2', {}), + ); + balance1 -= 100; + expect(balance1 + balance2).toBeGreaterThanOrEqual(0); + await rocksdbP.transactionPut(tran1, 'balance1', balance1.toString()); + await rocksdbP.transactionCommit(tran1); + }; + const t2 = async () => { + const tran2 = rocksdbP.transactionInit(db, {}); + const balance1 = parseInt( + await rocksdbP.transactionGetForUpdate(tran2, 'balance1', {}), + ); + let balance2 = parseInt( + await rocksdbP.transactionGetForUpdate(tran2, 'balance2', {}), + ); + balance2 -= 100; + expect(balance1 + balance2).toBeGreaterThanOrEqual(0); + await rocksdbP.transactionPut(tran2, 'balance2', balance2.toString()); + await rocksdbP.transactionCommit(tran2); + }; + // By using transactionGetForUpdate, we promote the read to a write that writes back the same value; + // this causes a write-write conflict + const results = await Promise.allSettled([t1(), t2()]); + // One will succeed, one will fail + expect(results.some((result) => result.status === 'fulfilled')).toBe( + true, + ); + expect( + results.some((result) => { + return ( + result.status === 'rejected' && + result.reason.code === 'TRANSACTION_CONFLICT' + ); + }), + ).toBe(true); + }); + test('transactionMultiGetForUpdate addresses write skew by promoting gets into same-value puts', async () => { + // Snapshot isolation allows write skew anomalies to occur + // A write skew means that 2 transactions concurrently read from overlapping keys, + // then make disjoint updates that break a consistency constraint on those keys + // For example: + // T1 reads from k1, k2, writes to k1 + // T2 reads from k1, k2, writes to k2 + // Where the constraint is k1 + k2 >= 0 + await rocksdbP.dbPut(db, 'balance1', '100', {}); + await rocksdbP.dbPut(db, 'balance2', '100', {}); + const t1 = async () => { + const tran1 = rocksdbP.transactionInit(db,
{}); + let balance1 = parseInt( + ( + await rocksdbP.transactionMultiGetForUpdate( + tran1, + ['balance1'], + {}, + ) + )[0], + ); + const balance2 = parseInt( + ( + await rocksdbP.transactionMultiGetForUpdate( + tran1, + ['balance2'], + {}, + ) + )[0], + ); + balance1 -= 100; + expect(balance1 + balance2).toBeGreaterThanOrEqual(0); + await rocksdbP.transactionPut(tran1, 'balance1', balance1.toString()); + await rocksdbP.transactionCommit(tran1); + }; + const t2 = async () => { + const tran2 = rocksdbP.transactionInit(db, {}); + const balance1 = parseInt( + ( + await rocksdbP.transactionMultiGetForUpdate( + tran2, + ['balance1'], + {}, + ) + )[0], + ); + let balance2 = parseInt( + ( + await rocksdbP.transactionMultiGetForUpdate( + tran2, + ['balance2'], + {}, + ) + )[0], + ); + balance2 -= 100; + expect(balance1 + balance2).toBeGreaterThanOrEqual(0); + await rocksdbP.transactionPut(tran2, 'balance2', balance2.toString()); + await rocksdbP.transactionCommit(tran2); + }; + // By using transactionMultiGetForUpdate, we promote the reads to writes that write back the same values; + // this causes a write-write conflict + const results = await Promise.allSettled([t1(), t2()]); + // One will succeed, one will fail + expect(results.some((result) => result.status === 'fulfilled')).toBe( + true, + ); + expect( + results.some((result) => { + return ( + result.status === 'rejected' && + result.reason.code === 'TRANSACTION_CONFLICT' + ); + }), + ).toBe(true); + }); + test('transactionIteratorInit iterates over the overlay and falls back to the underlay', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + await rocksdbP.dbPut(db, 'K3', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionPut(tran, 'K2', '200'); + await rocksdbP.transactionDel(tran, 'K3'); + await rocksdbP.transactionPut(tran, 'K4', '200'); + const iter = rocksdbP.transactionIteratorInit(tran, {}); + expect(await rocksdbP.iteratorNextv(iter, 3)).toEqual([ + [ + ['K1', '100'], + ['K2', '200'], + ['K4', '200'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter); + await rocksdbP.transactionRollback(tran); + }); + test('transactionGetForUpdate does not block transactions', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + // T1 locks K2 and updates K2 + const tran1 = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionGetForUpdate(tran1, 'K2', {}); + await rocksdbP.transactionPut(tran1, 'K2', '200'); + // T2 locks K2 and updates K2 to the same value; + // if `transactionGetForUpdate` were blocking, then this + // would result in a deadlock + const tran2 = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionGetForUpdate(tran2, 'K2', {}); + await rocksdbP.transactionPut(tran2, 'K2', '200'); + await rocksdbP.transactionCommit(tran2); + // However optimistic transactions never deadlock + // So T2 commits, but T1 will get a conflict exception + // And therefore the `exclusive` option is not relevant + // to optimistic transactions + await expect(rocksdbP.transactionCommit(tran1)).rejects.toHaveProperty( + 'code', + 'TRANSACTION_CONFLICT', + ); + }); + test('transactionMultiGetForUpdate does not block transactions', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + // T1 locks K2 and updates K2 + const tran1 = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionMultiGetForUpdate(tran1, ['K2'], {}); + await
rocksdbP.transactionPut(tran1, 'K2', '200'); + // T2 locks K2 and updates K2 to the same value; + // if `transactionMultiGetForUpdate` were blocking, then this + // would result in a deadlock + const tran2 = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionMultiGetForUpdate(tran2, ['K2'], {}); + await rocksdbP.transactionPut(tran2, 'K2', '200'); + await rocksdbP.transactionCommit(tran2); + // However optimistic transactions never deadlock + // So T2 commits, but T1 will get a conflict exception + // And therefore the `exclusive` option is not relevant + // to optimistic transactions + await expect(rocksdbP.transactionCommit(tran1)).rejects.toHaveProperty( + 'code', + 'TRANSACTION_CONFLICT', + ); + }); + describe('transaction without snapshot', () => { + test('no conflict when db write occurs before transaction write', async () => { + // No conflict since the write directly to the DB occurred before the transaction write + const tran = rocksdbP.transactionInit(db, {}); + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.transactionPut(tran, 'K1', '200'); + await rocksdbP.transactionCommit(tran); + expect(await rocksdbP.dbGet(db, 'K1', {})).toBe('200'); + }); + test('conflicts when db write occurs after transaction write', async () => { + // Conflict because the write directly to the DB occurred after the transaction write + const tran = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionPut(tran, 'K1', '200'); + await rocksdbP.dbPut(db, 'K1', '100', {}); + await expect(rocksdbP.transactionCommit(tran)).rejects.toHaveProperty( + 'code', + 'TRANSACTION_CONFLICT', + ); + }); + test('transactionGet non-repeatable reads', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + expect(await rocksdbP.transactionGet(tran, 'K1', {})).toBe('100'); + await rocksdbP.dbPut(db, 'K1', '200', {}); + expect(await rocksdbP.transactionGet(tran, 'K1', {})).toBe('200'); + await rocksdbP.transactionCommit(tran); + }); + test('transactionGetForUpdate non-repeatable reads', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + expect(await rocksdbP.transactionGetForUpdate(tran, 'K1', {})).toBe( + '100', + ); + await rocksdbP.dbPut(db, 'K1', '200', {}); + expect(await rocksdbP.transactionGetForUpdate(tran, 'K1', {})).toBe( + '200', + ); + await expect(rocksdbP.transactionCommit(tran)).rejects.toHaveProperty( + 'code', + 'TRANSACTION_CONFLICT', + ); + }); + test('iterator non-repeatable reads', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + await rocksdbP.dbPut(db, 'K1', '200', {}); + await rocksdbP.dbPut(db, 'K2', '200', {}); + const iter1 = rocksdbP.transactionIteratorInit(tran, {}); + expect(await rocksdbP.iteratorNextv(iter1, 2)).toEqual([ + [ + ['K1', '200'], + ['K2', '200'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter1); + await rocksdbP.dbPut(db, 'K1', '300', {}); + await rocksdbP.dbPut(db, 'K2', '300', {}); + const iter2 = rocksdbP.transactionIteratorInit(tran, {}); + expect(await rocksdbP.iteratorNextv(iter2, 2)).toEqual([ + [ + ['K1', '300'], + ['K2', '300'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter2); + await rocksdbP.transactionRollback(tran); + }); + test('clear with non-repeatable read', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const tran =
rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionPut(tran, 'K2', '200'); + await rocksdbP.transactionPut(tran, 'K3', '200'); + await rocksdbP.dbPut(db, 'K4', '200', {}); + // This will delete K1, K2, K3, K4 + await rocksdbP.transactionClear(tran, {}); + await rocksdbP.transactionCommit(tran); + await expect(rocksdbP.dbGet(db, 'K1', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + await expect(rocksdbP.dbGet(db, 'K2', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + await expect(rocksdbP.dbGet(db, 'K3', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + await expect(rocksdbP.dbGet(db, 'K4', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + }); + test('transactionMultiGet with non-repeatable read', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionPut(tran, 'K2', '200'); + await rocksdbP.transactionPut(tran, 'K3', '200'); + await rocksdbP.dbPut(db, 'K4', '200', {}); + expect( + await rocksdbP.transactionMultiGet( + tran, + ['K1', 'K2', 'K3', 'K4', 'K5'], + {}, + ), + ).toEqual(['100', '200', '200', '200', undefined]); + await rocksdbP.transactionCommit(tran); + }); + test('transactionMultiGetForUpdate with non-repeatable read', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionPut(tran, 'K2', '200'); + await rocksdbP.transactionPut(tran, 'K3', '200'); + await rocksdbP.dbPut(db, 'K4', '200', {}); + expect( + await rocksdbP.transactionMultiGetForUpdate( + tran, + ['K1', 'K2', 'K3', 'K4', 'K5'], + {}, + ), + ).toEqual(['100', '200', '200', '200', undefined]); + // No conflict because the K4 write was done prior to `transactionMultiGetForUpdate` + await rocksdbP.transactionCommit(tran); + }); + }); + describe('transaction with snapshot', () => { + test('conflicts when db write occurs after snapshot creation', async () => { + const tran = rocksdbP.transactionInit(db, {}); + rocksdbP.transactionSnapshot(tran); + // Conflict because the snapshot was set at the beginning of the transaction + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.transactionPut(tran, 'K1', '200'); + await expect(rocksdbP.transactionCommit(tran)).rejects.toHaveProperty( + 'code', + 'TRANSACTION_CONFLICT', + ); + }); + test('transactionGet repeatable reads', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + const tranSnap = rocksdbP.transactionSnapshot(tran); + expect( + await rocksdbP.transactionGet(tran, 'K1', { snapshot: tranSnap }), + ).toBe('100'); + await rocksdbP.dbPut(db, 'K1', '200', {}); + expect( + await rocksdbP.transactionGet(tran, 'K1', { snapshot: tranSnap }), + ).toBe('100'); + await rocksdbP.transactionRollback(tran); + }); + test('transactionGet repeatable reads use write overlay', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + const tranSnap = rocksdbP.transactionSnapshot(tran); + expect( + await rocksdbP.transactionGet(tran, 'K1', { snapshot: tranSnap }), + ).toBe('100'); + await rocksdbP.transactionPut(tran, 'K1', '300'); + await rocksdbP.dbPut(db, 'K1', '200', {}); + // Even though we're using the snapshot, because the transaction has 300 written, + // it ends up reading 300 and ignores the 200 that was written directly to the DB + expect( + await
rocksdbP.transactionGet(tran, 'K1', { snapshot: tranSnap }), + ).toBe('300'); + await rocksdbP.transactionRollback(tran); + }); + test('transactionGetForUpdate repeatable reads', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + const tranSnap = rocksdbP.transactionSnapshot(tran); + expect( + await rocksdbP.transactionGetForUpdate(tran, 'K1', { + snapshot: tranSnap, + }), + ).toBe('100'); + await rocksdbP.dbPut(db, 'K1', '200', {}); + expect( + await rocksdbP.transactionGetForUpdate(tran, 'K1', { + snapshot: tranSnap, + }), + ).toBe('100'); + await expect(rocksdbP.transactionCommit(tran)).rejects.toHaveProperty( + 'code', + 'TRANSACTION_CONFLICT', + ); + }); + test('iterator repeatable reads', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + await rocksdbP.transactionPut(tran, 'K3', '100'); + const tranSnap1 = rocksdbP.transactionSnapshot(tran); + const iter1 = rocksdbP.transactionIteratorInit(tran, { + snapshot: tranSnap1, + }); + expect(await rocksdbP.iteratorNextv(iter1, 3)).toEqual([ + [ + ['K1', '100'], + ['K2', '100'], + ['K3', '100'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter1); + await rocksdbP.transactionPut(tran, 'K2', '200'); + await rocksdbP.transactionPut(tran, 'K3', '200'); + await rocksdbP.dbPut(db, 'K1', '200', {}); + const iter2 = rocksdbP.transactionIteratorInit(tran, { + snapshot: tranSnap1, + }); + // Notice that this iteration uses the new values written + // in this transaction; this means the snapshot only applies + // to the underlying database, it's not a snapshot over the transaction + // writes + expect(await rocksdbP.iteratorNextv(iter2, 3)).toEqual([ + [ + ['K1', '100'], + ['K2', '200'], + ['K3', '200'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter2); + // Resetting the snapshot for the transaction + // Now the snapshot takes the current state of the DB, + // but the transaction writes are overlaid on top + const tranSnap2 = rocksdbP.transactionSnapshot(tran); + await rocksdbP.dbPut(db, 'K2', '300', {}); + const iter3 = rocksdbP.transactionIteratorInit(tran, { + snapshot: tranSnap2, + }); + expect(await rocksdbP.iteratorNextv(iter3, 3)).toEqual([ + [ + ['K1', '200'], + ['K2', '200'], + ['K3', '200'], + ], + false, + ]); + await rocksdbP.iteratorClose(iter3); + // Therefore iterators should always use the snapshot taken + // at the beginning of the transaction + await rocksdbP.transactionRollback(tran); + }); + test('clear with repeatable read', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + const tranSnap = rocksdbP.transactionSnapshot(tran); + await rocksdbP.transactionPut(tran, 'K2', '200'); + await rocksdbP.transactionPut(tran, 'K3', '200'); + await rocksdbP.dbPut(db, 'K4', '200', {}); + // This will delete K1, K2, K3 + await rocksdbP.transactionClear(tran, { snapshot: tranSnap }); + await rocksdbP.transactionCommit(tran); + await expect(rocksdbP.dbGet(db, 'K1', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + await expect(rocksdbP.dbGet(db, 'K2', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + await expect(rocksdbP.dbGet(db, 'K3', {})).rejects.toHaveProperty( + 'code', + 'NOT_FOUND', + ); + expect(await rocksdbP.dbGet(db, 'K4', {})).toBe('200'); + }); + test('transactionMultiGet with repeatable read', async () => { + await
rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + const tranSnap = rocksdbP.transactionSnapshot(tran); + await rocksdbP.transactionPut(tran, 'K2', '200'); + await rocksdbP.transactionPut(tran, 'K3', '200'); + await rocksdbP.dbPut(db, 'K4', '200', {}); + expect( + await rocksdbP.transactionMultiGet( + tran, + ['K1', 'K2', 'K3', 'K4', 'K5'], + { + snapshot: tranSnap, + }, + ), + ).toEqual(['100', '200', '200', undefined, undefined]); + await rocksdbP.transactionCommit(tran); + }); + test('transactionMultiGetForUpdate with repeatable read', async () => { + await rocksdbP.dbPut(db, 'K1', '100', {}); + await rocksdbP.dbPut(db, 'K2', '100', {}); + const tran = rocksdbP.transactionInit(db, {}); + const tranSnap = rocksdbP.transactionSnapshot(tran); + await rocksdbP.transactionPut(tran, 'K2', '200'); + await rocksdbP.transactionPut(tran, 'K3', '200'); + await rocksdbP.dbPut(db, 'K4', '200', {}); + expect( + await rocksdbP.transactionMultiGetForUpdate( + tran, + ['K1', 'K2', 'K3', 'K4', 'K5'], + { + snapshot: tranSnap, + }, + ), + ).toEqual(['100', '200', '200', undefined, undefined]); + // Conflict because the K4 write was done after the snapshot + await expect(rocksdbP.transactionCommit(tran)).rejects.toHaveProperty( + 'code', + 'TRANSACTION_CONFLICT', + ); + }); + }); + }); +});
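
The tests above establish that ErrorDBTransactionConflict is the expected failure mode for optimistic transactions. As a companion, here is a minimal sketch (not part of this diff) of the retry wrapper a consumer would pair with OCC; it assumes only the DB, DBTransaction and errors imports already used in tests/DBTransaction.test.ts, and the withConflictRetry name and retry limit are hypothetical:

import DB from '@/DB';
import DBTransaction from '@/DBTransaction';
import * as errors from '@/errors';

// Re-run an optimistic transaction when it fails to commit with a conflict.
// Each retry re-executes `f` against a fresh snapshot of the DB.
async function withConflictRetry<T>(
  db: DB,
  f: (tran: DBTransaction) => Promise<T>,
  maxRetries: number = 3,
): Promise<T> {
  for (let attempt = 0; ; attempt++) {
    try {
      return await db.withTransactionF(f);
    } catch (e) {
      if (e instanceof errors.ErrorDBTransactionConflict && attempt < maxRetries) {
        continue; // Conflicts are expected under OCC; try again
      }
      throw e;
    }
  }
}

Under low contention this converges quickly; under high contention the racing-counters test above shows why tran.lock is the better tool.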
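The deadlock test acquires foo and bar in opposite orders across separate lock() calls, which is what makes the cycle possible. A sketch of the usual mitigation, under the assumption (suggested by the sort-order comment in the "locks are unlocked in reverse order" test) that a single multi-key lock() call acquires keys in a canonical order:

// Every transaction requests its full key set in one lock() call,
// in the same sorted order, so no transaction can hold one key while
// waiting on the other; the lock timeout then becomes a pure backstop
await db.withTransactionF(async (tran) => {
  await tran.lock('bar', 'foo'); // 'bar' sorts before 'foo'
  await tran.put('foo', 'x');
  await tran.put('bar', 'y');
});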
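Finally, the "iterator repeatable reads" test ends by noting that iterators should always use the snapshot taken at the beginning of the transaction. A sketch of that recommended low-level pattern, using only rocksdbP calls exercised above (key names are placeholders):

// Take the snapshot once, immediately after transactionInit,
// and thread it through every read in the transaction
const tran = rocksdbP.transactionInit(db, {});
const snap = rocksdbP.transactionSnapshot(tran);
const value = await rocksdbP.transactionGet(tran, 'K1', { snapshot: snap });
const iter = rocksdbP.transactionIteratorInit(tran, { snapshot: snap });
const [entries, finished] = await rocksdbP.iteratorNextv(iter, 100);
await rocksdbP.iteratorClose(iter);
// Commit can still reject with TRANSACTION_CONFLICT if the DB moved on
await rocksdbP.transactionCommit(tran);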