diff --git a/db/sqlite3/src/sqlite3.c b/db/sqlite3/src/sqlite3.c index 73c69efbd50f..77dc0278d65a 100644 --- a/db/sqlite3/src/sqlite3.c +++ b/db/sqlite3/src/sqlite3.c @@ -1,6 +1,6 @@ /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.22.0. By combining all the individual C code files into this +** version 3.23.0. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -213,7 +213,7 @@ static const char * const sqlite3azCompileOpt[] = { "ENABLE_BATCH_ATOMIC_WRITE", #endif #if SQLITE_ENABLE_CEROD - "ENABLE_CEROD", + "ENABLE_CEROD=" CTIMEOPT_VAL(SQLITE_ENABLE_CEROD), #endif #if SQLITE_ENABLE_COLUMN_METADATA "ENABLE_COLUMN_METADATA", @@ -1147,9 +1147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.22.0" -#define SQLITE_VERSION_NUMBER 3022000 -#define SQLITE_SOURCE_ID "2018-01-22 18:45:57 0c55d179733b46d8d0ba4d88e01a25e10677046ee3da1d5b1581e86726f2171d" +#define SQLITE_VERSION "3.23.0" +#define SQLITE_VERSION_NUMBER 3023000 +#define SQLITE_SOURCE_ID "2018-04-02 11:04:16 736b53f57f70b23172c30880186dce7ad9baa3b74e3838cae5847cffb98f5cd2" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -2088,6 +2088,12 @@ struct sqlite3_io_methods { ** so that all subsequent write operations are independent. ** ^SQLite will never invoke SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE without ** a prior successful call to [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE]. +** +**
  • [[SQLITE_FCNTL_LOCK_TIMEOUT]] +** The [SQLITE_FCNTL_LOCK_TIMEOUT] opcode causes attempts to obtain +** a file lock using the xLock or xShmLock methods of the VFS to wait +** for up to M milliseconds before failing, where M is the single +** unsigned integer parameter. ** */ #define SQLITE_FCNTL_LOCKSTATE 1 @@ -2122,6 +2128,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_BEGIN_ATOMIC_WRITE 31 #define SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32 #define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33 +#define SQLITE_FCNTL_LOCK_TIMEOUT 34 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -3078,11 +3085,13 @@ struct sqlite3_mem_methods { ** connections at all to the database. If so, it performs a checkpoint ** operation before closing the connection. This option may be used to ** override this behaviour. The first parameter passed to this operation -** is an integer - non-zero to disable checkpoints-on-close, or zero (the -** default) to enable them. The second parameter is a pointer to an integer +** is an integer - positive to disable checkpoints-on-close, or zero (the +** default) to enable them, and negative to leave the setting unchanged. +** The second parameter is a pointer to an integer ** into which is written 0 or 1 to indicate whether checkpoints-on-close ** have been disabled - 0 if they are not disabled, 1 if they are. ** +** **
    SQLITE_DBCONFIG_ENABLE_QPSG
    **
    ^(The SQLITE_DBCONFIG_ENABLE_QPSG option activates or deactivates ** the [query planner stability guarantee] (QPSG). When the QPSG is active, @@ -3092,13 +3101,20 @@ struct sqlite3_mem_methods { ** slower. But the QPSG has the advantage of more predictable behavior. With ** the QPSG active, SQLite will always use the same query plan in the field as ** was used during testing in the lab. +** The first argument to this setting is an integer which is 0 to disable +** the QPSG, positive to enable QPSG, or negative to leave the setting +** unchanged. The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether the QPSG is disabled or enabled +** following this call. **
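    +** For example, an application might query the current QPSG setting
    +** without changing it by passing a negative value (a sketch only;
    +** error handling omitted):
    +**
    +**     int bQpsg = -1;
    +**     sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_QPSG, -1, &bQpsg);
    +**     // bQpsg is now 0 (QPSG disabled) or 1 (QPSG enabled)
    +**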
    +** **
    SQLITE_DBCONFIG_TRIGGER_EQP
    **
    By default, the output of EXPLAIN QUERY PLAN commands does not ** include output for any operations performed by trigger programs. This ** option is used to set or clear (the default) a flag that governs this ** behavior. The first parameter passed to this operation is an integer - -** non-zero to enable output for trigger programs, or zero to disable it. +** positive to enable output for trigger programs, or zero to disable it, +** or negative to leave the setting unchanged. ** The second parameter is a pointer to an integer into which is written ** 0 or 1 to indicate whether output-for-triggers has been disabled - 0 if ** it is not disabled, 1 if it is. @@ -3520,16 +3536,16 @@ SQLITE_API void sqlite3_free_table(char **result); ** ** These routines are work-alikes of the "printf()" family of functions ** from the standard C library. -** These routines understand most of the common K&R formatting options, -** plus some additional non-standard formats, detailed below. -** Note that some of the more obscure formatting options from recent -** C-library standards are omitted from this implementation. +** These routines understand most of the common formatting options from +** the standard library printf() +** plus some additional non-standard formats ([%q], [%Q], [%w], and [%z]). +** See the [built-in printf()] documentation for details. ** ** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their -** results into memory obtained from [sqlite3_malloc()]. +** results into memory obtained from [sqlite3_malloc64()]. ** The strings returned by these two routines should be ** released by [sqlite3_free()]. ^Both routines return a -** NULL pointer if [sqlite3_malloc()] is unable to allocate enough +** NULL pointer if [sqlite3_malloc64()] is unable to allocate enough ** memory to hold the resulting string. ** ** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from @@ -3553,71 +3569,7 @@ SQLITE_API void sqlite3_free_table(char **result); ** ** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). ** -** These routines all implement some additional formatting -** options that are useful for constructing SQL statements. -** All of the usual printf() formatting options apply. In addition, there -** is are "%q", "%Q", "%w" and "%z" options. -** -** ^(The %q option works like %s in that it substitutes a nul-terminated -** string from the argument list. But %q also doubles every '\'' character. -** %q is designed for use inside a string literal.)^ By doubling each '\'' -** character it escapes that character and allows it to be inserted into -** the string. -** -** For example, assume the string variable zText contains text as follows: -** -**
    -**  char *zText = "It's a happy day!";
    -** 
    -** -** One can use this text in an SQL statement as follows: -** -**
    -**  char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES('%q')", zText);
    -**  sqlite3_exec(db, zSQL, 0, 0, 0);
    -**  sqlite3_free(zSQL);
    -** 
    -** -** Because the %q format string is used, the '\'' character in zText -** is escaped and the SQL generated is as follows: -** -**
    -**  INSERT INTO table1 VALUES('It''s a happy day!')
    -** 
    -** -** This is correct. Had we used %s instead of %q, the generated SQL -** would have looked like this: -** -**
    -**  INSERT INTO table1 VALUES('It's a happy day!');
    -** 
    -** -** This second example is an SQL syntax error. As a general rule you should -** always use %q instead of %s when inserting text into a string literal. -** -** ^(The %Q option works like %q except it also adds single quotes around -** the outside of the total string. Additionally, if the parameter in the -** argument list is a NULL pointer, %Q substitutes the text "NULL" (without -** single quotes).)^ So, for example, one could say: -** -**
    -**  char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES(%Q)", zText);
    -**  sqlite3_exec(db, zSQL, 0, 0, 0);
    -**  sqlite3_free(zSQL);
    -** 
    -** -** The code above will render a correct SQL statement in the zSQL -** variable even if the zText variable is a NULL pointer. -** -** ^(The "%w" formatting option is like "%q" except that it expects to -** be contained within double-quotes instead of single quotes, and it -** escapes the double-quote character instead of the single-quote -** character.)^ The "%w" formatting option is intended for safely inserting -** table and column names into a constructed SQL statement. -** -** ^(The "%z" formatting option works like "%s" but with the -** addition that after the string has been read and copied into -** the result, [sqlite3_free()] is called on the input string.)^ +** See also: [built-in printf()], [printf() SQL function] */ SQLITE_API char *sqlite3_mprintf(const char*,...); SQLITE_API char *sqlite3_vmprintf(const char*, va_list); @@ -4683,13 +4635,13 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** or [GLOB] operator or if the parameter is compared to an indexed column ** and the [SQLITE_ENABLE_STAT3] compile-time option is enabled. **
  • +** ** **

    ^sqlite3_prepare_v3() differs from sqlite3_prepare_v2() only in having ** the extra prepFlags parameter, which is a bit array consisting of zero or ** more of the [SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_*] flags. ^The ** sqlite3_prepare_v2() interface works exactly the same as ** sqlite3_prepare_v3() with a zero prepFlags parameter. -** */ SQLITE_API int sqlite3_prepare( sqlite3 *db, /* Database handle */ @@ -8318,6 +8270,15 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0. ** ** +** [[SQLITE_DBSTATUS_CACHE_SPILL]] ^(

    SQLITE_DBSTATUS_CACHE_SPILL
    +**
    This parameter returns the number of dirty cache entries that have +** been written to disk in the middle of a transaction due to the page +** cache overflowing. Transactions are more efficient if they are written +** to disk all at once. When pages spill mid-transaction, that introduces +** additional overhead. This parameter can be used to help identify +** inefficiencies that can be resolved by increasing the cache size. +**
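    +** As a rough sketch, an application might sample this counter as
    +** follows (error handling omitted; the highwater mark is reported as
    +** zero, as for SQLITE_DBSTATUS_CACHE_WRITE):
    +**
    +**     int nSpill = 0, nHiwtr = 0;
    +**     sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_SPILL, &nSpill, &nHiwtr, 0);
    +**     // a steadily growing nSpill suggests the page cache is too small
    +**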
    +** ** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(
    SQLITE_DBSTATUS_DEFERRED_FKS
    **
    This parameter returns zero for the current value if and only if ** all foreign key constraints (deferred or immediate) have been @@ -8337,7 +8298,8 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r #define SQLITE_DBSTATUS_CACHE_WRITE 9 #define SQLITE_DBSTATUS_DEFERRED_FKS 10 #define SQLITE_DBSTATUS_CACHE_USED_SHARED 11 -#define SQLITE_DBSTATUS_MAX 11 /* Largest defined DBSTATUS */ +#define SQLITE_DBSTATUS_CACHE_SPILL 12 +#define SQLITE_DBSTATUS_MAX 12 /* Largest defined DBSTATUS */ /* @@ -9817,6 +9779,128 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( */ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); +/* +** CAPI3REF: Serialize a database +** +** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory +** that is a serialization of the S database on [database connection] D. +** If P is not a NULL pointer, then the size of the database in bytes +** is written into *P. +** +** For an ordinary on-disk database file, the serialization is just a +** copy of the disk file. For an in-memory database or a "TEMP" database, +** the serialization is the same sequence of bytes which would be written +** to disk if that database where backed up to disk. +** +** The usual case is that sqlite3_serialize() copies the serialization of +** the database into memory obtained from [sqlite3_malloc64()] and returns +** a pointer to that memory. The caller is responsible for freeing the +** returned value to avoid a memory leak. However, if the F argument +** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations +** are made, and the sqlite3_serialize() function will return a pointer +** to the contiguous memory representation of the database that SQLite +** is currently using for that database, or NULL if the no such contiguous +** memory representation of the database exists. A contiguous memory +** representation of the database will usually only exist if there has +** been a prior call to [sqlite3_deserialize(D,S,...)] with the same +** values of D and S. +** The size of the database is written into *P even if the +** SQLITE_SERIALIZE_NOCOPY bit is set but no contigious copy +** of the database exists. +** +** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the +** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory +** allocation error occurs. +** +** This interface is only available if SQLite is compiled with the +** [SQLITE_ENABLE_DESERIALIZE] option. +*/ +SQLITE_API unsigned char *sqlite3_serialize( + sqlite3 *db, /* The database connection */ + const char *zSchema, /* Which DB to serialize. ex: "main", "temp", ... */ + sqlite3_int64 *piSize, /* Write size of the DB here, if not NULL */ + unsigned int mFlags /* Zero or more SQLITE_SERIALIZE_* flags */ +); + +/* +** CAPI3REF: Flags for sqlite3_serialize +** +** Zero or more of the following constants can be OR-ed together for +** the F argument to [sqlite3_serialize(D,S,P,F)]. +** +** SQLITE_SERIALIZE_NOCOPY means that [sqlite3_serialize()] will return +** a pointer to contiguous in-memory database that it is currently using, +** without making a copy of the database. If SQLite is not currently using +** a contiguous in-memory database, then this option causes +** [sqlite3_serialize()] to return a NULL pointer. SQLite will only be +** using a contiguous in-memory database if it has been initialized by a +** prior call to [sqlite3_deserialize()]. 
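    +** A sketch of typical use of these two forms (assuming the
    +** SQLITE_ENABLE_DESERIALIZE build option; error handling omitted):
    +** first try the zero-copy form, then fall back to an ordinary copy,
    +** which the caller owns and must eventually sqlite3_free() - or hand
    +** to sqlite3_deserialize() with SQLITE_DESERIALIZE_FREEONCLOSE:
    +**
    +**     sqlite3_int64 sz = 0;
    +**     unsigned char *p = sqlite3_serialize(db, "main", &sz, SQLITE_SERIALIZE_NOCOPY);
    +**     if( p==0 ){
    +**       p = sqlite3_serialize(db, "main", &sz, 0);  // caller owns this copy
    +**       // ... use the sz-byte image in p ...
    +**       sqlite3_free(p);
    +**     }
    +**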
+*/ +#define SQLITE_SERIALIZE_NOCOPY 0x001 /* Do no memory allocations */ + +/* +** CAPI3REF: Deserialize a database +** +** The sqlite3_deserialize(D,S,P,N,M,F) interface causes the +** [database connection] D to disconnect from database S and then +** reopen S as an in-memory database based on the serialization contained +** in P. The serialized database P is N bytes in size. M is the size of +** the buffer P, which might be larger than N. If M is larger than N, and +** the SQLITE_DESERIALIZE_READONLY bit is not set in F, then SQLite is +** permitted to add content to the in-memory database as long as the total +** size does not exceed M bytes. +** +** If the SQLITE_DESERIALIZE_FREEONCLOSE bit is set in F, then SQLite will +** invoke sqlite3_free() on the serialization buffer when the database +** connection closes. If the SQLITE_DESERIALIZE_RESIZEABLE bit is set, then +** SQLite will try to increase the buffer size using sqlite3_realloc64() +** if writes on the database cause it to grow larger than M bytes. +** +** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the +** database is currently in a read transaction or is involved in a backup +** operation. +** +** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the +** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then +** [sqlite3_free()] is invoked on argument P prior to returning. +** +** This interface is only available if SQLite is compiled with the +** [SQLITE_ENABLE_DESERIALIZE] option. +*/ +SQLITE_API int sqlite3_deserialize( + sqlite3 *db, /* The database connection */ + const char *zSchema, /* Which DB to reopen with the deserialization */ + unsigned char *pData, /* The serialized database content */ + sqlite3_int64 szDb, /* Number bytes in the deserialization */ + sqlite3_int64 szBuf, /* Total size of buffer pData[] */ + unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */ +); + +/* +** CAPI3REF: Flags for sqlite3_deserialize() +** +** The following are allowed values for 6th argument (the F argument) to +** the [sqlite3_deserialize(D,S,P,N,M,F)] interface. +** +** The SQLITE_DESERIALIZE_FREEONCLOSE means that the database serialization +** in the P argument is held in memory obtained from [sqlite3_malloc64()] +** and that SQLite should take ownership of this memory and automatically +** free it when it has finished using it. Without this flag, the caller +** is resposible for freeing any dynamically allocated memory. +** +** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to +** grow the size of the database using calls to [sqlite3_realloc64()]. This +** flag should only be used if SQLITE_DESERIALIZE_FREEONCLOSE is also used. +** Without this flag, the deserialized database cannot increase in size beyond +** the number of bytes specified by the M parameter. +** +** The SQLITE_DESERIALIZE_READONLY flag means that the deserialized database +** should be treated as read-only. +*/ +#define SQLITE_DESERIALIZE_FREEONCLOSE 1 /* Call sqlite3_free() on close */ +#define SQLITE_DESERIALIZE_RESIZEABLE 2 /* Resize using sqlite3_realloc64() */ +#define SQLITE_DESERIALIZE_READONLY 4 /* Database is read-only */ + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. @@ -9964,16 +10048,23 @@ extern "C" { /* ** CAPI3REF: Session Object Handle +** +** An instance of this object is a [session] that can be used to +** record changes to a database. 
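    +**
    +** The typical lifecycle, in outline (a sketch only; the table name
    +** "t1" is illustrative and error handling is omitted):
    +**
    +**     sqlite3_session *pSession = 0;
    +**     int nChangeset = 0;
    +**     void *pChangeset = 0;
    +**     sqlite3session_create(db, "main", &pSession);
    +**     sqlite3session_attach(pSession, "t1");
    +**     // ... modify table t1 through db ...
    +**     sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
    +**     // ... store or transmit the changeset ...
    +**     sqlite3_free(pChangeset);
    +**     sqlite3session_delete(pSession);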
*/ typedef struct sqlite3_session sqlite3_session; /* ** CAPI3REF: Changeset Iterator Handle +** +** An instance of this object acts as a cursor for iterating +** over the elements of a [changeset] or [patchset]. */ typedef struct sqlite3_changeset_iter sqlite3_changeset_iter; /* ** CAPI3REF: Create A New Session Object +** CONSTRUCTOR: sqlite3_session ** ** Create a new session object attached to database handle db. If successful, ** a pointer to the new object is written to *ppSession and SQLITE_OK is @@ -10010,6 +10101,7 @@ SQLITE_API int sqlite3session_create( /* ** CAPI3REF: Delete A Session Object +** DESTRUCTOR: sqlite3_session ** ** Delete a session object previously allocated using ** [sqlite3session_create()]. Once a session object has been deleted, the @@ -10025,6 +10117,7 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* ** CAPI3REF: Enable Or Disable A Session Object +** METHOD: sqlite3_session ** ** Enable or disable the recording of changes by a session object. When ** enabled, a session object records changes made to the database. When @@ -10044,6 +10137,7 @@ SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable); /* ** CAPI3REF: Set Or Clear the Indirect Change Flag +** METHOD: sqlite3_session ** ** Each change recorded by a session object is marked as either direct or ** indirect. A change is marked as indirect if either: @@ -10073,6 +10167,7 @@ SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect) /* ** CAPI3REF: Attach A Table To A Session Object +** METHOD: sqlite3_session ** ** If argument zTab is not NULL, then it is the name of a table to attach ** to the session object passed as the first argument. All subsequent changes @@ -10135,6 +10230,7 @@ SQLITE_API int sqlite3session_attach( /* ** CAPI3REF: Set a table filter on a Session Object. +** METHOD: sqlite3_session ** ** The second argument (xFilter) is the "filter callback". For changes to rows ** in tables that are not attached to the Session object, the filter is called @@ -10153,6 +10249,7 @@ SQLITE_API void sqlite3session_table_filter( /* ** CAPI3REF: Generate A Changeset From A Session Object +** METHOD: sqlite3_session ** ** Obtain a changeset containing changes to the tables attached to the ** session object passed as the first argument. If successful, @@ -10262,7 +10359,8 @@ SQLITE_API int sqlite3session_changeset( ); /* -** CAPI3REF: Load The Difference Between Tables Into A Session +** CAPI3REF: Load The Difference Between Tables Into A Session +** METHOD: sqlite3_session ** ** If it is not already attached to the session object passed as the first ** argument, this function attaches table zTbl in the same manner as the @@ -10327,6 +10425,7 @@ SQLITE_API int sqlite3session_diff( /* ** CAPI3REF: Generate A Patchset From A Session Object +** METHOD: sqlite3_session ** ** The differences between a patchset and a changeset are that: ** @@ -10378,6 +10477,7 @@ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); /* ** CAPI3REF: Create An Iterator To Traverse A Changeset +** CONSTRUCTOR: sqlite3_changeset_iter ** ** Create an iterator used to iterate through the contents of a changeset. ** If successful, *pp is set to point to the iterator handle and SQLITE_OK @@ -10418,6 +10518,7 @@ SQLITE_API int sqlite3changeset_start( /* ** CAPI3REF: Advance A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** This function may only be used with iterators created by function ** [sqlite3changeset_start()]. 
If it is called on an iterator passed to @@ -10442,6 +10543,7 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); /* ** CAPI3REF: Obtain The Current Operation From A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** The pIter argument passed to this function may either be an iterator ** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator @@ -10476,6 +10578,7 @@ SQLITE_API int sqlite3changeset_op( /* ** CAPI3REF: Obtain The Primary Key Definition Of A Table +** METHOD: sqlite3_changeset_iter ** ** For each modified table, a changeset includes the following: ** @@ -10507,6 +10610,7 @@ SQLITE_API int sqlite3changeset_pk( /* ** CAPI3REF: Obtain old.* Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** The pIter argument passed to this function may either be an iterator ** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator @@ -10537,6 +10641,7 @@ SQLITE_API int sqlite3changeset_old( /* ** CAPI3REF: Obtain new.* Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** The pIter argument passed to this function may either be an iterator ** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator @@ -10570,6 +10675,7 @@ SQLITE_API int sqlite3changeset_new( /* ** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** This function should only be used with iterator objects passed to a ** conflict-handler callback by [sqlite3changeset_apply()] with either @@ -10597,6 +10703,7 @@ SQLITE_API int sqlite3changeset_conflict( /* ** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations +** METHOD: sqlite3_changeset_iter ** ** This function may only be called with an iterator passed to an ** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case @@ -10613,6 +10720,7 @@ SQLITE_API int sqlite3changeset_fk_conflicts( /* ** CAPI3REF: Finalize A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** This function is used to finalize an iterator allocated with ** [sqlite3changeset_start()]. @@ -10629,6 +10737,7 @@ SQLITE_API int sqlite3changeset_fk_conflicts( ** to that error is returned by this function. Otherwise, SQLITE_OK is ** returned. This is to allow the following pattern (pseudo-code): ** +**
     **   sqlite3changeset_start();
     **   while( SQLITE_ROW==sqlite3changeset_next() ){
     **     // Do something with change.
    @@ -10637,6 +10746,7 @@ SQLITE_API int sqlite3changeset_fk_conflicts(
     **   if( rc!=SQLITE_OK ){
     **     // An error has occurred 
     **   }
    +** 
    */ SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); @@ -10684,6 +10794,7 @@ SQLITE_API int sqlite3changeset_invert( ** sqlite3_changegroup object. Calling it produces similar results as the ** following code fragment: ** +**
     **   sqlite3_changegroup *pGrp;
     **   rc = sqlite3_changegroup_new(&pGrp);
     **   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
    @@ -10694,6 +10805,7 @@ SQLITE_API int sqlite3changeset_invert(
     **     *ppOut = 0;
     **     *pnOut = 0;
     **   }
    +** 
    ** ** Refer to the sqlite3_changegroup documentation below for details. */ @@ -10709,11 +10821,15 @@ SQLITE_API int sqlite3changeset_concat( /* ** CAPI3REF: Changegroup Handle +** +** A changegroup is an object used to combine two or more +** [changesets] or [patchsets] */ typedef struct sqlite3_changegroup sqlite3_changegroup; /* ** CAPI3REF: Create A New Changegroup Object +** CONSTRUCTOR: sqlite3_changegroup ** ** An sqlite3_changegroup object is used to combine two or more changesets ** (or patchsets) into a single changeset (or patchset). A single changegroup @@ -10751,6 +10867,7 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); /* ** CAPI3REF: Add A Changeset To A Changegroup +** METHOD: sqlite3_changegroup ** ** Add all changes within the changeset (or patchset) in buffer pData (size ** nData bytes) to the changegroup. @@ -10828,6 +10945,7 @@ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pDa /* ** CAPI3REF: Obtain A Composite Changeset From A Changegroup +** METHOD: sqlite3_changegroup ** ** Obtain a buffer containing a changeset (or patchset) representing the ** current contents of the changegroup. If the inputs to the changegroup @@ -10858,25 +10976,25 @@ SQLITE_API int sqlite3changegroup_output( /* ** CAPI3REF: Delete A Changegroup Object +** DESTRUCTOR: sqlite3_changegroup */ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); /* ** CAPI3REF: Apply A Changeset To A Database ** -** Apply a changeset to a database. This function attempts to update the -** "main" database attached to handle db with the changes found in the -** changeset passed via the second and third arguments. +** Apply a changeset or patchset to a database. These functions attempt to +** update the "main" database attached to handle db with the changes found in +** the changeset passed via the second and third arguments. ** -** The fourth argument (xFilter) passed to this function is the "filter +** The fourth argument (xFilter) passed to these functions is the "filter ** callback". If it is not NULL, then for each table affected by at least one ** change in the changeset, the filter callback is invoked with ** the table name as the second argument, and a copy of the context pointer -** passed as the sixth argument to this function as the first. If the "filter -** callback" returns zero, then no attempt is made to apply any changes to -** the table. Otherwise, if the return value is non-zero or the xFilter -** argument to this function is NULL, all changes related to the table are -** attempted. +** passed as the sixth argument as the first. If the "filter callback" +** returns zero, then no attempt is made to apply any changes to the table. +** Otherwise, if the return value is non-zero or the xFilter argument to +** is NULL, all changes related to the table are attempted. ** ** For each table that is not excluded by the filter callback, this function ** tests that the target database contains a compatible table. A table is @@ -10921,7 +11039,7 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** **
    **
    DELETE Changes
    -** For each DELETE change, this function checks if the target database +** For each DELETE change, the function checks if the target database ** contains a row with the same primary key value (or values) as the ** original row values stored in the changeset. If it does, and the values ** stored in all non-primary key columns also match the values stored in @@ -10966,7 +11084,7 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** [SQLITE_CHANGESET_REPLACE]. ** **
    UPDATE Changes
    -** For each UPDATE change, this function checks if the target database +** For each UPDATE change, the function checks if the target database ** contains a row with the same primary key value (or values) as the ** original row values stored in the changeset. If it does, and the values ** stored in all modified non-primary key columns also match the values @@ -10997,11 +11115,21 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** This can be used to further customize the applications conflict ** resolution strategy. ** -** All changes made by this function are enclosed in a savepoint transaction. +** All changes made by these functions are enclosed in a savepoint transaction. ** If any other error (aside from a constraint failure when attempting to ** write to the target database) occurs, then the savepoint transaction is ** rolled back, restoring the target database to its original state, and an ** SQLite error code returned. +** +** If the output parameters (ppRebase) and (pnRebase) are non-NULL and +** the input is a changeset (not a patchset), then sqlite3changeset_apply_v2() +** may set (*ppRebase) to point to a "rebase" that may be used with the +** sqlite3_rebaser APIs buffer before returning. In this case (*pnRebase) +** is set to the size of the buffer in bytes. It is the responsibility of the +** caller to eventually free any such buffer using sqlite3_free(). The buffer +** is only allocated and populated if one or more conflicts were encountered +** while applying the patchset. See comments surrounding the sqlite3_rebaser +** APIs for further details. */ SQLITE_API int sqlite3changeset_apply( sqlite3 *db, /* Apply change to "main" db of this handle */ @@ -11018,6 +11146,22 @@ SQLITE_API int sqlite3changeset_apply( ), void *pCtx /* First argument passed to xConflict */ ); +SQLITE_API int sqlite3changeset_apply_v2( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase +); /* ** CAPI3REF: Constants Passed To The Conflict Handler @@ -11115,6 +11259,161 @@ SQLITE_API int sqlite3changeset_apply( #define SQLITE_CHANGESET_REPLACE 1 #define SQLITE_CHANGESET_ABORT 2 +/* +** CAPI3REF: Rebasing changesets +** EXPERIMENTAL +** +** Suppose there is a site hosting a database in state S0. And that +** modifications are made that move that database to state S1 and a +** changeset recorded (the "local" changeset). Then, a changeset based +** on S0 is received from another site (the "remote" changeset) and +** applied to the database. The database is then in state +** (S1+"remote"), where the exact state depends on any conflict +** resolution decisions (OMIT or REPLACE) made while applying "remote". +** Rebasing a changeset is to update it to take those conflict +** resolution decisions into account, so that the same conflicts +** do not have to be resolved elsewhere in the network. 
+** +** For example, if both the local and remote changesets contain an +** INSERT of the same key on "CREATE TABLE t1(a PRIMARY KEY, b)": +** +** local: INSERT INTO t1 VALUES(1, 'v1'); +** remote: INSERT INTO t1 VALUES(1, 'v2'); +** +** and the conflict resolution is REPLACE, then the INSERT change is +** removed from the local changeset (it was overridden). Or, if the +** conflict resolution was "OMIT", then the local changeset is modified +** to instead contain: +** +** UPDATE t1 SET b = 'v2' WHERE a=1; +** +** Changes within the local changeset are rebased as follows: +** +**
    +**
    Local INSERT
    +** This may only conflict with a remote INSERT. If the conflict +** resolution was OMIT, then add an UPDATE change to the rebased +** changeset. Or, if the conflict resolution was REPLACE, add +** nothing to the rebased changeset. +** +**
    Local DELETE
    +** This may conflict with a remote UPDATE or DELETE. In both cases the +** only possible resolution is OMIT. If the remote operation was a +** DELETE, then add no change to the rebased changeset. If the remote +** operation was an UPDATE, then the old.* fields of change are updated +** to reflect the new.* values in the UPDATE. +** +**
    Local UPDATE
    +** This may conflict with a remote UPDATE or DELETE. If it conflicts +** with a DELETE, and the conflict resolution was OMIT, then the update +** is changed into an INSERT. Any undefined values in the new.* record +** from the update change are filled in using the old.* values from +** the conflicting DELETE. Or, if the conflict resolution was REPLACE, +** the UPDATE change is simply omitted from the rebased changeset. +** +** If conflict is with a remote UPDATE and the resolution is OMIT, then +** the old.* values are rebased using the new.* values in the remote +** change. Or, if the resolution is REPLACE, then the change is copied +** into the rebased changeset with updates to columns also updated by +** the conflicting remote UPDATE removed. If this means no columns would +** be updated, the change is omitted. +**
    +** +** A local change may be rebased against multiple remote changes +** simultaneously. If a single key is modified by multiple remote +** changesets, they are combined as follows before the local changeset +** is rebased: +** +**
      +**
    • If there has been one or more REPLACE resolutions on a +** key, it is rebased according to a REPLACE. +** +**
    • If there have been no REPLACE resolutions on a key, then +** the local changeset is rebased according to the most recent +** of the OMIT resolutions. +**
    +** +** Note that conflict resolutions from multiple remote changesets are +** combined on a per-field basis, not per-row. This means that in the +** case of multiple remote UPDATE operations, some fields of a single +** local change may be rebased for REPLACE while others are rebased for +** OMIT. +** +** In order to rebase a local changeset, the remote changeset must first +** be applied to the local database using sqlite3changeset_apply_v2() and +** the buffer of rebase information captured. Then: +** +**
      +**
    1. An sqlite3_rebaser object is created by calling +** sqlite3rebaser_create(). +**
    2. The new object is configured with the rebase buffer obtained from +** sqlite3changeset_apply_v2() by calling sqlite3rebaser_configure(). +** If the local changeset is to be rebased against multiple remote +** changesets, then sqlite3rebaser_configure() should be called +** multiple times, in the same order that the multiple +** sqlite3changeset_apply_v2() calls were made. +**
    3. Each local changeset is rebased by calling sqlite3rebaser_rebase(). +**
    4. The sqlite3_rebaser object is deleted by calling +** sqlite3rebaser_delete(). +**
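    +**
    +** A sketch of that sequence (error handling omitted; pRemote/nRemote
    +** and pLocal/nLocal are the incoming and local changesets, and
    +** xConflict is the application's conflict-handler callback):
    +**
    +**     void *pRebase = 0;  int nRebase = 0;
    +**     sqlite3changeset_apply_v2(db, nRemote, pRemote, 0, xConflict, 0,
    +**                               &pRebase, &nRebase);
    +**     if( pRebase ){  // only set if conflicts were resolved while applying
    +**       void *pOut = 0;  int nOut = 0;
    +**       sqlite3_rebaser *pRebaser = 0;
    +**       sqlite3rebaser_create(&pRebaser);
    +**       sqlite3rebaser_configure(pRebaser, nRebase, pRebase);
    +**       sqlite3rebaser_rebase(pRebaser, nLocal, pLocal, &nOut, &pOut);
    +**       sqlite3rebaser_delete(pRebaser);
    +**       sqlite3_free(pRebase);
    +**       // ... pOut/nOut hold the rebased local changeset; sqlite3_free(pOut) when done
    +**     }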
    +*/ +typedef struct sqlite3_rebaser sqlite3_rebaser; + +/* +** CAPI3REF: Create a changeset rebaser object. +** EXPERIMENTAL +** +** Allocate a new changeset rebaser object. If successful, set (*ppNew) to +** point to the new object and return SQLITE_OK. Otherwise, if an error +** occurs, return an SQLite error code (e.g. SQLITE_NOMEM) and set (*ppNew) +** to NULL. +*/ +SQLITE_API int sqlite3rebaser_create(sqlite3_rebaser **ppNew); + +/* +** CAPI3REF: Configure a changeset rebaser object. +** EXPERIMENTAL +** +** Configure the changeset rebaser object to rebase changesets according +** to the conflict resolutions described by buffer pRebase (size nRebase +** bytes), which must have been obtained from a previous call to +** sqlite3changeset_apply_v2(). +*/ +SQLITE_API int sqlite3rebaser_configure( + sqlite3_rebaser*, + int nRebase, const void *pRebase +); + +/* +** CAPI3REF: Rebase a changeset +** EXPERIMENTAL +** +** Argument pIn must point to a buffer containing a changeset nIn bytes +** in size. This function allocates and populates a buffer with a copy +** of the changeset rebased rebased according to the configuration of the +** rebaser object passed as the first argument. If successful, (*ppOut) +** is set to point to the new buffer containing the rebased changset and +** (*pnOut) to its size in bytes and SQLITE_OK returned. It is the +** responsibility of the caller to eventually free the new buffer using +** sqlite3_free(). Otherwise, if an error occurs, (*ppOut) and (*pnOut) +** are set to zero and an SQLite error code returned. +*/ +SQLITE_API int sqlite3rebaser_rebase( + sqlite3_rebaser*, + int nIn, const void *pIn, + int *pnOut, void **ppOut +); + +/* +** CAPI3REF: Delete a changeset rebaser object. +** EXPERIMENTAL +** +** Delete the changeset rebaser object and all associated resources. There +** should be one call to this function for each successful invocation +** of sqlite3rebaser_create(). +*/ +SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p); + /* ** CAPI3REF: Streaming Versions of API functions. 
** @@ -11219,6 +11518,22 @@ SQLITE_API int sqlite3changeset_apply_strm( ), void *pCtx /* First argument passed to xConflict */ ); +SQLITE_API int sqlite3changeset_apply_v2_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase +); SQLITE_API int sqlite3changeset_concat_strm( int (*xInputA)(void *pIn, void *pData, int *pnData), void *pInA, @@ -11256,6 +11571,13 @@ SQLITE_API int sqlite3changegroup_output_strm(sqlite3_changegroup*, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ); +SQLITE_API int sqlite3rebaser_rebase_strm( + sqlite3_rebaser *pRebaser, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); /* @@ -12673,23 +12995,25 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_INDEX 140 #define TK_ALTER 141 #define TK_ADD 142 -#define TK_ISNOT 143 -#define TK_FUNCTION 144 -#define TK_COLUMN 145 -#define TK_AGG_FUNCTION 146 -#define TK_AGG_COLUMN 147 -#define TK_UMINUS 148 -#define TK_UPLUS 149 -#define TK_REGISTER 150 -#define TK_VECTOR 151 -#define TK_SELECT_COLUMN 152 -#define TK_IF_NULL_ROW 153 -#define TK_ASTERISK 154 -#define TK_SPAN 155 -#define TK_END_OF_FILE 156 -#define TK_UNCLOSED_STRING 157 -#define TK_SPACE 158 -#define TK_ILLEGAL 159 +#define TK_TRUEFALSE 143 +#define TK_ISNOT 144 +#define TK_FUNCTION 145 +#define TK_COLUMN 146 +#define TK_AGG_FUNCTION 147 +#define TK_AGG_COLUMN 148 +#define TK_UMINUS 149 +#define TK_UPLUS 150 +#define TK_TRUTH 151 +#define TK_REGISTER 152 +#define TK_VECTOR 153 +#define TK_SELECT_COLUMN 154 +#define TK_IF_NULL_ROW 155 +#define TK_ASTERISK 156 +#define TK_SPAN 157 +#define TK_END_OF_FILE 158 +#define TK_UNCLOSED_STRING 159 +#define TK_SPACE 160 +#define TK_ILLEGAL 161 /* The token codes above must all fit in 8 bits */ #define TKFLG_MASK 0xff @@ -13133,9 +13457,10 @@ typedef INT16_TYPE LogEst; */ typedef struct BusyHandler BusyHandler; struct BusyHandler { - int (*xFunc)(void *,int); /* The busy callback */ - void *pArg; /* First arg to busy callback */ - int nBusy; /* Incremented with each busy call */ + int (*xBusyHandler)(void *,int); /* The busy callback */ + void *pBusyArg; /* First arg to busy callback */ + int nBusy; /* Incremented with each busy call */ + u8 bExtraFileArg; /* Include sqlite3_file as callback arg */ }; /* @@ -13935,80 +14260,81 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Concat 93 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */ #define OP_Compare 94 /* synopsis: r[P1@P3] <-> r[P2@P3] */ #define OP_BitNot 95 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */ -#define OP_Offset 96 /* synopsis: r[P3] = sqlite_offset(P1) */ +#define OP_IsTrue 96 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */ #define OP_String8 97 /* same as TK_STRING, synopsis: r[P2]='P4' */ -#define OP_Column 98 /* synopsis: r[P3]=PX */ -#define OP_Affinity 99 /* synopsis: affinity(r[P1@P2]) */ -#define OP_MakeRecord 100 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ -#define OP_Count 101 /* synopsis: 
r[P2]=count() */ -#define OP_ReadCookie 102 -#define OP_SetCookie 103 -#define OP_ReopenIdx 104 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenRead 105 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenWrite 106 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenDup 107 -#define OP_OpenAutoindex 108 /* synopsis: nColumn=P2 */ -#define OP_OpenEphemeral 109 /* synopsis: nColumn=P2 */ -#define OP_SorterOpen 110 -#define OP_SequenceTest 111 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ -#define OP_OpenPseudo 112 /* synopsis: P3 columns in r[P2] */ -#define OP_Close 113 -#define OP_ColumnsUsed 114 -#define OP_Sequence 115 /* synopsis: r[P2]=cursor[P1].ctr++ */ -#define OP_NewRowid 116 /* synopsis: r[P2]=rowid */ -#define OP_Insert 117 /* synopsis: intkey=r[P3] data=r[P2] */ -#define OP_InsertInt 118 /* synopsis: intkey=P3 data=r[P2] */ -#define OP_Delete 119 -#define OP_ResetCount 120 -#define OP_SorterCompare 121 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ -#define OP_SorterData 122 /* synopsis: r[P2]=data */ -#define OP_RowData 123 /* synopsis: r[P2]=data */ -#define OP_Rowid 124 /* synopsis: r[P2]=rowid */ -#define OP_NullRow 125 -#define OP_SeekEnd 126 -#define OP_SorterInsert 127 /* synopsis: key=r[P2] */ -#define OP_IdxInsert 128 /* synopsis: key=r[P2] */ -#define OP_IdxDelete 129 /* synopsis: key=r[P2@P3] */ -#define OP_DeferredSeek 130 /* synopsis: Move P3 to P1.rowid if needed */ -#define OP_IdxRowid 131 /* synopsis: r[P2]=rowid */ +#define OP_Offset 98 /* synopsis: r[P3] = sqlite_offset(P1) */ +#define OP_Column 99 /* synopsis: r[P3]=PX */ +#define OP_Affinity 100 /* synopsis: affinity(r[P1@P2]) */ +#define OP_MakeRecord 101 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ +#define OP_Count 102 /* synopsis: r[P2]=count() */ +#define OP_ReadCookie 103 +#define OP_SetCookie 104 +#define OP_ReopenIdx 105 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenRead 106 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenWrite 107 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenDup 108 +#define OP_OpenAutoindex 109 /* synopsis: nColumn=P2 */ +#define OP_OpenEphemeral 110 /* synopsis: nColumn=P2 */ +#define OP_SorterOpen 111 +#define OP_SequenceTest 112 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ +#define OP_OpenPseudo 113 /* synopsis: P3 columns in r[P2] */ +#define OP_Close 114 +#define OP_ColumnsUsed 115 +#define OP_Sequence 116 /* synopsis: r[P2]=cursor[P1].ctr++ */ +#define OP_NewRowid 117 /* synopsis: r[P2]=rowid */ +#define OP_Insert 118 /* synopsis: intkey=r[P3] data=r[P2] */ +#define OP_InsertInt 119 /* synopsis: intkey=P3 data=r[P2] */ +#define OP_Delete 120 +#define OP_ResetCount 121 +#define OP_SorterCompare 122 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ +#define OP_SorterData 123 /* synopsis: r[P2]=data */ +#define OP_RowData 124 /* synopsis: r[P2]=data */ +#define OP_Rowid 125 /* synopsis: r[P2]=rowid */ +#define OP_NullRow 126 +#define OP_SeekEnd 127 +#define OP_SorterInsert 128 /* synopsis: key=r[P2] */ +#define OP_IdxInsert 129 /* synopsis: key=r[P2] */ +#define OP_IdxDelete 130 /* synopsis: key=r[P2@P3] */ +#define OP_DeferredSeek 131 /* synopsis: Move P3 to P1.rowid if needed */ #define OP_Real 132 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ -#define OP_Destroy 133 -#define OP_Clear 134 -#define OP_ResetSorter 135 -#define OP_CreateBtree 136 /* synopsis: r[P2]=root iDb=P1 flags=P3 */ -#define OP_SqlExec 137 -#define OP_ParseSchema 138 -#define OP_LoadAnalysis 139 -#define OP_DropTable 140 -#define OP_DropIndex 141 -#define OP_DropTrigger 142 -#define OP_IntegrityCk 143 -#define 
OP_RowSetAdd 144 /* synopsis: rowset(P1)=r[P2] */ -#define OP_Param 145 -#define OP_FkCounter 146 /* synopsis: fkctr[P1]+=P2 */ -#define OP_MemMax 147 /* synopsis: r[P1]=max(r[P1],r[P2]) */ -#define OP_OffsetLimit 148 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ -#define OP_AggStep0 149 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggStep 150 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggFinal 151 /* synopsis: accum=r[P1] N=P2 */ -#define OP_Expire 152 -#define OP_TableLock 153 /* synopsis: iDb=P1 root=P2 write=P3 */ -#define OP_VBegin 154 -#define OP_VCreate 155 -#define OP_VDestroy 156 -#define OP_VOpen 157 -#define OP_VColumn 158 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VRename 159 -#define OP_Pagecount 160 -#define OP_MaxPgcnt 161 -#define OP_PureFunc0 162 -#define OP_Function0 163 /* synopsis: r[P3]=func(r[P2@P5]) */ -#define OP_PureFunc 164 -#define OP_Function 165 /* synopsis: r[P3]=func(r[P2@P5]) */ -#define OP_Trace 166 -#define OP_CursorHint 167 -#define OP_Noop 168 -#define OP_Explain 169 +#define OP_IdxRowid 133 /* synopsis: r[P2]=rowid */ +#define OP_Destroy 134 +#define OP_Clear 135 +#define OP_ResetSorter 136 +#define OP_CreateBtree 137 /* synopsis: r[P2]=root iDb=P1 flags=P3 */ +#define OP_SqlExec 138 +#define OP_ParseSchema 139 +#define OP_LoadAnalysis 140 +#define OP_DropTable 141 +#define OP_DropIndex 142 +#define OP_DropTrigger 143 +#define OP_IntegrityCk 144 +#define OP_RowSetAdd 145 /* synopsis: rowset(P1)=r[P2] */ +#define OP_Param 146 +#define OP_FkCounter 147 /* synopsis: fkctr[P1]+=P2 */ +#define OP_MemMax 148 /* synopsis: r[P1]=max(r[P1],r[P2]) */ +#define OP_OffsetLimit 149 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ +#define OP_AggStep0 150 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggStep 151 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggFinal 152 /* synopsis: accum=r[P1] N=P2 */ +#define OP_Expire 153 +#define OP_TableLock 154 /* synopsis: iDb=P1 root=P2 write=P3 */ +#define OP_VBegin 155 +#define OP_VCreate 156 +#define OP_VDestroy 157 +#define OP_VOpen 158 +#define OP_VColumn 159 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 160 +#define OP_Pagecount 161 +#define OP_MaxPgcnt 162 +#define OP_PureFunc0 163 +#define OP_Function0 164 /* synopsis: r[P3]=func(r[P2@P5]) */ +#define OP_PureFunc 165 +#define OP_Function 166 /* synopsis: r[P3]=func(r[P2@P5]) */ +#define OP_Trace 167 +#define OP_CursorHint 168 +#define OP_Noop 169 +#define OP_Explain 170 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c @@ -14033,16 +14359,16 @@ typedef struct VdbeOpList VdbeOpList; /* 72 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ /* 80 */ 0x02, 0x02, 0x02, 0x00, 0x26, 0x26, 0x26, 0x26,\ /* 88 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x00, 0x12,\ -/* 96 */ 0x20, 0x10, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\ +/* 96 */ 0x12, 0x10, 0x20, 0x00, 0x00, 0x00, 0x10, 0x10,\ /* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 112 */ 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00,\ -/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x04,\ -/* 128 */ 0x04, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00, 0x00,\ -/* 136 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 144 */ 0x06, 0x10, 0x00, 0x04, 0x1a, 0x00, 0x00, 0x00,\ +/* 112 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ +/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,\ +/* 128 */ 0x04, 0x04, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,\ 
+/* 136 */ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 144 */ 0x00, 0x06, 0x10, 0x00, 0x04, 0x1a, 0x00, 0x00,\ /* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 160 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 168 */ 0x00, 0x00,} +/* 160 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 168 */ 0x00, 0x00, 0x00,} /* The sqlite3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. The smaller the maximum @@ -14343,7 +14669,7 @@ SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3*); SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*); /* Functions used to configure a Pager object. */ -SQLITE_PRIVATE void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *); +SQLITE_PRIVATE void sqlite3PagerSetBusyHandler(Pager*, int(*)(void *), void *); SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int); #ifdef SQLITE_HAS_CODEC SQLITE_PRIVATE void sqlite3PagerAlignReserve(Pager*,Pager*); @@ -14429,6 +14755,11 @@ SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*); SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *); SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*); SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT +SQLITE_PRIVATE void sqlite3PagerResetLockTimeout(Pager *pPager); +#else +# define sqlite3PagerResetLockTimeout(X) +#endif /* Functions used to truncate the database file. */ SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager*,Pgno); @@ -15248,8 +15579,9 @@ struct sqlite3 { int newTnum; /* Rootpage of table being initialized */ u8 iDb; /* Which db file is being initialized */ u8 busy; /* TRUE if currently initializing */ - u8 orphanTrigger; /* Last statement is orphaned TEMP trigger */ - u8 imposterTable; /* Building an imposter table */ + unsigned orphanTrigger : 1; /* Last statement is orphaned TEMP trigger */ + unsigned imposterTable : 1; /* Building an imposter table */ + unsigned reopenMemdb : 1; /* ATTACH is really a reopen using MemDB */ } init; int nVdbeActive; /* Number of VDBEs currently running */ int nVdbeRead; /* Number of active VDBEs that read or write */ @@ -15414,6 +15746,8 @@ struct sqlite3 { #define SQLITE_CursorHints 0x0400 /* Add OP_CursorHint opcodes */ #define SQLITE_Stat34 0x0800 /* Use STAT3 or STAT4 data */ /* TH3 expects the Stat34 ^^^^^^ value to be 0x0800. 
Don't change it */ +#define SQLITE_PushDown 0x1000 /* The push-down optimization */ +#define SQLITE_SimplifyJoin 0x2000 /* Convert LEFT JOIN to JOIN */ #define SQLITE_AllOpts 0xffff /* All optimizations */ /* @@ -15637,6 +15971,7 @@ struct Column { #define COLFLAG_PRIMKEY 0x0001 /* Column is part of the primary key */ #define COLFLAG_HIDDEN 0x0002 /* A hidden column in a virtual table */ #define COLFLAG_HASTYPE 0x0004 /* Type name follows column name */ +#define COLFLAG_UNIQUE 0x0008 /* Column def contains "UNIQUE" or "PK" */ /* ** A "Collating Sequence" is defined by an instance of the following @@ -16874,7 +17209,6 @@ struct Parse { int nMaxArg; /* Max args passed to user function by sub-program */ #if SELECTTRACE_ENABLED int nSelect; /* Number of SELECT statements seen */ - int nSelectIndent; /* How far to indent SELECTTRACE() output */ #endif #ifndef SQLITE_OMIT_SHARED_CACHE int nTableLock; /* Number of locks in aTableLock */ @@ -17238,9 +17572,9 @@ struct Walker { struct CCurHint *pCCurHint; /* Used by codeCursorHint() */ int *aiCol; /* array of column indexes */ struct IdxCover *pIdxCover; /* Check for index coverage */ - struct IdxExprTrans *pIdxTrans; /* Convert indexed expr to column */ + struct IdxExprTrans *pIdxTrans; /* Convert idxed expr to column */ ExprList *pGroupBy; /* GROUP BY clause */ - struct HavingToWhereCtx *pHavingCtx; /* HAVING to WHERE clause ctx */ + Select *pSelect; /* HAVING to WHERE clause ctx */ } u; }; @@ -17704,6 +18038,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(Parse*,Expr*, Expr*, int); SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*, Expr*, int); SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, int); SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Parse*,Expr*, Expr*, int); +SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(Expr*, int iCur, Index *pIdx); @@ -17721,6 +18056,8 @@ SQLITE_PRIVATE void sqlite3EndTransaction(Parse*,int); SQLITE_PRIVATE void sqlite3Savepoint(Parse*, int, Token*); SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *); SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*); +SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr*); +SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); @@ -17903,6 +18240,10 @@ SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **); SQLITE_PRIVATE const char *sqlite3ErrName(int); #endif +#ifdef SQLITE_ENABLE_DESERIALIZE +SQLITE_PRIVATE int sqlite3MemdbInit(void); +#endif + SQLITE_PRIVATE const char *sqlite3ErrStr(int); SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse); SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int); @@ -17951,6 +18292,9 @@ SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions; SQLITE_PRIVATE int sqlite3PendingByte; #endif #endif +#ifdef VDBE_PROFILE +SQLITE_PRIVATE sqlite3_uint64 sqlite3NProfileCnt; +#endif SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3*, int, int, int); SQLITE_PRIVATE void sqlite3Reindex(Parse*, Token*, Token*); SQLITE_PRIVATE void sqlite3AlterFunctions(void); @@ -17973,7 +18317,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *); SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(Parse*, u8, CollSeq *, const char*); SQLITE_PRIVATE 
char sqlite3AffinityType(const char*, u8*); SQLITE_PRIVATE void sqlite3Analyze(Parse*, Token*, Token*); -SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler*); +SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler*, sqlite3_file*); SQLITE_PRIVATE int sqlite3FindDb(sqlite3*, Token*); SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *, const char *); SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3*,int iDB); @@ -18574,6 +18918,13 @@ SQLITE_PRIVATE const Token sqlite3IntTokens[] = { { "1", 1 } }; +#ifdef VDBE_PROFILE +/* +** The following performance counter can be used in place of +** sqlite3Hwtime() for profiling. This is a no-op on standard builds. +*/ +SQLITE_PRIVATE sqlite3_uint64 sqlite3NProfileCnt = 0; +#endif /* ** The value of the "pending" byte must be 0x40000000 (1 byte past the @@ -18857,8 +19208,6 @@ struct sqlite3_value { ** If the MEM_Null flag is set, then the value is an SQL NULL value. ** For a pointer type created using sqlite3_bind_pointer() or ** sqlite3_result_pointer() the MEM_Term and MEM_Subtype flags are also set. -** If both MEM_Null and MEM_Zero are set, that means that the value is -** an unchanging column value from VColumn. ** ** If the MEM_Str flag is set then Mem.z points at a string representation. ** Usually this is encoded in the same unicode encoding as the main @@ -18952,7 +19301,6 @@ struct sqlite3_context { int iOp; /* Instruction number of OP_Function */ int isError; /* Error code returned by the function. */ u8 skipFlag; /* Skip accumulator loading if true */ - u8 fErrorOrAux; /* isError!=0 or pVdbe->pAuxData modified */ u8 argc; /* Number of arguments */ sqlite3_value *argv[1]; /* Argument set */ }; @@ -19122,6 +19470,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8); SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem*); SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem*); +SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem*, int ifNull); SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*); @@ -19515,6 +19864,9 @@ SQLITE_API int sqlite3_db_status( ** pagers the database handle is connected to. *pHighwater is always set ** to zero. */ + case SQLITE_DBSTATUS_CACHE_SPILL: + op = SQLITE_DBSTATUS_CACHE_WRITE+1; + /* Fall through into the next case */ case SQLITE_DBSTATUS_CACHE_HIT: case SQLITE_DBSTATUS_CACHE_MISS: case SQLITE_DBSTATUS_CACHE_WRITE:{ @@ -20936,7 +21288,9 @@ SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){ */ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ #ifdef SQLITE_TEST - if( op!=SQLITE_FCNTL_COMMIT_PHASETWO ){ + if( op!=SQLITE_FCNTL_COMMIT_PHASETWO + && op!=SQLITE_FCNTL_LOCK_TIMEOUT + ){ /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite ** is using a regular VFS, it is called after the corresponding ** transaction has been committed. 
Injecting a fault at this point @@ -20950,10 +21304,11 @@ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ DO_OS_MALLOC_TEST(id); } #endif + if( id->pMethods==0 ) return SQLITE_NOTFOUND; return id->pMethods->xFileControl(id, op, pArg); } SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file *id, int op, void *pArg){ - (void)id->pMethods->xFileControl(id, op, pArg); + if( id->pMethods ) (void)id->pMethods->xFileControl(id, op, pArg); } SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id){ @@ -24108,11 +24463,12 @@ struct sqlite3_mutex { #endif }; #if SQLITE_MUTEX_NREF -#define SQLITE3_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER,0,0,(pthread_t)0,0} +# define SQLITE3_MUTEX_INITIALIZER(id) \ + {PTHREAD_MUTEX_INITIALIZER,id,0,(pthread_t)0,0} #elif defined(SQLITE_ENABLE_API_ARMOR) -#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0 } +# define SQLITE3_MUTEX_INITIALIZER(id) { PTHREAD_MUTEX_INITIALIZER, id } #else -#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER } +#define SQLITE3_MUTEX_INITIALIZER(id) { PTHREAD_MUTEX_INITIALIZER } #endif /* @@ -24209,18 +24565,18 @@ static int pthreadMutexEnd(void){ return SQLITE_OK; } */ static sqlite3_mutex *pthreadMutexAlloc(int iType){ static sqlite3_mutex staticMutexes[] = { - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER + SQLITE3_MUTEX_INITIALIZER(2), + SQLITE3_MUTEX_INITIALIZER(3), + SQLITE3_MUTEX_INITIALIZER(4), + SQLITE3_MUTEX_INITIALIZER(5), + SQLITE3_MUTEX_INITIALIZER(6), + SQLITE3_MUTEX_INITIALIZER(7), + SQLITE3_MUTEX_INITIALIZER(8), + SQLITE3_MUTEX_INITIALIZER(9), + SQLITE3_MUTEX_INITIALIZER(10), + SQLITE3_MUTEX_INITIALIZER(11), + SQLITE3_MUTEX_INITIALIZER(12), + SQLITE3_MUTEX_INITIALIZER(13) }; sqlite3_mutex *p; switch( iType ){ @@ -24238,6 +24594,9 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){ pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE); pthread_mutex_init(&p->mutex, &recursiveAttr); pthread_mutexattr_destroy(&recursiveAttr); +#endif +#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR) + p->id = SQLITE_MUTEX_RECURSIVE; #endif } break; @@ -24246,6 +24605,9 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){ p = sqlite3MallocZero( sizeof(*p) ); if( p ){ pthread_mutex_init(&p->mutex, 0); +#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR) + p->id = SQLITE_MUTEX_FAST; +#endif } break; } @@ -24261,7 +24623,7 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){ } } #if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR) - if( p ) p->id = iType; + assert( p==0 || p->id==iType ); #endif return p; } @@ -24778,7 +25140,7 @@ struct sqlite3_mutex { #ifdef SQLITE_DEBUG volatile int nRef; /* Number of enterances */ volatile DWORD owner; /* Thread holding this mutex */ - volatile int trace; /* True to trace changes */ + volatile LONG trace; /* True to trace changes */ #endif }; @@ -24790,10 +25152,10 @@ struct sqlite3_mutex { #define SQLITE_W32_MUTEX_INITIALIZER { 0 } #ifdef SQLITE_DEBUG -#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, \ +#define SQLITE3_MUTEX_INITIALIZER(id) { SQLITE_W32_MUTEX_INITIALIZER, id, \ 0L, (DWORD)0, 0 } #else -#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0 } +#define 
SQLITE3_MUTEX_INITIALIZER(id) { SQLITE_W32_MUTEX_INITIALIZER, id } #endif #ifdef SQLITE_DEBUG @@ -24836,18 +25198,18 @@ SQLITE_PRIVATE void sqlite3MemoryBarrier(void){ ** Initialize and deinitialize the mutex subsystem. */ static sqlite3_mutex winMutex_staticMutexes[] = { - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER, - SQLITE3_MUTEX_INITIALIZER + SQLITE3_MUTEX_INITIALIZER(2), + SQLITE3_MUTEX_INITIALIZER(3), + SQLITE3_MUTEX_INITIALIZER(4), + SQLITE3_MUTEX_INITIALIZER(5), + SQLITE3_MUTEX_INITIALIZER(6), + SQLITE3_MUTEX_INITIALIZER(7), + SQLITE3_MUTEX_INITIALIZER(8), + SQLITE3_MUTEX_INITIALIZER(9), + SQLITE3_MUTEX_INITIALIZER(10), + SQLITE3_MUTEX_INITIALIZER(11), + SQLITE3_MUTEX_INITIALIZER(12), + SQLITE3_MUTEX_INITIALIZER(13) }; static int winMutex_isInit = 0; @@ -24977,15 +25339,15 @@ static sqlite3_mutex *winMutexAlloc(int iType){ } #endif p = &winMutex_staticMutexes[iType-2]; - p->id = iType; #ifdef SQLITE_DEBUG #ifdef SQLITE_WIN32_MUTEX_TRACE_STATIC - p->trace = 1; + InterlockedCompareExchange(&p->trace, 1, 0); #endif #endif break; } } + assert( p==0 || p->id==iType ); return p; } @@ -26064,6 +26426,11 @@ SQLITE_PRIVATE void sqlite3VXPrintf( PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */ char buf[etBUFSIZE]; /* Conversion buffer */ + /* pAccum never starts out with an empty buffer that was obtained from + ** malloc(). This precondition is required by the mprintf("%z...") + ** optimization. */ + assert( pAccum->nChar>0 || (pAccum->printfFlags&SQLITE_PRINTF_MALLOCED)==0 ); + bufpt = 0; if( (pAccum->printfFlags & SQLITE_PRINTF_SQLFUNC)!=0 ){ pArgList = va_arg(ap, PrintfArguments*); @@ -26482,9 +26849,38 @@ SQLITE_PRIVATE void sqlite3VXPrintf( case etCHARX: if( bArgList ){ bufpt = getTextArg(pArgList); - c = bufpt ? 
bufpt[0] : 0; + length = 1; + if( bufpt ){ + buf[0] = c = *(bufpt++); + if( (c&0xc0)==0xc0 ){ + while( length<4 && (bufpt[0]&0xc0)==0x80 ){ + buf[length++] = *(bufpt++); + } + } + }else{ + buf[0] = 0; + } }else{ - c = va_arg(ap,int); + unsigned int ch = va_arg(ap,unsigned int); + if( ch<0x00080 ){ + buf[0] = ch & 0xff; + length = 1; + }else if( ch<0x00800 ){ + buf[0] = 0xc0 + (u8)((ch>>6)&0x1f); + buf[1] = 0x80 + (u8)(ch & 0x3f); + length = 2; + }else if( ch<0x10000 ){ + buf[0] = 0xe0 + (u8)((ch>>12)&0x0f); + buf[1] = 0x80 + (u8)((ch>>6) & 0x3f); + buf[2] = 0x80 + (u8)(ch & 0x3f); + length = 3; + }else{ + buf[0] = 0xf0 + (u8)((ch>>18) & 0x07); + buf[1] = 0x80 + (u8)((ch>>12) & 0x3f); + buf[2] = 0x80 + (u8)((ch>>6) & 0x3f); + buf[3] = 0x80 + (u8)(ch & 0x3f); + length = 4; + } } if( precision>1 ){ width -= precision-1; @@ -26492,12 +26888,13 @@ SQLITE_PRIVATE void sqlite3VXPrintf( sqlite3AppendChar(pAccum, width-1, ' '); width = 0; } - sqlite3AppendChar(pAccum, precision-1, c); + while( precision-- > 1 ){ + sqlite3StrAccumAppend(pAccum, buf, length); + } } - length = 1; - buf[0] = c; bufpt = buf; - break; + flag_altform2 = 1; + goto adjust_width_for_utf8; case etSTRING: case etDYNSTRING: if( bArgList ){ @@ -26509,17 +26906,45 @@ SQLITE_PRIVATE void sqlite3VXPrintf( if( bufpt==0 ){ bufpt = ""; }else if( xtype==etDYNSTRING ){ + if( pAccum->nChar==0 && pAccum->mxAlloc && width==0 && precision<0 ){ + /* Special optimization for sqlite3_mprintf("%z..."): + ** Extend an existing memory allocation rather than creating + ** a new one. */ + assert( (pAccum->printfFlags&SQLITE_PRINTF_MALLOCED)==0 ); + pAccum->zText = bufpt; + pAccum->nAlloc = sqlite3DbMallocSize(pAccum->db, bufpt); + pAccum->nChar = 0x7fffffff & (int)strlen(bufpt); + pAccum->printfFlags |= SQLITE_PRINTF_MALLOCED; + length = 0; + break; + } zExtra = bufpt; } if( precision>=0 ){ - for(length=0; length 0 && z[0] ){ + SQLITE_SKIP_UTF8(z); + } + length = (int)(z - (unsigned char*)bufpt); + }else{ + for(length=0; length0 ){ + /* Adjust width to account for extra bytes in UTF-8 characters */ + int ii = length - 1; + while( ii>=0 ) if( (bufpt[ii--] & 0xc0)==0x80 ) width++; + } break; - case etSQLESCAPE: /* Escape ' characters */ - case etSQLESCAPE2: /* Escape ' and enclose in '...' */ - case etSQLESCAPE3: { /* Escape " characters */ + case etSQLESCAPE: /* %q: Escape ' characters */ + case etSQLESCAPE2: /* %Q: Escape ' and enclose in '...' */ + case etSQLESCAPE3: { /* %w: Escape " characters */ int i, j, k, n, isnull; int needQuote; char ch; @@ -26533,9 +26958,17 @@ SQLITE_PRIVATE void sqlite3VXPrintf( } isnull = escarg==0; if( isnull ) escarg = (xtype==etSQLESCAPE2 ? "NULL" : "(NULL)"); + /* For %q, %Q, and %w, the precision is the number of byte (or + ** characters if the ! flags is present) to use from the input. + ** Because of the extra quoting characters inserted, the number + ** of output characters may be larger than the precision. + */ k = precision; for(i=n=0; k!=0 && (ch=escarg[i])!=0; i++, k--){ if( ch==q ) n++; + if( flag_altform2 && (ch&0xc0)==0xc0 ){ + while( (escarg[i+1]&0xc0)==0x80 ){ i++; } + } } needQuote = !isnull && xtype==etSQLESCAPE2; n += i + 3; @@ -26558,10 +26991,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf( if( needQuote ) bufpt[j++] = q; bufpt[j] = 0; length = j; - /* The precision in %q and %Q means how many input characters to - ** consume, not the length of the output... 
- ** if( precision>=0 && precision0 ){ @@ -27115,11 +27548,21 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m sqlite3TreeViewPush(pView, 1); } do{ +#if SELECTTRACE_ENABLED + sqlite3TreeViewLine(pView, + "SELECT%s%s (%s/%p) selFlags=0x%x nSelectRow=%d", + ((p->selFlags & SF_Distinct) ? " DISTINCT" : ""), + ((p->selFlags & SF_Aggregate) ? " agg_flag" : ""), + p->zSelName, p, p->selFlags, + (int)p->nSelectRow + ); +#else sqlite3TreeViewLine(pView, "SELECT%s%s (0x%p) selFlags=0x%x nSelectRow=%d", ((p->selFlags & SF_Distinct) ? " DISTINCT" : ""), ((p->selFlags & SF_Aggregate) ? " agg_flag" : ""), p, p->selFlags, (int)p->nSelectRow ); +#endif if( cnt++ ) sqlite3TreeViewPop(pView); if( p->pPrior ){ n = 1000; @@ -27270,6 +27713,11 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m sqlite3TreeViewLine(pView,"NULL"); break; } + case TK_TRUEFALSE: { + sqlite3TreeViewLine(pView, + sqlite3ExprTruthValue(pExpr) ? "TRUE" : "FALSE"); + break; + } #ifndef SQLITE_OMIT_BLOB_LITERAL case TK_BLOB: { sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken); @@ -27326,6 +27774,19 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m case TK_ISNULL: zUniOp = "ISNULL"; break; case TK_NOTNULL: zUniOp = "NOTNULL"; break; + case TK_TRUTH: { + int x; + const char *azOp[] = { + "IS-FALSE", "IS-TRUE", "IS-NOT-FALSE", "IS-NOT-TRUE" + }; + assert( pExpr->op2==TK_IS || pExpr->op2==TK_ISNOT ); + assert( pExpr->pRight ); + assert( pExpr->pRight->op==TK_TRUEFALSE ); + x = (pExpr->op2==TK_ISNOT)*2 + sqlite3ExprTruthValue(pExpr->pRight); + zUniOp = azOp[x]; + break; + } + case TK_SPAN: { sqlite3TreeViewLine(pView, "SPAN %Q", pExpr->u.zToken); sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); @@ -29063,7 +29524,7 @@ static int compare2pow63(const char *zNum, int incr){ ** Returns: ** ** 0 Successful transformation. Fits in a 64-bit signed integer. -** 1 Excess text after the integer value +** 1 Excess non-space text after the integer value ** 2 Integer too large for a 64-bit signed integer or is malformed ** 3 Special case of 9223372036854775808 ** @@ -29106,47 +29567,57 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc for(i=0; &zNum[i]='0' && c<='9'; i+=incr){ u = u*10 + c - '0'; } + testcase( i==18*incr ); + testcase( i==19*incr ); + testcase( i==20*incr ); if( u>LARGEST_INT64 ){ + /* This test and assignment is needed only to suppress UB warnings + ** from clang and -fsanitize=undefined. This test and assignment make + ** the code a little larger and slower, and no harm comes from omitting + ** them, but we must appaise the undefined-behavior pharisees. */ *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64; }else if( neg ){ *pNum = -(i64)u; }else{ *pNum = (i64)u; } - testcase( i==18 ); - testcase( i==19 ); - testcase( i==20 ); - if( &zNum[i]19*incr ){ /* Too many digits */ - /* zNum is empty or contains non-numeric text or is longer - ** than 19 digits (thus guaranteeing that it is too large) */ - return 2; - }else if( i<19*incr ){ + if( i<19*incr ){ /* Less than 19 digits, so we know that it fits in 64 bits */ assert( u<=LARGEST_INT64 ); return rc; }else{ /* zNum is a 19-digit numbers. Compare it against 9223372036854775808. */ - c = compare2pow63(zNum, incr); + c = i>19*incr ? 
1 : compare2pow63(zNum, incr); if( c<0 ){ /* zNum is less than 9223372036854775808 so it fits */ assert( u<=LARGEST_INT64 ); return rc; - }else if( c>0 ){ - /* zNum is greater than 9223372036854775808 so it overflows */ - return 2; }else{ - /* zNum is exactly 9223372036854775808. Fits if negative. The - ** special case 2 overflow if positive */ - assert( u-1==LARGEST_INT64 ); - return neg ? rc : 3; + *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64; + if( c>0 ){ + /* zNum is greater than 9223372036854775808 so it overflows */ + return 2; + }else{ + /* zNum is exactly 9223372036854775808. Fits if negative. The + ** special case 2 overflow if positive */ + assert( u-1==LARGEST_INT64 ); + return neg ? rc : 3; + } } } } @@ -30463,80 +30934,81 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 93 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), /* 94 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), /* 95 */ "BitNot" OpHelp("r[P1]= ~r[P1]"), - /* 96 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), + /* 96 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), /* 97 */ "String8" OpHelp("r[P2]='P4'"), - /* 98 */ "Column" OpHelp("r[P3]=PX"), - /* 99 */ "Affinity" OpHelp("affinity(r[P1@P2])"), - /* 100 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), - /* 101 */ "Count" OpHelp("r[P2]=count()"), - /* 102 */ "ReadCookie" OpHelp(""), - /* 103 */ "SetCookie" OpHelp(""), - /* 104 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), - /* 105 */ "OpenRead" OpHelp("root=P2 iDb=P3"), - /* 106 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), - /* 107 */ "OpenDup" OpHelp(""), - /* 108 */ "OpenAutoindex" OpHelp("nColumn=P2"), - /* 109 */ "OpenEphemeral" OpHelp("nColumn=P2"), - /* 110 */ "SorterOpen" OpHelp(""), - /* 111 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), - /* 112 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), - /* 113 */ "Close" OpHelp(""), - /* 114 */ "ColumnsUsed" OpHelp(""), - /* 115 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), - /* 116 */ "NewRowid" OpHelp("r[P2]=rowid"), - /* 117 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), - /* 118 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"), - /* 119 */ "Delete" OpHelp(""), - /* 120 */ "ResetCount" OpHelp(""), - /* 121 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), - /* 122 */ "SorterData" OpHelp("r[P2]=data"), - /* 123 */ "RowData" OpHelp("r[P2]=data"), - /* 124 */ "Rowid" OpHelp("r[P2]=rowid"), - /* 125 */ "NullRow" OpHelp(""), - /* 126 */ "SeekEnd" OpHelp(""), - /* 127 */ "SorterInsert" OpHelp("key=r[P2]"), - /* 128 */ "IdxInsert" OpHelp("key=r[P2]"), - /* 129 */ "IdxDelete" OpHelp("key=r[P2@P3]"), - /* 130 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"), - /* 131 */ "IdxRowid" OpHelp("r[P2]=rowid"), + /* 98 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), + /* 99 */ "Column" OpHelp("r[P3]=PX"), + /* 100 */ "Affinity" OpHelp("affinity(r[P1@P2])"), + /* 101 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), + /* 102 */ "Count" OpHelp("r[P2]=count()"), + /* 103 */ "ReadCookie" OpHelp(""), + /* 104 */ "SetCookie" OpHelp(""), + /* 105 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), + /* 106 */ "OpenRead" OpHelp("root=P2 iDb=P3"), + /* 107 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), + /* 108 */ "OpenDup" OpHelp(""), + /* 109 */ "OpenAutoindex" OpHelp("nColumn=P2"), + /* 110 */ "OpenEphemeral" OpHelp("nColumn=P2"), + /* 111 */ "SorterOpen" OpHelp(""), + /* 112 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), + /* 113 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), + /* 114 */ "Close" OpHelp(""), + /* 115 */ "ColumnsUsed" OpHelp(""), + /* 
116 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), + /* 117 */ "NewRowid" OpHelp("r[P2]=rowid"), + /* 118 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), + /* 119 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"), + /* 120 */ "Delete" OpHelp(""), + /* 121 */ "ResetCount" OpHelp(""), + /* 122 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), + /* 123 */ "SorterData" OpHelp("r[P2]=data"), + /* 124 */ "RowData" OpHelp("r[P2]=data"), + /* 125 */ "Rowid" OpHelp("r[P2]=rowid"), + /* 126 */ "NullRow" OpHelp(""), + /* 127 */ "SeekEnd" OpHelp(""), + /* 128 */ "SorterInsert" OpHelp("key=r[P2]"), + /* 129 */ "IdxInsert" OpHelp("key=r[P2]"), + /* 130 */ "IdxDelete" OpHelp("key=r[P2@P3]"), + /* 131 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"), /* 132 */ "Real" OpHelp("r[P2]=P4"), - /* 133 */ "Destroy" OpHelp(""), - /* 134 */ "Clear" OpHelp(""), - /* 135 */ "ResetSorter" OpHelp(""), - /* 136 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"), - /* 137 */ "SqlExec" OpHelp(""), - /* 138 */ "ParseSchema" OpHelp(""), - /* 139 */ "LoadAnalysis" OpHelp(""), - /* 140 */ "DropTable" OpHelp(""), - /* 141 */ "DropIndex" OpHelp(""), - /* 142 */ "DropTrigger" OpHelp(""), - /* 143 */ "IntegrityCk" OpHelp(""), - /* 144 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), - /* 145 */ "Param" OpHelp(""), - /* 146 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), - /* 147 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), - /* 148 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), - /* 149 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 150 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 151 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), - /* 152 */ "Expire" OpHelp(""), - /* 153 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), - /* 154 */ "VBegin" OpHelp(""), - /* 155 */ "VCreate" OpHelp(""), - /* 156 */ "VDestroy" OpHelp(""), - /* 157 */ "VOpen" OpHelp(""), - /* 158 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 159 */ "VRename" OpHelp(""), - /* 160 */ "Pagecount" OpHelp(""), - /* 161 */ "MaxPgcnt" OpHelp(""), - /* 162 */ "PureFunc0" OpHelp(""), - /* 163 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"), - /* 164 */ "PureFunc" OpHelp(""), - /* 165 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"), - /* 166 */ "Trace" OpHelp(""), - /* 167 */ "CursorHint" OpHelp(""), - /* 168 */ "Noop" OpHelp(""), - /* 169 */ "Explain" OpHelp(""), + /* 133 */ "IdxRowid" OpHelp("r[P2]=rowid"), + /* 134 */ "Destroy" OpHelp(""), + /* 135 */ "Clear" OpHelp(""), + /* 136 */ "ResetSorter" OpHelp(""), + /* 137 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"), + /* 138 */ "SqlExec" OpHelp(""), + /* 139 */ "ParseSchema" OpHelp(""), + /* 140 */ "LoadAnalysis" OpHelp(""), + /* 141 */ "DropTable" OpHelp(""), + /* 142 */ "DropIndex" OpHelp(""), + /* 143 */ "DropTrigger" OpHelp(""), + /* 144 */ "IntegrityCk" OpHelp(""), + /* 145 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), + /* 146 */ "Param" OpHelp(""), + /* 147 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), + /* 148 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), + /* 149 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), + /* 150 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 151 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 152 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), + /* 153 */ "Expire" OpHelp(""), + /* 154 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), + /* 155 */ "VBegin" OpHelp(""), + /* 156 */ "VCreate" OpHelp(""), + /* 157 */ "VDestroy" OpHelp(""), + /* 158 */ "VOpen" OpHelp(""), + /* 159 */ 
"VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 160 */ "VRename" OpHelp(""), + /* 161 */ "Pagecount" OpHelp(""), + /* 162 */ "MaxPgcnt" OpHelp(""), + /* 163 */ "PureFunc0" OpHelp(""), + /* 164 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"), + /* 165 */ "PureFunc" OpHelp(""), + /* 166 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"), + /* 167 */ "Trace" OpHelp(""), + /* 168 */ "CursorHint" OpHelp(""), + /* 169 */ "Noop" OpHelp(""), + /* 170 */ "Explain" OpHelp(""), }; return azName[i]; } @@ -30775,6 +31247,9 @@ struct unixFile { #if SQLITE_ENABLE_LOCKING_STYLE || defined(__APPLE__) unsigned fsFlags; /* cached details from statfs() */ #endif +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + unsigned iBusyTimeout; /* Wait this many millisec on locks */ +#endif #if OS_VXWORKS struct vxworksFileId *pId; /* Unique file ID */ #endif @@ -31212,7 +31687,11 @@ static struct unix_syscall { #endif #define osFchown ((int(*)(int,uid_t,gid_t))aSyscall[20].pCurrent) +#if defined(HAVE_FCHOWN) { "geteuid", (sqlite3_syscall_ptr)geteuid, 0 }, +#else + { "geteuid", (sqlite3_syscall_ptr)0, 0 }, +#endif #define osGeteuid ((uid_t(*)(void))aSyscall[21].pCurrent) #if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 @@ -31440,15 +31919,16 @@ static int robust_open(const char *z, int f, mode_t m){ ** assert( unixMutexHeld() ); ** unixEnterLeave() */ +static sqlite3_mutex *unixBigLock = 0; static void unixEnterMutex(void){ - sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); + sqlite3_mutex_enter(unixBigLock); } static void unixLeaveMutex(void){ - sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); + sqlite3_mutex_leave(unixBigLock); } #ifdef SQLITE_DEBUG static int unixMutexHeld(void) { - return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); + return sqlite3_mutex_held(unixBigLock); } #endif @@ -32204,6 +32684,43 @@ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){ return rc; } +/* +** Set a posix-advisory-lock. +** +** There are two versions of this routine. If compiled with +** SQLITE_ENABLE_SETLK_TIMEOUT then the routine has an extra parameter +** which is a pointer to a unixFile. If the unixFile->iBusyTimeout +** value is set, then it is the number of milliseconds to wait before +** failing the lock. The iBusyTimeout value is always reset back to +** zero on each call. +** +** If SQLITE_ENABLE_SETLK_TIMEOUT is not defined, then do a non-blocking +** attempt to set the lock. +*/ +#ifndef SQLITE_ENABLE_SETLK_TIMEOUT +# define osSetPosixAdvisoryLock(h,x,t) osFcntl(h,F_SETLK,x) +#else +static int osSetPosixAdvisoryLock( + int h, /* The file descriptor on which to take the lock */ + struct flock *pLock, /* The description of the lock */ + unixFile *pFile /* Structure holding timeout value */ +){ + int rc = osFcntl(h,F_SETLK,pLock); + while( rc<0 && pFile->iBusyTimeout>0 ){ + /* On systems that support some kind of blocking file lock with a timeout, + ** make appropriate changes here to invoke that blocking file lock. On + ** generic posix, however, there is no such API. So we simply try the + ** lock once every millisecond until either the timeout expires, or until + ** the lock is obtained. */ + usleep(1000); + rc = osFcntl(h,F_SETLK,pLock); + pFile->iBusyTimeout--; + } + return rc; +} +#endif /* SQLITE_ENABLE_SETLK_TIMEOUT */ + + /* ** Attempt to set a system-lock on the file pFile. The lock is ** described by pLock. 
@@ -32236,7 +32753,7 @@ static int unixFileLock(unixFile *pFile, struct flock *pLock){ lock.l_start = SHARED_FIRST; lock.l_len = SHARED_SIZE; lock.l_type = F_WRLCK; - rc = osFcntl(pFile->h, F_SETLK, &lock); + rc = osSetPosixAdvisoryLock(pFile->h, &lock, pFile); if( rc<0 ) return rc; pInode->bProcessLock = 1; pInode->nLock++; @@ -32244,7 +32761,7 @@ static int unixFileLock(unixFile *pFile, struct flock *pLock){ rc = 0; } }else{ - rc = osFcntl(pFile->h, F_SETLK, pLock); + rc = osSetPosixAdvisoryLock(pFile->h, pLock, pFile); } return rc; } @@ -34604,6 +35121,12 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){ *(int*)pArg = fileHasMoved(pFile); return SQLITE_OK; } +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + case SQLITE_FCNTL_LOCK_TIMEOUT: { + pFile->iBusyTimeout = *(int*)pArg; + return SQLITE_OK; + } +#endif #if SQLITE_MAX_MMAP_SIZE>0 case SQLITE_FCNTL_MMAP_SIZE: { i64 newLimit = *(i64*)pArg; @@ -34919,13 +35442,11 @@ static int unixShmSystemLock( if( pShmNode->h>=0 ){ /* Initialize the locking parameters */ - memset(&f, 0, sizeof(f)); f.l_type = lockType; f.l_whence = SEEK_SET; f.l_start = ofst; f.l_len = n; - - rc = osFcntl(pShmNode->h, F_SETLK, &f); + rc = osSetPosixAdvisoryLock(pShmNode->h, &f, pFile); rc = (rc!=(-1)) ? SQLITE_OK : SQLITE_BUSY; } @@ -36590,7 +37111,6 @@ static int unixOpen( randomnessPid = osGetpid(0); sqlite3_randomness(0,0); } - memset(p, 0, sizeof(unixFile)); if( eType==SQLITE_OPEN_MAIN_DB ){ @@ -38465,6 +38985,7 @@ SQLITE_API int sqlite3_os_init(void){ for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){ sqlite3_vfs_register(&aVfs[i], i==0); } + unixBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); return SQLITE_OK; } @@ -38476,6 +38997,7 @@ SQLITE_API int sqlite3_os_init(void){ ** This routine is a no-op for unix. */ SQLITE_API int sqlite3_os_end(void){ + unixBigLock = 0; return SQLITE_OK; } @@ -42314,15 +42836,16 @@ static SYSTEM_INFO winSysInfo; ** assert( winShmMutexHeld() ); ** winShmLeaveMutex() */ +static sqlite3_mutex *winBigLock = 0; static void winShmEnterMutex(void){ - sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); + sqlite3_mutex_enter(winBigLock); } static void winShmLeaveMutex(void){ - sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); + sqlite3_mutex_leave(winBigLock); } #ifndef NDEBUG static int winShmMutexHeld(void) { - return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); + return sqlite3_mutex_held(winBigLock); } #endif @@ -44745,6 +45268,10 @@ SQLITE_API int sqlite3_os_init(void){ sqlite3_vfs_register(&winLongPathNolockVfs, 0); #endif +#ifndef SQLITE_OMIT_WAL + winBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); +#endif + return SQLITE_OK; } @@ -44755,12 +45282,609 @@ SQLITE_API int sqlite3_os_end(void){ sleepObj = NULL; } #endif + +#ifndef SQLITE_OMIT_WAL + winBigLock = 0; +#endif + return SQLITE_OK; } #endif /* SQLITE_OS_WIN */ /************** End of os_win.c **********************************************/ +/************** Begin file memdb.c *******************************************/ +/* +** 2016-09-07 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file implements an in-memory VFS. A database is held as a contiguous +** block of memory. 
+** +** This file also implements interface sqlite3_serialize() and +** sqlite3_deserialize(). +*/ +#ifdef SQLITE_ENABLE_DESERIALIZE +/* #include "sqliteInt.h" */ + +/* +** Forward declaration of objects used by this utility +*/ +typedef struct sqlite3_vfs MemVfs; +typedef struct MemFile MemFile; + +/* Access to a lower-level VFS that (might) implement dynamic loading, +** access to randomness, etc. +*/ +#define ORIGVFS(p) ((sqlite3_vfs*)((p)->pAppData)) + +/* An open file */ +struct MemFile { + sqlite3_file base; /* IO methods */ + sqlite3_int64 sz; /* Size of the file */ + sqlite3_int64 szMax; /* Space allocated to aData */ + unsigned char *aData; /* content of the file */ + int nMmap; /* Number of memory mapped pages */ + unsigned mFlags; /* Flags */ + int eLock; /* Most recent lock against this file */ +}; + +/* +** Methods for MemFile +*/ +static int memdbClose(sqlite3_file*); +static int memdbRead(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int memdbWrite(sqlite3_file*,const void*,int iAmt, sqlite3_int64 iOfst); +static int memdbTruncate(sqlite3_file*, sqlite3_int64 size); +static int memdbSync(sqlite3_file*, int flags); +static int memdbFileSize(sqlite3_file*, sqlite3_int64 *pSize); +static int memdbLock(sqlite3_file*, int); +/* static int memdbCheckReservedLock(sqlite3_file*, int *pResOut);// not used */ +static int memdbFileControl(sqlite3_file*, int op, void *pArg); +/* static int memdbSectorSize(sqlite3_file*); // not used */ +static int memdbDeviceCharacteristics(sqlite3_file*); +static int memdbFetch(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp); +static int memdbUnfetch(sqlite3_file*, sqlite3_int64 iOfst, void *p); + +/* +** Methods for MemVfs +*/ +static int memdbOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +/* static int memdbDelete(sqlite3_vfs*, const char *zName, int syncDir); */ +static int memdbAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int memdbFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); +static void *memdbDlOpen(sqlite3_vfs*, const char *zFilename); +static void memdbDlError(sqlite3_vfs*, int nByte, char *zErrMsg); +static void (*memdbDlSym(sqlite3_vfs *pVfs, void *p, const char*zSym))(void); +static void memdbDlClose(sqlite3_vfs*, void*); +static int memdbRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int memdbSleep(sqlite3_vfs*, int microseconds); +/* static int memdbCurrentTime(sqlite3_vfs*, double*); */ +static int memdbGetLastError(sqlite3_vfs*, int, char *); +static int memdbCurrentTimeInt64(sqlite3_vfs*, sqlite3_int64*); + +static sqlite3_vfs memdb_vfs = { + 2, /* iVersion */ + 0, /* szOsFile (set when registered) */ + 1024, /* mxPathname */ + 0, /* pNext */ + "memdb", /* zName */ + 0, /* pAppData (set when registered) */ + memdbOpen, /* xOpen */ + 0, /* memdbDelete, */ /* xDelete */ + memdbAccess, /* xAccess */ + memdbFullPathname, /* xFullPathname */ + memdbDlOpen, /* xDlOpen */ + memdbDlError, /* xDlError */ + memdbDlSym, /* xDlSym */ + memdbDlClose, /* xDlClose */ + memdbRandomness, /* xRandomness */ + memdbSleep, /* xSleep */ + 0, /* memdbCurrentTime, */ /* xCurrentTime */ + memdbGetLastError, /* xGetLastError */ + memdbCurrentTimeInt64 /* xCurrentTimeInt64 */ +}; + +static const sqlite3_io_methods memdb_io_methods = { + 3, /* iVersion */ + memdbClose, /* xClose */ + memdbRead, /* xRead */ + memdbWrite, /* xWrite */ + memdbTruncate, /* xTruncate */ + memdbSync, /* xSync */ + memdbFileSize, /* xFileSize */ + memdbLock, /* xLock */ + memdbLock, /* xUnlock - 
same as xLock in this case */ + 0, /* memdbCheckReservedLock, */ /* xCheckReservedLock */ + memdbFileControl, /* xFileControl */ + 0, /* memdbSectorSize,*/ /* xSectorSize */ + memdbDeviceCharacteristics, /* xDeviceCharacteristics */ + 0, /* xShmMap */ + 0, /* xShmLock */ + 0, /* xShmBarrier */ + 0, /* xShmUnmap */ + memdbFetch, /* xFetch */ + memdbUnfetch /* xUnfetch */ +}; + + + +/* +** Close an memdb-file. +** +** The pData pointer is owned by the application, so there is nothing +** to free. +*/ +static int memdbClose(sqlite3_file *pFile){ + MemFile *p = (MemFile *)pFile; + if( p->mFlags & SQLITE_DESERIALIZE_FREEONCLOSE ) sqlite3_free(p->aData); + return SQLITE_OK; +} + +/* +** Read data from an memdb-file. +*/ +static int memdbRead( + sqlite3_file *pFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + MemFile *p = (MemFile *)pFile; + if( iOfst+iAmt>p->sz ){ + memset(zBuf, 0, iAmt); + if( iOfstsz ) memcpy(zBuf, p->aData+iOfst, p->sz - iOfst); + return SQLITE_IOERR_SHORT_READ; + } + memcpy(zBuf, p->aData+iOfst, iAmt); + return SQLITE_OK; +} + +/* +** Try to enlarge the memory allocation to hold at least sz bytes +*/ +static int memdbEnlarge(MemFile *p, sqlite3_int64 newSz){ + unsigned char *pNew; + if( (p->mFlags & SQLITE_DESERIALIZE_RESIZEABLE)==0 || p->nMmap>0 ){ + return SQLITE_FULL; + } + pNew = sqlite3_realloc64(p->aData, newSz); + if( pNew==0 ) return SQLITE_NOMEM; + p->aData = pNew; + p->szMax = newSz; + return SQLITE_OK; +} + +/* +** Write data to an memdb-file. +*/ +static int memdbWrite( + sqlite3_file *pFile, + const void *z, + int iAmt, + sqlite_int64 iOfst +){ + MemFile *p = (MemFile *)pFile; + if( iOfst+iAmt>p->sz ){ + int rc; + if( iOfst+iAmt>p->szMax + && (rc = memdbEnlarge(p, (iOfst+iAmt)*2))!=SQLITE_OK + ){ + return rc; + } + if( iOfst>p->sz ) memset(p->aData+p->sz, 0, iOfst-p->sz); + p->sz = iOfst+iAmt; + } + memcpy(p->aData+iOfst, z, iAmt); + return SQLITE_OK; +} + +/* +** Truncate an memdb-file. +** +** In rollback mode (which is always the case for memdb, as it does not +** support WAL mode) the truncate() method is only used to reduce +** the size of a file, never to increase the size. +*/ +static int memdbTruncate(sqlite3_file *pFile, sqlite_int64 size){ + MemFile *p = (MemFile *)pFile; + if( NEVER(size>p->sz) ) return SQLITE_FULL; + p->sz = size; + return SQLITE_OK; +} + +/* +** Sync an memdb-file. +*/ +static int memdbSync(sqlite3_file *pFile, int flags){ + return SQLITE_OK; +} + +/* +** Return the current file-size of an memdb-file. +*/ +static int memdbFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ + MemFile *p = (MemFile *)pFile; + *pSize = p->sz; + return SQLITE_OK; +} + +/* +** Lock an memdb-file. +*/ +static int memdbLock(sqlite3_file *pFile, int eLock){ + MemFile *p = (MemFile *)pFile; + p->eLock = eLock; + return SQLITE_OK; +} + +#if 0 /* Never used because memdbAccess() always returns false */ +/* +** Check if another file-handle holds a RESERVED lock on an memdb-file. +*/ +static int memdbCheckReservedLock(sqlite3_file *pFile, int *pResOut){ + *pResOut = 0; + return SQLITE_OK; +} +#endif + +/* +** File control method. For custom operations on an memdb-file. 
+*/ +static int memdbFileControl(sqlite3_file *pFile, int op, void *pArg){ + MemFile *p = (MemFile *)pFile; + int rc = SQLITE_NOTFOUND; + if( op==SQLITE_FCNTL_VFSNAME ){ + *(char**)pArg = sqlite3_mprintf("memdb(%p,%lld)", p->aData, p->sz); + rc = SQLITE_OK; + } + return rc; +} + +#if 0 /* Not used because of SQLITE_IOCAP_POWERSAFE_OVERWRITE */ +/* +** Return the sector-size in bytes for an memdb-file. +*/ +static int memdbSectorSize(sqlite3_file *pFile){ + return 1024; +} +#endif + +/* +** Return the device characteristic flags supported by an memdb-file. +*/ +static int memdbDeviceCharacteristics(sqlite3_file *pFile){ + return SQLITE_IOCAP_ATOMIC | + SQLITE_IOCAP_POWERSAFE_OVERWRITE | + SQLITE_IOCAP_SAFE_APPEND | + SQLITE_IOCAP_SEQUENTIAL; +} + +/* Fetch a page of a memory-mapped file */ +static int memdbFetch( + sqlite3_file *pFile, + sqlite3_int64 iOfst, + int iAmt, + void **pp +){ + MemFile *p = (MemFile *)pFile; + p->nMmap++; + *pp = (void*)(p->aData + iOfst); + return SQLITE_OK; +} + +/* Release a memory-mapped page */ +static int memdbUnfetch(sqlite3_file *pFile, sqlite3_int64 iOfst, void *pPage){ + MemFile *p = (MemFile *)pFile; + p->nMmap--; + return SQLITE_OK; +} + +/* +** Open an mem file handle. +*/ +static int memdbOpen( + sqlite3_vfs *pVfs, + const char *zName, + sqlite3_file *pFile, + int flags, + int *pOutFlags +){ + MemFile *p = (MemFile*)pFile; + if( (flags & SQLITE_OPEN_MAIN_DB)==0 ){ + return ORIGVFS(pVfs)->xOpen(ORIGVFS(pVfs), zName, pFile, flags, pOutFlags); + } + memset(p, 0, sizeof(*p)); + p->mFlags = SQLITE_DESERIALIZE_RESIZEABLE | SQLITE_DESERIALIZE_FREEONCLOSE; + assert( pOutFlags!=0 ); /* True because flags==SQLITE_OPEN_MAIN_DB */ + *pOutFlags = flags | SQLITE_OPEN_MEMORY; + p->base.pMethods = &memdb_io_methods; + return SQLITE_OK; +} + +#if 0 /* Only used to delete rollback journals, master journals, and WAL + ** files, none of which exist in memdb. So this routine is never used */ +/* +** Delete the file located at zPath. If the dirSync argument is true, +** ensure the file-system modifications are synced to disk before +** returning. +*/ +static int memdbDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + return SQLITE_IOERR_DELETE; +} +#endif + +/* +** Test for access permissions. Return true if the requested permission +** is available, or false otherwise. +** +** With memdb, no files ever exist on disk. So always return false. +*/ +static int memdbAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + *pResOut = 0; + return SQLITE_OK; +} + +/* +** Populate buffer zOut with the full canonical pathname corresponding +** to the pathname in zPath. zOut is guaranteed to point to a buffer +** of at least (INST_MAX_PATHNAME+1) bytes. +*/ +static int memdbFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nOut, + char *zOut +){ + sqlite3_snprintf(nOut, zOut, "%s", zPath); + return SQLITE_OK; +} + +/* +** Open the dynamic library located at zPath and return a handle. +*/ +static void *memdbDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return ORIGVFS(pVfs)->xDlOpen(ORIGVFS(pVfs), zPath); +} + +/* +** Populate the buffer zErrMsg (size nByte bytes) with a human readable +** utf-8 string describing the most recent error encountered associated +** with dynamic libraries. +*/ +static void memdbDlError(sqlite3_vfs *pVfs, int nByte, char *zErrMsg){ + ORIGVFS(pVfs)->xDlError(ORIGVFS(pVfs), nByte, zErrMsg); +} + +/* +** Return a pointer to the symbol zSymbol in the dynamic library pHandle. 
+*/ +static void (*memdbDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void){ + return ORIGVFS(pVfs)->xDlSym(ORIGVFS(pVfs), p, zSym); +} + +/* +** Close the dynamic library handle pHandle. +*/ +static void memdbDlClose(sqlite3_vfs *pVfs, void *pHandle){ + ORIGVFS(pVfs)->xDlClose(ORIGVFS(pVfs), pHandle); +} + +/* +** Populate the buffer pointed to by zBufOut with nByte bytes of +** random data. +*/ +static int memdbRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + return ORIGVFS(pVfs)->xRandomness(ORIGVFS(pVfs), nByte, zBufOut); +} + +/* +** Sleep for nMicro microseconds. Return the number of microseconds +** actually slept. +*/ +static int memdbSleep(sqlite3_vfs *pVfs, int nMicro){ + return ORIGVFS(pVfs)->xSleep(ORIGVFS(pVfs), nMicro); +} + +#if 0 /* Never used. Modern cores only call xCurrentTimeInt64() */ +/* +** Return the current time as a Julian Day number in *pTimeOut. +*/ +static int memdbCurrentTime(sqlite3_vfs *pVfs, double *pTimeOut){ + return ORIGVFS(pVfs)->xCurrentTime(ORIGVFS(pVfs), pTimeOut); +} +#endif + +static int memdbGetLastError(sqlite3_vfs *pVfs, int a, char *b){ + return ORIGVFS(pVfs)->xGetLastError(ORIGVFS(pVfs), a, b); +} +static int memdbCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *p){ + return ORIGVFS(pVfs)->xCurrentTimeInt64(ORIGVFS(pVfs), p); +} + +/* +** Translate a database connection pointer and schema name into a +** MemFile pointer. +*/ +static MemFile *memdbFromDbSchema(sqlite3 *db, const char *zSchema){ + MemFile *p = 0; + int rc = sqlite3_file_control(db, zSchema, SQLITE_FCNTL_FILE_POINTER, &p); + if( rc ) return 0; + if( p->base.pMethods!=&memdb_io_methods ) return 0; + return p; +} + +/* +** Return the serialization of a database +*/ +SQLITE_API unsigned char *sqlite3_serialize( + sqlite3 *db, /* The database connection */ + const char *zSchema, /* Which database within the connection */ + sqlite3_int64 *piSize, /* Write size here, if not NULL */ + unsigned int mFlags /* Maybe SQLITE_SERIALIZE_NOCOPY */ +){ + MemFile *p; + int iDb; + Btree *pBt; + sqlite3_int64 sz; + int szPage = 0; + sqlite3_stmt *pStmt = 0; + unsigned char *pOut; + char *zSql; + int rc; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + + if( zSchema==0 ) zSchema = db->aDb[0].zDbSName; + p = memdbFromDbSchema(db, zSchema); + iDb = sqlite3FindDbName(db, zSchema); + if( piSize ) *piSize = -1; + if( iDb<0 ) return 0; + if( p ){ + if( piSize ) *piSize = p->sz; + if( mFlags & SQLITE_SERIALIZE_NOCOPY ){ + pOut = p->aData; + }else{ + pOut = sqlite3_malloc64( p->sz ); + if( pOut ) memcpy(pOut, p->aData, p->sz); + } + return pOut; + } + pBt = db->aDb[iDb].pBt; + if( pBt==0 ) return 0; + szPage = sqlite3BtreeGetPageSize(pBt); + zSql = sqlite3_mprintf("PRAGMA \"%w\".page_count", zSchema); + rc = zSql ? 
sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0) : SQLITE_NOMEM; + sqlite3_free(zSql); + if( rc ) return 0; + rc = sqlite3_step(pStmt); + if( rc!=SQLITE_ROW ){ + pOut = 0; + }else{ + sz = sqlite3_column_int64(pStmt, 0)*szPage; + if( piSize ) *piSize = sz; + if( mFlags & SQLITE_SERIALIZE_NOCOPY ){ + pOut = 0; + }else{ + pOut = sqlite3_malloc64( sz ); + if( pOut ){ + int nPage = sqlite3_column_int(pStmt, 0); + Pager *pPager = sqlite3BtreePager(pBt); + int pgno; + for(pgno=1; pgno<=nPage; pgno++){ + DbPage *pPage = 0; + unsigned char *pTo = pOut + szPage*(sqlite3_int64)(pgno-1); + rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pPage, 0); + if( rc==SQLITE_OK ){ + memcpy(pTo, sqlite3PagerGetData(pPage), szPage); + }else{ + memset(pTo, 0, szPage); + } + sqlite3PagerUnref(pPage); + } + } + } + } + sqlite3_finalize(pStmt); + return pOut; +} + +/* Convert zSchema to a MemDB and initialize its content. +*/ +SQLITE_API int sqlite3_deserialize( + sqlite3 *db, /* The database connection */ + const char *zSchema, /* Which DB to reopen with the deserialization */ + unsigned char *pData, /* The serialized database content */ + sqlite3_int64 szDb, /* Number bytes in the deserialization */ + sqlite3_int64 szBuf, /* Total size of buffer pData[] */ + unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */ +){ + MemFile *p; + char *zSql; + sqlite3_stmt *pStmt = 0; + int rc; + int iDb; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE_BKPT; + } + if( szDb<0 ) return SQLITE_MISUSE_BKPT; + if( szBuf<0 ) return SQLITE_MISUSE_BKPT; +#endif + + sqlite3_mutex_enter(db->mutex); + if( zSchema==0 ) zSchema = db->aDb[0].zDbSName; + iDb = sqlite3FindDbName(db, zSchema); + if( iDb<0 ){ + rc = SQLITE_ERROR; + goto end_deserialize; + } + zSql = sqlite3_mprintf("ATTACH x AS %Q", zSchema); + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + sqlite3_free(zSql); + if( rc ) goto end_deserialize; + db->init.iDb = (u8)iDb; + db->init.reopenMemdb = 1; + rc = sqlite3_step(pStmt); + db->init.reopenMemdb = 0; + if( rc!=SQLITE_DONE ){ + rc = SQLITE_ERROR; + goto end_deserialize; + } + p = memdbFromDbSchema(db, zSchema); + if( p==0 ){ + rc = SQLITE_ERROR; + }else{ + p->aData = pData; + p->sz = szDb; + p->szMax = szBuf; + p->mFlags = mFlags; + rc = SQLITE_OK; + } + +end_deserialize: + sqlite3_finalize(pStmt); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** This routine is called when the extension is loaded. +** Register the new VFS. +*/ +SQLITE_PRIVATE int sqlite3MemdbInit(void){ + sqlite3_vfs *pLower = sqlite3_vfs_find(0); + int sz = pLower->szOsFile; + memdb_vfs.pAppData = pLower; + /* In all known configurations of SQLite, the size of a default + ** sqlite3_file is greater than the size of a memdb sqlite3_file. 
+ ** Should that ever change, remove the following NEVER() */ + if( NEVER(szpgno, pgno, - sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), + sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache), numberOfCachePages(pCache)); #endif pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno)); @@ -48665,7 +49789,7 @@ struct Pager { char *zJournal; /* Name of the journal file */ int (*xBusyHandler)(void*); /* Function to call when busy */ void *pBusyHandlerArg; /* Context argument for xBusyHandler */ - int aStat[3]; /* Total cache hits, misses and writes */ + int aStat[4]; /* Total cache hits, misses, writes, spills */ #ifdef SQLITE_TEST int nRead; /* Database pages read */ #endif @@ -48693,6 +49817,7 @@ struct Pager { #define PAGER_STAT_HIT 0 #define PAGER_STAT_MISS 1 #define PAGER_STAT_WRITE 2 +#define PAGER_STAT_SPILL 3 /* ** The following global variables hold counters used for @@ -49179,7 +50304,7 @@ static int jrnlBufferSize(Pager *pPager){ #endif #ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE - if( dc&SQLITE_IOCAP_BATCH_ATOMIC ){ + if( pPager->dbSize>0 && (dc&SQLITE_IOCAP_BATCH_ATOMIC) ){ return -1; } #endif @@ -50095,7 +51220,7 @@ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){ rc = pager_truncate(pPager, pPager->dbSize); } - if( rc==SQLITE_OK && bCommit && isOpen(pPager->fd) ){ + if( rc==SQLITE_OK && bCommit ){ rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_COMMIT_PHASETWO, 0); if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; } @@ -50914,9 +52039,7 @@ end_playback: ** assertion that the transaction counter was modified. */ #ifdef SQLITE_DEBUG - if( pPager->fd->pMethods ){ - sqlite3OsFileControlHint(pPager->fd,SQLITE_FCNTL_DB_UNCHANGED,0); - } + sqlite3OsFileControlHint(pPager->fd,SQLITE_FCNTL_DB_UNCHANGED,0); #endif /* If this playback is happening automatically as a result of an IO or @@ -51669,20 +52792,18 @@ static int pagerOpentemp( ** retried. If it returns zero, then the SQLITE_BUSY error is ** returned to the caller of the pager API function. */ -SQLITE_PRIVATE void sqlite3PagerSetBusyhandler( +SQLITE_PRIVATE void sqlite3PagerSetBusyHandler( Pager *pPager, /* Pager object */ int (*xBusyHandler)(void *), /* Pointer to busy-handler function */ void *pBusyHandlerArg /* Argument to pass to xBusyHandler */ ){ + void **ap; pPager->xBusyHandler = xBusyHandler; pPager->pBusyHandlerArg = pBusyHandlerArg; - - if( isOpen(pPager->fd) ){ - void **ap = (void **)&pPager->xBusyHandler; - assert( ((int(*)(void *))(ap[0]))==xBusyHandler ); - assert( ap[1]==pBusyHandlerArg ); - sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_BUSYHANDLER, (void *)ap); - } + ap = (void **)&pPager->xBusyHandler; + assert( ((int(*)(void *))(ap[0]))==xBusyHandler ); + assert( ap[1]==pBusyHandlerArg ); + sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_BUSYHANDLER, (void *)ap); } /* @@ -52068,6 +53189,30 @@ static void pagerFreeMapHdrs(Pager *pPager){ } } +/* Verify that the database file has not be deleted or renamed out from +** under the pager. Return SQLITE_OK if the database is still where it ought +** to be on disk. Return non-zero (SQLITE_READONLY_DBMOVED or some other error +** code from sqlite3OsAccess()) if the database has gone missing. 
+*/ +static int databaseIsUnmoved(Pager *pPager){ + int bHasMoved = 0; + int rc; + + if( pPager->tempFile ) return SQLITE_OK; + if( pPager->dbSize==0 ) return SQLITE_OK; + assert( pPager->zFilename && pPager->zFilename[0] ); + rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_HAS_MOVED, &bHasMoved); + if( rc==SQLITE_NOTFOUND ){ + /* If the HAS_MOVED file-control is unimplemented, assume that the file + ** has not been moved. That is the historical behavior of SQLite: prior to + ** version 3.8.3, it never checked */ + rc = SQLITE_OK; + }else if( rc==SQLITE_OK && bHasMoved ){ + rc = SQLITE_READONLY_DBMOVED; + } + return rc; +} + /* ** Shutdown the page cache. Free all memory and close all files. @@ -52084,8 +53229,7 @@ static void pagerFreeMapHdrs(Pager *pPager){ ** to the caller. */ SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3 *db){ - u8 *pTmp = (u8 *)pPager->pTmpSpace; - + u8 *pTmp = (u8*)pPager->pTmpSpace; assert( db || pagerUseWal(pPager)==0 ); assert( assert_pager_state(pPager) ); disable_simulated_io_errors(); @@ -52094,11 +53238,17 @@ SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3 *db){ /* pPager->errCode = 0; */ pPager->exclusiveMode = 0; #ifndef SQLITE_OMIT_WAL - assert( db || pPager->pWal==0 ); - sqlite3WalClose(pPager->pWal, db, pPager->walSyncFlags, pPager->pageSize, - (db && (db->flags & SQLITE_NoCkptOnClose) ? 0 : pTmp) - ); - pPager->pWal = 0; + { + u8 *a = 0; + assert( db || pPager->pWal==0 ); + if( db && 0==(db->flags & SQLITE_NoCkptOnClose) + && SQLITE_OK==databaseIsUnmoved(pPager) + ){ + a = pTmp; + } + sqlite3WalClose(pPager->pWal, db, pPager->walSyncFlags, pPager->pageSize,a); + pPager->pWal = 0; + } #endif pager_reset(pPager); if( MEMDB ){ @@ -52555,6 +53705,7 @@ static int pagerStress(void *p, PgHdr *pPg){ return SQLITE_OK; } + pPager->aStat[PAGER_STAT_SPILL]++; pPg->pDirty = 0; if( pagerUseWal(pPager) ){ /* Write a single frame for this page to the log. */ @@ -52660,6 +53811,11 @@ SQLITE_PRIVATE int sqlite3PagerOpen( int rc = SQLITE_OK; /* Return code */ int tempFile = 0; /* True for temp files (incl. in-memory files) */ int memDb = 0; /* True if this is an in-memory file */ +#ifdef SQLITE_ENABLE_DESERIALIZE + int memJM = 0; /* Memory journal mode */ +#else +# define memJM 0 +#endif int readOnly = 0; /* True if this is a read-only file */ int journalFileSize; /* Bytes to allocate for each journal fd */ char *zPathname = 0; /* Full path to database file */ @@ -52787,7 +53943,10 @@ SQLITE_PRIVATE int sqlite3PagerOpen( int fout = 0; /* VFS flags returned by xOpen() */ rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); assert( !memDb ); - readOnly = (fout&SQLITE_OPEN_READONLY); +#ifdef SQLITE_ENABLE_DESERIALIZE + memJM = (fout&SQLITE_OPEN_MEMORY)!=0; +#endif + readOnly = (fout&SQLITE_OPEN_READONLY)!=0; /* If the file was successfully opened for read/write access, ** choose a default page size in case we have to create the @@ -52918,7 +54077,7 @@ act_like_temp_file: setSectorSize(pPager); if( !useJournal ){ pPager->journalMode = PAGER_JOURNALMODE_OFF; - }else if( memDb ){ + }else if( memDb || memJM ){ pPager->journalMode = PAGER_JOURNALMODE_MEMORY; } /* pPager->xBusyHandler = 0; */ @@ -52933,30 +54092,6 @@ act_like_temp_file: } -/* Verify that the database file has not be deleted or renamed out from -** under the pager. Return SQLITE_OK if the database is still were it ought -** to be on disk. Return non-zero (SQLITE_READONLY_DBMOVED or some other error -** code from sqlite3OsAccess()) if the database has gone missing. 
-*/ -static int databaseIsUnmoved(Pager *pPager){ - int bHasMoved = 0; - int rc; - - if( pPager->tempFile ) return SQLITE_OK; - if( pPager->dbSize==0 ) return SQLITE_OK; - assert( pPager->zFilename && pPager->zFilename[0] ); - rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_HAS_MOVED, &bHasMoved); - if( rc==SQLITE_NOTFOUND ){ - /* If the HAS_MOVED file-control is unimplemented, assume that the file - ** has not been moved. That is the historical behavior of SQLite: prior to - ** version 3.8.3, it never checked */ - rc = SQLITE_OK; - }else if( rc==SQLITE_OK && bHasMoved ){ - rc = SQLITE_READONLY_DBMOVED; - } - return rc; -} - /* ** This function is called after transitioning from PAGER_UNLOCK to @@ -53644,6 +54779,7 @@ SQLITE_PRIVATE void sqlite3PagerUnrefPageOne(DbPage *pPg){ assert( pPg->pgno==1 ); assert( (pPg->flags & PGHDR_MMAP)==0 ); /* Page1 is never memory mapped */ pPager = pPg->pPager; + sqlite3PagerResetLockTimeout(pPager); sqlite3PcacheRelease(pPg); pagerUnlockIfUnused(pPager); } @@ -54239,12 +55375,9 @@ static int pager_incr_changecounter(Pager *pPager, int isDirectMode){ */ SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster){ int rc = SQLITE_OK; - - if( isOpen(pPager->fd) ){ - void *pArg = (void*)zMaster; - rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SYNC, pArg); - if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; - } + void *pArg = (void*)zMaster; + rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SYNC, pArg); + if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; if( rc==SQLITE_OK && !pPager->noSync ){ assert( !MEMDB ); rc = sqlite3OsSync(pPager->fd, pPager->syncFlags); @@ -54465,8 +55598,9 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne( if( bBatch ){ if( rc==SQLITE_OK ){ rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, 0); - }else{ - sqlite3OsFileControl(fd, SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE, 0); + } + if( rc!=SQLITE_OK ){ + sqlite3OsFileControlHint(fd, SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE, 0); } } @@ -54690,8 +55824,12 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ #endif /* -** Parameter eStat must be either SQLITE_DBSTATUS_CACHE_HIT or -** SQLITE_DBSTATUS_CACHE_MISS. Before returning, *pnVal is incremented by the +** Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE, +** or _WRITE+1. The SQLITE_DBSTATUS_CACHE_WRITE+1 case is a translation +** of SQLITE_DBSTATUS_CACHE_SPILL. The _SPILL case is not contiguous because +** it was added later. +** +** Before returning, *pnVal is incremented by the ** current cache hit or miss count, according to the value of eStat. If the ** reset parameter is non-zero, the cache hit or miss count is zeroed before ** returning. 
@@ -54701,15 +55839,18 @@ SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, i assert( eStat==SQLITE_DBSTATUS_CACHE_HIT || eStat==SQLITE_DBSTATUS_CACHE_MISS || eStat==SQLITE_DBSTATUS_CACHE_WRITE + || eStat==SQLITE_DBSTATUS_CACHE_WRITE+1 ); assert( SQLITE_DBSTATUS_CACHE_HIT+1==SQLITE_DBSTATUS_CACHE_MISS ); assert( SQLITE_DBSTATUS_CACHE_HIT+2==SQLITE_DBSTATUS_CACHE_WRITE ); - assert( PAGER_STAT_HIT==0 && PAGER_STAT_MISS==1 && PAGER_STAT_WRITE==2 ); + assert( PAGER_STAT_HIT==0 && PAGER_STAT_MISS==1 + && PAGER_STAT_WRITE==2 && PAGER_STAT_SPILL==3 ); - *pnVal += pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT]; + eStat -= SQLITE_DBSTATUS_CACHE_HIT; + *pnVal += pPager->aStat[eStat]; if( reset ){ - pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT] = 0; + pPager->aStat[eStat] = 0; } } @@ -54913,6 +56054,16 @@ SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager *pPager){ return pPager->fd; } +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT +/* +** Reset the lock timeout for pager. +*/ +SQLITE_PRIVATE void sqlite3PagerResetLockTimeout(Pager *pPager){ + int x = 0; + sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_LOCK_TIMEOUT, &x); +} +#endif + /* ** Return the file handle for the journal file (if it exists). ** This will be either the rollback journal or the WAL file. @@ -55373,6 +56524,7 @@ SQLITE_PRIVATE int sqlite3PagerCheckpoint( pPager->walSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace, pnLog, pnCkpt ); + sqlite3PagerResetLockTimeout(pPager); } return rc; } @@ -56155,7 +57307,11 @@ struct WalIterator { ** page and SQLITE_OK is returned. If an error (an OOM or VFS error) occurs, ** then an SQLite error code is returned and *ppPage is set to 0. */ -static int walIndexPage(Wal *pWal, int iPage, volatile u32 **ppPage){ +static SQLITE_NOINLINE int walIndexPageRealloc( + Wal *pWal, /* The WAL context */ + int iPage, /* The page we seek */ + volatile u32 **ppPage /* Write the page pointer here */ +){ int rc = SQLITE_OK; /* Enlarge the pWal->apWiData[] array if required */ @@ -56174,21 +57330,20 @@ static int walIndexPage(Wal *pWal, int iPage, volatile u32 **ppPage){ } /* Request a pointer to the required page from the VFS */ - if( pWal->apWiData[iPage]==0 ){ - if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ - pWal->apWiData[iPage] = (u32 volatile *)sqlite3MallocZero(WALINDEX_PGSZ); - if( !pWal->apWiData[iPage] ) rc = SQLITE_NOMEM_BKPT; - }else{ - rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, - pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] - ); - assert( pWal->apWiData[iPage]!=0 || rc!=SQLITE_OK || pWal->writeLock==0 ); - testcase( pWal->apWiData[iPage]==0 && rc==SQLITE_OK ); - if( (rc&0xff)==SQLITE_READONLY ){ - pWal->readOnly |= WAL_SHM_RDONLY; - if( rc==SQLITE_READONLY ){ - rc = SQLITE_OK; - } + assert( pWal->apWiData[iPage]==0 ); + if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ + pWal->apWiData[iPage] = (u32 volatile *)sqlite3MallocZero(WALINDEX_PGSZ); + if( !pWal->apWiData[iPage] ) rc = SQLITE_NOMEM_BKPT; + }else{ + rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, + pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] + ); + assert( pWal->apWiData[iPage]!=0 || rc!=SQLITE_OK || pWal->writeLock==0 ); + testcase( pWal->apWiData[iPage]==0 && rc==SQLITE_OK ); + if( (rc&0xff)==SQLITE_READONLY ){ + pWal->readOnly |= WAL_SHM_RDONLY; + if( rc==SQLITE_READONLY ){ + rc = SQLITE_OK; } } } @@ -56197,6 +57352,16 @@ static int walIndexPage(Wal *pWal, int iPage, volatile u32 **ppPage){ assert( iPage==0 || *ppPage || rc!=SQLITE_OK ); return rc; } +static int 
walIndexPage( + Wal *pWal, /* The WAL context */ + int iPage, /* The page we seek */ + volatile u32 **ppPage /* Write the page pointer here */ +){ + if( pWal->nWiData<=iPage || (*ppPage = pWal->apWiData[iPage])==0 ){ + return walIndexPageRealloc(pWal, iPage, ppPage); + } + return SQLITE_OK; +} /* ** Return a pointer to the WalCkptInfo structure in the wal-index. @@ -57173,8 +58338,9 @@ static void walIteratorFree(WalIterator *p){ /* ** Construct a WalInterator object that can be used to loop over all -** pages in the WAL in ascending order. The caller must hold the checkpoint -** lock. +** pages in the WAL following frame nBackfill in ascending order. Frames +** nBackfill or earlier may be included - excluding them is an optimization +** only. The caller must hold the checkpoint lock. ** ** On success, make *pp point to the newly allocated WalInterator object ** return SQLITE_OK. Otherwise, return an error code. If this routine @@ -57183,7 +58349,7 @@ static void walIteratorFree(WalIterator *p){ ** The calling routine should invoke walIteratorFree() to destroy the ** WalIterator object when it has finished with it. */ -static int walIteratorInit(Wal *pWal, WalIterator **pp){ +static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ WalIterator *p; /* Return value */ int nSegment; /* Number of segments to merge */ u32 iLast; /* Last frame in log */ @@ -57220,7 +58386,7 @@ static int walIteratorInit(Wal *pWal, WalIterator **pp){ rc = SQLITE_NOMEM_BKPT; } - for(i=0; rc==SQLITE_OK && inBackfillhdr.mxFrame ){ - /* Allocate the iterator */ - rc = walIteratorInit(pWal, &pIter); - if( rc!=SQLITE_OK ){ - return rc; - } - assert( pIter ); - /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked ** in the SQLITE_CHECKPOINT_PASSIVE mode. */ assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 ); @@ -57419,7 +58579,13 @@ static int walCheckpoint( } } - if( pInfo->nBackfillnBackfillnBackfill, &pIter); + assert( rc==SQLITE_OK || pIter==0 ); + } + + if( pIter && (rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(0),1))==SQLITE_OK ){ i64 nSize; /* Current size of database file */ @@ -58469,7 +59635,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame( ** table after the current read-transaction had started. */ iMinHash = walFramePage(pWal->minFrame); - for(iHash=walFramePage(iLast); iHash>=iMinHash && iRead==0; iHash--){ + for(iHash=walFramePage(iLast); iHash>=iMinHash; iHash--){ volatile ht_slot *aHash; /* Pointer to hash table */ volatile u32 *aPgno; /* Pointer to array of page numbers */ u32 iZero; /* Frame number corresponding to aPgno[0] */ @@ -58492,6 +59658,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame( return SQLITE_CORRUPT_BKPT; } } + if( iRead ) break; } #ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT @@ -59906,20 +61073,20 @@ struct BtCursor { u8 curFlags; /* zero or more BTCF_* flags defined below */ u8 curPagerFlags; /* Flags to send to sqlite3PagerGet() */ u8 hints; /* As configured by CursorSetHints() */ - int nOvflAlloc; /* Allocated size of aOverflow[] array */ - Btree *pBtree; /* The Btree to which this cursor belongs */ - BtShared *pBt; /* The BtShared this cursor points to */ - BtCursor *pNext; /* Forms a linked list of all cursors */ - Pgno *aOverflow; /* Cache of overflow page locations */ - CellInfo info; /* A parse of the cell we are pointing at */ - i64 nKey; /* Size of pKey, or last integer key */ - void *pKey; /* Saved key that was cursor last known position */ - Pgno pgnoRoot; /* The root page of this tree */ int skipNext; /* Prev() is noop if negative. 
Next() is noop if positive. ** Error code if eState==CURSOR_FAULT */ + Btree *pBtree; /* The Btree to which this cursor belongs */ + Pgno *aOverflow; /* Cache of overflow page locations */ + void *pKey; /* Saved key that was cursor last known position */ /* All fields above are zeroed when the cursor is allocated. See ** sqlite3BtreeCursorZero(). Fields that follow must be manually ** initialized. */ +#define BTCURSOR_FIRST_UNINIT pBt /* Name of first uninitialized field */ + BtShared *pBt; /* The BtShared this cursor points to */ + BtCursor *pNext; /* Forms a linked list of all cursors */ + CellInfo info; /* A parse of the cell we are pointing at */ + i64 nKey; /* Size of pKey, or last integer key */ + Pgno pgnoRoot; /* The root page of this tree */ i8 iPage; /* Index of current page in apPage */ u8 curIntKey; /* Value of apPage[0]->intKey */ u16 ix; /* Current index for apPage[iPage] */ @@ -59969,8 +61136,8 @@ struct BtCursor { ** Do nothing else with this cursor. Any attempt to use the cursor ** should return the error code stored in BtCursor.skipNext */ -#define CURSOR_INVALID 0 -#define CURSOR_VALID 1 +#define CURSOR_VALID 0 +#define CURSOR_INVALID 1 #define CURSOR_SKIPNEXT 2 #define CURSOR_REQUIRESEEK 3 #define CURSOR_FAULT 4 @@ -62634,7 +63801,8 @@ static int btreeInvokeBusyHandler(void *pArg){ BtShared *pBt = (BtShared*)pArg; assert( pBt->db ); assert( sqlite3_mutex_held(pBt->db->mutex) ); - return sqlite3InvokeBusyHandler(&pBt->db->busyHandler); + return sqlite3InvokeBusyHandler(&pBt->db->busyHandler, + sqlite3PagerFile(pBt->pPager)); } /* @@ -62812,7 +63980,7 @@ SQLITE_PRIVATE int sqlite3BtreeOpen( } pBt->openFlags = (u8)flags; pBt->db = db; - sqlite3PagerSetBusyhandler(pBt->pPager, btreeInvokeBusyHandler, pBt); + sqlite3PagerSetBusyHandler(pBt->pPager, btreeInvokeBusyHandler, pBt); p->pBt = pBt; pBt->pCursor = 0; @@ -63775,6 +64943,7 @@ SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag){ } }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && btreeInvokeBusyHandler(pBt) ); + sqlite3PagerResetLockTimeout(pBt->pPager); if( rc==SQLITE_OK ){ if( p->inTrans==TRANS_NONE ){ @@ -64748,7 +65917,7 @@ SQLITE_PRIVATE int sqlite3BtreeCursorSize(void){ ** of run-time by skipping the initialization of those elements. */ SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor *p){ - memset(p, 0, offsetof(BtCursor, iPage)); + memset(p, 0, offsetof(BtCursor, BTCURSOR_FIRST_UNINIT)); } /* @@ -64791,11 +65960,19 @@ SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){ ** Using this cache reduces the number of calls to btreeParseCell(). 
*/ #ifndef NDEBUG + static int cellInfoEqual(CellInfo *a, CellInfo *b){ + if( a->nKey!=b->nKey ) return 0; + if( a->pPayload!=b->pPayload ) return 0; + if( a->nPayload!=b->nPayload ) return 0; + if( a->nLocal!=b->nLocal ) return 0; + if( a->nSize!=b->nSize ) return 0; + return 1; + } static void assertCellInfo(BtCursor *pCur){ CellInfo info; memset(&info, 0, sizeof(info)); btreeParseCell(pCur->pPage, pCur->ix, &info); - assert( CORRUPT_DB || memcmp(&info, &pCur->info, sizeof(info))==0 ); + assert( CORRUPT_DB || cellInfoEqual(&info, &pCur->info) ); } #else #define assertCellInfo(x) @@ -65071,14 +66248,15 @@ static int accessPayload( */ if( (pCur->curFlags & BTCF_ValidOvfl)==0 ){ int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize; - if( nOvfl>pCur->nOvflAlloc ){ + if( pCur->aOverflow==0 + || nOvfl*(int)sizeof(Pgno) > sqlite3MallocSize(pCur->aOverflow) + ){ Pgno *aNew = (Pgno*)sqlite3Realloc( pCur->aOverflow, nOvfl*2*sizeof(Pgno) ); if( aNew==0 ){ return SQLITE_NOMEM_BKPT; }else{ - pCur->nOvflAlloc = nOvfl*2; pCur->aOverflow = aNew; } } @@ -66592,9 +67770,8 @@ static void freePage(MemPage *pPage, int *pRC){ } /* -** Free any overflow pages associated with the given Cell. Write the -** local Cell size (the number of bytes on the original page, omitting -** overflow) into *pnSize. +** Free any overflow pages associated with the given Cell. Store +** size information about the cell in pInfo. */ static int clearCell( MemPage *pPage, /* The page that contains the Cell */ @@ -67798,7 +68975,7 @@ static int balance_nonroot( } /* Load b.apCell[] with pointers to all cells in pOld. If pOld - ** constains overflow cells, include them in the b.apCell[] array + ** contains overflow cells, include them in the b.apCell[] array ** in the correct spot. ** ** Note that when there are multiple overflow cells, it is always the @@ -71283,6 +72460,51 @@ SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){ } #endif +#ifdef SQLITE_DEBUG +/* +** Check that string value of pMem agrees with its integer or real value. +** +** A single int or real value always converts to the same strings. But +** many different strings can be converted into the same int or real. +** If a table contains a numeric value and an index is based on the +** corresponding string value, then it is important that the string be +** derived from the numeric value, not the other way around, to ensure +** that the index and table are consistent. See ticket +** https://www.sqlite.org/src/info/343634942dd54ab (2018-01-31) for +** an example. +** +** This routine looks at pMem to verify that if it has both a numeric +** representation and a string representation then the string rep has +** been derived from the numeric and not the other way around. It returns +** true if everything is ok and false if there is a problem. +** +** This routine is for use inside of assert() statements only. 
+*/ +SQLITE_PRIVATE int sqlite3VdbeMemConsistentDualRep(Mem *p){ + char zBuf[100]; + char *z; + int i, j, incr; + if( (p->flags & MEM_Str)==0 ) return 1; + if( (p->flags & (MEM_Int|MEM_Real))==0 ) return 1; + if( p->flags & MEM_Int ){ + sqlite3_snprintf(sizeof(zBuf),zBuf,"%lld",p->u.i); + }else{ + sqlite3_snprintf(sizeof(zBuf),zBuf,"%!.15g",p->u.r); + } + z = p->z; + i = j = 0; + incr = 1; + if( p->enc!=SQLITE_UTF8 ){ + incr = 2; + if( p->enc==SQLITE_UTF16BE ) z++; + } + while( zBuf[j] ){ + if( zBuf[j++]!=z[i] ) return 0; + i += incr; + } + return 1; +} +#endif /* SQLITE_DEBUG */ /* ** If pMem is an object with a valid string representation, this routine @@ -71717,6 +72939,16 @@ SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){ } } +/* +** Return 1 if pMem represents true, and return 0 if pMem represents false. +** Return the value ifNull if pMem is NULL. +*/ +SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem *pMem, int ifNull){ + if( pMem->flags & MEM_Int ) return pMem->u.i!=0; + if( pMem->flags & MEM_Null ) return ifNull; + return sqlite3VdbeRealValue(pMem)!=0.0; +} + /* ** The MEM structure is already a MEM_Real. Try to also make it a ** MEM_Int if we can. @@ -71772,6 +73004,18 @@ SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem *pMem){ return SQLITE_OK; } +/* Compare a floating point value to an integer. Return true if the two +** values are the same within the precision of the floating point value. +** +** For some versions of GCC on 32-bit machines, if you do the more obvious +** comparison of "r1==(double)i" you sometimes get an answer of false even +** though the r1 and (double)i values are bit-for-bit the same. +*/ +static int sqlite3RealSameAsInt(double r1, sqlite3_int64 i){ + double r2 = (double)i; + return memcmp(&r1, &r2, sizeof(r1))==0; +} + /* ** Convert pMem so that it has types MEM_Real or MEM_Int or both. ** Invalidate any prior representations. 
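The sqlite3RealSameAsInt() helper introduced above compares a double against a 64-bit integer by converting the integer and comparing the two doubles byte-for-byte, because on some 32-bit x87 builds the obvious "r1==(double)i" can report false even when both sides round to the same 64-bit double. A minimal standalone sketch of the same idea, with illustrative names that are not part of the patch:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Compare a double to an int64 the way sqlite3RealSameAsInt() does:
    ** convert the integer to double and compare the two doubles bit-for-bit.
    ** Forcing both values through memory sidesteps the case where one of
    ** them is still held in an 80-bit x87 register. */
    static int real_same_as_int(double r1, int64_t i){
      double r2 = (double)i;
      return memcmp(&r1, &r2, sizeof(r1))==0;
    }

    int main(void){
      printf("%d\n", real_same_as_int(3.0, 3));   /* 1 */
      printf("%d\n", real_same_as_int(3.5, 3));   /* 0 */
      return 0;
    }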
@@ -71791,7 +73035,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){ }else{ i64 i = pMem->u.i; sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc); - if( rc==1 && pMem->u.r==(double)i ){ + if( rc==1 && sqlite3RealSameAsInt(pMem->u.r, i) ){ pMem->u.i = i; MemSetTypeFlag(pMem, MEM_Int); }else{ @@ -72274,6 +73518,7 @@ static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){ assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || pVal->db==0 || pVal->db->mallocFailed ); if( pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) ){ + assert( sqlite3VdbeMemConsistentDualRep(pVal) ); return pVal->z; }else{ return 0; @@ -72296,6 +73541,7 @@ SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) ); assert( (pVal->flags & MEM_RowSet)==0 ); if( (pVal->flags&(MEM_Str|MEM_Term))==(MEM_Str|MEM_Term) && pVal->enc==enc ){ + assert( sqlite3VdbeMemConsistentDualRep(pVal) ); return pVal->z; } if( pVal->flags&MEM_Null ){ @@ -78077,14 +79323,12 @@ SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){ SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; - pCtx->fErrorOrAux = 1; sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; - pCtx->fErrorOrAux = 1; sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT); } #endif @@ -78190,8 +79434,7 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){ return SQLITE_OK; } SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ - pCtx->isError = errCode; - pCtx->fErrorOrAux = 1; + pCtx->isError = errCode ? errCode : -1; #ifdef SQLITE_DEBUG if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode; #endif @@ -78205,7 +79448,6 @@ SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_TOOBIG; - pCtx->fErrorOrAux = 1; sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1, SQLITE_UTF8, SQLITE_STATIC); } @@ -78215,7 +79457,6 @@ SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetNull(pCtx->pOut); pCtx->isError = SQLITE_NOMEM_BKPT; - pCtx->fErrorOrAux = 1; sqlite3OomFault(pCtx->pOut->db); } @@ -78622,10 +79863,7 @@ SQLITE_API void sqlite3_set_auxdata( pAuxData->iAuxArg = iArg; pAuxData->pNextAux = pVdbe->pAuxData; pVdbe->pAuxData = pAuxData; - if( pCtx->fErrorOrAux==0 ){ - pCtx->isError = 0; - pCtx->fErrorOrAux = 1; - } + if( pCtx->isError==0 ) pCtx->isError = -1; }else if( pAuxData->xDeleteAux ){ pAuxData->xDeleteAux(pAuxData->pAux); } @@ -79381,7 +80619,9 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){ Vdbe *pVdbe = (Vdbe*)pStmt; u32 v; #ifdef SQLITE_ENABLE_API_ARMOR - if( !pStmt ){ + if( !pStmt + || (op!=SQLITE_STMTSTATUS_MEMUSED && (op<0||op>=ArraySize(pVdbe->aCounter))) + ){ (void)SQLITE_MISUSE_BKPT; return 0; } @@ -80155,6 +81395,11 @@ static void applyNumericAffinity(Mem *pRec, int bTryForInt){ pRec->flags |= MEM_Real; if( bTryForInt ) sqlite3VdbeIntegerAffinity(pRec); } + /* TEXT->NUMERIC is many->one. 
Hence, it is important to invalidate the + ** string representation after computing a numeric equivalent, because the + ** string representation might not be the canonical representation for the + ** numeric value. Ticket [343634942dd54ab57b7024] 2018-01-31. */ + pRec->flags &= ~MEM_Str; } /* @@ -80623,7 +81868,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec( assert( pOp>=aOp && pOp<&aOp[p->nOp]); #ifdef VDBE_PROFILE - start = sqlite3Hwtime(); + start = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); #endif nVmStep++; #ifdef SQLITE_ENABLE_STMT_SCANSTATUS @@ -82147,18 +83392,8 @@ case OP_Or: { /* same as TK_OR, in1, in2, out3 */ int v1; /* Left operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */ int v2; /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */ - pIn1 = &aMem[pOp->p1]; - if( pIn1->flags & MEM_Null ){ - v1 = 2; - }else{ - v1 = sqlite3VdbeIntValue(pIn1)!=0; - } - pIn2 = &aMem[pOp->p2]; - if( pIn2->flags & MEM_Null ){ - v2 = 2; - }else{ - v2 = sqlite3VdbeIntValue(pIn2)!=0; - } + v1 = sqlite3VdbeBooleanValue(&aMem[pOp->p1], 2); + v2 = sqlite3VdbeBooleanValue(&aMem[pOp->p2], 2); if( pOp->opcode==OP_And ){ static const unsigned char and_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 }; v1 = and_logic[v1*3+v2]; @@ -82176,6 +83411,35 @@ case OP_Or: { /* same as TK_OR, in1, in2, out3 */ break; } +/* Opcode: IsTrue P1 P2 P3 P4 * +** Synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 +** +** This opcode implements the IS TRUE, IS FALSE, IS NOT TRUE, and +** IS NOT FALSE operators. +** +** Interpret the value in register P1 as a boolean value. Store that +** boolean (a 0 or 1) in register P2. Or if the value in register P1 is +** NULL, then the P3 is stored in register P2. Invert the answer if P4 +** is 1. +** +** The logic is summarized like this: +** +**
+**   <ul>
+**   <li> If P3==0 and P4==0  then  r[P2] := r[P1] IS TRUE
+**   <li> If P3==1 and P4==1  then  r[P2] := r[P1] IS FALSE
+**   <li> If P3==0 and P4==1  then  r[P2] := r[P1] IS NOT TRUE
+**   <li> If P3==1 and P4==0  then  r[P2] := r[P1] IS NOT FALSE
+**   </ul>
    +*/ +case OP_IsTrue: { /* in1, out2 */ + assert( pOp->p4type==P4_INT32 ); + assert( pOp->p4.i==0 || pOp->p4.i==1 ); + assert( pOp->p3==0 || pOp->p3==1 ); + sqlite3VdbeMemSetInt64(&aMem[pOp->p2], + sqlite3VdbeBooleanValue(&aMem[pOp->p1], pOp->p3) ^ pOp->p4.i); + break; +} + /* Opcode: Not P1 P2 * * * ** Synopsis: r[P2]= !r[P1] ** @@ -82186,10 +83450,10 @@ case OP_Or: { /* same as TK_OR, in1, in2, out3 */ case OP_Not: { /* same as TK_NOT, in1, out2 */ pIn1 = &aMem[pOp->p1]; pOut = &aMem[pOp->p2]; - sqlite3VdbeMemSetNull(pOut); if( (pIn1->flags & MEM_Null)==0 ){ - pOut->flags = MEM_Int; - pOut->u.i = !sqlite3VdbeIntValue(pIn1); + sqlite3VdbeMemSetInt64(pOut, !sqlite3VdbeBooleanValue(pIn1,0)); + }else{ + sqlite3VdbeMemSetNull(pOut); } break; } @@ -82256,30 +83520,25 @@ case OP_Once: { /* jump */ ** is considered true if it is numeric and non-zero. If the value ** in P1 is NULL then take the jump if and only if P3 is non-zero. */ +case OP_If: { /* jump, in1 */ + int c; + c = sqlite3VdbeBooleanValue(&aMem[pOp->p1], pOp->p3); + VdbeBranchTaken(c!=0, 2); + if( c ) goto jump_to_p2; + break; +} + /* Opcode: IfNot P1 P2 P3 * * ** ** Jump to P2 if the value in register P1 is False. The value ** is considered false if it has a numeric value of zero. If the value ** in P1 is NULL then take the jump if and only if P3 is non-zero. */ -case OP_If: /* jump, in1 */ case OP_IfNot: { /* jump, in1 */ int c; - pIn1 = &aMem[pOp->p1]; - if( pIn1->flags & MEM_Null ){ - c = pOp->p3; - }else{ -#ifdef SQLITE_OMIT_FLOATING_POINT - c = sqlite3VdbeIntValue(pIn1)!=0; -#else - c = sqlite3VdbeRealValue(pIn1)!=0.0; -#endif - if( pOp->opcode==OP_IfNot ) c = !c; - } + c = !sqlite3VdbeBooleanValue(&aMem[pOp->p1], !pOp->p3); VdbeBranchTaken(c!=0, 2); - if( c ){ - goto jump_to_p2; - } + if( c ) goto jump_to_p2; break; } @@ -82340,7 +83599,7 @@ case OP_IfNullRow: { /* jump */ ** P2 is the column number for the argument to the sqlite_offset() function. ** This opcode does not use P2 itself, but the P2 value is used by the ** code generator. The P1, P2, and P3 operands to this opcode are the -** as as for OP_Column. +** same as for OP_Column. ** ** This opcode is only available if SQLite is compiled with the ** -DSQLITE_ENABLE_OFFSET_SQL_FUNC option. 
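The OP_IsTrue opcode added above collapses IS TRUE, IS FALSE, IS NOT TRUE and IS NOT FALSE into one computation: coalesce the boolean value of register P1 with P3, then XOR with P4, using the P3/P4 pairs listed in the opcode comment. A small sketch of that arithmetic with a driver for the NULL cases (names are illustrative, not part of the patch):

    #include <stdio.h>

    /* result = coalesce(boolean(P1), P3) ^ P4, as in OP_IsTrue above.
    ** P3/P4 pairs: (0,0) IS TRUE, (1,1) IS FALSE,
    **              (0,1) IS NOT TRUE, (1,0) IS NOT FALSE.
    ** isNull stands in for a register holding SQL NULL. */
    static int is_true_op(int boolVal, int isNull, int p3, int p4){
      int b = isNull ? p3 : (boolVal!=0);
      return b ^ p4;
    }

    int main(void){
      printf("NULL IS TRUE      -> %d\n", is_true_op(0, 1, 0, 0));  /* 0 */
      printf("NULL IS FALSE     -> %d\n", is_true_op(0, 1, 1, 1));  /* 0 */
      printf("NULL IS NOT TRUE  -> %d\n", is_true_op(0, 1, 0, 1));  /* 1 */
      printf("NULL IS NOT FALSE -> %d\n", is_true_op(0, 1, 1, 0));  /* 1 */
      return 0;
    }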
@@ -84248,6 +85507,10 @@ case OP_NewRowid: { /* out2 */ pOut = out2Prerelease(p, pOp); assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; + if( !pC->isTable ){ + rc = SQLITE_CORRUPT_BKPT; + goto abort_due_to_error; + } assert( pC!=0 ); assert( pC->eCurType==CURTYPE_BTREE ); assert( pC->uc.pCursor!=0 ); @@ -86184,12 +87447,17 @@ case OP_AggStep0: { assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem+1 - p->nCursor)+1) ); assert( pOp->p3p2 || pOp->p3>=pOp->p2+n ); - pCtx = sqlite3DbMallocRawNN(db, sizeof(*pCtx) + (n-1)*sizeof(sqlite3_value*)); + pCtx = sqlite3DbMallocRawNN(db, n*sizeof(sqlite3_value*) + + (sizeof(pCtx[0]) + sizeof(Mem) - sizeof(sqlite3_value*))); if( pCtx==0 ) goto no_mem; pCtx->pMem = 0; + pCtx->pOut = (Mem*)&(pCtx->argv[n]); + sqlite3VdbeMemInit(pCtx->pOut, db, MEM_Null); pCtx->pFunc = pOp->p4.pFunc; pCtx->iOp = (int)(pOp - aOp); pCtx->pVdbe = p; + pCtx->skipFlag = 0; + pCtx->isError = 0; pCtx->argc = n; pOp->p4type = P4_FUNCCTX; pOp->p4.pCtx = pCtx; @@ -86200,7 +87468,6 @@ case OP_AggStep: { int i; sqlite3_context *pCtx; Mem *pMem; - Mem t; assert( pOp->p4type==P4_FUNCCTX ); pCtx = pOp->p4.pCtx; @@ -86223,26 +87490,28 @@ case OP_AggStep: { #endif pMem->n++; - sqlite3VdbeMemInit(&t, db, MEM_Null); - pCtx->pOut = &t; - pCtx->fErrorOrAux = 0; - pCtx->skipFlag = 0; + assert( pCtx->pOut->flags==MEM_Null ); + assert( pCtx->isError==0 ); + assert( pCtx->skipFlag==0 ); (pCtx->pFunc->xSFunc)(pCtx,pCtx->argc,pCtx->argv); /* IMP: R-24505-23230 */ - if( pCtx->fErrorOrAux ){ - if( pCtx->isError ){ - sqlite3VdbeError(p, "%s", sqlite3_value_text(&t)); + if( pCtx->isError ){ + if( pCtx->isError>0 ){ + sqlite3VdbeError(p, "%s", sqlite3_value_text(pCtx->pOut)); rc = pCtx->isError; } - sqlite3VdbeMemRelease(&t); + if( pCtx->skipFlag ){ + assert( pOp[-1].opcode==OP_CollSeq ); + i = pOp[-1].p1; + if( i ) sqlite3VdbeMemSetInt64(&aMem[i], 1); + pCtx->skipFlag = 0; + } + sqlite3VdbeMemRelease(pCtx->pOut); + pCtx->pOut->flags = MEM_Null; + pCtx->isError = 0; if( rc ) goto abort_due_to_error; - }else{ - assert( t.flags==MEM_Null ); - } - if( pCtx->skipFlag ){ - assert( pOp[-1].opcode==OP_CollSeq ); - i = pOp[-1].p1; - if( i ) sqlite3VdbeMemSetInt64(&aMem[i], 1); } + assert( pCtx->pOut->flags==MEM_Null ); + assert( pCtx->skipFlag==0 ); break; } @@ -86729,7 +87998,8 @@ case OP_VColumn: { } rc = pModule->xColumn(pCur->uc.pVCur, &sContext, pOp->p2); sqlite3VtabImportErrmsg(p, pVtab); - if( sContext.isError ){ + if( sContext.isError>0 ){ + sqlite3VdbeError(p, "%s", sqlite3_value_text(pDest)); rc = sContext.isError; } sqlite3VdbeChangeEncoding(pDest, encoding); @@ -86994,6 +88264,7 @@ case OP_Function0: { pCtx->pFunc = pOp->p4.pFunc; pCtx->iOp = (int)(pOp - aOp); pCtx->pVdbe = p; + pCtx->isError = 0; pCtx->argc = n; pOp->p4type = P4_FUNCCTX; pOp->p4.pCtx = pCtx; @@ -87028,16 +88299,17 @@ case OP_Function: { } #endif MemSetTypeFlag(pOut, MEM_Null); - pCtx->fErrorOrAux = 0; + assert( pCtx->isError==0 ); (*pCtx->pFunc->xSFunc)(pCtx, pCtx->argc, pCtx->argv);/* IMP: R-24505-23230 */ /* If the function returned an error, throw an exception */ - if( pCtx->fErrorOrAux ){ - if( pCtx->isError ){ + if( pCtx->isError ){ + if( pCtx->isError>0 ){ sqlite3VdbeError(p, "%s", sqlite3_value_text(pOut)); rc = pCtx->isError; } sqlite3VdbeDeleteAuxData(db, &p->pAuxData, pCtx->iOp, pOp->p1); + pCtx->isError = 0; if( rc ) goto abort_due_to_error; } @@ -87079,8 +88351,10 @@ case OP_Function: { */ case OP_Trace: case OP_Init: { /* jump */ - char *zTrace; int i; 
+#ifndef SQLITE_OMIT_TRACE + char *zTrace; +#endif /* If the P4 argument is not NULL, then it must be an SQL comment string. ** The "--" string is broken up to prevent false-positives with srcck1.c. @@ -87197,7 +88471,7 @@ default: { /* This is really OP_Noop and OP_Explain */ #ifdef VDBE_PROFILE { - u64 endTime = sqlite3Hwtime(); + u64 endTime = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); if( endTime>start ) pOrigOp->cycles += endTime - start; pOrigOp->cnt++; } @@ -91584,10 +92858,16 @@ static int lookupName( ** Because no reference was made to outer contexts, the pNC->nRef ** fields are not changed in any context. */ - if( cnt==0 && zTab==0 && ExprHasProperty(pExpr,EP_DblQuoted) ){ - pExpr->op = TK_STRING; - pExpr->pTab = 0; - return WRC_Prune; + if( cnt==0 && zTab==0 ){ + assert( pExpr->op==TK_ID ); + if( ExprHasProperty(pExpr,EP_DblQuoted) ){ + pExpr->op = TK_STRING; + pExpr->pTab = 0; + return WRC_Prune; + } + if( sqlite3ExprIdToTrueFalse(pExpr) ){ + return WRC_Prune; + } } /* @@ -91936,15 +93216,30 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ notValid(pParse, pNC, "parameters", NC_IsCheck|NC_PartIdx|NC_IdxExpr); break; } + case TK_IS: + case TK_ISNOT: { + Expr *pRight; + assert( !ExprHasProperty(pExpr, EP_Reduced) ); + /* Handle special cases of "x IS TRUE", "x IS FALSE", "x IS NOT TRUE", + ** and "x IS NOT FALSE". */ + if( (pRight = pExpr->pRight)->op==TK_ID ){ + int rc = resolveExprStep(pWalker, pRight); + if( rc==WRC_Abort ) return WRC_Abort; + if( pRight->op==TK_TRUEFALSE ){ + pExpr->op2 = pExpr->op; + pExpr->op = TK_TRUTH; + return WRC_Continue; + } + } + /* Fall thru */ + } case TK_BETWEEN: case TK_EQ: case TK_NE: case TK_LT: case TK_LE: case TK_GT: - case TK_GE: - case TK_IS: - case TK_ISNOT: { + case TK_GE: { int nLeft, nRight; if( pParse->db->mallocFailed ) break; assert( pExpr->pLeft!=0 ); @@ -94419,6 +95714,34 @@ SQLITE_PRIVATE int sqlite3SelectWalkFail(Walker *pWalker, Select *NotUsed){ return WRC_Abort; } +/* +** If the input expression is an ID with the name "true" or "false" +** then convert it into an TK_TRUEFALSE term. Return non-zero if +** the conversion happened, and zero if the expression is unaltered. +*/ +SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr *pExpr){ + assert( pExpr->op==TK_ID || pExpr->op==TK_STRING ); + if( sqlite3StrICmp(pExpr->u.zToken, "true")==0 + || sqlite3StrICmp(pExpr->u.zToken, "false")==0 + ){ + pExpr->op = TK_TRUEFALSE; + return 1; + } + return 0; +} + +/* +** The argument must be a TK_TRUEFALSE Expr node. Return 1 if it is TRUE +** and 0 if it is FALSE. +*/ +SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr *pExpr){ + assert( pExpr->op==TK_TRUEFALSE ); + assert( sqlite3StrICmp(pExpr->u.zToken,"true")==0 + || sqlite3StrICmp(pExpr->u.zToken,"false")==0 ); + return pExpr->u.zToken[4]==0; +} + + /* ** These routines are Walker callbacks used to check expressions to ** see if they are "constant" for some definition of constant. 
The @@ -94466,6 +95789,12 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ return WRC_Abort; } case TK_ID: + /* Convert "true" or "false" in a DEFAULT clause into the + ** appropriate TK_TRUEFALSE operator */ + if( sqlite3ExprIdToTrueFalse(pExpr) ){ + return WRC_Prune; + } + /* Fall thru */ case TK_COLUMN: case TK_AGG_FUNCTION: case TK_AGG_COLUMN: @@ -96230,6 +97559,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) codeInteger(pParse, pExpr, 0, target); return target; } + case TK_TRUEFALSE: { + sqlite3VdbeAddOp2(v, OP_Integer, sqlite3ExprTruthValue(pExpr), target); + return target; + } #ifndef SQLITE_OMIT_FLOATING_POINT case TK_FLOAT: { assert( !ExprHasProperty(pExpr, EP_IntValue) ); @@ -96385,6 +97718,18 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) sqlite3VdbeAddOp2(v, op, r1, inReg); break; } + case TK_TRUTH: { + int isTrue; /* IS TRUE or IS NOT TRUE */ + int bNormal; /* IS TRUE or IS FALSE */ + r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + testcase( regFree1==0 ); + isTrue = sqlite3ExprTruthValue(pExpr->pRight); + bNormal = pExpr->op2==TK_IS; + testcase( isTrue && bNormal); + testcase( !isTrue && bNormal); + sqlite3VdbeAddOp4Int(v, OP_IsTrue, r1, inReg, !isTrue, isTrue ^ bNormal); + break; + } case TK_ISNULL: case TK_NOTNULL: { int addr; @@ -97160,6 +98505,23 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); break; } + case TK_TRUTH: { + int isNot; /* IS NOT TRUE or IS NOT FALSE */ + int isTrue; /* IS TRUE or IS NOT TRUE */ + testcase( jumpIfNull==0 ); + isNot = pExpr->op2==TK_ISNOT; + isTrue = sqlite3ExprTruthValue(pExpr->pRight); + testcase( isTrue && isNot ); + testcase( !isTrue && isNot ); + if( isTrue ^ isNot ){ + sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, + isNot ? SQLITE_JUMPIFNULL : 0); + }else{ + sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, + isNot ? SQLITE_JUMPIFNULL : 0); + } + break; + } case TK_IS: case TK_ISNOT: testcase( op==TK_IS ); @@ -97314,6 +98676,26 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); break; } + case TK_TRUTH: { + int isNot; /* IS NOT TRUE or IS NOT FALSE */ + int isTrue; /* IS TRUE or IS NOT TRUE */ + testcase( jumpIfNull==0 ); + isNot = pExpr->op2==TK_ISNOT; + isTrue = sqlite3ExprTruthValue(pExpr->pRight); + testcase( isTrue && isNot ); + testcase( !isTrue && isNot ); + if( isTrue ^ isNot ){ + /* IS TRUE and IS NOT FALSE */ + sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, + isNot ? 0 : SQLITE_JUMPIFNULL); + + }else{ + /* IS FALSE and IS NOT TRUE */ + sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, + isNot ? 0 : SQLITE_JUMPIFNULL); + } + break; + } case TK_IS: case TK_ISNOT: testcase( pExpr->op==TK_IS ); @@ -97601,6 +98983,80 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Parse *pParse, Expr *pE1, Expr *pE2, i return 0; } +/* +** This is the Expr node callback for sqlite3ExprImpliesNotNullRow(). +** If the expression node requires that the table at pWalker->iCur +** have a non-NULL column, then set pWalker->eCode to 1 and abort. +*/ +static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){ + /* This routine is only called for WHERE clause expressions and so it + ** cannot have any TK_AGG_COLUMN entries because those are only found + ** in HAVING clauses. 
We can get a TK_AGG_FUNCTION in a WHERE clause, + ** but that is an illegal construct and the query will be rejected at + ** a later stage of processing, so the TK_AGG_FUNCTION case does not + ** need to be considered here. */ + assert( pExpr->op!=TK_AGG_COLUMN ); + testcase( pExpr->op==TK_AGG_FUNCTION ); + + if( ExprHasProperty(pExpr, EP_FromJoin) ) return WRC_Prune; + switch( pExpr->op ){ + case TK_ISNULL: + case TK_IS: + case TK_OR: + case TK_CASE: + case TK_IN: + case TK_FUNCTION: + testcase( pExpr->op==TK_ISNULL ); + testcase( pExpr->op==TK_IS ); + testcase( pExpr->op==TK_OR ); + testcase( pExpr->op==TK_CASE ); + testcase( pExpr->op==TK_IN ); + testcase( pExpr->op==TK_FUNCTION ); + return WRC_Prune; + case TK_COLUMN: + if( pWalker->u.iCur==pExpr->iTable ){ + pWalker->eCode = 1; + return WRC_Abort; + } + return WRC_Prune; + default: + return WRC_Continue; + } +} + +/* +** Return true (non-zero) if expression p can only be true if at least +** one column of table iTab is non-null. In other words, return true +** if expression p will always be NULL or false if every column of iTab +** is NULL. +** +** False negatives are acceptable. In other words, it is ok to return +** zero even if expression p will never be true of every column of iTab +** is NULL. A false negative is merely a missed optimization opportunity. +** +** False positives are not allowed, however. A false positive may result +** in an incorrect answer. +** +** Terms of p that are marked with EP_FromJoin (and hence that come from +** the ON or USING clauses of LEFT JOINS) are excluded from the analysis. +** +** This routine is used to check if a LEFT JOIN can be converted into +** an ordinary JOIN. The p argument is the WHERE clause. If the WHERE +** clause requires that some column of the right table of the LEFT JOIN +** be non-NULL, then the LEFT JOIN can be safely converted into an +** ordinary join. +*/ +SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){ + Walker w; + w.xExprCallback = impliesNotNullRow; + w.xSelectCallback = 0; + w.xSelectCallback2 = 0; + w.eCode = 0; + w.u.iCur = iTab; + sqlite3WalkExpr(&w, p); + return w.eCode; +} + /* ** An instance of the following structure is used by the tree walker ** to determine if an expression can be evaluated by reference to the @@ -99845,7 +101301,7 @@ static void analyzeOneTable( /* Do not gather statistics on views or virtual tables */ return; } - if( sqlite3_strlike("sqlite_%", pTab->zName, 0)==0 ){ + if( sqlite3_strlike("sqlite\\_%", pTab->zName, '\\')==0 ){ /* Do not gather statistics on system tables */ return; } @@ -100824,6 +102280,10 @@ static int resolveAttachExpr(NameContext *pName, Expr *pExpr) ** ** If the optional "KEY z" syntax is omitted, an SQL NULL is passed as the ** third argument. +** +** If the db->init.reopenMemdb flags is set, then instead of attaching a +** new database, close the database on db->init.iDb and reopen it as an +** empty MemDB. */ static void attachFunc( sqlite3_context *context, @@ -100844,65 +102304,85 @@ static void attachFunc( sqlite3_vfs *pVfs; UNUSED_PARAMETER(NotUsed); - zFile = (const char *)sqlite3_value_text(argv[0]); zName = (const char *)sqlite3_value_text(argv[1]); if( zFile==0 ) zFile = ""; if( zName==0 ) zName = ""; - /* Check for the following errors: - ** - ** * Too many attached databases, - ** * Transaction currently open - ** * Specified database name already being used. 
- */ - if( db->nDb>=db->aLimit[SQLITE_LIMIT_ATTACHED]+2 ){ - zErrDyn = sqlite3MPrintf(db, "too many attached databases - max %d", - db->aLimit[SQLITE_LIMIT_ATTACHED] - ); - goto attach_error; - } - for(i=0; inDb; i++){ - char *z = db->aDb[i].zDbSName; - assert( z && zName ); - if( sqlite3StrICmp(z, zName)==0 ){ - zErrDyn = sqlite3MPrintf(db, "database %s is already in use", zName); +#ifdef SQLITE_ENABLE_DESERIALIZE +# define REOPEN_AS_MEMDB(db) (db->init.reopenMemdb) +#else +# define REOPEN_AS_MEMDB(db) (0) +#endif + + if( REOPEN_AS_MEMDB(db) ){ + /* This is not a real ATTACH. Instead, this routine is being called + ** from sqlite3_deserialize() to close database db->init.iDb and + ** reopen it as a MemDB */ + pVfs = sqlite3_vfs_find("memdb"); + if( pVfs==0 ) return; + pNew = &db->aDb[db->init.iDb]; + if( pNew->pBt ) sqlite3BtreeClose(pNew->pBt); + pNew->pBt = 0; + pNew->pSchema = 0; + rc = sqlite3BtreeOpen(pVfs, "x", db, &pNew->pBt, 0, SQLITE_OPEN_MAIN_DB); + }else{ + /* This is a real ATTACH + ** + ** Check for the following errors: + ** + ** * Too many attached databases, + ** * Transaction currently open + ** * Specified database name already being used. + */ + if( db->nDb>=db->aLimit[SQLITE_LIMIT_ATTACHED]+2 ){ + zErrDyn = sqlite3MPrintf(db, "too many attached databases - max %d", + db->aLimit[SQLITE_LIMIT_ATTACHED] + ); goto attach_error; } + for(i=0; inDb; i++){ + char *z = db->aDb[i].zDbSName; + assert( z && zName ); + if( sqlite3StrICmp(z, zName)==0 ){ + zErrDyn = sqlite3MPrintf(db, "database %s is already in use", zName); + goto attach_error; + } + } + + /* Allocate the new entry in the db->aDb[] array and initialize the schema + ** hash tables. + */ + if( db->aDb==db->aDbStatic ){ + aNew = sqlite3DbMallocRawNN(db, sizeof(db->aDb[0])*3 ); + if( aNew==0 ) return; + memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2); + }else{ + aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); + if( aNew==0 ) return; + } + db->aDb = aNew; + pNew = &db->aDb[db->nDb]; + memset(pNew, 0, sizeof(*pNew)); + + /* Open the database file. If the btree is successfully opened, use + ** it to obtain the database schema. At this point the schema may + ** or may not be initialized. + */ + flags = db->openFlags; + rc = sqlite3ParseUri(db->pVfs->zName, zFile, &flags, &pVfs, &zPath, &zErr); + if( rc!=SQLITE_OK ){ + if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + assert( pVfs ); + flags |= SQLITE_OPEN_MAIN_DB; + rc = sqlite3BtreeOpen(pVfs, zPath, db, &pNew->pBt, 0, flags); + sqlite3_free( zPath ); + db->nDb++; } - - /* Allocate the new entry in the db->aDb[] array and initialize the schema - ** hash tables. - */ - if( db->aDb==db->aDbStatic ){ - aNew = sqlite3DbMallocRawNN(db, sizeof(db->aDb[0])*3 ); - if( aNew==0 ) return; - memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2); - }else{ - aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); - if( aNew==0 ) return; - } - db->aDb = aNew; - pNew = &db->aDb[db->nDb]; - memset(pNew, 0, sizeof(*pNew)); - - /* Open the database file. If the btree is successfully opened, use - ** it to obtain the database schema. At this point the schema may - ** or may not be initialized. 
- */ - flags = db->openFlags; - rc = sqlite3ParseUri(db->pVfs->zName, zFile, &flags, &pVfs, &zPath, &zErr); - if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); - return; - } - assert( pVfs ); - flags |= SQLITE_OPEN_MAIN_DB; - rc = sqlite3BtreeOpen(pVfs, zPath, db, &pNew->pBt, 0, flags); - sqlite3_free( zPath ); - db->nDb++; db->skipBtreeMutex = 0; if( rc==SQLITE_CONSTRAINT ){ rc = SQLITE_ERROR; @@ -100929,7 +102409,7 @@ static void attachFunc( sqlite3BtreeLeave(pNew->pBt); } pNew->safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1; - pNew->zDbSName = sqlite3DbStrDup(db, zName); + if( !REOPEN_AS_MEMDB(db) ) pNew->zDbSName = sqlite3DbStrDup(db, zName); if( rc==SQLITE_OK && pNew->zDbSName==0 ){ rc = SQLITE_NOMEM_BKPT; } @@ -100969,13 +102449,15 @@ static void attachFunc( /* If the file was opened successfully, read the schema for the new database. ** If this fails, or if opening the file failed, then close the file and - ** remove the entry from the db->aDb[] array. i.e. put everything back the way - ** we found it. + ** remove the entry from the db->aDb[] array. i.e. put everything back the + ** way we found it. */ if( rc==SQLITE_OK ){ sqlite3BtreeEnterAll(db); + db->init.iDb = 0; rc = sqlite3Init(db, &zErrDyn); sqlite3BtreeLeaveAll(db); + assert( zErrDyn==0 || rc!=SQLITE_OK ); } #ifdef SQLITE_USER_AUTHENTICATION if( rc==SQLITE_OK ){ @@ -100987,21 +102469,23 @@ static void attachFunc( } #endif if( rc ){ - int iDb = db->nDb - 1; - assert( iDb>=2 ); - if( db->aDb[iDb].pBt ){ - sqlite3BtreeClose(db->aDb[iDb].pBt); - db->aDb[iDb].pBt = 0; - db->aDb[iDb].pSchema = 0; - } - sqlite3ResetAllSchemasOfConnection(db); - db->nDb = iDb; - if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ - sqlite3OomFault(db); - sqlite3DbFree(db, zErrDyn); - zErrDyn = sqlite3MPrintf(db, "out of memory"); - }else if( zErrDyn==0 ){ - zErrDyn = sqlite3MPrintf(db, "unable to open database: %s", zFile); + if( !REOPEN_AS_MEMDB(db) ){ + int iDb = db->nDb - 1; + assert( iDb>=2 ); + if( db->aDb[iDb].pBt ){ + sqlite3BtreeClose(db->aDb[iDb].pBt); + db->aDb[iDb].pBt = 0; + db->aDb[iDb].pSchema = 0; + } + sqlite3ResetAllSchemasOfConnection(db); + db->nDb = iDb; + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + sqlite3OomFault(db); + sqlite3DbFree(db, zErrDyn); + zErrDyn = sqlite3MPrintf(db, "out of memory"); + }else if( zErrDyn==0 ){ + zErrDyn = sqlite3MPrintf(db, "unable to open database: %s", zFile); + } } goto attach_error; } @@ -101273,6 +102757,14 @@ SQLITE_PRIVATE int sqlite3FixSelect( if( sqlite3FixExpr(pFix, pSelect->pLimit) ){ return 1; } + if( pSelect->pWith ){ + int i; + for(i=0; ipWith->nCte; i++){ + if( sqlite3FixSelect(pFix, pSelect->pWith->a[i].pSelect) ){ + return 1; + } + } + } pSelect = pSelect->pPrior; } return 0; @@ -102736,10 +104228,24 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ */ SQLITE_PRIVATE void sqlite3AddNotNull(Parse *pParse, int onError){ Table *p; + Column *pCol; p = pParse->pNewTable; if( p==0 || NEVER(p->nCol<1) ) return; - p->aCol[p->nCol-1].notNull = (u8)onError; + pCol = &p->aCol[p->nCol-1]; + pCol->notNull = (u8)onError; p->tabFlags |= TF_HasNotNull; + + /* Set the uniqNotNull flag on any UNIQUE or PK indexes already created + ** on this column. 
*/ + if( pCol->colFlags & COLFLAG_UNIQUE ){ + Index *pIdx; + for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){ + assert( pIdx->nKeyCol==1 && pIdx->onError!=OE_None ); + if( pIdx->aiColumn[0]==p->nCol-1 ){ + pIdx->uniqNotNull = 1; + } + } + } } /* @@ -102856,7 +104362,7 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue( pCol->zName); }else{ /* A copy of pExpr is used instead of the original, as pExpr contains - ** tokens that point to volatile memory. + ** tokens that point to volatile memory. */ Expr x; sqlite3ExprDelete(db, pCol->pDflt); @@ -103474,8 +104980,6 @@ SQLITE_PRIVATE void sqlite3EndTable( p = pParse->pNewTable; if( p==0 ) return; - assert( !db->init.busy || !pSelect ); - /* If the db->init.busy is 1 it means we are reading the SQL off the ** "sqlite_master" or "sqlite_temp_master" table on the disk. ** So do not write to the disk again. Extract the root page number @@ -103486,6 +104990,10 @@ SQLITE_PRIVATE void sqlite3EndTable( ** table itself. So mark it read-only. */ if( db->init.busy ){ + if( pSelect ){ + sqlite3ErrorMsg(pParse, ""); + return; + } p->tnum = db->init.newTnum; if( p->tnum==1 ) p->tabFlags |= TF_Readonly; } @@ -103776,7 +105284,7 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ int nErr = 0; /* Number of errors encountered */ int n; /* Temporarily holds the number of cursors assigned */ sqlite3 *db = pParse->db; /* Database connection for malloc errors */ -#ifndef SQLITE_OMIT_VIRTUALTABLE +#ifndef SQLITE_OMIT_VIRTUALTABLE int rc; #endif #ifndef SQLITE_OMIT_AUTHORIZATION @@ -104703,7 +106211,9 @@ SQLITE_PRIVATE void sqlite3CreateIndex( */ if( pList==0 ){ Token prevCol; - sqlite3TokenInit(&prevCol, pTab->aCol[pTab->nCol-1].zName); + Column *pCol = &pTab->aCol[pTab->nCol-1]; + pCol->colFlags |= COLFLAG_UNIQUE; + sqlite3TokenInit(&prevCol, pCol->zName); pList = sqlite3ExprListAppend(pParse, 0, sqlite3ExprAlloc(db, TK_ID, &prevCol, 0)); if( pList==0 ) goto exit_create_index; @@ -107548,6 +109058,8 @@ static CollSeq *sqlite3GetFuncCollSeq(sqlite3_context *context){ ** iteration of the aggregate loop. 
*/ static void sqlite3SkipAccumulatorLoad(sqlite3_context *context){ + assert( context->isError<=0 ); + context->isError = -1; context->skipFlag = 1; } @@ -107614,8 +109126,6 @@ static void lengthFunc( int argc, sqlite3_value **argv ){ - int len; - assert( argc==1 ); UNUSED_PARAMETER(argc); switch( sqlite3_value_type(argv[0]) ){ @@ -107627,13 +109137,17 @@ static void lengthFunc( } case SQLITE_TEXT: { const unsigned char *z = sqlite3_value_text(argv[0]); + const unsigned char *z0; + unsigned char c; if( z==0 ) return; - len = 0; - while( *z ){ - len++; - SQLITE_SKIP_UTF8(z); + z0 = z; + while( (c = *z)!=0 ){ + z++; + if( c>=0xc0 ){ + while( (*z & 0xc0)==0x80 ){ z++; z0++; } + } } - sqlite3_result_int(context, len); + sqlite3_result_int(context, (int)(z-z0)); break; } default: { @@ -108708,6 +110222,8 @@ static void replaceFunc( i64 nOut; /* Maximum size of zOut */ int loopLimit; /* Last zStr[] that might match zPattern[] */ int i, j; /* Loop counters */ + unsigned cntExpand; /* Number zOut expansions */ + sqlite3 *db = sqlite3_context_db_handle(context); assert( argc==3 ); UNUSED_PARAMETER(argc); @@ -108739,33 +110255,40 @@ static void replaceFunc( return; } loopLimit = nStr - nPattern; + cntExpand = 0; for(i=j=0; i<=loopLimit; i++){ if( zStr[i]!=zPattern[0] || memcmp(&zStr[i], zPattern, nPattern) ){ zOut[j++] = zStr[i]; }else{ - u8 *zOld; - sqlite3 *db = sqlite3_context_db_handle(context); - nOut += nRep - nPattern; - testcase( nOut-1==db->aLimit[SQLITE_LIMIT_LENGTH] ); - testcase( nOut-2==db->aLimit[SQLITE_LIMIT_LENGTH] ); - if( nOut-1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ - sqlite3_result_error_toobig(context); - sqlite3_free(zOut); - return; - } - zOld = zOut; - zOut = sqlite3_realloc64(zOut, (int)nOut); - if( zOut==0 ){ - sqlite3_result_error_nomem(context); - sqlite3_free(zOld); - return; + if( nRep>nPattern ){ + nOut += nRep - nPattern; + testcase( nOut-1==db->aLimit[SQLITE_LIMIT_LENGTH] ); + testcase( nOut-2==db->aLimit[SQLITE_LIMIT_LENGTH] ); + if( nOut-1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ + sqlite3_result_error_toobig(context); + sqlite3_free(zOut); + return; + } + cntExpand++; + if( (cntExpand&(cntExpand-1))==0 ){ + /* Grow the size of the output buffer only on substitutions + ** whose index is a power of two: 1, 2, 4, 8, 16, 32, ... */ + u8 *zOld; + zOld = zOut; + zOut = sqlite3_realloc64(zOut, (int)nOut + (nOut - nStr - 1)); + if( zOut==0 ){ + sqlite3_result_error_nomem(context); + sqlite3_free(zOld); + return; + } + } } memcpy(&zOut[j], zRep, nRep); j += nRep; i += nPattern-1; } } - assert( j+nStr-i+1==nOut ); + assert( j+nStr-i+1<=nOut ); memcpy(&zOut[j], &zStr[i], nStr-i); j += nStr - i; assert( j<=nOut ); @@ -111047,11 +112570,12 @@ static int readsTable(Parse *p, int iDb, Table *pTab){ ** first use of table pTab. On 2nd and subsequent uses, the original ** AutoincInfo structure is used. ** -** Three memory locations are allocated: +** Four consecutive registers are allocated: ** -** (1) Register to hold the name of the pTab table. -** (2) Register to hold the maximum ROWID of pTab. -** (3) Register to hold the rowid in sqlite_sequence of pTab +** (1) The name of the pTab table. +** (2) The maximum ROWID of pTab. +** (3) The rowid in sqlite_sequence of pTab +** (4) The original value of the max ROWID in pTab, or NULL if none ** ** The 2nd register is the one that is returned. That is all the ** insert routine needs to know about. 
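The rewritten lengthFunc() in the hunk above counts characters in a single pass by treating every byte that is not a UTF-8 continuation byte (10xxxxxx) as the start of a character. A standalone sketch of that counting rule, assuming a NUL-terminated, well-formed UTF-8 string (helper name is illustrative only):

    #include <stdio.h>

    /* Count UTF-8 characters: every byte except the 10xxxxxx continuation
    ** bytes begins a character, which is the property the new lengthFunc()
    ** relies on.  Assumes well-formed, NUL-terminated UTF-8. */
    static int utf8_char_count(const unsigned char *z){
      int n = 0;
      for(; *z; z++){
        if( (*z & 0xc0)!=0x80 ) n++;
      }
      return n;
    }

    int main(void){
      printf("%d\n", utf8_char_count((const unsigned char*)"abc"));        /* 3 */
      printf("%d\n", utf8_char_count((const unsigned char*)"a\xC3\xA9z")); /* 3 */
      return 0;
    }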
@@ -111079,7 +112603,7 @@ static int autoIncBegin( pInfo->iDb = iDb; pToplevel->nMem++; /* Register to hold name of table */ pInfo->regCtr = ++pToplevel->nMem; /* Max rowid register */ - pToplevel->nMem++; /* Rowid in sqlite_sequence */ + pToplevel->nMem +=2; /* Rowid in sqlite_sequence + orig max val */ } memId = pInfo->regCtr; } @@ -111107,15 +112631,17 @@ SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse){ static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList autoInc[] = { /* 0 */ {OP_Null, 0, 0, 0}, - /* 1 */ {OP_Rewind, 0, 9, 0}, + /* 1 */ {OP_Rewind, 0, 10, 0}, /* 2 */ {OP_Column, 0, 0, 0}, - /* 3 */ {OP_Ne, 0, 7, 0}, + /* 3 */ {OP_Ne, 0, 9, 0}, /* 4 */ {OP_Rowid, 0, 0, 0}, /* 5 */ {OP_Column, 0, 1, 0}, - /* 6 */ {OP_Goto, 0, 9, 0}, - /* 7 */ {OP_Next, 0, 2, 0}, - /* 8 */ {OP_Integer, 0, 0, 0}, - /* 9 */ {OP_Close, 0, 0, 0} + /* 6 */ {OP_AddImm, 0, 0, 0}, + /* 7 */ {OP_Copy, 0, 0, 0}, + /* 8 */ {OP_Goto, 0, 11, 0}, + /* 9 */ {OP_Next, 0, 2, 0}, + /* 10 */ {OP_Integer, 0, 0, 0}, + /* 11 */ {OP_Close, 0, 0, 0} }; VdbeOp *aOp; pDb = &db->aDb[p->iDb]; @@ -111126,14 +112652,17 @@ SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse){ aOp = sqlite3VdbeAddOpList(v, ArraySize(autoInc), autoInc, iLn); if( aOp==0 ) break; aOp[0].p2 = memId; - aOp[0].p3 = memId+1; + aOp[0].p3 = memId+2; aOp[2].p3 = memId; aOp[3].p1 = memId-1; aOp[3].p3 = memId; aOp[3].p5 = SQLITE_JUMPIFNULL; aOp[4].p2 = memId+1; aOp[5].p3 = memId; - aOp[8].p2 = memId; + aOp[6].p1 = memId; + aOp[7].p2 = memId+2; + aOp[7].p1 = memId; + aOp[10].p2 = memId; } } @@ -111180,6 +112709,8 @@ static SQLITE_NOINLINE void autoIncrementEnd(Parse *pParse){ iRec = sqlite3GetTempReg(pParse); assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); + sqlite3VdbeAddOp3(v, OP_Le, memId+2, sqlite3VdbeCurrentAddr(v)+7, memId); + VdbeCoverage(v); sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); aOp = sqlite3VdbeAddOpList(v, ArraySize(autoIncEnd), autoIncEnd, iLn); if( aOp==0 ) break; @@ -113828,8 +115359,8 @@ typedef int (*sqlite3_loadext_entry)( #define sqlite3_value_pointer sqlite3_api->value_pointer /* Version 3.22.0 and later */ #define sqlite3_vtab_nochange sqlite3_api->vtab_nochange -#define sqlite3_value_nochange sqltie3_api->value_nochange -#define sqlite3_vtab_collation sqltie3_api->vtab_collation +#define sqlite3_value_nochange sqlite3_api->value_nochange +#define sqlite3_vtab_collation sqlite3_api->vtab_collation #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -117813,7 +119344,7 @@ static void corruptSchema( char *z; if( zObj==0 ) zObj = "?"; z = sqlite3MPrintf(db, "malformed database schema (%s)", zObj); - if( zExtra ) z = sqlite3MPrintf(db, "%z - %s", z, zExtra); + if( zExtra && zExtra[0] ) z = sqlite3MPrintf(db, "%z - %s", z, zExtra); sqlite3DbFree(db, *pData->pzErrMsg); *pData->pzErrMsg = z; } @@ -118707,8 +120238,7 @@ SQLITE_API int sqlite3_prepare16_v3( /***/ int sqlite3SelectTrace = 0; # define SELECTTRACE(K,P,S,X) \ if(sqlite3SelectTrace&(K)) \ - sqlite3DebugPrintf("%*s%s.%p: ",(P)->nSelectIndent*2-2,"",\ - (S)->zSelName,(S)),\ + sqlite3DebugPrintf("%s/%p: ",(S)->zSelName,(S)),\ sqlite3DebugPrintf X #else # define SELECTTRACE(K,P,S,X) @@ -119069,6 +120599,29 @@ static void setJoinExpr(Expr *p, int iTable){ } } +/* Undo the work of setJoinExpr(). 
In the expression tree p, convert every +** term that is marked with EP_FromJoin and iRightJoinTable==iTable into +** an ordinary term that omits the EP_FromJoin mark. +** +** This happens when a LEFT JOIN is simplified into an ordinary JOIN. +*/ +static void unsetJoinExpr(Expr *p, int iTable){ + while( p ){ + if( ExprHasProperty(p, EP_FromJoin) + && (iTable<0 || p->iRightJoinTable==iTable) ){ + ExprClearProperty(p, EP_FromJoin); + } + if( p->op==TK_FUNCTION && p->x.pList ){ + int i; + for(i=0; ix.pList->nExpr; i++){ + unsetJoinExpr(p->x.pList->a[i].pExpr, iTable); + } + } + unsetJoinExpr(p->pLeft, iTable); + p = p->pRight; + } +} + /* ** This routine processes the join information for a SELECT statement. ** ON and USING clauses are converted into extra terms of the WHERE clause. @@ -119952,12 +121505,15 @@ static void generateSortTail( iSortTab = iTab; bSeq = 1; } - for(i=0, iCol=nKey+bSeq; i=0; i--){ int iRead; if( aOutEx[i].u.x.iOrderByCol ){ iRead = aOutEx[i].u.x.iOrderByCol-1; }else{ - iRead = iCol++; + iRead = iCol--; } sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iRead, regRow+i); VdbeComment((v, "%s", aOutEx[i].zName ? aOutEx[i].zName : aOutEx[i].zSpan)); @@ -122434,7 +123990,6 @@ static int flattenSubquery( pOrderBy->a[i].u.x.iOrderByCol = 0; } assert( pParent->pOrderBy==0 ); - assert( pSub->pPrior==0 ); pParent->pOrderBy = pOrderBy; pSub->pOrderBy = 0; } @@ -122518,12 +124073,22 @@ static int flattenSubquery( ** (3) The inner query has a LIMIT clause (since the changes to the WHERE ** close would change the meaning of the LIMIT). ** -** (4) The inner query is the right operand of a LEFT JOIN. (The caller -** enforces this restriction since this routine does not have enough -** information to know.) +** (4) The inner query is the right operand of a LEFT JOIN and the +** expression to be pushed down does not come from the ON clause +** on that LEFT JOIN. ** ** (5) The WHERE clause expression originates in the ON or USING clause -** of a LEFT JOIN. +** of a LEFT JOIN where iCursor is not the right-hand table of that +** left join. An example: +** +** SELECT * +** FROM (SELECT 1 AS a1 UNION ALL SELECT 2) AS aa +** JOIN (SELECT 1 AS b2 UNION ALL SELECT 2) AS bb ON (a1=b2) +** LEFT JOIN (SELECT 8 AS c3 UNION ALL SELECT 9) AS cc ON (b2=2); +** +** The correct answer is three rows: (1,1,NULL),(2,2,8),(2,2,9). +** But if the (b2=2) term were to be pushed down into the bb subquery, +** then the (1,1,NULL) row would be suppressed. ** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery. 
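Restriction (5) above comes with a concrete query whose correct answer is the three rows (1,1,NULL), (2,2,8) and (2,2,9). One way to see the effect is to run that query through the public API against an in-memory database; the harness below is only an illustration and is not part of the patch:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Print each result row; the expected output is
    ** 1,1,NULL / 2,2,8 / 2,2,9 followed by "3 rows".  If the (b2=2) term
    ** were pushed down into the bb subquery, the first row would be lost. */
    static int printRow(void *pCount, int nCol, char **azVal, char **azCol){
      int i;
      (void)azCol;
      (*(int*)pCount)++;
      for(i=0; i<nCol; i++){
        printf("%s%c", azVal[i] ? azVal[i] : "NULL", i==nCol-1 ? '\n' : ',');
      }
      return 0;
    }

    int main(void){
      sqlite3 *db;
      int nRow = 0;
      const char *zSql =
        "SELECT * FROM (SELECT 1 AS a1 UNION ALL SELECT 2) AS aa "
        "JOIN (SELECT 1 AS b2 UNION ALL SELECT 2) AS bb ON (a1=b2) "
        "LEFT JOIN (SELECT 8 AS c3 UNION ALL SELECT 9) AS cc ON (b2=2);";
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_exec(db, zSql, printRow, &nRow, 0);
      printf("%d rows\n", nRow);   /* expect 3 */
      sqlite3_close(db);
      return 0;
    }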
@@ -122532,7 +124097,8 @@ static int pushDownWhereTerms( Parse *pParse, /* Parse context (for malloc() and error reporting) */ Select *pSubq, /* The subquery whose WHERE clause is to be augmented */ Expr *pWhere, /* The WHERE clause of the outer query */ - int iCursor /* Cursor number of the subquery */ + int iCursor, /* Cursor number of the subquery */ + int isLeftJoin /* True if pSubq is the right term of a LEFT JOIN */ ){ Expr *pNew; int nChng = 0; @@ -122556,15 +124122,25 @@ static int pushDownWhereTerms( return 0; /* restriction (3) */ } while( pWhere->op==TK_AND ){ - nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, iCursor); + nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, + iCursor, isLeftJoin); pWhere = pWhere->pLeft; } - if( ExprHasProperty(pWhere,EP_FromJoin) ) return 0; /* restriction (5) */ + if( isLeftJoin + && (ExprHasProperty(pWhere,EP_FromJoin)==0 + || pWhere->iRightJoinTable!=iCursor) + ){ + return 0; /* restriction (4) */ + } + if( ExprHasProperty(pWhere,EP_FromJoin) && pWhere->iRightJoinTable!=iCursor ){ + return 0; /* restriction (5) */ + } if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){ nChng++; while( pSubq ){ SubstContext x; pNew = sqlite3ExprDup(pParse->db, pWhere, 0); + unsetJoinExpr(pNew, -1); x.pParse = pParse; x.iTable = iCursor; x.iNewTable = iCursor; @@ -123596,14 +125172,6 @@ static void explainSimpleCount( # define explainSimpleCount(a,b,c) #endif -/* -** Context object for havingToWhereExprCb(). -*/ -struct HavingToWhereCtx { - Expr **ppWhere; - ExprList *pGroupBy; -}; - /* ** sqlite3WalkExpr() callback used by havingToWhere(). ** @@ -123617,15 +125185,16 @@ struct HavingToWhereCtx { */ static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){ if( pExpr->op!=TK_AND ){ - struct HavingToWhereCtx *p = pWalker->u.pHavingCtx; - if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, p->pGroupBy) ){ + Select *pS = pWalker->u.pSelect; + if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, pS->pGroupBy) ){ sqlite3 *db = pWalker->pParse->db; Expr *pNew = sqlite3ExprAlloc(db, TK_INTEGER, &sqlite3IntTokens[1], 0); if( pNew ){ - Expr *pWhere = *(p->ppWhere); + Expr *pWhere = pS->pWhere; SWAP(Expr, *pNew, *pExpr); pNew = sqlite3ExprAnd(db, pWhere, pNew); - *(p->ppWhere) = pNew; + pS->pWhere = pNew; + pWalker->eCode = 1; } } return WRC_Prune; @@ -123648,23 +125217,19 @@ static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){ ** entirely of constants and expressions that are also GROUP BY terms that ** use the "BINARY" collation sequence. 
*/ -static void havingToWhere( - Parse *pParse, - ExprList *pGroupBy, - Expr *pHaving, - Expr **ppWhere -){ - struct HavingToWhereCtx sCtx; +static void havingToWhere(Parse *pParse, Select *p){ Walker sWalker; - - sCtx.ppWhere = ppWhere; - sCtx.pGroupBy = pGroupBy; - memset(&sWalker, 0, sizeof(sWalker)); sWalker.pParse = pParse; sWalker.xExprCallback = havingToWhereExprCb; - sWalker.u.pHavingCtx = &sCtx; - sqlite3WalkExpr(&sWalker, pHaving); + sWalker.u.pSelect = p; + sqlite3WalkExpr(&sWalker, p->pHaving); +#if SELECTTRACE_ENABLED + if( sWalker.eCode && (sqlite3SelectTrace & 0x100)!=0 ){ + SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n")); + sqlite3TreeViewSelect(0, p, 0); + } +#endif } /* @@ -123825,7 +125390,6 @@ SQLITE_PRIVATE int sqlite3Select( if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; memset(&sAggInfo, 0, sizeof(sAggInfo)); #if SELECTTRACE_ENABLED - pParse->nSelectIndent++; SELECTTRACE(1,pParse,p, ("begin processing:\n")); if( sqlite3SelectTrace & 0x100 ){ sqlite3TreeViewSelect(0, p, 0); @@ -123871,13 +125435,29 @@ SQLITE_PRIVATE int sqlite3Select( generateColumnNames(pParse, p); } - /* Try to flatten subqueries in the FROM clause up into the main query + /* Try to various optimizations (flattening subqueries, and strength + ** reduction of join operators) in the FROM clause up into the main query */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) for(i=0; !p->pPrior && inSrc; i++){ struct SrcList_item *pItem = &pTabList->a[i]; Select *pSub = pItem->pSelect; Table *pTab = pItem->pTab; + + /* Convert LEFT JOIN into JOIN if there are terms of the right table + ** of the LEFT JOIN used in the WHERE clause. + */ + if( (pItem->fg.jointype & JT_LEFT)!=0 + && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor) + && OptimizationEnabled(db, SQLITE_SimplifyJoin) + ){ + SELECTTRACE(0x100,pParse,p, + ("LEFT-JOIN simplifies to JOIN on term %d\n",i)); + pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER); + unsetJoinExpr(p->pWhere, pItem->iCursor); + } + + /* No futher action if this term of the FROM clause is no a subquery */ if( pSub==0 ) continue; /* Catch mismatch in the declared columns of a view and the number of @@ -123946,7 +125526,6 @@ SQLITE_PRIVATE int sqlite3Select( explainSetInteger(pParse->iSelectId, iRestoreSelectId); #if SELECTTRACE_ENABLED SELECTTRACE(1,pParse,p,("end compound-select processing\n")); - pParse->nSelectIndent--; #endif return rc; } @@ -124019,8 +125598,9 @@ SQLITE_PRIVATE int sqlite3Select( /* Make copies of constant WHERE-clause terms in the outer query down ** inside the subquery. This can help the subquery to run more efficiently. */ - if( (pItem->fg.jointype & JT_OUTER)==0 - && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor) + if( OptimizationEnabled(db, SQLITE_PushDown) + && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor, + (pItem->fg.jointype & JT_OUTER)!=0) ){ #if SELECTTRACE_ENABLED if( sqlite3SelectTrace & 0x100 ){ @@ -124028,6 +125608,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3TreeViewSelect(0, p, 0); } #endif + }else{ + SELECTTRACE(0x100,pParse,p,("Push-down not possible\n")); } zSavedAuthContext = pParse->zAuthContext; @@ -124230,6 +125812,7 @@ SQLITE_PRIVATE int sqlite3Select( wctrlFlags |= p->selFlags & SF_FixedLimit; /* Begin the database scan. 
*/ + SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy, p->pEList, wctrlFlags, p->nSelectRow); if( pWInfo==0 ) goto select_end; @@ -124331,7 +125914,9 @@ SQLITE_PRIVATE int sqlite3Select( if( pHaving ){ if( pGroupBy ){ assert( pWhere==p->pWhere ); - havingToWhere(pParse, pGroupBy, pHaving, &p->pWhere); + assert( pHaving==p->pHaving ); + assert( pGroupBy==p->pGroupBy ); + havingToWhere(pParse, p); pWhere = p->pWhere; } sqlite3ExprAnalyzeAggregates(&sNC, pHaving); @@ -124418,6 +126003,7 @@ SQLITE_PRIVATE int sqlite3Select( ** in the right order to begin with. */ sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); + SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0, WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0 ); @@ -124673,6 +126259,7 @@ SQLITE_PRIVATE int sqlite3Select( assert( minMaxFlag==WHERE_ORDERBY_NORMAL || pMinMaxOrderBy!=0 ); assert( pMinMaxOrderBy==0 || pMinMaxOrderBy->nExpr==1 ); + SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy, 0, minMaxFlag, 0); if( pWInfo==0 ){ @@ -124728,7 +126315,6 @@ select_end: sqlite3DbFree(db, sAggInfo.aFunc); #if SELECTTRACE_ENABLED SELECTTRACE(1,pParse,p,("end processing\n")); - pParse->nSelectIndent--; #endif return rc; } @@ -127024,8 +128610,8 @@ static int execSql(sqlite3 *db, char **pzErrMsg, const char *zSql){ while( SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){ const char *zSubSql = (const char*)sqlite3_column_text(pStmt,0); assert( sqlite3_strnicmp(zSql,"SELECT",6)==0 ); - if( zSubSql ){ - assert( zSubSql[0]!='S' ); + assert( sqlite3_strnicmp(zSubSql,"SELECT",6)!=0 || CORRUPT_DB ); + if( zSubSql && zSubSql[0]!='S' ){ rc = execSql(db, pzErrMsg, zSubSql); if( rc!=SQLITE_OK ) break; } @@ -130563,7 +132149,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( sqlite3ExprIsVector(pX->pRight) ){ r1 = rTemp = sqlite3GetTempReg(pParse); codeExprOrVector(pParse, pX->pRight, r1, 1); - op = aMoveOp[(pX->op - TK_GT) | 0x0001]; + testcase( pX->op==TK_GT ); + testcase( pX->op==TK_GE ); + testcase( pX->op==TK_LT ); + testcase( pX->op==TK_LE ); + op = aMoveOp[((pX->op - TK_GT - 1) & 0x3) | 0x1]; + assert( pX->op!=TK_GT || op==OP_SeekGE ); + assert( pX->op!=TK_GE || op==OP_SeekGE ); + assert( pX->op!=TK_LT || op==OP_SeekLE ); + assert( pX->op!=TK_LE || op==OP_SeekLE ); }else{ r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp); disableTerm(pLevel, pStart); @@ -131287,7 +132881,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( continue; } - if( pTerm->wtFlags & TERM_LIKECOND ){ + if( (pTerm->wtFlags & TERM_LIKECOND)!=0 ){ /* If the TERM_LIKECOND flag is set, that means that the range search ** is sufficient to guarantee that the LIKE operator is true, so we ** can skip the call to the like(A,B) function. 
But this only works @@ -131297,8 +132891,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( continue; #else u32 x = pLevel->iLikeRepCntr; - assert( x>0 ); - skipLikeAddr = sqlite3VdbeAddOp1(v, (x&1)?OP_IfNot:OP_If, (int)(x>>1)); + if( x>0 ){ + skipLikeAddr = sqlite3VdbeAddOp1(v, (x&1)?OP_IfNot:OP_If,(int)(x>>1)); + } VdbeCoverage(v); #endif } @@ -131338,6 +132933,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( WO_EQ|WO_IN|WO_IS, 0); if( pAlt==0 ) continue; if( pAlt->wtFlags & (TERM_CODED) ) continue; + if( (pAlt->eOperator & WO_IN) + && (pAlt->pExpr->flags & EP_xIsSelect) + && (pAlt->pExpr->x.pSelect->pEList->nExpr>1) + ){ + continue; + } testcase( pAlt->eOperator & WO_EQ ); testcase( pAlt->eOperator & WO_IS ); testcase( pAlt->eOperator & WO_IN ); @@ -132252,6 +133853,9 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){ for(i=0; inSrc; i++){ mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect); mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].pOn); + if( pSrc->a[i].fg.isTabFunc ){ + mask |= sqlite3WhereExprListUsage(pMaskSet, pSrc->a[i].u1.pFuncArg); + } } } pS = pS->pPrior; @@ -132664,7 +134268,7 @@ static void exprAnalyze( exprAnalyze(pSrc, pWC, idxNew); } pTerm = &pWC->a[idxTerm]; - pTerm->wtFlags = TERM_CODED|TERM_VIRTUAL; /* Disable the original */ + pTerm->wtFlags |= TERM_CODED|TERM_VIRTUAL; /* Disable the original */ pTerm->eOperator = 0; } @@ -135389,10 +136993,12 @@ static int whereLoopAddBtreeIndex( if( iCol==XN_ROWID || (iCol>=0 && nInMul==0 && saved_nEq==pProbe->nKeyCol-1) ){ - if( iCol>=0 && pProbe->uniqNotNull==0 ){ - pNew->wsFlags |= WHERE_UNQ_WANTED; - }else{ + if( iCol==XN_ROWID || pProbe->uniqNotNull + || (pProbe->nKeyCol==1 && pProbe->onError && eOp==WO_EQ) + ){ pNew->wsFlags |= WHERE_ONEROW; + }else{ + pNew->wsFlags |= WHERE_UNQ_WANTED; } } }else if( eOp & WO_ISNULL ){ @@ -137539,6 +139145,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( */ for(ii=0; iinTerm; ii++){ WhereTerm *pT = &sWLB.pWC->a[ii]; + if( pT->wtFlags & TERM_VIRTUAL ) continue; if( pT->prereqAll==0 && (nTabList==0 || exprIsDeterministic(pT->pExpr)) ){ sqlite3ExprIfFalse(pParse, pT->pExpr, pWInfo->iBreak, SQLITE_JUMPIFNULL); pT->wtFlags |= TERM_CODED; @@ -139997,7 +141604,8 @@ static unsigned int yy_find_shift_action( #endif do{ i = yy_shift_ofst[stateno]; - assert( i>=0 && i+YYNTOKEN<=sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) ); + assert( i>=0 ); + assert( i+YYNTOKEN<=(int)sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) ); assert( iLookAhead!=YYNOCODE ); assert( iLookAhead < YYNTOKEN ); i += iLookAhead; @@ -140711,6 +142319,10 @@ static void yy_reduce( case 34: /* ccons ::= DEFAULT scanpt ID|INDEXED */ { Expr *p = tokenExpr(pParse, TK_STRING, yymsp[0].minor.yy0); + if( p ){ + sqlite3ExprIdToTrueFalse(p); + testcase( p->op==TK_TRUEFALSE && sqlite3ExprTruthValue(p) ); + } sqlite3AddDefaultValue(pParse,p,yymsp[0].minor.yy0.z,yymsp[0].minor.yy0.z+yymsp[0].minor.yy0.n); } break; @@ -140904,8 +142516,7 @@ static void yy_reduce( if( yymsp[-8].minor.yy387!=0 ){ const char *z = s.z+6; int i; - sqlite3_snprintf(sizeof(yymsp[-8].minor.yy387->zSelName), yymsp[-8].minor.yy387->zSelName, "#%d", - ++pParse->nSelect); + sqlite3_snprintf(sizeof(yymsp[-8].minor.yy387->zSelName), yymsp[-8].minor.yy387->zSelName,"#%d",++pParse->nSelect); while( z[0]==' ' ) z++; if( z[0]=='/' && z[1]=='*' ){ z += 2; @@ -143604,6 +145215,11 @@ SQLITE_API int sqlite3_initialize(void){ sqlite3GlobalConfig.isPCacheInit = 1; rc = sqlite3OsInit(); } +#ifdef SQLITE_ENABLE_DESERIALIZE + if( rc==SQLITE_OK ){ 
+ rc = sqlite3MemdbInit(); + } +#endif if( rc==SQLITE_OK ){ sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage, sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage); @@ -143636,7 +145252,7 @@ SQLITE_API int sqlite3_initialize(void){ #ifndef NDEBUG #ifndef SQLITE_OMIT_FLOATING_POINT /* This section of code's only "output" is via assert() statements. */ - if ( rc==SQLITE_OK ){ + if( rc==SQLITE_OK ){ u64 x = (((u64)1)<<63)-1; double y; assert(sizeof(x)==8); @@ -144803,6 +146419,8 @@ SQLITE_PRIVATE const char *sqlite3ErrStr(int rc){ /* SQLITE_FORMAT */ 0, /* SQLITE_RANGE */ "column index out of range", /* SQLITE_NOTADB */ "file is not a database", + /* SQLITE_NOTICE */ "notification message", + /* SQLITE_WARNING */ "warning message", }; const char *zErr = "unknown error"; switch( rc ){ @@ -144810,6 +146428,14 @@ SQLITE_PRIVATE const char *sqlite3ErrStr(int rc){ zErr = "abort due to ROLLBACK"; break; } + case SQLITE_ROW: { + zErr = "another row available"; + break; + } + case SQLITE_DONE: { + zErr = "no more rows available"; + break; + } default: { rc &= 0xff; if( ALWAYS(rc>=0) && rcbusyTimeout; + int tmout = db->busyTimeout; int delay, prior; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + if( sqlite3OsFileControl(pFile,SQLITE_FCNTL_LOCK_TIMEOUT,&tmout)==SQLITE_OK ){ + if( count ){ + tmout = 0; + sqlite3OsFileControl(pFile, SQLITE_FCNTL_LOCK_TIMEOUT, &tmout); + return 0; + }else{ + return 1; + } + } +#else + UNUSED_PARAMETER(pFile); +#endif assert( count>=0 ); if( count < NDELAY ){ delay = delays[count]; @@ -144849,16 +146494,19 @@ static int sqliteDefaultBusyCallback( delay = delays[NDELAY-1]; prior = totals[NDELAY-1] + delay*(count-(NDELAY-1)); } - if( prior + delay > timeout ){ - delay = timeout - prior; + if( prior + delay > tmout ){ + delay = tmout - prior; if( delay<=0 ) return 0; } sqlite3OsSleep(db->pVfs, delay*1000); return 1; #else + /* This case for unix systems that lack usleep() support. Sleeping + ** must be done in increments of whole seconds */ sqlite3 *db = (sqlite3 *)ptr; - int timeout = ((sqlite3 *)ptr)->busyTimeout; - if( (count+1)*1000 > timeout ){ + int tmout = ((sqlite3 *)ptr)->busyTimeout; + UNUSED_PARAMETER(pFile); + if( (count+1)*1000 > tmout ){ return 0; } sqlite3OsSleep(db->pVfs, 1000000); @@ -144869,14 +146517,25 @@ static int sqliteDefaultBusyCallback( /* ** Invoke the given busy handler. ** -** This routine is called when an operation failed with a lock. +** This routine is called when an operation failed to acquire a +** lock on VFS file pFile. +** ** If this routine returns non-zero, the lock is retried. If it ** returns 0, the operation aborts with an SQLITE_BUSY error. 
*/ -SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler *p){ +SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler *p, sqlite3_file *pFile){ int rc; - if( NEVER(p==0) || p->xFunc==0 || p->nBusy<0 ) return 0; - rc = p->xFunc(p->pArg, p->nBusy); + if( p->xBusyHandler==0 || p->nBusy<0 ) return 0; + if( p->bExtraFileArg ){ + /* Add an extra parameter with the pFile pointer to the end of the + ** callback argument list */ + int (*xTra)(void*,int,sqlite3_file*); + xTra = (int(*)(void*,int,sqlite3_file*))p->xBusyHandler; + rc = xTra(p->pBusyArg, p->nBusy, pFile); + }else{ + /* Legacy style busy handler callback */ + rc = p->xBusyHandler(p->pBusyArg, p->nBusy); + } if( rc==0 ){ p->nBusy = -1; }else{ @@ -144898,9 +146557,10 @@ SQLITE_API int sqlite3_busy_handler( if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); - db->busyHandler.xFunc = xBusy; - db->busyHandler.pArg = pArg; + db->busyHandler.xBusyHandler = xBusy; + db->busyHandler.pBusyArg = pArg; db->busyHandler.nBusy = 0; + db->busyHandler.bExtraFileArg = 0; db->busyTimeout = 0; sqlite3_mutex_leave(db->mutex); return SQLITE_OK; @@ -144948,8 +146608,10 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif if( ms>0 ){ - sqlite3_busy_handler(db, sqliteDefaultBusyCallback, (void*)db); + sqlite3_busy_handler(db, (int(*)(void*,int))sqliteDefaultBusyCallback, + (void*)db); db->busyTimeout = ms; + db->busyHandler.bExtraFileArg = 1; }else{ sqlite3_busy_handler(db, 0, 0); } @@ -146942,10 +148604,8 @@ SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, vo }else if( op==SQLITE_FCNTL_JOURNAL_POINTER ){ *(sqlite3_file**)pArg = sqlite3PagerJrnlFile(pPager); rc = SQLITE_OK; - }else if( fd->pMethods ){ - rc = sqlite3OsFileControl(fd, op, pArg); }else{ - rc = SQLITE_NOTFOUND; + rc = sqlite3OsFileControl(fd, op, pArg); } sqlite3BtreeLeave(pBtree); } @@ -160796,6 +162456,7 @@ static int fts3WriteSegment( sqlite3_bind_blob(pStmt, 2, z, n, SQLITE_STATIC); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); + sqlite3_bind_null(pStmt, 2); } return rc; } @@ -160852,6 +162513,7 @@ static int fts3WriteSegdir( sqlite3_bind_blob(pStmt, 6, zRoot, nRoot, SQLITE_STATIC); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); + sqlite3_bind_null(pStmt, 6); } return rc; } @@ -162331,6 +163993,7 @@ static void fts3UpdateDocTotals( sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, SQLITE_STATIC); sqlite3_step(pStmt); *pRC = sqlite3_reset(pStmt); + sqlite3_bind_null(pStmt, 2); sqlite3_free(a); } @@ -163519,6 +165182,7 @@ static int fts3TruncateSegment( sqlite3_bind_int(pChomp, 4, iIdx); sqlite3_step(pChomp); rc = sqlite3_reset(pChomp); + sqlite3_bind_null(pChomp, 2); } } @@ -163598,6 +165262,7 @@ static int fts3IncrmergeHintStore(Fts3Table *p, Blob *pHint){ sqlite3_bind_blob(pReplace, 2, pHint->a, pHint->n, SQLITE_STATIC); sqlite3_step(pReplace); rc = sqlite3_reset(pReplace); + sqlite3_bind_null(pReplace, 2); } return rc; @@ -164412,7 +166077,6 @@ SQLITE_PRIVATE int sqlite3Fts3UpdateMethod( ){ Fts3Table *p = (Fts3Table *)pVtab; int rc = SQLITE_OK; /* Return Code */ - int isRemove = 0; /* True for an UPDATE or DELETE */ u32 *aSzIns = 0; /* Sizes of inserted documents */ u32 *aSzDel = 0; /* Sizes of deleted documents */ int nChng = 0; /* Net change in number of documents */ @@ -164510,7 +166174,6 @@ SQLITE_PRIVATE int sqlite3Fts3UpdateMethod( if( sqlite3_value_type(apVal[0])!=SQLITE_NULL ){ assert( sqlite3_value_type(apVal[0])==SQLITE_INTEGER ); rc = 
fts3DeleteByRowid(p, apVal[0], &nChng, aSzDel); - isRemove = 1; } /* If this is an INSERT or UPDATE operation, insert the new record. */ @@ -164522,7 +166185,7 @@ SQLITE_PRIVATE int sqlite3Fts3UpdateMethod( rc = FTS_CORRUPT_VTAB; } } - if( rc==SQLITE_OK && (!isRemove || *pRowid!=p->iPrevDocid ) ){ + if( rc==SQLITE_OK ){ rc = fts3PendingTermsDocid(p, 0, iLangid, *pRowid); } if( rc==SQLITE_OK ){ @@ -167830,6 +169493,7 @@ static int nodeWrite(Rtree *pRtree, RtreeNode *pNode){ sqlite3_step(p); pNode->isDirty = 0; rc = sqlite3_reset(p); + sqlite3_bind_null(p, 2); if( pNode->iNode==0 && rc==SQLITE_OK ){ pNode->iNode = sqlite3_last_insert_rowid(pRtree->db); nodeHashInsert(pRtree, pNode); @@ -174517,7 +176181,7 @@ static void rbuCreateImposterTable2(sqlite3rbu *p, RbuObjIter *pIter){ int iCid = sqlite3_column_int(pXInfo, 1); int bDesc = sqlite3_column_int(pXInfo, 3); const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4); - zCols = rbuMPrintf(p, "%z%sc%d %s COLLATE %s", zCols, zComma, + zCols = rbuMPrintf(p, "%z%sc%d %s COLLATE %Q", zCols, zComma, iCid, pIter->azTblType[iCid], zCollate ); zPk = rbuMPrintf(p, "%z%sc%d%s", zPk, zComma, iCid, bDesc?" DESC":""); @@ -174578,7 +176242,7 @@ static void rbuCreateImposterTable(sqlite3rbu *p, RbuObjIter *pIter){ ** "PRIMARY KEY" to the imposter table column declaration. */ zPk = "PRIMARY KEY "; } - zSql = rbuMPrintf(p, "%z%s\"%w\" %s %sCOLLATE %s%s", + zSql = rbuMPrintf(p, "%z%s\"%w\" %s %sCOLLATE %Q%s", zSql, zComma, zCol, pIter->azTblType[iCol], zPk, zColl, (pIter->abNotNull[iCol] ? " NOT NULL" : "") ); @@ -178012,7 +179676,7 @@ static void statSizeAndOffset(StatCursor *pCsr){ */ fd = sqlite3PagerFile(pPager); x[0] = pCsr->iPageno; - if( fd->pMethods!=0 && sqlite3OsFileControl(fd, 230440, &x)==SQLITE_OK ){ + if( sqlite3OsFileControl(fd, 230440, &x)==SQLITE_OK ){ pCsr->iOffset = x[0]; pCsr->szPage = (int)x[1]; } @@ -178942,8 +180606,8 @@ struct SessionTable { ** statement. ** ** For a DELETE change, all fields within the record except those associated -** with PRIMARY KEY columns are set to "undefined". The PRIMARY KEY fields -** contain the values identifying the row to delete. +** with PRIMARY KEY columns are omitted. The PRIMARY KEY fields contain the +** values identifying the row to delete. ** ** For an UPDATE change, all fields except those associated with PRIMARY KEY ** columns and columns that are modified by the UPDATE are set to "undefined". 
@@ -179226,7 +180890,7 @@ static int sessionPreupdateHash( static int sessionSerialLen(u8 *a){ int e = *a; int n; - if( e==0 ) return 1; + if( e==0 || e==0xFF ) return 1; if( e==SQLITE_NULL ) return 1; if( e==SQLITE_INTEGER || e==SQLITE_FLOAT ) return 9; return sessionVarintGet(&a[1], &n) + 1 + n; @@ -179306,7 +180970,7 @@ static int sessionChangeEqual( int n1 = sessionSerialLen(a1); int n2 = sessionSerialLen(a2); - if( pTab->abPK[iCol] && (n1!=n2 || memcmp(a1, a2, n1)) ){ + if( n1!=n2 || memcmp(a1, a2, n1) ){ return 0; } a1 += n1; @@ -179549,7 +181213,7 @@ static int sessionPreupdateEqual( }else{ z = sqlite3_value_blob(pVal); } - if( memcmp(a, z, n) ) return 0; + if( n>0 && memcmp(a, z, n) ) return 0; a += n; } } @@ -179825,7 +181489,7 @@ static void sessionPreupdateOneChange( int iHash; int bNull = 0; int rc = SQLITE_OK; - SessionStat1Ctx stat1; + SessionStat1Ctx stat1 = {0}; if( pSession->rc ) return; @@ -180893,6 +182557,7 @@ static int sessionSelectStmt( "SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND " "idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", zDb ); + if( zSql==0 ) rc = SQLITE_NOMEM; }else{ int i; const char *zSep = ""; @@ -181430,13 +183095,16 @@ static int sessionReadRecord( if( abPK && abPK[i]==0 ) continue; rc = sessionInputBuffer(pIn, 9); if( rc==SQLITE_OK ){ - eType = pIn->aData[pIn->iNext++]; - } - - assert( apOut[i]==0 ); - if( eType ){ - apOut[i] = sqlite3ValueNew(0); - if( !apOut[i] ) rc = SQLITE_NOMEM; + if( pIn->iNext>=pIn->nData ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + eType = pIn->aData[pIn->iNext++]; + assert( apOut[i]==0 ); + if( eType ){ + apOut[i] = sqlite3ValueNew(0); + if( !apOut[i] ) rc = SQLITE_NOMEM; + } + } } if( rc==SQLITE_OK ){ @@ -181446,10 +183114,14 @@ static int sessionReadRecord( pIn->iNext += sessionVarintGet(aVal, &nByte); rc = sessionInputBuffer(pIn, nByte); if( rc==SQLITE_OK ){ - u8 enc = (eType==SQLITE_TEXT ? SQLITE_UTF8 : 0); - rc = sessionValueSetStr(apOut[i],&pIn->aData[pIn->iNext],nByte,enc); + if( nByte<0 || nByte>pIn->nData-pIn->iNext ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + u8 enc = (eType==SQLITE_TEXT ? SQLITE_UTF8 : 0); + rc = sessionValueSetStr(apOut[i],&pIn->aData[pIn->iNext],nByte,enc); + pIn->iNext += nByte; + } } - pIn->iNext += nByte; } if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ sqlite3_int64 v = sessionGetI64(aVal); @@ -181489,8 +183161,19 @@ static int sessionChangesetBufferTblhdr(SessionInput *pIn, int *pnByte){ rc = sessionInputBuffer(pIn, 9); if( rc==SQLITE_OK ){ nRead += sessionVarintGet(&pIn->aData[pIn->iNext + nRead], &nCol); - rc = sessionInputBuffer(pIn, nRead+nCol+100); - nRead += nCol; + /* The hard upper limit for the number of columns in an SQLite + ** database table is, according to sqliteLimit.h, 32676. So + ** consider any table-header that purports to have more than 65536 + ** columns to be corrupt. This is convenient because otherwise, + ** if the (nCol>65536) condition below were omitted, a sufficiently + ** large value for nCol may cause nRead to wrap around and become + ** negative. Leading to a crash. 
*/ + if( nCol<0 || nCol>65536 ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + rc = sessionInputBuffer(pIn, nRead+nCol+100); + nRead += nCol; + } } while( rc==SQLITE_OK ){ @@ -181567,11 +183250,15 @@ static int sessionChangesetReadTblhdr(sqlite3_changeset_iter *p){ int nByte; int nVarint; nVarint = sessionVarintGet(&p->in.aData[p->in.iNext], &p->nCol); - nCopy -= nVarint; - p->in.iNext += nVarint; - nByte = p->nCol * sizeof(sqlite3_value*) * 2 + nCopy; - p->tblhdr.nBuf = 0; - sessionBufferGrow(&p->tblhdr, nByte, &rc); + if( p->nCol>0 ){ + nCopy -= nVarint; + p->in.iNext += nVarint; + nByte = p->nCol * sizeof(sqlite3_value*) * 2 + nCopy; + p->tblhdr.nBuf = 0; + sessionBufferGrow(&p->tblhdr, nByte, &rc); + }else{ + rc = SQLITE_CORRUPT_BKPT; + } } if( rc==SQLITE_OK ){ @@ -181606,7 +183293,8 @@ static int sessionChangesetReadTblhdr(sqlite3_changeset_iter *p){ static int sessionChangesetNext( sqlite3_changeset_iter *p, /* Changeset iterator */ u8 **paRec, /* If non-NULL, store record pointer here */ - int *pnRec /* If non-NULL, store size of record here */ + int *pnRec, /* If non-NULL, store size of record here */ + int *pbNew /* If non-NULL, true if new table */ ){ int i; u8 op; @@ -181641,6 +183329,7 @@ static int sessionChangesetNext( op = p->in.aData[p->in.iNext++]; while( op=='T' || op=='P' ){ + if( pbNew ) *pbNew = 1; p->bPatchset = (op=='P'); if( sessionChangesetReadTblhdr(p) ) return p->rc; if( (p->rc = sessionInputBuffer(&p->in, 2)) ) return p->rc; @@ -181649,6 +183338,13 @@ static int sessionChangesetNext( op = p->in.aData[p->in.iNext++]; } + if( p->zTab==0 ){ + /* The first record in the changeset is not a table header. Must be a + ** corrupt changeset. */ + assert( p->in.iNext==1 ); + return (p->rc = SQLITE_CORRUPT_BKPT); + } + p->op = op; p->bIndirect = p->in.aData[p->in.iNext++]; if( p->op!=SQLITE_UPDATE && p->op!=SQLITE_DELETE && p->op!=SQLITE_INSERT ){ @@ -181691,9 +183387,9 @@ static int sessionChangesetNext( ** new.* to old.*, to accommodate the code that reads these arrays. */ for(i=0; inCol; i++){ assert( p->apValue[i]==0 ); - assert( p->abPK[i]==0 || p->apValue[i+p->nCol] ); if( p->abPK[i] ){ p->apValue[i] = p->apValue[i+p->nCol]; + if( p->apValue[i]==0 ) return (p->rc = SQLITE_CORRUPT_BKPT); p->apValue[i+p->nCol] = 0; } } @@ -181712,7 +183408,7 @@ static int sessionChangesetNext( ** callback by changeset_apply(). */ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *p){ - return sessionChangesetNext(p, 0, 0); + return sessionChangesetNext(p, 0, 0, 0); } /* @@ -182091,6 +183787,8 @@ struct SessionApplyCtx { int bStat1; /* True if table is sqlite_stat1 */ int bDeferConstraints; /* True to defer constraints */ SessionBuffer constraints; /* Deferred constraints are stored here */ + SessionBuffer rebase; /* Rebase information (if any) here */ + int bRebaseStarted; /* If table header is already in rebase */ }; /* @@ -182357,7 +184055,6 @@ static int sessionStat1Sql(sqlite3 *db, SessionApplyCtx *p){ "AND (?4 OR stat IS ?3)" ); } - assert( rc==SQLITE_OK ); return rc; } @@ -182418,7 +184115,13 @@ static int sessionBindRow( if( !abPK || abPK[i] ){ sqlite3_value *pVal; (void)xValue(pIter, i, &pVal); - rc = sessionBindValue(pStmt, i+1, pVal); + if( pVal==0 ){ + /* The value in the changeset was "undefined". This indicates a + ** corrupt changeset blob. 
*/ + rc = SQLITE_CORRUPT_BKPT; + }else{ + rc = sessionBindValue(pStmt, i+1, pVal); + } } } return rc; @@ -182466,6 +184169,54 @@ static int sessionSeekToRow( return rc; } +/* +** This function is called from within sqlite3changset_apply_v2() when +** a conflict is encountered and resolved using conflict resolution +** mode eType (either SQLITE_CHANGESET_OMIT or SQLITE_CHANGESET_REPLACE).. +** It adds a conflict resolution record to the buffer in +** SessionApplyCtx.rebase, which will eventually be returned to the caller +** of apply_v2() as the "rebase" buffer. +** +** Return SQLITE_OK if successful, or an SQLite error code otherwise. +*/ +static int sessionRebaseAdd( + SessionApplyCtx *p, /* Apply context */ + int eType, /* Conflict resolution (OMIT or REPLACE) */ + sqlite3_changeset_iter *pIter /* Iterator pointing at current change */ +){ + int rc = SQLITE_OK; + int i; + int eOp = pIter->op; + if( p->bRebaseStarted==0 ){ + /* Append a table-header to the rebase buffer */ + const char *zTab = pIter->zTab; + sessionAppendByte(&p->rebase, 'T', &rc); + sessionAppendVarint(&p->rebase, p->nCol, &rc); + sessionAppendBlob(&p->rebase, p->abPK, p->nCol, &rc); + sessionAppendBlob(&p->rebase, (u8*)zTab, (int)strlen(zTab)+1, &rc); + p->bRebaseStarted = 1; + } + + assert( eType==SQLITE_CHANGESET_REPLACE||eType==SQLITE_CHANGESET_OMIT ); + assert( eOp==SQLITE_DELETE || eOp==SQLITE_INSERT || eOp==SQLITE_UPDATE ); + + sessionAppendByte(&p->rebase, + (eOp==SQLITE_DELETE ? SQLITE_DELETE : SQLITE_INSERT), &rc + ); + sessionAppendByte(&p->rebase, (eType==SQLITE_CHANGESET_REPLACE), &rc); + for(i=0; inCol; i++){ + sqlite3_value *pVal = 0; + if( eOp==SQLITE_DELETE || (eOp==SQLITE_UPDATE && p->abPK[i]) ){ + sqlite3changeset_old(pIter, i, &pVal); + }else{ + sqlite3changeset_new(pIter, i, &pVal); + } + sessionAppendValue(&p->rebase, pVal, &rc); + } + + return rc; +} + /* ** Invoke the conflict handler for the change that the changeset iterator ** currently points to. @@ -182541,7 +184292,7 @@ static int sessionConflictHandler( u8 *aBlob = &pIter->in.aData[pIter->in.iCurrent]; int nBlob = pIter->in.iNext - pIter->in.iCurrent; sessionAppendBlob(&p->constraints, aBlob, nBlob, &rc); - res = SQLITE_CHANGESET_OMIT; + return SQLITE_OK; }else{ /* No other row with the new.* primary key. */ res = xConflict(pCtx, eType+1, pIter); @@ -182567,6 +184318,9 @@ static int sessionConflictHandler( rc = SQLITE_MISUSE; break; } + if( rc==SQLITE_OK ){ + rc = sessionRebaseAdd(p, res, pIter); + } } return rc; @@ -182742,42 +184496,42 @@ static int sessionApplyOneWithRetry( int rc; rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, &bReplace, &bRetry); - assert( rc==SQLITE_OK || (bRetry==0 && bReplace==0) ); - - /* If the bRetry flag is set, the change has not been applied due to an - ** SQLITE_CHANGESET_DATA problem (i.e. this is an UPDATE or DELETE and - ** a row with the correct PK is present in the db, but one or more other - ** fields do not contain the expected values) and the conflict handler - ** returned SQLITE_CHANGESET_REPLACE. In this case retry the operation, - ** but pass NULL as the final argument so that sessionApplyOneOp() ignores - ** the SQLITE_CHANGESET_DATA problem. 
*/ - if( bRetry ){ - assert( pIter->op==SQLITE_UPDATE || pIter->op==SQLITE_DELETE ); - rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, 0, 0); - } - - /* If the bReplace flag is set, the change is an INSERT that has not - ** been performed because the database already contains a row with the - ** specified primary key and the conflict handler returned - ** SQLITE_CHANGESET_REPLACE. In this case remove the conflicting row - ** before reattempting the INSERT. */ - else if( bReplace ){ - assert( pIter->op==SQLITE_INSERT ); - rc = sqlite3_exec(db, "SAVEPOINT replace_op", 0, 0, 0); - if( rc==SQLITE_OK ){ - rc = sessionBindRow(pIter, - sqlite3changeset_new, pApply->nCol, pApply->abPK, pApply->pDelete); - sqlite3_bind_int(pApply->pDelete, pApply->nCol+1, 1); - } - if( rc==SQLITE_OK ){ - sqlite3_step(pApply->pDelete); - rc = sqlite3_reset(pApply->pDelete); - } - if( rc==SQLITE_OK ){ + if( rc==SQLITE_OK ){ + /* If the bRetry flag is set, the change has not been applied due to an + ** SQLITE_CHANGESET_DATA problem (i.e. this is an UPDATE or DELETE and + ** a row with the correct PK is present in the db, but one or more other + ** fields do not contain the expected values) and the conflict handler + ** returned SQLITE_CHANGESET_REPLACE. In this case retry the operation, + ** but pass NULL as the final argument so that sessionApplyOneOp() ignores + ** the SQLITE_CHANGESET_DATA problem. */ + if( bRetry ){ + assert( pIter->op==SQLITE_UPDATE || pIter->op==SQLITE_DELETE ); rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, 0, 0); } - if( rc==SQLITE_OK ){ - rc = sqlite3_exec(db, "RELEASE replace_op", 0, 0, 0); + + /* If the bReplace flag is set, the change is an INSERT that has not + ** been performed because the database already contains a row with the + ** specified primary key and the conflict handler returned + ** SQLITE_CHANGESET_REPLACE. In this case remove the conflicting row + ** before reattempting the INSERT. */ + else if( bReplace ){ + assert( pIter->op==SQLITE_INSERT ); + rc = sqlite3_exec(db, "SAVEPOINT replace_op", 0, 0, 0); + if( rc==SQLITE_OK ){ + rc = sessionBindRow(pIter, + sqlite3changeset_new, pApply->nCol, pApply->abPK, pApply->pDelete); + sqlite3_bind_int(pApply->pDelete, pApply->nCol+1, 1); + } + if( rc==SQLITE_OK ){ + sqlite3_step(pApply->pDelete); + rc = sqlite3_reset(pApply->pDelete); + } + if( rc==SQLITE_OK ){ + rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, 0, 0); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_exec(db, "RELEASE replace_op", 0, 0, 0); + } } } @@ -182853,7 +184607,8 @@ static int sessionChangesetApply( int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ sqlite3_changeset_iter *p /* Handle describing change and conflict */ ), - void *pCtx /* First argument passed to xConflict */ + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase /* OUT: Rebase information */ ){ int schemaMismatch = 0; int rc; /* Return code */ @@ -182891,9 +184646,18 @@ static int sessionChangesetApply( sqlite3_finalize(sApply.pUpdate); sqlite3_finalize(sApply.pInsert); sqlite3_finalize(sApply.pSelect); - memset(&sApply, 0, sizeof(sApply)); sApply.db = db; + sApply.pDelete = 0; + sApply.pUpdate = 0; + sApply.pInsert = 0; + sApply.pSelect = 0; + sApply.nCol = 0; + sApply.azCol = 0; + sApply.abPK = 0; + sApply.bStat1 = 0; sApply.bDeferConstraints = 1; + sApply.bRebaseStarted = 0; + memset(&sApply.constraints, 0, sizeof(SessionBuffer)); /* If an xFilter() callback was specified, invoke it now. 
If the ** xFilter callback returns zero, skip this table. If it returns @@ -183003,16 +184767,52 @@ static int sessionChangesetApply( sqlite3_exec(db, "RELEASE changeset_apply", 0, 0, 0); } + if( rc==SQLITE_OK && bPatchset==0 && ppRebase && pnRebase ){ + *ppRebase = (void*)sApply.rebase.aBuf; + *pnRebase = sApply.rebase.nBuf; + sApply.rebase.aBuf = 0; + } sqlite3_finalize(sApply.pInsert); sqlite3_finalize(sApply.pDelete); sqlite3_finalize(sApply.pUpdate); sqlite3_finalize(sApply.pSelect); sqlite3_free((char*)sApply.azCol); /* cast works around VC++ bug */ sqlite3_free((char*)sApply.constraints.aBuf); + sqlite3_free((char*)sApply.rebase.aBuf); sqlite3_mutex_leave(sqlite3_db_mutex(db)); return rc; } +/* +** Apply the changeset passed via pChangeset/nChangeset to the main +** database attached to handle "db". +*/ +SQLITE_API int sqlite3changeset_apply_v2( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase +){ + sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ + int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset); + if( rc==SQLITE_OK ){ + rc = sessionChangesetApply( + db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase + ); + } + return rc; +} + /* ** Apply the changeset passed via pChangeset/nChangeset to the main database ** attached to handle "db". Invoke the supplied conflict handler callback @@ -183033,12 +184833,9 @@ SQLITE_API int sqlite3changeset_apply( ), void *pCtx /* First argument passed to xConflict */ ){ - sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ - int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset); - if( rc==SQLITE_OK ){ - rc = sessionChangesetApply(db, pIter, xFilter, xConflict, pCtx); - } - return rc; + return sqlite3changeset_apply_v2( + db, nChangeset, pChangeset, xFilter, xConflict, pCtx, 0, 0 + ); } /* @@ -183046,6 +184843,31 @@ SQLITE_API int sqlite3changeset_apply( ** attached to handle "db". Invoke the supplied conflict handler callback ** to resolve any conflicts encountered while applying the change. 
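      A minimal sketch, not part of the patch, of calling the sqlite3changeset_apply_v2() interface defined above and capturing the rebase blob it produces. It assumes a build with SQLITE_ENABLE_SESSION and SQLITE_ENABLE_PREUPDATE_HOOK; the conflict handler here simply keeps the local row, and the rebase buffer is assumed to be released with sqlite3_free(), since it is built by the sessions module allocator.

      #include <sqlite3.h>

      /* Resolve every conflict by keeping the row already in the database. */
      static int keepLocalRow(void *pCtx, int eConflict, sqlite3_changeset_iter *pIter){
        (void)pCtx; (void)eConflict; (void)pIter;
        return SQLITE_CHANGESET_OMIT;
      }

      /* Apply a changeset and hand the rebase information back to the caller,
      ** ready to be passed on to sqlite3rebaser_configure(). */
      static int applyWithRebase(
        sqlite3 *db,
        int nChangeset, void *pChangeset,
        void **ppRebase, int *pnRebase
      ){
        return sqlite3changeset_apply_v2(
            db, nChangeset, pChangeset,
            0,                       /* xFilter==NULL: apply to every table */
            keepLocalRow, 0,
            ppRebase, pnRebase
        );
      }
      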
*/ +SQLITE_API int sqlite3changeset_apply_v2_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase +){ + sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ + int rc = sqlite3changeset_start_strm(&pIter, xInput, pIn); + if( rc==SQLITE_OK ){ + rc = sessionChangesetApply( + db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase + ); + } + return rc; +} SQLITE_API int sqlite3changeset_apply_strm( sqlite3 *db, /* Apply change to "main" db of this handle */ int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ @@ -183061,12 +184883,9 @@ SQLITE_API int sqlite3changeset_apply_strm( ), void *pCtx /* First argument passed to xConflict */ ){ - sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ - int rc = sqlite3changeset_start_strm(&pIter, xInput, pIn); - if( rc==SQLITE_OK ){ - rc = sessionChangesetApply(db, pIter, xFilter, xConflict, pCtx); - } - return rc; + return sqlite3changeset_apply_v2_strm( + db, xInput, pIn, xFilter, xConflict, pCtx, 0, 0 + ); } /* @@ -183085,6 +184904,7 @@ struct sqlite3_changegroup { */ static int sessionChangeMerge( SessionTable *pTab, /* Table structure */ + int bRebase, /* True for a rebase hash-table */ int bPatchset, /* True for patchsets */ SessionChange *pExist, /* Existing change */ int op2, /* Second change operation */ @@ -183094,6 +184914,7 @@ static int sessionChangeMerge( SessionChange **ppNew /* OUT: Merged change */ ){ SessionChange *pNew = 0; + int rc = SQLITE_OK; if( !pExist ){ pNew = (SessionChange *)sqlite3_malloc(sizeof(SessionChange) + nRec); @@ -183103,9 +184924,66 @@ static int sessionChangeMerge( memset(pNew, 0, sizeof(SessionChange)); pNew->op = op2; pNew->bIndirect = bIndirect; - pNew->nRecord = nRec; pNew->aRecord = (u8*)&pNew[1]; - memcpy(pNew->aRecord, aRec, nRec); + if( bIndirect==0 || bRebase==0 ){ + pNew->nRecord = nRec; + memcpy(pNew->aRecord, aRec, nRec); + }else{ + int i; + u8 *pIn = aRec; + u8 *pOut = pNew->aRecord; + for(i=0; inCol; i++){ + int nIn = sessionSerialLen(pIn); + if( *pIn==0 ){ + *pOut++ = 0; + }else if( pTab->abPK[i]==0 ){ + *pOut++ = 0xFF; + }else{ + memcpy(pOut, pIn, nIn); + pOut += nIn; + } + pIn += nIn; + } + pNew->nRecord = pOut - pNew->aRecord; + } + }else if( bRebase ){ + if( pExist->op==SQLITE_DELETE && pExist->bIndirect ){ + *ppNew = pExist; + }else{ + int nByte = nRec + pExist->nRecord + sizeof(SessionChange); + pNew = (SessionChange*)sqlite3_malloc(nByte); + if( pNew==0 ){ + rc = SQLITE_NOMEM; + }else{ + int i; + u8 *a1 = pExist->aRecord; + u8 *a2 = aRec; + u8 *pOut; + + memset(pNew, 0, nByte); + pNew->bIndirect = bIndirect || pExist->bIndirect; + pNew->op = op2; + pOut = pNew->aRecord = (u8*)&pNew[1]; + + for(i=0; inCol; i++){ + int n1 = sessionSerialLen(a1); + int n2 = sessionSerialLen(a2); + if( *a1==0xFF || (pTab->abPK[i]==0 && bIndirect) ){ + *pOut++ = 0xFF; + }else if( *a2==0 ){ + memcpy(pOut, a1, n1); + pOut += n1; + }else{ + memcpy(pOut, a2, n2); + pOut += n2; + } + a1 += n1; + a2 += n2; + } + pNew->nRecord = pOut - 
pNew->aRecord; + } + sqlite3_free(pExist); + } }else{ int op1 = pExist->op; @@ -183199,7 +185077,7 @@ static int sessionChangeMerge( } *ppNew = pNew; - return SQLITE_OK; + return rc; } /* @@ -183208,15 +185086,15 @@ static int sessionChangeMerge( */ static int sessionChangesetToHash( sqlite3_changeset_iter *pIter, /* Iterator to read from */ - sqlite3_changegroup *pGrp /* Changegroup object to add changeset to */ + sqlite3_changegroup *pGrp, /* Changegroup object to add changeset to */ + int bRebase /* True if hash table is for rebasing */ ){ u8 *aRec; int nRec; int rc = SQLITE_OK; SessionTable *pTab = 0; - - while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec) ){ + while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){ const char *zNew; int nCol; int op; @@ -183296,7 +185174,7 @@ static int sessionChangesetToHash( } } - rc = sessionChangeMerge(pTab, + rc = sessionChangeMerge(pTab, bRebase, pIter->bPatchset, pExist, op, bIndirect, aRec, nRec, &pChange ); if( rc ) break; @@ -183404,7 +185282,7 @@ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup *pGrp, int nData, void rc = sqlite3changeset_start(&pIter, nData, pData); if( rc==SQLITE_OK ){ - rc = sessionChangesetToHash(pIter, pGrp); + rc = sessionChangesetToHash(pIter, pGrp, 0); } sqlite3changeset_finalize(pIter); return rc; @@ -183435,7 +185313,7 @@ SQLITE_API int sqlite3changegroup_add_strm( rc = sqlite3changeset_start_strm(&pIter, xInput, pIn); if( rc==SQLITE_OK ){ - rc = sessionChangesetToHash(pIter, pGrp); + rc = sessionChangesetToHash(pIter, pGrp, 0); } sqlite3changeset_finalize(pIter); return rc; @@ -183520,6 +185398,349 @@ SQLITE_API int sqlite3changeset_concat_strm( return rc; } +/* +** Changeset rebaser handle. +*/ +struct sqlite3_rebaser { + sqlite3_changegroup grp; /* Hash table */ +}; + +/* +** Buffers a1 and a2 must both contain a sessions module record nCol +** fields in size. This function appends an nCol sessions module +** record to buffer pBuf that is a copy of a1, except that for +** each field that is undefined in a1[], swap in the field from a2[]. +*/ +static void sessionAppendRecordMerge( + SessionBuffer *pBuf, /* Buffer to append to */ + int nCol, /* Number of columns in each record */ + u8 *a1, int n1, /* Record 1 */ + u8 *a2, int n2, /* Record 2 */ + int *pRc /* IN/OUT: error code */ +){ + sessionBufferGrow(pBuf, n1+n2, pRc); + if( *pRc==SQLITE_OK ){ + int i; + u8 *pOut = &pBuf->aBuf[pBuf->nBuf]; + for(i=0; inBuf = pOut-pBuf->aBuf; + assert( pBuf->nBuf<=pBuf->nAlloc ); + } +} + +/* +** This function is called when rebasing a local UPDATE change against one +** or more remote UPDATE changes. The aRec/nRec buffer contains the current +** old.* and new.* records for the change. The rebase buffer (a single +** record) is in aChange/nChange. The rebased change is appended to buffer +** pBuf. +** +** Rebasing the UPDATE involves: +** +** * Removing any changes to fields for which the corresponding field +** in the rebase buffer is set to "replaced" (type 0xFF). If this +** means the UPDATE change updates no fields, nothing is appended +** to the output buffer. +** +** * For each field modified by the local change for which the +** corresponding field in the rebase buffer is not "undefined" (0x00) +** or "replaced" (0xFF), the old.* value is replaced by the value +** in the rebase buffer. 
+*/ +static void sessionAppendPartialUpdate( + SessionBuffer *pBuf, /* Append record here */ + sqlite3_changeset_iter *pIter, /* Iterator pointed at local change */ + u8 *aRec, int nRec, /* Local change */ + u8 *aChange, int nChange, /* Record to rebase against */ + int *pRc /* IN/OUT: Return Code */ +){ + sessionBufferGrow(pBuf, 2+nRec+nChange, pRc); + if( *pRc==SQLITE_OK ){ + int bData = 0; + u8 *pOut = &pBuf->aBuf[pBuf->nBuf]; + int i; + u8 *a1 = aRec; + u8 *a2 = aChange; + + *pOut++ = SQLITE_UPDATE; + *pOut++ = pIter->bIndirect; + for(i=0; inCol; i++){ + int n1 = sessionSerialLen(a1); + int n2 = sessionSerialLen(a2); + if( pIter->abPK[i] || a2[0]==0 ){ + if( !pIter->abPK[i] ) bData = 1; + memcpy(pOut, a1, n1); + pOut += n1; + }else if( a2[0]!=0xFF ){ + bData = 1; + memcpy(pOut, a2, n2); + pOut += n2; + }else{ + *pOut++ = '\0'; + } + a1 += n1; + a2 += n2; + } + if( bData ){ + a2 = aChange; + for(i=0; inCol; i++){ + int n1 = sessionSerialLen(a1); + int n2 = sessionSerialLen(a2); + if( pIter->abPK[i] || a2[0]!=0xFF ){ + memcpy(pOut, a1, n1); + pOut += n1; + }else{ + *pOut++ = '\0'; + } + a1 += n1; + a2 += n2; + } + pBuf->nBuf = (pOut - pBuf->aBuf); + } + } +} + +/* +** pIter is configured to iterate through a changeset. This function rebases +** that changeset according to the current configuration of the rebaser +** object passed as the first argument. If no error occurs and argument xOutput +** is not NULL, then the changeset is returned to the caller by invoking +** xOutput zero or more times and SQLITE_OK returned. Or, if xOutput is NULL, +** then (*ppOut) is set to point to a buffer containing the rebased changeset +** before this function returns. In this case (*pnOut) is set to the size of +** the buffer in bytes. It is the responsibility of the caller to eventually +** free the (*ppOut) buffer using sqlite3_free(). +** +** If an error occurs, an SQLite error code is returned. If ppOut and +** pnOut are not NULL, then the two output parameters are set to 0 before +** returning. +*/ +static int sessionRebase( + sqlite3_rebaser *p, /* Rebaser hash table */ + sqlite3_changeset_iter *pIter, /* Input data */ + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut, /* Context for xOutput callback */ + int *pnOut, /* OUT: Number of bytes in output changeset */ + void **ppOut /* OUT: Inverse of pChangeset */ +){ + int rc = SQLITE_OK; + u8 *aRec = 0; + int nRec = 0; + int bNew = 0; + SessionTable *pTab = 0; + SessionBuffer sOut = {0,0,0}; + + while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, &bNew) ){ + SessionChange *pChange = 0; + int bDone = 0; + + if( bNew ){ + const char *zTab = pIter->zTab; + for(pTab=p->grp.pList; pTab; pTab=pTab->pNext){ + if( 0==sqlite3_stricmp(pTab->zName, zTab) ) break; + } + bNew = 0; + + /* A patchset may not be rebased */ + if( pIter->bPatchset ){ + rc = SQLITE_ERROR; + } + + /* Append a table header to the output for this new table */ + sessionAppendByte(&sOut, pIter->bPatchset ? 
'P' : 'T', &rc); + sessionAppendVarint(&sOut, pIter->nCol, &rc); + sessionAppendBlob(&sOut, pIter->abPK, pIter->nCol, &rc); + sessionAppendBlob(&sOut,(u8*)pIter->zTab,(int)strlen(pIter->zTab)+1,&rc); + } + + if( pTab && rc==SQLITE_OK ){ + int iHash = sessionChangeHash(pTab, 0, aRec, pTab->nChange); + + for(pChange=pTab->apChange[iHash]; pChange; pChange=pChange->pNext){ + if( sessionChangeEqual(pTab, 0, aRec, 0, pChange->aRecord) ){ + break; + } + } + } + + if( pChange ){ + assert( pChange->op==SQLITE_DELETE || pChange->op==SQLITE_INSERT ); + switch( pIter->op ){ + case SQLITE_INSERT: + if( pChange->op==SQLITE_INSERT ){ + bDone = 1; + if( pChange->bIndirect==0 ){ + sessionAppendByte(&sOut, SQLITE_UPDATE, &rc); + sessionAppendByte(&sOut, pIter->bIndirect, &rc); + sessionAppendBlob(&sOut, pChange->aRecord, pChange->nRecord, &rc); + sessionAppendBlob(&sOut, aRec, nRec, &rc); + } + } + break; + + case SQLITE_UPDATE: + bDone = 1; + if( pChange->op==SQLITE_DELETE ){ + if( pChange->bIndirect==0 ){ + u8 *pCsr = aRec; + sessionSkipRecord(&pCsr, pIter->nCol); + sessionAppendByte(&sOut, SQLITE_INSERT, &rc); + sessionAppendByte(&sOut, pIter->bIndirect, &rc); + sessionAppendRecordMerge(&sOut, pIter->nCol, + pCsr, nRec-(pCsr-aRec), + pChange->aRecord, pChange->nRecord, &rc + ); + } + }else{ + sessionAppendPartialUpdate(&sOut, pIter, + aRec, nRec, pChange->aRecord, pChange->nRecord, &rc + ); + } + break; + + default: + assert( pIter->op==SQLITE_DELETE ); + bDone = 1; + if( pChange->op==SQLITE_INSERT ){ + sessionAppendByte(&sOut, SQLITE_DELETE, &rc); + sessionAppendByte(&sOut, pIter->bIndirect, &rc); + sessionAppendRecordMerge(&sOut, pIter->nCol, + pChange->aRecord, pChange->nRecord, aRec, nRec, &rc + ); + } + break; + } + } + + if( bDone==0 ){ + sessionAppendByte(&sOut, pIter->op, &rc); + sessionAppendByte(&sOut, pIter->bIndirect, &rc); + sessionAppendBlob(&sOut, aRec, nRec, &rc); + } + if( rc==SQLITE_OK && xOutput && sOut.nBuf>SESSIONS_STRM_CHUNK_SIZE ){ + rc = xOutput(pOut, sOut.aBuf, sOut.nBuf); + sOut.nBuf = 0; + } + if( rc ) break; + } + + if( rc!=SQLITE_OK ){ + sqlite3_free(sOut.aBuf); + memset(&sOut, 0, sizeof(sOut)); + } + + if( rc==SQLITE_OK ){ + if( xOutput ){ + if( sOut.nBuf>0 ){ + rc = xOutput(pOut, sOut.aBuf, sOut.nBuf); + } + }else{ + *ppOut = (void*)sOut.aBuf; + *pnOut = sOut.nBuf; + sOut.aBuf = 0; + } + } + sqlite3_free(sOut.aBuf); + return rc; +} + +/* +** Create a new rebaser object. +*/ +SQLITE_API int sqlite3rebaser_create(sqlite3_rebaser **ppNew){ + int rc = SQLITE_OK; + sqlite3_rebaser *pNew; + + pNew = sqlite3_malloc(sizeof(sqlite3_rebaser)); + if( pNew==0 ){ + rc = SQLITE_NOMEM; + }else{ + memset(pNew, 0, sizeof(sqlite3_rebaser)); + } + *ppNew = pNew; + return rc; +} + +/* +** Call this one or more times to configure a rebaser. 
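      A hedged usage sketch, not part of the patch, of the rebaser workflow: it uses sqlite3rebaser_create() defined above together with sqlite3rebaser_configure(), sqlite3rebaser_rebase() and sqlite3rebaser_delete() defined just below. All input buffers are assumed to have been obtained elsewhere (the rebase blob from a prior sqlite3changeset_apply_v2() call).

      #include <sqlite3.h>

      /* Rewrite a local changeset (pLocal/nLocal) so that it applies on top of
      ** remote changes, using the rebase blob (pRebase/nRebase) returned by a
      ** prior sqlite3changeset_apply_v2() call.  On success *ppOut/*pnOut hold
      ** the rebased changeset, to be freed with sqlite3_free(). */
      static int rebaseLocalChanges(
        void *pRebase, int nRebase,
        void *pLocal, int nLocal,
        void **ppOut, int *pnOut
      ){
        sqlite3_rebaser *pRebaser = 0;
        int rc = sqlite3rebaser_create(&pRebaser);
        if( rc==SQLITE_OK ){
          rc = sqlite3rebaser_configure(pRebaser, nRebase, pRebase);
        }
        if( rc==SQLITE_OK ){
          rc = sqlite3rebaser_rebase(pRebaser, nLocal, pLocal, pnOut, ppOut);
        }
        sqlite3rebaser_delete(pRebaser);
        return rc;
      }
      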
+*/ +SQLITE_API int sqlite3rebaser_configure( + sqlite3_rebaser *p, + int nRebase, const void *pRebase +){ + sqlite3_changeset_iter *pIter = 0; /* Iterator opened on pData/nData */ + int rc; /* Return code */ + rc = sqlite3changeset_start(&pIter, nRebase, (void*)pRebase); + if( rc==SQLITE_OK ){ + rc = sessionChangesetToHash(pIter, &p->grp, 1); + } + sqlite3changeset_finalize(pIter); + return rc; +} + +/* +** Rebase a changeset according to current rebaser configuration +*/ +SQLITE_API int sqlite3rebaser_rebase( + sqlite3_rebaser *p, + int nIn, const void *pIn, + int *pnOut, void **ppOut +){ + sqlite3_changeset_iter *pIter = 0; /* Iterator to skip through input */ + int rc = sqlite3changeset_start(&pIter, nIn, (void*)pIn); + + if( rc==SQLITE_OK ){ + rc = sessionRebase(p, pIter, 0, 0, pnOut, ppOut); + sqlite3changeset_finalize(pIter); + } + + return rc; +} + +/* +** Rebase a changeset according to current rebaser configuration +*/ +SQLITE_API int sqlite3rebaser_rebase_strm( + sqlite3_rebaser *p, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +){ + sqlite3_changeset_iter *pIter = 0; /* Iterator to skip through input */ + int rc = sqlite3changeset_start_strm(&pIter, xInput, pIn); + + if( rc==SQLITE_OK ){ + rc = sessionRebase(p, pIter, xOutput, pOut, 0, 0); + sqlite3changeset_finalize(pIter); + } + + return rc; +} + +/* +** Destroy a rebaser object +*/ +SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p){ + if( p ){ + sessionDeleteTable(p->grp.pList); + sqlite3_free(p); + } +} + #endif /* SQLITE_ENABLE_SESSION && SQLITE_ENABLE_PREUPDATE_HOOK */ /************** End of sqlite3session.c **************************************/ @@ -188055,7 +190276,8 @@ static unsigned int fts5yy_find_shift_action( #endif do{ i = fts5yy_shift_ofst[stateno]; - assert( i>=0 && i+fts5YYNFTS5TOKEN<=sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0]) ); + assert( i>=0 ); + assert( i+fts5YYNFTS5TOKEN<=(int)sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0]) ); assert( iLookAhead!=fts5YYNOCODE ); assert( iLookAhead < fts5YYNFTS5TOKEN ); i += iLookAhead; @@ -192497,7 +194719,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( ** no token characters at all. (e.g ... MATCH '""'). 
*/ sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, sizeof(Fts5ExprPhrase)); }else if( sCtx.pPhrase->nTerm ){ - sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = bPrefix; + sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = (u8)bPrefix; } pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase; } @@ -194960,6 +197182,7 @@ static void fts5DataWrite(Fts5Index *p, i64 iRowid, const u8 *pData, int nData){ sqlite3_bind_blob(p->pWriter, 2, pData, nData, SQLITE_STATIC); sqlite3_step(p->pWriter); p->rc = sqlite3_reset(p->pWriter); + sqlite3_bind_null(p->pWriter, 2); } /* @@ -196588,6 +198811,7 @@ static void fts5SegIterSeekInit( bDlidx = (val & 0x0001); } p->rc = sqlite3_reset(pIdxSelect); + sqlite3_bind_null(pIdxSelect, 2); if( iPgpgnoFirst ){ iPg = pSeg->pgnoFirst; @@ -197800,6 +200024,7 @@ static int fts5AllocateSegid(Fts5Index *p, Fts5Structure *pStruct){ sqlite3_bind_blob(pIdxSelect, 2, aBlob, 2, SQLITE_STATIC); assert( sqlite3_step(pIdxSelect)!=SQLITE_ROW ); p->rc = sqlite3_reset(pIdxSelect); + sqlite3_bind_null(pIdxSelect, 2); } } #endif @@ -197926,6 +200151,7 @@ static void fts5WriteFlushBtree(Fts5Index *p, Fts5SegWriter *pWriter){ sqlite3_bind_int64(p->pIdxWriter, 3, bFlag + ((i64)pWriter->iBtPage<<1)); sqlite3_step(p->pIdxWriter); p->rc = sqlite3_reset(p->pIdxWriter); + sqlite3_bind_null(p->pIdxWriter, 2); } pWriter->iBtPage = 0; } @@ -203333,7 +205559,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2018-01-22 18:45:57 0c55d179733b46d8d0ba4d88e01a25e10677046ee3da1d5b1581e86726f2171d", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2018-04-02 11:04:16 736b53f57f70b23172c30880186dce7ad9baa3b74e3838cae5847cffb98f5cd2", -1, SQLITE_TRANSIENT); } static int fts5Init(sqlite3 *db){ @@ -203909,6 +206135,7 @@ static int fts5StorageInsertDocsize( sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); sqlite3_step(pReplace); rc = sqlite3_reset(pReplace); + sqlite3_bind_null(pReplace, 2); } } return rc; @@ -204569,6 +206796,7 @@ static int sqlite3Fts5StorageConfigValue( } sqlite3_step(pReplace); rc = sqlite3_reset(pReplace); + sqlite3_bind_null(pReplace, 1); } if( rc==SQLITE_OK && pVal ){ int iNew = p->pConfig->iCookie + 1; @@ -207601,9 +209829,9 @@ SQLITE_API int sqlite3_stmt_init( #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */ /************** End of stmt.c ************************************************/ -#if __LINE__!=207604 +#if __LINE__!=209832 #undef SQLITE_SOURCE_ID -#define SQLITE_SOURCE_ID "2018-01-22 18:45:57 0c55d179733b46d8d0ba4d88e01a25e10677046ee3da1d5b1581e86726f2alt2" +#define SQLITE_SOURCE_ID "2018-04-02 11:04:16 736b53f57f70b23172c30880186dce7ad9baa3b74e3838cae5847cffb98falt2" #endif /* Return the source-id for this library */ SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } diff --git a/db/sqlite3/src/sqlite3.h b/db/sqlite3/src/sqlite3.h index d8138b709ae3..b501c4dbe923 100644 --- a/db/sqlite3/src/sqlite3.h +++ b/db/sqlite3/src/sqlite3.h @@ -123,9 +123,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
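      The version macros and the run-time functions named above are conventionally cross-checked at startup. A small sketch, assuming nothing beyond the standard C library:

      #include <assert.h>
      #include <string.h>
      #include <sqlite3.h>

      /* Abort early if the sqlite3.h seen at compile time and the library linked
      ** at run time disagree about the SQLite version. */
      static void checkSqliteVersion(void){
        assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
        assert( strcmp(sqlite3_libversion(), SQLITE_VERSION)==0 );
        assert( strcmp(sqlite3_sourceid(), SQLITE_SOURCE_ID)==0 );
      }
      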
*/ -#define SQLITE_VERSION "3.22.0" -#define SQLITE_VERSION_NUMBER 3022000 -#define SQLITE_SOURCE_ID "2018-01-22 18:45:57 0c55d179733b46d8d0ba4d88e01a25e10677046ee3da1d5b1581e86726f2171d" +#define SQLITE_VERSION "3.23.0" +#define SQLITE_VERSION_NUMBER 3023000 +#define SQLITE_SOURCE_ID "2018-04-02 11:04:16 736b53f57f70b23172c30880186dce7ad9baa3b74e3838cae5847cffb98f5cd2" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1064,6 +1064,12 @@ struct sqlite3_io_methods { ** so that all subsequent write operations are independent. ** ^SQLite will never invoke SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE without ** a prior successful call to [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE]. +** +**
  • [[SQLITE_FCNTL_LOCK_TIMEOUT]] +** The [SQLITE_FCNTL_LOCK_TIMEOUT] opcode causes attempts to obtain +** a file lock using the xLock or xShmLock methods of the VFS to wait +** for up to M milliseconds before failing, where M is the single +** unsigned integer parameter. ** */ #define SQLITE_FCNTL_LOCKSTATE 1 @@ -1098,6 +1104,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_BEGIN_ATOMIC_WRITE 31 #define SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32 #define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33 +#define SQLITE_FCNTL_LOCK_TIMEOUT 34 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -2054,11 +2061,13 @@ struct sqlite3_mem_methods { ** connections at all to the database. If so, it performs a checkpoint ** operation before closing the connection. This option may be used to ** override this behaviour. The first parameter passed to this operation -** is an integer - non-zero to disable checkpoints-on-close, or zero (the -** default) to enable them. The second parameter is a pointer to an integer +** is an integer - positive to disable checkpoints-on-close, or zero (the +** default) to enable them, and negative to leave the setting unchanged. +** The second parameter is a pointer to an integer ** into which is written 0 or 1 to indicate whether checkpoints-on-close ** have been disabled - 0 if they are not disabled, 1 if they are. **
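      A minimal sketch, not part of the patch, of using the SQLITE_FCNTL_LOCK_TIMEOUT opcode documented at the top of this hunk. It assumes the underlying VFS implements the opcode; a VFS that does not recognise it returns SQLITE_NOTFOUND from sqlite3_file_control() and locking behaviour is unchanged.

      #include <sqlite3.h>

      /* Ask the VFS to wait up to iMillis milliseconds for file or shm locks on
      ** the "main" database instead of failing immediately. */
      static int setLockTimeout(sqlite3 *db, int iMillis){
        return sqlite3_file_control(db, "main", SQLITE_FCNTL_LOCK_TIMEOUT, &iMillis);
      }
      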
  • +** **
    SQLITE_DBCONFIG_ENABLE_QPSG
    **
    ^(The SQLITE_DBCONFIG_ENABLE_QPSG option activates or deactivates ** the [query planner stability guarantee] (QPSG). When the QPSG is active, @@ -2068,13 +2077,20 @@ struct sqlite3_mem_methods { ** slower. But the QPSG has the advantage of more predictable behavior. With ** the QPSG active, SQLite will always use the same query plan in the field as ** was used during testing in the lab. +** The first argument to this setting is an integer which is 0 to disable +** the QPSG, positive to enable QPSG, or negative to leave the setting +** unchanged. The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether the QPSG is disabled or enabled +** following this call. **
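      A short sketch, not part of the patch, of the tri-state convention described above, using sqlite3_db_config(); passing -1 as the integer argument queries the current state without changing it.

      #include <sqlite3.h>

      /* Turn on the query planner stability guarantee and report the state that
      ** is in effect afterwards.  Pass -1 instead of 1 to merely query it. */
      static int enableQpsg(sqlite3 *db, int *pbEnabled){
        return sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_QPSG, 1, pbEnabled);
      }
      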
    +** **
    SQLITE_DBCONFIG_TRIGGER_EQP
    **
    By default, the output of EXPLAIN QUERY PLAN commands does not ** include output for any operations performed by trigger programs. This ** option is used to set or clear (the default) a flag that governs this ** behavior. The first parameter passed to this operation is an integer - -** non-zero to enable output for trigger programs, or zero to disable it. +** positive to enable output for trigger programs, or zero to disable it, +** or negative to leave the setting unchanged. ** The second parameter is a pointer to an integer into which is written ** 0 or 1 to indicate whether output-for-triggers has been disabled - 0 if ** it is not disabled, 1 if it is. @@ -2496,16 +2512,16 @@ SQLITE_API void sqlite3_free_table(char **result); ** ** These routines are work-alikes of the "printf()" family of functions ** from the standard C library. -** These routines understand most of the common K&R formatting options, -** plus some additional non-standard formats, detailed below. -** Note that some of the more obscure formatting options from recent -** C-library standards are omitted from this implementation. +** These routines understand most of the common formatting options from +** the standard library printf() +** plus some additional non-standard formats ([%q], [%Q], [%w], and [%z]). +** See the [built-in printf()] documentation for details. ** ** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their -** results into memory obtained from [sqlite3_malloc()]. +** results into memory obtained from [sqlite3_malloc64()]. ** The strings returned by these two routines should be ** released by [sqlite3_free()]. ^Both routines return a -** NULL pointer if [sqlite3_malloc()] is unable to allocate enough +** NULL pointer if [sqlite3_malloc64()] is unable to allocate enough ** memory to hold the resulting string. ** ** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from @@ -2529,71 +2545,7 @@ SQLITE_API void sqlite3_free_table(char **result); ** ** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). ** -** These routines all implement some additional formatting -** options that are useful for constructing SQL statements. -** All of the usual printf() formatting options apply. In addition, there -** is are "%q", "%Q", "%w" and "%z" options. -** -** ^(The %q option works like %s in that it substitutes a nul-terminated -** string from the argument list. But %q also doubles every '\'' character. -** %q is designed for use inside a string literal.)^ By doubling each '\'' -** character it escapes that character and allows it to be inserted into -** the string. -** -** For example, assume the string variable zText contains text as follows: -** -**
    -**  char *zText = "It's a happy day!";
    -** 
    -** -** One can use this text in an SQL statement as follows: -** -**
    -**  char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES('%q')", zText);
    -**  sqlite3_exec(db, zSQL, 0, 0, 0);
    -**  sqlite3_free(zSQL);
    -** 
    -** -** Because the %q format string is used, the '\'' character in zText -** is escaped and the SQL generated is as follows: -** -**
    -**  INSERT INTO table1 VALUES('It''s a happy day!')
    -** 
    -** -** This is correct. Had we used %s instead of %q, the generated SQL -** would have looked like this: -** -**
    -**  INSERT INTO table1 VALUES('It's a happy day!');
    -** 
    -** -** This second example is an SQL syntax error. As a general rule you should -** always use %q instead of %s when inserting text into a string literal. -** -** ^(The %Q option works like %q except it also adds single quotes around -** the outside of the total string. Additionally, if the parameter in the -** argument list is a NULL pointer, %Q substitutes the text "NULL" (without -** single quotes).)^ So, for example, one could say: -** -**
    -**  char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES(%Q)", zText);
    -**  sqlite3_exec(db, zSQL, 0, 0, 0);
    -**  sqlite3_free(zSQL);
    -** 
    -** -** The code above will render a correct SQL statement in the zSQL -** variable even if the zText variable is a NULL pointer. -** -** ^(The "%w" formatting option is like "%q" except that it expects to -** be contained within double-quotes instead of single quotes, and it -** escapes the double-quote character instead of the single-quote -** character.)^ The "%w" formatting option is intended for safely inserting -** table and column names into a constructed SQL statement. -** -** ^(The "%z" formatting option works like "%s" but with the -** addition that after the string has been read and copied into -** the result, [sqlite3_free()] is called on the input string.)^ +** See also: [built-in printf()], [printf() SQL function] */ SQLITE_API char *sqlite3_mprintf(const char*,...); SQLITE_API char *sqlite3_vmprintf(const char*, va_list); @@ -3659,13 +3611,13 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); ** or [GLOB] operator or if the parameter is compared to an indexed column ** and the [SQLITE_ENABLE_STAT3] compile-time option is enabled. ** +** ** **

    ^sqlite3_prepare_v3() differs from sqlite3_prepare_v2() only in having ** the extra prepFlags parameter, which is a bit array consisting of zero or ** more of the [SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_*] flags. ^The ** sqlite3_prepare_v2() interface works exactly the same as ** sqlite3_prepare_v3() with a zero prepFlags parameter. -** */ SQLITE_API int sqlite3_prepare( sqlite3 *db, /* Database handle */ @@ -7294,6 +7246,15 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0. **

    ** +** [[SQLITE_DBSTATUS_CACHE_SPILL]] ^(
    SQLITE_DBSTATUS_CACHE_SPILL
    +**
          This parameter returns the number of dirty cache entries that have +** been written to disk in the middle of a transaction due to the page +** cache overflowing. Transactions are more efficient if they are written +** to disk all at once. When pages spill mid-transaction, that introduces +** additional overhead. This parameter can be used to help identify +** inefficiencies that can be resolved by increasing the cache size. +**
      
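      A small sketch, not part of the patch, of reading the new counter through sqlite3_db_status():

      #include <stdio.h>
      #include <sqlite3.h>

      /* Print how many dirty pages have been spilled to disk mid-transaction. */
      static void reportCacheSpill(sqlite3 *db){
        int nCur = 0, nHiwtr = 0;
        if( sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_SPILL, &nCur, &nHiwtr, 0)
              ==SQLITE_OK ){
          printf("pages spilled mid-transaction: %d\n", nCur);
        }
      }
      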
    +** ** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(
    SQLITE_DBSTATUS_DEFERRED_FKS
    **
    This parameter returns zero for the current value if and only if ** all foreign key constraints (deferred or immediate) have been @@ -7313,7 +7274,8 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r #define SQLITE_DBSTATUS_CACHE_WRITE 9 #define SQLITE_DBSTATUS_DEFERRED_FKS 10 #define SQLITE_DBSTATUS_CACHE_USED_SHARED 11 -#define SQLITE_DBSTATUS_MAX 11 /* Largest defined DBSTATUS */ +#define SQLITE_DBSTATUS_CACHE_SPILL 12 +#define SQLITE_DBSTATUS_MAX 12 /* Largest defined DBSTATUS */ /* @@ -8793,6 +8755,128 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( */ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); +/* +** CAPI3REF: Serialize a database +** +** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory +** that is a serialization of the S database on [database connection] D. +** If P is not a NULL pointer, then the size of the database in bytes +** is written into *P. +** +** For an ordinary on-disk database file, the serialization is just a +** copy of the disk file. For an in-memory database or a "TEMP" database, +** the serialization is the same sequence of bytes which would be written +** to disk if that database where backed up to disk. +** +** The usual case is that sqlite3_serialize() copies the serialization of +** the database into memory obtained from [sqlite3_malloc64()] and returns +** a pointer to that memory. The caller is responsible for freeing the +** returned value to avoid a memory leak. However, if the F argument +** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations +** are made, and the sqlite3_serialize() function will return a pointer +** to the contiguous memory representation of the database that SQLite +** is currently using for that database, or NULL if the no such contiguous +** memory representation of the database exists. A contiguous memory +** representation of the database will usually only exist if there has +** been a prior call to [sqlite3_deserialize(D,S,...)] with the same +** values of D and S. +** The size of the database is written into *P even if the +** SQLITE_SERIALIZE_NOCOPY bit is set but no contigious copy +** of the database exists. +** +** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the +** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory +** allocation error occurs. +** +** This interface is only available if SQLite is compiled with the +** [SQLITE_ENABLE_DESERIALIZE] option. +*/ +SQLITE_API unsigned char *sqlite3_serialize( + sqlite3 *db, /* The database connection */ + const char *zSchema, /* Which DB to serialize. ex: "main", "temp", ... */ + sqlite3_int64 *piSize, /* Write size of the DB here, if not NULL */ + unsigned int mFlags /* Zero or more SQLITE_SERIALIZE_* flags */ +); + +/* +** CAPI3REF: Flags for sqlite3_serialize +** +** Zero or more of the following constants can be OR-ed together for +** the F argument to [sqlite3_serialize(D,S,P,F)]. +** +** SQLITE_SERIALIZE_NOCOPY means that [sqlite3_serialize()] will return +** a pointer to contiguous in-memory database that it is currently using, +** without making a copy of the database. If SQLite is not currently using +** a contiguous in-memory database, then this option causes +** [sqlite3_serialize()] to return a NULL pointer. SQLite will only be +** using a contiguous in-memory database if it has been initialized by a +** prior call to [sqlite3_deserialize()]. 
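      A minimal sketch, not part of the patch, of taking a snapshot with the sqlite3_serialize() interface declared above; it assumes a build with SQLITE_ENABLE_DESERIALIZE.

      #include <sqlite3.h>

      /* Serialize the "main" schema into a heap buffer.  The buffer is obtained
      ** from the SQLite allocator and must be released with sqlite3_free(). */
      static unsigned char *snapshotMainDb(sqlite3 *db, sqlite3_int64 *pnByte){
        return sqlite3_serialize(db, "main", pnByte, 0);
      }
      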
+*/ +#define SQLITE_SERIALIZE_NOCOPY 0x001 /* Do no memory allocations */ + +/* +** CAPI3REF: Deserialize a database +** +** The sqlite3_deserialize(D,S,P,N,M,F) interface causes the +** [database connection] D to disconnect from database S and then +** reopen S as an in-memory database based on the serialization contained +** in P. The serialized database P is N bytes in size. M is the size of +** the buffer P, which might be larger than N. If M is larger than N, and +** the SQLITE_DESERIALIZE_READONLY bit is not set in F, then SQLite is +** permitted to add content to the in-memory database as long as the total +** size does not exceed M bytes. +** +** If the SQLITE_DESERIALIZE_FREEONCLOSE bit is set in F, then SQLite will +** invoke sqlite3_free() on the serialization buffer when the database +** connection closes. If the SQLITE_DESERIALIZE_RESIZEABLE bit is set, then +** SQLite will try to increase the buffer size using sqlite3_realloc64() +** if writes on the database cause it to grow larger than M bytes. +** +** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the +** database is currently in a read transaction or is involved in a backup +** operation. +** +** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the +** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then +** [sqlite3_free()] is invoked on argument P prior to returning. +** +** This interface is only available if SQLite is compiled with the +** [SQLITE_ENABLE_DESERIALIZE] option. +*/ +SQLITE_API int sqlite3_deserialize( + sqlite3 *db, /* The database connection */ + const char *zSchema, /* Which DB to reopen with the deserialization */ + unsigned char *pData, /* The serialized database content */ + sqlite3_int64 szDb, /* Number bytes in the deserialization */ + sqlite3_int64 szBuf, /* Total size of buffer pData[] */ + unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */ +); + +/* +** CAPI3REF: Flags for sqlite3_deserialize() +** +** The following are allowed values for 6th argument (the F argument) to +** the [sqlite3_deserialize(D,S,P,N,M,F)] interface. +** +** The SQLITE_DESERIALIZE_FREEONCLOSE means that the database serialization +** in the P argument is held in memory obtained from [sqlite3_malloc64()] +** and that SQLite should take ownership of this memory and automatically +** free it when it has finished using it. Without this flag, the caller +** is resposible for freeing any dynamically allocated memory. +** +** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to +** grow the size of the database using calls to [sqlite3_realloc64()]. This +** flag should only be used if SQLITE_DESERIALIZE_FREEONCLOSE is also used. +** Without this flag, the deserialized database cannot increase in size beyond +** the number of bytes specified by the M parameter. +** +** The SQLITE_DESERIALIZE_READONLY flag means that the deserialized database +** should be treated as read-only. +*/ +#define SQLITE_DESERIALIZE_FREEONCLOSE 1 /* Call sqlite3_free() on close */ +#define SQLITE_DESERIALIZE_RESIZEABLE 2 /* Resize using sqlite3_realloc64() */ +#define SQLITE_DESERIALIZE_READONLY 4 /* Database is read-only */ + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. @@ -8940,16 +9024,23 @@ extern "C" { /* ** CAPI3REF: Session Object Handle +** +** An instance of this object is a [session] that can be used to +** record changes to a database. 
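      And the reverse direction, again only a sketch under the SQLITE_ENABLE_DESERIALIZE build option: the image is copied into memory owned by SQLite so that the FREEONCLOSE and RESIZEABLE flags documented above can be used.

      #include <string.h>
      #include <sqlite3.h>

      /* Replace the "main" database of an open connection with the serialized
      ** image in pImg/nImg.  If the call fails with FREEONCLOSE set, SQLite
      ** frees the copy itself, so the error paths do not leak. */
      static int loadSerializedDb(sqlite3 *db, const unsigned char *pImg,
                                  sqlite3_int64 nImg){
        unsigned char *pCopy = sqlite3_malloc64((sqlite3_uint64)nImg);
        if( pCopy==0 ) return SQLITE_NOMEM;
        memcpy(pCopy, pImg, (size_t)nImg);
        return sqlite3_deserialize(db, "main", pCopy, nImg, nImg,
                   SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
      }
      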
*/ typedef struct sqlite3_session sqlite3_session; /* ** CAPI3REF: Changeset Iterator Handle +** +** An instance of this object acts as a cursor for iterating +** over the elements of a [changeset] or [patchset]. */ typedef struct sqlite3_changeset_iter sqlite3_changeset_iter; /* ** CAPI3REF: Create A New Session Object +** CONSTRUCTOR: sqlite3_session ** ** Create a new session object attached to database handle db. If successful, ** a pointer to the new object is written to *ppSession and SQLITE_OK is @@ -8986,6 +9077,7 @@ SQLITE_API int sqlite3session_create( /* ** CAPI3REF: Delete A Session Object +** DESTRUCTOR: sqlite3_session ** ** Delete a session object previously allocated using ** [sqlite3session_create()]. Once a session object has been deleted, the @@ -9001,6 +9093,7 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* ** CAPI3REF: Enable Or Disable A Session Object +** METHOD: sqlite3_session ** ** Enable or disable the recording of changes by a session object. When ** enabled, a session object records changes made to the database. When @@ -9020,6 +9113,7 @@ SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable); /* ** CAPI3REF: Set Or Clear the Indirect Change Flag +** METHOD: sqlite3_session ** ** Each change recorded by a session object is marked as either direct or ** indirect. A change is marked as indirect if either: @@ -9049,6 +9143,7 @@ SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect) /* ** CAPI3REF: Attach A Table To A Session Object +** METHOD: sqlite3_session ** ** If argument zTab is not NULL, then it is the name of a table to attach ** to the session object passed as the first argument. All subsequent changes @@ -9111,6 +9206,7 @@ SQLITE_API int sqlite3session_attach( /* ** CAPI3REF: Set a table filter on a Session Object. +** METHOD: sqlite3_session ** ** The second argument (xFilter) is the "filter callback". For changes to rows ** in tables that are not attached to the Session object, the filter is called @@ -9129,6 +9225,7 @@ SQLITE_API void sqlite3session_table_filter( /* ** CAPI3REF: Generate A Changeset From A Session Object +** METHOD: sqlite3_session ** ** Obtain a changeset containing changes to the tables attached to the ** session object passed as the first argument. If successful, @@ -9238,7 +9335,8 @@ SQLITE_API int sqlite3session_changeset( ); /* -** CAPI3REF: Load The Difference Between Tables Into A Session +** CAPI3REF: Load The Difference Between Tables Into A Session +** METHOD: sqlite3_session ** ** If it is not already attached to the session object passed as the first ** argument, this function attaches table zTbl in the same manner as the @@ -9303,6 +9401,7 @@ SQLITE_API int sqlite3session_diff( /* ** CAPI3REF: Generate A Patchset From A Session Object +** METHOD: sqlite3_session ** ** The differences between a patchset and a changeset are that: ** @@ -9354,6 +9453,7 @@ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); /* ** CAPI3REF: Create An Iterator To Traverse A Changeset +** CONSTRUCTOR: sqlite3_changeset_iter ** ** Create an iterator used to iterate through the contents of a changeset. ** If successful, *pp is set to point to the iterator handle and SQLITE_OK @@ -9394,6 +9494,7 @@ SQLITE_API int sqlite3changeset_start( /* ** CAPI3REF: Advance A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** This function may only be used with iterators created by function ** [sqlite3changeset_start()]. 
If it is called on an iterator passed to @@ -9418,6 +9519,7 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); /* ** CAPI3REF: Obtain The Current Operation From A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** The pIter argument passed to this function may either be an iterator ** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator @@ -9452,6 +9554,7 @@ SQLITE_API int sqlite3changeset_op( /* ** CAPI3REF: Obtain The Primary Key Definition Of A Table +** METHOD: sqlite3_changeset_iter ** ** For each modified table, a changeset includes the following: ** @@ -9483,6 +9586,7 @@ SQLITE_API int sqlite3changeset_pk( /* ** CAPI3REF: Obtain old.* Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** The pIter argument passed to this function may either be an iterator ** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator @@ -9513,6 +9617,7 @@ SQLITE_API int sqlite3changeset_old( /* ** CAPI3REF: Obtain new.* Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** The pIter argument passed to this function may either be an iterator ** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator @@ -9546,6 +9651,7 @@ SQLITE_API int sqlite3changeset_new( /* ** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** This function should only be used with iterator objects passed to a ** conflict-handler callback by [sqlite3changeset_apply()] with either @@ -9573,6 +9679,7 @@ SQLITE_API int sqlite3changeset_conflict( /* ** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations +** METHOD: sqlite3_changeset_iter ** ** This function may only be called with an iterator passed to an ** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case @@ -9589,6 +9696,7 @@ SQLITE_API int sqlite3changeset_fk_conflicts( /* ** CAPI3REF: Finalize A Changeset Iterator +** METHOD: sqlite3_changeset_iter ** ** This function is used to finalize an iterator allocated with ** [sqlite3changeset_start()]. @@ -9605,6 +9713,7 @@ SQLITE_API int sqlite3changeset_fk_conflicts( ** to that error is returned by this function. Otherwise, SQLITE_OK is ** returned. This is to allow the following pattern (pseudo-code): ** +**
     **   sqlite3changeset_start();
     **   while( SQLITE_ROW==sqlite3changeset_next() ){
     **     // Do something with change.
    @@ -9613,6 +9722,7 @@ SQLITE_API int sqlite3changeset_fk_conflicts(
     **   if( rc!=SQLITE_OK ){
     **     // An error has occurred 
     **   }
    +** 
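The pseudo-code pattern above can be fleshed out into a small self-contained routine. The sketch below is illustrative only (the name record_and_dump and the printf output are invented; a build with SQLITE_ENABLE_SESSION and SQLITE_ENABLE_PREUPDATE_HOOK is assumed): it creates a session on the "main" database, records whatever the application writes while the session is open, extracts the changeset, and then walks it with the start/next/finalize pattern.

#include <stdio.h>
#include "sqlite3.h"

static int record_and_dump(sqlite3 *db){
  sqlite3_session *pSession = 0;
  sqlite3_changeset_iter *pIter = 0;
  void *pChangeset = 0;
  int nChangeset = 0;
  int rc;

  rc = sqlite3session_create(db, "main", &pSession);
  if( rc==SQLITE_OK ){
    rc = sqlite3session_attach(pSession, 0);   /* 0 => record all tables */
  }

  /* ... application writes to db here; the session records them ... */

  if( rc==SQLITE_OK ){
    rc = sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
  }
  if( rc==SQLITE_OK ){
    while( SQLITE_ROW==sqlite3changeset_next(pIter) ){
      const char *zTab;
      int nCol, op, bIndirect;
      sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect);
      printf("%s on %s (%d columns)\n",
          op==SQLITE_INSERT ? "INSERT" :
          op==SQLITE_DELETE ? "DELETE" : "UPDATE", zTab, nCol);
    }
    rc = sqlite3changeset_finalize(pIter);  /* reports any error hit above */
  }

  sqlite3_free(pChangeset);
  sqlite3session_delete(pSession);
  return rc;
}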
    */ SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); @@ -9660,6 +9770,7 @@ SQLITE_API int sqlite3changeset_invert( ** sqlite3_changegroup object. Calling it produces similar results as the ** following code fragment: ** +**
     **   sqlite3_changegroup *pGrp;
     **   rc = sqlite3_changegroup_new(&pGrp);
     **   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
    @@ -9670,6 +9781,7 @@ SQLITE_API int sqlite3changeset_invert(
     **     *ppOut = 0;
     **     *pnOut = 0;
     **   }
    +** 
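The changegroup pattern in the fragment above generalizes naturally to any number of inputs, which is the usual reason to prefer it over chained sqlite3changeset_concat() calls when combining many changesets. A sketch (combine_changesets is an invented name; the input arrays are assumed to be supplied by the caller, and the combined output is eventually released with sqlite3_free()):

static int combine_changesets(
  int nChg, void **apChg, int *anChg,   /* Array of input changesets */
  int *pnOut, void **ppOut              /* OUT: combined changeset */
){
  sqlite3_changegroup *pGrp = 0;
  int rc, i;
  *pnOut = 0;
  *ppOut = 0;
  rc = sqlite3changegroup_new(&pGrp);
  for(i=0; rc==SQLITE_OK && i<nChg; i++){
    rc = sqlite3changegroup_add(pGrp, anChg[i], apChg[i]);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
  }
  if( pGrp ) sqlite3changegroup_delete(pGrp);
  return rc;
}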
    ** ** Refer to the sqlite3_changegroup documentation below for details. */ @@ -9685,11 +9797,15 @@ SQLITE_API int sqlite3changeset_concat( /* ** CAPI3REF: Changegroup Handle +** +** A changegroup is an object used to combine two or more +** [changesets] or [patchsets] */ typedef struct sqlite3_changegroup sqlite3_changegroup; /* ** CAPI3REF: Create A New Changegroup Object +** CONSTRUCTOR: sqlite3_changegroup ** ** An sqlite3_changegroup object is used to combine two or more changesets ** (or patchsets) into a single changeset (or patchset). A single changegroup @@ -9727,6 +9843,7 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); /* ** CAPI3REF: Add A Changeset To A Changegroup +** METHOD: sqlite3_changegroup ** ** Add all changes within the changeset (or patchset) in buffer pData (size ** nData bytes) to the changegroup. @@ -9804,6 +9921,7 @@ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pDa /* ** CAPI3REF: Obtain A Composite Changeset From A Changegroup +** METHOD: sqlite3_changegroup ** ** Obtain a buffer containing a changeset (or patchset) representing the ** current contents of the changegroup. If the inputs to the changegroup @@ -9834,25 +9952,25 @@ SQLITE_API int sqlite3changegroup_output( /* ** CAPI3REF: Delete A Changegroup Object +** DESTRUCTOR: sqlite3_changegroup */ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); /* ** CAPI3REF: Apply A Changeset To A Database ** -** Apply a changeset to a database. This function attempts to update the -** "main" database attached to handle db with the changes found in the -** changeset passed via the second and third arguments. +** Apply a changeset or patchset to a database. These functions attempt to +** update the "main" database attached to handle db with the changes found in +** the changeset passed via the second and third arguments. ** -** The fourth argument (xFilter) passed to this function is the "filter +** The fourth argument (xFilter) passed to these functions is the "filter ** callback". If it is not NULL, then for each table affected by at least one ** change in the changeset, the filter callback is invoked with ** the table name as the second argument, and a copy of the context pointer -** passed as the sixth argument to this function as the first. If the "filter -** callback" returns zero, then no attempt is made to apply any changes to -** the table. Otherwise, if the return value is non-zero or the xFilter -** argument to this function is NULL, all changes related to the table are -** attempted. +** passed as the sixth argument as the first. If the "filter callback" +** returns zero, then no attempt is made to apply any changes to the table. +** Otherwise, if the return value is non-zero or the xFilter argument to +** is NULL, all changes related to the table are attempted. ** ** For each table that is not excluded by the filter callback, this function ** tests that the target database contains a compatible table. A table is @@ -9897,7 +10015,7 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** **
    **
    DELETE Changes
    -** For each DELETE change, this function checks if the target database +** For each DELETE change, the function checks if the target database ** contains a row with the same primary key value (or values) as the ** original row values stored in the changeset. If it does, and the values ** stored in all non-primary key columns also match the values stored in @@ -9942,7 +10060,7 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** [SQLITE_CHANGESET_REPLACE]. ** **
    UPDATE Changes
    -** For each UPDATE change, this function checks if the target database +** For each UPDATE change, the function checks if the target database ** contains a row with the same primary key value (or values) as the ** original row values stored in the changeset. If it does, and the values ** stored in all modified non-primary key columns also match the values @@ -9973,11 +10091,21 @@ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); ** This can be used to further customize the applications conflict ** resolution strategy. ** -** All changes made by this function are enclosed in a savepoint transaction. +** All changes made by these functions are enclosed in a savepoint transaction. ** If any other error (aside from a constraint failure when attempting to ** write to the target database) occurs, then the savepoint transaction is ** rolled back, restoring the target database to its original state, and an ** SQLite error code returned. +** +** If the output parameters (ppRebase) and (pnRebase) are non-NULL and +** the input is a changeset (not a patchset), then sqlite3changeset_apply_v2() +** may set (*ppRebase) to point to a "rebase" that may be used with the +** sqlite3_rebaser APIs buffer before returning. In this case (*pnRebase) +** is set to the size of the buffer in bytes. It is the responsibility of the +** caller to eventually free any such buffer using sqlite3_free(). The buffer +** is only allocated and populated if one or more conflicts were encountered +** while applying the patchset. See comments surrounding the sqlite3_rebaser +** APIs for further details. */ SQLITE_API int sqlite3changeset_apply( sqlite3 *db, /* Apply change to "main" db of this handle */ @@ -9994,6 +10122,22 @@ SQLITE_API int sqlite3changeset_apply( ), void *pCtx /* First argument passed to xConflict */ ); +SQLITE_API int sqlite3changeset_apply_v2( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase +); /* ** CAPI3REF: Constants Passed To The Conflict Handler @@ -10091,6 +10235,161 @@ SQLITE_API int sqlite3changeset_apply( #define SQLITE_CHANGESET_REPLACE 1 #define SQLITE_CHANGESET_ABORT 2 +/* +** CAPI3REF: Rebasing changesets +** EXPERIMENTAL +** +** Suppose there is a site hosting a database in state S0. And that +** modifications are made that move that database to state S1 and a +** changeset recorded (the "local" changeset). Then, a changeset based +** on S0 is received from another site (the "remote" changeset) and +** applied to the database. The database is then in state +** (S1+"remote"), where the exact state depends on any conflict +** resolution decisions (OMIT or REPLACE) made while applying "remote". +** Rebasing a changeset is to update it to take those conflict +** resolution decisions into account, so that the same conflicts +** do not have to be resolved elsewhere in the network. 
+** +** For example, if both the local and remote changesets contain an +** INSERT of the same key on "CREATE TABLE t1(a PRIMARY KEY, b)": +** +** local: INSERT INTO t1 VALUES(1, 'v1'); +** remote: INSERT INTO t1 VALUES(1, 'v2'); +** +** and the conflict resolution is REPLACE, then the INSERT change is +** removed from the local changeset (it was overridden). Or, if the +** conflict resolution was "OMIT", then the local changeset is modified +** to instead contain: +** +** UPDATE t1 SET b = 'v2' WHERE a=1; +** +** Changes within the local changeset are rebased as follows: +** +**
    +**
    Local INSERT
    +** This may only conflict with a remote INSERT. If the conflict +** resolution was OMIT, then add an UPDATE change to the rebased +** changeset. Or, if the conflict resolution was REPLACE, add +** nothing to the rebased changeset. +** +**
    Local DELETE
    +** This may conflict with a remote UPDATE or DELETE. In both cases the +** only possible resolution is OMIT. If the remote operation was a +** DELETE, then add no change to the rebased changeset. If the remote +** operation was an UPDATE, then the old.* fields of change are updated +** to reflect the new.* values in the UPDATE. +** +**
    Local UPDATE
    +** This may conflict with a remote UPDATE or DELETE. If it conflicts +** with a DELETE, and the conflict resolution was OMIT, then the update +** is changed into an INSERT. Any undefined values in the new.* record +** from the update change are filled in using the old.* values from +** the conflicting DELETE. Or, if the conflict resolution was REPLACE, +** the UPDATE change is simply omitted from the rebased changeset. +** +** If conflict is with a remote UPDATE and the resolution is OMIT, then +** the old.* values are rebased using the new.* values in the remote +** change. Or, if the resolution is REPLACE, then the change is copied +** into the rebased changeset with updates to columns also updated by +** the conflicting remote UPDATE removed. If this means no columns would +** be updated, the change is omitted. +**
    +** +** A local change may be rebased against multiple remote changes +** simultaneously. If a single key is modified by multiple remote +** changesets, they are combined as follows before the local changeset +** is rebased: +** +**
      +**
• If there have been one or more REPLACE resolutions on a +** key, it is rebased according to a REPLACE. +** +**
    • If there have been no REPLACE resolutions on a key, then +** the local changeset is rebased according to the most recent +** of the OMIT resolutions. +**
    +** +** Note that conflict resolutions from multiple remote changesets are +** combined on a per-field basis, not per-row. This means that in the +** case of multiple remote UPDATE operations, some fields of a single +** local change may be rebased for REPLACE while others are rebased for +** OMIT. +** +** In order to rebase a local changeset, the remote changeset must first +** be applied to the local database using sqlite3changeset_apply_v2() and +** the buffer of rebase information captured. Then: +** +**
      +**
    1. An sqlite3_rebaser object is created by calling +** sqlite3rebaser_create(). +**
    2. The new object is configured with the rebase buffer obtained from +** sqlite3changeset_apply_v2() by calling sqlite3rebaser_configure(). +** If the local changeset is to be rebased against multiple remote +** changesets, then sqlite3rebaser_configure() should be called +** multiple times, in the same order that the multiple +** sqlite3changeset_apply_v2() calls were made. +**
    3. Each local changeset is rebased by calling sqlite3rebaser_rebase(). +**
    4. The sqlite3_rebaser object is deleted by calling +** sqlite3rebaser_delete(). +**
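Put together, the four steps above might look like the sketch below. The helper names (xPreferRemote, apply_remote_and_rebase_local) are invented for the example; the conflict handler simply prefers the incoming remote change wherever SQLITE_CHANGESET_REPLACE is a permitted return, and error handling is compressed. If applying the remote changeset produces no conflicts, no rebase buffer is returned and the local changeset can be used unchanged.

static int xPreferRemote(void *pCtx, int eConflict, sqlite3_changeset_iter *p){
  (void)pCtx; (void)p;
  /* REPLACE is only a legal answer for DATA and CONFLICT conflicts. */
  if( eConflict==SQLITE_CHANGESET_DATA || eConflict==SQLITE_CHANGESET_CONFLICT ){
    return SQLITE_CHANGESET_REPLACE;
  }
  return SQLITE_CHANGESET_OMIT;
}

static int apply_remote_and_rebase_local(
  sqlite3 *db,
  int nRemote, void *pRemote,     /* Remote changeset to apply */
  int nLocal, void *pLocal,       /* Local changeset to rebase */
  int *pnOut, void **ppOut        /* OUT: rebased local changeset */
){
  sqlite3_rebaser *pRebaser = 0;
  void *pRebase = 0;
  int nRebase = 0;
  int rc;

  *pnOut = 0;
  *ppOut = 0;

  /* Apply the remote changeset, capturing the rebase buffer. */
  rc = sqlite3changeset_apply_v2(db, nRemote, pRemote,
           0 /* xFilter */, xPreferRemote, 0 /* pCtx */,
           &pRebase, &nRebase);

  if( rc==SQLITE_OK && nRebase>0 ){
    rc = sqlite3rebaser_create(&pRebaser);                        /* step 1 */
    if( rc==SQLITE_OK ){
      rc = sqlite3rebaser_configure(pRebaser, nRebase, pRebase);  /* step 2 */
    }
    if( rc==SQLITE_OK ){
      rc = sqlite3rebaser_rebase(pRebaser, nLocal, pLocal,        /* step 3 */
               pnOut, ppOut);
    }
    if( pRebaser ) sqlite3rebaser_delete(pRebaser);               /* step 4 */
  }
  sqlite3_free(pRebase);
  return rc;
}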
+*/
+typedef struct sqlite3_rebaser sqlite3_rebaser;
+
+/*
+** CAPI3REF: Create a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Allocate a new changeset rebaser object. If successful, set (*ppNew) to
+** point to the new object and return SQLITE_OK. Otherwise, if an error
+** occurs, return an SQLite error code (e.g. SQLITE_NOMEM) and set (*ppNew)
+** to NULL.
+*/
+SQLITE_API int sqlite3rebaser_create(sqlite3_rebaser **ppNew);
+
+/*
+** CAPI3REF: Configure a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Configure the changeset rebaser object to rebase changesets according
+** to the conflict resolutions described by buffer pRebase (size nRebase
+** bytes), which must have been obtained from a previous call to
+** sqlite3changeset_apply_v2().
+*/
+SQLITE_API int sqlite3rebaser_configure(
+  sqlite3_rebaser*,
+  int nRebase, const void *pRebase
+);
+
+/*
+** CAPI3REF: Rebase a changeset
+** EXPERIMENTAL
+**
+** Argument pIn must point to a buffer containing a changeset nIn bytes
+** in size. This function allocates and populates a buffer with a copy
+** of the changeset rebased according to the configuration of the
+** rebaser object passed as the first argument. If successful, (*ppOut)
+** is set to point to the new buffer containing the rebased changeset and
+** (*pnOut) to its size in bytes and SQLITE_OK returned. It is the
+** responsibility of the caller to eventually free the new buffer using
+** sqlite3_free(). Otherwise, if an error occurs, (*ppOut) and (*pnOut)
+** are set to zero and an SQLite error code returned.
+*/
+SQLITE_API int sqlite3rebaser_rebase(
+  sqlite3_rebaser*,
+  int nIn, const void *pIn,
+  int *pnOut, void **ppOut
+);
+
+/*
+** CAPI3REF: Delete a changeset rebaser object.
+** EXPERIMENTAL
+**
+** Delete the changeset rebaser object and all associated resources. There
+** should be one call to this function for each successful invocation
+** of sqlite3rebaser_create().
+*/
+SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p);
+
 /*
 ** CAPI3REF: Streaming Versions of API functions.
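The streaming variants declared below all share the same input-callback shape: an xInput function that the sessions module calls repeatedly, each time asking for up to *pnData bytes. The sketch that follows shows one way such a callback might be backed by a single in-memory buffer; MemIn and xMemInput are invented helpers, not part of the SQLite API, and end of input is signalled by setting *pnData to zero. A MemIn instance and xMemInput could then be passed as the pIn/xInput pair of, for example, sqlite3changeset_apply_v2_strm() or sqlite3rebaser_rebase_strm().

#include <string.h>
#include "sqlite3.h"

typedef struct MemIn MemIn;
struct MemIn {
  const unsigned char *aData;   /* Entire changeset or patchset */
  int nData;                    /* Size of aData[] in bytes */
  int iNext;                    /* Offset of the next unread byte */
};

/* xInput callback: copy up to *pnData bytes into pData and report how many
** were actually copied. Returning SQLITE_OK with *pnData==0 tells the
** sessions module that the input is exhausted. */
static int xMemInput(void *pIn, void *pData, int *pnData){
  MemIn *p = (MemIn*)pIn;
  int nCopy = p->nData - p->iNext;
  if( nCopy>*pnData ) nCopy = *pnData;
  if( nCopy>0 ) memcpy(pData, &p->aData[p->iNext], nCopy);
  p->iNext += nCopy;
  *pnData = nCopy;
  return SQLITE_OK;
}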
** @@ -10195,6 +10494,22 @@ SQLITE_API int sqlite3changeset_apply_strm( ), void *pCtx /* First argument passed to xConflict */ ); +SQLITE_API int sqlite3changeset_apply_v2_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase +); SQLITE_API int sqlite3changeset_concat_strm( int (*xInputA)(void *pIn, void *pData, int *pnData), void *pInA, @@ -10232,6 +10547,13 @@ SQLITE_API int sqlite3changegroup_output_strm(sqlite3_changegroup*, int (*xOutput)(void *pOut, const void *pData, int nData), void *pOut ); +SQLITE_API int sqlite3rebaser_rebase_strm( + sqlite3_rebaser *pRebaser, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); /* diff --git a/devtools/client/netmonitor/src/assets/styles/NetworkDetailsPanel.css b/devtools/client/netmonitor/src/assets/styles/NetworkDetailsPanel.css index 8285cc28484f..ffd358f29217 100644 --- a/devtools/client/netmonitor/src/assets/styles/NetworkDetailsPanel.css +++ b/devtools/client/netmonitor/src/assets/styles/NetworkDetailsPanel.css @@ -210,10 +210,6 @@ margin-inline-end: 6px; } -.network-monitor .headers-summary .requests-list-status-icon { - min-width: 10px; -} - .network-monitor .headers-summary .raw-headers-container { display: flex; width: 100%; diff --git a/devtools/client/netmonitor/src/components/HeadersPanel.js b/devtools/client/netmonitor/src/components/HeadersPanel.js index 59d41960686f..4a969d8b9f35 100644 --- a/devtools/client/netmonitor/src/components/HeadersPanel.js +++ b/devtools/client/netmonitor/src/components/HeadersPanel.js @@ -27,11 +27,11 @@ const { // Components const PropertiesView = createFactory(require("./PropertiesView")); +const StatusCode = createFactory(require("./StatusCode")); loader.lazyGetter(this, "MDNLink", function() { return createFactory(require("./MdnLink")); }); - loader.lazyGetter(this, "Rep", function() { return require("devtools/client/shared/components/reps/reps").REPS.Rep; }); @@ -180,6 +180,7 @@ class HeadersPanel extends Component { urlDetails, }, } = this.props; + let item = { fromCache, fromServiceWorker, status, statusText }; if ((!requestHeaders || !requestHeaders.headers.length) && (!uploadHeaders || !uploadHeaders.headers.length) && @@ -209,38 +210,17 @@ class HeadersPanel extends Component { let summaryStatus; if (status) { - let code; - if (fromCache) { - code = "cached"; - } else if (fromServiceWorker) { - code = "service worker"; - } else { - code = status; - } - let statusCodeDocURL = getHTTPStatusCodeURL(status.toString()); - let inputWidth = status.toString().length + statusText.length + 1; let toggleRawHeadersClassList = ["devtools-button", "raw-headers-button"]; if (this.state.rawHeadersOpened) { toggleRawHeadersClassList.push("checked"); } - summaryStatus = ( div({ className: "tabpanel-summary-container headers-summary" }, div({ className: "tabpanel-summary-label headers-summary-label", }, SUMMARY_STATUS), - div({ - className: "requests-list-status-icon", - "data-code": code, - }), - 
input({ - className: "tabpanel-summary-value textbox-input devtools-monospace" - + " status-text", - readOnly: true, - value: `${status} ${statusText}`, - size: `${inputWidth}`, - }), + StatusCode({ item }), statusCodeDocURL ? MDNLink({ url: statusCodeDocURL, }) : span({ diff --git a/devtools/client/netmonitor/src/components/RequestListColumnStatus.js b/devtools/client/netmonitor/src/components/RequestListColumnStatus.js index aec0d8a9743a..e1366249d87f 100644 --- a/devtools/client/netmonitor/src/components/RequestListColumnStatus.js +++ b/devtools/client/netmonitor/src/components/RequestListColumnStatus.js @@ -4,21 +4,16 @@ "use strict"; -const { Component } = require("devtools/client/shared/vendor/react"); +const { Component, createFactory } = require("devtools/client/shared/vendor/react"); const dom = require("devtools/client/shared/vendor/react-dom-factories"); const PropTypes = require("devtools/client/shared/vendor/react-prop-types"); -const { L10N } = require("../utils/l10n"); -const { propertiesEqual } = require("../utils/request-utils"); + +// Components + +const StatusCode = createFactory(require("./StatusCode")); const { div } = dom; -const UPDATED_STATUS_PROPS = [ - "fromCache", - "fromServiceWorker", - "status", - "statusText", -]; - class RequestListColumnStatus extends Component { static get propTypes() { return { @@ -26,68 +21,16 @@ class RequestListColumnStatus extends Component { }; } - shouldComponentUpdate(nextProps) { - return !propertiesEqual(UPDATED_STATUS_PROPS, this.props.item, nextProps.item); - } - render() { let { item } = this.props; - let { fromCache, fromServiceWorker, status, statusText } = item; - let code; - - if (status) { - if (fromCache) { - code = "cached"; - } else if (fromServiceWorker) { - code = "service worker"; - } else { - code = status; - } - } return ( div({ className: "requests-list-column requests-list-status", - onMouseOver: function({ target }) { - if (status && statusText && !target.title) { - target.title = getColumnTitle(item); - } - }, }, - - /* - `data-code` refers to the status-code - `data-status-code` can be one of "cached", "service worker" - or the status-code itself - For example - if a resource is cached, `data-code` would be 200 - and the `data-status-code` would be "cached" - */ - div({ - className: "requests-list-status-code status-code", - "data-status-code": code, - "data-code": status, - }, status) + StatusCode({ item }), )); } } -function getColumnTitle(item) { - let { fromCache, fromServiceWorker, status, statusText } = item; - let title; - if (fromCache && fromServiceWorker) { - title = L10N.getFormatStr("netmonitor.status.tooltip.cachedworker", - status, statusText); - } else if (fromCache) { - title = L10N.getFormatStr("netmonitor.status.tooltip.cached", - status, statusText); - } else if (fromServiceWorker) { - title = L10N.getFormatStr("netmonitor.status.tooltip.worker", - status, statusText); - } else { - title = L10N.getFormatStr("netmonitor.status.tooltip.simple", - status, statusText); - } - return title; -} - module.exports = RequestListColumnStatus; diff --git a/devtools/client/netmonitor/src/components/StatusCode.js b/devtools/client/netmonitor/src/components/StatusCode.js new file mode 100644 index 000000000000..1f07f742fdd6 --- /dev/null +++ b/devtools/client/netmonitor/src/components/StatusCode.js @@ -0,0 +1,92 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +"use strict"; + +const { Component } = require("devtools/client/shared/vendor/react"); +const dom = require("devtools/client/shared/vendor/react-dom-factories"); +const PropTypes = require("devtools/client/shared/vendor/react-prop-types"); +const { L10N } = require("../utils/l10n"); +const { propertiesEqual } = require("../utils/request-utils"); + +const { div } = dom; + +const UPDATED_STATUS_PROPS = [ + "fromCache", + "fromServiceWorker", + "status", + "statusText", +]; + +/** + * Status code component + * Displays HTTP status code icon + * Used in RequestListColumnStatus and HeadersPanel + */ +class StatusCode extends Component { + static get propTypes() { + return { + item: PropTypes.object.isRequired, + }; + } + + shouldComponentUpdate(nextProps) { + return !propertiesEqual(UPDATED_STATUS_PROPS, this.props.item, nextProps.item); + } + + render() { + let { item } = this.props; + let { fromCache, fromServiceWorker, status, statusText } = item; + let code; + + if (status) { + if (fromCache) { + code = "cached"; + } else if (fromServiceWorker) { + code = "service worker"; + } else { + code = status; + } + } + + // `data-code` refers to the status-code + // `data-status-code` can be one of "cached", "service worker" + // or the status-code itself + // For example - if a resource is cached, `data-code` would be 200 + // and the `data-status-code` would be "cached" + return ( + div({ + className: "requests-list-status-code status-code", + onMouseOver: function({ target }) { + if (status && statusText && !target.title) { + target.title = getStatusTooltip(item); + } + }, + "data-status-code": code, + "data-code": status, + }, status) + ); + } +} + +function getStatusTooltip(item) { + let { fromCache, fromServiceWorker, status, statusText } = item; + let title; + if (fromCache && fromServiceWorker) { + title = L10N.getFormatStr("netmonitor.status.tooltip.cachedworker", + status, statusText); + } else if (fromCache) { + title = L10N.getFormatStr("netmonitor.status.tooltip.cached", + status, statusText); + } else if (fromServiceWorker) { + title = L10N.getFormatStr("netmonitor.status.tooltip.worker", + status, statusText); + } else { + title = L10N.getFormatStr("netmonitor.status.tooltip.simple", + status, statusText); + } + return title; +} + +module.exports = StatusCode; diff --git a/devtools/client/netmonitor/src/components/moz.build b/devtools/client/netmonitor/src/components/moz.build index 329685db07fe..1b263c0697ba 100644 --- a/devtools/client/netmonitor/src/components/moz.build +++ b/devtools/client/netmonitor/src/components/moz.build @@ -41,6 +41,7 @@ DevToolsModules( 'StackTracePanel.js', 'StatisticsPanel.js', 'StatusBar.js', + 'StatusCode.js', 'TabboxPanel.js', 'TimingsPanel.js', 'Toolbar.js', diff --git a/devtools/client/netmonitor/test/browser_net_brotli.js b/devtools/client/netmonitor/test/browser_net_brotli.js index ebe86b1111c6..63a9a64fe96f 100644 --- a/devtools/client/netmonitor/test/browser_net_brotli.js +++ b/devtools/client/netmonitor/test/browser_net_brotli.js @@ -29,7 +29,7 @@ add_task(async function() { await performRequests(monitor, tab, BROTLI_REQUESTS); let requestItem = document.querySelector(".request-list-item"); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => 
requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_cached-status.js b/devtools/client/netmonitor/test/browser_net_cached-status.js index 8904f275a8c7..dbb8f5b79ec3 100644 --- a/devtools/client/netmonitor/test/browser_net_cached-status.js +++ b/devtools/client/netmonitor/test/browser_net_cached-status.js @@ -98,7 +98,7 @@ add_task(async function() { for (let request of REQUEST_DATA) { let requestItem = document.querySelectorAll(".request-list-item")[index]; requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_content-type.js b/devtools/client/netmonitor/test/browser_net_content-type.js index a3026aa36af3..6f58becad513 100644 --- a/devtools/client/netmonitor/test/browser_net_content-type.js +++ b/devtools/client/netmonitor/test/browser_net_content-type.js @@ -25,7 +25,7 @@ add_task(async function() { await performRequests(monitor, tab, CONTENT_TYPE_WITHOUT_CACHE_REQUESTS); for (let requestItem of document.querySelectorAll(".request-list-item")) { - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); requestItem.scrollIntoView(); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_cyrillic-01.js b/devtools/client/netmonitor/test/browser_net_cyrillic-01.js index 10b1a4ae308d..4595e7e2d920 100644 --- a/devtools/client/netmonitor/test/browser_net_cyrillic-01.js +++ b/devtools/client/netmonitor/test/browser_net_cyrillic-01.js @@ -24,7 +24,7 @@ add_task(async function() { await performRequests(monitor, tab, 1); let requestItem = document.querySelectorAll(".request-list-item")[0]; - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); requestItem.scrollIntoView(); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_cyrillic-02.js b/devtools/client/netmonitor/test/browser_net_cyrillic-02.js index c3cebe5d1ccf..caa6a5979ebc 100644 --- a/devtools/client/netmonitor/test/browser_net_cyrillic-02.js +++ b/devtools/client/netmonitor/test/browser_net_cyrillic-02.js @@ -23,7 +23,7 @@ add_task(async function() { await wait; let requestItem = document.querySelectorAll(".request-list-item")[0]; - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); requestItem.scrollIntoView(); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_filter-01.js b/devtools/client/netmonitor/test/browser_net_filter-01.js index ca636a2944bd..a3286ac6b002 100644 --- a/devtools/client/netmonitor/test/browser_net_filter-01.js +++ b/devtools/client/netmonitor/test/browser_net_filter-01.js @@ -307,7 +307,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { 
requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_filter-02.js b/devtools/client/netmonitor/test/browser_net_filter-02.js index f79317aeb115..b4162c316357 100644 --- a/devtools/client/netmonitor/test/browser_net_filter-02.js +++ b/devtools/client/netmonitor/test/browser_net_filter-02.js @@ -209,7 +209,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_filter-flags.js b/devtools/client/netmonitor/test/browser_net_filter-flags.js index b27658b6e30a..cc75973408fd 100644 --- a/devtools/client/netmonitor/test/browser_net_filter-flags.js +++ b/devtools/client/netmonitor/test/browser_net_filter-flags.js @@ -392,7 +392,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_json-long.js b/devtools/client/netmonitor/test/browser_net_json-long.js index a467bb5a6522..4816c6b99f67 100644 --- a/devtools/client/netmonitor/test/browser_net_json-long.js +++ b/devtools/client/netmonitor/test/browser_net_json-long.js @@ -30,7 +30,7 @@ add_task(async function() { await performRequests(monitor, tab, 1); let requestItem = document.querySelector(".request-list-item"); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_json-malformed.js b/devtools/client/netmonitor/test/browser_net_json-malformed.js index f5aaae8c30d4..93f5241b1ba4 100644 --- a/devtools/client/netmonitor/test/browser_net_json-malformed.js +++ b/devtools/client/netmonitor/test/browser_net_json-malformed.js @@ -25,7 +25,7 @@ add_task(async function() { await performRequests(monitor, tab, 1); let requestItem = document.querySelector(".request-list-item"); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_json_custom_mime.js b/devtools/client/netmonitor/test/browser_net_json_custom_mime.js index d266f273cdba..e5c79cf63023 100644 --- a/devtools/client/netmonitor/test/browser_net_json_custom_mime.js +++ 
b/devtools/client/netmonitor/test/browser_net_json_custom_mime.js @@ -25,7 +25,7 @@ add_task(async function() { await performRequests(monitor, tab, 1); let requestItem = document.querySelector(".request-list-item"); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_json_text_mime.js b/devtools/client/netmonitor/test/browser_net_json_text_mime.js index d7fa390a3f57..28c3113cfba7 100644 --- a/devtools/client/netmonitor/test/browser_net_json_text_mime.js +++ b/devtools/client/netmonitor/test/browser_net_json_text_mime.js @@ -26,7 +26,7 @@ add_task(async function() { await performRequests(monitor, tab, 1); let requestItem = document.querySelector(".request-list-item"); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_jsonp.js b/devtools/client/netmonitor/test/browser_net_jsonp.js index 77009da78428..5fb4666961c4 100644 --- a/devtools/client/netmonitor/test/browser_net_jsonp.js +++ b/devtools/client/netmonitor/test/browser_net_jsonp.js @@ -28,7 +28,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_large-response.js b/devtools/client/netmonitor/test/browser_net_large-response.js index f268eb00d4bb..1f8bf790b60c 100644 --- a/devtools/client/netmonitor/test/browser_net_large-response.js +++ b/devtools/client/netmonitor/test/browser_net_large-response.js @@ -34,7 +34,7 @@ add_task(async function() { let requestItem = document.querySelector(".request-list-item"); requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_post-data-01.js b/devtools/client/netmonitor/test/browser_net_post-data-01.js index fe7373948e89..cfb43cd21f62 100644 --- a/devtools/client/netmonitor/test/browser_net_post-data-01.js +++ b/devtools/client/netmonitor/test/browser_net_post-data-01.js @@ -31,7 +31,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_req-resp-bodies.js 
b/devtools/client/netmonitor/test/browser_net_req-resp-bodies.js index 4b477caae9ef..eb6f26eeb7cf 100644 --- a/devtools/client/netmonitor/test/browser_net_req-resp-bodies.js +++ b/devtools/client/netmonitor/test/browser_net_req-resp-bodies.js @@ -53,7 +53,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_service-worker-status.js b/devtools/client/netmonitor/test/browser_net_service-worker-status.js index 404a4e827932..26e92c6a3bf8 100644 --- a/devtools/client/netmonitor/test/browser_net_service-worker-status.js +++ b/devtools/client/netmonitor/test/browser_net_service-worker-status.js @@ -58,7 +58,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_simple-request-data.js b/devtools/client/netmonitor/test/browser_net_simple-request-data.js index a6b43a098be2..0b4d4fc7c52e 100644 --- a/devtools/client/netmonitor/test/browser_net_simple-request-data.js +++ b/devtools/client/netmonitor/test/browser_net_simple-request-data.js @@ -224,7 +224,7 @@ function test() { let requestListItem = document.querySelector(".request-list-item"); requestListItem.scrollIntoView(); - let requestsListStatus = requestListItem.querySelector(".requests-list-status"); + let requestsListStatus = requestListItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); diff --git a/devtools/client/netmonitor/test/browser_net_sort-01.js b/devtools/client/netmonitor/test/browser_net_sort-01.js index c56029a621a2..e54a9bc34fa9 100644 --- a/devtools/client/netmonitor/test/browser_net_sort-01.js +++ b/devtools/client/netmonitor/test/browser_net_sort-01.js @@ -159,7 +159,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_sort-02.js b/devtools/client/netmonitor/test/browser_net_sort-02.js index ce4e0bff8ea7..2820139c18c6 100644 --- a/devtools/client/netmonitor/test/browser_net_sort-02.js +++ b/devtools/client/netmonitor/test/browser_net_sort-02.js @@ -243,7 +243,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let 
requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/browser_net_status-codes.js b/devtools/client/netmonitor/test/browser_net_status-codes.js index 1034f5afa636..40f21402a2a6 100644 --- a/devtools/client/netmonitor/test/browser_net_status-codes.js +++ b/devtools/client/netmonitor/test/browser_net_status-codes.js @@ -116,7 +116,7 @@ add_task(async function() { let requestListItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestListItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } @@ -175,13 +175,16 @@ add_task(async function() { let panel = document.querySelector("#headers-panel"); let summaryValues = panel.querySelectorAll(".tabpanel-summary-value.textbox-input"); let { method, correctUri, details: { status, statusText } } = data; + let statusCode = panel.querySelector(".status-code"); + EventUtils.sendMouseEvent({ type: "mouseover" }, statusCode); + await waitUntil(() => statusCode.title); is(summaryValues[0].value, correctUri, "The url summary value is incorrect."); is(summaryValues[1].value, method, "The method summary value is incorrect."); - is(panel.querySelector(".requests-list-status-icon").dataset.code, status, + is(statusCode.dataset.code, status, "The status summary code is incorrect."); - is(summaryValues[3].value, status + " " + statusText, + is(statusCode.getAttribute("title"), status + " " + statusText, "The status summary value is incorrect."); } diff --git a/devtools/client/netmonitor/test/browser_net_streaming-response.js b/devtools/client/netmonitor/test/browser_net_streaming-response.js index 021424a7afb7..5bd29f967944 100644 --- a/devtools/client/netmonitor/test/browser_net_streaming-response.js +++ b/devtools/client/netmonitor/test/browser_net_streaming-response.js @@ -38,7 +38,7 @@ add_task(async function() { let requestItems = document.querySelectorAll(".request-list-item"); for (let requestItem of requestItems) { requestItem.scrollIntoView(); - let requestsListStatus = requestItem.querySelector(".requests-list-status"); + let requestsListStatus = requestItem.querySelector(".status-code"); EventUtils.sendMouseEvent({ type: "mouseover" }, requestsListStatus); await waitUntil(() => requestsListStatus.title); } diff --git a/devtools/client/netmonitor/test/head.js b/devtools/client/netmonitor/test/head.js index 46515cb8150d..a8c80f8f678d 100644 --- a/devtools/client/netmonitor/test/head.js +++ b/devtools/client/netmonitor/test/head.js @@ -493,7 +493,8 @@ function verifyRequestItemTarget(document, requestList, requestItem, method, let value = target.querySelector(".requests-list-status-code") .getAttribute("data-status-code"); let codeValue = target.querySelector(".requests-list-status-code").textContent; - let tooltip = target.querySelector(".requests-list-status").getAttribute("title"); + let tooltip = target.querySelector(".requests-list-status-code") + .getAttribute("title"); info("Displayed status: " + value); info("Displayed code: " + codeValue); info("Tooltip status: " + tooltip); diff --git a/devtools/client/shared/source-map/index.js b/devtools/client/shared/source-map/index.js index 
124057be099b..a4cea0a30e5b 100644 --- a/devtools/client/shared/source-map/index.js +++ b/devtools/client/shared/source-map/index.js @@ -11,650 +11,712 @@ return /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; - +/******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { - +/******/ /******/ // Check if module is in cache -/******/ if(installedModules[moduleId]) +/******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; - +/******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { -/******/ exports: {}, -/******/ id: moduleId, -/******/ loaded: false +/******/ i: moduleId, +/******/ l: false, +/******/ exports: {} /******/ }; - +/******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); - +/******/ /******/ // Flag the module as loaded -/******/ module.loaded = true; - +/******/ module.l = true; +/******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } - - +/******/ +/******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; - +/******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; - +/******/ +/******/ // define getter function for harmony exports +/******/ __webpack_require__.d = function(exports, name, getter) { +/******/ if(!__webpack_require__.o(exports, name)) { +/******/ Object.defineProperty(exports, name, { +/******/ configurable: false, +/******/ enumerable: true, +/******/ get: getter +/******/ }); +/******/ } +/******/ }; +/******/ +/******/ // getDefaultExport function for compatibility with non-harmony modules +/******/ __webpack_require__.n = function(module) { +/******/ var getter = module && module.__esModule ? +/******/ function getDefault() { return module['default']; } : +/******/ function getModuleExports() { return module; }; +/******/ __webpack_require__.d(getter, 'a', getter); +/******/ return getter; +/******/ }; +/******/ +/******/ // Object.prototype.hasOwnProperty.call +/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; +/******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; - +/******/ /******/ // Load entry module and return exports -/******/ return __webpack_require__(0); +/******/ return __webpack_require__(__webpack_require__.s = 15); /******/ }) /************************************************************************/ /******/ ([ /* 0 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports) { - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ +var charenc = { + // UTF-8 encoding + utf8: { + // Convert a string to a byte array + stringToBytes: function(str) { + return charenc.bin.stringToBytes(unescape(encodeURIComponent(str))); + }, - const { - originalToGeneratedId, - generatedToOriginalId, - isGeneratedId, - isOriginalId - } = __webpack_require__(1); + // Convert a byte array to a string + bytesToString: function(bytes) { + return decodeURIComponent(escape(charenc.bin.bytesToString(bytes))); + } + }, - const { workerUtils: { WorkerDispatcher } } = __webpack_require__(6); + // Binary encoding + bin: { + // Convert a string to a byte array + stringToBytes: function(str) { + for (var bytes = [], i = 0; i < str.length; i++) + bytes.push(str.charCodeAt(i) & 0xFF); + return bytes; + }, - const dispatcher = new WorkerDispatcher(); + // Convert a byte array to a string + bytesToString: function(bytes) { + for (var str = [], i = 0; i < bytes.length; i++) + str.push(String.fromCharCode(bytes[i])); + return str.join(''); + } + } +}; - const getOriginalURLs = dispatcher.task("getOriginalURLs"); - const getGeneratedLocation = dispatcher.task("getGeneratedLocation"); - const getOriginalLocation = dispatcher.task("getOriginalLocation"); - const getLocationScopes = dispatcher.task("getLocationScopes"); - const getOriginalSourceText = dispatcher.task("getOriginalSourceText"); - const applySourceMap = dispatcher.task("applySourceMap"); - const clearSourceMaps = dispatcher.task("clearSourceMaps"); - const hasMappedSource = dispatcher.task("hasMappedSource"); +module.exports = charenc; - module.exports = { - originalToGeneratedId, - generatedToOriginalId, - isGeneratedId, - isOriginalId, - hasMappedSource, - getOriginalURLs, - getGeneratedLocation, - getOriginalLocation, - getLocationScopes, - getOriginalSourceText, - applySourceMap, - clearSourceMaps, - startSourceMapWorker: dispatcher.start.bind(dispatcher), - stopSourceMapWorker: dispatcher.stop.bind(dispatcher) - }; -/***/ }, +/***/ }), /* 1 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports, __webpack_require__) { - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - const md5 = __webpack_require__(2); +const networkRequest = __webpack_require__(7); +const workerUtils = __webpack_require__(8); - function originalToGeneratedId(originalId) { - const match = originalId.match(/(.*)\/originalSource/); - return match ? match[1] : ""; - } +module.exports = { + networkRequest, + workerUtils +}; - function generatedToOriginalId(generatedId, url) { - return `${generatedId}/originalSource-${md5(url)}`; - } - - function isOriginalId(id) { - return !!id.match(/\/originalSource/); - } - - function isGeneratedId(id) { - return !isOriginalId(id); - } - - /** - * Trims the query part or reference identifier of a URL string, if necessary. - */ - function trimUrlQuery(url) { - let length = url.length; - let q1 = url.indexOf("?"); - let q2 = url.indexOf("&"); - let q3 = url.indexOf("#"); - let q = Math.min(q1 != -1 ? q1 : length, q2 != -1 ? q2 : length, q3 != -1 ? q3 : length); - - return url.slice(0, q); - } - - // Map suffix to content type. 
- const contentMap = { - "js": "text/javascript", - "jsm": "text/javascript", - "ts": "text/typescript", - "tsx": "text/typescript-jsx", - "jsx": "text/jsx", - "coffee": "text/coffeescript", - "elm": "text/elm", - "cljs": "text/x-clojure" - }; - - /** - * Returns the content type for the specified URL. If no specific - * content type can be determined, "text/plain" is returned. - * - * @return String - * The content type. - */ - function getContentType(url) { - url = trimUrlQuery(url); - let dot = url.lastIndexOf("."); - if (dot >= 0) { - let name = url.substring(dot + 1); - if (name in contentMap) { - return contentMap[name]; - } - } - return "text/plain"; - } - - module.exports = { - originalToGeneratedId, - generatedToOriginalId, - isOriginalId, - isGeneratedId, - getContentType, - contentMapForTesting: contentMap - }; - -/***/ }, -/* 2 */ -/***/ function(module, exports, __webpack_require__) { - - (function () { - var crypt = __webpack_require__(3), - utf8 = __webpack_require__(4).utf8, - isBuffer = __webpack_require__(5), - bin = __webpack_require__(4).bin, - - - // The core - md5 = function (message, options) { - // Convert to byte array - if (message.constructor == String) { - if (options && options.encoding === 'binary') message = bin.stringToBytes(message);else message = utf8.stringToBytes(message); - } else if (isBuffer(message)) message = Array.prototype.slice.call(message, 0);else if (!Array.isArray(message)) message = message.toString(); - // else, assume byte array already - - var m = crypt.bytesToWords(message), - l = message.length * 8, - a = 1732584193, - b = -271733879, - c = -1732584194, - d = 271733878; - - // Swap endian - for (var i = 0; i < m.length; i++) { - m[i] = (m[i] << 8 | m[i] >>> 24) & 0x00FF00FF | (m[i] << 24 | m[i] >>> 8) & 0xFF00FF00; - } - - // Padding - m[l >>> 5] |= 0x80 << l % 32; - m[(l + 64 >>> 9 << 4) + 14] = l; - - // Method shortcuts - var FF = md5._ff, - GG = md5._gg, - HH = md5._hh, - II = md5._ii; - - for (var i = 0; i < m.length; i += 16) { - - var aa = a, - bb = b, - cc = c, - dd = d; - - a = FF(a, b, c, d, m[i + 0], 7, -680876936); - d = FF(d, a, b, c, m[i + 1], 12, -389564586); - c = FF(c, d, a, b, m[i + 2], 17, 606105819); - b = FF(b, c, d, a, m[i + 3], 22, -1044525330); - a = FF(a, b, c, d, m[i + 4], 7, -176418897); - d = FF(d, a, b, c, m[i + 5], 12, 1200080426); - c = FF(c, d, a, b, m[i + 6], 17, -1473231341); - b = FF(b, c, d, a, m[i + 7], 22, -45705983); - a = FF(a, b, c, d, m[i + 8], 7, 1770035416); - d = FF(d, a, b, c, m[i + 9], 12, -1958414417); - c = FF(c, d, a, b, m[i + 10], 17, -42063); - b = FF(b, c, d, a, m[i + 11], 22, -1990404162); - a = FF(a, b, c, d, m[i + 12], 7, 1804603682); - d = FF(d, a, b, c, m[i + 13], 12, -40341101); - c = FF(c, d, a, b, m[i + 14], 17, -1502002290); - b = FF(b, c, d, a, m[i + 15], 22, 1236535329); - - a = GG(a, b, c, d, m[i + 1], 5, -165796510); - d = GG(d, a, b, c, m[i + 6], 9, -1069501632); - c = GG(c, d, a, b, m[i + 11], 14, 643717713); - b = GG(b, c, d, a, m[i + 0], 20, -373897302); - a = GG(a, b, c, d, m[i + 5], 5, -701558691); - d = GG(d, a, b, c, m[i + 10], 9, 38016083); - c = GG(c, d, a, b, m[i + 15], 14, -660478335); - b = GG(b, c, d, a, m[i + 4], 20, -405537848); - a = GG(a, b, c, d, m[i + 9], 5, 568446438); - d = GG(d, a, b, c, m[i + 14], 9, -1019803690); - c = GG(c, d, a, b, m[i + 3], 14, -187363961); - b = GG(b, c, d, a, m[i + 8], 20, 1163531501); - a = GG(a, b, c, d, m[i + 13], 5, -1444681467); - d = GG(d, a, b, c, m[i + 2], 9, -51403784); - c = GG(c, d, a, b, m[i + 7], 14, 1735328473); 
- b = GG(b, c, d, a, m[i + 12], 20, -1926607734); - - a = HH(a, b, c, d, m[i + 5], 4, -378558); - d = HH(d, a, b, c, m[i + 8], 11, -2022574463); - c = HH(c, d, a, b, m[i + 11], 16, 1839030562); - b = HH(b, c, d, a, m[i + 14], 23, -35309556); - a = HH(a, b, c, d, m[i + 1], 4, -1530992060); - d = HH(d, a, b, c, m[i + 4], 11, 1272893353); - c = HH(c, d, a, b, m[i + 7], 16, -155497632); - b = HH(b, c, d, a, m[i + 10], 23, -1094730640); - a = HH(a, b, c, d, m[i + 13], 4, 681279174); - d = HH(d, a, b, c, m[i + 0], 11, -358537222); - c = HH(c, d, a, b, m[i + 3], 16, -722521979); - b = HH(b, c, d, a, m[i + 6], 23, 76029189); - a = HH(a, b, c, d, m[i + 9], 4, -640364487); - d = HH(d, a, b, c, m[i + 12], 11, -421815835); - c = HH(c, d, a, b, m[i + 15], 16, 530742520); - b = HH(b, c, d, a, m[i + 2], 23, -995338651); - - a = II(a, b, c, d, m[i + 0], 6, -198630844); - d = II(d, a, b, c, m[i + 7], 10, 1126891415); - c = II(c, d, a, b, m[i + 14], 15, -1416354905); - b = II(b, c, d, a, m[i + 5], 21, -57434055); - a = II(a, b, c, d, m[i + 12], 6, 1700485571); - d = II(d, a, b, c, m[i + 3], 10, -1894986606); - c = II(c, d, a, b, m[i + 10], 15, -1051523); - b = II(b, c, d, a, m[i + 1], 21, -2054922799); - a = II(a, b, c, d, m[i + 8], 6, 1873313359); - d = II(d, a, b, c, m[i + 15], 10, -30611744); - c = II(c, d, a, b, m[i + 6], 15, -1560198380); - b = II(b, c, d, a, m[i + 13], 21, 1309151649); - a = II(a, b, c, d, m[i + 4], 6, -145523070); - d = II(d, a, b, c, m[i + 11], 10, -1120210379); - c = II(c, d, a, b, m[i + 2], 15, 718787259); - b = II(b, c, d, a, m[i + 9], 21, -343485551); - - a = a + aa >>> 0; - b = b + bb >>> 0; - c = c + cc >>> 0; - d = d + dd >>> 0; - } - - return crypt.endian([a, b, c, d]); - }; - - // Auxiliary functions - md5._ff = function (a, b, c, d, x, s, t) { - var n = a + (b & c | ~b & d) + (x >>> 0) + t; - return (n << s | n >>> 32 - s) + b; - }; - md5._gg = function (a, b, c, d, x, s, t) { - var n = a + (b & d | c & ~d) + (x >>> 0) + t; - return (n << s | n >>> 32 - s) + b; - }; - md5._hh = function (a, b, c, d, x, s, t) { - var n = a + (b ^ c ^ d) + (x >>> 0) + t; - return (n << s | n >>> 32 - s) + b; - }; - md5._ii = function (a, b, c, d, x, s, t) { - var n = a + (c ^ (b | ~d)) + (x >>> 0) + t; - return (n << s | n >>> 32 - s) + b; - }; - - // Package private blocksize - md5._blocksize = 16; - md5._digestsize = 16; - - module.exports = function (message, options) { - if (message === undefined || message === null) throw new Error('Illegal argument ' + message); - - var digestbytes = crypt.wordsToBytes(md5(message, options)); - return options && options.asBytes ? digestbytes : options && options.asString ? bin.bytesToString(digestbytes) : crypt.bytesToHex(digestbytes); - }; - })(); - -/***/ }, +/***/ }), +/* 2 */, /* 3 */ -/***/ function(module, exports) { +/***/ (function(module, exports, __webpack_require__) { - (function () { - var base64map = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/', - crypt = { - // Bit-wise rotation left - rotl: function (n, b) { - return n << b | n >>> 32 - b; - }, +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - // Bit-wise rotation right - rotr: function (n, b) { - return n << 32 - b | n >>> b; - }, +const md5 = __webpack_require__(4); - // Swap big-endian to little-endian and vice versa - endian: function (n) { - // If number given, swap endian - if (n.constructor == Number) { - return crypt.rotl(n, 8) & 0x00FF00FF | crypt.rotl(n, 24) & 0xFF00FF00; - } +function originalToGeneratedId(originalId) { + const match = originalId.match(/(.*)\/originalSource/); + return match ? match[1] : ""; +} - // Else, assume array and swap all items - for (var i = 0; i < n.length; i++) n[i] = crypt.endian(n[i]); - return n; - }, +function generatedToOriginalId(generatedId, url) { + return `${generatedId}/originalSource-${md5(url)}`; +} - // Generate an array of any length of random bytes - randomBytes: function (n) { - for (var bytes = []; n > 0; n--) bytes.push(Math.floor(Math.random() * 256)); - return bytes; - }, +function isOriginalId(id) { + return !!id.match(/\/originalSource/); +} - // Convert a byte array to big-endian 32-bit words - bytesToWords: function (bytes) { - for (var words = [], i = 0, b = 0; i < bytes.length; i++, b += 8) words[b >>> 5] |= bytes[i] << 24 - b % 32; - return words; - }, +function isGeneratedId(id) { + return !isOriginalId(id); +} - // Convert big-endian 32-bit words to a byte array - wordsToBytes: function (words) { - for (var bytes = [], b = 0; b < words.length * 32; b += 8) bytes.push(words[b >>> 5] >>> 24 - b % 32 & 0xFF); - return bytes; - }, +/** + * Trims the query part or reference identifier of a URL string, if necessary. + */ +function trimUrlQuery(url) { + let length = url.length; + let q1 = url.indexOf("?"); + let q2 = url.indexOf("&"); + let q3 = url.indexOf("#"); + let q = Math.min(q1 != -1 ? q1 : length, q2 != -1 ? q2 : length, q3 != -1 ? q3 : length); - // Convert a byte array to a hex string - bytesToHex: function (bytes) { - for (var hex = [], i = 0; i < bytes.length; i++) { - hex.push((bytes[i] >>> 4).toString(16)); - hex.push((bytes[i] & 0xF).toString(16)); - } - return hex.join(''); - }, + return url.slice(0, q); +} - // Convert a hex string to a byte array - hexToBytes: function (hex) { - for (var bytes = [], c = 0; c < hex.length; c += 2) bytes.push(parseInt(hex.substr(c, 2), 16)); - return bytes; - }, +// Map suffix to content type. +const contentMap = { + "js": "text/javascript", + "jsm": "text/javascript", + "mjs": "text/javascript", + "ts": "text/typescript", + "tsx": "text/typescript-jsx", + "jsx": "text/jsx", + "coffee": "text/coffeescript", + "elm": "text/elm", + "cljs": "text/x-clojure" +}; - // Convert a byte array to a base-64 string - bytesToBase64: function (bytes) { - for (var base64 = [], i = 0; i < bytes.length; i += 3) { - var triplet = bytes[i] << 16 | bytes[i + 1] << 8 | bytes[i + 2]; - for (var j = 0; j < 4; j++) if (i * 8 + j * 6 <= bytes.length * 8) base64.push(base64map.charAt(triplet >>> 6 * (3 - j) & 0x3F));else base64.push('='); - } - return base64.join(''); - }, +/** + * Returns the content type for the specified URL. If no specific + * content type can be determined, "text/plain" is returned. + * + * @return String + * The content type. 
+ */ +function getContentType(url) { + url = trimUrlQuery(url); + let dot = url.lastIndexOf("."); + if (dot >= 0) { + let name = url.substring(dot + 1); + if (name in contentMap) { + return contentMap[name]; + } + } + return "text/plain"; +} - // Convert a base-64 string to a byte array - base64ToBytes: function (base64) { - // Remove non-base-64 characters - base64 = base64.replace(/[^A-Z0-9+\/]/ig, ''); +module.exports = { + originalToGeneratedId, + generatedToOriginalId, + isOriginalId, + isGeneratedId, + getContentType, + contentMapForTesting: contentMap +}; - for (var bytes = [], i = 0, imod4 = 0; i < base64.length; imod4 = ++i % 4) { - if (imod4 == 0) continue; - bytes.push((base64map.indexOf(base64.charAt(i - 1)) & Math.pow(2, -2 * imod4 + 8) - 1) << imod4 * 2 | base64map.indexOf(base64.charAt(i)) >>> 6 - imod4 * 2); - } - return bytes; - } - }; - - module.exports = crypt; - })(); - -/***/ }, +/***/ }), /* 4 */ -/***/ function(module, exports) { +/***/ (function(module, exports, __webpack_require__) { - var charenc = { - // UTF-8 encoding - utf8: { - // Convert a string to a byte array - stringToBytes: function (str) { - return charenc.bin.stringToBytes(unescape(encodeURIComponent(str))); - }, +(function(){ + var crypt = __webpack_require__(5), + utf8 = __webpack_require__(0).utf8, + isBuffer = __webpack_require__(6), + bin = __webpack_require__(0).bin, + + // The core + md5 = function (message, options) { + // Convert to byte array + if (message.constructor == String) + if (options && options.encoding === 'binary') + message = bin.stringToBytes(message); + else + message = utf8.stringToBytes(message); + else if (isBuffer(message)) + message = Array.prototype.slice.call(message, 0); + else if (!Array.isArray(message)) + message = message.toString(); + // else, assume byte array already + + var m = crypt.bytesToWords(message), + l = message.length * 8, + a = 1732584193, + b = -271733879, + c = -1732584194, + d = 271733878; + + // Swap endian + for (var i = 0; i < m.length; i++) { + m[i] = ((m[i] << 8) | (m[i] >>> 24)) & 0x00FF00FF | + ((m[i] << 24) | (m[i] >>> 8)) & 0xFF00FF00; + } + + // Padding + m[l >>> 5] |= 0x80 << (l % 32); + m[(((l + 64) >>> 9) << 4) + 14] = l; + + // Method shortcuts + var FF = md5._ff, + GG = md5._gg, + HH = md5._hh, + II = md5._ii; + + for (var i = 0; i < m.length; i += 16) { + + var aa = a, + bb = b, + cc = c, + dd = d; + + a = FF(a, b, c, d, m[i+ 0], 7, -680876936); + d = FF(d, a, b, c, m[i+ 1], 12, -389564586); + c = FF(c, d, a, b, m[i+ 2], 17, 606105819); + b = FF(b, c, d, a, m[i+ 3], 22, -1044525330); + a = FF(a, b, c, d, m[i+ 4], 7, -176418897); + d = FF(d, a, b, c, m[i+ 5], 12, 1200080426); + c = FF(c, d, a, b, m[i+ 6], 17, -1473231341); + b = FF(b, c, d, a, m[i+ 7], 22, -45705983); + a = FF(a, b, c, d, m[i+ 8], 7, 1770035416); + d = FF(d, a, b, c, m[i+ 9], 12, -1958414417); + c = FF(c, d, a, b, m[i+10], 17, -42063); + b = FF(b, c, d, a, m[i+11], 22, -1990404162); + a = FF(a, b, c, d, m[i+12], 7, 1804603682); + d = FF(d, a, b, c, m[i+13], 12, -40341101); + c = FF(c, d, a, b, m[i+14], 17, -1502002290); + b = FF(b, c, d, a, m[i+15], 22, 1236535329); + + a = GG(a, b, c, d, m[i+ 1], 5, -165796510); + d = GG(d, a, b, c, m[i+ 6], 9, -1069501632); + c = GG(c, d, a, b, m[i+11], 14, 643717713); + b = GG(b, c, d, a, m[i+ 0], 20, -373897302); + a = GG(a, b, c, d, m[i+ 5], 5, -701558691); + d = GG(d, a, b, c, m[i+10], 9, 38016083); + c = GG(c, d, a, b, m[i+15], 14, -660478335); + b = GG(b, c, d, a, m[i+ 4], 20, -405537848); + a = GG(a, b, c, d, m[i+ 9], 5, 
568446438); + d = GG(d, a, b, c, m[i+14], 9, -1019803690); + c = GG(c, d, a, b, m[i+ 3], 14, -187363961); + b = GG(b, c, d, a, m[i+ 8], 20, 1163531501); + a = GG(a, b, c, d, m[i+13], 5, -1444681467); + d = GG(d, a, b, c, m[i+ 2], 9, -51403784); + c = GG(c, d, a, b, m[i+ 7], 14, 1735328473); + b = GG(b, c, d, a, m[i+12], 20, -1926607734); + + a = HH(a, b, c, d, m[i+ 5], 4, -378558); + d = HH(d, a, b, c, m[i+ 8], 11, -2022574463); + c = HH(c, d, a, b, m[i+11], 16, 1839030562); + b = HH(b, c, d, a, m[i+14], 23, -35309556); + a = HH(a, b, c, d, m[i+ 1], 4, -1530992060); + d = HH(d, a, b, c, m[i+ 4], 11, 1272893353); + c = HH(c, d, a, b, m[i+ 7], 16, -155497632); + b = HH(b, c, d, a, m[i+10], 23, -1094730640); + a = HH(a, b, c, d, m[i+13], 4, 681279174); + d = HH(d, a, b, c, m[i+ 0], 11, -358537222); + c = HH(c, d, a, b, m[i+ 3], 16, -722521979); + b = HH(b, c, d, a, m[i+ 6], 23, 76029189); + a = HH(a, b, c, d, m[i+ 9], 4, -640364487); + d = HH(d, a, b, c, m[i+12], 11, -421815835); + c = HH(c, d, a, b, m[i+15], 16, 530742520); + b = HH(b, c, d, a, m[i+ 2], 23, -995338651); + + a = II(a, b, c, d, m[i+ 0], 6, -198630844); + d = II(d, a, b, c, m[i+ 7], 10, 1126891415); + c = II(c, d, a, b, m[i+14], 15, -1416354905); + b = II(b, c, d, a, m[i+ 5], 21, -57434055); + a = II(a, b, c, d, m[i+12], 6, 1700485571); + d = II(d, a, b, c, m[i+ 3], 10, -1894986606); + c = II(c, d, a, b, m[i+10], 15, -1051523); + b = II(b, c, d, a, m[i+ 1], 21, -2054922799); + a = II(a, b, c, d, m[i+ 8], 6, 1873313359); + d = II(d, a, b, c, m[i+15], 10, -30611744); + c = II(c, d, a, b, m[i+ 6], 15, -1560198380); + b = II(b, c, d, a, m[i+13], 21, 1309151649); + a = II(a, b, c, d, m[i+ 4], 6, -145523070); + d = II(d, a, b, c, m[i+11], 10, -1120210379); + c = II(c, d, a, b, m[i+ 2], 15, 718787259); + b = II(b, c, d, a, m[i+ 9], 21, -343485551); + + a = (a + aa) >>> 0; + b = (b + bb) >>> 0; + c = (c + cc) >>> 0; + d = (d + dd) >>> 0; + } + + return crypt.endian([a, b, c, d]); + }; + + // Auxiliary functions + md5._ff = function (a, b, c, d, x, s, t) { + var n = a + (b & c | ~b & d) + (x >>> 0) + t; + return ((n << s) | (n >>> (32 - s))) + b; + }; + md5._gg = function (a, b, c, d, x, s, t) { + var n = a + (b & d | c & ~d) + (x >>> 0) + t; + return ((n << s) | (n >>> (32 - s))) + b; + }; + md5._hh = function (a, b, c, d, x, s, t) { + var n = a + (b ^ c ^ d) + (x >>> 0) + t; + return ((n << s) | (n >>> (32 - s))) + b; + }; + md5._ii = function (a, b, c, d, x, s, t) { + var n = a + (c ^ (b | ~d)) + (x >>> 0) + t; + return ((n << s) | (n >>> (32 - s))) + b; + }; + + // Package private blocksize + md5._blocksize = 16; + md5._digestsize = 16; + + module.exports = function (message, options) { + if (message === undefined || message === null) + throw new Error('Illegal argument ' + message); + + var digestbytes = crypt.wordsToBytes(md5(message, options)); + return options && options.asBytes ? digestbytes : + options && options.asString ? 
bin.bytesToString(digestbytes) : + crypt.bytesToHex(digestbytes); + }; + +})(); - // Convert a byte array to a string - bytesToString: function (bytes) { - return decodeURIComponent(escape(charenc.bin.bytesToString(bytes))); - } - }, - // Binary encoding - bin: { - // Convert a string to a byte array - stringToBytes: function (str) { - for (var bytes = [], i = 0; i < str.length; i++) bytes.push(str.charCodeAt(i) & 0xFF); - return bytes; - }, - - // Convert a byte array to a string - bytesToString: function (bytes) { - for (var str = [], i = 0; i < bytes.length; i++) str.push(String.fromCharCode(bytes[i])); - return str.join(''); - } - } - }; - - module.exports = charenc; - -/***/ }, +/***/ }), /* 5 */ -/***/ function(module, exports) { +/***/ (function(module, exports) { - /*! - * Determine if an object is a Buffer - * - * @author Feross Aboukhadijeh - * @license MIT - */ +(function() { + var base64map + = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/', - // The _isBuffer check is for Safari 5-7 support, because it's missing - // Object.prototype.constructor. Remove this eventually - module.exports = function (obj) { - return obj != null && (isBuffer(obj) || isSlowBuffer(obj) || !!obj._isBuffer); - }; + crypt = { + // Bit-wise rotation left + rotl: function(n, b) { + return (n << b) | (n >>> (32 - b)); + }, - function isBuffer(obj) { - return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj); - } + // Bit-wise rotation right + rotr: function(n, b) { + return (n << (32 - b)) | (n >>> b); + }, - // For Node v0.10 support. Remove this eventually. - function isSlowBuffer(obj) { - return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer(obj.slice(0, 0)); - } + // Swap big-endian to little-endian and vice versa + endian: function(n) { + // If number given, swap endian + if (n.constructor == Number) { + return crypt.rotl(n, 8) & 0x00FF00FF | crypt.rotl(n, 24) & 0xFF00FF00; + } -/***/ }, + // Else, assume array and swap all items + for (var i = 0; i < n.length; i++) + n[i] = crypt.endian(n[i]); + return n; + }, + + // Generate an array of any length of random bytes + randomBytes: function(n) { + for (var bytes = []; n > 0; n--) + bytes.push(Math.floor(Math.random() * 256)); + return bytes; + }, + + // Convert a byte array to big-endian 32-bit words + bytesToWords: function(bytes) { + for (var words = [], i = 0, b = 0; i < bytes.length; i++, b += 8) + words[b >>> 5] |= bytes[i] << (24 - b % 32); + return words; + }, + + // Convert big-endian 32-bit words to a byte array + wordsToBytes: function(words) { + for (var bytes = [], b = 0; b < words.length * 32; b += 8) + bytes.push((words[b >>> 5] >>> (24 - b % 32)) & 0xFF); + return bytes; + }, + + // Convert a byte array to a hex string + bytesToHex: function(bytes) { + for (var hex = [], i = 0; i < bytes.length; i++) { + hex.push((bytes[i] >>> 4).toString(16)); + hex.push((bytes[i] & 0xF).toString(16)); + } + return hex.join(''); + }, + + // Convert a hex string to a byte array + hexToBytes: function(hex) { + for (var bytes = [], c = 0; c < hex.length; c += 2) + bytes.push(parseInt(hex.substr(c, 2), 16)); + return bytes; + }, + + // Convert a byte array to a base-64 string + bytesToBase64: function(bytes) { + for (var base64 = [], i = 0; i < bytes.length; i += 3) { + var triplet = (bytes[i] << 16) | (bytes[i + 1] << 8) | bytes[i + 2]; + for (var j = 0; j < 4; j++) + if (i * 8 + j * 6 <= bytes.length * 8) + base64.push(base64map.charAt((triplet >>> 
6 * (3 - j)) & 0x3F)); + else + base64.push('='); + } + return base64.join(''); + }, + + // Convert a base-64 string to a byte array + base64ToBytes: function(base64) { + // Remove non-base-64 characters + base64 = base64.replace(/[^A-Z0-9+\/]/ig, ''); + + for (var bytes = [], i = 0, imod4 = 0; i < base64.length; + imod4 = ++i % 4) { + if (imod4 == 0) continue; + bytes.push(((base64map.indexOf(base64.charAt(i - 1)) + & (Math.pow(2, -2 * imod4 + 8) - 1)) << (imod4 * 2)) + | (base64map.indexOf(base64.charAt(i)) >>> (6 - imod4 * 2))); + } + return bytes; + } + }; + + module.exports = crypt; +})(); + + +/***/ }), /* 6 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports) { - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +/*! + * Determine if an object is a Buffer + * + * @author Feross Aboukhadijeh + * @license MIT + */ - const networkRequest = __webpack_require__(7); - const workerUtils = __webpack_require__(8); +// The _isBuffer check is for Safari 5-7 support, because it's missing +// Object.prototype.constructor. Remove this eventually +module.exports = function (obj) { + return obj != null && (isBuffer(obj) || isSlowBuffer(obj) || !!obj._isBuffer) +} - module.exports = { - networkRequest, - workerUtils - }; +function isBuffer (obj) { + return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj) +} -/***/ }, +// For Node v0.10 support. Remove this eventually. +function isSlowBuffer (obj) { + return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer(obj.slice(0, 0)) +} + + +/***/ }), /* 7 */ -/***/ function(module, exports) { +/***/ (function(module, exports) { - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - function networkRequest(url, opts) { - return fetch(url, { - cache: opts.loadFromCache ? "default" : "no-cache" - }).then(res => { - if (res.status >= 200 && res.status < 300) { - return res.text().then(text => ({ content: text })); - } - return Promise.reject(`request failed with status ${res.status}`); - }); - } +function networkRequest(url, opts) { + return fetch(url, { + cache: opts.loadFromCache ? 
"default" : "no-cache" + }).then(res => { + if (res.status >= 200 && res.status < 300) { + return res.text().then(text => ({ content: text })); + } + return Promise.reject(`request failed with status ${res.status}`); + }); +} - module.exports = networkRequest; +module.exports = networkRequest; -/***/ }, +/***/ }), /* 8 */ -/***/ function(module, exports) { +/***/ (function(module, exports) { - function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; } +function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; } - function WorkerDispatcher() { - this.msgId = 1; - this.worker = null; - } /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +function WorkerDispatcher() { + this.msgId = 1; + this.worker = null; +} /* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - WorkerDispatcher.prototype = { - start(url) { - this.worker = new Worker(url); - this.worker.onerror = () => { - console.error(`Error in worker ${url}`); - }; - }, +WorkerDispatcher.prototype = { + start(url) { + this.worker = new Worker(url); + this.worker.onerror = () => { + console.error(`Error in worker ${url}`); + }; + }, - stop() { - if (!this.worker) { - return; - } + stop() { + if (!this.worker) { + return; + } - this.worker.terminate(); - this.worker = null; - }, + this.worker.terminate(); + this.worker = null; + }, - task(method) { - return (...args) => { - return new Promise((resolve, reject) => { - const id = this.msgId++; - this.worker.postMessage({ id, method, args }); + task(method) { + return (...args) => { + return new Promise((resolve, reject) => { + const id = this.msgId++; + this.worker.postMessage({ id, method, args }); - const listener = ({ data: result }) => { - if (result.id !== id) { - return; - } + const listener = ({ data: result }) => { + if (result.id !== id) { + return; + } - if (!this.worker) { - reject("Oops, The worker has shutdown!"); - return; - } - this.worker.removeEventListener("message", listener); - if (result.error) { - reject(result.error); - } else { - resolve(result.response); - } - }; + if (!this.worker) { + return; + } - this.worker.addEventListener("message", listener); - }); - }; - } - }; + this.worker.removeEventListener("message", listener); + if (result.error) { + reject(result.error); + } else { + resolve(result.response); + } + }; - function workerHandler(publicInterface) { - return function (msg) { - const { id, method, args } = msg.data; - try { - const response = publicInterface[method].apply(undefined, args); - if (response instanceof Promise) { - response.then(val => self.postMessage({ id, response: val }), - // Error can't be sent via postMessage, so be sure to - // convert to string. - err => self.postMessage({ id, error: err.toString() })); - } else { - self.postMessage({ id, response }); - } - } catch (error) { - // Error can't be sent via postMessage, so be sure to convert to - // string. - self.postMessage({ id, error: error.toString() }); - } - }; - } + this.worker.addEventListener("message", listener); + }); + }; + } +}; - function streamingWorkerHandler(publicInterface, { timeout = 100 } = {}, worker = self) { - let streamingWorker = (() => { - var _ref = _asyncToGenerator(function* (id, tasks) { - let isWorking = true; +function workerHandler(publicInterface) { + return function (msg) { + const { id, method, args } = msg.data; + try { + const response = publicInterface[method].apply(undefined, args); + if (response instanceof Promise) { + response.then(val => self.postMessage({ id, response: val }), + // Error can't be sent via postMessage, so be sure to + // convert to string. + err => self.postMessage({ id, error: err.toString() })); + } else { + self.postMessage({ id, response }); + } + } catch (error) { + // Error can't be sent via postMessage, so be sure to convert to + // string. 
+ self.postMessage({ id, error: error.toString() }); + } + }; +} - const intervalId = setTimeout(function () { - isWorking = false; - }, timeout); +function streamingWorkerHandler(publicInterface, { timeout = 100 } = {}, worker = self) { + let streamingWorker = (() => { + var _ref = _asyncToGenerator(function* (id, tasks) { + let isWorking = true; - const results = []; - while (tasks.length !== 0 && isWorking) { - const { callback, context, args } = tasks.shift(); - const result = yield callback.call(context, args); - results.push(result); - } - worker.postMessage({ id, status: "pending", data: results }); - clearInterval(intervalId); + const intervalId = setTimeout(function () { + isWorking = false; + }, timeout); - if (tasks.length !== 0) { - yield streamingWorker(id, tasks); - } - }); + const results = []; + while (tasks.length !== 0 && isWorking) { + const { callback, context, args } = tasks.shift(); + const result = yield callback.call(context, args); + results.push(result); + } + worker.postMessage({ id, status: "pending", data: results }); + clearInterval(intervalId); - return function streamingWorker(_x, _x2) { - return _ref.apply(this, arguments); - }; - })(); + if (tasks.length !== 0) { + yield streamingWorker(id, tasks); + } + }); - return (() => { - var _ref2 = _asyncToGenerator(function* (msg) { - const { id, method, args } = msg.data; - const workerMethod = publicInterface[method]; - if (!workerMethod) { - console.error(`Could not find ${method} defined in worker.`); - } - worker.postMessage({ id, status: "start" }); + return function streamingWorker(_x, _x2) { + return _ref.apply(this, arguments); + }; + })(); - try { - const tasks = workerMethod(args); - yield streamingWorker(id, tasks); - worker.postMessage({ id, status: "done" }); - } catch (error) { - worker.postMessage({ id, status: "error", error }); - } - }); + return (() => { + var _ref2 = _asyncToGenerator(function* (msg) { + const { id, method, args } = msg.data; + const workerMethod = publicInterface[method]; + if (!workerMethod) { + console.error(`Could not find ${method} defined in worker.`); + } + worker.postMessage({ id, status: "start" }); - return function (_x3) { - return _ref2.apply(this, arguments); - }; - })(); - } + try { + const tasks = workerMethod(args); + yield streamingWorker(id, tasks); + worker.postMessage({ id, status: "done" }); + } catch (error) { + worker.postMessage({ id, status: "error", error }); + } + }); - module.exports = { - WorkerDispatcher, - workerHandler, - streamingWorkerHandler - }; + return function (_x3) { + return _ref2.apply(this, arguments); + }; + })(); +} -/***/ } -/******/ ]) -}); -; \ No newline at end of file +module.exports = { + WorkerDispatcher, + workerHandler, + streamingWorkerHandler +}; + +/***/ }), +/* 9 */, +/* 10 */, +/* 11 */, +/* 12 */, +/* 13 */, +/* 14 */, +/* 15 */ +/***/ (function(module, exports, __webpack_require__) { + +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +const { + originalToGeneratedId, + generatedToOriginalId, + isGeneratedId, + isOriginalId +} = __webpack_require__(3); + +const { workerUtils: { WorkerDispatcher } } = __webpack_require__(1); + +const dispatcher = new WorkerDispatcher(); + +const getOriginalURLs = dispatcher.task("getOriginalURLs"); +const getGeneratedLocation = dispatcher.task("getGeneratedLocation"); +const getAllGeneratedLocations = dispatcher.task("getAllGeneratedLocations"); +const getOriginalLocation = dispatcher.task("getOriginalLocation"); +const getLocationScopes = dispatcher.task("getLocationScopes"); +const getOriginalSourceText = dispatcher.task("getOriginalSourceText"); +const applySourceMap = dispatcher.task("applySourceMap"); +const clearSourceMaps = dispatcher.task("clearSourceMaps"); +const hasMappedSource = dispatcher.task("hasMappedSource"); + +module.exports = { + originalToGeneratedId, + generatedToOriginalId, + isGeneratedId, + isOriginalId, + hasMappedSource, + getOriginalURLs, + getGeneratedLocation, + getAllGeneratedLocations, + getOriginalLocation, + getLocationScopes, + getOriginalSourceText, + applySourceMap, + clearSourceMaps, + startSourceMapWorker: dispatcher.start.bind(dispatcher), + stopSourceMapWorker: dispatcher.stop.bind(dispatcher) +}; + +/***/ }) +/******/ ]); +}); \ No newline at end of file diff --git a/devtools/client/shared/source-map/worker.js b/devtools/client/shared/source-map/worker.js index 6b9b4386f3a8..28fbf90d82be 100644 --- a/devtools/client/shared/source-map/worker.js +++ b/devtools/client/shared/source-map/worker.js @@ -11,7120 +11,4318 @@ return /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; - +/******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { - +/******/ /******/ // Check if module is in cache -/******/ if(installedModules[moduleId]) +/******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; - +/******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { -/******/ exports: {}, -/******/ id: moduleId, -/******/ loaded: false +/******/ i: moduleId, +/******/ l: false, +/******/ exports: {} /******/ }; - +/******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); - +/******/ /******/ // Flag the module as loaded -/******/ module.loaded = true; - +/******/ module.l = true; +/******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } - - +/******/ +/******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; - +/******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; - +/******/ +/******/ // define getter function for harmony exports +/******/ __webpack_require__.d = function(exports, name, getter) { +/******/ if(!__webpack_require__.o(exports, name)) { +/******/ Object.defineProperty(exports, name, { +/******/ configurable: false, +/******/ enumerable: true, +/******/ get: getter +/******/ }); +/******/ } +/******/ }; +/******/ +/******/ // getDefaultExport function for compatibility with non-harmony modules +/******/ __webpack_require__.n = function(module) { +/******/ var getter = module && module.__esModule ? 
+/******/ function getDefault() { return module['default']; } : +/******/ function getModuleExports() { return module; }; +/******/ __webpack_require__.d(getter, 'a', getter); +/******/ return getter; +/******/ }; +/******/ +/******/ // Object.prototype.hasOwnProperty.call +/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; +/******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; - +/******/ /******/ // Load entry module and return exports -/******/ return __webpack_require__(0); +/******/ return __webpack_require__(__webpack_require__.s = 16); /******/ }) /************************************************************************/ /******/ ([ /* 0 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports) { - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +var charenc = { + // UTF-8 encoding + utf8: { + // Convert a string to a byte array + stringToBytes: function(str) { + return charenc.bin.stringToBytes(unescape(encodeURIComponent(str))); + }, - const { - getOriginalURLs, - getGeneratedLocation, - getOriginalLocation, - getOriginalSourceText, - getLocationScopes, - hasMappedSource, - applySourceMap - } = __webpack_require__(9); + // Convert a byte array to a string + bytesToString: function(bytes) { + return decodeURIComponent(escape(charenc.bin.bytesToString(bytes))); + } + }, - const { clearSourceMaps } = __webpack_require__(23); + // Binary encoding + bin: { + // Convert a string to a byte array + stringToBytes: function(str) { + for (var bytes = [], i = 0; i < str.length; i++) + bytes.push(str.charCodeAt(i) & 0xFF); + return bytes; + }, - const { workerUtils: { workerHandler } } = __webpack_require__(6); + // Convert a byte array to a string + bytesToString: function(bytes) { + for (var str = [], i = 0; i < bytes.length; i++) + str.push(String.fromCharCode(bytes[i])); + return str.join(''); + } + } +}; - // The interface is implemented in source-map to be - // easier to unit test. - self.onmessage = workerHandler({ - getOriginalURLs, - getGeneratedLocation, - getOriginalLocation, - getLocationScopes, - getOriginalSourceText, - hasMappedSource, - applySourceMap, - clearSourceMaps - }); +module.exports = charenc; -/***/ }, + +/***/ }), /* 1 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports, __webpack_require__) { - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - const md5 = __webpack_require__(2); +const networkRequest = __webpack_require__(7); +const workerUtils = __webpack_require__(8); - function originalToGeneratedId(originalId) { - const match = originalId.match(/(.*)\/originalSource/); - return match ? 
match[1] : ""; - } +module.exports = { + networkRequest, + workerUtils +}; - function generatedToOriginalId(generatedId, url) { - return `${generatedId}/originalSource-${md5(url)}`; - } - - function isOriginalId(id) { - return !!id.match(/\/originalSource/); - } - - function isGeneratedId(id) { - return !isOriginalId(id); - } - - /** - * Trims the query part or reference identifier of a URL string, if necessary. - */ - function trimUrlQuery(url) { - let length = url.length; - let q1 = url.indexOf("?"); - let q2 = url.indexOf("&"); - let q3 = url.indexOf("#"); - let q = Math.min(q1 != -1 ? q1 : length, q2 != -1 ? q2 : length, q3 != -1 ? q3 : length); - - return url.slice(0, q); - } - - // Map suffix to content type. - const contentMap = { - "js": "text/javascript", - "jsm": "text/javascript", - "ts": "text/typescript", - "tsx": "text/typescript-jsx", - "jsx": "text/jsx", - "coffee": "text/coffeescript", - "elm": "text/elm", - "cljs": "text/x-clojure" - }; - - /** - * Returns the content type for the specified URL. If no specific - * content type can be determined, "text/plain" is returned. - * - * @return String - * The content type. - */ - function getContentType(url) { - url = trimUrlQuery(url); - let dot = url.lastIndexOf("."); - if (dot >= 0) { - let name = url.substring(dot + 1); - if (name in contentMap) { - return contentMap[name]; - } - } - return "text/plain"; - } - - module.exports = { - originalToGeneratedId, - generatedToOriginalId, - isOriginalId, - isGeneratedId, - getContentType, - contentMapForTesting: contentMap - }; - -/***/ }, +/***/ }), /* 2 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports) { - (function () { - var crypt = __webpack_require__(3), - utf8 = __webpack_require__(4).utf8, - isBuffer = __webpack_require__(5), - bin = __webpack_require__(4).bin, +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ + +/** + * This is a helper function for getting values from parameter/options + * objects. + * + * @param args The object we are extracting values from + * @param name The name of the property we are getting. + * @param defaultValue An optional value to return if the property is missing + * from the object. If this is not specified and the property is missing, an + * error will be thrown. 
+ */ +function getArg(aArgs, aName, aDefaultValue) { + if (aName in aArgs) { + return aArgs[aName]; + } else if (arguments.length === 3) { + return aDefaultValue; + } else { + throw new Error('"' + aName + '" is a required argument.'); + } +} +exports.getArg = getArg; + +var urlRegexp = /^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/; +var dataUrlRegexp = /^data:.+\,.+$/; + +function urlParse(aUrl) { + var match = aUrl.match(urlRegexp); + if (!match) { + return null; + } + return { + scheme: match[1], + auth: match[2], + host: match[3], + port: match[4], + path: match[5] + }; +} +exports.urlParse = urlParse; + +function urlGenerate(aParsedUrl) { + var url = ''; + if (aParsedUrl.scheme) { + url += aParsedUrl.scheme + ':'; + } + url += '//'; + if (aParsedUrl.auth) { + url += aParsedUrl.auth + '@'; + } + if (aParsedUrl.host) { + url += aParsedUrl.host; + } + if (aParsedUrl.port) { + url += ":" + aParsedUrl.port + } + if (aParsedUrl.path) { + url += aParsedUrl.path; + } + return url; +} +exports.urlGenerate = urlGenerate; + +/** + * Normalizes a path, or the path portion of a URL: + * + * - Replaces consecutive slashes with one slash. + * - Removes unnecessary '.' parts. + * - Removes unnecessary '/..' parts. + * + * Based on code in the Node.js 'path' core module. + * + * @param aPath The path or url to normalize. + */ +function normalize(aPath) { + var path = aPath; + var url = urlParse(aPath); + if (url) { + if (!url.path) { + return aPath; + } + path = url.path; + } + var isAbsolute = exports.isAbsolute(path); + + var parts = path.split(/\/+/); + for (var part, up = 0, i = parts.length - 1; i >= 0; i--) { + part = parts[i]; + if (part === '.') { + parts.splice(i, 1); + } else if (part === '..') { + up++; + } else if (up > 0) { + if (part === '') { + // The first part is blank if the path is absolute. Trying to go + // above the root is a no-op. Therefore we can remove all '..' parts + // directly after the root. + parts.splice(i + 1, up); + up = 0; + } else { + parts.splice(i, 2); + up--; + } + } + } + path = parts.join('/'); + + if (path === '') { + path = isAbsolute ? '/' : '.'; + } + + if (url) { + url.path = path; + return urlGenerate(url); + } + return path; +} +exports.normalize = normalize; + +/** + * Joins two paths/URLs. + * + * @param aRoot The root path or URL. + * @param aPath The path or URL to be joined with the root. + * + * - If aPath is a URL or a data URI, aPath is returned, unless aPath is a + * scheme-relative URL: Then the scheme of aRoot, if any, is prepended + * first. + * - Otherwise aPath is a path. If aRoot is a URL, then its path portion + * is updated with the result and aRoot is returned. Otherwise the result + * is returned. + * - If aPath is absolute, the result is aPath. + * - Otherwise the two paths are joined with a slash. + * - Joining for example 'http://' and 'www.example.com' is also supported. 
+ */ +function join(aRoot, aPath) { + if (aRoot === "") { + aRoot = "."; + } + if (aPath === "") { + aPath = "."; + } + var aPathUrl = urlParse(aPath); + var aRootUrl = urlParse(aRoot); + if (aRootUrl) { + aRoot = aRootUrl.path || '/'; + } + + // `join(foo, '//www.example.org')` + if (aPathUrl && !aPathUrl.scheme) { + if (aRootUrl) { + aPathUrl.scheme = aRootUrl.scheme; + } + return urlGenerate(aPathUrl); + } + + if (aPathUrl || aPath.match(dataUrlRegexp)) { + return aPath; + } + + // `join('http://', 'www.example.com')` + if (aRootUrl && !aRootUrl.host && !aRootUrl.path) { + aRootUrl.host = aPath; + return urlGenerate(aRootUrl); + } + + var joined = aPath.charAt(0) === '/' + ? aPath + : normalize(aRoot.replace(/\/+$/, '') + '/' + aPath); + + if (aRootUrl) { + aRootUrl.path = joined; + return urlGenerate(aRootUrl); + } + return joined; +} +exports.join = join; + +exports.isAbsolute = function (aPath) { + return aPath.charAt(0) === '/' || urlRegexp.test(aPath); +}; + +/** + * Make a path relative to a URL or another path. + * + * @param aRoot The root path or URL. + * @param aPath The path or URL to be made relative to aRoot. + */ +function relative(aRoot, aPath) { + if (aRoot === "") { + aRoot = "."; + } + + aRoot = aRoot.replace(/\/$/, ''); + + // It is possible for the path to be above the root. In this case, simply + // checking whether the root is a prefix of the path won't work. Instead, we + // need to remove components from the root one by one, until either we find + // a prefix that fits, or we run out of components to remove. + var level = 0; + while (aPath.indexOf(aRoot + '/') !== 0) { + var index = aRoot.lastIndexOf("/"); + if (index < 0) { + return aPath; + } + + // If the only part of the root that is left is the scheme (i.e. http://, + // file:///, etc.), one or more slashes (/), or simply nothing at all, we + // have exhausted all components, so the path is not relative to the root. + aRoot = aRoot.slice(0, index); + if (aRoot.match(/^([^\/]+:\/)?\/*$/)) { + return aPath; + } + + ++level; + } + + // Make sure we add a "../" for each component we removed from the root. + return Array(level + 1).join("../") + aPath.substr(aRoot.length + 1); +} +exports.relative = relative; + +var supportsNullProto = (function () { + var obj = Object.create(null); + return !('__proto__' in obj); +}()); + +function identity (s) { + return s; +} + +/** + * Because behavior goes wacky when you set `__proto__` on objects, we + * have to prefix all the strings in our set with an arbitrary character. + * + * See https://github.com/mozilla/source-map/pull/31 and + * https://github.com/mozilla/source-map/issues/30 + * + * @param String aStr + */ +function toSetString(aStr) { + if (isProtoString(aStr)) { + return '$' + aStr; + } + + return aStr; +} +exports.toSetString = supportsNullProto ? identity : toSetString; + +function fromSetString(aStr) { + if (isProtoString(aStr)) { + return aStr.slice(1); + } + + return aStr; +} +exports.fromSetString = supportsNullProto ? 
identity : fromSetString; + +function isProtoString(s) { + if (!s) { + return false; + } + + var length = s.length; + + if (length < 9 /* "__proto__".length */) { + return false; + } + + if (s.charCodeAt(length - 1) !== 95 /* '_' */ || + s.charCodeAt(length - 2) !== 95 /* '_' */ || + s.charCodeAt(length - 3) !== 111 /* 'o' */ || + s.charCodeAt(length - 4) !== 116 /* 't' */ || + s.charCodeAt(length - 5) !== 111 /* 'o' */ || + s.charCodeAt(length - 6) !== 114 /* 'r' */ || + s.charCodeAt(length - 7) !== 112 /* 'p' */ || + s.charCodeAt(length - 8) !== 95 /* '_' */ || + s.charCodeAt(length - 9) !== 95 /* '_' */) { + return false; + } + + for (var i = length - 10; i >= 0; i--) { + if (s.charCodeAt(i) !== 36 /* '$' */) { + return false; + } + } + + return true; +} + +/** + * Comparator between two mappings where the original positions are compared. + * + * Optionally pass in `true` as `onlyCompareGenerated` to consider two + * mappings with the same original source/line/column, but different generated + * line and column the same. Useful when searching for a mapping with a + * stubbed out mapping. + */ +function compareByOriginalPositions(mappingA, mappingB, onlyCompareOriginal) { + var cmp = strcmp(mappingA.source, mappingB.source); + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.originalLine - mappingB.originalLine; + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.originalColumn - mappingB.originalColumn; + if (cmp !== 0 || onlyCompareOriginal) { + return cmp; + } + + cmp = mappingA.generatedColumn - mappingB.generatedColumn; + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.generatedLine - mappingB.generatedLine; + if (cmp !== 0) { + return cmp; + } + + return strcmp(mappingA.name, mappingB.name); +} +exports.compareByOriginalPositions = compareByOriginalPositions; + +/** + * Comparator between two mappings with deflated source and name indices where + * the generated positions are compared. + * + * Optionally pass in `true` as `onlyCompareGenerated` to consider two + * mappings with the same generated line and column, but different + * source/name/original line and column the same. Useful when searching for a + * mapping with a stubbed out mapping. + */ +function compareByGeneratedPositionsDeflated(mappingA, mappingB, onlyCompareGenerated) { + var cmp = mappingA.generatedLine - mappingB.generatedLine; + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.generatedColumn - mappingB.generatedColumn; + if (cmp !== 0 || onlyCompareGenerated) { + return cmp; + } + + cmp = strcmp(mappingA.source, mappingB.source); + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.originalLine - mappingB.originalLine; + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.originalColumn - mappingB.originalColumn; + if (cmp !== 0) { + return cmp; + } + + return strcmp(mappingA.name, mappingB.name); +} +exports.compareByGeneratedPositionsDeflated = compareByGeneratedPositionsDeflated; + +function strcmp(aStr1, aStr2) { + if (aStr1 === aStr2) { + return 0; + } + + if (aStr1 === null) { + return 1; // aStr2 !== null + } + + if (aStr2 === null) { + return -1; // aStr1 !== null + } + + if (aStr1 > aStr2) { + return 1; + } + + return -1; +} + +/** + * Comparator between two mappings with inflated source and name strings where + * the generated positions are compared. 
+ */ +function compareByGeneratedPositionsInflated(mappingA, mappingB) { + var cmp = mappingA.generatedLine - mappingB.generatedLine; + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.generatedColumn - mappingB.generatedColumn; + if (cmp !== 0) { + return cmp; + } + + cmp = strcmp(mappingA.source, mappingB.source); + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.originalLine - mappingB.originalLine; + if (cmp !== 0) { + return cmp; + } + + cmp = mappingA.originalColumn - mappingB.originalColumn; + if (cmp !== 0) { + return cmp; + } + + return strcmp(mappingA.name, mappingB.name); +} +exports.compareByGeneratedPositionsInflated = compareByGeneratedPositionsInflated; + +/** + * Strip any JSON XSSI avoidance prefix from the string (as documented + * in the source maps specification), and then parse the string as + * JSON. + */ +function parseSourceMapInput(str) { + return JSON.parse(str.replace(/^\)]}'[^\n]*\n/, '')); +} +exports.parseSourceMapInput = parseSourceMapInput; + +/** + * Compute the URL of a source given the the source root, the source's + * URL, and the source map's URL. + */ +function computeSourceURL(sourceRoot, sourceURL, sourceMapURL) { + sourceURL = sourceURL || ''; + + if (sourceRoot) { + // This follows what Chrome does. + if (sourceRoot[sourceRoot.length - 1] !== '/' && sourceURL[0] !== '/') { + sourceRoot += '/'; + } + // The spec says: + // Line 4: An optional source root, useful for relocating source + // files on a server or removing repeated values in the + // “sources” entry. This value is prepended to the individual + // entries in the “source” field. + sourceURL = sourceRoot + sourceURL; + } + + // Historically, SourceMapConsumer did not take the sourceMapURL as + // a parameter. This mode is still somewhat supported, which is why + // this code block is conditional. However, it's preferable to pass + // the source map URL to SourceMapConsumer, so that this function + // can implement the source URL resolution algorithm as outlined in + // the spec. This block is basically the equivalent of: + // new URL(sourceURL, sourceMapURL).toString() + // ... except it avoids using URL, which wasn't available in the + // older releases of node still supported by this library. + // + // The spec says: + // If the sources are not absolute URLs after prepending of the + // “sourceRoot”, the sources are resolved relative to the + // SourceMap (like resolving script src in a html document). + if (sourceMapURL) { + var parsed = urlParse(sourceMapURL); + if (!parsed) { + throw new Error("sourceMapURL could not be parsed"); + } + if (parsed.path) { + // Strip the last path component, but keep the "/". 
+ var index = parsed.path.lastIndexOf('/'); + if (index >= 0) { + parsed.path = parsed.path.substring(0, index + 1); + } + } + sourceURL = join(urlGenerate(parsed), sourceURL); + } + + return normalize(sourceURL); +} +exports.computeSourceURL = computeSourceURL; - // The core - md5 = function (message, options) { - // Convert to byte array - if (message.constructor == String) { - if (options && options.encoding === 'binary') message = bin.stringToBytes(message);else message = utf8.stringToBytes(message); - } else if (isBuffer(message)) message = Array.prototype.slice.call(message, 0);else if (!Array.isArray(message)) message = message.toString(); - // else, assume byte array already - - var m = crypt.bytesToWords(message), - l = message.length * 8, - a = 1732584193, - b = -271733879, - c = -1732584194, - d = 271733878; - - // Swap endian - for (var i = 0; i < m.length; i++) { - m[i] = (m[i] << 8 | m[i] >>> 24) & 0x00FF00FF | (m[i] << 24 | m[i] >>> 8) & 0xFF00FF00; - } - - // Padding - m[l >>> 5] |= 0x80 << l % 32; - m[(l + 64 >>> 9 << 4) + 14] = l; - - // Method shortcuts - var FF = md5._ff, - GG = md5._gg, - HH = md5._hh, - II = md5._ii; - - for (var i = 0; i < m.length; i += 16) { - - var aa = a, - bb = b, - cc = c, - dd = d; - - a = FF(a, b, c, d, m[i + 0], 7, -680876936); - d = FF(d, a, b, c, m[i + 1], 12, -389564586); - c = FF(c, d, a, b, m[i + 2], 17, 606105819); - b = FF(b, c, d, a, m[i + 3], 22, -1044525330); - a = FF(a, b, c, d, m[i + 4], 7, -176418897); - d = FF(d, a, b, c, m[i + 5], 12, 1200080426); - c = FF(c, d, a, b, m[i + 6], 17, -1473231341); - b = FF(b, c, d, a, m[i + 7], 22, -45705983); - a = FF(a, b, c, d, m[i + 8], 7, 1770035416); - d = FF(d, a, b, c, m[i + 9], 12, -1958414417); - c = FF(c, d, a, b, m[i + 10], 17, -42063); - b = FF(b, c, d, a, m[i + 11], 22, -1990404162); - a = FF(a, b, c, d, m[i + 12], 7, 1804603682); - d = FF(d, a, b, c, m[i + 13], 12, -40341101); - c = FF(c, d, a, b, m[i + 14], 17, -1502002290); - b = FF(b, c, d, a, m[i + 15], 22, 1236535329); - - a = GG(a, b, c, d, m[i + 1], 5, -165796510); - d = GG(d, a, b, c, m[i + 6], 9, -1069501632); - c = GG(c, d, a, b, m[i + 11], 14, 643717713); - b = GG(b, c, d, a, m[i + 0], 20, -373897302); - a = GG(a, b, c, d, m[i + 5], 5, -701558691); - d = GG(d, a, b, c, m[i + 10], 9, 38016083); - c = GG(c, d, a, b, m[i + 15], 14, -660478335); - b = GG(b, c, d, a, m[i + 4], 20, -405537848); - a = GG(a, b, c, d, m[i + 9], 5, 568446438); - d = GG(d, a, b, c, m[i + 14], 9, -1019803690); - c = GG(c, d, a, b, m[i + 3], 14, -187363961); - b = GG(b, c, d, a, m[i + 8], 20, 1163531501); - a = GG(a, b, c, d, m[i + 13], 5, -1444681467); - d = GG(d, a, b, c, m[i + 2], 9, -51403784); - c = GG(c, d, a, b, m[i + 7], 14, 1735328473); - b = GG(b, c, d, a, m[i + 12], 20, -1926607734); - - a = HH(a, b, c, d, m[i + 5], 4, -378558); - d = HH(d, a, b, c, m[i + 8], 11, -2022574463); - c = HH(c, d, a, b, m[i + 11], 16, 1839030562); - b = HH(b, c, d, a, m[i + 14], 23, -35309556); - a = HH(a, b, c, d, m[i + 1], 4, -1530992060); - d = HH(d, a, b, c, m[i + 4], 11, 1272893353); - c = HH(c, d, a, b, m[i + 7], 16, -155497632); - b = HH(b, c, d, a, m[i + 10], 23, -1094730640); - a = HH(a, b, c, d, m[i + 13], 4, 681279174); - d = HH(d, a, b, c, m[i + 0], 11, -358537222); - c = HH(c, d, a, b, m[i + 3], 16, -722521979); - b = HH(b, c, d, a, m[i + 6], 23, 76029189); - a = HH(a, b, c, d, m[i + 9], 4, -640364487); - d = HH(d, a, b, c, m[i + 12], 11, -421815835); - c = HH(c, d, a, b, m[i + 15], 16, 530742520); - b = HH(b, c, d, a, m[i + 2], 23, -995338651); - 
- a = II(a, b, c, d, m[i + 0], 6, -198630844); - d = II(d, a, b, c, m[i + 7], 10, 1126891415); - c = II(c, d, a, b, m[i + 14], 15, -1416354905); - b = II(b, c, d, a, m[i + 5], 21, -57434055); - a = II(a, b, c, d, m[i + 12], 6, 1700485571); - d = II(d, a, b, c, m[i + 3], 10, -1894986606); - c = II(c, d, a, b, m[i + 10], 15, -1051523); - b = II(b, c, d, a, m[i + 1], 21, -2054922799); - a = II(a, b, c, d, m[i + 8], 6, 1873313359); - d = II(d, a, b, c, m[i + 15], 10, -30611744); - c = II(c, d, a, b, m[i + 6], 15, -1560198380); - b = II(b, c, d, a, m[i + 13], 21, 1309151649); - a = II(a, b, c, d, m[i + 4], 6, -145523070); - d = II(d, a, b, c, m[i + 11], 10, -1120210379); - c = II(c, d, a, b, m[i + 2], 15, 718787259); - b = II(b, c, d, a, m[i + 9], 21, -343485551); - - a = a + aa >>> 0; - b = b + bb >>> 0; - c = c + cc >>> 0; - d = d + dd >>> 0; - } - - return crypt.endian([a, b, c, d]); - }; - - // Auxiliary functions - md5._ff = function (a, b, c, d, x, s, t) { - var n = a + (b & c | ~b & d) + (x >>> 0) + t; - return (n << s | n >>> 32 - s) + b; - }; - md5._gg = function (a, b, c, d, x, s, t) { - var n = a + (b & d | c & ~d) + (x >>> 0) + t; - return (n << s | n >>> 32 - s) + b; - }; - md5._hh = function (a, b, c, d, x, s, t) { - var n = a + (b ^ c ^ d) + (x >>> 0) + t; - return (n << s | n >>> 32 - s) + b; - }; - md5._ii = function (a, b, c, d, x, s, t) { - var n = a + (c ^ (b | ~d)) + (x >>> 0) + t; - return (n << s | n >>> 32 - s) + b; - }; - - // Package private blocksize - md5._blocksize = 16; - md5._digestsize = 16; - - module.exports = function (message, options) { - if (message === undefined || message === null) throw new Error('Illegal argument ' + message); - - var digestbytes = crypt.wordsToBytes(md5(message, options)); - return options && options.asBytes ? digestbytes : options && options.asString ? bin.bytesToString(digestbytes) : crypt.bytesToHex(digestbytes); - }; - })(); - -/***/ }, +/***/ }), /* 3 */ -/***/ function(module, exports) { +/***/ (function(module, exports, __webpack_require__) { - (function () { - var base64map = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/', - crypt = { - // Bit-wise rotation left - rotl: function (n, b) { - return n << b | n >>> 32 - b; - }, +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - // Bit-wise rotation right - rotr: function (n, b) { - return n << 32 - b | n >>> b; - }, +const md5 = __webpack_require__(4); - // Swap big-endian to little-endian and vice versa - endian: function (n) { - // If number given, swap endian - if (n.constructor == Number) { - return crypt.rotl(n, 8) & 0x00FF00FF | crypt.rotl(n, 24) & 0xFF00FF00; - } +function originalToGeneratedId(originalId) { + const match = originalId.match(/(.*)\/originalSource/); + return match ? 
match[1] : ""; +} - // Else, assume array and swap all items - for (var i = 0; i < n.length; i++) n[i] = crypt.endian(n[i]); - return n; - }, +function generatedToOriginalId(generatedId, url) { + return `${generatedId}/originalSource-${md5(url)}`; +} - // Generate an array of any length of random bytes - randomBytes: function (n) { - for (var bytes = []; n > 0; n--) bytes.push(Math.floor(Math.random() * 256)); - return bytes; - }, +function isOriginalId(id) { + return !!id.match(/\/originalSource/); +} - // Convert a byte array to big-endian 32-bit words - bytesToWords: function (bytes) { - for (var words = [], i = 0, b = 0; i < bytes.length; i++, b += 8) words[b >>> 5] |= bytes[i] << 24 - b % 32; - return words; - }, +function isGeneratedId(id) { + return !isOriginalId(id); +} - // Convert big-endian 32-bit words to a byte array - wordsToBytes: function (words) { - for (var bytes = [], b = 0; b < words.length * 32; b += 8) bytes.push(words[b >>> 5] >>> 24 - b % 32 & 0xFF); - return bytes; - }, +/** + * Trims the query part or reference identifier of a URL string, if necessary. + */ +function trimUrlQuery(url) { + let length = url.length; + let q1 = url.indexOf("?"); + let q2 = url.indexOf("&"); + let q3 = url.indexOf("#"); + let q = Math.min(q1 != -1 ? q1 : length, q2 != -1 ? q2 : length, q3 != -1 ? q3 : length); - // Convert a byte array to a hex string - bytesToHex: function (bytes) { - for (var hex = [], i = 0; i < bytes.length; i++) { - hex.push((bytes[i] >>> 4).toString(16)); - hex.push((bytes[i] & 0xF).toString(16)); - } - return hex.join(''); - }, + return url.slice(0, q); +} - // Convert a hex string to a byte array - hexToBytes: function (hex) { - for (var bytes = [], c = 0; c < hex.length; c += 2) bytes.push(parseInt(hex.substr(c, 2), 16)); - return bytes; - }, +// Map suffix to content type. +const contentMap = { + "js": "text/javascript", + "jsm": "text/javascript", + "mjs": "text/javascript", + "ts": "text/typescript", + "tsx": "text/typescript-jsx", + "jsx": "text/jsx", + "coffee": "text/coffeescript", + "elm": "text/elm", + "cljs": "text/x-clojure" +}; - // Convert a byte array to a base-64 string - bytesToBase64: function (bytes) { - for (var base64 = [], i = 0; i < bytes.length; i += 3) { - var triplet = bytes[i] << 16 | bytes[i + 1] << 8 | bytes[i + 2]; - for (var j = 0; j < 4; j++) if (i * 8 + j * 6 <= bytes.length * 8) base64.push(base64map.charAt(triplet >>> 6 * (3 - j) & 0x3F));else base64.push('='); - } - return base64.join(''); - }, +/** + * Returns the content type for the specified URL. If no specific + * content type can be determined, "text/plain" is returned. + * + * @return String + * The content type. 
+ */ +function getContentType(url) { + url = trimUrlQuery(url); + let dot = url.lastIndexOf("."); + if (dot >= 0) { + let name = url.substring(dot + 1); + if (name in contentMap) { + return contentMap[name]; + } + } + return "text/plain"; +} - // Convert a base-64 string to a byte array - base64ToBytes: function (base64) { - // Remove non-base-64 characters - base64 = base64.replace(/[^A-Z0-9+\/]/ig, ''); +module.exports = { + originalToGeneratedId, + generatedToOriginalId, + isOriginalId, + isGeneratedId, + getContentType, + contentMapForTesting: contentMap +}; - for (var bytes = [], i = 0, imod4 = 0; i < base64.length; imod4 = ++i % 4) { - if (imod4 == 0) continue; - bytes.push((base64map.indexOf(base64.charAt(i - 1)) & Math.pow(2, -2 * imod4 + 8) - 1) << imod4 * 2 | base64map.indexOf(base64.charAt(i)) >>> 6 - imod4 * 2); - } - return bytes; - } - }; - - module.exports = crypt; - })(); - -/***/ }, +/***/ }), /* 4 */ -/***/ function(module, exports) { +/***/ (function(module, exports, __webpack_require__) { - var charenc = { - // UTF-8 encoding - utf8: { - // Convert a string to a byte array - stringToBytes: function (str) { - return charenc.bin.stringToBytes(unescape(encodeURIComponent(str))); - }, +(function(){ + var crypt = __webpack_require__(5), + utf8 = __webpack_require__(0).utf8, + isBuffer = __webpack_require__(6), + bin = __webpack_require__(0).bin, + + // The core + md5 = function (message, options) { + // Convert to byte array + if (message.constructor == String) + if (options && options.encoding === 'binary') + message = bin.stringToBytes(message); + else + message = utf8.stringToBytes(message); + else if (isBuffer(message)) + message = Array.prototype.slice.call(message, 0); + else if (!Array.isArray(message)) + message = message.toString(); + // else, assume byte array already + + var m = crypt.bytesToWords(message), + l = message.length * 8, + a = 1732584193, + b = -271733879, + c = -1732584194, + d = 271733878; + + // Swap endian + for (var i = 0; i < m.length; i++) { + m[i] = ((m[i] << 8) | (m[i] >>> 24)) & 0x00FF00FF | + ((m[i] << 24) | (m[i] >>> 8)) & 0xFF00FF00; + } + + // Padding + m[l >>> 5] |= 0x80 << (l % 32); + m[(((l + 64) >>> 9) << 4) + 14] = l; + + // Method shortcuts + var FF = md5._ff, + GG = md5._gg, + HH = md5._hh, + II = md5._ii; + + for (var i = 0; i < m.length; i += 16) { + + var aa = a, + bb = b, + cc = c, + dd = d; + + a = FF(a, b, c, d, m[i+ 0], 7, -680876936); + d = FF(d, a, b, c, m[i+ 1], 12, -389564586); + c = FF(c, d, a, b, m[i+ 2], 17, 606105819); + b = FF(b, c, d, a, m[i+ 3], 22, -1044525330); + a = FF(a, b, c, d, m[i+ 4], 7, -176418897); + d = FF(d, a, b, c, m[i+ 5], 12, 1200080426); + c = FF(c, d, a, b, m[i+ 6], 17, -1473231341); + b = FF(b, c, d, a, m[i+ 7], 22, -45705983); + a = FF(a, b, c, d, m[i+ 8], 7, 1770035416); + d = FF(d, a, b, c, m[i+ 9], 12, -1958414417); + c = FF(c, d, a, b, m[i+10], 17, -42063); + b = FF(b, c, d, a, m[i+11], 22, -1990404162); + a = FF(a, b, c, d, m[i+12], 7, 1804603682); + d = FF(d, a, b, c, m[i+13], 12, -40341101); + c = FF(c, d, a, b, m[i+14], 17, -1502002290); + b = FF(b, c, d, a, m[i+15], 22, 1236535329); + + a = GG(a, b, c, d, m[i+ 1], 5, -165796510); + d = GG(d, a, b, c, m[i+ 6], 9, -1069501632); + c = GG(c, d, a, b, m[i+11], 14, 643717713); + b = GG(b, c, d, a, m[i+ 0], 20, -373897302); + a = GG(a, b, c, d, m[i+ 5], 5, -701558691); + d = GG(d, a, b, c, m[i+10], 9, 38016083); + c = GG(c, d, a, b, m[i+15], 14, -660478335); + b = GG(b, c, d, a, m[i+ 4], 20, -405537848); + a = GG(a, b, c, d, m[i+ 9], 5, 
568446438); + d = GG(d, a, b, c, m[i+14], 9, -1019803690); + c = GG(c, d, a, b, m[i+ 3], 14, -187363961); + b = GG(b, c, d, a, m[i+ 8], 20, 1163531501); + a = GG(a, b, c, d, m[i+13], 5, -1444681467); + d = GG(d, a, b, c, m[i+ 2], 9, -51403784); + c = GG(c, d, a, b, m[i+ 7], 14, 1735328473); + b = GG(b, c, d, a, m[i+12], 20, -1926607734); + + a = HH(a, b, c, d, m[i+ 5], 4, -378558); + d = HH(d, a, b, c, m[i+ 8], 11, -2022574463); + c = HH(c, d, a, b, m[i+11], 16, 1839030562); + b = HH(b, c, d, a, m[i+14], 23, -35309556); + a = HH(a, b, c, d, m[i+ 1], 4, -1530992060); + d = HH(d, a, b, c, m[i+ 4], 11, 1272893353); + c = HH(c, d, a, b, m[i+ 7], 16, -155497632); + b = HH(b, c, d, a, m[i+10], 23, -1094730640); + a = HH(a, b, c, d, m[i+13], 4, 681279174); + d = HH(d, a, b, c, m[i+ 0], 11, -358537222); + c = HH(c, d, a, b, m[i+ 3], 16, -722521979); + b = HH(b, c, d, a, m[i+ 6], 23, 76029189); + a = HH(a, b, c, d, m[i+ 9], 4, -640364487); + d = HH(d, a, b, c, m[i+12], 11, -421815835); + c = HH(c, d, a, b, m[i+15], 16, 530742520); + b = HH(b, c, d, a, m[i+ 2], 23, -995338651); + + a = II(a, b, c, d, m[i+ 0], 6, -198630844); + d = II(d, a, b, c, m[i+ 7], 10, 1126891415); + c = II(c, d, a, b, m[i+14], 15, -1416354905); + b = II(b, c, d, a, m[i+ 5], 21, -57434055); + a = II(a, b, c, d, m[i+12], 6, 1700485571); + d = II(d, a, b, c, m[i+ 3], 10, -1894986606); + c = II(c, d, a, b, m[i+10], 15, -1051523); + b = II(b, c, d, a, m[i+ 1], 21, -2054922799); + a = II(a, b, c, d, m[i+ 8], 6, 1873313359); + d = II(d, a, b, c, m[i+15], 10, -30611744); + c = II(c, d, a, b, m[i+ 6], 15, -1560198380); + b = II(b, c, d, a, m[i+13], 21, 1309151649); + a = II(a, b, c, d, m[i+ 4], 6, -145523070); + d = II(d, a, b, c, m[i+11], 10, -1120210379); + c = II(c, d, a, b, m[i+ 2], 15, 718787259); + b = II(b, c, d, a, m[i+ 9], 21, -343485551); + + a = (a + aa) >>> 0; + b = (b + bb) >>> 0; + c = (c + cc) >>> 0; + d = (d + dd) >>> 0; + } + + return crypt.endian([a, b, c, d]); + }; + + // Auxiliary functions + md5._ff = function (a, b, c, d, x, s, t) { + var n = a + (b & c | ~b & d) + (x >>> 0) + t; + return ((n << s) | (n >>> (32 - s))) + b; + }; + md5._gg = function (a, b, c, d, x, s, t) { + var n = a + (b & d | c & ~d) + (x >>> 0) + t; + return ((n << s) | (n >>> (32 - s))) + b; + }; + md5._hh = function (a, b, c, d, x, s, t) { + var n = a + (b ^ c ^ d) + (x >>> 0) + t; + return ((n << s) | (n >>> (32 - s))) + b; + }; + md5._ii = function (a, b, c, d, x, s, t) { + var n = a + (c ^ (b | ~d)) + (x >>> 0) + t; + return ((n << s) | (n >>> (32 - s))) + b; + }; + + // Package private blocksize + md5._blocksize = 16; + md5._digestsize = 16; + + module.exports = function (message, options) { + if (message === undefined || message === null) + throw new Error('Illegal argument ' + message); + + var digestbytes = crypt.wordsToBytes(md5(message, options)); + return options && options.asBytes ? digestbytes : + options && options.asString ? 
bin.bytesToString(digestbytes) : + crypt.bytesToHex(digestbytes); + }; + +})(); - // Convert a byte array to a string - bytesToString: function (bytes) { - return decodeURIComponent(escape(charenc.bin.bytesToString(bytes))); - } - }, - // Binary encoding - bin: { - // Convert a string to a byte array - stringToBytes: function (str) { - for (var bytes = [], i = 0; i < str.length; i++) bytes.push(str.charCodeAt(i) & 0xFF); - return bytes; - }, - - // Convert a byte array to a string - bytesToString: function (bytes) { - for (var str = [], i = 0; i < bytes.length; i++) str.push(String.fromCharCode(bytes[i])); - return str.join(''); - } - } - }; - - module.exports = charenc; - -/***/ }, +/***/ }), /* 5 */ -/***/ function(module, exports) { +/***/ (function(module, exports) { - /*! - * Determine if an object is a Buffer - * - * @author Feross Aboukhadijeh - * @license MIT - */ +(function() { + var base64map + = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/', - // The _isBuffer check is for Safari 5-7 support, because it's missing - // Object.prototype.constructor. Remove this eventually - module.exports = function (obj) { - return obj != null && (isBuffer(obj) || isSlowBuffer(obj) || !!obj._isBuffer); - }; + crypt = { + // Bit-wise rotation left + rotl: function(n, b) { + return (n << b) | (n >>> (32 - b)); + }, - function isBuffer(obj) { - return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj); - } + // Bit-wise rotation right + rotr: function(n, b) { + return (n << (32 - b)) | (n >>> b); + }, - // For Node v0.10 support. Remove this eventually. - function isSlowBuffer(obj) { - return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer(obj.slice(0, 0)); - } + // Swap big-endian to little-endian and vice versa + endian: function(n) { + // If number given, swap endian + if (n.constructor == Number) { + return crypt.rotl(n, 8) & 0x00FF00FF | crypt.rotl(n, 24) & 0xFF00FF00; + } -/***/ }, + // Else, assume array and swap all items + for (var i = 0; i < n.length; i++) + n[i] = crypt.endian(n[i]); + return n; + }, + + // Generate an array of any length of random bytes + randomBytes: function(n) { + for (var bytes = []; n > 0; n--) + bytes.push(Math.floor(Math.random() * 256)); + return bytes; + }, + + // Convert a byte array to big-endian 32-bit words + bytesToWords: function(bytes) { + for (var words = [], i = 0, b = 0; i < bytes.length; i++, b += 8) + words[b >>> 5] |= bytes[i] << (24 - b % 32); + return words; + }, + + // Convert big-endian 32-bit words to a byte array + wordsToBytes: function(words) { + for (var bytes = [], b = 0; b < words.length * 32; b += 8) + bytes.push((words[b >>> 5] >>> (24 - b % 32)) & 0xFF); + return bytes; + }, + + // Convert a byte array to a hex string + bytesToHex: function(bytes) { + for (var hex = [], i = 0; i < bytes.length; i++) { + hex.push((bytes[i] >>> 4).toString(16)); + hex.push((bytes[i] & 0xF).toString(16)); + } + return hex.join(''); + }, + + // Convert a hex string to a byte array + hexToBytes: function(hex) { + for (var bytes = [], c = 0; c < hex.length; c += 2) + bytes.push(parseInt(hex.substr(c, 2), 16)); + return bytes; + }, + + // Convert a byte array to a base-64 string + bytesToBase64: function(bytes) { + for (var base64 = [], i = 0; i < bytes.length; i += 3) { + var triplet = (bytes[i] << 16) | (bytes[i + 1] << 8) | bytes[i + 2]; + for (var j = 0; j < 4; j++) + if (i * 8 + j * 6 <= bytes.length * 8) + base64.push(base64map.charAt((triplet >>> 
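Module 4 above is the crypt-based MD5 implementation: string input is UTF-8 encoded (or treated as binary when requested) and the export returns a hex digest by default, with options for raw bytes instead. A hedged usage sketch, assuming `md5` refers to this module's export:

    md5("hello");                         // "5d41402abc4b2a76b9719d911017c592"
    md5("hello", { asBytes: true });      // the 16 digest bytes as a plain array
    md5("hello", { encoding: "binary" }); // skip UTF-8 encoding, treat each char as one byte
    md5(null);                            // throws: "Illegal argument null"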
6 * (3 - j)) & 0x3F)); + else + base64.push('='); + } + return base64.join(''); + }, + + // Convert a base-64 string to a byte array + base64ToBytes: function(base64) { + // Remove non-base-64 characters + base64 = base64.replace(/[^A-Z0-9+\/]/ig, ''); + + for (var bytes = [], i = 0, imod4 = 0; i < base64.length; + imod4 = ++i % 4) { + if (imod4 == 0) continue; + bytes.push(((base64map.indexOf(base64.charAt(i - 1)) + & (Math.pow(2, -2 * imod4 + 8) - 1)) << (imod4 * 2)) + | (base64map.indexOf(base64.charAt(i)) >>> (6 - imod4 * 2))); + } + return bytes; + } + }; + + module.exports = crypt; +})(); + + +/***/ }), /* 6 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports) { - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +/*! + * Determine if an object is a Buffer + * + * @author Feross Aboukhadijeh + * @license MIT + */ - const networkRequest = __webpack_require__(7); - const workerUtils = __webpack_require__(8); +// The _isBuffer check is for Safari 5-7 support, because it's missing +// Object.prototype.constructor. Remove this eventually +module.exports = function (obj) { + return obj != null && (isBuffer(obj) || isSlowBuffer(obj) || !!obj._isBuffer) +} - module.exports = { - networkRequest, - workerUtils - }; +function isBuffer (obj) { + return !!obj.constructor && typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj) +} -/***/ }, +// For Node v0.10 support. Remove this eventually. +function isSlowBuffer (obj) { + return typeof obj.readFloatLE === 'function' && typeof obj.slice === 'function' && isBuffer(obj.slice(0, 0)) +} + + +/***/ }), /* 7 */ -/***/ function(module, exports) { +/***/ (function(module, exports) { - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - function networkRequest(url, opts) { - return fetch(url, { - cache: opts.loadFromCache ? "default" : "no-cache" - }).then(res => { - if (res.status >= 200 && res.status < 300) { - return res.text().then(text => ({ content: text })); - } - return Promise.reject(`request failed with status ${res.status}`); - }); - } +function networkRequest(url, opts) { + return fetch(url, { + cache: opts.loadFromCache ? 
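Module 5 above (the crypt helpers) converts between byte arrays, big-endian 32-bit words, hex, and base-64. A small sketch of the round-trips it provides; the byte values are arbitrary and `crypt` refers to this module's export:

    const bytes = [0xde, 0xad, 0xbe, 0xef];
    const words = crypt.bytesToWords(bytes);   // one big-endian word holding 0xdeadbeef
    crypt.wordsToBytes(words);                 // back to [222, 173, 190, 239]
    crypt.bytesToHex(bytes);                   // "deadbeef"
    crypt.hexToBytes("deadbeef");              // [222, 173, 190, 239]
    crypt.endian(0x11223344);                  // 0x44332211 - swaps the byte order of one word
    crypt.bytesToBase64(bytes);                // "3q2+7w=="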
"default" : "no-cache" + }).then(res => { + if (res.status >= 200 && res.status < 300) { + return res.text().then(text => ({ content: text })); + } + return Promise.reject(`request failed with status ${res.status}`); + }); +} - module.exports = networkRequest; +module.exports = networkRequest; -/***/ }, +/***/ }), /* 8 */ -/***/ function(module, exports) { +/***/ (function(module, exports) { - function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; } +function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; } - function WorkerDispatcher() { - this.msgId = 1; - this.worker = null; - } /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +function WorkerDispatcher() { + this.msgId = 1; + this.worker = null; +} /* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - WorkerDispatcher.prototype = { - start(url) { - this.worker = new Worker(url); - this.worker.onerror = () => { - console.error(`Error in worker ${url}`); - }; - }, +WorkerDispatcher.prototype = { + start(url) { + this.worker = new Worker(url); + this.worker.onerror = () => { + console.error(`Error in worker ${url}`); + }; + }, - stop() { - if (!this.worker) { - return; - } + stop() { + if (!this.worker) { + return; + } - this.worker.terminate(); - this.worker = null; - }, + this.worker.terminate(); + this.worker = null; + }, - task(method) { - return (...args) => { - return new Promise((resolve, reject) => { - const id = this.msgId++; - this.worker.postMessage({ id, method, args }); + task(method) { + return (...args) => { + return new Promise((resolve, reject) => { + const id = this.msgId++; + this.worker.postMessage({ id, method, args }); - const listener = ({ data: result }) => { - if (result.id !== id) { - return; - } + const listener = ({ data: result }) => { + if (result.id !== id) { + return; + } - if (!this.worker) { - reject("Oops, The worker has shutdown!"); - return; - } - this.worker.removeEventListener("message", listener); - if (result.error) { - reject(result.error); - } else { - resolve(result.response); - } - }; + if (!this.worker) { + return; + } - this.worker.addEventListener("message", listener); - }); - }; - } - }; + this.worker.removeEventListener("message", listener); + if (result.error) { + reject(result.error); + } else { + resolve(result.response); + } + }; - function workerHandler(publicInterface) { - return function (msg) { - const { id, method, args } = msg.data; - try { - const response = publicInterface[method].apply(undefined, args); - if (response instanceof Promise) { - response.then(val => self.postMessage({ id, response: val }), - // Error can't be sent via postMessage, so be sure to - // convert to string. - err => self.postMessage({ id, error: err.toString() })); - } else { - self.postMessage({ id, response }); - } - } catch (error) { - // Error can't be sent via postMessage, so be sure to convert to - // string. - self.postMessage({ id, error: error.toString() }); - } - }; - } + this.worker.addEventListener("message", listener); + }); + }; + } +}; - function streamingWorkerHandler(publicInterface, { timeout = 100 } = {}, worker = self) { - let streamingWorker = (() => { - var _ref = _asyncToGenerator(function* (id, tasks) { - let isWorking = true; +function workerHandler(publicInterface) { + return function (msg) { + const { id, method, args } = msg.data; + try { + const response = publicInterface[method].apply(undefined, args); + if (response instanceof Promise) { + response.then(val => self.postMessage({ id, response: val }), + // Error can't be sent via postMessage, so be sure to + // convert to string. + err => self.postMessage({ id, error: err.toString() })); + } else { + self.postMessage({ id, response }); + } + } catch (error) { + // Error can't be sent via postMessage, so be sure to convert to + // string. 
+ self.postMessage({ id, error: error.toString() }); + } + }; +} - const intervalId = setTimeout(function () { - isWorking = false; - }, timeout); +function streamingWorkerHandler(publicInterface, { timeout = 100 } = {}, worker = self) { + let streamingWorker = (() => { + var _ref = _asyncToGenerator(function* (id, tasks) { + let isWorking = true; - const results = []; - while (tasks.length !== 0 && isWorking) { - const { callback, context, args } = tasks.shift(); - const result = yield callback.call(context, args); - results.push(result); - } - worker.postMessage({ id, status: "pending", data: results }); - clearInterval(intervalId); + const intervalId = setTimeout(function () { + isWorking = false; + }, timeout); - if (tasks.length !== 0) { - yield streamingWorker(id, tasks); - } - }); + const results = []; + while (tasks.length !== 0 && isWorking) { + const { callback, context, args } = tasks.shift(); + const result = yield callback.call(context, args); + results.push(result); + } + worker.postMessage({ id, status: "pending", data: results }); + clearInterval(intervalId); - return function streamingWorker(_x, _x2) { - return _ref.apply(this, arguments); - }; - })(); + if (tasks.length !== 0) { + yield streamingWorker(id, tasks); + } + }); - return (() => { - var _ref2 = _asyncToGenerator(function* (msg) { - const { id, method, args } = msg.data; - const workerMethod = publicInterface[method]; - if (!workerMethod) { - console.error(`Could not find ${method} defined in worker.`); - } - worker.postMessage({ id, status: "start" }); + return function streamingWorker(_x, _x2) { + return _ref.apply(this, arguments); + }; + })(); - try { - const tasks = workerMethod(args); - yield streamingWorker(id, tasks); - worker.postMessage({ id, status: "done" }); - } catch (error) { - worker.postMessage({ id, status: "error", error }); - } - }); + return (() => { + var _ref2 = _asyncToGenerator(function* (msg) { + const { id, method, args } = msg.data; + const workerMethod = publicInterface[method]; + if (!workerMethod) { + console.error(`Could not find ${method} defined in worker.`); + } + worker.postMessage({ id, status: "start" }); - return function (_x3) { - return _ref2.apply(this, arguments); - }; - })(); - } + try { + const tasks = workerMethod(args); + yield streamingWorker(id, tasks); + worker.postMessage({ id, status: "done" }); + } catch (error) { + worker.postMessage({ id, status: "error", error }); + } + }); - module.exports = { - WorkerDispatcher, - workerHandler, - streamingWorkerHandler - }; + return function (_x3) { + return _ref2.apply(this, arguments); + }; + })(); +} -/***/ }, +module.exports = { + WorkerDispatcher, + workerHandler, + streamingWorkerHandler +}; + +/***/ }), /* 9 */ -/***/ function(module, exports, __webpack_require__) { - - let getOriginalURLs = (() => { - var _ref = _asyncToGenerator(function* (generatedSource) { - const map = yield fetchSourceMap(generatedSource); - return map && map.sources; - }); - - return function getOriginalURLs(_x) { - return _ref.apply(this, arguments); - }; - })(); - - let getGeneratedLocation = (() => { - var _ref2 = _asyncToGenerator(function* (location, originalSource) { - if (!isOriginalId(location.sourceId)) { - return location; - } - - const generatedSourceId = originalToGeneratedId(location.sourceId); - const map = yield getSourceMap(generatedSourceId); - if (!map) { - return location; - } - - const { line, column } = map.generatedPositionFor({ - source: originalSource.url, - line: location.line, - column: location.column == null ? 
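Module 8's dispatcher and handler above implement a small id-matched request/response protocol over postMessage: `task()` assigns an id and resolves when a reply carrying the same id arrives, while `workerHandler()` applies the named method on the worker side and posts back `{ id, response }` or `{ id, error }`. A sketch of both ends; the worker URL and the `findMatches` method are hypothetical:

    // Main thread side.
    const dispatcher = new WorkerDispatcher();
    dispatcher.start("search-worker.js");                 // hypothetical worker script
    const findMatches = dispatcher.task("findMatches");
    findMatches("needle", "hay needle stack").then(hits => console.log(hits));

    // Worker side (inside search-worker.js):
    //   self.onmessage = workerHandler({
    //     findMatches: (query, text) => text.includes(query) ? [text.indexOf(query)] : []
    //   });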
0 : location.column, - bias: SourceMapConsumer.LEAST_UPPER_BOUND - }); - - return { - sourceId: generatedSourceId, - line, - column - }; - }); - - return function getGeneratedLocation(_x2, _x3) { - return _ref2.apply(this, arguments); - }; - })(); - - let getOriginalLocation = (() => { - var _ref3 = _asyncToGenerator(function* (location) { - if (!isGeneratedId(location.sourceId)) { - return location; - } - - const map = yield getSourceMap(location.sourceId); - if (!map) { - return location; - } - - const { source: sourceUrl, line, column } = map.originalPositionFor({ - line: location.line, - column: location.column == null ? 0 : location.column - }); - - if (sourceUrl == null) { - // No url means the location didn't map. - return location; - } - - return { - sourceId: generatedToOriginalId(location.sourceId, sourceUrl), - sourceUrl, - line, - column - }; - }); - - return function getOriginalLocation(_x4) { - return _ref3.apply(this, arguments); - }; - })(); - - let getOriginalSourceText = (() => { - var _ref4 = _asyncToGenerator(function* (originalSource) { - assert(isOriginalId(originalSource.id), "Source is not an original source"); - - const generatedSourceId = originalToGeneratedId(originalSource.id); - const map = yield getSourceMap(generatedSourceId); - if (!map) { - return null; - } - - let text = map.sourceContentFor(originalSource.url); - if (!text) { - text = (yield networkRequest(originalSource.url, { loadFromCache: false })).content; - } - - return { - text, - contentType: getContentType(originalSource.url || "") - }; - }); - - return function getOriginalSourceText(_x5) { - return _ref4.apply(this, arguments); - }; - })(); - - /** - * Finds the scope binding information at the source map based on locations and - * generated source scopes. The generated source scopes contain variables names - * and all their references in the generated source. This information is merged - * with the source map original names information and returned as result. 
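The worker functions removed above (getGeneratedLocation / getOriginalLocation) are thin wrappers over the source-map library's position queries. A condensed sketch of the two underlying calls; `map` is assumed to be a SourceMapConsumer for the generated source and the url/positions are illustrative:

    const gen = map.generatedPositionFor({
      source: "original.js",                       // the original source url
      line: 10,
      column: 0,
      bias: SourceMapConsumer.LEAST_UPPER_BOUND    // snap forward when there is no exact match
    });
    const orig = map.originalPositionFor({ line: gen.line, column: gen.column });
    // orig.source is null when the position does not map back to an original file.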
- * @memberof utils/source-map-worker - * @static - */ +/***/ (function(module, exports) { - let getLocationScopes = (() => { - var _ref5 = _asyncToGenerator(function* (location, generatedSourceScopes) { - if (!isGeneratedId(location.sourceId)) { - return null; - } +let sourceMapRequests = new Map(); - const map = yield getSourceMap(location.sourceId); - if (!map) { - return null; - } +function clearSourceMaps() { + sourceMapRequests.clear(); +} - return getLocationScopesFromMap(map, generatedSourceScopes, location); - }); +function getSourceMap(generatedSourceId) { + return sourceMapRequests.get(generatedSourceId); +} - return function getLocationScopes(_x6, _x7) { - return _ref5.apply(this, arguments); - }; - })(); +function setSourceMap(generatedId, request) { + sourceMapRequests.set(generatedId, request); +} - let hasMappedSource = (() => { - var _ref6 = _asyncToGenerator(function* (location) { - if (isOriginalId(location.sourceId)) { - return true; - } +module.exports = { + clearSourceMaps, + getSourceMap, + setSourceMap +}; - const loc = yield getOriginalLocation(location); - return loc.sourceId !== location.sourceId; - }); - - return function hasMappedSource(_x8) { - return _ref6.apply(this, arguments); - }; - })(); - - function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; } - - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - - /** - * Source Map Worker - * @module utils/source-map-worker - */ - - const { networkRequest } = __webpack_require__(6); - const { SourceMapConsumer, SourceMapGenerator } = __webpack_require__(10); - - const assert = __webpack_require__(21); - const { fetchSourceMap } = __webpack_require__(22); - const { - getSourceMap, - setSourceMap, - clearSourceMaps - } = __webpack_require__(23); - const { - originalToGeneratedId, - generatedToOriginalId, - isGeneratedId, - isOriginalId, - getContentType - } = __webpack_require__(1); - const { - getLocationScopes: getLocationScopesFromMap - } = __webpack_require__(40); - - const { WasmRemap } = __webpack_require__(24); - - function applySourceMap(generatedId, url, code, mappings) { - const generator = new SourceMapGenerator({ file: url }); - mappings.forEach(mapping => generator.addMapping(mapping)); - generator.setSourceContent(url, code); - - const map = SourceMapConsumer(generator.toJSON()); - setSourceMap(generatedId, Promise.resolve(map)); - } - - module.exports = { - getOriginalURLs, - getGeneratedLocation, - getOriginalLocation, - getOriginalSourceText, - getLocationScopes, - applySourceMap, - clearSourceMaps, - hasMappedSource - }; - -/***/ }, +/***/ }), /* 10 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports, __webpack_require__) { - /* - * Copyright 2009-2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. 
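The new module 9 above replaces the worker logic at this slot with a plain cache: sourceMapRequests maps a generated source id to the pending request (a Promise) for its source map, so concurrent callers share one fetch. A sketch of the intended pattern; the `cachedSourceMap` helper is hypothetical and `fetchSourceMap` is assumed to behave like the one required in the removed code (Promise of a consumer or null):

    async function cachedSourceMap(generatedSource) {
      if (!getSourceMap(generatedSource.id)) {
        setSourceMap(generatedSource.id, fetchSourceMap(generatedSource));
      }
      return getSourceMap(generatedSource.id);   // the shared promise
    }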
See LICENSE.txt or: - * http://opensource.org/licenses/BSD-3-Clause - */ - exports.SourceMapGenerator = __webpack_require__(11).SourceMapGenerator; - exports.SourceMapConsumer = __webpack_require__(17).SourceMapConsumer; - exports.SourceNode = __webpack_require__(20).SourceNode; +/* + * Copyright 2009-2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE.txt or: + * http://opensource.org/licenses/BSD-3-Clause + */ +exports.SourceMapGenerator = __webpack_require__(11).SourceMapGenerator; +exports.SourceMapConsumer = __webpack_require__(20).SourceMapConsumer; +exports.SourceNode = __webpack_require__(23).SourceNode; -/***/ }, + +/***/ }), /* 11 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports, __webpack_require__) { - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ - var base64VLQ = __webpack_require__(12); - var util = __webpack_require__(14); - var ArraySet = __webpack_require__(15).ArraySet; - var MappingList = __webpack_require__(16).MappingList; +var base64VLQ = __webpack_require__(12); +var util = __webpack_require__(2); +var ArraySet = __webpack_require__(13).ArraySet; +var MappingList = __webpack_require__(19).MappingList; - /** - * An instance of the SourceMapGenerator represents a source map which is - * being built incrementally. You may pass an object with the following - * properties: - * - * - file: The filename of the generated source. - * - sourceRoot: A root for all relative URLs in this source map. - */ - function SourceMapGenerator(aArgs) { - if (!aArgs) { - aArgs = {}; - } - this._file = util.getArg(aArgs, 'file', null); - this._sourceRoot = util.getArg(aArgs, 'sourceRoot', null); - this._skipValidation = util.getArg(aArgs, 'skipValidation', false); - this._sources = new ArraySet(); - this._names = new ArraySet(); - this._mappings = new MappingList(); - this._sourcesContents = null; - } +/** + * An instance of the SourceMapGenerator represents a source map which is + * being built incrementally. You may pass an object with the following + * properties: + * + * - file: The filename of the generated source. + * - sourceRoot: A root for all relative URLs in this source map. + */ +function SourceMapGenerator(aArgs) { + if (!aArgs) { + aArgs = {}; + } + this._file = util.getArg(aArgs, 'file', null); + this._sourceRoot = util.getArg(aArgs, 'sourceRoot', null); + this._skipValidation = util.getArg(aArgs, 'skipValidation', false); + this._sources = new ArraySet(); + this._names = new ArraySet(); + this._mappings = new MappingList(); + this._sourcesContents = null; +} - SourceMapGenerator.prototype._version = 3; +SourceMapGenerator.prototype._version = 3; - /** - * Creates a new SourceMapGenerator based on a SourceMapConsumer - * - * @param aSourceMapConsumer The SourceMap. 
- */ - SourceMapGenerator.fromSourceMap = function SourceMapGenerator_fromSourceMap(aSourceMapConsumer) { - var sourceRoot = aSourceMapConsumer.sourceRoot; - var generator = new SourceMapGenerator({ - file: aSourceMapConsumer.file, - sourceRoot: sourceRoot - }); - aSourceMapConsumer.eachMapping(function (mapping) { - var newMapping = { - generated: { - line: mapping.generatedLine, - column: mapping.generatedColumn - } - }; +/** + * Creates a new SourceMapGenerator based on a SourceMapConsumer + * + * @param aSourceMapConsumer The SourceMap. + */ +SourceMapGenerator.fromSourceMap = + function SourceMapGenerator_fromSourceMap(aSourceMapConsumer) { + var sourceRoot = aSourceMapConsumer.sourceRoot; + var generator = new SourceMapGenerator({ + file: aSourceMapConsumer.file, + sourceRoot: sourceRoot + }); + aSourceMapConsumer.eachMapping(function (mapping) { + var newMapping = { + generated: { + line: mapping.generatedLine, + column: mapping.generatedColumn + } + }; - if (mapping.source != null) { - newMapping.source = mapping.source; - if (sourceRoot != null) { - newMapping.source = util.relative(sourceRoot, newMapping.source); - } + if (mapping.source != null) { + newMapping.source = mapping.source; + if (sourceRoot != null) { + newMapping.source = util.relative(sourceRoot, newMapping.source); + } - newMapping.original = { - line: mapping.originalLine, - column: mapping.originalColumn - }; + newMapping.original = { + line: mapping.originalLine, + column: mapping.originalColumn + }; - if (mapping.name != null) { - newMapping.name = mapping.name; - } - } + if (mapping.name != null) { + newMapping.name = mapping.name; + } + } - generator.addMapping(newMapping); - }); - aSourceMapConsumer.sources.forEach(function (sourceFile) { - var sourceRelative = sourceFile; - if (sourceRoot !== null) { - sourceRelative = util.relative(sourceRoot, sourceFile); - } + generator.addMapping(newMapping); + }); + aSourceMapConsumer.sources.forEach(function (sourceFile) { + var sourceRelative = sourceFile; + if (sourceRoot !== null) { + sourceRelative = util.relative(sourceRoot, sourceFile); + } - if (!generator._sources.has(sourceRelative)) { - generator._sources.add(sourceRelative); - } + if (!generator._sources.has(sourceRelative)) { + generator._sources.add(sourceRelative); + } - var content = aSourceMapConsumer.sourceContentFor(sourceFile); - if (content != null) { - generator.setSourceContent(sourceFile, content); - } - }); - return generator; - }; + var content = aSourceMapConsumer.sourceContentFor(sourceFile); + if (content != null) { + generator.setSourceContent(sourceFile, content); + } + }); + return generator; + }; - /** - * Add a single mapping from original source line and column to the generated - * source's line and column for this source map being created. The mapping - * object should have the following properties: - * - * - generated: An object with the generated line and column positions. - * - original: An object with the original line and column positions. - * - source: The original source file (relative to the sourceRoot). - * - name: An optional original token name for this mapping. 
- */ - SourceMapGenerator.prototype.addMapping = function SourceMapGenerator_addMapping(aArgs) { - var generated = util.getArg(aArgs, 'generated'); - var original = util.getArg(aArgs, 'original', null); - var source = util.getArg(aArgs, 'source', null); - var name = util.getArg(aArgs, 'name', null); +/** + * Add a single mapping from original source line and column to the generated + * source's line and column for this source map being created. The mapping + * object should have the following properties: + * + * - generated: An object with the generated line and column positions. + * - original: An object with the original line and column positions. + * - source: The original source file (relative to the sourceRoot). + * - name: An optional original token name for this mapping. + */ +SourceMapGenerator.prototype.addMapping = + function SourceMapGenerator_addMapping(aArgs) { + var generated = util.getArg(aArgs, 'generated'); + var original = util.getArg(aArgs, 'original', null); + var source = util.getArg(aArgs, 'source', null); + var name = util.getArg(aArgs, 'name', null); - if (!this._skipValidation) { - this._validateMapping(generated, original, source, name); - } + if (!this._skipValidation) { + this._validateMapping(generated, original, source, name); + } - if (source != null) { - source = String(source); - if (!this._sources.has(source)) { - this._sources.add(source); - } - } + if (source != null) { + source = String(source); + if (!this._sources.has(source)) { + this._sources.add(source); + } + } - if (name != null) { - name = String(name); - if (!this._names.has(name)) { - this._names.add(name); - } - } + if (name != null) { + name = String(name); + if (!this._names.has(name)) { + this._names.add(name); + } + } - this._mappings.add({ - generatedLine: generated.line, - generatedColumn: generated.column, - originalLine: original != null && original.line, - originalColumn: original != null && original.column, - source: source, - name: name - }); - }; + this._mappings.add({ + generatedLine: generated.line, + generatedColumn: generated.column, + originalLine: original != null && original.line, + originalColumn: original != null && original.column, + source: source, + name: name + }); + }; - /** - * Set the source content for a source file. - */ - SourceMapGenerator.prototype.setSourceContent = function SourceMapGenerator_setSourceContent(aSourceFile, aSourceContent) { - var source = aSourceFile; - if (this._sourceRoot != null) { - source = util.relative(this._sourceRoot, source); - } +/** + * Set the source content for a source file. + */ +SourceMapGenerator.prototype.setSourceContent = + function SourceMapGenerator_setSourceContent(aSourceFile, aSourceContent) { + var source = aSourceFile; + if (this._sourceRoot != null) { + source = util.relative(this._sourceRoot, source); + } - if (aSourceContent != null) { - // Add the source content to the _sourcesContents map. - // Create a new _sourcesContents map if the property is null. - if (!this._sourcesContents) { - this._sourcesContents = Object.create(null); - } - this._sourcesContents[util.toSetString(source)] = aSourceContent; - } else if (this._sourcesContents) { - // Remove the source file from the _sourcesContents map. - // If the _sourcesContents map is empty, set the property to null. - delete this._sourcesContents[util.toSetString(source)]; - if (Object.keys(this._sourcesContents).length === 0) { - this._sourcesContents = null; - } - } - }; + if (aSourceContent != null) { + // Add the source content to the _sourcesContents map. 
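The generator API above builds a map incrementally: addMapping takes generated/original positions plus an optional source and name, setSourceContent embeds the original text, and toJSON/toString (further down) externalize the result. A minimal sketch with made-up positions and file names:

    const gen = new SourceMapGenerator({ file: "bundle.js" });
    gen.addMapping({
      generated: { line: 1, column: 0 },     // position in bundle.js (lines are 1-based)
      original:  { line: 3, column: 4 },     // position in the original file
      source: "app.js",
      name: "main"                           // optional original identifier
    });
    gen.setSourceContent("app.js", "function main() {}\n");
    JSON.parse(gen.toString());              // { version: 3, sources: ["app.js"], names: ["main"], ... }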
+ // Create a new _sourcesContents map if the property is null. + if (!this._sourcesContents) { + this._sourcesContents = Object.create(null); + } + this._sourcesContents[util.toSetString(source)] = aSourceContent; + } else if (this._sourcesContents) { + // Remove the source file from the _sourcesContents map. + // If the _sourcesContents map is empty, set the property to null. + delete this._sourcesContents[util.toSetString(source)]; + if (Object.keys(this._sourcesContents).length === 0) { + this._sourcesContents = null; + } + } + }; - /** - * Applies the mappings of a sub-source-map for a specific source file to the - * source map being generated. Each mapping to the supplied source file is - * rewritten using the supplied source map. Note: The resolution for the - * resulting mappings is the minimium of this map and the supplied map. - * - * @param aSourceMapConsumer The source map to be applied. - * @param aSourceFile Optional. The filename of the source file. - * If omitted, SourceMapConsumer's file property will be used. - * @param aSourceMapPath Optional. The dirname of the path to the source map - * to be applied. If relative, it is relative to the SourceMapConsumer. - * This parameter is needed when the two source maps aren't in the same - * directory, and the source map to be applied contains relative source - * paths. If so, those relative source paths need to be rewritten - * relative to the SourceMapGenerator. - */ - SourceMapGenerator.prototype.applySourceMap = function SourceMapGenerator_applySourceMap(aSourceMapConsumer, aSourceFile, aSourceMapPath) { - var sourceFile = aSourceFile; - // If aSourceFile is omitted, we will use the file property of the SourceMap - if (aSourceFile == null) { - if (aSourceMapConsumer.file == null) { - throw new Error('SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, ' + 'or the source map\'s "file" property. Both were omitted.'); - } - sourceFile = aSourceMapConsumer.file; - } - var sourceRoot = this._sourceRoot; - // Make "sourceFile" relative if an absolute Url is passed. - if (sourceRoot != null) { - sourceFile = util.relative(sourceRoot, sourceFile); - } - // Applying the SourceMap can add and remove items from the sources and - // the names array. - var newSources = new ArraySet(); - var newNames = new ArraySet(); +/** + * Applies the mappings of a sub-source-map for a specific source file to the + * source map being generated. Each mapping to the supplied source file is + * rewritten using the supplied source map. Note: The resolution for the + * resulting mappings is the minimium of this map and the supplied map. + * + * @param aSourceMapConsumer The source map to be applied. + * @param aSourceFile Optional. The filename of the source file. + * If omitted, SourceMapConsumer's file property will be used. + * @param aSourceMapPath Optional. The dirname of the path to the source map + * to be applied. If relative, it is relative to the SourceMapConsumer. + * This parameter is needed when the two source maps aren't in the same + * directory, and the source map to be applied contains relative source + * paths. If so, those relative source paths need to be rewritten + * relative to the SourceMapGenerator. 
+ */ +SourceMapGenerator.prototype.applySourceMap = + function SourceMapGenerator_applySourceMap(aSourceMapConsumer, aSourceFile, aSourceMapPath) { + var sourceFile = aSourceFile; + // If aSourceFile is omitted, we will use the file property of the SourceMap + if (aSourceFile == null) { + if (aSourceMapConsumer.file == null) { + throw new Error( + 'SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, ' + + 'or the source map\'s "file" property. Both were omitted.' + ); + } + sourceFile = aSourceMapConsumer.file; + } + var sourceRoot = this._sourceRoot; + // Make "sourceFile" relative if an absolute Url is passed. + if (sourceRoot != null) { + sourceFile = util.relative(sourceRoot, sourceFile); + } + // Applying the SourceMap can add and remove items from the sources and + // the names array. + var newSources = new ArraySet(); + var newNames = new ArraySet(); - // Find mappings for the "sourceFile" - this._mappings.unsortedForEach(function (mapping) { - if (mapping.source === sourceFile && mapping.originalLine != null) { - // Check if it can be mapped by the source map, then update the mapping. - var original = aSourceMapConsumer.originalPositionFor({ - line: mapping.originalLine, - column: mapping.originalColumn - }); - if (original.source != null) { - // Copy mapping - mapping.source = original.source; - if (aSourceMapPath != null) { - mapping.source = util.join(aSourceMapPath, mapping.source); - } - if (sourceRoot != null) { - mapping.source = util.relative(sourceRoot, mapping.source); - } - mapping.originalLine = original.line; - mapping.originalColumn = original.column; - if (original.name != null) { - mapping.name = original.name; - } - } - } + // Find mappings for the "sourceFile" + this._mappings.unsortedForEach(function (mapping) { + if (mapping.source === sourceFile && mapping.originalLine != null) { + // Check if it can be mapped by the source map, then update the mapping. + var original = aSourceMapConsumer.originalPositionFor({ + line: mapping.originalLine, + column: mapping.originalColumn + }); + if (original.source != null) { + // Copy mapping + mapping.source = original.source; + if (aSourceMapPath != null) { + mapping.source = util.join(aSourceMapPath, mapping.source) + } + if (sourceRoot != null) { + mapping.source = util.relative(sourceRoot, mapping.source); + } + mapping.originalLine = original.line; + mapping.originalColumn = original.column; + if (original.name != null) { + mapping.name = original.name; + } + } + } - var source = mapping.source; - if (source != null && !newSources.has(source)) { - newSources.add(source); - } + var source = mapping.source; + if (source != null && !newSources.has(source)) { + newSources.add(source); + } - var name = mapping.name; - if (name != null && !newNames.has(name)) { - newNames.add(name); - } - }, this); - this._sources = newSources; - this._names = newNames; + var name = mapping.name; + if (name != null && !newNames.has(name)) { + newNames.add(name); + } - // Copy sourcesContents of applied map. 
- aSourceMapConsumer.sources.forEach(function (sourceFile) { - var content = aSourceMapConsumer.sourceContentFor(sourceFile); - if (content != null) { - if (aSourceMapPath != null) { - sourceFile = util.join(aSourceMapPath, sourceFile); - } - if (sourceRoot != null) { - sourceFile = util.relative(sourceRoot, sourceFile); - } - this.setSourceContent(sourceFile, content); - } - }, this); - }; + }, this); + this._sources = newSources; + this._names = newNames; - /** - * A mapping can have one of the three levels of data: - * - * 1. Just the generated position. - * 2. The Generated position, original position, and original source. - * 3. Generated and original position, original source, as well as a name - * token. - * - * To maintain consistency, we validate that any new mapping being added falls - * in to one of these categories. - */ - SourceMapGenerator.prototype._validateMapping = function SourceMapGenerator_validateMapping(aGenerated, aOriginal, aSource, aName) { - // When aOriginal is truthy but has empty values for .line and .column, - // it is most likely a programmer error. In this case we throw a very - // specific error message to try to guide them the right way. - // For example: https://github.com/Polymer/polymer-bundler/pull/519 - if (aOriginal && typeof aOriginal.line !== 'number' && typeof aOriginal.column !== 'number') { - throw new Error('original.line and original.column are not numbers -- you probably meant to omit ' + 'the original mapping entirely and only map the generated position. If so, pass ' + 'null for the original mapping instead of an object with empty or null values.'); - } + // Copy sourcesContents of applied map. + aSourceMapConsumer.sources.forEach(function (sourceFile) { + var content = aSourceMapConsumer.sourceContentFor(sourceFile); + if (content != null) { + if (aSourceMapPath != null) { + sourceFile = util.join(aSourceMapPath, sourceFile); + } + if (sourceRoot != null) { + sourceFile = util.relative(sourceRoot, sourceFile); + } + this.setSourceContent(sourceFile, content); + } + }, this); + }; - if (aGenerated && 'line' in aGenerated && 'column' in aGenerated && aGenerated.line > 0 && aGenerated.column >= 0 && !aOriginal && !aSource && !aName) { - // Case 1. - return; - } else if (aGenerated && 'line' in aGenerated && 'column' in aGenerated && aOriginal && 'line' in aOriginal && 'column' in aOriginal && aGenerated.line > 0 && aGenerated.column >= 0 && aOriginal.line > 0 && aOriginal.column >= 0 && aSource) { - // Cases 2 and 3. - return; - } else { - throw new Error('Invalid mapping: ' + JSON.stringify({ - generated: aGenerated, - source: aSource, - original: aOriginal, - name: aName - })); - } - }; +/** + * A mapping can have one of the three levels of data: + * + * 1. Just the generated position. + * 2. The Generated position, original position, and original source. + * 3. Generated and original position, original source, as well as a name + * token. + * + * To maintain consistency, we validate that any new mapping being added falls + * in to one of these categories. + */ +SourceMapGenerator.prototype._validateMapping = + function SourceMapGenerator_validateMapping(aGenerated, aOriginal, aSource, + aName) { + // When aOriginal is truthy but has empty values for .line and .column, + // it is most likely a programmer error. In this case we throw a very + // specific error message to try to guide them the right way. 
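applySourceMap above is the map-composition step: every mapping that points into aSourceFile is rewritten through the supplied consumer, which is how a minifier's map gets chained onto a compiler's map. A hedged sketch; both consumers and the file names are assumptions:

    // minToBundle maps min.js -> bundle.js; bundleToSrc maps bundle.js -> the original sources.
    const gen = SourceMapGenerator.fromSourceMap(minToBundle);
    gen.applySourceMap(bundleToSrc, "bundle.js");   // gen now maps min.js -> the original sources
    const composed = gen.toJSON();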
+ // For example: https://github.com/Polymer/polymer-bundler/pull/519 + if (aOriginal && typeof aOriginal.line !== 'number' && typeof aOriginal.column !== 'number') { + throw new Error( + 'original.line and original.column are not numbers -- you probably meant to omit ' + + 'the original mapping entirely and only map the generated position. If so, pass ' + + 'null for the original mapping instead of an object with empty or null values.' + ); + } - /** - * Serialize the accumulated mappings in to the stream of base 64 VLQs - * specified by the source map format. - */ - SourceMapGenerator.prototype._serializeMappings = function SourceMapGenerator_serializeMappings() { - var previousGeneratedColumn = 0; - var previousGeneratedLine = 1; - var previousOriginalColumn = 0; - var previousOriginalLine = 0; - var previousName = 0; - var previousSource = 0; - var result = ''; - var next; - var mapping; - var nameIdx; - var sourceIdx; + if (aGenerated && 'line' in aGenerated && 'column' in aGenerated + && aGenerated.line > 0 && aGenerated.column >= 0 + && !aOriginal && !aSource && !aName) { + // Case 1. + return; + } + else if (aGenerated && 'line' in aGenerated && 'column' in aGenerated + && aOriginal && 'line' in aOriginal && 'column' in aOriginal + && aGenerated.line > 0 && aGenerated.column >= 0 + && aOriginal.line > 0 && aOriginal.column >= 0 + && aSource) { + // Cases 2 and 3. + return; + } + else { + throw new Error('Invalid mapping: ' + JSON.stringify({ + generated: aGenerated, + source: aSource, + original: aOriginal, + name: aName + })); + } + }; - var mappings = this._mappings.toArray(); - for (var i = 0, len = mappings.length; i < len; i++) { - mapping = mappings[i]; - next = ''; +/** + * Serialize the accumulated mappings in to the stream of base 64 VLQs + * specified by the source map format. 
+ */ +SourceMapGenerator.prototype._serializeMappings = + function SourceMapGenerator_serializeMappings() { + var previousGeneratedColumn = 0; + var previousGeneratedLine = 1; + var previousOriginalColumn = 0; + var previousOriginalLine = 0; + var previousName = 0; + var previousSource = 0; + var result = ''; + var next; + var mapping; + var nameIdx; + var sourceIdx; - if (mapping.generatedLine !== previousGeneratedLine) { - previousGeneratedColumn = 0; - while (mapping.generatedLine !== previousGeneratedLine) { - next += ';'; - previousGeneratedLine++; - } - } else { - if (i > 0) { - if (!util.compareByGeneratedPositionsInflated(mapping, mappings[i - 1])) { - continue; - } - next += ','; - } - } + var mappings = this._mappings.toArray(); + for (var i = 0, len = mappings.length; i < len; i++) { + mapping = mappings[i]; + next = '' - next += base64VLQ.encode(mapping.generatedColumn - previousGeneratedColumn); - previousGeneratedColumn = mapping.generatedColumn; + if (mapping.generatedLine !== previousGeneratedLine) { + previousGeneratedColumn = 0; + while (mapping.generatedLine !== previousGeneratedLine) { + next += ';'; + previousGeneratedLine++; + } + } + else { + if (i > 0) { + if (!util.compareByGeneratedPositionsInflated(mapping, mappings[i - 1])) { + continue; + } + next += ','; + } + } - if (mapping.source != null) { - sourceIdx = this._sources.indexOf(mapping.source); - next += base64VLQ.encode(sourceIdx - previousSource); - previousSource = sourceIdx; + next += base64VLQ.encode(mapping.generatedColumn + - previousGeneratedColumn); + previousGeneratedColumn = mapping.generatedColumn; - // lines are stored 0-based in SourceMap spec version 3 - next += base64VLQ.encode(mapping.originalLine - 1 - previousOriginalLine); - previousOriginalLine = mapping.originalLine - 1; + if (mapping.source != null) { + sourceIdx = this._sources.indexOf(mapping.source); + next += base64VLQ.encode(sourceIdx - previousSource); + previousSource = sourceIdx; - next += base64VLQ.encode(mapping.originalColumn - previousOriginalColumn); - previousOriginalColumn = mapping.originalColumn; + // lines are stored 0-based in SourceMap spec version 3 + next += base64VLQ.encode(mapping.originalLine - 1 + - previousOriginalLine); + previousOriginalLine = mapping.originalLine - 1; - if (mapping.name != null) { - nameIdx = this._names.indexOf(mapping.name); - next += base64VLQ.encode(nameIdx - previousName); - previousName = nameIdx; - } - } + next += base64VLQ.encode(mapping.originalColumn + - previousOriginalColumn); + previousOriginalColumn = mapping.originalColumn; - result += next; - } + if (mapping.name != null) { + nameIdx = this._names.indexOf(mapping.name); + next += base64VLQ.encode(nameIdx - previousName); + previousName = nameIdx; + } + } - return result; - }; + result += next; + } - SourceMapGenerator.prototype._generateSourcesContent = function SourceMapGenerator_generateSourcesContent(aSources, aSourceRoot) { - return aSources.map(function (source) { - if (!this._sourcesContents) { - return null; - } - if (aSourceRoot != null) { - source = util.relative(aSourceRoot, source); - } - var key = util.toSetString(source); - return Object.prototype.hasOwnProperty.call(this._sourcesContents, key) ? this._sourcesContents[key] : null; - }, this); - }; + return result; + }; - /** - * Externalize the source map. 
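_serializeMappings above emits the spec's compact "mappings" string: generated lines are separated by ";", segments on a line by ",", and each field (generated column, source index, original line, original column, name index) is stored as a Base64 VLQ delta against the previous mapping. A tiny sketch of the output for a single zero-offset mapping:

    const gen = new SourceMapGenerator({ file: "out.js" });
    gen.addMapping({
      generated: { line: 1, column: 0 },
      original:  { line: 1, column: 0 },
      source: "in.js"
    });
    gen.toJSON().mappings;    // "AAAA" - four zero deltas, each encoded as "A"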
- */ - SourceMapGenerator.prototype.toJSON = function SourceMapGenerator_toJSON() { - var map = { - version: this._version, - sources: this._sources.toArray(), - names: this._names.toArray(), - mappings: this._serializeMappings() - }; - if (this._file != null) { - map.file = this._file; - } - if (this._sourceRoot != null) { - map.sourceRoot = this._sourceRoot; - } - if (this._sourcesContents) { - map.sourcesContent = this._generateSourcesContent(map.sources, map.sourceRoot); - } +SourceMapGenerator.prototype._generateSourcesContent = + function SourceMapGenerator_generateSourcesContent(aSources, aSourceRoot) { + return aSources.map(function (source) { + if (!this._sourcesContents) { + return null; + } + if (aSourceRoot != null) { + source = util.relative(aSourceRoot, source); + } + var key = util.toSetString(source); + return Object.prototype.hasOwnProperty.call(this._sourcesContents, key) + ? this._sourcesContents[key] + : null; + }, this); + }; - return map; - }; +/** + * Externalize the source map. + */ +SourceMapGenerator.prototype.toJSON = + function SourceMapGenerator_toJSON() { + var map = { + version: this._version, + sources: this._sources.toArray(), + names: this._names.toArray(), + mappings: this._serializeMappings() + }; + if (this._file != null) { + map.file = this._file; + } + if (this._sourceRoot != null) { + map.sourceRoot = this._sourceRoot; + } + if (this._sourcesContents) { + map.sourcesContent = this._generateSourcesContent(map.sources, map.sourceRoot); + } - /** - * Render the source map being generated to a string. - */ - SourceMapGenerator.prototype.toString = function SourceMapGenerator_toString() { - return JSON.stringify(this.toJSON()); - }; + return map; + }; - exports.SourceMapGenerator = SourceMapGenerator; +/** + * Render the source map being generated to a string. + */ +SourceMapGenerator.prototype.toString = + function SourceMapGenerator_toString() { + return JSON.stringify(this.toJSON()); + }; -/***/ }, +exports.SourceMapGenerator = SourceMapGenerator; + + +/***/ }), /* 12 */ -/***/ function(module, exports, __webpack_require__) { +/***/ (function(module, exports, __webpack_require__) { - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - * - * Based on the Base 64 VLQ implementation in Closure Compiler: - * https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java - * - * Copyright 2011 The Closure Compiler Authors. All rights reserved. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + * + * Based on the Base 64 VLQ implementation in Closure Compiler: + * https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java + * + * Copyright 2011 The Closure Compiler Authors. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ - var base64 = __webpack_require__(13); +var base64 = __webpack_require__(18); - // A single base 64 digit can contain 6 bits of data. For the base 64 variable - // length quantities we use in the source map spec, the first bit is the sign, - // the next four bits are the actual value, and the 6th bit is the - // continuation bit. The continuation bit tells us whether there are more - // digits in this value following this digit. - // - // Continuation - // | Sign - // | | - // V V - // 101011 +// A single base 64 digit can contain 6 bits of data. For the base 64 variable +// length quantities we use in the source map spec, the first bit is the sign, +// the next four bits are the actual value, and the 6th bit is the +// continuation bit. 
The continuation bit tells us whether there are more +// digits in this value following this digit. +// +// Continuation +// | Sign +// | | +// V V +// 101011 - var VLQ_BASE_SHIFT = 5; +var VLQ_BASE_SHIFT = 5; - // binary: 100000 - var VLQ_BASE = 1 << VLQ_BASE_SHIFT; +// binary: 100000 +var VLQ_BASE = 1 << VLQ_BASE_SHIFT; - // binary: 011111 - var VLQ_BASE_MASK = VLQ_BASE - 1; +// binary: 011111 +var VLQ_BASE_MASK = VLQ_BASE - 1; - // binary: 100000 - var VLQ_CONTINUATION_BIT = VLQ_BASE; +// binary: 100000 +var VLQ_CONTINUATION_BIT = VLQ_BASE; - /** - * Converts from a two-complement value to a value where the sign bit is - * placed in the least significant bit. For example, as decimals: - * 1 becomes 2 (10 binary), -1 becomes 3 (11 binary) - * 2 becomes 4 (100 binary), -2 becomes 5 (101 binary) - */ - function toVLQSigned(aValue) { - return aValue < 0 ? (-aValue << 1) + 1 : (aValue << 1) + 0; - } +/** + * Converts from a two-complement value to a value where the sign bit is + * placed in the least significant bit. For example, as decimals: + * 1 becomes 2 (10 binary), -1 becomes 3 (11 binary) + * 2 becomes 4 (100 binary), -2 becomes 5 (101 binary) + */ +function toVLQSigned(aValue) { + return aValue < 0 + ? ((-aValue) << 1) + 1 + : (aValue << 1) + 0; +} - /** - * Converts to a two-complement value from a value where the sign bit is - * placed in the least significant bit. For example, as decimals: - * 2 (10 binary) becomes 1, 3 (11 binary) becomes -1 - * 4 (100 binary) becomes 2, 5 (101 binary) becomes -2 - */ - function fromVLQSigned(aValue) { - var isNegative = (aValue & 1) === 1; - var shifted = aValue >> 1; - return isNegative ? -shifted : shifted; - } +/** + * Converts to a two-complement value from a value where the sign bit is + * placed in the least significant bit. For example, as decimals: + * 2 (10 binary) becomes 1, 3 (11 binary) becomes -1 + * 4 (100 binary) becomes 2, 5 (101 binary) becomes -2 + */ +function fromVLQSigned(aValue) { + var isNegative = (aValue & 1) === 1; + var shifted = aValue >> 1; + return isNegative + ? -shifted + : shifted; +} - /** - * Returns the base 64 VLQ encoded value. - */ - exports.encode = function base64VLQ_encode(aValue) { - var encoded = ""; - var digit; +/** + * Returns the base 64 VLQ encoded value. + */ +exports.encode = function base64VLQ_encode(aValue) { + var encoded = ""; + var digit; - var vlq = toVLQSigned(aValue); + var vlq = toVLQSigned(aValue); - do { - digit = vlq & VLQ_BASE_MASK; - vlq >>>= VLQ_BASE_SHIFT; - if (vlq > 0) { - // There are still more digits in this value, so we must make sure the - // continuation bit is marked. - digit |= VLQ_CONTINUATION_BIT; - } - encoded += base64.encode(digit); - } while (vlq > 0); + do { + digit = vlq & VLQ_BASE_MASK; + vlq >>>= VLQ_BASE_SHIFT; + if (vlq > 0) { + // There are still more digits in this value, so we must make sure the + // continuation bit is marked. + digit |= VLQ_CONTINUATION_BIT; + } + encoded += base64.encode(digit); + } while (vlq > 0); - return encoded; - }; + return encoded; +}; - /** - * Decodes the next base 64 VLQ value from the given string and returns the - * value and the rest of the string via the out parameter. - */ - exports.decode = function base64VLQ_decode(aStr, aIndex, aOutParam) { - var strLen = aStr.length; - var result = 0; - var shift = 0; - var continuation, digit; +/** + * Decodes the next base 64 VLQ value from the given string and returns the + * value and the rest of the string via the out parameter. 
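In the VLQ scheme above, the sign is moved into the least-significant bit and bit 6 of each digit marks continuation, so small deltas fit in a single base-64 character. A few concrete values (the decode body follows just below); the `out` object is only a holder for decode's two results:

    base64VLQ.encode(0);     // "A"
    base64VLQ.encode(1);     // "C"   (binary 10: value 1, positive)
    base64VLQ.encode(-1);    // "D"   (binary 11: value 1, negative)
    base64VLQ.encode(16);    // "gB"  - 100000 binary overflows the 5 value bits, so two digits

    const out = {};
    base64VLQ.decode("gB", 0, out);
    out.value;               // 16
    out.rest;                // 2 - index just past the digits that were consumed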
+ */ +exports.decode = function base64VLQ_decode(aStr, aIndex, aOutParam) { + var strLen = aStr.length; + var result = 0; + var shift = 0; + var continuation, digit; - do { - if (aIndex >= strLen) { - throw new Error("Expected more digits in base 64 VLQ value."); - } + do { + if (aIndex >= strLen) { + throw new Error("Expected more digits in base 64 VLQ value."); + } - digit = base64.decode(aStr.charCodeAt(aIndex++)); - if (digit === -1) { - throw new Error("Invalid base64 digit: " + aStr.charAt(aIndex - 1)); - } + digit = base64.decode(aStr.charCodeAt(aIndex++)); + if (digit === -1) { + throw new Error("Invalid base64 digit: " + aStr.charAt(aIndex - 1)); + } - continuation = !!(digit & VLQ_CONTINUATION_BIT); - digit &= VLQ_BASE_MASK; - result = result + (digit << shift); - shift += VLQ_BASE_SHIFT; - } while (continuation); + continuation = !!(digit & VLQ_CONTINUATION_BIT); + digit &= VLQ_BASE_MASK; + result = result + (digit << shift); + shift += VLQ_BASE_SHIFT; + } while (continuation); - aOutParam.value = fromVLQSigned(result); - aOutParam.rest = aIndex; - }; + aOutParam.value = fromVLQSigned(result); + aOutParam.rest = aIndex; +}; -/***/ }, + +/***/ }), /* 13 */ -/***/ function(module, exports) { +/***/ (function(module, exports, __webpack_require__) { - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ - var intToCharMap = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'.split(''); +var util = __webpack_require__(2); +var has = Object.prototype.hasOwnProperty; +var hasNativeMap = typeof Map !== "undefined"; - /** - * Encode an integer in the range of 0 to 63 to a single base 64 digit. - */ - exports.encode = function (number) { - if (0 <= number && number < intToCharMap.length) { - return intToCharMap[number]; - } - throw new TypeError("Must be between 0 and 63: " + number); - }; +/** + * A data structure which is a combination of an array and a set. Adding a new + * member is O(1), testing for membership is O(1), and finding the index of an + * element is O(1). Removing elements from the set is not supported. Only + * strings are supported for membership. + */ +function ArraySet() { + this._array = []; + this._set = hasNativeMap ? new Map() : Object.create(null); +} - /** - * Decode a single base 64 character code digit to an integer. Returns -1 on - * failure. - */ - exports.decode = function (charCode) { - var bigA = 65; // 'A' - var bigZ = 90; // 'Z' +/** + * Static method for creating ArraySet instances from an existing array. + */ +ArraySet.fromArray = function ArraySet_fromArray(aArray, aAllowDuplicates) { + var set = new ArraySet(); + for (var i = 0, len = aArray.length; i < len; i++) { + set.add(aArray[i], aAllowDuplicates); + } + return set; +}; - var littleA = 97; // 'a' - var littleZ = 122; // 'z' +/** + * Return how many unique items are in this ArraySet. If duplicates have been + * added, than those do not count towards the size. + * + * @returns Number + */ +ArraySet.prototype.size = function ArraySet_size() { + return hasNativeMap ? 
this._set.size : Object.getOwnPropertyNames(this._set).length; +}; - var zero = 48; // '0' - var nine = 57; // '9' +/** + * Add the given string to this set. + * + * @param String aStr + */ +ArraySet.prototype.add = function ArraySet_add(aStr, aAllowDuplicates) { + var sStr = hasNativeMap ? aStr : util.toSetString(aStr); + var isDuplicate = hasNativeMap ? this.has(aStr) : has.call(this._set, sStr); + var idx = this._array.length; + if (!isDuplicate || aAllowDuplicates) { + this._array.push(aStr); + } + if (!isDuplicate) { + if (hasNativeMap) { + this._set.set(aStr, idx); + } else { + this._set[sStr] = idx; + } + } +}; - var plus = 43; // '+' - var slash = 47; // '/' +/** + * Is the given string a member of this set? + * + * @param String aStr + */ +ArraySet.prototype.has = function ArraySet_has(aStr) { + if (hasNativeMap) { + return this._set.has(aStr); + } else { + var sStr = util.toSetString(aStr); + return has.call(this._set, sStr); + } +}; - var littleOffset = 26; - var numberOffset = 52; +/** + * What is the index of the given string in the array? + * + * @param String aStr + */ +ArraySet.prototype.indexOf = function ArraySet_indexOf(aStr) { + if (hasNativeMap) { + var idx = this._set.get(aStr); + if (idx >= 0) { + return idx; + } + } else { + var sStr = util.toSetString(aStr); + if (has.call(this._set, sStr)) { + return this._set[sStr]; + } + } - // 0 - 25: ABCDEFGHIJKLMNOPQRSTUVWXYZ - if (bigA <= charCode && charCode <= bigZ) { - return charCode - bigA; - } + throw new Error('"' + aStr + '" is not in the set.'); +}; - // 26 - 51: abcdefghijklmnopqrstuvwxyz - if (littleA <= charCode && charCode <= littleZ) { - return charCode - littleA + littleOffset; - } +/** + * What is the element at the given index? + * + * @param Number aIdx + */ +ArraySet.prototype.at = function ArraySet_at(aIdx) { + if (aIdx >= 0 && aIdx < this._array.length) { + return this._array[aIdx]; + } + throw new Error('No element indexed by ' + aIdx); +}; - // 52 - 61: 0123456789 - if (zero <= charCode && charCode <= nine) { - return charCode - zero + numberOffset; - } +/** + * Returns the array representation of this set (which has the proper indices + * indicated by indexOf). Note that this is a copy of the internal array used + * for storing the members so that no one can mess with internal state. + */ +ArraySet.prototype.toArray = function ArraySet_toArray() { + return this._array.slice(); +}; - // 62: + - if (charCode == plus) { - return 62; - } +exports.ArraySet = ArraySet; - // 63: / - if (charCode == slash) { - return 63; - } - // Invalid base64 digit. - return -1; - }; - -/***/ }, +/***/ }), /* 14 */ -/***/ function(module, exports) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - /** - * This is a helper function for getting values from parameter/options - * objects. - * - * @param args The object we are extracting values from - * @param name The name of the property we are getting. - * @param defaultValue An optional value to return if the property is missing - * from the object. If this is not specified and the property is missing, an - * error will be thrown. 
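Taken together, the ArraySet methods above behave like a growable array with constant-time membership and index lookups. A usage sketch, assuming the ArraySet exported above is in scope (the file names are made up):

```js
// Usage sketch for the ArraySet above (hypothetical file names).
const set = ArraySet.fromArray(["foo.js", "bar.js"], /* aAllowDuplicates */ false);

set.add("baz.js");
set.add("foo.js");                    // already a member: ignored, index unchanged

console.log(set.size());              // 3
console.log(set.has("bar.js"));       // true
console.log(set.indexOf("baz.js"));   // 2
console.log(set.at(0));               // "foo.js"
console.log(set.toArray());           // [ 'foo.js', 'bar.js', 'baz.js' ]
```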
- */ - function getArg(aArgs, aName, aDefaultValue) { - if (aName in aArgs) { - return aArgs[aName]; - } else if (arguments.length === 3) { - return aDefaultValue; - } else { - throw new Error('"' + aName + '" is a required argument.'); - } - } - exports.getArg = getArg; - - var urlRegexp = /^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/; - var dataUrlRegexp = /^data:.+\,.+$/; - - function urlParse(aUrl) { - var match = aUrl.match(urlRegexp); - if (!match) { - return null; - } - return { - scheme: match[1], - auth: match[2], - host: match[3], - port: match[4], - path: match[5] - }; - } - exports.urlParse = urlParse; - - function urlGenerate(aParsedUrl) { - var url = ''; - if (aParsedUrl.scheme) { - url += aParsedUrl.scheme + ':'; - } - url += '//'; - if (aParsedUrl.auth) { - url += aParsedUrl.auth + '@'; - } - if (aParsedUrl.host) { - url += aParsedUrl.host; - } - if (aParsedUrl.port) { - url += ":" + aParsedUrl.port; - } - if (aParsedUrl.path) { - url += aParsedUrl.path; - } - return url; - } - exports.urlGenerate = urlGenerate; - - /** - * Normalizes a path, or the path portion of a URL: - * - * - Replaces consecutive slashes with one slash. - * - Removes unnecessary '.' parts. - * - Removes unnecessary '/..' parts. - * - * Based on code in the Node.js 'path' core module. - * - * @param aPath The path or url to normalize. - */ - function normalize(aPath) { - var path = aPath; - var url = urlParse(aPath); - if (url) { - if (!url.path) { - return aPath; - } - path = url.path; - } - var isAbsolute = exports.isAbsolute(path); - - var parts = path.split(/\/+/); - for (var part, up = 0, i = parts.length - 1; i >= 0; i--) { - part = parts[i]; - if (part === '.') { - parts.splice(i, 1); - } else if (part === '..') { - up++; - } else if (up > 0) { - if (part === '') { - // The first part is blank if the path is absolute. Trying to go - // above the root is a no-op. Therefore we can remove all '..' parts - // directly after the root. - parts.splice(i + 1, up); - up = 0; - } else { - parts.splice(i, 2); - up--; - } - } - } - path = parts.join('/'); - - if (path === '') { - path = isAbsolute ? '/' : '.'; - } - - if (url) { - url.path = path; - return urlGenerate(url); - } - return path; - } - exports.normalize = normalize; - - /** - * Joins two paths/URLs. - * - * @param aRoot The root path or URL. - * @param aPath The path or URL to be joined with the root. - * - * - If aPath is a URL or a data URI, aPath is returned, unless aPath is a - * scheme-relative URL: Then the scheme of aRoot, if any, is prepended - * first. - * - Otherwise aPath is a path. If aRoot is a URL, then its path portion - * is updated with the result and aRoot is returned. Otherwise the result - * is returned. - * - If aPath is absolute, the result is aPath. - * - Otherwise the two paths are joined with a slash. - * - Joining for example 'http://' and 'www.example.com' is also supported. 
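The normalize() above (and the join() documented here) implement ordinary path resolution: collapse duplicate slashes, drop '.' segments, and resolve '..' segments where possible. A small standalone sketch of that behaviour, assuming plain paths rather than full URLs:

```js
// Minimal standalone sketch of the normalize() behaviour described above.
function normalizePath(path) {
  const isAbsolute = path.charAt(0) === "/";
  const parts = path.split(/\/+/);
  const out = [];
  for (const part of parts) {
    if (part === "." || part === "") continue;        // no-op components
    if (part === ".." && out.length && out[out.length - 1] !== "..") {
      out.pop();                                      // step back over the previous component
    } else if (part !== ".." || !isAbsolute) {
      out.push(part);                                 // keep '..' only when it cannot be resolved
    }
  }
  const joined = out.join("/");
  return (isAbsolute ? "/" : "") + (joined || (isAbsolute ? "" : "."));
}

console.log(normalizePath("/a/b//./../c"));  // "/a/c"
console.log(normalizePath("a/./b/../../c")); // "c"
```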
- */ - function join(aRoot, aPath) { - if (aRoot === "") { - aRoot = "."; - } - if (aPath === "") { - aPath = "."; - } - var aPathUrl = urlParse(aPath); - var aRootUrl = urlParse(aRoot); - if (aRootUrl) { - aRoot = aRootUrl.path || '/'; - } - - // `join(foo, '//www.example.org')` - if (aPathUrl && !aPathUrl.scheme) { - if (aRootUrl) { - aPathUrl.scheme = aRootUrl.scheme; - } - return urlGenerate(aPathUrl); - } - - if (aPathUrl || aPath.match(dataUrlRegexp)) { - return aPath; - } - - // `join('http://', 'www.example.com')` - if (aRootUrl && !aRootUrl.host && !aRootUrl.path) { - aRootUrl.host = aPath; - return urlGenerate(aRootUrl); - } - - var joined = aPath.charAt(0) === '/' ? aPath : normalize(aRoot.replace(/\/+$/, '') + '/' + aPath); - - if (aRootUrl) { - aRootUrl.path = joined; - return urlGenerate(aRootUrl); - } - return joined; - } - exports.join = join; - - exports.isAbsolute = function (aPath) { - return aPath.charAt(0) === '/' || urlRegexp.test(aPath); - }; - - /** - * Make a path relative to a URL or another path. - * - * @param aRoot The root path or URL. - * @param aPath The path or URL to be made relative to aRoot. - */ - function relative(aRoot, aPath) { - if (aRoot === "") { - aRoot = "."; - } - - aRoot = aRoot.replace(/\/$/, ''); - - // It is possible for the path to be above the root. In this case, simply - // checking whether the root is a prefix of the path won't work. Instead, we - // need to remove components from the root one by one, until either we find - // a prefix that fits, or we run out of components to remove. - var level = 0; - while (aPath.indexOf(aRoot + '/') !== 0) { - var index = aRoot.lastIndexOf("/"); - if (index < 0) { - return aPath; - } - - // If the only part of the root that is left is the scheme (i.e. http://, - // file:///, etc.), one or more slashes (/), or simply nothing at all, we - // have exhausted all components, so the path is not relative to the root. - aRoot = aRoot.slice(0, index); - if (aRoot.match(/^([^\/]+:\/)?\/*$/)) { - return aPath; - } - - ++level; - } - - // Make sure we add a "../" for each component we removed from the root. - return Array(level + 1).join("../") + aPath.substr(aRoot.length + 1); - } - exports.relative = relative; - - var supportsNullProto = function () { - var obj = Object.create(null); - return !('__proto__' in obj); - }(); - - function identity(s) { - return s; - } - - /** - * Because behavior goes wacky when you set `__proto__` on objects, we - * have to prefix all the strings in our set with an arbitrary character. - * - * See https://github.com/mozilla/source-map/pull/31 and - * https://github.com/mozilla/source-map/issues/30 - * - * @param String aStr - */ - function toSetString(aStr) { - if (isProtoString(aStr)) { - return '$' + aStr; - } - - return aStr; - } - exports.toSetString = supportsNullProto ? identity : toSetString; - - function fromSetString(aStr) { - if (isProtoString(aStr)) { - return aStr.slice(1); - } - - return aStr; - } - exports.fromSetString = supportsNullProto ? 
identity : fromSetString; - - function isProtoString(s) { - if (!s) { - return false; - } - - var length = s.length; - - if (length < 9 /* "__proto__".length */) { - return false; - } - - if (s.charCodeAt(length - 1) !== 95 /* '_' */ || s.charCodeAt(length - 2) !== 95 /* '_' */ || s.charCodeAt(length - 3) !== 111 /* 'o' */ || s.charCodeAt(length - 4) !== 116 /* 't' */ || s.charCodeAt(length - 5) !== 111 /* 'o' */ || s.charCodeAt(length - 6) !== 114 /* 'r' */ || s.charCodeAt(length - 7) !== 112 /* 'p' */ || s.charCodeAt(length - 8) !== 95 /* '_' */ || s.charCodeAt(length - 9) !== 95 /* '_' */) { - return false; - } - - for (var i = length - 10; i >= 0; i--) { - if (s.charCodeAt(i) !== 36 /* '$' */) { - return false; - } - } - - return true; - } - - /** - * Comparator between two mappings where the original positions are compared. - * - * Optionally pass in `true` as `onlyCompareGenerated` to consider two - * mappings with the same original source/line/column, but different generated - * line and column the same. Useful when searching for a mapping with a - * stubbed out mapping. - */ - function compareByOriginalPositions(mappingA, mappingB, onlyCompareOriginal) { - var cmp = strcmp(mappingA.source, mappingB.source); - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalLine - mappingB.originalLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalColumn - mappingB.originalColumn; - if (cmp !== 0 || onlyCompareOriginal) { - return cmp; - } - - cmp = mappingA.generatedColumn - mappingB.generatedColumn; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.generatedLine - mappingB.generatedLine; - if (cmp !== 0) { - return cmp; - } - - return strcmp(mappingA.name, mappingB.name); - } - exports.compareByOriginalPositions = compareByOriginalPositions; - - /** - * Comparator between two mappings with deflated source and name indices where - * the generated positions are compared. - * - * Optionally pass in `true` as `onlyCompareGenerated` to consider two - * mappings with the same generated line and column, but different - * source/name/original line and column the same. Useful when searching for a - * mapping with a stubbed out mapping. - */ - function compareByGeneratedPositionsDeflated(mappingA, mappingB, onlyCompareGenerated) { - var cmp = mappingA.generatedLine - mappingB.generatedLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.generatedColumn - mappingB.generatedColumn; - if (cmp !== 0 || onlyCompareGenerated) { - return cmp; - } - - cmp = strcmp(mappingA.source, mappingB.source); - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalLine - mappingB.originalLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalColumn - mappingB.originalColumn; - if (cmp !== 0) { - return cmp; - } - - return strcmp(mappingA.name, mappingB.name); - } - exports.compareByGeneratedPositionsDeflated = compareByGeneratedPositionsDeflated; - - function strcmp(aStr1, aStr2) { - if (aStr1 === aStr2) { - return 0; - } - - if (aStr1 === null) { - return 1; // aStr2 !== null - } - - if (aStr2 === null) { - return -1; // aStr1 !== null - } - - if (aStr1 > aStr2) { - return 1; - } - - return -1; - } - - /** - * Comparator between two mappings with inflated source and name strings where - * the generated positions are compared. 
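The comparators above all follow the same shape: compare the most significant field first and fall through to the next field only on a tie. A standalone sketch of that ordering applied to a few mapping-like objects (field names follow the mapping objects used in this file):

```js
// Sort a handful of mapping-like objects by generated position,
// the same first-field-then-tiebreak shape as the comparators above.
const mappings = [
  { generatedLine: 2, generatedColumn: 4 },
  { generatedLine: 1, generatedColumn: 9 },
  { generatedLine: 1, generatedColumn: 0 },
];

mappings.sort(function (a, b) {
  return (a.generatedLine - b.generatedLine) ||
         (a.generatedColumn - b.generatedColumn);
});

console.log(mappings.map(m => m.generatedLine + ":" + m.generatedColumn));
// [ '1:0', '1:9', '2:4' ]
```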
- */ - function compareByGeneratedPositionsInflated(mappingA, mappingB) { - var cmp = mappingA.generatedLine - mappingB.generatedLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.generatedColumn - mappingB.generatedColumn; - if (cmp !== 0) { - return cmp; - } - - cmp = strcmp(mappingA.source, mappingB.source); - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalLine - mappingB.originalLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalColumn - mappingB.originalColumn; - if (cmp !== 0) { - return cmp; - } - - return strcmp(mappingA.name, mappingB.name); - } - exports.compareByGeneratedPositionsInflated = compareByGeneratedPositionsInflated; - - /** - * Strip any JSON XSSI avoidance prefix from the string (as documented - * in the source maps specification), and then parse the string as - * JSON. - */ - function parseSourceMapInput(str) { - return JSON.parse(str.replace(/^\)]}'[^\n]*\n/, '')); - } - exports.parseSourceMapInput = parseSourceMapInput; - - /** - * Compute the URL of a source given the the source root, the source's - * URL, and the source map's URL. - */ - function computeSourceURL(sourceRoot, sourceURL, sourceMapURL) { - sourceURL = sourceURL || ''; - - if (sourceRoot) { - // This follows what Chrome does. - if (sourceRoot[sourceRoot.length - 1] !== '/' && sourceURL[0] !== '/') { - sourceRoot += '/'; - } - // The spec says: - // Line 4: An optional source root, useful for relocating source - // files on a server or removing repeated values in the - // “sources” entry. This value is prepended to the individual - // entries in the “source” field. - sourceURL = sourceRoot + sourceURL; - } - - // Historically, SourceMapConsumer did not take the sourceMapURL as - // a parameter. This mode is still somewhat supported, which is why - // this code block is conditional. However, it's preferable to pass - // the source map URL to SourceMapConsumer, so that this function - // can implement the source URL resolution algorithm as outlined in - // the spec. This block is basically the equivalent of: - // new URL(sourceURL, sourceMapURL).toString() - // ... except it avoids using URL, which wasn't available in the - // older releases of node still supported by this library. - // - // The spec says: - // If the sources are not absolute URLs after prepending of the - // “sourceRoot”, the sources are resolved relative to the - // SourceMap (like resolving script src in a html document). - if (sourceMapURL) { - var parsed = urlParse(sourceMapURL); - if (!parsed) { - throw new Error("sourceMapURL could not be parsed"); - } - if (parsed.path) { - // Strip the last path component, but keep the "/". - var index = parsed.path.lastIndexOf('/'); - if (index >= 0) { - parsed.path = parsed.path.substring(0, index + 1); - } - } - sourceURL = join(urlGenerate(parsed), sourceURL); - } - - return normalize(sourceURL); - } - exports.computeSourceURL = computeSourceURL; - -/***/ }, -/* 15 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var util = __webpack_require__(14); - var has = Object.prototype.hasOwnProperty; - var hasNativeMap = typeof Map !== "undefined"; - - /** - * A data structure which is a combination of an array and a set. 
Adding a new - * member is O(1), testing for membership is O(1), and finding the index of an - * element is O(1). Removing elements from the set is not supported. Only - * strings are supported for membership. - */ - function ArraySet() { - this._array = []; - this._set = hasNativeMap ? new Map() : Object.create(null); - } - - /** - * Static method for creating ArraySet instances from an existing array. - */ - ArraySet.fromArray = function ArraySet_fromArray(aArray, aAllowDuplicates) { - var set = new ArraySet(); - for (var i = 0, len = aArray.length; i < len; i++) { - set.add(aArray[i], aAllowDuplicates); - } - return set; - }; - - /** - * Return how many unique items are in this ArraySet. If duplicates have been - * added, than those do not count towards the size. - * - * @returns Number - */ - ArraySet.prototype.size = function ArraySet_size() { - return hasNativeMap ? this._set.size : Object.getOwnPropertyNames(this._set).length; - }; - - /** - * Add the given string to this set. - * - * @param String aStr - */ - ArraySet.prototype.add = function ArraySet_add(aStr, aAllowDuplicates) { - var sStr = hasNativeMap ? aStr : util.toSetString(aStr); - var isDuplicate = hasNativeMap ? this.has(aStr) : has.call(this._set, sStr); - var idx = this._array.length; - if (!isDuplicate || aAllowDuplicates) { - this._array.push(aStr); - } - if (!isDuplicate) { - if (hasNativeMap) { - this._set.set(aStr, idx); - } else { - this._set[sStr] = idx; - } - } - }; - - /** - * Is the given string a member of this set? - * - * @param String aStr - */ - ArraySet.prototype.has = function ArraySet_has(aStr) { - if (hasNativeMap) { - return this._set.has(aStr); - } else { - var sStr = util.toSetString(aStr); - return has.call(this._set, sStr); - } - }; - - /** - * What is the index of the given string in the array? - * - * @param String aStr - */ - ArraySet.prototype.indexOf = function ArraySet_indexOf(aStr) { - if (hasNativeMap) { - var idx = this._set.get(aStr); - if (idx >= 0) { - return idx; - } - } else { - var sStr = util.toSetString(aStr); - if (has.call(this._set, sStr)) { - return this._set[sStr]; - } - } - - throw new Error('"' + aStr + '" is not in the set.'); - }; - - /** - * What is the element at the given index? - * - * @param Number aIdx - */ - ArraySet.prototype.at = function ArraySet_at(aIdx) { - if (aIdx >= 0 && aIdx < this._array.length) { - return this._array[aIdx]; - } - throw new Error('No element indexed by ' + aIdx); - }; - - /** - * Returns the array representation of this set (which has the proper indices - * indicated by indexOf). Note that this is a copy of the internal array used - * for storing the members so that no one can mess with internal state. - */ - ArraySet.prototype.toArray = function ArraySet_toArray() { - return this._array.slice(); - }; - - exports.ArraySet = ArraySet; - -/***/ }, +/***/ (function(module, exports, __webpack_require__) { + +"use strict"; +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + + + +/** + * SourceMapConsumer for WebAssembly source maps. It transposes columns with + * lines, which allows mapping data to be used with SpiderMonkey Debugger API. 
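The transposition described above can be illustrated without the real consumer: a wasm "generated position" carries the byte offset in its line field, so the remapping asks the wrapped map for line 1 and column = offset. A conceptual sketch with a hypothetical underlying consumer:

```js
// Conceptual sketch of the line/column transposition described above
// (hypothetical underlying consumer; not the class defined in this module).
const underlying = {
  originalPositionFor({ line, column }) {
    // Pretend byte offset 42 maps to src.c line 7, column 3.
    return column === 42
      ? { source: "src.c", line: 7, column: 3, name: null }
      : { source: null, line: null, column: null, name: null };
  }
};

function originalPositionForWasm(map, generatedPosition) {
  // The wasm byte offset arrives in `line` and becomes the column of the
  // underlying lookup; wasm itself has no meaningful line, so line is 1.
  return map.originalPositionFor({ line: 1, column: generatedPosition.line });
}

console.log(originalPositionForWasm(underlying, { line: 42, column: 0 }));
// -> { source: 'src.c', line: 7, column: 3, name: null }
```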
+ */ + +class WasmRemap { + /** + * @param map SourceMapConsumer + */ + constructor(map) { + this._map = map; + this.version = map.version; + this.file = map.file; + this._computeColumnSpans = false; + } + + get sources() { + return this._map.sources; + } + + get sourceRoot() { + return this._map.sourceRoot; + } + + get names() { + return this._map.names; + } + + get sourcesContent() { + return this._map.sourcesContent; + } + + get mappings() { + throw new Error("not supported"); + } + + computeColumnSpans() { + this._computeColumnSpans = true; + } + + originalPositionFor(generatedPosition) { + let result = this._map.originalPositionFor({ + line: 1, + column: generatedPosition.line, + bias: generatedPosition.bias + }); + return result; + } + + _remapGeneratedPosition(position) { + let generatedPosition = { + line: position.column, + column: 0 + }; + if (this._computeColumnSpans) { + generatedPosition.lastColumn = Infinity; + } + return generatedPosition; + } + + generatedPositionFor(originalPosition) { + let position = this._map.generatedPositionFor(originalPosition); + return this._remapGeneratedPosition(position); + } + + allGeneratedPositionsFor(originalPosition) { + let positions = this._map.allGeneratedPositionsFor(originalPosition); + return positions.map(position => { + return this._remapGeneratedPosition(position); + }); + } + + hasContentsOfAllSources() { + return this._map.hasContentsOfAllSources(); + } + + sourceContentFor(source, returnNullOnMissing) { + return this._map.sourceContentFor(source, returnNullOnMissing); + } + + eachMapping(callback, context, order) { + this._map.eachMapping(entry => { + let { + source, + generatedColumn, + originalLine, + originalColumn, + name + } = entry; + callback({ + source, + generatedLine: generatedColumn, + generatedColumn: 0, + originalLine, + originalColumn, + name + }); + }, context, order); + } +} + +exports.WasmRemap = WasmRemap; + +/***/ }), +/* 15 */, /* 16 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2014 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var util = __webpack_require__(14); - - /** - * Determine whether mappingB is after mappingA with respect to generated - * position. - */ - function generatedPositionAfter(mappingA, mappingB) { - // Optimized for most common case - var lineA = mappingA.generatedLine; - var lineB = mappingB.generatedLine; - var columnA = mappingA.generatedColumn; - var columnB = mappingB.generatedColumn; - return lineB > lineA || lineB == lineA && columnB >= columnA || util.compareByGeneratedPositionsInflated(mappingA, mappingB) <= 0; - } - - /** - * A data structure to provide a sorted view of accumulated mappings in a - * performance conscious manner. It trades a neglibable overhead in general - * case for a large speedup in case of mappings being added in order. - */ - function MappingList() { - this._array = []; - this._sorted = true; - // Serves as infimum - this._last = { generatedLine: -1, generatedColumn: 0 }; - } - - /** - * Iterate through internal items. This method takes the same arguments that - * `Array.prototype.forEach` takes. - * - * NOTE: The order of the mappings is NOT guaranteed. - */ - MappingList.prototype.unsortedForEach = function MappingList_forEach(aCallback, aThisArg) { - this._array.forEach(aCallback, aThisArg); - }; - - /** - * Add the given source mapping. 
- * - * @param Object aMapping - */ - MappingList.prototype.add = function MappingList_add(aMapping) { - if (generatedPositionAfter(this._last, aMapping)) { - this._last = aMapping; - this._array.push(aMapping); - } else { - this._sorted = false; - this._array.push(aMapping); - } - }; - - /** - * Returns the flat, sorted array of mappings. The mappings are sorted by - * generated position. - * - * WARNING: This method returns internal data without copying, for - * performance. The return value must NOT be mutated, and should be treated as - * an immutable borrow. If you want to take ownership, you must make your own - * copy. - */ - MappingList.prototype.toArray = function MappingList_toArray() { - if (!this._sorted) { - this._array.sort(util.compareByGeneratedPositionsInflated); - this._sorted = true; - } - return this._array; - }; - - exports.MappingList = MappingList; - -/***/ }, -/* 17 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var util = __webpack_require__(14); - var binarySearch = __webpack_require__(18); - var ArraySet = __webpack_require__(15).ArraySet; - var base64VLQ = __webpack_require__(12); - var quickSort = __webpack_require__(19).quickSort; - - function SourceMapConsumer(aSourceMap, aSourceMapURL) { - var sourceMap = aSourceMap; - if (typeof aSourceMap === 'string') { - sourceMap = util.parseSourceMapInput(aSourceMap); - } - - return sourceMap.sections != null ? new IndexedSourceMapConsumer(sourceMap, aSourceMapURL) : new BasicSourceMapConsumer(sourceMap, aSourceMapURL); - } - - SourceMapConsumer.fromSourceMap = function (aSourceMap, aSourceMapURL) { - return BasicSourceMapConsumer.fromSourceMap(aSourceMap, aSourceMapURL); - }; - - /** - * The version of the source mapping spec that we are consuming. - */ - SourceMapConsumer.prototype._version = 3; - - // `__generatedMappings` and `__originalMappings` are arrays that hold the - // parsed mapping coordinates from the source map's "mappings" attribute. They - // are lazily instantiated, accessed via the `_generatedMappings` and - // `_originalMappings` getters respectively, and we only parse the mappings - // and create these arrays once queried for a source location. We jump through - // these hoops because there can be many thousands of mappings, and parsing - // them is expensive, so we only want to do it if we must. - // - // Each object in the arrays is of the form: - // - // { - // generatedLine: The line number in the generated code, - // generatedColumn: The column number in the generated code, - // source: The path to the original source file that generated this - // chunk of code, - // originalLine: The line number in the original source that - // corresponds to this chunk of generated code, - // originalColumn: The column number in the original source that - // corresponds to this chunk of generated code, - // name: The name of the original symbol which generated this chunk of - // code. - // } - // - // All properties except for `generatedLine` and `generatedColumn` can be - // `null`. - // - // `_generatedMappings` is ordered by the generated positions. - // - // `_originalMappings` is ordered by the original positions. 
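The `_generatedMappings` / `_originalMappings` arrangement described above is a lazy, memoized getter: nothing is parsed until the first property access, after which the parsed arrays are cached on the instance. A minimal sketch of the same pattern with a hypothetical consumer and a trivial "parse" step:

```js
// Minimal sketch of the lazy, memoized getter pattern described above.
function LazyConsumer(rawMappings) {
  this._raw = rawMappings;
  this.__parsed = null;
}

Object.defineProperty(LazyConsumer.prototype, "_parsed", {
  configurable: true,
  enumerable: true,
  get: function () {
    if (!this.__parsed) {
      // Expensive work happens only on first access, then the result is cached.
      this.__parsed = this._raw.split(";").map(line => line.split(","));
    }
    return this.__parsed;
  }
});

const c = new LazyConsumer("AAAA,CAAC;AACA");
console.log(c.__parsed);            // null - nothing parsed yet
console.log(c._parsed[0]);          // [ 'AAAA', 'CAAC' ] - parsed on demand
console.log(c.__parsed !== null);   // true - result is now cached
```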
- - SourceMapConsumer.prototype.__generatedMappings = null; - Object.defineProperty(SourceMapConsumer.prototype, '_generatedMappings', { - configurable: true, - enumerable: true, - get: function () { - if (!this.__generatedMappings) { - this._parseMappings(this._mappings, this.sourceRoot); - } - - return this.__generatedMappings; - } - }); - - SourceMapConsumer.prototype.__originalMappings = null; - Object.defineProperty(SourceMapConsumer.prototype, '_originalMappings', { - configurable: true, - enumerable: true, - get: function () { - if (!this.__originalMappings) { - this._parseMappings(this._mappings, this.sourceRoot); - } - - return this.__originalMappings; - } - }); - - SourceMapConsumer.prototype._charIsMappingSeparator = function SourceMapConsumer_charIsMappingSeparator(aStr, index) { - var c = aStr.charAt(index); - return c === ";" || c === ","; - }; - - /** - * Parse the mappings in a string in to a data structure which we can easily - * query (the ordered arrays in the `this.__generatedMappings` and - * `this.__originalMappings` properties). - */ - SourceMapConsumer.prototype._parseMappings = function SourceMapConsumer_parseMappings(aStr, aSourceRoot) { - throw new Error("Subclasses must implement _parseMappings"); - }; - - SourceMapConsumer.GENERATED_ORDER = 1; - SourceMapConsumer.ORIGINAL_ORDER = 2; - - SourceMapConsumer.GREATEST_LOWER_BOUND = 1; - SourceMapConsumer.LEAST_UPPER_BOUND = 2; - - /** - * Iterate over each mapping between an original source/line/column and a - * generated line/column in this source map. - * - * @param Function aCallback - * The function that is called with each mapping. - * @param Object aContext - * Optional. If specified, this object will be the value of `this` every - * time that `aCallback` is called. - * @param aOrder - * Either `SourceMapConsumer.GENERATED_ORDER` or - * `SourceMapConsumer.ORIGINAL_ORDER`. Specifies whether you want to - * iterate over the mappings sorted by the generated file's line/column - * order or the original's source/line/column order, respectively. Defaults to - * `SourceMapConsumer.GENERATED_ORDER`. - */ - SourceMapConsumer.prototype.eachMapping = function SourceMapConsumer_eachMapping(aCallback, aContext, aOrder) { - var context = aContext || null; - var order = aOrder || SourceMapConsumer.GENERATED_ORDER; - - var mappings; - switch (order) { - case SourceMapConsumer.GENERATED_ORDER: - mappings = this._generatedMappings; - break; - case SourceMapConsumer.ORIGINAL_ORDER: - mappings = this._originalMappings; - break; - default: - throw new Error("Unknown order of iteration."); - } - - var sourceRoot = this.sourceRoot; - mappings.map(function (mapping) { - var source = mapping.source === null ? null : this._sources.at(mapping.source); - source = util.computeSourceURL(sourceRoot, source, this._sourceMapURL); - return { - source: source, - generatedLine: mapping.generatedLine, - generatedColumn: mapping.generatedColumn, - originalLine: mapping.originalLine, - originalColumn: mapping.originalColumn, - name: mapping.name === null ? null : this._names.at(mapping.name) - }; - }, this).forEach(aCallback, context); - }; - - /** - * Returns all generated line and column information for the original source, - * line, and column provided. If no column is provided, returns all mappings - * corresponding to a either the line we are searching for or the next - * closest line that has any mappings. 
Otherwise, returns all mappings - * corresponding to the given line and either the column we are searching for - * or the next closest column that has any offsets. - * - * The only argument is an object with the following properties: - * - * - source: The filename of the original source. - * - line: The line number in the original source. The line number is 1-based. - * - column: Optional. the column number in the original source. - * The column number is 0-based. - * - * and an array of objects is returned, each with the following properties: - * - * - line: The line number in the generated source, or null. The - * line number is 1-based. - * - column: The column number in the generated source, or null. - * The column number is 0-based. - */ - SourceMapConsumer.prototype.allGeneratedPositionsFor = function SourceMapConsumer_allGeneratedPositionsFor(aArgs) { - var line = util.getArg(aArgs, 'line'); - - // When there is no exact match, BasicSourceMapConsumer.prototype._findMapping - // returns the index of the closest mapping less than the needle. By - // setting needle.originalColumn to 0, we thus find the last mapping for - // the given line, provided such a mapping exists. - var needle = { - source: util.getArg(aArgs, 'source'), - originalLine: line, - originalColumn: util.getArg(aArgs, 'column', 0) - }; - - needle.source = this._findSourceIndex(needle.source); - if (needle.source < 0) { - return []; - } - - var mappings = []; - - var index = this._findMapping(needle, this._originalMappings, "originalLine", "originalColumn", util.compareByOriginalPositions, binarySearch.LEAST_UPPER_BOUND); - if (index >= 0) { - var mapping = this._originalMappings[index]; - - if (aArgs.column === undefined) { - var originalLine = mapping.originalLine; - - // Iterate until either we run out of mappings, or we run into - // a mapping for a different line than the one we found. Since - // mappings are sorted, this is guaranteed to find all mappings for - // the line we found. - while (mapping && mapping.originalLine === originalLine) { - mappings.push({ - line: util.getArg(mapping, 'generatedLine', null), - column: util.getArg(mapping, 'generatedColumn', null), - lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) - }); - - mapping = this._originalMappings[++index]; - } - } else { - var originalColumn = mapping.originalColumn; - - // Iterate until either we run out of mappings, or we run into - // a mapping for a different line than the one we were searching for. - // Since mappings are sorted, this is guaranteed to find all mappings for - // the line we are searching for. - while (mapping && mapping.originalLine === line && mapping.originalColumn == originalColumn) { - mappings.push({ - line: util.getArg(mapping, 'generatedLine', null), - column: util.getArg(mapping, 'generatedColumn', null), - lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) - }); - - mapping = this._originalMappings[++index]; - } - } - } - - return mappings; - }; - - exports.SourceMapConsumer = SourceMapConsumer; - - /** - * A BasicSourceMapConsumer instance represents a parsed source map which we can - * query for information about the original file positions by giving it a file - * position in the generated source. - * - * The first parameter is the raw source map (either as a JSON string, or - * already parsed to an object). According to the spec, source maps have the - * following attributes: - * - * - version: Which version of the source map spec this map is following. 
- * - sources: An array of URLs to the original source files. - * - names: An array of identifiers which can be referrenced by individual mappings. - * - sourceRoot: Optional. The URL root from which all sources are relative. - * - sourcesContent: Optional. An array of contents of the original source files. - * - mappings: A string of base64 VLQs which contain the actual mappings. - * - file: Optional. The generated file this source map is associated with. - * - * Here is an example source map, taken from the source map spec[0]: - * - * { - * version : 3, - * file: "out.js", - * sourceRoot : "", - * sources: ["foo.js", "bar.js"], - * names: ["src", "maps", "are", "fun"], - * mappings: "AA,AB;;ABCDE;" - * } - * - * The second parameter, if given, is a string whose value is the URL - * at which the source map was found. This URL is used to compute the - * sources array. - * - * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1# - */ - function BasicSourceMapConsumer(aSourceMap, aSourceMapURL) { - var sourceMap = aSourceMap; - if (typeof aSourceMap === 'string') { - sourceMap = util.parseSourceMapInput(aSourceMap); - } - - var version = util.getArg(sourceMap, 'version'); - var sources = util.getArg(sourceMap, 'sources'); - // Sass 3.3 leaves out the 'names' array, so we deviate from the spec (which - // requires the array) to play nice here. - var names = util.getArg(sourceMap, 'names', []); - var sourceRoot = util.getArg(sourceMap, 'sourceRoot', null); - var sourcesContent = util.getArg(sourceMap, 'sourcesContent', null); - var mappings = util.getArg(sourceMap, 'mappings'); - var file = util.getArg(sourceMap, 'file', null); - - // Once again, Sass deviates from the spec and supplies the version as a - // string rather than a number, so we use loose equality checking here. - if (version != this._version) { - throw new Error('Unsupported version: ' + version); - } - - if (sourceRoot) { - sourceRoot = util.normalize(sourceRoot); - } - - sources = sources.map(String) - // Some source maps produce relative source paths like "./foo.js" instead of - // "foo.js". Normalize these first so that future comparisons will succeed. - // See bugzil.la/1090768. - .map(util.normalize) - // Always ensure that absolute sources are internally stored relative to - // the source root, if the source root is absolute. Not doing this would - // be particularly problematic when the source root is a prefix of the - // source (valid, but why??). See github issue #199 and bugzil.la/1188982. - .map(function (source) { - return sourceRoot && util.isAbsolute(sourceRoot) && util.isAbsolute(source) ? util.relative(sourceRoot, source) : source; - }); - - // Pass `true` below to allow duplicate names and sources. While source maps - // are intended to be compressed and deduplicated, the TypeScript compiler - // sometimes generates source maps with duplicates in them. See Github issue - // #72 and bugzil.la/889492. 
- this._names = ArraySet.fromArray(names.map(String), true); - this._sources = ArraySet.fromArray(sources, true); - - this._absoluteSources = this._sources.toArray().map(function (s) { - return util.computeSourceURL(sourceRoot, s, aSourceMapURL); - }); - - this.sourceRoot = sourceRoot; - this.sourcesContent = sourcesContent; - this._mappings = mappings; - this._sourceMapURL = aSourceMapURL; - this.file = file; - } - - BasicSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype); - BasicSourceMapConsumer.prototype.consumer = SourceMapConsumer; - - /** - * Utility function to find the index of a source. Returns -1 if not - * found. - */ - BasicSourceMapConsumer.prototype._findSourceIndex = function (aSource) { - var relativeSource = aSource; - if (this.sourceRoot != null) { - relativeSource = util.relative(this.sourceRoot, relativeSource); - } - - if (this._sources.has(relativeSource)) { - return this._sources.indexOf(relativeSource); - } - - // Maybe aSource is an absolute URL as returned by |sources|. In - // this case we can't simply undo the transform. - var i; - for (i = 0; i < this._absoluteSources.length; ++i) { - if (this._absoluteSources[i] == aSource) { - return i; - } - } - - return -1; - }; - - /** - * Create a BasicSourceMapConsumer from a SourceMapGenerator. - * - * @param SourceMapGenerator aSourceMap - * The source map that will be consumed. - * @param String aSourceMapURL - * The URL at which the source map can be found (optional) - * @returns BasicSourceMapConsumer - */ - BasicSourceMapConsumer.fromSourceMap = function SourceMapConsumer_fromSourceMap(aSourceMap, aSourceMapURL) { - var smc = Object.create(BasicSourceMapConsumer.prototype); - - var names = smc._names = ArraySet.fromArray(aSourceMap._names.toArray(), true); - var sources = smc._sources = ArraySet.fromArray(aSourceMap._sources.toArray(), true); - smc.sourceRoot = aSourceMap._sourceRoot; - smc.sourcesContent = aSourceMap._generateSourcesContent(smc._sources.toArray(), smc.sourceRoot); - smc.file = aSourceMap._file; - smc._sourceMapURL = aSourceMapURL; - smc._absoluteSources = smc._sources.toArray().map(function (s) { - return util.computeSourceURL(smc.sourceRoot, s, aSourceMapURL); - }); - - // Because we are modifying the entries (by converting string sources and - // names to indices into the sources and names ArraySets), we have to make - // a copy of the entry or else bad things happen. Shared mutable state - // strikes again! See github issue #191. - - var generatedMappings = aSourceMap._mappings.toArray().slice(); - var destGeneratedMappings = smc.__generatedMappings = []; - var destOriginalMappings = smc.__originalMappings = []; - - for (var i = 0, length = generatedMappings.length; i < length; i++) { - var srcMapping = generatedMappings[i]; - var destMapping = new Mapping(); - destMapping.generatedLine = srcMapping.generatedLine; - destMapping.generatedColumn = srcMapping.generatedColumn; - - if (srcMapping.source) { - destMapping.source = sources.indexOf(srcMapping.source); - destMapping.originalLine = srcMapping.originalLine; - destMapping.originalColumn = srcMapping.originalColumn; - - if (srcMapping.name) { - destMapping.name = names.indexOf(srcMapping.name); - } - - destOriginalMappings.push(destMapping); - } - - destGeneratedMappings.push(destMapping); - } - - quickSort(smc.__originalMappings, util.compareByOriginalPositions); - - return smc; - }; - - /** - * The version of the source mapping spec that we are consuming. 
- */ - BasicSourceMapConsumer.prototype._version = 3; - - /** - * The list of original sources. - */ - Object.defineProperty(BasicSourceMapConsumer.prototype, 'sources', { - get: function () { - return this._absoluteSources.slice(); - } - }); - - /** - * Provide the JIT with a nice shape / hidden class. - */ - function Mapping() { - this.generatedLine = 0; - this.generatedColumn = 0; - this.source = null; - this.originalLine = null; - this.originalColumn = null; - this.name = null; - } - - /** - * Parse the mappings in a string in to a data structure which we can easily - * query (the ordered arrays in the `this.__generatedMappings` and - * `this.__originalMappings` properties). - */ - BasicSourceMapConsumer.prototype._parseMappings = function SourceMapConsumer_parseMappings(aStr, aSourceRoot) { - var generatedLine = 1; - var previousGeneratedColumn = 0; - var previousOriginalLine = 0; - var previousOriginalColumn = 0; - var previousSource = 0; - var previousName = 0; - var length = aStr.length; - var index = 0; - var cachedSegments = {}; - var temp = {}; - var originalMappings = []; - var generatedMappings = []; - var mapping, str, segment, end, value; - - while (index < length) { - if (aStr.charAt(index) === ';') { - generatedLine++; - index++; - previousGeneratedColumn = 0; - } else if (aStr.charAt(index) === ',') { - index++; - } else { - mapping = new Mapping(); - mapping.generatedLine = generatedLine; - - // Because each offset is encoded relative to the previous one, - // many segments often have the same encoding. We can exploit this - // fact by caching the parsed variable length fields of each segment, - // allowing us to avoid a second parse if we encounter the same - // segment again. - for (end = index; end < length; end++) { - if (this._charIsMappingSeparator(aStr, end)) { - break; - } - } - str = aStr.slice(index, end); - - segment = cachedSegments[str]; - if (segment) { - index += str.length; - } else { - segment = []; - while (index < end) { - base64VLQ.decode(aStr, index, temp); - value = temp.value; - index = temp.rest; - segment.push(value); - } - - if (segment.length === 2) { - throw new Error('Found a source, but no line and column'); - } - - if (segment.length === 3) { - throw new Error('Found a source and line, but no column'); - } - - cachedSegments[str] = segment; - } - - // Generated column. - mapping.generatedColumn = previousGeneratedColumn + segment[0]; - previousGeneratedColumn = mapping.generatedColumn; - - if (segment.length > 1) { - // Original source. - mapping.source = previousSource + segment[1]; - previousSource += segment[1]; - - // Original line. - mapping.originalLine = previousOriginalLine + segment[2]; - previousOriginalLine = mapping.originalLine; - // Lines are stored 0-based - mapping.originalLine += 1; - - // Original column. - mapping.originalColumn = previousOriginalColumn + segment[3]; - previousOriginalColumn = mapping.originalColumn; - - if (segment.length > 4) { - // Original name. 
- mapping.name = previousName + segment[4]; - previousName += segment[4]; - } - } - - generatedMappings.push(mapping); - if (typeof mapping.originalLine === 'number') { - originalMappings.push(mapping); - } - } - } - - quickSort(generatedMappings, util.compareByGeneratedPositionsDeflated); - this.__generatedMappings = generatedMappings; - - quickSort(originalMappings, util.compareByOriginalPositions); - this.__originalMappings = originalMappings; - }; - - /** - * Find the mapping that best matches the hypothetical "needle" mapping that - * we are searching for in the given "haystack" of mappings. - */ - BasicSourceMapConsumer.prototype._findMapping = function SourceMapConsumer_findMapping(aNeedle, aMappings, aLineName, aColumnName, aComparator, aBias) { - // To return the position we are searching for, we must first find the - // mapping for the given position and then return the opposite position it - // points to. Because the mappings are sorted, we can use binary search to - // find the best mapping. - - if (aNeedle[aLineName] <= 0) { - throw new TypeError('Line must be greater than or equal to 1, got ' + aNeedle[aLineName]); - } - if (aNeedle[aColumnName] < 0) { - throw new TypeError('Column must be greater than or equal to 0, got ' + aNeedle[aColumnName]); - } - - return binarySearch.search(aNeedle, aMappings, aComparator, aBias); - }; - - /** - * Compute the last column for each generated mapping. The last column is - * inclusive. - */ - BasicSourceMapConsumer.prototype.computeColumnSpans = function SourceMapConsumer_computeColumnSpans() { - for (var index = 0; index < this._generatedMappings.length; ++index) { - var mapping = this._generatedMappings[index]; - - // Mappings do not contain a field for the last generated columnt. We - // can come up with an optimistic estimate, however, by assuming that - // mappings are contiguous (i.e. given two consecutive mappings, the - // first mapping ends where the second one starts). - if (index + 1 < this._generatedMappings.length) { - var nextMapping = this._generatedMappings[index + 1]; - - if (mapping.generatedLine === nextMapping.generatedLine) { - mapping.lastGeneratedColumn = nextMapping.generatedColumn - 1; - continue; - } - } - - // The last mapping for each line spans the entire line. - mapping.lastGeneratedColumn = Infinity; - } - }; - - /** - * Returns the original source, line, and column information for the generated - * source's line and column positions provided. The only argument is an object - * with the following properties: - * - * - line: The line number in the generated source. The line number - * is 1-based. - * - column: The column number in the generated source. The column - * number is 0-based. - * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or - * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the - * closest element that is smaller than or greater than the one we are - * searching for, respectively, if the exact element cannot be found. - * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. - * - * and an object is returned with the following properties: - * - * - source: The original source file, or null. - * - line: The line number in the original source, or null. The - * line number is 1-based. - * - column: The column number in the original source, or null. The - * column number is 0-based. - * - name: The original identifier, or null. 
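Given the input and output shapes documented above, a query looks like the following. The sketch assumes the SourceMapConsumer defined in this file is in scope and uses a tiny hand-written map (one segment, "AAAAA", mapping generated 1:0 to foo.js 1:0 with the name "bar"):

```js
// Usage sketch for originalPositionFor(); the map below is hypothetical.
const rawMap = {
  version: 3,
  file: "out.js",
  sources: ["foo.js"],
  names: ["bar"],
  // One segment with all-zero deltas: generated 1:0 -> foo.js 1:0, name "bar".
  mappings: "AAAAA"
};

const consumer = new SourceMapConsumer(rawMap);
console.log(consumer.originalPositionFor({ line: 1, column: 0 }));
// -> { source: 'foo.js', line: 1, column: 0, name: 'bar' }
```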
- */ - BasicSourceMapConsumer.prototype.originalPositionFor = function SourceMapConsumer_originalPositionFor(aArgs) { - var needle = { - generatedLine: util.getArg(aArgs, 'line'), - generatedColumn: util.getArg(aArgs, 'column') - }; - - var index = this._findMapping(needle, this._generatedMappings, "generatedLine", "generatedColumn", util.compareByGeneratedPositionsDeflated, util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)); - - if (index >= 0) { - var mapping = this._generatedMappings[index]; - - if (mapping.generatedLine === needle.generatedLine) { - var source = util.getArg(mapping, 'source', null); - if (source !== null) { - source = this._sources.at(source); - source = util.computeSourceURL(this.sourceRoot, source, this._sourceMapURL); - } - var name = util.getArg(mapping, 'name', null); - if (name !== null) { - name = this._names.at(name); - } - return { - source: source, - line: util.getArg(mapping, 'originalLine', null), - column: util.getArg(mapping, 'originalColumn', null), - name: name - }; - } - } - - return { - source: null, - line: null, - column: null, - name: null - }; - }; - - /** - * Return true if we have the source content for every source in the source - * map, false otherwise. - */ - BasicSourceMapConsumer.prototype.hasContentsOfAllSources = function BasicSourceMapConsumer_hasContentsOfAllSources() { - if (!this.sourcesContent) { - return false; - } - return this.sourcesContent.length >= this._sources.size() && !this.sourcesContent.some(function (sc) { - return sc == null; - }); - }; - - /** - * Returns the original source content. The only argument is the url of the - * original source file. Returns null if no original source content is - * available. - */ - BasicSourceMapConsumer.prototype.sourceContentFor = function SourceMapConsumer_sourceContentFor(aSource, nullOnMissing) { - if (!this.sourcesContent) { - return null; - } - - var index = this._findSourceIndex(aSource); - if (index >= 0) { - return this.sourcesContent[index]; - } - - var relativeSource = aSource; - if (this.sourceRoot != null) { - relativeSource = util.relative(this.sourceRoot, relativeSource); - } - - var url; - if (this.sourceRoot != null && (url = util.urlParse(this.sourceRoot))) { - // XXX: file:// URIs and absolute paths lead to unexpected behavior for - // many users. We can help them out when they expect file:// URIs to - // behave like it would if they were running a local HTTP server. See - // https://bugzilla.mozilla.org/show_bug.cgi?id=885597. - var fileUriAbsPath = relativeSource.replace(/^file:\/\//, ""); - if (url.scheme == "file" && this._sources.has(fileUriAbsPath)) { - return this.sourcesContent[this._sources.indexOf(fileUriAbsPath)]; - } - - if ((!url.path || url.path == "/") && this._sources.has("/" + relativeSource)) { - return this.sourcesContent[this._sources.indexOf("/" + relativeSource)]; - } - } - - // This function is used recursively from - // IndexedSourceMapConsumer.prototype.sourceContentFor. In that case, we - // don't want to throw if we can't find the source - we just want to - // return null, so we provide a flag to exit gracefully. - if (nullOnMissing) { - return null; - } else { - throw new Error('"' + relativeSource + '" is not in the SourceMap.'); - } - }; - - /** - * Returns the generated line and column information for the original source, - * line, and column positions provided. The only argument is an object with - * the following properties: - * - * - source: The filename of the original source. 
- * - line: The line number in the original source. The line number - * is 1-based. - * - column: The column number in the original source. The column - * number is 0-based. - * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or - * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the - * closest element that is smaller than or greater than the one we are - * searching for, respectively, if the exact element cannot be found. - * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. - * - * and an object is returned with the following properties: - * - * - line: The line number in the generated source, or null. The - * line number is 1-based. - * - column: The column number in the generated source, or null. - * The column number is 0-based. - */ - BasicSourceMapConsumer.prototype.generatedPositionFor = function SourceMapConsumer_generatedPositionFor(aArgs) { - var source = util.getArg(aArgs, 'source'); - source = this._findSourceIndex(source); - if (source < 0) { - return { - line: null, - column: null, - lastColumn: null - }; - } - - var needle = { - source: source, - originalLine: util.getArg(aArgs, 'line'), - originalColumn: util.getArg(aArgs, 'column') - }; - - var index = this._findMapping(needle, this._originalMappings, "originalLine", "originalColumn", util.compareByOriginalPositions, util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)); - - if (index >= 0) { - var mapping = this._originalMappings[index]; - - if (mapping.source === needle.source) { - return { - line: util.getArg(mapping, 'generatedLine', null), - column: util.getArg(mapping, 'generatedColumn', null), - lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) - }; - } - } - - return { - line: null, - column: null, - lastColumn: null - }; - }; - - exports.BasicSourceMapConsumer = BasicSourceMapConsumer; - - /** - * An IndexedSourceMapConsumer instance represents a parsed source map which - * we can query for information. It differs from BasicSourceMapConsumer in - * that it takes "indexed" source maps (i.e. ones with a "sections" field) as - * input. - * - * The first parameter is a raw source map (either as a JSON string, or already - * parsed to an object). According to the spec for indexed source maps, they - * have the following attributes: - * - * - version: Which version of the source map spec this map is following. - * - file: Optional. The generated file this source map is associated with. - * - sections: A list of section definitions. - * - * Each value under the "sections" field has two fields: - * - offset: The offset into the original specified at which this section - * begins to apply, defined as an object with a "line" and "column" - * field. - * - map: A source map definition. This source map could also be indexed, - * but doesn't have to be. - * - * Instead of the "map" field, it's also possible to have a "url" field - * specifying a URL to retrieve a source map from, but that's currently - * unsupported. - * - * Here's an example source map, taken from the source map spec[0], but - * modified to omit a section which uses the "url" field. - * - * { - * version : 3, - * file: "app.js", - * sections: [{ - * offset: {line:100, column:10}, - * map: { - * version : 3, - * file: "section.js", - * sources: ["foo.js", "bar.js"], - * names: ["src", "maps", "are", "fun"], - * mappings: "AAAA,E;;ABCDE;" - * } - * }], - * } - * - * The second parameter, if given, is a string whose value is the URL - * at which the source map was found. 
This URL is used to compute the - * sources array. - * - * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.535es3xeprgt - */ - function IndexedSourceMapConsumer(aSourceMap, aSourceMapURL) { - var sourceMap = aSourceMap; - if (typeof aSourceMap === 'string') { - sourceMap = util.parseSourceMapInput(aSourceMap); - } - - var version = util.getArg(sourceMap, 'version'); - var sections = util.getArg(sourceMap, 'sections'); - - if (version != this._version) { - throw new Error('Unsupported version: ' + version); - } - - this._sources = new ArraySet(); - this._names = new ArraySet(); - - var lastOffset = { - line: -1, - column: 0 - }; - this._sections = sections.map(function (s) { - if (s.url) { - // The url field will require support for asynchronicity. - // See https://github.com/mozilla/source-map/issues/16 - throw new Error('Support for url field in sections not implemented.'); - } - var offset = util.getArg(s, 'offset'); - var offsetLine = util.getArg(offset, 'line'); - var offsetColumn = util.getArg(offset, 'column'); - - if (offsetLine < lastOffset.line || offsetLine === lastOffset.line && offsetColumn < lastOffset.column) { - throw new Error('Section offsets must be ordered and non-overlapping.'); - } - lastOffset = offset; - - return { - generatedOffset: { - // The offset fields are 0-based, but we use 1-based indices when - // encoding/decoding from VLQ. - generatedLine: offsetLine + 1, - generatedColumn: offsetColumn + 1 - }, - consumer: new SourceMapConsumer(util.getArg(s, 'map'), aSourceMapURL) - }; - }); - } - - IndexedSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype); - IndexedSourceMapConsumer.prototype.constructor = SourceMapConsumer; - - /** - * The version of the source mapping spec that we are consuming. - */ - IndexedSourceMapConsumer.prototype._version = 3; - - /** - * The list of original sources. - */ - Object.defineProperty(IndexedSourceMapConsumer.prototype, 'sources', { - get: function () { - var sources = []; - for (var i = 0; i < this._sections.length; i++) { - for (var j = 0; j < this._sections[i].consumer.sources.length; j++) { - sources.push(this._sections[i].consumer.sources[j]); - } - } - return sources; - } - }); - - /** - * Returns the original source, line, and column information for the generated - * source's line and column positions provided. The only argument is an object - * with the following properties: - * - * - line: The line number in the generated source. The line number - * is 1-based. - * - column: The column number in the generated source. The column - * number is 0-based. - * - * and an object is returned with the following properties: - * - * - source: The original source file, or null. - * - line: The line number in the original source, or null. The - * line number is 1-based. - * - column: The column number in the original source, or null. The - * column number is 0-based. - * - name: The original identifier, or null. - */ - IndexedSourceMapConsumer.prototype.originalPositionFor = function IndexedSourceMapConsumer_originalPositionFor(aArgs) { - var needle = { - generatedLine: util.getArg(aArgs, 'line'), - generatedColumn: util.getArg(aArgs, 'column') - }; - - // Find the section containing the generated position we're trying to map - // to an original position. 
- var sectionIndex = binarySearch.search(needle, this._sections, function (needle, section) { - var cmp = needle.generatedLine - section.generatedOffset.generatedLine; - if (cmp) { - return cmp; - } - - return needle.generatedColumn - section.generatedOffset.generatedColumn; - }); - var section = this._sections[sectionIndex]; - - if (!section) { - return { - source: null, - line: null, - column: null, - name: null - }; - } - - return section.consumer.originalPositionFor({ - line: needle.generatedLine - (section.generatedOffset.generatedLine - 1), - column: needle.generatedColumn - (section.generatedOffset.generatedLine === needle.generatedLine ? section.generatedOffset.generatedColumn - 1 : 0), - bias: aArgs.bias - }); - }; - - /** - * Return true if we have the source content for every source in the source - * map, false otherwise. - */ - IndexedSourceMapConsumer.prototype.hasContentsOfAllSources = function IndexedSourceMapConsumer_hasContentsOfAllSources() { - return this._sections.every(function (s) { - return s.consumer.hasContentsOfAllSources(); - }); - }; - - /** - * Returns the original source content. The only argument is the url of the - * original source file. Returns null if no original source content is - * available. - */ - IndexedSourceMapConsumer.prototype.sourceContentFor = function IndexedSourceMapConsumer_sourceContentFor(aSource, nullOnMissing) { - for (var i = 0; i < this._sections.length; i++) { - var section = this._sections[i]; - - var content = section.consumer.sourceContentFor(aSource, true); - if (content) { - return content; - } - } - if (nullOnMissing) { - return null; - } else { - throw new Error('"' + aSource + '" is not in the SourceMap.'); - } - }; - - /** - * Returns the generated line and column information for the original source, - * line, and column positions provided. The only argument is an object with - * the following properties: - * - * - source: The filename of the original source. - * - line: The line number in the original source. The line number - * is 1-based. - * - column: The column number in the original source. The column - * number is 0-based. - * - * and an object is returned with the following properties: - * - * - line: The line number in the generated source, or null. The - * line number is 1-based. - * - column: The column number in the generated source, or null. - * The column number is 0-based. - */ - IndexedSourceMapConsumer.prototype.generatedPositionFor = function IndexedSourceMapConsumer_generatedPositionFor(aArgs) { - for (var i = 0; i < this._sections.length; i++) { - var section = this._sections[i]; - - // Only consider this section if the requested source is in the list of - // sources of the consumer. - if (section.consumer._findSourceIndex(util.getArg(aArgs, 'source')) === -1) { - continue; - } - var generatedPosition = section.consumer.generatedPositionFor(aArgs); - if (generatedPosition) { - var ret = { - line: generatedPosition.line + (section.generatedOffset.generatedLine - 1), - column: generatedPosition.column + (section.generatedOffset.generatedLine === generatedPosition.line ? section.generatedOffset.generatedColumn - 1 : 0) - }; - return ret; - } - } - - return { - line: null, - column: null - }; - }; - - /** - * Parse the mappings in a string in to a data structure which we can easily - * query (the ordered arrays in the `this.__generatedMappings` and - * `this.__originalMappings` properties). 
- */ - IndexedSourceMapConsumer.prototype._parseMappings = function IndexedSourceMapConsumer_parseMappings(aStr, aSourceRoot) { - this.__generatedMappings = []; - this.__originalMappings = []; - for (var i = 0; i < this._sections.length; i++) { - var section = this._sections[i]; - var sectionMappings = section.consumer._generatedMappings; - for (var j = 0; j < sectionMappings.length; j++) { - var mapping = sectionMappings[j]; - - var source = section.consumer._sources.at(mapping.source); - source = util.computeSourceURL(section.consumer.sourceRoot, source, this._sourceMapURL); - this._sources.add(source); - source = this._sources.indexOf(source); - - var name = null; - if (mapping.name) { - name = section.consumer._names.at(mapping.name); - this._names.add(name); - name = this._names.indexOf(name); - } - - // The mappings coming from the consumer for the section have - // generated positions relative to the start of the section, so we - // need to offset them to be relative to the start of the concatenated - // generated file. - var adjustedMapping = { - source: source, - generatedLine: mapping.generatedLine + (section.generatedOffset.generatedLine - 1), - generatedColumn: mapping.generatedColumn + (section.generatedOffset.generatedLine === mapping.generatedLine ? section.generatedOffset.generatedColumn - 1 : 0), - originalLine: mapping.originalLine, - originalColumn: mapping.originalColumn, - name: name - }; - - this.__generatedMappings.push(adjustedMapping); - if (typeof adjustedMapping.originalLine === 'number') { - this.__originalMappings.push(adjustedMapping); - } - } - } - - quickSort(this.__generatedMappings, util.compareByGeneratedPositionsDeflated); - quickSort(this.__originalMappings, util.compareByOriginalPositions); - }; - - exports.IndexedSourceMapConsumer = IndexedSourceMapConsumer; - -/***/ }, -/* 18 */ -/***/ function(module, exports) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - exports.GREATEST_LOWER_BOUND = 1; - exports.LEAST_UPPER_BOUND = 2; - - /** - * Recursive implementation of binary search. - * - * @param aLow Indices here and lower do not contain the needle. - * @param aHigh Indices here and higher do not contain the needle. - * @param aNeedle The element being searched for. - * @param aHaystack The non-empty array being searched. - * @param aCompare Function which takes two elements and returns -1, 0, or 1. - * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or - * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the - * closest element that is smaller than or greater than the one we are - * searching for, respectively, if the exact element cannot be found. - */ - function recursiveSearch(aLow, aHigh, aNeedle, aHaystack, aCompare, aBias) { - // This function terminates when one of the following is true: - // - // 1. We find the exact element we are looking for. - // - // 2. We did not find the exact element, but we can return the index of - // the next-closest element. - // - // 3. We did not find the exact element, and there is no next-closest - // element than the one we are searching for, so we return -1. - var mid = Math.floor((aHigh - aLow) / 2) + aLow; - var cmp = aCompare(aNeedle, aHaystack[mid], true); - if (cmp === 0) { - // Found the element we are looking for. - return mid; - } else if (cmp > 0) { - // Our needle is greater than aHaystack[mid]. 
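The bias constants documented above decide what the search returns when there is no exact hit. A usage sketch for the `search` export that follows; the deep require path is an assumption based on the standalone source-map package layout (inside this bundle the module is only reachable through webpack ids):

```js
// Bias semantics: GREATEST_LOWER_BOUND (the default) returns the closest
// smaller element on a miss, LEAST_UPPER_BOUND the closest greater one.
const binarySearch = require("source-map/lib/binary-search");

const haystack = [10, 20, 30];
const compare = (needle, element) => needle - element;

binarySearch.search(25, haystack, compare);
// -> 1 (element 20, the greatest lower bound of 25)

binarySearch.search(25, haystack, compare, binarySearch.LEAST_UPPER_BOUND);
// -> 2 (element 30, the least upper bound of 25)

binarySearch.search(5, haystack, compare);
// -> -1 (no element smaller than 5 exists)
```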
- if (aHigh - mid > 1) { - // The element is in the upper half. - return recursiveSearch(mid, aHigh, aNeedle, aHaystack, aCompare, aBias); - } - - // The exact needle element was not found in this haystack. Determine if - // we are in termination case (3) or (2) and return the appropriate thing. - if (aBias == exports.LEAST_UPPER_BOUND) { - return aHigh < aHaystack.length ? aHigh : -1; - } else { - return mid; - } - } else { - // Our needle is less than aHaystack[mid]. - if (mid - aLow > 1) { - // The element is in the lower half. - return recursiveSearch(aLow, mid, aNeedle, aHaystack, aCompare, aBias); - } - - // we are in termination case (3) or (2) and return the appropriate thing. - if (aBias == exports.LEAST_UPPER_BOUND) { - return mid; - } else { - return aLow < 0 ? -1 : aLow; - } - } - } - - /** - * This is an implementation of binary search which will always try and return - * the index of the closest element if there is no exact hit. This is because - * mappings between original and generated line/col pairs are single points, - * and there is an implicit region between each of them, so a miss just means - * that you aren't on the very start of a region. - * - * @param aNeedle The element you are looking for. - * @param aHaystack The array that is being searched. - * @param aCompare A function which takes the needle and an element in the - * array and returns -1, 0, or 1 depending on whether the needle is less - * than, equal to, or greater than the element, respectively. - * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or - * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the - * closest element that is smaller than or greater than the one we are - * searching for, respectively, if the exact element cannot be found. - * Defaults to 'binarySearch.GREATEST_LOWER_BOUND'. - */ - exports.search = function search(aNeedle, aHaystack, aCompare, aBias) { - if (aHaystack.length === 0) { - return -1; - } - - var index = recursiveSearch(-1, aHaystack.length, aNeedle, aHaystack, aCompare, aBias || exports.GREATEST_LOWER_BOUND); - if (index < 0) { - return -1; - } - - // We have found either the exact element, or the next-closest element than - // the one we are searching for. However, there may be more than one such - // element. Make sure we always return the smallest of these. - while (index - 1 >= 0) { - if (aCompare(aHaystack[index], aHaystack[index - 1], true) !== 0) { - break; - } - --index; - } - - return index; - }; - -/***/ }, -/* 19 */ -/***/ function(module, exports) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - // It turns out that some (most?) JavaScript engines don't self-host - // `Array.prototype.sort`. This makes sense because C++ will likely remain - // faster than JS when doing raw CPU-intensive sorting. However, when using a - // custom comparator function, calling back and forth between the VM's C++ and - // JIT'd JS is rather slow *and* loses JIT type information, resulting in - // worse generated code for the comparator function than would be optimal. In - // fact, when sorting with a comparator, these costs outweigh the benefits of - // sorting in C++. By using our own JS-implemented Quick Sort (below), we get - // a ~3500ms mean speed-up in `bench/bench.html`. - - /** - * Swap the elements indexed by `x` and `y` in the array `ary`. 
- * - * @param {Array} ary - * The array. - * @param {Number} x - * The index of the first item. - * @param {Number} y - * The index of the second item. - */ - function swap(ary, x, y) { - var temp = ary[x]; - ary[x] = ary[y]; - ary[y] = temp; - } - - /** - * Returns a random integer within the range `low .. high` inclusive. - * - * @param {Number} low - * The lower bound on the range. - * @param {Number} high - * The upper bound on the range. - */ - function randomIntInRange(low, high) { - return Math.round(low + Math.random() * (high - low)); - } - - /** - * The Quick Sort algorithm. - * - * @param {Array} ary - * An array to sort. - * @param {function} comparator - * Function to use to compare two items. - * @param {Number} p - * Start index of the array - * @param {Number} r - * End index of the array - */ - function doQuickSort(ary, comparator, p, r) { - // If our lower bound is less than our upper bound, we (1) partition the - // array into two pieces and (2) recurse on each half. If it is not, this is - // the empty array and our base case. - - if (p < r) { - // (1) Partitioning. - // - // The partitioning chooses a pivot between `p` and `r` and moves all - // elements that are less than or equal to the pivot to the before it, and - // all the elements that are greater than it after it. The effect is that - // once partition is done, the pivot is in the exact place it will be when - // the array is put in sorted order, and it will not need to be moved - // again. This runs in O(n) time. - - // Always choose a random pivot so that an input array which is reverse - // sorted does not cause O(n^2) running time. - var pivotIndex = randomIntInRange(p, r); - var i = p - 1; - - swap(ary, pivotIndex, r); - var pivot = ary[r]; - - // Immediately after `j` is incremented in this loop, the following hold - // true: - // - // * Every element in `ary[p .. i]` is less than or equal to the pivot. - // - // * Every element in `ary[i+1 .. j-1]` is greater than the pivot. - for (var j = p; j < r; j++) { - if (comparator(ary[j], pivot) <= 0) { - i += 1; - swap(ary, i, j); - } - } - - swap(ary, i + 1, j); - var q = i + 1; - - // (2) Recurse on each half. - - doQuickSort(ary, comparator, p, q - 1); - doQuickSort(ary, comparator, q + 1, r); - } - } - - /** - * Sort the given array in-place with the given comparator function. - * - * @param {Array} ary - * An array to sort. - * @param {function} comparator - * Function to use to compare two items. - */ - exports.quickSort = function (ary, comparator) { - doQuickSort(ary, comparator, 0, ary.length - 1); - }; - -/***/ }, -/* 20 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var SourceMapGenerator = __webpack_require__(11).SourceMapGenerator; - var util = __webpack_require__(14); - - // Matches a Windows-style `\r\n` newline or a `\n` newline used by all other - // operating systems these days (capturing the result). - var REGEX_NEWLINE = /(\r?\n)/; - - // Newline character code for charCodeAt() comparisons - var NEWLINE_CODE = 10; - - // Private symbol for identifying `SourceNode`s when multiple versions of - // the source-map library are loaded. This MUST NOT CHANGE across - // versions! 
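A usage sketch for the in-place `quickSort` exported above, the routine the consumer uses to order its mapping arrays. The deep require path mirrors the standalone source-map package layout and is an assumption; inside this bundle the module only has a numeric webpack id:

```js
const { quickSort } = require("source-map/lib/quick-sort");

const mappings = [
  { generatedLine: 2, generatedColumn: 0 },
  { generatedLine: 1, generatedColumn: 4 },
  { generatedLine: 1, generatedColumn: 0 }
];

// Sort by generated position, the same shape of comparator the consumer uses.
quickSort(mappings, (a, b) =>
  a.generatedLine - b.generatedLine || a.generatedColumn - b.generatedColumn);

console.log(mappings.map(m => [m.generatedLine, m.generatedColumn]));
// -> [ [1, 0], [1, 4], [2, 0] ]
```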
- var isSourceNode = "$$$isSourceNode$$$"; - - /** - * SourceNodes provide a way to abstract over interpolating/concatenating - * snippets of generated JavaScript source code while maintaining the line and - * column information associated with the original source code. - * - * @param aLine The original line number. - * @param aColumn The original column number. - * @param aSource The original source's filename. - * @param aChunks Optional. An array of strings which are snippets of - * generated JS, or other SourceNodes. - * @param aName The original identifier. - */ - function SourceNode(aLine, aColumn, aSource, aChunks, aName) { - this.children = []; - this.sourceContents = {}; - this.line = aLine == null ? null : aLine; - this.column = aColumn == null ? null : aColumn; - this.source = aSource == null ? null : aSource; - this.name = aName == null ? null : aName; - this[isSourceNode] = true; - if (aChunks != null) this.add(aChunks); - } - - /** - * Creates a SourceNode from generated code and a SourceMapConsumer. - * - * @param aGeneratedCode The generated code - * @param aSourceMapConsumer The SourceMap for the generated code - * @param aRelativePath Optional. The path that relative sources in the - * SourceMapConsumer should be relative to. - */ - SourceNode.fromStringWithSourceMap = function SourceNode_fromStringWithSourceMap(aGeneratedCode, aSourceMapConsumer, aRelativePath) { - // The SourceNode we want to fill with the generated code - // and the SourceMap - var node = new SourceNode(); - - // All even indices of this array are one line of the generated code, - // while all odd indices are the newlines between two adjacent lines - // (since `REGEX_NEWLINE` captures its match). - // Processed fragments are accessed by calling `shiftNextLine`. - var remainingLines = aGeneratedCode.split(REGEX_NEWLINE); - var remainingLinesIndex = 0; - var shiftNextLine = function () { - var lineContents = getNextLine(); - // The last line of a file might not have a newline. - var newLine = getNextLine() || ""; - return lineContents + newLine; - - function getNextLine() { - return remainingLinesIndex < remainingLines.length ? remainingLines[remainingLinesIndex++] : undefined; - } - }; - - // We need to remember the position of "remainingLines" - var lastGeneratedLine = 1, - lastGeneratedColumn = 0; - - // The generate SourceNodes we need a code range. - // To extract it current and last mapping is used. - // Here we store the last mapping. - var lastMapping = null; - - aSourceMapConsumer.eachMapping(function (mapping) { - if (lastMapping !== null) { - // We add the code from "lastMapping" to "mapping": - // First check if there is a new line in between. - if (lastGeneratedLine < mapping.generatedLine) { - // Associate first line with "lastMapping" - addMappingWithCode(lastMapping, shiftNextLine()); - lastGeneratedLine++; - lastGeneratedColumn = 0; - // The remaining code is added without mapping - } else { - // There is no new line in between. 
- // Associate the code between "lastGeneratedColumn" and - // "mapping.generatedColumn" with "lastMapping" - var nextLine = remainingLines[remainingLinesIndex] || ''; - var code = nextLine.substr(0, mapping.generatedColumn - lastGeneratedColumn); - remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn - lastGeneratedColumn); - lastGeneratedColumn = mapping.generatedColumn; - addMappingWithCode(lastMapping, code); - // No more remaining code, continue - lastMapping = mapping; - return; - } - } - // We add the generated code until the first mapping - // to the SourceNode without any mapping. - // Each line is added as separate string. - while (lastGeneratedLine < mapping.generatedLine) { - node.add(shiftNextLine()); - lastGeneratedLine++; - } - if (lastGeneratedColumn < mapping.generatedColumn) { - var nextLine = remainingLines[remainingLinesIndex] || ''; - node.add(nextLine.substr(0, mapping.generatedColumn)); - remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn); - lastGeneratedColumn = mapping.generatedColumn; - } - lastMapping = mapping; - }, this); - // We have processed all mappings. - if (remainingLinesIndex < remainingLines.length) { - if (lastMapping) { - // Associate the remaining code in the current line with "lastMapping" - addMappingWithCode(lastMapping, shiftNextLine()); - } - // and add the remaining lines without any mapping - node.add(remainingLines.splice(remainingLinesIndex).join("")); - } - - // Copy sourcesContent into SourceNode - aSourceMapConsumer.sources.forEach(function (sourceFile) { - var content = aSourceMapConsumer.sourceContentFor(sourceFile); - if (content != null) { - if (aRelativePath != null) { - sourceFile = util.join(aRelativePath, sourceFile); - } - node.setSourceContent(sourceFile, content); - } - }); - - return node; - - function addMappingWithCode(mapping, code) { - if (mapping === null || mapping.source === undefined) { - node.add(code); - } else { - var source = aRelativePath ? util.join(aRelativePath, mapping.source) : mapping.source; - node.add(new SourceNode(mapping.originalLine, mapping.originalColumn, source, code, mapping.name)); - } - } - }; - - /** - * Add a chunk of generated JS to this source node. - * - * @param aChunk A string snippet of generated JS code, another instance of - * SourceNode, or an array where each member is one of those things. - */ - SourceNode.prototype.add = function SourceNode_add(aChunk) { - if (Array.isArray(aChunk)) { - aChunk.forEach(function (chunk) { - this.add(chunk); - }, this); - } else if (aChunk[isSourceNode] || typeof aChunk === "string") { - if (aChunk) { - this.children.push(aChunk); - } - } else { - throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk); - } - return this; - }; - - /** - * Add a chunk of generated JS to the beginning of this source node. - * - * @param aChunk A string snippet of generated JS code, another instance of - * SourceNode, or an array where each member is one of those things. - */ - SourceNode.prototype.prepend = function SourceNode_prepend(aChunk) { - if (Array.isArray(aChunk)) { - for (var i = aChunk.length - 1; i >= 0; i--) { - this.prepend(aChunk[i]); - } - } else if (aChunk[isSourceNode] || typeof aChunk === "string") { - this.children.unshift(aChunk); - } else { - throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. 
Got " + aChunk); - } - return this; - }; - - /** - * Walk over the tree of JS snippets in this node and its children. The - * walking function is called once for each snippet of JS and is passed that - * snippet and the its original associated source's line/column location. - * - * @param aFn The traversal function. - */ - SourceNode.prototype.walk = function SourceNode_walk(aFn) { - var chunk; - for (var i = 0, len = this.children.length; i < len; i++) { - chunk = this.children[i]; - if (chunk[isSourceNode]) { - chunk.walk(aFn); - } else { - if (chunk !== '') { - aFn(chunk, { source: this.source, - line: this.line, - column: this.column, - name: this.name }); - } - } - } - }; - - /** - * Like `String.prototype.join` except for SourceNodes. Inserts `aStr` between - * each of `this.children`. - * - * @param aSep The separator. - */ - SourceNode.prototype.join = function SourceNode_join(aSep) { - var newChildren; - var i; - var len = this.children.length; - if (len > 0) { - newChildren = []; - for (i = 0; i < len - 1; i++) { - newChildren.push(this.children[i]); - newChildren.push(aSep); - } - newChildren.push(this.children[i]); - this.children = newChildren; - } - return this; - }; - - /** - * Call String.prototype.replace on the very right-most source snippet. Useful - * for trimming whitespace from the end of a source node, etc. - * - * @param aPattern The pattern to replace. - * @param aReplacement The thing to replace the pattern with. - */ - SourceNode.prototype.replaceRight = function SourceNode_replaceRight(aPattern, aReplacement) { - var lastChild = this.children[this.children.length - 1]; - if (lastChild[isSourceNode]) { - lastChild.replaceRight(aPattern, aReplacement); - } else if (typeof lastChild === 'string') { - this.children[this.children.length - 1] = lastChild.replace(aPattern, aReplacement); - } else { - this.children.push(''.replace(aPattern, aReplacement)); - } - return this; - }; - - /** - * Set the source content for a source file. This will be added to the SourceMapGenerator - * in the sourcesContent field. - * - * @param aSourceFile The filename of the source file - * @param aSourceContent The content of the source file - */ - SourceNode.prototype.setSourceContent = function SourceNode_setSourceContent(aSourceFile, aSourceContent) { - this.sourceContents[util.toSetString(aSourceFile)] = aSourceContent; - }; - - /** - * Walk over the tree of SourceNodes. The walking function is called for each - * source file content and is passed the filename and source content. - * - * @param aFn The traversal function. - */ - SourceNode.prototype.walkSourceContents = function SourceNode_walkSourceContents(aFn) { - for (var i = 0, len = this.children.length; i < len; i++) { - if (this.children[i][isSourceNode]) { - this.children[i].walkSourceContents(aFn); - } - } - - var sources = Object.keys(this.sourceContents); - for (var i = 0, len = sources.length; i < len; i++) { - aFn(util.fromSetString(sources[i]), this.sourceContents[sources[i]]); - } - }; - - /** - * Return the string representation of this source node. Walks over the tree - * and concatenates all the various snippets together to one string. - */ - SourceNode.prototype.toString = function SourceNode_toString() { - var str = ""; - this.walk(function (chunk) { - str += chunk; - }); - return str; - }; - - /** - * Returns the string representation of this source node along with a source - * map. 
- */ - SourceNode.prototype.toStringWithSourceMap = function SourceNode_toStringWithSourceMap(aArgs) { - var generated = { - code: "", - line: 1, - column: 0 - }; - var map = new SourceMapGenerator(aArgs); - var sourceMappingActive = false; - var lastOriginalSource = null; - var lastOriginalLine = null; - var lastOriginalColumn = null; - var lastOriginalName = null; - this.walk(function (chunk, original) { - generated.code += chunk; - if (original.source !== null && original.line !== null && original.column !== null) { - if (lastOriginalSource !== original.source || lastOriginalLine !== original.line || lastOriginalColumn !== original.column || lastOriginalName !== original.name) { - map.addMapping({ - source: original.source, - original: { - line: original.line, - column: original.column - }, - generated: { - line: generated.line, - column: generated.column - }, - name: original.name - }); - } - lastOriginalSource = original.source; - lastOriginalLine = original.line; - lastOriginalColumn = original.column; - lastOriginalName = original.name; - sourceMappingActive = true; - } else if (sourceMappingActive) { - map.addMapping({ - generated: { - line: generated.line, - column: generated.column - } - }); - lastOriginalSource = null; - sourceMappingActive = false; - } - for (var idx = 0, length = chunk.length; idx < length; idx++) { - if (chunk.charCodeAt(idx) === NEWLINE_CODE) { - generated.line++; - generated.column = 0; - // Mappings end at eol - if (idx + 1 === length) { - lastOriginalSource = null; - sourceMappingActive = false; - } else if (sourceMappingActive) { - map.addMapping({ - source: original.source, - original: { - line: original.line, - column: original.column - }, - generated: { - line: generated.line, - column: generated.column - }, - name: original.name - }); - } - } else { - generated.column++; - } - } - }); - this.walkSourceContents(function (sourceFile, sourceContent) { - map.setSourceContent(sourceFile, sourceContent); - }); - - return { code: generated.code, map: map }; - }; - - exports.SourceNode = SourceNode; - -/***/ }, -/* 21 */ -/***/ function(module, exports) { - - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - - function assert(condition, message) { - if (!condition) { - throw new Error(`Assertion failure: ${message}`); - } - } - - module.exports = assert; - -/***/ }, -/* 22 */ -/***/ function(module, exports, __webpack_require__) { - - let _resolveAndFetch = (() => { - var _ref = _asyncToGenerator(function* (generatedSource) { - // Fetch the sourcemap over the network and create it. - const { sourceMapURL, baseURL } = _resolveSourceMapURL(generatedSource); - - const fetched = yield networkRequest(sourceMapURL, { loadFromCache: false }); - - // Create the source map and fix it up. 
- let map = new SourceMapConsumer(fetched.content, baseURL); - if (generatedSource.isWasm) { - map = new WasmRemap(map); - } - - return map; - }); - - return function _resolveAndFetch(_x) { - return _ref.apply(this, arguments); - }; - })(); - - function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; } - - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - - const { networkRequest } = __webpack_require__(6); - const { getSourceMap, setSourceMap } = __webpack_require__(23); - const { WasmRemap } = __webpack_require__(24); - const { SourceMapConsumer } = __webpack_require__(10); - - function _resolveSourceMapURL(source) { - const { url = "", sourceMapURL = "" } = source; - - if (!url) { - // If the source doesn't have a URL, don't resolve anything. - return { sourceMapURL, baseURL: sourceMapURL }; - } - - const resolvedURL = new URL(sourceMapURL, url); - const resolvedString = resolvedURL.toString(); - - let baseURL = resolvedString; - // When the sourceMap is a data: URL, fall back to using the - // source's URL, if possible. - if (resolvedURL.protocol == "data:") { - baseURL = url; - } - - return { sourceMapURL: resolvedString, baseURL }; - } - - function fetchSourceMap(generatedSource) { - const existingRequest = getSourceMap(generatedSource.id); - - // If it has already been requested, return the request. Make sure - // to do this even if sourcemapping is turned off, because - // pretty-printing uses sourcemaps. - // - // An important behavior here is that if it's in the middle of - // requesting it, all subsequent calls will block on the initial - // request. - if (existingRequest) { - return existingRequest; - } - - if (!generatedSource.sourceMapURL) { - return Promise.resolve(null); - } - - // Fire off the request, set it in the cache, and return it. - const req = _resolveAndFetch(generatedSource); - // Make sure the cached promise does not reject, because we only - // want to report the error once. - setSourceMap(generatedSource.id, req.catch(() => null)); - return req; - } - - module.exports = { fetchSourceMap }; - -/***/ }, -/* 23 */ -/***/ function(module, exports) { - - - let sourceMapRequests = new Map(); - - function clearSourceMaps() { - sourceMapRequests.clear(); - } - - function getSourceMap(generatedSourceId) { - return sourceMapRequests.get(generatedSourceId); - } - - function setSourceMap(generatedId, request) { - sourceMapRequests.set(generatedId, request); - } - - module.exports = { - clearSourceMaps, - getSourceMap, - setSourceMap - }; - -/***/ }, -/* 24 */ -/***/ function(module, exports) { - - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - - "use strict"; - - /** - * SourceMapConsumer for WebAssembly source maps. It transposes columns with - * lines, which allows mapping data to be used with SpiderMonkey Debugger API. 
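Before the class itself (below), a sketch of what this transposition means for callers. `remapOriginalPositionFor` and `stubWasmConsumer` are hypothetical names for the sketch only; the real underlying object is a SourceMapConsumer whose wasm map encodes byte offsets as columns on a single generated line:

```js
const stubWasmConsumer = {
  // Pretend byte offset 0x10 maps to foo.c line 7.
  originalPositionFor({ line, column }) {
    return line === 1 && column === 0x10
      ? { source: "foo.c", line: 7, column: 0, name: null }
      : { source: null, line: null, column: null, name: null };
  }
};

// Restates what WasmRemap.originalPositionFor (below) does: debugger callers
// pass the byte offset as `line`; the underlying map expects it as `column`
// on generated line 1.
function remapOriginalPositionFor(map, generatedPosition) {
  return map.originalPositionFor({
    line: 1,
    column: generatedPosition.line,
    bias: generatedPosition.bias
  });
}

console.log(remapOriginalPositionFor(stubWasmConsumer, { line: 0x10 }));
// -> { source: "foo.c", line: 7, column: 0, name: null }
```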
- */ - - class WasmRemap { - /** - * @param map SourceMapConsumer - */ - constructor(map) { - this._map = map; - this.version = map.version; - this.file = map.file; - this._computeColumnSpans = false; - } - - get sources() { - return this._map.sources; - } - - get sourceRoot() { - return this._map.sourceRoot; - } - - get names() { - return this._map.names; - } - - get sourcesContent() { - return this._map.sourcesContent; - } - - get mappings() { - throw new Error("not supported"); - } - - computeColumnSpans() { - this._computeColumnSpans = true; - } - - originalPositionFor(generatedPosition) { - let result = this._map.originalPositionFor({ - line: 1, - column: generatedPosition.line, - bias: generatedPosition.bias - }); - return result; - } - - _remapGeneratedPosition(position) { - let generatedPosition = { - line: position.column, - column: 0 - }; - if (this._computeColumnSpans) { - generatedPosition.lastColumn = Infinity; - } - return generatedPosition; - } - - generatedPositionFor(originalPosition) { - let position = this._map.generatedPositionFor(originalPosition); - return this._remapGeneratedPosition(position); - } - - allGeneratedPositionsFor(originalPosition) { - let positions = this._map.allGeneratedPositionsFor(originalPosition); - return positions.map(position => { - return this._remapGeneratedPosition(position); - }); - } - - hasContentsOfAllSources() { - return this._map.hasContentsOfAllSources(); - } - - sourceContentFor(source, returnNullOnMissing) { - return this._map.sourceContentFor(source, returnNullOnMissing); - } - - eachMapping(callback, context, order) { - this._map.eachMapping(entry => { - let { - source, - generatedColumn, - originalLine, - originalColumn, - name - } = entry; - callback({ - source, - generatedLine: generatedColumn, - generatedColumn: 0, - originalLine, - originalColumn, - name - }); - }, context, order); - } - } - - exports.WasmRemap = WasmRemap; - -/***/ }, -/* 25 */, -/* 26 */, -/* 27 */, -/* 28 */, -/* 29 */, -/* 30 */, -/* 31 */, -/* 32 */, -/* 33 */, -/* 34 */, -/* 35 */, -/* 36 */, -/* 37 */, -/* 38 */, -/* 39 */, -/* 40 */ -/***/ function(module, exports, __webpack_require__) { - - /* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - - const { SourceMapConsumer } = __webpack_require__(41); - - function comparePositions(a, b) { - if (a.line === b.line) { - return a.column - b.column; - } else { - return a.line - b.line; - } - } - - function getOriginalPositionName(map, generatedPosition) { - const originalPosition = map.originalPositionFor(generatedPosition); - if (!originalPosition || !originalPosition.name) { - return null; - } - // Verify if it's the exact position. - const originalPositionTest = map.originalPositionFor({ - line: generatedPosition.line, - column: generatedPosition.column, - bias: SourceMapConsumer.LEAST_UPPER_BOUND - }); - if (!originalPositionTest || originalPositionTest.column !== originalPosition.column || originalPositionTest.line !== originalPosition.line) { - return null; - } - return originalPosition.name; - } - - // The `scope` has bindings that contains positions/references to the - // generated names (these may has different original name). Going over all - // these names and positions, to check what original name is listed there. 
- // To make things more interesting the same generated name can potentially - // be used for different original names, so building trackedBindings for all - // names and positions. - // Example of MappedScopeBindings: - // ``` - // { type: 'block', - // bindings: { - // 'current': 't', - // 'start': 't', - // 'end': 'e' - // } } - // ``` - - function applyBestNamePosition(trackedBindings, generatedName, nameLocation, map, currentPosition) { - const namePosition = { - line: nameLocation.line, - column: nameLocation.column || 0 - }; - const originalName = getOriginalPositionName(map, namePosition); - if (!originalName) { - return trackedBindings; - } - - let trackedBinding = trackedBindings[originalName]; - if (!trackedBinding) { - // No best position was found for original name yet -- using first - // found position. - trackedBindings[originalName] = { - generatedName, - bestPosition: namePosition - }; - } else if (comparePositions(namePosition, currentPosition) <= 0) { - // The position is before location, the more recient will have needed - // name assignment, also discard all positions past currentPosition. - if (comparePositions(trackedBinding.bestPosition, namePosition) < 0 || comparePositions(trackedBinding.bestPosition, currentPosition) > 0) { - trackedBinding.generatedName = generatedName; - trackedBinding.bestPosition = namePosition; - } - } else { - // The position is past location, need to select more recient after - // specified location, but only if "before" bestPosition is not found. - if (comparePositions(trackedBinding.bestPosition, namePosition) > 0 && comparePositions(trackedBinding.bestPosition, currentPosition) < 0) { - trackedBinding.generatedName = generatedName; - trackedBinding.bestPosition = namePosition; - } - } - return trackedBindings; - } - - function applyTrackedBindings(trackedBindings, generatedName, locations, map, currentPosition) { - return locations.reduce((trackedBindings, nameLocation) => applyBestNamePosition(trackedBindings, generatedName, nameLocation, map, currentPosition), trackedBindings); - } - - /** - * Finds the scope binding information at the source map based on locations and - * generated source scopes. The generated source scopes contain variables names - * and all their references in the generated source. This information is merged - * with the source map original names information and returned as result. - */ - function getLocationScopes(map, generatedSourceScopes, location) { - const currentPosition = { - line: location.line, - column: location.column || 0 - }; - - return generatedSourceScopes.map(scope => { - const trackedBindings = Object.keys(scope.bindings).reduce((trackedBindings, generatedName) => applyTrackedBindings(trackedBindings, generatedName, scope.bindings[generatedName], map, currentPosition), Object.create(null)); - - const bindings = Object.keys(trackedBindings).reduce((bindings, name) => { - const { generatedName } = trackedBindings[name]; - // No need to record the binding if generated name matches original one. - if (name !== generatedName) { - bindings[name] = generatedName; - } - return bindings; - }, Object.create(null)); - return { - type: scope.type, - bindings - }; - }); - } - - module.exports = { - getLocationScopes - }; - -/***/ }, -/* 41 */ -/***/ function(module, exports, __webpack_require__) { - - /* - * Copyright 2009-2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. 
See LICENSE.txt or: - * http://opensource.org/licenses/BSD-3-Clause - */ - exports.SourceMapGenerator = __webpack_require__(42).SourceMapGenerator; - exports.SourceMapConsumer = __webpack_require__(48).SourceMapConsumer; - exports.SourceNode = __webpack_require__(51).SourceNode; - -/***/ }, -/* 42 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var base64VLQ = __webpack_require__(43); - var util = __webpack_require__(45); - var ArraySet = __webpack_require__(46).ArraySet; - var MappingList = __webpack_require__(47).MappingList; - - /** - * An instance of the SourceMapGenerator represents a source map which is - * being built incrementally. You may pass an object with the following - * properties: - * - * - file: The filename of the generated source. - * - sourceRoot: A root for all relative URLs in this source map. - */ - function SourceMapGenerator(aArgs) { - if (!aArgs) { - aArgs = {}; - } - this._file = util.getArg(aArgs, 'file', null); - this._sourceRoot = util.getArg(aArgs, 'sourceRoot', null); - this._skipValidation = util.getArg(aArgs, 'skipValidation', false); - this._sources = new ArraySet(); - this._names = new ArraySet(); - this._mappings = new MappingList(); - this._sourcesContents = null; - } - - SourceMapGenerator.prototype._version = 3; - - /** - * Creates a new SourceMapGenerator based on a SourceMapConsumer - * - * @param aSourceMapConsumer The SourceMap. - */ - SourceMapGenerator.fromSourceMap = function SourceMapGenerator_fromSourceMap(aSourceMapConsumer) { - var sourceRoot = aSourceMapConsumer.sourceRoot; - var generator = new SourceMapGenerator({ - file: aSourceMapConsumer.file, - sourceRoot: sourceRoot - }); - aSourceMapConsumer.eachMapping(function (mapping) { - var newMapping = { - generated: { - line: mapping.generatedLine, - column: mapping.generatedColumn - } - }; - - if (mapping.source != null) { - newMapping.source = mapping.source; - if (sourceRoot != null) { - newMapping.source = util.relative(sourceRoot, newMapping.source); - } - - newMapping.original = { - line: mapping.originalLine, - column: mapping.originalColumn - }; - - if (mapping.name != null) { - newMapping.name = mapping.name; - } - } - - generator.addMapping(newMapping); - }); - aSourceMapConsumer.sources.forEach(function (sourceFile) { - var content = aSourceMapConsumer.sourceContentFor(sourceFile); - if (content != null) { - generator.setSourceContent(sourceFile, content); - } - }); - return generator; - }; - - /** - * Add a single mapping from original source line and column to the generated - * source's line and column for this source map being created. The mapping - * object should have the following properties: - * - * - generated: An object with the generated line and column positions. - * - original: An object with the original line and column positions. - * - source: The original source file (relative to the sourceRoot). - * - name: An optional original token name for this mapping. 
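A usage sketch for the generator API documented above, imported from the standalone source-map package (this bundle only exposes it through webpack module ids); the file names are made up:

```js
const { SourceMapGenerator } = require("source-map");

const generator = new SourceMapGenerator({
  file: "bundle.js",
  sourceRoot: "src"
});

// A full mapping: generated position, original position, source and name.
generator.addMapping({
  generated: { line: 1, column: 0 },
  original:  { line: 10, column: 4 },
  source: "app.js",
  name: "init"
});

generator.setSourceContent("app.js", "function init() {}\n");

console.log(generator.toString());
// -> a JSON string with version, sources, names, mappings, sourcesContent, ...
```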
- */ - SourceMapGenerator.prototype.addMapping = function SourceMapGenerator_addMapping(aArgs) { - var generated = util.getArg(aArgs, 'generated'); - var original = util.getArg(aArgs, 'original', null); - var source = util.getArg(aArgs, 'source', null); - var name = util.getArg(aArgs, 'name', null); - - if (!this._skipValidation) { - this._validateMapping(generated, original, source, name); - } - - if (source != null) { - source = String(source); - if (!this._sources.has(source)) { - this._sources.add(source); - } - } - - if (name != null) { - name = String(name); - if (!this._names.has(name)) { - this._names.add(name); - } - } - - this._mappings.add({ - generatedLine: generated.line, - generatedColumn: generated.column, - originalLine: original != null && original.line, - originalColumn: original != null && original.column, - source: source, - name: name - }); - }; - - /** - * Set the source content for a source file. - */ - SourceMapGenerator.prototype.setSourceContent = function SourceMapGenerator_setSourceContent(aSourceFile, aSourceContent) { - var source = aSourceFile; - if (this._sourceRoot != null) { - source = util.relative(this._sourceRoot, source); - } - - if (aSourceContent != null) { - // Add the source content to the _sourcesContents map. - // Create a new _sourcesContents map if the property is null. - if (!this._sourcesContents) { - this._sourcesContents = Object.create(null); - } - this._sourcesContents[util.toSetString(source)] = aSourceContent; - } else if (this._sourcesContents) { - // Remove the source file from the _sourcesContents map. - // If the _sourcesContents map is empty, set the property to null. - delete this._sourcesContents[util.toSetString(source)]; - if (Object.keys(this._sourcesContents).length === 0) { - this._sourcesContents = null; - } - } - }; - - /** - * Applies the mappings of a sub-source-map for a specific source file to the - * source map being generated. Each mapping to the supplied source file is - * rewritten using the supplied source map. Note: The resolution for the - * resulting mappings is the minimium of this map and the supplied map. - * - * @param aSourceMapConsumer The source map to be applied. - * @param aSourceFile Optional. The filename of the source file. - * If omitted, SourceMapConsumer's file property will be used. - * @param aSourceMapPath Optional. The dirname of the path to the source map - * to be applied. If relative, it is relative to the SourceMapConsumer. - * This parameter is needed when the two source maps aren't in the same - * directory, and the source map to be applied contains relative source - * paths. If so, those relative source paths need to be rewritten - * relative to the SourceMapGenerator. - */ - SourceMapGenerator.prototype.applySourceMap = function SourceMapGenerator_applySourceMap(aSourceMapConsumer, aSourceFile, aSourceMapPath) { - var sourceFile = aSourceFile; - // If aSourceFile is omitted, we will use the file property of the SourceMap - if (aSourceFile == null) { - if (aSourceMapConsumer.file == null) { - throw new Error('SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, ' + 'or the source map\'s "file" property. Both were omitted.'); - } - sourceFile = aSourceMapConsumer.file; - } - var sourceRoot = this._sourceRoot; - // Make "sourceFile" relative if an absolute Url is passed. - if (sourceRoot != null) { - sourceFile = util.relative(sourceRoot, sourceFile); - } - // Applying the SourceMap can add and remove items from the sources and - // the names array. 
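`applySourceMap`, whose body continues below, is typically used to chain maps, for example pointing a minified-to-transpiled map all the way back to the original sources. A sketch under the synchronous consumer API bundled here (the standalone 0.7+ package makes the consumer constructor asynchronous); `composeMaps` and the file names are hypothetical:

```js
const { SourceMapConsumer, SourceMapGenerator } = require("source-map");

function composeMaps(minifiedToTranspiledMap, transpiledToOriginalMap) {
  const generator = SourceMapGenerator.fromSourceMap(
    new SourceMapConsumer(minifiedToTranspiledMap));

  // Mappings whose source is "transpiled.js" are re-resolved through the
  // second map; the sources and names arrays are rebuilt as a side effect.
  generator.applySourceMap(
    new SourceMapConsumer(transpiledToOriginalMap), "transpiled.js");

  return generator.toJSON();
}
```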
- var newSources = new ArraySet(); - var newNames = new ArraySet(); - - // Find mappings for the "sourceFile" - this._mappings.unsortedForEach(function (mapping) { - if (mapping.source === sourceFile && mapping.originalLine != null) { - // Check if it can be mapped by the source map, then update the mapping. - var original = aSourceMapConsumer.originalPositionFor({ - line: mapping.originalLine, - column: mapping.originalColumn - }); - if (original.source != null) { - // Copy mapping - mapping.source = original.source; - if (aSourceMapPath != null) { - mapping.source = util.join(aSourceMapPath, mapping.source); - } - if (sourceRoot != null) { - mapping.source = util.relative(sourceRoot, mapping.source); - } - mapping.originalLine = original.line; - mapping.originalColumn = original.column; - if (original.name != null) { - mapping.name = original.name; - } - } - } - - var source = mapping.source; - if (source != null && !newSources.has(source)) { - newSources.add(source); - } - - var name = mapping.name; - if (name != null && !newNames.has(name)) { - newNames.add(name); - } - }, this); - this._sources = newSources; - this._names = newNames; - - // Copy sourcesContents of applied map. - aSourceMapConsumer.sources.forEach(function (sourceFile) { - var content = aSourceMapConsumer.sourceContentFor(sourceFile); - if (content != null) { - if (aSourceMapPath != null) { - sourceFile = util.join(aSourceMapPath, sourceFile); - } - if (sourceRoot != null) { - sourceFile = util.relative(sourceRoot, sourceFile); - } - this.setSourceContent(sourceFile, content); - } - }, this); - }; - - /** - * A mapping can have one of the three levels of data: - * - * 1. Just the generated position. - * 2. The Generated position, original position, and original source. - * 3. Generated and original position, original source, as well as a name - * token. - * - * To maintain consistency, we validate that any new mapping being added falls - * in to one of these categories. - */ - SourceMapGenerator.prototype._validateMapping = function SourceMapGenerator_validateMapping(aGenerated, aOriginal, aSource, aName) { - // When aOriginal is truthy but has empty values for .line and .column, - // it is most likely a programmer error. In this case we throw a very - // specific error message to try to guide them the right way. - // For example: https://github.com/Polymer/polymer-bundler/pull/519 - if (aOriginal && typeof aOriginal.line !== 'number' && typeof aOriginal.column !== 'number') { - throw new Error('original.line and original.column are not numbers -- you probably meant to omit ' + 'the original mapping entirely and only map the generated position. If so, pass ' + 'null for the original mapping instead of an object with empty or null values.'); - } - - if (aGenerated && 'line' in aGenerated && 'column' in aGenerated && aGenerated.line > 0 && aGenerated.column >= 0 && !aOriginal && !aSource && !aName) { - // Case 1. - return; - } else if (aGenerated && 'line' in aGenerated && 'column' in aGenerated && aOriginal && 'line' in aOriginal && 'column' in aOriginal && aGenerated.line > 0 && aGenerated.column >= 0 && aOriginal.line > 0 && aOriginal.column >= 0 && aSource) { - // Cases 2 and 3. - return; - } else { - throw new Error('Invalid mapping: ' + JSON.stringify({ - generated: aGenerated, - source: aSource, - original: aOriginal, - name: aName - })); - } - }; - - /** - * Serialize the accumulated mappings in to the stream of base 64 VLQs - * specified by the source map format. 
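An illustration of the delta-plus-VLQ encoding performed below, driven through the public generator API of the standalone package. Two mappings on the same generated line serialize to two comma-separated segments, and every field after the first segment is a delta from the previous one:

```js
const { SourceMapGenerator } = require("source-map");

const gen = new SourceMapGenerator({ file: "out.js" });
gen.addMapping({ generated: { line: 1, column: 0 },
                 original:  { line: 1, column: 0 }, source: "a.js" });
gen.addMapping({ generated: { line: 1, column: 6 },
                 original:  { line: 1, column: 4 }, source: "a.js" });

console.log(gen.toJSON().mappings);
// -> "AAAA,MAAI"
//    AAAA: generated column 0, source #0, original line 1, original column 0
//    MAAI: column +6, source +0, original line +0, original column +4
```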
- */ - SourceMapGenerator.prototype._serializeMappings = function SourceMapGenerator_serializeMappings() { - var previousGeneratedColumn = 0; - var previousGeneratedLine = 1; - var previousOriginalColumn = 0; - var previousOriginalLine = 0; - var previousName = 0; - var previousSource = 0; - var result = ''; - var next; - var mapping; - var nameIdx; - var sourceIdx; - - var mappings = this._mappings.toArray(); - for (var i = 0, len = mappings.length; i < len; i++) { - mapping = mappings[i]; - next = ''; - - if (mapping.generatedLine !== previousGeneratedLine) { - previousGeneratedColumn = 0; - while (mapping.generatedLine !== previousGeneratedLine) { - next += ';'; - previousGeneratedLine++; - } - } else { - if (i > 0) { - if (!util.compareByGeneratedPositionsInflated(mapping, mappings[i - 1])) { - continue; - } - next += ','; - } - } - - next += base64VLQ.encode(mapping.generatedColumn - previousGeneratedColumn); - previousGeneratedColumn = mapping.generatedColumn; - - if (mapping.source != null) { - sourceIdx = this._sources.indexOf(mapping.source); - next += base64VLQ.encode(sourceIdx - previousSource); - previousSource = sourceIdx; - - // lines are stored 0-based in SourceMap spec version 3 - next += base64VLQ.encode(mapping.originalLine - 1 - previousOriginalLine); - previousOriginalLine = mapping.originalLine - 1; - - next += base64VLQ.encode(mapping.originalColumn - previousOriginalColumn); - previousOriginalColumn = mapping.originalColumn; - - if (mapping.name != null) { - nameIdx = this._names.indexOf(mapping.name); - next += base64VLQ.encode(nameIdx - previousName); - previousName = nameIdx; - } - } - - result += next; - } - - return result; - }; - - SourceMapGenerator.prototype._generateSourcesContent = function SourceMapGenerator_generateSourcesContent(aSources, aSourceRoot) { - return aSources.map(function (source) { - if (!this._sourcesContents) { - return null; - } - if (aSourceRoot != null) { - source = util.relative(aSourceRoot, source); - } - var key = util.toSetString(source); - return Object.prototype.hasOwnProperty.call(this._sourcesContents, key) ? this._sourcesContents[key] : null; - }, this); - }; - - /** - * Externalize the source map. - */ - SourceMapGenerator.prototype.toJSON = function SourceMapGenerator_toJSON() { - var map = { - version: this._version, - sources: this._sources.toArray(), - names: this._names.toArray(), - mappings: this._serializeMappings() - }; - if (this._file != null) { - map.file = this._file; - } - if (this._sourceRoot != null) { - map.sourceRoot = this._sourceRoot; - } - if (this._sourcesContents) { - map.sourcesContent = this._generateSourcesContent(map.sources, map.sourceRoot); - } - - return map; - }; - - /** - * Render the source map being generated to a string. - */ - SourceMapGenerator.prototype.toString = function SourceMapGenerator_toString() { - return JSON.stringify(this.toJSON()); - }; - - exports.SourceMapGenerator = SourceMapGenerator; - -/***/ }, -/* 43 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - * - * Based on the Base 64 VLQ implementation in Closure Compiler: - * https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java - * - * Copyright 2011 The Closure Compiler Authors. All rights reserved. 
- * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - var base64 = __webpack_require__(44); - - // A single base 64 digit can contain 6 bits of data. For the base 64 variable - // length quantities we use in the source map spec, the first bit is the sign, - // the next four bits are the actual value, and the 6th bit is the - // continuation bit. The continuation bit tells us whether there are more - // digits in this value following this digit. - // - // Continuation - // | Sign - // | | - // V V - // 101011 - - var VLQ_BASE_SHIFT = 5; - - // binary: 100000 - var VLQ_BASE = 1 << VLQ_BASE_SHIFT; - - // binary: 011111 - var VLQ_BASE_MASK = VLQ_BASE - 1; - - // binary: 100000 - var VLQ_CONTINUATION_BIT = VLQ_BASE; - - /** - * Converts from a two-complement value to a value where the sign bit is - * placed in the least significant bit. For example, as decimals: - * 1 becomes 2 (10 binary), -1 becomes 3 (11 binary) - * 2 becomes 4 (100 binary), -2 becomes 5 (101 binary) - */ - function toVLQSigned(aValue) { - return aValue < 0 ? (-aValue << 1) + 1 : (aValue << 1) + 0; - } - - /** - * Converts to a two-complement value from a value where the sign bit is - * placed in the least significant bit. For example, as decimals: - * 2 (10 binary) becomes 1, 3 (11 binary) becomes -1 - * 4 (100 binary) becomes 2, 5 (101 binary) becomes -2 - */ - function fromVLQSigned(aValue) { - var isNegative = (aValue & 1) === 1; - var shifted = aValue >> 1; - return isNegative ? -shifted : shifted; - } - - /** - * Returns the base 64 VLQ encoded value. - */ - exports.encode = function base64VLQ_encode(aValue) { - var encoded = ""; - var digit; - - var vlq = toVLQSigned(aValue); - - do { - digit = vlq & VLQ_BASE_MASK; - vlq >>>= VLQ_BASE_SHIFT; - if (vlq > 0) { - // There are still more digits in this value, so we must make sure the - // continuation bit is marked. 
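A round-trip sketch for the signed VLQ scheme described above: the sign bit is stored in the least significant position, each base64 digit carries five value bits, and the high bit marks continuation. The deep require path matches the standalone package layout and is an assumption; inside this bundle the module only has a webpack id:

```js
const base64VLQ = require("source-map/lib/base64-vlq");

base64VLQ.encode(1);    // -> "C"   (1  -> 2  -> 000010)
base64VLQ.encode(-1);   // -> "D"   (-1 -> 3  -> 000011)
base64VLQ.encode(16);   // -> "gB"  (16 -> 32 -> needs a continuation digit)

const out = {};
base64VLQ.decode("gB", 0, out);
console.log(out);       // -> { value: 16, rest: 2 }
```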
- digit |= VLQ_CONTINUATION_BIT; - } - encoded += base64.encode(digit); - } while (vlq > 0); - - return encoded; - }; - - /** - * Decodes the next base 64 VLQ value from the given string and returns the - * value and the rest of the string via the out parameter. - */ - exports.decode = function base64VLQ_decode(aStr, aIndex, aOutParam) { - var strLen = aStr.length; - var result = 0; - var shift = 0; - var continuation, digit; - - do { - if (aIndex >= strLen) { - throw new Error("Expected more digits in base 64 VLQ value."); - } - - digit = base64.decode(aStr.charCodeAt(aIndex++)); - if (digit === -1) { - throw new Error("Invalid base64 digit: " + aStr.charAt(aIndex - 1)); - } - - continuation = !!(digit & VLQ_CONTINUATION_BIT); - digit &= VLQ_BASE_MASK; - result = result + (digit << shift); - shift += VLQ_BASE_SHIFT; - } while (continuation); - - aOutParam.value = fromVLQSigned(result); - aOutParam.rest = aIndex; - }; - -/***/ }, -/* 44 */ -/***/ function(module, exports) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var intToCharMap = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'.split(''); - - /** - * Encode an integer in the range of 0 to 63 to a single base 64 digit. - */ - exports.encode = function (number) { - if (0 <= number && number < intToCharMap.length) { - return intToCharMap[number]; - } - throw new TypeError("Must be between 0 and 63: " + number); - }; - - /** - * Decode a single base 64 character code digit to an integer. Returns -1 on - * failure. - */ - exports.decode = function (charCode) { - var bigA = 65; // 'A' - var bigZ = 90; // 'Z' - - var littleA = 97; // 'a' - var littleZ = 122; // 'z' - - var zero = 48; // '0' - var nine = 57; // '9' - - var plus = 43; // '+' - var slash = 47; // '/' - - var littleOffset = 26; - var numberOffset = 52; - - // 0 - 25: ABCDEFGHIJKLMNOPQRSTUVWXYZ - if (bigA <= charCode && charCode <= bigZ) { - return charCode - bigA; - } - - // 26 - 51: abcdefghijklmnopqrstuvwxyz - if (littleA <= charCode && charCode <= littleZ) { - return charCode - littleA + littleOffset; - } - - // 52 - 61: 0123456789 - if (zero <= charCode && charCode <= nine) { - return charCode - zero + numberOffset; - } - - // 62: + - if (charCode == plus) { - return 62; - } - - // 63: / - if (charCode == slash) { - return 63; - } - - // Invalid base64 digit. - return -1; - }; - -/***/ }, -/* 45 */ -/***/ function(module, exports) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - /** - * This is a helper function for getting values from parameter/options - * objects. - * - * @param args The object we are extracting values from - * @param name The name of the property we are getting. - * @param defaultValue An optional value to return if the property is missing - * from the object. If this is not specified and the property is missing, an - * error will be thrown. 
- */ - function getArg(aArgs, aName, aDefaultValue) { - if (aName in aArgs) { - return aArgs[aName]; - } else if (arguments.length === 3) { - return aDefaultValue; - } else { - throw new Error('"' + aName + '" is a required argument.'); - } - } - exports.getArg = getArg; - - var urlRegexp = /^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.]*)(?::(\d+))?(\S*)$/; - var dataUrlRegexp = /^data:.+\,.+$/; - - function urlParse(aUrl) { - var match = aUrl.match(urlRegexp); - if (!match) { - return null; - } - return { - scheme: match[1], - auth: match[2], - host: match[3], - port: match[4], - path: match[5] - }; - } - exports.urlParse = urlParse; - - function urlGenerate(aParsedUrl) { - var url = ''; - if (aParsedUrl.scheme) { - url += aParsedUrl.scheme + ':'; - } - url += '//'; - if (aParsedUrl.auth) { - url += aParsedUrl.auth + '@'; - } - if (aParsedUrl.host) { - url += aParsedUrl.host; - } - if (aParsedUrl.port) { - url += ":" + aParsedUrl.port; - } - if (aParsedUrl.path) { - url += aParsedUrl.path; - } - return url; - } - exports.urlGenerate = urlGenerate; - - /** - * Normalizes a path, or the path portion of a URL: - * - * - Replaces consecutive slashes with one slash. - * - Removes unnecessary '.' parts. - * - Removes unnecessary '/..' parts. - * - * Based on code in the Node.js 'path' core module. - * - * @param aPath The path or url to normalize. - */ - function normalize(aPath) { - var path = aPath; - var url = urlParse(aPath); - if (url) { - if (!url.path) { - return aPath; - } - path = url.path; - } - var isAbsolute = exports.isAbsolute(path); - - var parts = path.split(/\/+/); - for (var part, up = 0, i = parts.length - 1; i >= 0; i--) { - part = parts[i]; - if (part === '.') { - parts.splice(i, 1); - } else if (part === '..') { - up++; - } else if (up > 0) { - if (part === '') { - // The first part is blank if the path is absolute. Trying to go - // above the root is a no-op. Therefore we can remove all '..' parts - // directly after the root. - parts.splice(i + 1, up); - up = 0; - } else { - parts.splice(i, 2); - up--; - } - } - } - path = parts.join('/'); - - if (path === '') { - path = isAbsolute ? '/' : '.'; - } - - if (url) { - url.path = path; - return urlGenerate(url); - } - return path; - } - exports.normalize = normalize; - - /** - * Joins two paths/URLs. - * - * @param aRoot The root path or URL. - * @param aPath The path or URL to be joined with the root. - * - * - If aPath is a URL or a data URI, aPath is returned, unless aPath is a - * scheme-relative URL: Then the scheme of aRoot, if any, is prepended - * first. - * - Otherwise aPath is a path. If aRoot is a URL, then its path portion - * is updated with the result and aRoot is returned. Otherwise the result - * is returned. - * - If aPath is absolute, the result is aPath. - * - Otherwise the two paths are joined with a slash. - * - Joining for example 'http://' and 'www.example.com' is also supported. 
- */ - function join(aRoot, aPath) { - if (aRoot === "") { - aRoot = "."; - } - if (aPath === "") { - aPath = "."; - } - var aPathUrl = urlParse(aPath); - var aRootUrl = urlParse(aRoot); - if (aRootUrl) { - aRoot = aRootUrl.path || '/'; - } - - // `join(foo, '//www.example.org')` - if (aPathUrl && !aPathUrl.scheme) { - if (aRootUrl) { - aPathUrl.scheme = aRootUrl.scheme; - } - return urlGenerate(aPathUrl); - } - - if (aPathUrl || aPath.match(dataUrlRegexp)) { - return aPath; - } - - // `join('http://', 'www.example.com')` - if (aRootUrl && !aRootUrl.host && !aRootUrl.path) { - aRootUrl.host = aPath; - return urlGenerate(aRootUrl); - } - - var joined = aPath.charAt(0) === '/' ? aPath : normalize(aRoot.replace(/\/+$/, '') + '/' + aPath); - - if (aRootUrl) { - aRootUrl.path = joined; - return urlGenerate(aRootUrl); - } - return joined; - } - exports.join = join; - - exports.isAbsolute = function (aPath) { - return aPath.charAt(0) === '/' || !!aPath.match(urlRegexp); - }; - - /** - * Make a path relative to a URL or another path. - * - * @param aRoot The root path or URL. - * @param aPath The path or URL to be made relative to aRoot. - */ - function relative(aRoot, aPath) { - if (aRoot === "") { - aRoot = "."; - } - - aRoot = aRoot.replace(/\/$/, ''); - - // It is possible for the path to be above the root. In this case, simply - // checking whether the root is a prefix of the path won't work. Instead, we - // need to remove components from the root one by one, until either we find - // a prefix that fits, or we run out of components to remove. - var level = 0; - while (aPath.indexOf(aRoot + '/') !== 0) { - var index = aRoot.lastIndexOf("/"); - if (index < 0) { - return aPath; - } - - // If the only part of the root that is left is the scheme (i.e. http://, - // file:///, etc.), one or more slashes (/), or simply nothing at all, we - // have exhausted all components, so the path is not relative to the root. - aRoot = aRoot.slice(0, index); - if (aRoot.match(/^([^\/]+:\/)?\/*$/)) { - return aPath; - } - - ++level; - } - - // Make sure we add a "../" for each component we removed from the root. - return Array(level + 1).join("../") + aPath.substr(aRoot.length + 1); - } - exports.relative = relative; - - var supportsNullProto = function () { - var obj = Object.create(null); - return !('__proto__' in obj); - }(); - - function identity(s) { - return s; - } - - /** - * Because behavior goes wacky when you set `__proto__` on objects, we - * have to prefix all the strings in our set with an arbitrary character. - * - * See https://github.com/mozilla/source-map/pull/31 and - * https://github.com/mozilla/source-map/issues/30 - * - * @param String aStr - */ - function toSetString(aStr) { - if (isProtoString(aStr)) { - return '$' + aStr; - } - - return aStr; - } - exports.toSetString = supportsNullProto ? identity : toSetString; - - function fromSetString(aStr) { - if (isProtoString(aStr)) { - return aStr.slice(1); - } - - return aStr; - } - exports.fromSetString = supportsNullProto ? 
identity : fromSetString; - - function isProtoString(s) { - if (!s) { - return false; - } - - var length = s.length; - - if (length < 9 /* "__proto__".length */) { - return false; - } - - if (s.charCodeAt(length - 1) !== 95 /* '_' */ || s.charCodeAt(length - 2) !== 95 /* '_' */ || s.charCodeAt(length - 3) !== 111 /* 'o' */ || s.charCodeAt(length - 4) !== 116 /* 't' */ || s.charCodeAt(length - 5) !== 111 /* 'o' */ || s.charCodeAt(length - 6) !== 114 /* 'r' */ || s.charCodeAt(length - 7) !== 112 /* 'p' */ || s.charCodeAt(length - 8) !== 95 /* '_' */ || s.charCodeAt(length - 9) !== 95 /* '_' */) { - return false; - } - - for (var i = length - 10; i >= 0; i--) { - if (s.charCodeAt(i) !== 36 /* '$' */) { - return false; - } - } - - return true; - } - - /** - * Comparator between two mappings where the original positions are compared. - * - * Optionally pass in `true` as `onlyCompareGenerated` to consider two - * mappings with the same original source/line/column, but different generated - * line and column the same. Useful when searching for a mapping with a - * stubbed out mapping. - */ - function compareByOriginalPositions(mappingA, mappingB, onlyCompareOriginal) { - var cmp = mappingA.source - mappingB.source; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalLine - mappingB.originalLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalColumn - mappingB.originalColumn; - if (cmp !== 0 || onlyCompareOriginal) { - return cmp; - } - - cmp = mappingA.generatedColumn - mappingB.generatedColumn; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.generatedLine - mappingB.generatedLine; - if (cmp !== 0) { - return cmp; - } - - return mappingA.name - mappingB.name; - } - exports.compareByOriginalPositions = compareByOriginalPositions; - - /** - * Comparator between two mappings with deflated source and name indices where - * the generated positions are compared. - * - * Optionally pass in `true` as `onlyCompareGenerated` to consider two - * mappings with the same generated line and column, but different - * source/name/original line and column the same. Useful when searching for a - * mapping with a stubbed out mapping. - */ - function compareByGeneratedPositionsDeflated(mappingA, mappingB, onlyCompareGenerated) { - var cmp = mappingA.generatedLine - mappingB.generatedLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.generatedColumn - mappingB.generatedColumn; - if (cmp !== 0 || onlyCompareGenerated) { - return cmp; - } - - cmp = mappingA.source - mappingB.source; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalLine - mappingB.originalLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalColumn - mappingB.originalColumn; - if (cmp !== 0) { - return cmp; - } - - return mappingA.name - mappingB.name; - } - exports.compareByGeneratedPositionsDeflated = compareByGeneratedPositionsDeflated; - - function strcmp(aStr1, aStr2) { - if (aStr1 === aStr2) { - return 0; - } - - if (aStr1 > aStr2) { - return 1; - } - - return -1; - } - - /** - * Comparator between two mappings with inflated source and name strings where - * the generated positions are compared. 
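The `__proto__` workaround documented above is easiest to see with a small standalone sketch (illustrative only). On engines where plain objects expose a `__proto__` accessor, the bare key is swallowed unless it is prefixed or a null-prototype object is used:

// Plain objects treat "__proto__" specially, so it cannot be used as an ordinary key.
var plain = {};
plain['__proto__'] = 1;
console.log(Object.prototype.hasOwnProperty.call(plain, '__proto__'));
// false on most engines: the assignment hit the prototype setter instead of creating a key

// Prefixing (as toSetString does when null-prototype objects are unreliable) sidesteps this.
var prefixed = {};
prefixed['$' + '__proto__'] = 1;
console.log(Object.prototype.hasOwnProperty.call(prefixed, '$__proto__')); // true

// Object.create(null) has no prototype at all, so no prefixing is needed.
var nullProto = Object.create(null);
nullProto['__proto__'] = 1;
console.log('__proto__' in nullProto); // true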
- */ - function compareByGeneratedPositionsInflated(mappingA, mappingB) { - var cmp = mappingA.generatedLine - mappingB.generatedLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.generatedColumn - mappingB.generatedColumn; - if (cmp !== 0) { - return cmp; - } - - cmp = strcmp(mappingA.source, mappingB.source); - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalLine - mappingB.originalLine; - if (cmp !== 0) { - return cmp; - } - - cmp = mappingA.originalColumn - mappingB.originalColumn; - if (cmp !== 0) { - return cmp; - } - - return strcmp(mappingA.name, mappingB.name); - } - exports.compareByGeneratedPositionsInflated = compareByGeneratedPositionsInflated; - -/***/ }, -/* 46 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var util = __webpack_require__(45); - var has = Object.prototype.hasOwnProperty; - var hasNativeMap = typeof Map !== "undefined"; - - /** - * A data structure which is a combination of an array and a set. Adding a new - * member is O(1), testing for membership is O(1), and finding the index of an - * element is O(1). Removing elements from the set is not supported. Only - * strings are supported for membership. - */ - function ArraySet() { - this._array = []; - this._set = hasNativeMap ? new Map() : Object.create(null); - } - - /** - * Static method for creating ArraySet instances from an existing array. - */ - ArraySet.fromArray = function ArraySet_fromArray(aArray, aAllowDuplicates) { - var set = new ArraySet(); - for (var i = 0, len = aArray.length; i < len; i++) { - set.add(aArray[i], aAllowDuplicates); - } - return set; - }; - - /** - * Return how many unique items are in this ArraySet. If duplicates have been - * added, than those do not count towards the size. - * - * @returns Number - */ - ArraySet.prototype.size = function ArraySet_size() { - return hasNativeMap ? this._set.size : Object.getOwnPropertyNames(this._set).length; - }; - - /** - * Add the given string to this set. - * - * @param String aStr - */ - ArraySet.prototype.add = function ArraySet_add(aStr, aAllowDuplicates) { - var sStr = hasNativeMap ? aStr : util.toSetString(aStr); - var isDuplicate = hasNativeMap ? this.has(aStr) : has.call(this._set, sStr); - var idx = this._array.length; - if (!isDuplicate || aAllowDuplicates) { - this._array.push(aStr); - } - if (!isDuplicate) { - if (hasNativeMap) { - this._set.set(aStr, idx); - } else { - this._set[sStr] = idx; - } - } - }; - - /** - * Is the given string a member of this set? - * - * @param String aStr - */ - ArraySet.prototype.has = function ArraySet_has(aStr) { - if (hasNativeMap) { - return this._set.has(aStr); - } else { - var sStr = util.toSetString(aStr); - return has.call(this._set, sStr); - } - }; - - /** - * What is the index of the given string in the array? - * - * @param String aStr - */ - ArraySet.prototype.indexOf = function ArraySet_indexOf(aStr) { - if (hasNativeMap) { - var idx = this._set.get(aStr); - if (idx >= 0) { - return idx; - } - } else { - var sStr = util.toSetString(aStr); - if (has.call(this._set, sStr)) { - return this._set[sStr]; - } - } - - throw new Error('"' + aStr + '" is not in the set.'); - }; - - /** - * What is the element at the given index? 
- * - * @param Number aIdx - */ - ArraySet.prototype.at = function ArraySet_at(aIdx) { - if (aIdx >= 0 && aIdx < this._array.length) { - return this._array[aIdx]; - } - throw new Error('No element indexed by ' + aIdx); - }; - - /** - * Returns the array representation of this set (which has the proper indices - * indicated by indexOf). Note that this is a copy of the internal array used - * for storing the members so that no one can mess with internal state. - */ - ArraySet.prototype.toArray = function ArraySet_toArray() { - return this._array.slice(); - }; - - exports.ArraySet = ArraySet; - -/***/ }, -/* 47 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2014 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var util = __webpack_require__(45); - - /** - * Determine whether mappingB is after mappingA with respect to generated - * position. - */ - function generatedPositionAfter(mappingA, mappingB) { - // Optimized for most common case - var lineA = mappingA.generatedLine; - var lineB = mappingB.generatedLine; - var columnA = mappingA.generatedColumn; - var columnB = mappingB.generatedColumn; - return lineB > lineA || lineB == lineA && columnB >= columnA || util.compareByGeneratedPositionsInflated(mappingA, mappingB) <= 0; - } - - /** - * A data structure to provide a sorted view of accumulated mappings in a - * performance conscious manner. It trades a neglibable overhead in general - * case for a large speedup in case of mappings being added in order. - */ - function MappingList() { - this._array = []; - this._sorted = true; - // Serves as infimum - this._last = { generatedLine: -1, generatedColumn: 0 }; - } - - /** - * Iterate through internal items. This method takes the same arguments that - * `Array.prototype.forEach` takes. - * - * NOTE: The order of the mappings is NOT guaranteed. - */ - MappingList.prototype.unsortedForEach = function MappingList_forEach(aCallback, aThisArg) { - this._array.forEach(aCallback, aThisArg); - }; - - /** - * Add the given source mapping. - * - * @param Object aMapping - */ - MappingList.prototype.add = function MappingList_add(aMapping) { - if (generatedPositionAfter(this._last, aMapping)) { - this._last = aMapping; - this._array.push(aMapping); - } else { - this._sorted = false; - this._array.push(aMapping); - } - }; - - /** - * Returns the flat, sorted array of mappings. The mappings are sorted by - * generated position. - * - * WARNING: This method returns internal data without copying, for - * performance. The return value must NOT be mutated, and should be treated as - * an immutable borrow. If you want to take ownership, you must make your own - * copy. - */ - MappingList.prototype.toArray = function MappingList_toArray() { - if (!this._sorted) { - this._array.sort(util.compareByGeneratedPositionsInflated); - this._sorted = true; - } - return this._array; - }; - - exports.MappingList = MappingList; - -/***/ }, -/* 48 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. 
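A usage sketch of the ArraySet contract documented above; the require path below follows the standalone source-map package layout and may differ between versions:

var ArraySet = require('source-map/lib/array-set').ArraySet; // assumed path, illustrative only

// Allow duplicates, as the consumer does for names and sources.
var set = ArraySet.fromArray(['foo.js', 'bar.js', 'foo.js'], true);

console.log(set.size());            // 2: duplicates do not count toward the size
console.log(set.has('bar.js'));     // true
console.log(set.indexOf('foo.js')); // 0: the first insertion keeps the index
console.log(set.at(2));             // 'foo.js': the duplicate still occupies a slot in the array
console.log(set.toArray());         // ['foo.js', 'bar.js', 'foo.js'] (a copy of the backing array)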
See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var util = __webpack_require__(45); - var binarySearch = __webpack_require__(49); - var ArraySet = __webpack_require__(46).ArraySet; - var base64VLQ = __webpack_require__(43); - var quickSort = __webpack_require__(50).quickSort; - - function SourceMapConsumer(aSourceMap) { - var sourceMap = aSourceMap; - if (typeof aSourceMap === 'string') { - sourceMap = JSON.parse(aSourceMap.replace(/^\)\]\}'/, '')); - } - - return sourceMap.sections != null ? new IndexedSourceMapConsumer(sourceMap) : new BasicSourceMapConsumer(sourceMap); - } - - SourceMapConsumer.fromSourceMap = function (aSourceMap) { - return BasicSourceMapConsumer.fromSourceMap(aSourceMap); - }; - - /** - * The version of the source mapping spec that we are consuming. - */ - SourceMapConsumer.prototype._version = 3; - - // `__generatedMappings` and `__originalMappings` are arrays that hold the - // parsed mapping coordinates from the source map's "mappings" attribute. They - // are lazily instantiated, accessed via the `_generatedMappings` and - // `_originalMappings` getters respectively, and we only parse the mappings - // and create these arrays once queried for a source location. We jump through - // these hoops because there can be many thousands of mappings, and parsing - // them is expensive, so we only want to do it if we must. - // - // Each object in the arrays is of the form: - // - // { - // generatedLine: The line number in the generated code, - // generatedColumn: The column number in the generated code, - // source: The path to the original source file that generated this - // chunk of code, - // originalLine: The line number in the original source that - // corresponds to this chunk of generated code, - // originalColumn: The column number in the original source that - // corresponds to this chunk of generated code, - // name: The name of the original symbol which generated this chunk of - // code. - // } - // - // All properties except for `generatedLine` and `generatedColumn` can be - // `null`. - // - // `_generatedMappings` is ordered by the generated positions. - // - // `_originalMappings` is ordered by the original positions. - - SourceMapConsumer.prototype.__generatedMappings = null; - Object.defineProperty(SourceMapConsumer.prototype, '_generatedMappings', { - get: function () { - if (!this.__generatedMappings) { - this._parseMappings(this._mappings, this.sourceRoot); - } - - return this.__generatedMappings; - } - }); - - SourceMapConsumer.prototype.__originalMappings = null; - Object.defineProperty(SourceMapConsumer.prototype, '_originalMappings', { - get: function () { - if (!this.__originalMappings) { - this._parseMappings(this._mappings, this.sourceRoot); - } - - return this.__originalMappings; - } - }); - - SourceMapConsumer.prototype._charIsMappingSeparator = function SourceMapConsumer_charIsMappingSeparator(aStr, index) { - var c = aStr.charAt(index); - return c === ";" || c === ","; - }; - - /** - * Parse the mappings in a string in to a data structure which we can easily - * query (the ordered arrays in the `this.__generatedMappings` and - * `this.__originalMappings` properties). 
- */ - SourceMapConsumer.prototype._parseMappings = function SourceMapConsumer_parseMappings(aStr, aSourceRoot) { - throw new Error("Subclasses must implement _parseMappings"); - }; - - SourceMapConsumer.GENERATED_ORDER = 1; - SourceMapConsumer.ORIGINAL_ORDER = 2; - - SourceMapConsumer.GREATEST_LOWER_BOUND = 1; - SourceMapConsumer.LEAST_UPPER_BOUND = 2; - - /** - * Iterate over each mapping between an original source/line/column and a - * generated line/column in this source map. - * - * @param Function aCallback - * The function that is called with each mapping. - * @param Object aContext - * Optional. If specified, this object will be the value of `this` every - * time that `aCallback` is called. - * @param aOrder - * Either `SourceMapConsumer.GENERATED_ORDER` or - * `SourceMapConsumer.ORIGINAL_ORDER`. Specifies whether you want to - * iterate over the mappings sorted by the generated file's line/column - * order or the original's source/line/column order, respectively. Defaults to - * `SourceMapConsumer.GENERATED_ORDER`. - */ - SourceMapConsumer.prototype.eachMapping = function SourceMapConsumer_eachMapping(aCallback, aContext, aOrder) { - var context = aContext || null; - var order = aOrder || SourceMapConsumer.GENERATED_ORDER; - - var mappings; - switch (order) { - case SourceMapConsumer.GENERATED_ORDER: - mappings = this._generatedMappings; - break; - case SourceMapConsumer.ORIGINAL_ORDER: - mappings = this._originalMappings; - break; - default: - throw new Error("Unknown order of iteration."); - } - - var sourceRoot = this.sourceRoot; - mappings.map(function (mapping) { - var source = mapping.source === null ? null : this._sources.at(mapping.source); - if (source != null && sourceRoot != null) { - source = util.join(sourceRoot, source); - } - return { - source: source, - generatedLine: mapping.generatedLine, - generatedColumn: mapping.generatedColumn, - originalLine: mapping.originalLine, - originalColumn: mapping.originalColumn, - name: mapping.name === null ? null : this._names.at(mapping.name) - }; - }, this).forEach(aCallback, context); - }; - - /** - * Returns all generated line and column information for the original source, - * line, and column provided. If no column is provided, returns all mappings - * corresponding to a either the line we are searching for or the next - * closest line that has any mappings. Otherwise, returns all mappings - * corresponding to the given line and either the column we are searching for - * or the next closest column that has any offsets. - * - * The only argument is an object with the following properties: - * - * - source: The filename of the original source. - * - line: The line number in the original source. - * - column: Optional. the column number in the original source. - * - * and an array of objects is returned, each with the following properties: - * - * - line: The line number in the generated source, or null. - * - column: The column number in the generated source, or null. - */ - SourceMapConsumer.prototype.allGeneratedPositionsFor = function SourceMapConsumer_allGeneratedPositionsFor(aArgs) { - var line = util.getArg(aArgs, 'line'); - - // When there is no exact match, BasicSourceMapConsumer.prototype._findMapping - // returns the index of the closest mapping less than the needle. By - // setting needle.originalColumn to 0, we thus find the last mapping for - // the given line, provided such a mapping exists. 
- var needle = { - source: util.getArg(aArgs, 'source'), - originalLine: line, - originalColumn: util.getArg(aArgs, 'column', 0) - }; - - if (this.sourceRoot != null) { - needle.source = util.relative(this.sourceRoot, needle.source); - } - if (!this._sources.has(needle.source)) { - return []; - } - needle.source = this._sources.indexOf(needle.source); - - var mappings = []; - - var index = this._findMapping(needle, this._originalMappings, "originalLine", "originalColumn", util.compareByOriginalPositions, binarySearch.LEAST_UPPER_BOUND); - if (index >= 0) { - var mapping = this._originalMappings[index]; - - if (aArgs.column === undefined) { - var originalLine = mapping.originalLine; - - // Iterate until either we run out of mappings, or we run into - // a mapping for a different line than the one we found. Since - // mappings are sorted, this is guaranteed to find all mappings for - // the line we found. - while (mapping && mapping.originalLine === originalLine) { - mappings.push({ - line: util.getArg(mapping, 'generatedLine', null), - column: util.getArg(mapping, 'generatedColumn', null), - lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) - }); - - mapping = this._originalMappings[++index]; - } - } else { - var originalColumn = mapping.originalColumn; - - // Iterate until either we run out of mappings, or we run into - // a mapping for a different line than the one we were searching for. - // Since mappings are sorted, this is guaranteed to find all mappings for - // the line we are searching for. - while (mapping && mapping.originalLine === line && mapping.originalColumn == originalColumn) { - mappings.push({ - line: util.getArg(mapping, 'generatedLine', null), - column: util.getArg(mapping, 'generatedColumn', null), - lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) - }); - - mapping = this._originalMappings[++index]; - } - } - } - - return mappings; - }; - - exports.SourceMapConsumer = SourceMapConsumer; - - /** - * A BasicSourceMapConsumer instance represents a parsed source map which we can - * query for information about the original file positions by giving it a file - * position in the generated source. - * - * The only parameter is the raw source map (either as a JSON string, or - * already parsed to an object). According to the spec, source maps have the - * following attributes: - * - * - version: Which version of the source map spec this map is following. - * - sources: An array of URLs to the original source files. - * - names: An array of identifiers which can be referrenced by individual mappings. - * - sourceRoot: Optional. The URL root from which all sources are relative. - * - sourcesContent: Optional. An array of contents of the original source files. - * - mappings: A string of base64 VLQs which contain the actual mappings. - * - file: Optional. The generated file this source map is associated with. 
- * - * Here is an example source map, taken from the source map spec[0]: - * - * { - * version : 3, - * file: "out.js", - * sourceRoot : "", - * sources: ["foo.js", "bar.js"], - * names: ["src", "maps", "are", "fun"], - * mappings: "AA,AB;;ABCDE;" - * } - * - * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1# - */ - function BasicSourceMapConsumer(aSourceMap) { - var sourceMap = aSourceMap; - if (typeof aSourceMap === 'string') { - sourceMap = JSON.parse(aSourceMap.replace(/^\)\]\}'/, '')); - } - - var version = util.getArg(sourceMap, 'version'); - var sources = util.getArg(sourceMap, 'sources'); - // Sass 3.3 leaves out the 'names' array, so we deviate from the spec (which - // requires the array) to play nice here. - var names = util.getArg(sourceMap, 'names', []); - var sourceRoot = util.getArg(sourceMap, 'sourceRoot', null); - var sourcesContent = util.getArg(sourceMap, 'sourcesContent', null); - var mappings = util.getArg(sourceMap, 'mappings'); - var file = util.getArg(sourceMap, 'file', null); - - // Once again, Sass deviates from the spec and supplies the version as a - // string rather than a number, so we use loose equality checking here. - if (version != this._version) { - throw new Error('Unsupported version: ' + version); - } - - sources = sources.map(String) - // Some source maps produce relative source paths like "./foo.js" instead of - // "foo.js". Normalize these first so that future comparisons will succeed. - // See bugzil.la/1090768. - .map(util.normalize) - // Always ensure that absolute sources are internally stored relative to - // the source root, if the source root is absolute. Not doing this would - // be particularly problematic when the source root is a prefix of the - // source (valid, but why??). See github issue #199 and bugzil.la/1188982. - .map(function (source) { - return sourceRoot && util.isAbsolute(sourceRoot) && util.isAbsolute(source) ? util.relative(sourceRoot, source) : source; - }); - - // Pass `true` below to allow duplicate names and sources. While source maps - // are intended to be compressed and deduplicated, the TypeScript compiler - // sometimes generates source maps with duplicates in them. See Github issue - // #72 and bugzil.la/889492. - this._names = ArraySet.fromArray(names.map(String), true); - this._sources = ArraySet.fromArray(sources, true); - - this.sourceRoot = sourceRoot; - this.sourcesContent = sourcesContent; - this._mappings = mappings; - this.file = file; - } - - BasicSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype); - BasicSourceMapConsumer.prototype.consumer = SourceMapConsumer; - - /** - * Create a BasicSourceMapConsumer from a SourceMapGenerator. - * - * @param SourceMapGenerator aSourceMap - * The source map that will be consumed. 
- * @returns BasicSourceMapConsumer - */ - BasicSourceMapConsumer.fromSourceMap = function SourceMapConsumer_fromSourceMap(aSourceMap) { - var smc = Object.create(BasicSourceMapConsumer.prototype); - - var names = smc._names = ArraySet.fromArray(aSourceMap._names.toArray(), true); - var sources = smc._sources = ArraySet.fromArray(aSourceMap._sources.toArray(), true); - smc.sourceRoot = aSourceMap._sourceRoot; - smc.sourcesContent = aSourceMap._generateSourcesContent(smc._sources.toArray(), smc.sourceRoot); - smc.file = aSourceMap._file; - - // Because we are modifying the entries (by converting string sources and - // names to indices into the sources and names ArraySets), we have to make - // a copy of the entry or else bad things happen. Shared mutable state - // strikes again! See github issue #191. - - var generatedMappings = aSourceMap._mappings.toArray().slice(); - var destGeneratedMappings = smc.__generatedMappings = []; - var destOriginalMappings = smc.__originalMappings = []; - - for (var i = 0, length = generatedMappings.length; i < length; i++) { - var srcMapping = generatedMappings[i]; - var destMapping = new Mapping(); - destMapping.generatedLine = srcMapping.generatedLine; - destMapping.generatedColumn = srcMapping.generatedColumn; - - if (srcMapping.source) { - destMapping.source = sources.indexOf(srcMapping.source); - destMapping.originalLine = srcMapping.originalLine; - destMapping.originalColumn = srcMapping.originalColumn; - - if (srcMapping.name) { - destMapping.name = names.indexOf(srcMapping.name); - } - - destOriginalMappings.push(destMapping); - } - - destGeneratedMappings.push(destMapping); - } - - quickSort(smc.__originalMappings, util.compareByOriginalPositions); - - return smc; - }; - - /** - * The version of the source mapping spec that we are consuming. - */ - BasicSourceMapConsumer.prototype._version = 3; - - /** - * The list of original sources. - */ - Object.defineProperty(BasicSourceMapConsumer.prototype, 'sources', { - get: function () { - return this._sources.toArray().map(function (s) { - return this.sourceRoot != null ? util.join(this.sourceRoot, s) : s; - }, this); - } - }); - - /** - * Provide the JIT with a nice shape / hidden class. - */ - function Mapping() { - this.generatedLine = 0; - this.generatedColumn = 0; - this.source = null; - this.originalLine = null; - this.originalColumn = null; - this.name = null; - } - - /** - * Parse the mappings in a string in to a data structure which we can easily - * query (the ordered arrays in the `this.__generatedMappings` and - * `this.__originalMappings` properties). - */ - BasicSourceMapConsumer.prototype._parseMappings = function SourceMapConsumer_parseMappings(aStr, aSourceRoot) { - var generatedLine = 1; - var previousGeneratedColumn = 0; - var previousOriginalLine = 0; - var previousOriginalColumn = 0; - var previousSource = 0; - var previousName = 0; - var length = aStr.length; - var index = 0; - var cachedSegments = {}; - var temp = {}; - var originalMappings = []; - var generatedMappings = []; - var mapping, str, segment, end, value; - - while (index < length) { - if (aStr.charAt(index) === ';') { - generatedLine++; - index++; - previousGeneratedColumn = 0; - } else if (aStr.charAt(index) === ',') { - index++; - } else { - mapping = new Mapping(); - mapping.generatedLine = generatedLine; - - // Because each offset is encoded relative to the previous one, - // many segments often have the same encoding. 
We can exploit this - // fact by caching the parsed variable length fields of each segment, - // allowing us to avoid a second parse if we encounter the same - // segment again. - for (end = index; end < length; end++) { - if (this._charIsMappingSeparator(aStr, end)) { - break; - } - } - str = aStr.slice(index, end); - - segment = cachedSegments[str]; - if (segment) { - index += str.length; - } else { - segment = []; - while (index < end) { - base64VLQ.decode(aStr, index, temp); - value = temp.value; - index = temp.rest; - segment.push(value); - } - - if (segment.length === 2) { - throw new Error('Found a source, but no line and column'); - } - - if (segment.length === 3) { - throw new Error('Found a source and line, but no column'); - } - - cachedSegments[str] = segment; - } - - // Generated column. - mapping.generatedColumn = previousGeneratedColumn + segment[0]; - previousGeneratedColumn = mapping.generatedColumn; - - if (segment.length > 1) { - // Original source. - mapping.source = previousSource + segment[1]; - previousSource += segment[1]; - - // Original line. - mapping.originalLine = previousOriginalLine + segment[2]; - previousOriginalLine = mapping.originalLine; - // Lines are stored 0-based - mapping.originalLine += 1; - - // Original column. - mapping.originalColumn = previousOriginalColumn + segment[3]; - previousOriginalColumn = mapping.originalColumn; - - if (segment.length > 4) { - // Original name. - mapping.name = previousName + segment[4]; - previousName += segment[4]; - } - } - - generatedMappings.push(mapping); - if (typeof mapping.originalLine === 'number') { - originalMappings.push(mapping); - } - } - } - - quickSort(generatedMappings, util.compareByGeneratedPositionsDeflated); - this.__generatedMappings = generatedMappings; - - quickSort(originalMappings, util.compareByOriginalPositions); - this.__originalMappings = originalMappings; - }; - - /** - * Find the mapping that best matches the hypothetical "needle" mapping that - * we are searching for in the given "haystack" of mappings. - */ - BasicSourceMapConsumer.prototype._findMapping = function SourceMapConsumer_findMapping(aNeedle, aMappings, aLineName, aColumnName, aComparator, aBias) { - // To return the position we are searching for, we must first find the - // mapping for the given position and then return the opposite position it - // points to. Because the mappings are sorted, we can use binary search to - // find the best mapping. - - if (aNeedle[aLineName] <= 0) { - throw new TypeError('Line must be greater than or equal to 1, got ' + aNeedle[aLineName]); - } - if (aNeedle[aColumnName] < 0) { - throw new TypeError('Column must be greater than or equal to 0, got ' + aNeedle[aColumnName]); - } - - return binarySearch.search(aNeedle, aMappings, aComparator, aBias); - }; - - /** - * Compute the last column for each generated mapping. The last column is - * inclusive. - */ - BasicSourceMapConsumer.prototype.computeColumnSpans = function SourceMapConsumer_computeColumnSpans() { - for (var index = 0; index < this._generatedMappings.length; ++index) { - var mapping = this._generatedMappings[index]; - - // Mappings do not contain a field for the last generated columnt. We - // can come up with an optimistic estimate, however, by assuming that - // mappings are contiguous (i.e. given two consecutive mappings, the - // first mapping ends where the second one starts). 
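The delta decoding performed by `_parseMappings` can be observed through the public API. This sketch assumes the standalone source-map package with its 0.5.x-style synchronous constructor and a hand-written two-segment mappings string:

var SourceMapConsumer = require('source-map').SourceMapConsumer; // assumes source-map 0.5.x

// "AAAA,SAAS": two segments on one generated line. Each field is a delta against the
// previous segment: [generatedColumn, sourceIndex, originalLine, originalColumn].
var consumer = new SourceMapConsumer({
  version: 3,
  file: 'out.js',
  sources: ['foo.js'],
  names: [],
  mappings: 'AAAA,SAAS'
});

consumer.eachMapping(function (m) {
  console.log(m.generatedLine, m.generatedColumn, '->', m.source, m.originalLine, m.originalColumn);
});
// 1 0 -> foo.js 1 0   (first segment: all deltas are zero)
// 1 9 -> foo.js 1 9   ('S' decodes to +9 for both the generated and original column)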
- if (index + 1 < this._generatedMappings.length) { - var nextMapping = this._generatedMappings[index + 1]; - - if (mapping.generatedLine === nextMapping.generatedLine) { - mapping.lastGeneratedColumn = nextMapping.generatedColumn - 1; - continue; - } - } - - // The last mapping for each line spans the entire line. - mapping.lastGeneratedColumn = Infinity; - } - }; - - /** - * Returns the original source, line, and column information for the generated - * source's line and column positions provided. The only argument is an object - * with the following properties: - * - * - line: The line number in the generated source. - * - column: The column number in the generated source. - * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or - * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the - * closest element that is smaller than or greater than the one we are - * searching for, respectively, if the exact element cannot be found. - * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. - * - * and an object is returned with the following properties: - * - * - source: The original source file, or null. - * - line: The line number in the original source, or null. - * - column: The column number in the original source, or null. - * - name: The original identifier, or null. - */ - BasicSourceMapConsumer.prototype.originalPositionFor = function SourceMapConsumer_originalPositionFor(aArgs) { - var needle = { - generatedLine: util.getArg(aArgs, 'line'), - generatedColumn: util.getArg(aArgs, 'column') - }; - - var index = this._findMapping(needle, this._generatedMappings, "generatedLine", "generatedColumn", util.compareByGeneratedPositionsDeflated, util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)); - - if (index >= 0) { - var mapping = this._generatedMappings[index]; - - if (mapping.generatedLine === needle.generatedLine) { - var source = util.getArg(mapping, 'source', null); - if (source !== null) { - source = this._sources.at(source); - if (this.sourceRoot != null) { - source = util.join(this.sourceRoot, source); - } - } - var name = util.getArg(mapping, 'name', null); - if (name !== null) { - name = this._names.at(name); - } - return { - source: source, - line: util.getArg(mapping, 'originalLine', null), - column: util.getArg(mapping, 'originalColumn', null), - name: name - }; - } - } - - return { - source: null, - line: null, - column: null, - name: null - }; - }; - - /** - * Return true if we have the source content for every source in the source - * map, false otherwise. - */ - BasicSourceMapConsumer.prototype.hasContentsOfAllSources = function BasicSourceMapConsumer_hasContentsOfAllSources() { - if (!this.sourcesContent) { - return false; - } - return this.sourcesContent.length >= this._sources.size() && !this.sourcesContent.some(function (sc) { - return sc == null; - }); - }; - - /** - * Returns the original source content. The only argument is the url of the - * original source file. Returns null if no original source content is - * available. 
- */ - BasicSourceMapConsumer.prototype.sourceContentFor = function SourceMapConsumer_sourceContentFor(aSource, nullOnMissing) { - if (!this.sourcesContent) { - return null; - } - - if (this.sourceRoot != null) { - aSource = util.relative(this.sourceRoot, aSource); - } - - if (this._sources.has(aSource)) { - return this.sourcesContent[this._sources.indexOf(aSource)]; - } - - var url; - if (this.sourceRoot != null && (url = util.urlParse(this.sourceRoot))) { - // XXX: file:// URIs and absolute paths lead to unexpected behavior for - // many users. We can help them out when they expect file:// URIs to - // behave like it would if they were running a local HTTP server. See - // https://bugzilla.mozilla.org/show_bug.cgi?id=885597. - var fileUriAbsPath = aSource.replace(/^file:\/\//, ""); - if (url.scheme == "file" && this._sources.has(fileUriAbsPath)) { - return this.sourcesContent[this._sources.indexOf(fileUriAbsPath)]; - } - - if ((!url.path || url.path == "/") && this._sources.has("/" + aSource)) { - return this.sourcesContent[this._sources.indexOf("/" + aSource)]; - } - } - - // This function is used recursively from - // IndexedSourceMapConsumer.prototype.sourceContentFor. In that case, we - // don't want to throw if we can't find the source - we just want to - // return null, so we provide a flag to exit gracefully. - if (nullOnMissing) { - return null; - } else { - throw new Error('"' + aSource + '" is not in the SourceMap.'); - } - }; - - /** - * Returns the generated line and column information for the original source, - * line, and column positions provided. The only argument is an object with - * the following properties: - * - * - source: The filename of the original source. - * - line: The line number in the original source. - * - column: The column number in the original source. - * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or - * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the - * closest element that is smaller than or greater than the one we are - * searching for, respectively, if the exact element cannot be found. - * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. - * - * and an object is returned with the following properties: - * - * - line: The line number in the generated source, or null. - * - column: The column number in the generated source, or null. 
- */ - BasicSourceMapConsumer.prototype.generatedPositionFor = function SourceMapConsumer_generatedPositionFor(aArgs) { - var source = util.getArg(aArgs, 'source'); - if (this.sourceRoot != null) { - source = util.relative(this.sourceRoot, source); - } - if (!this._sources.has(source)) { - return { - line: null, - column: null, - lastColumn: null - }; - } - source = this._sources.indexOf(source); - - var needle = { - source: source, - originalLine: util.getArg(aArgs, 'line'), - originalColumn: util.getArg(aArgs, 'column') - }; - - var index = this._findMapping(needle, this._originalMappings, "originalLine", "originalColumn", util.compareByOriginalPositions, util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)); - - if (index >= 0) { - var mapping = this._originalMappings[index]; - - if (mapping.source === needle.source) { - return { - line: util.getArg(mapping, 'generatedLine', null), - column: util.getArg(mapping, 'generatedColumn', null), - lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) - }; - } - } - - return { - line: null, - column: null, - lastColumn: null - }; - }; - - exports.BasicSourceMapConsumer = BasicSourceMapConsumer; - - /** - * An IndexedSourceMapConsumer instance represents a parsed source map which - * we can query for information. It differs from BasicSourceMapConsumer in - * that it takes "indexed" source maps (i.e. ones with a "sections" field) as - * input. - * - * The only parameter is a raw source map (either as a JSON string, or already - * parsed to an object). According to the spec for indexed source maps, they - * have the following attributes: - * - * - version: Which version of the source map spec this map is following. - * - file: Optional. The generated file this source map is associated with. - * - sections: A list of section definitions. - * - * Each value under the "sections" field has two fields: - * - offset: The offset into the original specified at which this section - * begins to apply, defined as an object with a "line" and "column" - * field. - * - map: A source map definition. This source map could also be indexed, - * but doesn't have to be. - * - * Instead of the "map" field, it's also possible to have a "url" field - * specifying a URL to retrieve a source map from, but that's currently - * unsupported. - * - * Here's an example source map, taken from the source map spec[0], but - * modified to omit a section which uses the "url" field. - * - * { - * version : 3, - * file: "app.js", - * sections: [{ - * offset: {line:100, column:10}, - * map: { - * version : 3, - * file: "section.js", - * sources: ["foo.js", "bar.js"], - * names: ["src", "maps", "are", "fun"], - * mappings: "AAAA,E;;ABCDE;" - * } - * }], - * } - * - * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.535es3xeprgt - */ - function IndexedSourceMapConsumer(aSourceMap) { - var sourceMap = aSourceMap; - if (typeof aSourceMap === 'string') { - sourceMap = JSON.parse(aSourceMap.replace(/^\)\]\}'/, '')); - } - - var version = util.getArg(sourceMap, 'version'); - var sections = util.getArg(sourceMap, 'sections'); - - if (version != this._version) { - throw new Error('Unsupported version: ' + version); - } - - this._sources = new ArraySet(); - this._names = new ArraySet(); - - var lastOffset = { - line: -1, - column: 0 - }; - this._sections = sections.map(function (s) { - if (s.url) { - // The url field will require support for asynchronicity. 
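Putting the two query directions together, a usage sketch against the same tiny map (again assuming the 0.5.x-style synchronous SourceMapConsumer):

var SourceMapConsumer = require('source-map').SourceMapConsumer; // assumes source-map 0.5.x

var consumer = new SourceMapConsumer({
  version: 3,
  file: 'out.js',
  sources: ['foo.js'],
  names: [],
  mappings: 'AAAA,SAAS'
});

// Generated -> original; GREATEST_LOWER_BOUND is the default bias, so column 12
// falls back to the closest preceding mapping at column 9.
console.log(consumer.originalPositionFor({ line: 1, column: 12 }));
// { source: 'foo.js', line: 1, column: 9, name: null }

// Original -> generated; lastColumn stays null until computeColumnSpans() is called.
console.log(consumer.generatedPositionFor({ source: 'foo.js', line: 1, column: 0 }));
// { line: 1, column: 0, lastColumn: null }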
- // See https://github.com/mozilla/source-map/issues/16 - throw new Error('Support for url field in sections not implemented.'); - } - var offset = util.getArg(s, 'offset'); - var offsetLine = util.getArg(offset, 'line'); - var offsetColumn = util.getArg(offset, 'column'); - - if (offsetLine < lastOffset.line || offsetLine === lastOffset.line && offsetColumn < lastOffset.column) { - throw new Error('Section offsets must be ordered and non-overlapping.'); - } - lastOffset = offset; - - return { - generatedOffset: { - // The offset fields are 0-based, but we use 1-based indices when - // encoding/decoding from VLQ. - generatedLine: offsetLine + 1, - generatedColumn: offsetColumn + 1 - }, - consumer: new SourceMapConsumer(util.getArg(s, 'map')) - }; - }); - } - - IndexedSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype); - IndexedSourceMapConsumer.prototype.constructor = SourceMapConsumer; - - /** - * The version of the source mapping spec that we are consuming. - */ - IndexedSourceMapConsumer.prototype._version = 3; - - /** - * The list of original sources. - */ - Object.defineProperty(IndexedSourceMapConsumer.prototype, 'sources', { - get: function () { - var sources = []; - for (var i = 0; i < this._sections.length; i++) { - for (var j = 0; j < this._sections[i].consumer.sources.length; j++) { - sources.push(this._sections[i].consumer.sources[j]); - } - } - return sources; - } - }); - - /** - * Returns the original source, line, and column information for the generated - * source's line and column positions provided. The only argument is an object - * with the following properties: - * - * - line: The line number in the generated source. - * - column: The column number in the generated source. - * - * and an object is returned with the following properties: - * - * - source: The original source file, or null. - * - line: The line number in the original source, or null. - * - column: The column number in the original source, or null. - * - name: The original identifier, or null. - */ - IndexedSourceMapConsumer.prototype.originalPositionFor = function IndexedSourceMapConsumer_originalPositionFor(aArgs) { - var needle = { - generatedLine: util.getArg(aArgs, 'line'), - generatedColumn: util.getArg(aArgs, 'column') - }; - - // Find the section containing the generated position we're trying to map - // to an original position. - var sectionIndex = binarySearch.search(needle, this._sections, function (needle, section) { - var cmp = needle.generatedLine - section.generatedOffset.generatedLine; - if (cmp) { - return cmp; - } - - return needle.generatedColumn - section.generatedOffset.generatedColumn; - }); - var section = this._sections[sectionIndex]; - - if (!section) { - return { - source: null, - line: null, - column: null, - name: null - }; - } - - return section.consumer.originalPositionFor({ - line: needle.generatedLine - (section.generatedOffset.generatedLine - 1), - column: needle.generatedColumn - (section.generatedOffset.generatedLine === needle.generatedLine ? section.generatedOffset.generatedColumn - 1 : 0), - bias: aArgs.bias - }); - }; - - /** - * Return true if we have the source content for every source in the source - * map, false otherwise. - */ - IndexedSourceMapConsumer.prototype.hasContentsOfAllSources = function IndexedSourceMapConsumer_hasContentsOfAllSources() { - return this._sections.every(function (s) { - return s.consumer.hasContentsOfAllSources(); - }); - }; - - /** - * Returns the original source content. 
The only argument is the url of the - * original source file. Returns null if no original source content is - * available. - */ - IndexedSourceMapConsumer.prototype.sourceContentFor = function IndexedSourceMapConsumer_sourceContentFor(aSource, nullOnMissing) { - for (var i = 0; i < this._sections.length; i++) { - var section = this._sections[i]; - - var content = section.consumer.sourceContentFor(aSource, true); - if (content) { - return content; - } - } - if (nullOnMissing) { - return null; - } else { - throw new Error('"' + aSource + '" is not in the SourceMap.'); - } - }; - - /** - * Returns the generated line and column information for the original source, - * line, and column positions provided. The only argument is an object with - * the following properties: - * - * - source: The filename of the original source. - * - line: The line number in the original source. - * - column: The column number in the original source. - * - * and an object is returned with the following properties: - * - * - line: The line number in the generated source, or null. - * - column: The column number in the generated source, or null. - */ - IndexedSourceMapConsumer.prototype.generatedPositionFor = function IndexedSourceMapConsumer_generatedPositionFor(aArgs) { - for (var i = 0; i < this._sections.length; i++) { - var section = this._sections[i]; - - // Only consider this section if the requested source is in the list of - // sources of the consumer. - if (!section.consumer.sources.includes(util.getArg(aArgs, 'source'))) { - continue; - } - var generatedPosition = section.consumer.generatedPositionFor(aArgs); - if (generatedPosition) { - var ret = { - line: generatedPosition.line + (section.generatedOffset.generatedLine - 1), - column: generatedPosition.column + (section.generatedOffset.generatedLine === generatedPosition.line ? section.generatedOffset.generatedColumn - 1 : 0) - }; - return ret; - } - } - - return { - line: null, - column: null - }; - }; - - /** - * Parse the mappings in a string in to a data structure which we can easily - * query (the ordered arrays in the `this.__generatedMappings` and - * `this.__originalMappings` properties). - */ - IndexedSourceMapConsumer.prototype._parseMappings = function IndexedSourceMapConsumer_parseMappings(aStr, aSourceRoot) { - this.__generatedMappings = []; - this.__originalMappings = []; - for (var i = 0; i < this._sections.length; i++) { - var section = this._sections[i]; - var sectionMappings = section.consumer._generatedMappings; - for (var j = 0; j < sectionMappings.length; j++) { - var mapping = sectionMappings[j]; - - var source = section.consumer._sources.at(mapping.source); - if (section.consumer.sourceRoot !== null) { - source = util.join(section.consumer.sourceRoot, source); - } - this._sources.add(source); - source = this._sources.indexOf(source); - - var name = section.consumer._names.at(mapping.name); - this._names.add(name); - name = this._names.indexOf(name); - - // The mappings coming from the consumer for the section have - // generated positions relative to the start of the section, so we - // need to offset them to be relative to the start of the concatenated - // generated file. - var adjustedMapping = { - source: source, - generatedLine: mapping.generatedLine + (section.generatedOffset.generatedLine - 1), - generatedColumn: mapping.generatedColumn + (section.generatedOffset.generatedLine === mapping.generatedLine ? 
section.generatedOffset.generatedColumn - 1 : 0), - originalLine: mapping.originalLine, - originalColumn: mapping.originalColumn, - name: name - }; - - this.__generatedMappings.push(adjustedMapping); - if (typeof adjustedMapping.originalLine === 'number') { - this.__originalMappings.push(adjustedMapping); - } - } - } - - quickSort(this.__generatedMappings, util.compareByGeneratedPositionsDeflated); - quickSort(this.__originalMappings, util.compareByOriginalPositions); - }; - - exports.IndexedSourceMapConsumer = IndexedSourceMapConsumer; - -/***/ }, -/* 49 */ -/***/ function(module, exports) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - exports.GREATEST_LOWER_BOUND = 1; - exports.LEAST_UPPER_BOUND = 2; - - /** - * Recursive implementation of binary search. - * - * @param aLow Indices here and lower do not contain the needle. - * @param aHigh Indices here and higher do not contain the needle. - * @param aNeedle The element being searched for. - * @param aHaystack The non-empty array being searched. - * @param aCompare Function which takes two elements and returns -1, 0, or 1. - * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or - * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the - * closest element that is smaller than or greater than the one we are - * searching for, respectively, if the exact element cannot be found. - */ - function recursiveSearch(aLow, aHigh, aNeedle, aHaystack, aCompare, aBias) { - // This function terminates when one of the following is true: - // - // 1. We find the exact element we are looking for. - // - // 2. We did not find the exact element, but we can return the index of - // the next-closest element. - // - // 3. We did not find the exact element, and there is no next-closest - // element than the one we are searching for, so we return -1. - var mid = Math.floor((aHigh - aLow) / 2) + aLow; - var cmp = aCompare(aNeedle, aHaystack[mid], true); - if (cmp === 0) { - // Found the element we are looking for. - return mid; - } else if (cmp > 0) { - // Our needle is greater than aHaystack[mid]. - if (aHigh - mid > 1) { - // The element is in the upper half. - return recursiveSearch(mid, aHigh, aNeedle, aHaystack, aCompare, aBias); - } - - // The exact needle element was not found in this haystack. Determine if - // we are in termination case (3) or (2) and return the appropriate thing. - if (aBias == exports.LEAST_UPPER_BOUND) { - return aHigh < aHaystack.length ? aHigh : -1; - } else { - return mid; - } - } else { - // Our needle is less than aHaystack[mid]. - if (mid - aLow > 1) { - // The element is in the lower half. - return recursiveSearch(aLow, mid, aNeedle, aHaystack, aCompare, aBias); - } - - // we are in termination case (3) or (2) and return the appropriate thing. - if (aBias == exports.LEAST_UPPER_BOUND) { - return mid; - } else { - return aLow < 0 ? -1 : aLow; - } - } - } - - /** - * This is an implementation of binary search which will always try and return - * the index of the closest element if there is no exact hit. This is because - * mappings between original and generated line/col pairs are single points, - * and there is an implicit region between each of them, so a miss just means - * that you aren't on the very start of a region. - * - * @param aNeedle The element you are looking for. - * @param aHaystack The array that is being searched. 
- * @param aCompare A function which takes the needle and an element in the - * array and returns -1, 0, or 1 depending on whether the needle is less - * than, equal to, or greater than the element, respectively. - * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or - * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the - * closest element that is smaller than or greater than the one we are - * searching for, respectively, if the exact element cannot be found. - * Defaults to 'binarySearch.GREATEST_LOWER_BOUND'. - */ - exports.search = function search(aNeedle, aHaystack, aCompare, aBias) { - if (aHaystack.length === 0) { - return -1; - } - - var index = recursiveSearch(-1, aHaystack.length, aNeedle, aHaystack, aCompare, aBias || exports.GREATEST_LOWER_BOUND); - if (index < 0) { - return -1; - } - - // We have found either the exact element, or the next-closest element than - // the one we are searching for. However, there may be more than one such - // element. Make sure we always return the smallest of these. - while (index - 1 >= 0) { - if (aCompare(aHaystack[index], aHaystack[index - 1], true) !== 0) { - break; - } - --index; - } - - return index; - }; - -/***/ }, -/* 50 */ -/***/ function(module, exports) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - // It turns out that some (most?) JavaScript engines don't self-host - // `Array.prototype.sort`. This makes sense because C++ will likely remain - // faster than JS when doing raw CPU-intensive sorting. However, when using a - // custom comparator function, calling back and forth between the VM's C++ and - // JIT'd JS is rather slow *and* loses JIT type information, resulting in - // worse generated code for the comparator function than would be optimal. In - // fact, when sorting with a comparator, these costs outweigh the benefits of - // sorting in C++. By using our own JS-implemented Quick Sort (below), we get - // a ~3500ms mean speed-up in `bench/bench.html`. - - /** - * Swap the elements indexed by `x` and `y` in the array `ary`. - * - * @param {Array} ary - * The array. - * @param {Number} x - * The index of the first item. - * @param {Number} y - * The index of the second item. - */ - function swap(ary, x, y) { - var temp = ary[x]; - ary[x] = ary[y]; - ary[y] = temp; - } - - /** - * Returns a random integer within the range `low .. high` inclusive. - * - * @param {Number} low - * The lower bound on the range. - * @param {Number} high - * The upper bound on the range. - */ - function randomIntInRange(low, high) { - return Math.round(low + Math.random() * (high - low)); - } - - /** - * The Quick Sort algorithm. - * - * @param {Array} ary - * An array to sort. - * @param {function} comparator - * Function to use to compare two items. - * @param {Number} p - * Start index of the array - * @param {Number} r - * End index of the array - */ - function doQuickSort(ary, comparator, p, r) { - // If our lower bound is less than our upper bound, we (1) partition the - // array into two pieces and (2) recurse on each half. If it is not, this is - // the empty array and our base case. - - if (p < r) { - // (1) Partitioning. - // - // The partitioning chooses a pivot between `p` and `r` and moves all - // elements that are less than or equal to the pivot to the before it, and - // all the elements that are greater than it after it. 
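The two bias constants are easiest to see on a plain sorted array. This standalone stand-in (a linear scan, not the library's binary search) reproduces the documented results for a needle that is not present:

// Sorted haystack of generated columns; the needle 25 has no exact match.
var haystack = [0, 10, 20, 30, 40];

function closestIndex(needle, haystack, leastUpperBound) {
  // GREATEST_LOWER_BOUND: largest element <= needle; LEAST_UPPER_BOUND: smallest element >= needle.
  var idx = -1;
  for (var i = 0; i < haystack.length; i++) {
    if (haystack[i] <= needle) idx = i;
    if (leastUpperBound && haystack[i] >= needle) return i;
  }
  return idx;
}

console.log(haystack[closestIndex(25, haystack, false)]); // 20: greatest lower bound
console.log(haystack[closestIndex(25, haystack, true)]);  // 30: least upper bound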
The effect is that - // once partition is done, the pivot is in the exact place it will be when - // the array is put in sorted order, and it will not need to be moved - // again. This runs in O(n) time. - - // Always choose a random pivot so that an input array which is reverse - // sorted does not cause O(n^2) running time. - var pivotIndex = randomIntInRange(p, r); - var i = p - 1; - - swap(ary, pivotIndex, r); - var pivot = ary[r]; - - // Immediately after `j` is incremented in this loop, the following hold - // true: - // - // * Every element in `ary[p .. i]` is less than or equal to the pivot. - // - // * Every element in `ary[i+1 .. j-1]` is greater than the pivot. - for (var j = p; j < r; j++) { - if (comparator(ary[j], pivot) <= 0) { - i += 1; - swap(ary, i, j); - } - } - - swap(ary, i + 1, j); - var q = i + 1; - - // (2) Recurse on each half. - - doQuickSort(ary, comparator, p, q - 1); - doQuickSort(ary, comparator, q + 1, r); - } - } - - /** - * Sort the given array in-place with the given comparator function. - * - * @param {Array} ary - * An array to sort. - * @param {function} comparator - * Function to use to compare two items. - */ - exports.quickSort = function (ary, comparator) { - doQuickSort(ary, comparator, 0, ary.length - 1); - }; - -/***/ }, -/* 51 */ -/***/ function(module, exports, __webpack_require__) { - - /* -*- Mode: js; js-indent-level: 2; -*- */ - /* - * Copyright 2011 Mozilla Foundation and contributors - * Licensed under the New BSD license. See LICENSE or: - * http://opensource.org/licenses/BSD-3-Clause - */ - - var SourceMapGenerator = __webpack_require__(42).SourceMapGenerator; - var util = __webpack_require__(45); - - // Matches a Windows-style `\r\n` newline or a `\n` newline used by all other - // operating systems these days (capturing the result). - var REGEX_NEWLINE = /(\r?\n)/; - - // Newline character code for charCodeAt() comparisons - var NEWLINE_CODE = 10; - - // Private symbol for identifying `SourceNode`s when multiple versions of - // the source-map library are loaded. This MUST NOT CHANGE across - // versions! - var isSourceNode = "$$$isSourceNode$$$"; - - /** - * SourceNodes provide a way to abstract over interpolating/concatenating - * snippets of generated JavaScript source code while maintaining the line and - * column information associated with the original source code. - * - * @param aLine The original line number. - * @param aColumn The original column number. - * @param aSource The original source's filename. - * @param aChunks Optional. An array of strings which are snippets of - * generated JS, or other SourceNodes. - * @param aName The original identifier. - */ - function SourceNode(aLine, aColumn, aSource, aChunks, aName) { - this.children = []; - this.sourceContents = {}; - this.line = aLine == null ? null : aLine; - this.column = aColumn == null ? null : aColumn; - this.source = aSource == null ? null : aSource; - this.name = aName == null ? null : aName; - this[isSourceNode] = true; - if (aChunks != null) this.add(aChunks); - } - - /** - * Creates a SourceNode from generated code and a SourceMapConsumer. - * - * @param aGeneratedCode The generated code - * @param aSourceMapConsumer The SourceMap for the generated code - * @param aRelativePath Optional. The path that relative sources in the - * SourceMapConsumer should be relative to. 
- */ - SourceNode.fromStringWithSourceMap = function SourceNode_fromStringWithSourceMap(aGeneratedCode, aSourceMapConsumer, aRelativePath) { - // The SourceNode we want to fill with the generated code - // and the SourceMap - var node = new SourceNode(); - - // All even indices of this array are one line of the generated code, - // while all odd indices are the newlines between two adjacent lines - // (since `REGEX_NEWLINE` captures its match). - // Processed fragments are accessed by calling `shiftNextLine`. - var remainingLines = aGeneratedCode.split(REGEX_NEWLINE); - var remainingLinesIndex = 0; - var shiftNextLine = function () { - var lineContents = getNextLine(); - // The last line of a file might not have a newline. - var newLine = getNextLine() || ""; - return lineContents + newLine; - - function getNextLine() { - return remainingLinesIndex < remainingLines.length ? remainingLines[remainingLinesIndex++] : undefined; - } - }; - - // We need to remember the position of "remainingLines" - var lastGeneratedLine = 1, - lastGeneratedColumn = 0; - - // The generate SourceNodes we need a code range. - // To extract it current and last mapping is used. - // Here we store the last mapping. - var lastMapping = null; - - aSourceMapConsumer.eachMapping(function (mapping) { - if (lastMapping !== null) { - // We add the code from "lastMapping" to "mapping": - // First check if there is a new line in between. - if (lastGeneratedLine < mapping.generatedLine) { - // Associate first line with "lastMapping" - addMappingWithCode(lastMapping, shiftNextLine()); - lastGeneratedLine++; - lastGeneratedColumn = 0; - // The remaining code is added without mapping - } else { - // There is no new line in between. - // Associate the code between "lastGeneratedColumn" and - // "mapping.generatedColumn" with "lastMapping" - var nextLine = remainingLines[remainingLinesIndex]; - var code = nextLine.substr(0, mapping.generatedColumn - lastGeneratedColumn); - remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn - lastGeneratedColumn); - lastGeneratedColumn = mapping.generatedColumn; - addMappingWithCode(lastMapping, code); - // No more remaining code, continue - lastMapping = mapping; - return; - } - } - // We add the generated code until the first mapping - // to the SourceNode without any mapping. - // Each line is added as separate string. - while (lastGeneratedLine < mapping.generatedLine) { - node.add(shiftNextLine()); - lastGeneratedLine++; - } - if (lastGeneratedColumn < mapping.generatedColumn) { - var nextLine = remainingLines[remainingLinesIndex]; - node.add(nextLine.substr(0, mapping.generatedColumn)); - remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn); - lastGeneratedColumn = mapping.generatedColumn; - } - lastMapping = mapping; - }, this); - // We have processed all mappings. 
- if (remainingLinesIndex < remainingLines.length) { - if (lastMapping) { - // Associate the remaining code in the current line with "lastMapping" - addMappingWithCode(lastMapping, shiftNextLine()); - } - // and add the remaining lines without any mapping - node.add(remainingLines.splice(remainingLinesIndex).join("")); - } - - // Copy sourcesContent into SourceNode - aSourceMapConsumer.sources.forEach(function (sourceFile) { - var content = aSourceMapConsumer.sourceContentFor(sourceFile); - if (content != null) { - if (aRelativePath != null) { - sourceFile = util.join(aRelativePath, sourceFile); - } - node.setSourceContent(sourceFile, content); - } - }); - - return node; - - function addMappingWithCode(mapping, code) { - if (mapping === null || mapping.source === undefined) { - node.add(code); - } else { - var source = aRelativePath ? util.join(aRelativePath, mapping.source) : mapping.source; - node.add(new SourceNode(mapping.originalLine, mapping.originalColumn, source, code, mapping.name)); - } - } - }; - - /** - * Add a chunk of generated JS to this source node. - * - * @param aChunk A string snippet of generated JS code, another instance of - * SourceNode, or an array where each member is one of those things. - */ - SourceNode.prototype.add = function SourceNode_add(aChunk) { - if (Array.isArray(aChunk)) { - aChunk.forEach(function (chunk) { - this.add(chunk); - }, this); - } else if (aChunk[isSourceNode] || typeof aChunk === "string") { - if (aChunk) { - this.children.push(aChunk); - } - } else { - throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk); - } - return this; - }; - - /** - * Add a chunk of generated JS to the beginning of this source node. - * - * @param aChunk A string snippet of generated JS code, another instance of - * SourceNode, or an array where each member is one of those things. - */ - SourceNode.prototype.prepend = function SourceNode_prepend(aChunk) { - if (Array.isArray(aChunk)) { - for (var i = aChunk.length - 1; i >= 0; i--) { - this.prepend(aChunk[i]); - } - } else if (aChunk[isSourceNode] || typeof aChunk === "string") { - this.children.unshift(aChunk); - } else { - throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk); - } - return this; - }; - - /** - * Walk over the tree of JS snippets in this node and its children. The - * walking function is called once for each snippet of JS and is passed that - * snippet and the its original associated source's line/column location. - * - * @param aFn The traversal function. - */ - SourceNode.prototype.walk = function SourceNode_walk(aFn) { - var chunk; - for (var i = 0, len = this.children.length; i < len; i++) { - chunk = this.children[i]; - if (chunk[isSourceNode]) { - chunk.walk(aFn); - } else { - if (chunk !== '') { - aFn(chunk, { source: this.source, - line: this.line, - column: this.column, - name: this.name }); - } - } - } - }; - - /** - * Like `String.prototype.join` except for SourceNodes. Inserts `aStr` between - * each of `this.children`. - * - * @param aSep The separator. 
- */ - SourceNode.prototype.join = function SourceNode_join(aSep) { - var newChildren; - var i; - var len = this.children.length; - if (len > 0) { - newChildren = []; - for (i = 0; i < len - 1; i++) { - newChildren.push(this.children[i]); - newChildren.push(aSep); - } - newChildren.push(this.children[i]); - this.children = newChildren; - } - return this; - }; - - /** - * Call String.prototype.replace on the very right-most source snippet. Useful - * for trimming whitespace from the end of a source node, etc. - * - * @param aPattern The pattern to replace. - * @param aReplacement The thing to replace the pattern with. - */ - SourceNode.prototype.replaceRight = function SourceNode_replaceRight(aPattern, aReplacement) { - var lastChild = this.children[this.children.length - 1]; - if (lastChild[isSourceNode]) { - lastChild.replaceRight(aPattern, aReplacement); - } else if (typeof lastChild === 'string') { - this.children[this.children.length - 1] = lastChild.replace(aPattern, aReplacement); - } else { - this.children.push(''.replace(aPattern, aReplacement)); - } - return this; - }; - - /** - * Set the source content for a source file. This will be added to the SourceMapGenerator - * in the sourcesContent field. - * - * @param aSourceFile The filename of the source file - * @param aSourceContent The content of the source file - */ - SourceNode.prototype.setSourceContent = function SourceNode_setSourceContent(aSourceFile, aSourceContent) { - this.sourceContents[util.toSetString(aSourceFile)] = aSourceContent; - }; - - /** - * Walk over the tree of SourceNodes. The walking function is called for each - * source file content and is passed the filename and source content. - * - * @param aFn The traversal function. - */ - SourceNode.prototype.walkSourceContents = function SourceNode_walkSourceContents(aFn) { - for (var i = 0, len = this.children.length; i < len; i++) { - if (this.children[i][isSourceNode]) { - this.children[i].walkSourceContents(aFn); - } - } - - var sources = Object.keys(this.sourceContents); - for (var i = 0, len = sources.length; i < len; i++) { - aFn(util.fromSetString(sources[i]), this.sourceContents[sources[i]]); - } - }; - - /** - * Return the string representation of this source node. Walks over the tree - * and concatenates all the various snippets together to one string. - */ - SourceNode.prototype.toString = function SourceNode_toString() { - var str = ""; - this.walk(function (chunk) { - str += chunk; - }); - return str; - }; - - /** - * Returns the string representation of this source node along with a source - * map. 
- */ - SourceNode.prototype.toStringWithSourceMap = function SourceNode_toStringWithSourceMap(aArgs) { - var generated = { - code: "", - line: 1, - column: 0 - }; - var map = new SourceMapGenerator(aArgs); - var sourceMappingActive = false; - var lastOriginalSource = null; - var lastOriginalLine = null; - var lastOriginalColumn = null; - var lastOriginalName = null; - this.walk(function (chunk, original) { - generated.code += chunk; - if (original.source !== null && original.line !== null && original.column !== null) { - if (lastOriginalSource !== original.source || lastOriginalLine !== original.line || lastOriginalColumn !== original.column || lastOriginalName !== original.name) { - map.addMapping({ - source: original.source, - original: { - line: original.line, - column: original.column - }, - generated: { - line: generated.line, - column: generated.column - }, - name: original.name - }); - } - lastOriginalSource = original.source; - lastOriginalLine = original.line; - lastOriginalColumn = original.column; - lastOriginalName = original.name; - sourceMappingActive = true; - } else if (sourceMappingActive) { - map.addMapping({ - generated: { - line: generated.line, - column: generated.column - } - }); - lastOriginalSource = null; - sourceMappingActive = false; - } - for (var idx = 0, length = chunk.length; idx < length; idx++) { - if (chunk.charCodeAt(idx) === NEWLINE_CODE) { - generated.line++; - generated.column = 0; - // Mappings end at eol - if (idx + 1 === length) { - lastOriginalSource = null; - sourceMappingActive = false; - } else if (sourceMappingActive) { - map.addMapping({ - source: original.source, - original: { - line: original.line, - column: original.column - }, - generated: { - line: generated.line, - column: generated.column - }, - name: original.name - }); - } - } else { - generated.column++; - } - } - }); - this.walkSourceContents(function (sourceFile, sourceContent) { - map.setSourceContent(sourceFile, sourceContent); - }); - - return { code: generated.code, map: map }; - }; - - exports.SourceNode = SourceNode; - -/***/ } -/******/ ]) +/***/ (function(module, exports, __webpack_require__) { + +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +const { + getOriginalURLs, + getGeneratedLocation, + getAllGeneratedLocations, + getOriginalLocation, + getOriginalSourceText, + getLocationScopes, + hasMappedSource, + applySourceMap +} = __webpack_require__(17); + +const { clearSourceMaps } = __webpack_require__(9); + +const { workerUtils: { workerHandler } } = __webpack_require__(1); + +// The interface is implemented in source-map to be +// easier to unit test. 
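// Editorial sketch (not part of this patch): `workerHandler` comes from the
// devtools workerUtils module required above; it is assumed to turn the map of
// functions registered just below into a postMessage dispatcher. The message
// shape ({ id, method, args }) used here is an assumption for illustration
// only and is not taken from the devtools code.
function illustrativeWorkerHandler(methods) {
  return function (msg) {
    var { id, method, args } = msg.data; // assumed message shape
    Promise.resolve()
      .then(function () { return methods[method].apply(null, args); })
      .then(function (response) { self.postMessage({ id: id, response: response }); })
      .catch(function (error) { self.postMessage({ id: id, error: error.message }); });
  };
}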
+self.onmessage = workerHandler({ + getOriginalURLs, + getGeneratedLocation, + getAllGeneratedLocations, + getOriginalLocation, + getLocationScopes, + getOriginalSourceText, + hasMappedSource, + applySourceMap, + clearSourceMaps }); -; \ No newline at end of file + +/***/ }), +/* 17 */ +/***/ (function(module, exports, __webpack_require__) { + +let getOriginalURLs = (() => { + var _ref = _asyncToGenerator(function* (generatedSource) { + const map = yield fetchSourceMap(generatedSource); + return map && map.sources; + }); + + return function getOriginalURLs(_x) { + return _ref.apply(this, arguments); + }; +})(); + +let getGeneratedLocation = (() => { + var _ref2 = _asyncToGenerator(function* (location, originalSource) { + if (!isOriginalId(location.sourceId)) { + return location; + } + + const generatedSourceId = originalToGeneratedId(location.sourceId); + const map = yield getSourceMap(generatedSourceId); + if (!map) { + return location; + } + + const { line, column } = map.generatedPositionFor({ + source: originalSource.url, + line: location.line, + column: location.column == null ? 0 : location.column, + bias: SourceMapConsumer.LEAST_UPPER_BOUND + }); + + return { + sourceId: generatedSourceId, + line, + column + }; + }); + + return function getGeneratedLocation(_x2, _x3) { + return _ref2.apply(this, arguments); + }; +})(); + +let getAllGeneratedLocations = (() => { + var _ref3 = _asyncToGenerator(function* (location, originalSource) { + if (!isOriginalId(location.sourceId)) { + return []; + } + + const generatedSourceId = originalToGeneratedId(location.sourceId); + const map = yield getSourceMap(generatedSourceId); + if (!map) { + return []; + } + + const positions = map.allGeneratedPositionsFor({ + source: originalSource.url, + line: location.line, + column: location.column == null ? 0 : location.column, + bias: SourceMapConsumer.LEAST_UPPER_BOUND + }); + + return positions.map(function ({ line, column }) { + return { + sourceId: generatedSourceId, + line, + column + }; + }); + }); + + return function getAllGeneratedLocations(_x4, _x5) { + return _ref3.apply(this, arguments); + }; +})(); + +let getOriginalLocation = (() => { + var _ref4 = _asyncToGenerator(function* (location) { + if (!isGeneratedId(location.sourceId)) { + return location; + } + + const map = yield getSourceMap(location.sourceId); + if (!map) { + return location; + } + + const { source: sourceUrl, line, column } = map.originalPositionFor({ + line: location.line, + column: location.column == null ? 0 : location.column + }); + + if (sourceUrl == null) { + // No url means the location didn't map. 
+ return location; + } + + return { + sourceId: generatedToOriginalId(location.sourceId, sourceUrl), + sourceUrl, + line, + column + }; + }); + + return function getOriginalLocation(_x6) { + return _ref4.apply(this, arguments); + }; +})(); + +let getOriginalSourceText = (() => { + var _ref5 = _asyncToGenerator(function* (originalSource) { + assert(isOriginalId(originalSource.id), "Source is not an original source"); + + const generatedSourceId = originalToGeneratedId(originalSource.id); + const map = yield getSourceMap(generatedSourceId); + if (!map) { + return null; + } + + let text = map.sourceContentFor(originalSource.url); + if (!text) { + text = (yield networkRequest(originalSource.url, { loadFromCache: false })).content; + } + + return { + text, + contentType: getContentType(originalSource.url || "") + }; + }); + + return function getOriginalSourceText(_x7) { + return _ref5.apply(this, arguments); + }; +})(); + +let hasMappedSource = (() => { + var _ref6 = _asyncToGenerator(function* (location) { + if (isOriginalId(location.sourceId)) { + return true; + } + + const loc = yield getOriginalLocation(location); + return loc.sourceId !== location.sourceId; + }); + + return function hasMappedSource(_x8) { + return _ref6.apply(this, arguments); + }; +})(); + +function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; } + +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Source Map Worker + * @module utils/source-map-worker + */ + +const { networkRequest } = __webpack_require__(1); +const { SourceMapConsumer, SourceMapGenerator } = __webpack_require__(10); + +const assert = __webpack_require__(24); +const { fetchSourceMap } = __webpack_require__(25); +const { + getSourceMap, + setSourceMap, + clearSourceMaps +} = __webpack_require__(9); +const { + originalToGeneratedId, + generatedToOriginalId, + isGeneratedId, + isOriginalId, + getContentType +} = __webpack_require__(3); + +const { WasmRemap } = __webpack_require__(14); + +function applySourceMap(generatedId, url, code, mappings) { + const generator = new SourceMapGenerator({ file: url }); + mappings.forEach(mapping => generator.addMapping(mapping)); + generator.setSourceContent(url, code); + + const map = SourceMapConsumer(generator.toJSON()); + setSourceMap(generatedId, Promise.resolve(map)); +} + +module.exports = { + getOriginalURLs, + getGeneratedLocation, + getAllGeneratedLocations, + getOriginalLocation, + getOriginalSourceText, + applySourceMap, + clearSourceMaps, + hasMappedSource +}; + +/***/ }), +/* 18 */ +/***/ (function(module, exports) { + +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ + +var intToCharMap = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'.split(''); + +/** + * Encode an integer in the range of 0 to 63 to a single base 64 digit. 
+ */ +exports.encode = function (number) { + if (0 <= number && number < intToCharMap.length) { + return intToCharMap[number]; + } + throw new TypeError("Must be between 0 and 63: " + number); +}; + +/** + * Decode a single base 64 character code digit to an integer. Returns -1 on + * failure. + */ +exports.decode = function (charCode) { + var bigA = 65; // 'A' + var bigZ = 90; // 'Z' + + var littleA = 97; // 'a' + var littleZ = 122; // 'z' + + var zero = 48; // '0' + var nine = 57; // '9' + + var plus = 43; // '+' + var slash = 47; // '/' + + var littleOffset = 26; + var numberOffset = 52; + + // 0 - 25: ABCDEFGHIJKLMNOPQRSTUVWXYZ + if (bigA <= charCode && charCode <= bigZ) { + return (charCode - bigA); + } + + // 26 - 51: abcdefghijklmnopqrstuvwxyz + if (littleA <= charCode && charCode <= littleZ) { + return (charCode - littleA + littleOffset); + } + + // 52 - 61: 0123456789 + if (zero <= charCode && charCode <= nine) { + return (charCode - zero + numberOffset); + } + + // 62: + + if (charCode == plus) { + return 62; + } + + // 63: / + if (charCode == slash) { + return 63; + } + + // Invalid base64 digit. + return -1; +}; + + +/***/ }), +/* 19 */ +/***/ (function(module, exports, __webpack_require__) { + +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2014 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ + +var util = __webpack_require__(2); + +/** + * Determine whether mappingB is after mappingA with respect to generated + * position. + */ +function generatedPositionAfter(mappingA, mappingB) { + // Optimized for most common case + var lineA = mappingA.generatedLine; + var lineB = mappingB.generatedLine; + var columnA = mappingA.generatedColumn; + var columnB = mappingB.generatedColumn; + return lineB > lineA || lineB == lineA && columnB >= columnA || + util.compareByGeneratedPositionsInflated(mappingA, mappingB) <= 0; +} + +/** + * A data structure to provide a sorted view of accumulated mappings in a + * performance conscious manner. It trades a neglibable overhead in general + * case for a large speedup in case of mappings being added in order. + */ +function MappingList() { + this._array = []; + this._sorted = true; + // Serves as infimum + this._last = {generatedLine: -1, generatedColumn: 0}; +} + +/** + * Iterate through internal items. This method takes the same arguments that + * `Array.prototype.forEach` takes. + * + * NOTE: The order of the mappings is NOT guaranteed. + */ +MappingList.prototype.unsortedForEach = + function MappingList_forEach(aCallback, aThisArg) { + this._array.forEach(aCallback, aThisArg); + }; + +/** + * Add the given source mapping. + * + * @param Object aMapping + */ +MappingList.prototype.add = function MappingList_add(aMapping) { + if (generatedPositionAfter(this._last, aMapping)) { + this._last = aMapping; + this._array.push(aMapping); + } else { + this._sorted = false; + this._array.push(aMapping); + } +}; + +/** + * Returns the flat, sorted array of mappings. The mappings are sorted by + * generated position. + * + * WARNING: This method returns internal data without copying, for + * performance. The return value must NOT be mutated, and should be treated as + * an immutable borrow. If you want to take ownership, you must make your own + * copy. 
+ */ +MappingList.prototype.toArray = function MappingList_toArray() { + if (!this._sorted) { + this._array.sort(util.compareByGeneratedPositionsInflated); + this._sorted = true; + } + return this._array; +}; + +exports.MappingList = MappingList; + + +/***/ }), +/* 20 */ +/***/ (function(module, exports, __webpack_require__) { + +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ + +var util = __webpack_require__(2); +var binarySearch = __webpack_require__(21); +var ArraySet = __webpack_require__(13).ArraySet; +var base64VLQ = __webpack_require__(12); +var quickSort = __webpack_require__(22).quickSort; + +function SourceMapConsumer(aSourceMap, aSourceMapURL) { + var sourceMap = aSourceMap; + if (typeof aSourceMap === 'string') { + sourceMap = util.parseSourceMapInput(aSourceMap); + } + + return sourceMap.sections != null + ? new IndexedSourceMapConsumer(sourceMap, aSourceMapURL) + : new BasicSourceMapConsumer(sourceMap, aSourceMapURL); +} + +SourceMapConsumer.fromSourceMap = function(aSourceMap, aSourceMapURL) { + return BasicSourceMapConsumer.fromSourceMap(aSourceMap, aSourceMapURL); +} + +/** + * The version of the source mapping spec that we are consuming. + */ +SourceMapConsumer.prototype._version = 3; + +// `__generatedMappings` and `__originalMappings` are arrays that hold the +// parsed mapping coordinates from the source map's "mappings" attribute. They +// are lazily instantiated, accessed via the `_generatedMappings` and +// `_originalMappings` getters respectively, and we only parse the mappings +// and create these arrays once queried for a source location. We jump through +// these hoops because there can be many thousands of mappings, and parsing +// them is expensive, so we only want to do it if we must. +// +// Each object in the arrays is of the form: +// +// { +// generatedLine: The line number in the generated code, +// generatedColumn: The column number in the generated code, +// source: The path to the original source file that generated this +// chunk of code, +// originalLine: The line number in the original source that +// corresponds to this chunk of generated code, +// originalColumn: The column number in the original source that +// corresponds to this chunk of generated code, +// name: The name of the original symbol which generated this chunk of +// code. +// } +// +// All properties except for `generatedLine` and `generatedColumn` can be +// `null`. +// +// `_generatedMappings` is ordered by the generated positions. +// +// `_originalMappings` is ordered by the original positions. 
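// Editorial example (not part of this patch, and not meant to execute at this
// point in the bundle): a minimal usage sketch of the consumer API defined in
// this module, showing how the lazily parsed mappings described above surface
// through originalPositionFor(), which is defined later in the module. The raw
// map below is hypothetical; "AAAA" encodes a single segment that maps
// generated line 1, column 0 to line 1, column 0 of "one.js". The "mappings"
// string is only decoded the first time a query touches the lazy getters that
// follow this note.
var editorialExampleConsumer = new SourceMapConsumer({
  version: 3,
  file: "min.js",
  sources: ["one.js"],
  names: [],
  mappings: "AAAA"
});
var editorialExamplePosition = editorialExampleConsumer.originalPositionFor({
  line: 1,
  column: 0
});
// editorialExamplePosition -> { source: "one.js", line: 1, column: 0, name: null }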
+ +SourceMapConsumer.prototype.__generatedMappings = null; +Object.defineProperty(SourceMapConsumer.prototype, '_generatedMappings', { + configurable: true, + enumerable: true, + get: function () { + if (!this.__generatedMappings) { + this._parseMappings(this._mappings, this.sourceRoot); + } + + return this.__generatedMappings; + } +}); + +SourceMapConsumer.prototype.__originalMappings = null; +Object.defineProperty(SourceMapConsumer.prototype, '_originalMappings', { + configurable: true, + enumerable: true, + get: function () { + if (!this.__originalMappings) { + this._parseMappings(this._mappings, this.sourceRoot); + } + + return this.__originalMappings; + } +}); + +SourceMapConsumer.prototype._charIsMappingSeparator = + function SourceMapConsumer_charIsMappingSeparator(aStr, index) { + var c = aStr.charAt(index); + return c === ";" || c === ","; + }; + +/** + * Parse the mappings in a string in to a data structure which we can easily + * query (the ordered arrays in the `this.__generatedMappings` and + * `this.__originalMappings` properties). + */ +SourceMapConsumer.prototype._parseMappings = + function SourceMapConsumer_parseMappings(aStr, aSourceRoot) { + throw new Error("Subclasses must implement _parseMappings"); + }; + +SourceMapConsumer.GENERATED_ORDER = 1; +SourceMapConsumer.ORIGINAL_ORDER = 2; + +SourceMapConsumer.GREATEST_LOWER_BOUND = 1; +SourceMapConsumer.LEAST_UPPER_BOUND = 2; + +/** + * Iterate over each mapping between an original source/line/column and a + * generated line/column in this source map. + * + * @param Function aCallback + * The function that is called with each mapping. + * @param Object aContext + * Optional. If specified, this object will be the value of `this` every + * time that `aCallback` is called. + * @param aOrder + * Either `SourceMapConsumer.GENERATED_ORDER` or + * `SourceMapConsumer.ORIGINAL_ORDER`. Specifies whether you want to + * iterate over the mappings sorted by the generated file's line/column + * order or the original's source/line/column order, respectively. Defaults to + * `SourceMapConsumer.GENERATED_ORDER`. + */ +SourceMapConsumer.prototype.eachMapping = + function SourceMapConsumer_eachMapping(aCallback, aContext, aOrder) { + var context = aContext || null; + var order = aOrder || SourceMapConsumer.GENERATED_ORDER; + + var mappings; + switch (order) { + case SourceMapConsumer.GENERATED_ORDER: + mappings = this._generatedMappings; + break; + case SourceMapConsumer.ORIGINAL_ORDER: + mappings = this._originalMappings; + break; + default: + throw new Error("Unknown order of iteration."); + } + + var sourceRoot = this.sourceRoot; + mappings.map(function (mapping) { + var source = mapping.source === null ? null : this._sources.at(mapping.source); + source = util.computeSourceURL(sourceRoot, source, this._sourceMapURL); + return { + source: source, + generatedLine: mapping.generatedLine, + generatedColumn: mapping.generatedColumn, + originalLine: mapping.originalLine, + originalColumn: mapping.originalColumn, + name: mapping.name === null ? null : this._names.at(mapping.name) + }; + }, this).forEach(aCallback, context); + }; + +/** + * Returns all generated line and column information for the original source, + * line, and column provided. If no column is provided, returns all mappings + * corresponding to a either the line we are searching for or the next + * closest line that has any mappings. 
Otherwise, returns all mappings + * corresponding to the given line and either the column we are searching for + * or the next closest column that has any offsets. + * + * The only argument is an object with the following properties: + * + * - source: The filename of the original source. + * - line: The line number in the original source. The line number is 1-based. + * - column: Optional. the column number in the original source. + * The column number is 0-based. + * + * and an array of objects is returned, each with the following properties: + * + * - line: The line number in the generated source, or null. The + * line number is 1-based. + * - column: The column number in the generated source, or null. + * The column number is 0-based. + */ +SourceMapConsumer.prototype.allGeneratedPositionsFor = + function SourceMapConsumer_allGeneratedPositionsFor(aArgs) { + var line = util.getArg(aArgs, 'line'); + + // When there is no exact match, BasicSourceMapConsumer.prototype._findMapping + // returns the index of the closest mapping less than the needle. By + // setting needle.originalColumn to 0, we thus find the last mapping for + // the given line, provided such a mapping exists. + var needle = { + source: util.getArg(aArgs, 'source'), + originalLine: line, + originalColumn: util.getArg(aArgs, 'column', 0) + }; + + needle.source = this._findSourceIndex(needle.source); + if (needle.source < 0) { + return []; + } + + var mappings = []; + + var index = this._findMapping(needle, + this._originalMappings, + "originalLine", + "originalColumn", + util.compareByOriginalPositions, + binarySearch.LEAST_UPPER_BOUND); + if (index >= 0) { + var mapping = this._originalMappings[index]; + + if (aArgs.column === undefined) { + var originalLine = mapping.originalLine; + + // Iterate until either we run out of mappings, or we run into + // a mapping for a different line than the one we found. Since + // mappings are sorted, this is guaranteed to find all mappings for + // the line we found. + while (mapping && mapping.originalLine === originalLine) { + mappings.push({ + line: util.getArg(mapping, 'generatedLine', null), + column: util.getArg(mapping, 'generatedColumn', null), + lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) + }); + + mapping = this._originalMappings[++index]; + } + } else { + var originalColumn = mapping.originalColumn; + + // Iterate until either we run out of mappings, or we run into + // a mapping for a different line than the one we were searching for. + // Since mappings are sorted, this is guaranteed to find all mappings for + // the line we are searching for. + while (mapping && + mapping.originalLine === line && + mapping.originalColumn == originalColumn) { + mappings.push({ + line: util.getArg(mapping, 'generatedLine', null), + column: util.getArg(mapping, 'generatedColumn', null), + lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) + }); + + mapping = this._originalMappings[++index]; + } + } + } + + return mappings; + }; + +exports.SourceMapConsumer = SourceMapConsumer; + +/** + * A BasicSourceMapConsumer instance represents a parsed source map which we can + * query for information about the original file positions by giving it a file + * position in the generated source. + * + * The first parameter is the raw source map (either as a JSON string, or + * already parsed to an object). According to the spec, source maps have the + * following attributes: + * + * - version: Which version of the source map spec this map is following. 
+ * - sources: An array of URLs to the original source files. + * - names: An array of identifiers which can be referrenced by individual mappings. + * - sourceRoot: Optional. The URL root from which all sources are relative. + * - sourcesContent: Optional. An array of contents of the original source files. + * - mappings: A string of base64 VLQs which contain the actual mappings. + * - file: Optional. The generated file this source map is associated with. + * + * Here is an example source map, taken from the source map spec[0]: + * + * { + * version : 3, + * file: "out.js", + * sourceRoot : "", + * sources: ["foo.js", "bar.js"], + * names: ["src", "maps", "are", "fun"], + * mappings: "AA,AB;;ABCDE;" + * } + * + * The second parameter, if given, is a string whose value is the URL + * at which the source map was found. This URL is used to compute the + * sources array. + * + * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1# + */ +function BasicSourceMapConsumer(aSourceMap, aSourceMapURL) { + var sourceMap = aSourceMap; + if (typeof aSourceMap === 'string') { + sourceMap = util.parseSourceMapInput(aSourceMap); + } + + var version = util.getArg(sourceMap, 'version'); + var sources = util.getArg(sourceMap, 'sources'); + // Sass 3.3 leaves out the 'names' array, so we deviate from the spec (which + // requires the array) to play nice here. + var names = util.getArg(sourceMap, 'names', []); + var sourceRoot = util.getArg(sourceMap, 'sourceRoot', null); + var sourcesContent = util.getArg(sourceMap, 'sourcesContent', null); + var mappings = util.getArg(sourceMap, 'mappings'); + var file = util.getArg(sourceMap, 'file', null); + + // Once again, Sass deviates from the spec and supplies the version as a + // string rather than a number, so we use loose equality checking here. + if (version != this._version) { + throw new Error('Unsupported version: ' + version); + } + + if (sourceRoot) { + sourceRoot = util.normalize(sourceRoot); + } + + sources = sources + .map(String) + // Some source maps produce relative source paths like "./foo.js" instead of + // "foo.js". Normalize these first so that future comparisons will succeed. + // See bugzil.la/1090768. + .map(util.normalize) + // Always ensure that absolute sources are internally stored relative to + // the source root, if the source root is absolute. Not doing this would + // be particularly problematic when the source root is a prefix of the + // source (valid, but why??). See github issue #199 and bugzil.la/1188982. + .map(function (source) { + return sourceRoot && util.isAbsolute(sourceRoot) && util.isAbsolute(source) + ? util.relative(sourceRoot, source) + : source; + }); + + // Pass `true` below to allow duplicate names and sources. While source maps + // are intended to be compressed and deduplicated, the TypeScript compiler + // sometimes generates source maps with duplicates in them. See Github issue + // #72 and bugzil.la/889492. 
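// Editorial sketch (not part of this patch): a standalone approximation of the
// source normalization performed a few lines above. This is not the library's
// util module; it only illustrates the intent described there: strip a leading
// "./" and, when both the source root and the source are absolute, store the
// source relative to the root. The constructor resumes immediately below.
function editorialNormalizeSketch(sourceRoot, source) {
  var normalized = source.replace(/^\.\//, ""); // "./foo.js" -> "foo.js"
  if (sourceRoot && sourceRoot.charAt(0) === "/" &&
      normalized.indexOf(sourceRoot + "/") === 0) {
    normalized = normalized.slice(sourceRoot.length + 1);
  }
  return normalized;
}
// editorialNormalizeSketch("/the/root", "/the/root/one.js") -> "one.js"
// editorialNormalizeSketch(null, "./foo.js")                -> "foo.js"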
+ this._names = ArraySet.fromArray(names.map(String), true); + this._sources = ArraySet.fromArray(sources, true); + + this._absoluteSources = this._sources.toArray().map(function (s) { + return util.computeSourceURL(sourceRoot, s, aSourceMapURL); + }); + + this.sourceRoot = sourceRoot; + this.sourcesContent = sourcesContent; + this._mappings = mappings; + this._sourceMapURL = aSourceMapURL; + this.file = file; +} + +BasicSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype); +BasicSourceMapConsumer.prototype.consumer = SourceMapConsumer; + +/** + * Utility function to find the index of a source. Returns -1 if not + * found. + */ +BasicSourceMapConsumer.prototype._findSourceIndex = function(aSource) { + var relativeSource = aSource; + if (this.sourceRoot != null) { + relativeSource = util.relative(this.sourceRoot, relativeSource); + } + + if (this._sources.has(relativeSource)) { + return this._sources.indexOf(relativeSource); + } + + // Maybe aSource is an absolute URL as returned by |sources|. In + // this case we can't simply undo the transform. + var i; + for (i = 0; i < this._absoluteSources.length; ++i) { + if (this._absoluteSources[i] == aSource) { + return i; + } + } + + return -1; +}; + +/** + * Create a BasicSourceMapConsumer from a SourceMapGenerator. + * + * @param SourceMapGenerator aSourceMap + * The source map that will be consumed. + * @param String aSourceMapURL + * The URL at which the source map can be found (optional) + * @returns BasicSourceMapConsumer + */ +BasicSourceMapConsumer.fromSourceMap = + function SourceMapConsumer_fromSourceMap(aSourceMap, aSourceMapURL) { + var smc = Object.create(BasicSourceMapConsumer.prototype); + + var names = smc._names = ArraySet.fromArray(aSourceMap._names.toArray(), true); + var sources = smc._sources = ArraySet.fromArray(aSourceMap._sources.toArray(), true); + smc.sourceRoot = aSourceMap._sourceRoot; + smc.sourcesContent = aSourceMap._generateSourcesContent(smc._sources.toArray(), + smc.sourceRoot); + smc.file = aSourceMap._file; + smc._sourceMapURL = aSourceMapURL; + smc._absoluteSources = smc._sources.toArray().map(function (s) { + return util.computeSourceURL(smc.sourceRoot, s, aSourceMapURL); + }); + + // Because we are modifying the entries (by converting string sources and + // names to indices into the sources and names ArraySets), we have to make + // a copy of the entry or else bad things happen. Shared mutable state + // strikes again! See github issue #191. + + var generatedMappings = aSourceMap._mappings.toArray().slice(); + var destGeneratedMappings = smc.__generatedMappings = []; + var destOriginalMappings = smc.__originalMappings = []; + + for (var i = 0, length = generatedMappings.length; i < length; i++) { + var srcMapping = generatedMappings[i]; + var destMapping = new Mapping; + destMapping.generatedLine = srcMapping.generatedLine; + destMapping.generatedColumn = srcMapping.generatedColumn; + + if (srcMapping.source) { + destMapping.source = sources.indexOf(srcMapping.source); + destMapping.originalLine = srcMapping.originalLine; + destMapping.originalColumn = srcMapping.originalColumn; + + if (srcMapping.name) { + destMapping.name = names.indexOf(srcMapping.name); + } + + destOriginalMappings.push(destMapping); + } + + destGeneratedMappings.push(destMapping); + } + + quickSort(smc.__originalMappings, util.compareByOriginalPositions); + + return smc; + }; + +/** + * The version of the source mapping spec that we are consuming. 
+ */ +BasicSourceMapConsumer.prototype._version = 3; + +/** + * The list of original sources. + */ +Object.defineProperty(BasicSourceMapConsumer.prototype, 'sources', { + get: function () { + return this._absoluteSources.slice(); + } +}); + +/** + * Provide the JIT with a nice shape / hidden class. + */ +function Mapping() { + this.generatedLine = 0; + this.generatedColumn = 0; + this.source = null; + this.originalLine = null; + this.originalColumn = null; + this.name = null; +} + +/** + * Parse the mappings in a string in to a data structure which we can easily + * query (the ordered arrays in the `this.__generatedMappings` and + * `this.__originalMappings` properties). + */ +BasicSourceMapConsumer.prototype._parseMappings = + function SourceMapConsumer_parseMappings(aStr, aSourceRoot) { + var generatedLine = 1; + var previousGeneratedColumn = 0; + var previousOriginalLine = 0; + var previousOriginalColumn = 0; + var previousSource = 0; + var previousName = 0; + var length = aStr.length; + var index = 0; + var cachedSegments = {}; + var temp = {}; + var originalMappings = []; + var generatedMappings = []; + var mapping, str, segment, end, value; + + while (index < length) { + if (aStr.charAt(index) === ';') { + generatedLine++; + index++; + previousGeneratedColumn = 0; + } + else if (aStr.charAt(index) === ',') { + index++; + } + else { + mapping = new Mapping(); + mapping.generatedLine = generatedLine; + + // Because each offset is encoded relative to the previous one, + // many segments often have the same encoding. We can exploit this + // fact by caching the parsed variable length fields of each segment, + // allowing us to avoid a second parse if we encounter the same + // segment again. + for (end = index; end < length; end++) { + if (this._charIsMappingSeparator(aStr, end)) { + break; + } + } + str = aStr.slice(index, end); + + segment = cachedSegments[str]; + if (segment) { + index += str.length; + } else { + segment = []; + while (index < end) { + base64VLQ.decode(aStr, index, temp); + value = temp.value; + index = temp.rest; + segment.push(value); + } + + if (segment.length === 2) { + throw new Error('Found a source, but no line and column'); + } + + if (segment.length === 3) { + throw new Error('Found a source and line, but no column'); + } + + cachedSegments[str] = segment; + } + + // Generated column. + mapping.generatedColumn = previousGeneratedColumn + segment[0]; + previousGeneratedColumn = mapping.generatedColumn; + + if (segment.length > 1) { + // Original source. + mapping.source = previousSource + segment[1]; + previousSource += segment[1]; + + // Original line. + mapping.originalLine = previousOriginalLine + segment[2]; + previousOriginalLine = mapping.originalLine; + // Lines are stored 0-based + mapping.originalLine += 1; + + // Original column. + mapping.originalColumn = previousOriginalColumn + segment[3]; + previousOriginalColumn = mapping.originalColumn; + + if (segment.length > 4) { + // Original name. 
+ mapping.name = previousName + segment[4]; + previousName += segment[4]; + } + } + + generatedMappings.push(mapping); + if (typeof mapping.originalLine === 'number') { + originalMappings.push(mapping); + } + } + } + + quickSort(generatedMappings, util.compareByGeneratedPositionsDeflated); + this.__generatedMappings = generatedMappings; + + quickSort(originalMappings, util.compareByOriginalPositions); + this.__originalMappings = originalMappings; + }; + +/** + * Find the mapping that best matches the hypothetical "needle" mapping that + * we are searching for in the given "haystack" of mappings. + */ +BasicSourceMapConsumer.prototype._findMapping = + function SourceMapConsumer_findMapping(aNeedle, aMappings, aLineName, + aColumnName, aComparator, aBias) { + // To return the position we are searching for, we must first find the + // mapping for the given position and then return the opposite position it + // points to. Because the mappings are sorted, we can use binary search to + // find the best mapping. + + if (aNeedle[aLineName] <= 0) { + throw new TypeError('Line must be greater than or equal to 1, got ' + + aNeedle[aLineName]); + } + if (aNeedle[aColumnName] < 0) { + throw new TypeError('Column must be greater than or equal to 0, got ' + + aNeedle[aColumnName]); + } + + return binarySearch.search(aNeedle, aMappings, aComparator, aBias); + }; + +/** + * Compute the last column for each generated mapping. The last column is + * inclusive. + */ +BasicSourceMapConsumer.prototype.computeColumnSpans = + function SourceMapConsumer_computeColumnSpans() { + for (var index = 0; index < this._generatedMappings.length; ++index) { + var mapping = this._generatedMappings[index]; + + // Mappings do not contain a field for the last generated columnt. We + // can come up with an optimistic estimate, however, by assuming that + // mappings are contiguous (i.e. given two consecutive mappings, the + // first mapping ends where the second one starts). + if (index + 1 < this._generatedMappings.length) { + var nextMapping = this._generatedMappings[index + 1]; + + if (mapping.generatedLine === nextMapping.generatedLine) { + mapping.lastGeneratedColumn = nextMapping.generatedColumn - 1; + continue; + } + } + + // The last mapping for each line spans the entire line. + mapping.lastGeneratedColumn = Infinity; + } + }; + +/** + * Returns the original source, line, and column information for the generated + * source's line and column positions provided. The only argument is an object + * with the following properties: + * + * - line: The line number in the generated source. The line number + * is 1-based. + * - column: The column number in the generated source. The column + * number is 0-based. + * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or + * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the + * closest element that is smaller than or greater than the one we are + * searching for, respectively, if the exact element cannot be found. + * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. + * + * and an object is returned with the following properties: + * + * - source: The original source file, or null. + * - line: The line number in the original source, or null. The + * line number is 1-based. + * - column: The column number in the original source, or null. The + * column number is 0-based. + * - name: The original identifier, or null. 
+ */ +BasicSourceMapConsumer.prototype.originalPositionFor = + function SourceMapConsumer_originalPositionFor(aArgs) { + var needle = { + generatedLine: util.getArg(aArgs, 'line'), + generatedColumn: util.getArg(aArgs, 'column') + }; + + var index = this._findMapping( + needle, + this._generatedMappings, + "generatedLine", + "generatedColumn", + util.compareByGeneratedPositionsDeflated, + util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND) + ); + + if (index >= 0) { + var mapping = this._generatedMappings[index]; + + if (mapping.generatedLine === needle.generatedLine) { + var source = util.getArg(mapping, 'source', null); + if (source !== null) { + source = this._sources.at(source); + source = util.computeSourceURL(this.sourceRoot, source, this._sourceMapURL); + } + var name = util.getArg(mapping, 'name', null); + if (name !== null) { + name = this._names.at(name); + } + return { + source: source, + line: util.getArg(mapping, 'originalLine', null), + column: util.getArg(mapping, 'originalColumn', null), + name: name + }; + } + } + + return { + source: null, + line: null, + column: null, + name: null + }; + }; + +/** + * Return true if we have the source content for every source in the source + * map, false otherwise. + */ +BasicSourceMapConsumer.prototype.hasContentsOfAllSources = + function BasicSourceMapConsumer_hasContentsOfAllSources() { + if (!this.sourcesContent) { + return false; + } + return this.sourcesContent.length >= this._sources.size() && + !this.sourcesContent.some(function (sc) { return sc == null; }); + }; + +/** + * Returns the original source content. The only argument is the url of the + * original source file. Returns null if no original source content is + * available. + */ +BasicSourceMapConsumer.prototype.sourceContentFor = + function SourceMapConsumer_sourceContentFor(aSource, nullOnMissing) { + if (!this.sourcesContent) { + return null; + } + + var index = this._findSourceIndex(aSource); + if (index >= 0) { + return this.sourcesContent[index]; + } + + var relativeSource = aSource; + if (this.sourceRoot != null) { + relativeSource = util.relative(this.sourceRoot, relativeSource); + } + + var url; + if (this.sourceRoot != null + && (url = util.urlParse(this.sourceRoot))) { + // XXX: file:// URIs and absolute paths lead to unexpected behavior for + // many users. We can help them out when they expect file:// URIs to + // behave like it would if they were running a local HTTP server. See + // https://bugzilla.mozilla.org/show_bug.cgi?id=885597. + var fileUriAbsPath = relativeSource.replace(/^file:\/\//, ""); + if (url.scheme == "file" + && this._sources.has(fileUriAbsPath)) { + return this.sourcesContent[this._sources.indexOf(fileUriAbsPath)] + } + + if ((!url.path || url.path == "/") + && this._sources.has("/" + relativeSource)) { + return this.sourcesContent[this._sources.indexOf("/" + relativeSource)]; + } + } + + // This function is used recursively from + // IndexedSourceMapConsumer.prototype.sourceContentFor. In that case, we + // don't want to throw if we can't find the source - we just want to + // return null, so we provide a flag to exit gracefully. + if (nullOnMissing) { + return null; + } + else { + throw new Error('"' + relativeSource + '" is not in the SourceMap.'); + } + }; + +/** + * Returns the generated line and column information for the original source, + * line, and column positions provided. The only argument is an object with + * the following properties: + * + * - source: The filename of the original source. 
+ * - line: The line number in the original source. The line number + * is 1-based. + * - column: The column number in the original source. The column + * number is 0-based. + * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or + * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the + * closest element that is smaller than or greater than the one we are + * searching for, respectively, if the exact element cannot be found. + * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. + * + * and an object is returned with the following properties: + * + * - line: The line number in the generated source, or null. The + * line number is 1-based. + * - column: The column number in the generated source, or null. + * The column number is 0-based. + */ +BasicSourceMapConsumer.prototype.generatedPositionFor = + function SourceMapConsumer_generatedPositionFor(aArgs) { + var source = util.getArg(aArgs, 'source'); + source = this._findSourceIndex(source); + if (source < 0) { + return { + line: null, + column: null, + lastColumn: null + }; + } + + var needle = { + source: source, + originalLine: util.getArg(aArgs, 'line'), + originalColumn: util.getArg(aArgs, 'column') + }; + + var index = this._findMapping( + needle, + this._originalMappings, + "originalLine", + "originalColumn", + util.compareByOriginalPositions, + util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND) + ); + + if (index >= 0) { + var mapping = this._originalMappings[index]; + + if (mapping.source === needle.source) { + return { + line: util.getArg(mapping, 'generatedLine', null), + column: util.getArg(mapping, 'generatedColumn', null), + lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) + }; + } + } + + return { + line: null, + column: null, + lastColumn: null + }; + }; + +exports.BasicSourceMapConsumer = BasicSourceMapConsumer; + +/** + * An IndexedSourceMapConsumer instance represents a parsed source map which + * we can query for information. It differs from BasicSourceMapConsumer in + * that it takes "indexed" source maps (i.e. ones with a "sections" field) as + * input. + * + * The first parameter is a raw source map (either as a JSON string, or already + * parsed to an object). According to the spec for indexed source maps, they + * have the following attributes: + * + * - version: Which version of the source map spec this map is following. + * - file: Optional. The generated file this source map is associated with. + * - sections: A list of section definitions. + * + * Each value under the "sections" field has two fields: + * - offset: The offset into the original specified at which this section + * begins to apply, defined as an object with a "line" and "column" + * field. + * - map: A source map definition. This source map could also be indexed, + * but doesn't have to be. + * + * Instead of the "map" field, it's also possible to have a "url" field + * specifying a URL to retrieve a source map from, but that's currently + * unsupported. + * + * Here's an example source map, taken from the source map spec[0], but + * modified to omit a section which uses the "url" field. + * + * { + * version : 3, + * file: "app.js", + * sections: [{ + * offset: {line:100, column:10}, + * map: { + * version : 3, + * file: "section.js", + * sources: ["foo.js", "bar.js"], + * names: ["src", "maps", "are", "fun"], + * mappings: "AAAA,E;;ABCDE;" + * } + * }], + * } + * + * The second parameter, if given, is a string whose value is the URL + * at which the source map was found. 
This URL is used to compute the + * sources array. + * + * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.535es3xeprgt + */ +function IndexedSourceMapConsumer(aSourceMap, aSourceMapURL) { + var sourceMap = aSourceMap; + if (typeof aSourceMap === 'string') { + sourceMap = util.parseSourceMapInput(aSourceMap); + } + + var version = util.getArg(sourceMap, 'version'); + var sections = util.getArg(sourceMap, 'sections'); + + if (version != this._version) { + throw new Error('Unsupported version: ' + version); + } + + this._sources = new ArraySet(); + this._names = new ArraySet(); + + var lastOffset = { + line: -1, + column: 0 + }; + this._sections = sections.map(function (s) { + if (s.url) { + // The url field will require support for asynchronicity. + // See https://github.com/mozilla/source-map/issues/16 + throw new Error('Support for url field in sections not implemented.'); + } + var offset = util.getArg(s, 'offset'); + var offsetLine = util.getArg(offset, 'line'); + var offsetColumn = util.getArg(offset, 'column'); + + if (offsetLine < lastOffset.line || + (offsetLine === lastOffset.line && offsetColumn < lastOffset.column)) { + throw new Error('Section offsets must be ordered and non-overlapping.'); + } + lastOffset = offset; + + return { + generatedOffset: { + // The offset fields are 0-based, but we use 1-based indices when + // encoding/decoding from VLQ. + generatedLine: offsetLine + 1, + generatedColumn: offsetColumn + 1 + }, + consumer: new SourceMapConsumer(util.getArg(s, 'map'), aSourceMapURL) + } + }); +} + +IndexedSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype); +IndexedSourceMapConsumer.prototype.constructor = SourceMapConsumer; + +/** + * The version of the source mapping spec that we are consuming. + */ +IndexedSourceMapConsumer.prototype._version = 3; + +/** + * The list of original sources. + */ +Object.defineProperty(IndexedSourceMapConsumer.prototype, 'sources', { + get: function () { + var sources = []; + for (var i = 0; i < this._sections.length; i++) { + for (var j = 0; j < this._sections[i].consumer.sources.length; j++) { + sources.push(this._sections[i].consumer.sources[j]); + } + } + return sources; + } +}); + +/** + * Returns the original source, line, and column information for the generated + * source's line and column positions provided. The only argument is an object + * with the following properties: + * + * - line: The line number in the generated source. The line number + * is 1-based. + * - column: The column number in the generated source. The column + * number is 0-based. + * + * and an object is returned with the following properties: + * + * - source: The original source file, or null. + * - line: The line number in the original source, or null. The + * line number is 1-based. + * - column: The column number in the original source, or null. The + * column number is 0-based. + * - name: The original identifier, or null. + */ +IndexedSourceMapConsumer.prototype.originalPositionFor = + function IndexedSourceMapConsumer_originalPositionFor(aArgs) { + var needle = { + generatedLine: util.getArg(aArgs, 'line'), + generatedColumn: util.getArg(aArgs, 'column') + }; + + // Find the section containing the generated position we're trying to map + // to an original position. 
+ var sectionIndex = binarySearch.search(needle, this._sections, + function(needle, section) { + var cmp = needle.generatedLine - section.generatedOffset.generatedLine; + if (cmp) { + return cmp; + } + + return (needle.generatedColumn - + section.generatedOffset.generatedColumn); + }); + var section = this._sections[sectionIndex]; + + if (!section) { + return { + source: null, + line: null, + column: null, + name: null + }; + } + + return section.consumer.originalPositionFor({ + line: needle.generatedLine - + (section.generatedOffset.generatedLine - 1), + column: needle.generatedColumn - + (section.generatedOffset.generatedLine === needle.generatedLine + ? section.generatedOffset.generatedColumn - 1 + : 0), + bias: aArgs.bias + }); + }; + +/** + * Return true if we have the source content for every source in the source + * map, false otherwise. + */ +IndexedSourceMapConsumer.prototype.hasContentsOfAllSources = + function IndexedSourceMapConsumer_hasContentsOfAllSources() { + return this._sections.every(function (s) { + return s.consumer.hasContentsOfAllSources(); + }); + }; + +/** + * Returns the original source content. The only argument is the url of the + * original source file. Returns null if no original source content is + * available. + */ +IndexedSourceMapConsumer.prototype.sourceContentFor = + function IndexedSourceMapConsumer_sourceContentFor(aSource, nullOnMissing) { + for (var i = 0; i < this._sections.length; i++) { + var section = this._sections[i]; + + var content = section.consumer.sourceContentFor(aSource, true); + if (content) { + return content; + } + } + if (nullOnMissing) { + return null; + } + else { + throw new Error('"' + aSource + '" is not in the SourceMap.'); + } + }; + +/** + * Returns the generated line and column information for the original source, + * line, and column positions provided. The only argument is an object with + * the following properties: + * + * - source: The filename of the original source. + * - line: The line number in the original source. The line number + * is 1-based. + * - column: The column number in the original source. The column + * number is 0-based. + * + * and an object is returned with the following properties: + * + * - line: The line number in the generated source, or null. The + * line number is 1-based. + * - column: The column number in the generated source, or null. + * The column number is 0-based. + */ +IndexedSourceMapConsumer.prototype.generatedPositionFor = + function IndexedSourceMapConsumer_generatedPositionFor(aArgs) { + for (var i = 0; i < this._sections.length; i++) { + var section = this._sections[i]; + + // Only consider this section if the requested source is in the list of + // sources of the consumer. + if (section.consumer._findSourceIndex(util.getArg(aArgs, 'source')) === -1) { + continue; + } + var generatedPosition = section.consumer.generatedPositionFor(aArgs); + if (generatedPosition) { + var ret = { + line: generatedPosition.line + + (section.generatedOffset.generatedLine - 1), + column: generatedPosition.column + + (section.generatedOffset.generatedLine === generatedPosition.line + ? section.generatedOffset.generatedColumn - 1 + : 0) + }; + return ret; + } + } + + return { + line: null, + column: null + }; + }; + +/** + * Parse the mappings in a string in to a data structure which we can easily + * query (the ordered arrays in the `this.__generatedMappings` and + * `this.__originalMappings` properties). 
+ */ +IndexedSourceMapConsumer.prototype._parseMappings = + function IndexedSourceMapConsumer_parseMappings(aStr, aSourceRoot) { + this.__generatedMappings = []; + this.__originalMappings = []; + for (var i = 0; i < this._sections.length; i++) { + var section = this._sections[i]; + var sectionMappings = section.consumer._generatedMappings; + for (var j = 0; j < sectionMappings.length; j++) { + var mapping = sectionMappings[j]; + + var source = section.consumer._sources.at(mapping.source); + source = util.computeSourceURL(section.consumer.sourceRoot, source, this._sourceMapURL); + this._sources.add(source); + source = this._sources.indexOf(source); + + var name = null; + if (mapping.name) { + name = section.consumer._names.at(mapping.name); + this._names.add(name); + name = this._names.indexOf(name); + } + + // The mappings coming from the consumer for the section have + // generated positions relative to the start of the section, so we + // need to offset them to be relative to the start of the concatenated + // generated file. + var adjustedMapping = { + source: source, + generatedLine: mapping.generatedLine + + (section.generatedOffset.generatedLine - 1), + generatedColumn: mapping.generatedColumn + + (section.generatedOffset.generatedLine === mapping.generatedLine + ? section.generatedOffset.generatedColumn - 1 + : 0), + originalLine: mapping.originalLine, + originalColumn: mapping.originalColumn, + name: name + }; + + this.__generatedMappings.push(adjustedMapping); + if (typeof adjustedMapping.originalLine === 'number') { + this.__originalMappings.push(adjustedMapping); + } + } + } + + quickSort(this.__generatedMappings, util.compareByGeneratedPositionsDeflated); + quickSort(this.__originalMappings, util.compareByOriginalPositions); + }; + +exports.IndexedSourceMapConsumer = IndexedSourceMapConsumer; + + +/***/ }), +/* 21 */ +/***/ (function(module, exports) { + +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ + +exports.GREATEST_LOWER_BOUND = 1; +exports.LEAST_UPPER_BOUND = 2; + +/** + * Recursive implementation of binary search. + * + * @param aLow Indices here and lower do not contain the needle. + * @param aHigh Indices here and higher do not contain the needle. + * @param aNeedle The element being searched for. + * @param aHaystack The non-empty array being searched. + * @param aCompare Function which takes two elements and returns -1, 0, or 1. + * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or + * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the + * closest element that is smaller than or greater than the one we are + * searching for, respectively, if the exact element cannot be found. + */ +function recursiveSearch(aLow, aHigh, aNeedle, aHaystack, aCompare, aBias) { + // This function terminates when one of the following is true: + // + // 1. We find the exact element we are looking for. + // + // 2. We did not find the exact element, but we can return the index of + // the next-closest element. + // + // 3. We did not find the exact element, and there is no next-closest + // element than the one we are searching for, so we return -1. + var mid = Math.floor((aHigh - aLow) / 2) + aLow; + var cmp = aCompare(aNeedle, aHaystack[mid], true); + if (cmp === 0) { + // Found the element we are looking for. + return mid; + } + else if (cmp > 0) { + // Our needle is greater than aHaystack[mid]. 
+ if (aHigh - mid > 1) { + // The element is in the upper half. + return recursiveSearch(mid, aHigh, aNeedle, aHaystack, aCompare, aBias); + } + + // The exact needle element was not found in this haystack. Determine if + // we are in termination case (3) or (2) and return the appropriate thing. + if (aBias == exports.LEAST_UPPER_BOUND) { + return aHigh < aHaystack.length ? aHigh : -1; + } else { + return mid; + } + } + else { + // Our needle is less than aHaystack[mid]. + if (mid - aLow > 1) { + // The element is in the lower half. + return recursiveSearch(aLow, mid, aNeedle, aHaystack, aCompare, aBias); + } + + // we are in termination case (3) or (2) and return the appropriate thing. + if (aBias == exports.LEAST_UPPER_BOUND) { + return mid; + } else { + return aLow < 0 ? -1 : aLow; + } + } +} + +/** + * This is an implementation of binary search which will always try and return + * the index of the closest element if there is no exact hit. This is because + * mappings between original and generated line/col pairs are single points, + * and there is an implicit region between each of them, so a miss just means + * that you aren't on the very start of a region. + * + * @param aNeedle The element you are looking for. + * @param aHaystack The array that is being searched. + * @param aCompare A function which takes the needle and an element in the + * array and returns -1, 0, or 1 depending on whether the needle is less + * than, equal to, or greater than the element, respectively. + * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or + * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the + * closest element that is smaller than or greater than the one we are + * searching for, respectively, if the exact element cannot be found. + * Defaults to 'binarySearch.GREATEST_LOWER_BOUND'. + */ +exports.search = function search(aNeedle, aHaystack, aCompare, aBias) { + if (aHaystack.length === 0) { + return -1; + } + + var index = recursiveSearch(-1, aHaystack.length, aNeedle, aHaystack, + aCompare, aBias || exports.GREATEST_LOWER_BOUND); + if (index < 0) { + return -1; + } + + // We have found either the exact element, or the next-closest element than + // the one we are searching for. However, there may be more than one such + // element. Make sure we always return the smallest of these. + while (index - 1 >= 0) { + if (aCompare(aHaystack[index], aHaystack[index - 1], true) !== 0) { + break; + } + --index; + } + + return index; +}; + + +/***/ }), +/* 22 */ +/***/ (function(module, exports) { + +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ + +// It turns out that some (most?) JavaScript engines don't self-host +// `Array.prototype.sort`. This makes sense because C++ will likely remain +// faster than JS when doing raw CPU-intensive sorting. However, when using a +// custom comparator function, calling back and forth between the VM's C++ and +// JIT'd JS is rather slow *and* loses JIT type information, resulting in +// worse generated code for the comparator function than would be optimal. In +// fact, when sorting with a comparator, these costs outweigh the benefits of +// sorting in C++. By using our own JS-implemented Quick Sort (below), we get +// a ~3500ms mean speed-up in `bench/bench.html`. + +/** + * Swap the elements indexed by `x` and `y` in the array `ary`. + * + * @param {Array} ary + * The array. 
+ * @param {Number} x + * The index of the first item. + * @param {Number} y + * The index of the second item. + */ +function swap(ary, x, y) { + var temp = ary[x]; + ary[x] = ary[y]; + ary[y] = temp; +} + +/** + * Returns a random integer within the range `low .. high` inclusive. + * + * @param {Number} low + * The lower bound on the range. + * @param {Number} high + * The upper bound on the range. + */ +function randomIntInRange(low, high) { + return Math.round(low + (Math.random() * (high - low))); +} + +/** + * The Quick Sort algorithm. + * + * @param {Array} ary + * An array to sort. + * @param {function} comparator + * Function to use to compare two items. + * @param {Number} p + * Start index of the array + * @param {Number} r + * End index of the array + */ +function doQuickSort(ary, comparator, p, r) { + // If our lower bound is less than our upper bound, we (1) partition the + // array into two pieces and (2) recurse on each half. If it is not, this is + // the empty array and our base case. + + if (p < r) { + // (1) Partitioning. + // + // The partitioning chooses a pivot between `p` and `r` and moves all + // elements that are less than or equal to the pivot to the before it, and + // all the elements that are greater than it after it. The effect is that + // once partition is done, the pivot is in the exact place it will be when + // the array is put in sorted order, and it will not need to be moved + // again. This runs in O(n) time. + + // Always choose a random pivot so that an input array which is reverse + // sorted does not cause O(n^2) running time. + var pivotIndex = randomIntInRange(p, r); + var i = p - 1; + + swap(ary, pivotIndex, r); + var pivot = ary[r]; + + // Immediately after `j` is incremented in this loop, the following hold + // true: + // + // * Every element in `ary[p .. i]` is less than or equal to the pivot. + // + // * Every element in `ary[i+1 .. j-1]` is greater than the pivot. + for (var j = p; j < r; j++) { + if (comparator(ary[j], pivot) <= 0) { + i += 1; + swap(ary, i, j); + } + } + + swap(ary, i + 1, j); + var q = i + 1; + + // (2) Recurse on each half. + + doQuickSort(ary, comparator, p, q - 1); + doQuickSort(ary, comparator, q + 1, r); + } +} + +/** + * Sort the given array in-place with the given comparator function. + * + * @param {Array} ary + * An array to sort. + * @param {function} comparator + * Function to use to compare two items. + */ +exports.quickSort = function (ary, comparator) { + doQuickSort(ary, comparator, 0, ary.length - 1); +}; + + +/***/ }), +/* 23 */ +/***/ (function(module, exports, __webpack_require__) { + +/* -*- Mode: js; js-indent-level: 2; -*- */ +/* + * Copyright 2011 Mozilla Foundation and contributors + * Licensed under the New BSD license. See LICENSE or: + * http://opensource.org/licenses/BSD-3-Clause + */ + +var SourceMapGenerator = __webpack_require__(11).SourceMapGenerator; +var util = __webpack_require__(2); + +// Matches a Windows-style `\r\n` newline or a `\n` newline used by all other +// operating systems these days (capturing the result). +var REGEX_NEWLINE = /(\r?\n)/; + +// Newline character code for charCodeAt() comparisons +var NEWLINE_CODE = 10; + +// Private symbol for identifying `SourceNode`s when multiple versions of +// the source-map library are loaded. This MUST NOT CHANGE across +// versions! 
+var isSourceNode = "$$$isSourceNode$$$"; + +/** + * SourceNodes provide a way to abstract over interpolating/concatenating + * snippets of generated JavaScript source code while maintaining the line and + * column information associated with the original source code. + * + * @param aLine The original line number. + * @param aColumn The original column number. + * @param aSource The original source's filename. + * @param aChunks Optional. An array of strings which are snippets of + * generated JS, or other SourceNodes. + * @param aName The original identifier. + */ +function SourceNode(aLine, aColumn, aSource, aChunks, aName) { + this.children = []; + this.sourceContents = {}; + this.line = aLine == null ? null : aLine; + this.column = aColumn == null ? null : aColumn; + this.source = aSource == null ? null : aSource; + this.name = aName == null ? null : aName; + this[isSourceNode] = true; + if (aChunks != null) this.add(aChunks); +} + +/** + * Creates a SourceNode from generated code and a SourceMapConsumer. + * + * @param aGeneratedCode The generated code + * @param aSourceMapConsumer The SourceMap for the generated code + * @param aRelativePath Optional. The path that relative sources in the + * SourceMapConsumer should be relative to. + */ +SourceNode.fromStringWithSourceMap = + function SourceNode_fromStringWithSourceMap(aGeneratedCode, aSourceMapConsumer, aRelativePath) { + // The SourceNode we want to fill with the generated code + // and the SourceMap + var node = new SourceNode(); + + // All even indices of this array are one line of the generated code, + // while all odd indices are the newlines between two adjacent lines + // (since `REGEX_NEWLINE` captures its match). + // Processed fragments are accessed by calling `shiftNextLine`. + var remainingLines = aGeneratedCode.split(REGEX_NEWLINE); + var remainingLinesIndex = 0; + var shiftNextLine = function() { + var lineContents = getNextLine(); + // The last line of a file might not have a newline. + var newLine = getNextLine() || ""; + return lineContents + newLine; + + function getNextLine() { + return remainingLinesIndex < remainingLines.length ? + remainingLines[remainingLinesIndex++] : undefined; + } + }; + + // We need to remember the position of "remainingLines" + var lastGeneratedLine = 1, lastGeneratedColumn = 0; + + // The generate SourceNodes we need a code range. + // To extract it current and last mapping is used. + // Here we store the last mapping. + var lastMapping = null; + + aSourceMapConsumer.eachMapping(function (mapping) { + if (lastMapping !== null) { + // We add the code from "lastMapping" to "mapping": + // First check if there is a new line in between. + if (lastGeneratedLine < mapping.generatedLine) { + // Associate first line with "lastMapping" + addMappingWithCode(lastMapping, shiftNextLine()); + lastGeneratedLine++; + lastGeneratedColumn = 0; + // The remaining code is added without mapping + } else { + // There is no new line in between. 
+ // Associate the code between "lastGeneratedColumn" and + // "mapping.generatedColumn" with "lastMapping" + var nextLine = remainingLines[remainingLinesIndex] || ''; + var code = nextLine.substr(0, mapping.generatedColumn - + lastGeneratedColumn); + remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn - + lastGeneratedColumn); + lastGeneratedColumn = mapping.generatedColumn; + addMappingWithCode(lastMapping, code); + // No more remaining code, continue + lastMapping = mapping; + return; + } + } + // We add the generated code until the first mapping + // to the SourceNode without any mapping. + // Each line is added as separate string. + while (lastGeneratedLine < mapping.generatedLine) { + node.add(shiftNextLine()); + lastGeneratedLine++; + } + if (lastGeneratedColumn < mapping.generatedColumn) { + var nextLine = remainingLines[remainingLinesIndex] || ''; + node.add(nextLine.substr(0, mapping.generatedColumn)); + remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn); + lastGeneratedColumn = mapping.generatedColumn; + } + lastMapping = mapping; + }, this); + // We have processed all mappings. + if (remainingLinesIndex < remainingLines.length) { + if (lastMapping) { + // Associate the remaining code in the current line with "lastMapping" + addMappingWithCode(lastMapping, shiftNextLine()); + } + // and add the remaining lines without any mapping + node.add(remainingLines.splice(remainingLinesIndex).join("")); + } + + // Copy sourcesContent into SourceNode + aSourceMapConsumer.sources.forEach(function (sourceFile) { + var content = aSourceMapConsumer.sourceContentFor(sourceFile); + if (content != null) { + if (aRelativePath != null) { + sourceFile = util.join(aRelativePath, sourceFile); + } + node.setSourceContent(sourceFile, content); + } + }); + + return node; + + function addMappingWithCode(mapping, code) { + if (mapping === null || mapping.source === undefined) { + node.add(code); + } else { + var source = aRelativePath + ? util.join(aRelativePath, mapping.source) + : mapping.source; + node.add(new SourceNode(mapping.originalLine, + mapping.originalColumn, + source, + code, + mapping.name)); + } + } + }; + +/** + * Add a chunk of generated JS to this source node. + * + * @param aChunk A string snippet of generated JS code, another instance of + * SourceNode, or an array where each member is one of those things. + */ +SourceNode.prototype.add = function SourceNode_add(aChunk) { + if (Array.isArray(aChunk)) { + aChunk.forEach(function (chunk) { + this.add(chunk); + }, this); + } + else if (aChunk[isSourceNode] || typeof aChunk === "string") { + if (aChunk) { + this.children.push(aChunk); + } + } + else { + throw new TypeError( + "Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk + ); + } + return this; +}; + +/** + * Add a chunk of generated JS to the beginning of this source node. + * + * @param aChunk A string snippet of generated JS code, another instance of + * SourceNode, or an array where each member is one of those things. + */ +SourceNode.prototype.prepend = function SourceNode_prepend(aChunk) { + if (Array.isArray(aChunk)) { + for (var i = aChunk.length-1; i >= 0; i--) { + this.prepend(aChunk[i]); + } + } + else if (aChunk[isSourceNode] || typeof aChunk === "string") { + this.children.unshift(aChunk); + } + else { + throw new TypeError( + "Expected a SourceNode, string, or an array of SourceNodes and strings. 
Got " + aChunk + ); + } + return this; +}; + +/** + * Walk over the tree of JS snippets in this node and its children. The + * walking function is called once for each snippet of JS and is passed that + * snippet and the its original associated source's line/column location. + * + * @param aFn The traversal function. + */ +SourceNode.prototype.walk = function SourceNode_walk(aFn) { + var chunk; + for (var i = 0, len = this.children.length; i < len; i++) { + chunk = this.children[i]; + if (chunk[isSourceNode]) { + chunk.walk(aFn); + } + else { + if (chunk !== '') { + aFn(chunk, { source: this.source, + line: this.line, + column: this.column, + name: this.name }); + } + } + } +}; + +/** + * Like `String.prototype.join` except for SourceNodes. Inserts `aStr` between + * each of `this.children`. + * + * @param aSep The separator. + */ +SourceNode.prototype.join = function SourceNode_join(aSep) { + var newChildren; + var i; + var len = this.children.length; + if (len > 0) { + newChildren = []; + for (i = 0; i < len-1; i++) { + newChildren.push(this.children[i]); + newChildren.push(aSep); + } + newChildren.push(this.children[i]); + this.children = newChildren; + } + return this; +}; + +/** + * Call String.prototype.replace on the very right-most source snippet. Useful + * for trimming whitespace from the end of a source node, etc. + * + * @param aPattern The pattern to replace. + * @param aReplacement The thing to replace the pattern with. + */ +SourceNode.prototype.replaceRight = function SourceNode_replaceRight(aPattern, aReplacement) { + var lastChild = this.children[this.children.length - 1]; + if (lastChild[isSourceNode]) { + lastChild.replaceRight(aPattern, aReplacement); + } + else if (typeof lastChild === 'string') { + this.children[this.children.length - 1] = lastChild.replace(aPattern, aReplacement); + } + else { + this.children.push(''.replace(aPattern, aReplacement)); + } + return this; +}; + +/** + * Set the source content for a source file. This will be added to the SourceMapGenerator + * in the sourcesContent field. + * + * @param aSourceFile The filename of the source file + * @param aSourceContent The content of the source file + */ +SourceNode.prototype.setSourceContent = + function SourceNode_setSourceContent(aSourceFile, aSourceContent) { + this.sourceContents[util.toSetString(aSourceFile)] = aSourceContent; + }; + +/** + * Walk over the tree of SourceNodes. The walking function is called for each + * source file content and is passed the filename and source content. + * + * @param aFn The traversal function. + */ +SourceNode.prototype.walkSourceContents = + function SourceNode_walkSourceContents(aFn) { + for (var i = 0, len = this.children.length; i < len; i++) { + if (this.children[i][isSourceNode]) { + this.children[i].walkSourceContents(aFn); + } + } + + var sources = Object.keys(this.sourceContents); + for (var i = 0, len = sources.length; i < len; i++) { + aFn(util.fromSetString(sources[i]), this.sourceContents[sources[i]]); + } + }; + +/** + * Return the string representation of this source node. Walks over the tree + * and concatenates all the various snippets together to one string. + */ +SourceNode.prototype.toString = function SourceNode_toString() { + var str = ""; + this.walk(function (chunk) { + str += chunk; + }); + return str; +}; + +/** + * Returns the string representation of this source node along with a source + * map. 
+ */ +SourceNode.prototype.toStringWithSourceMap = function SourceNode_toStringWithSourceMap(aArgs) { + var generated = { + code: "", + line: 1, + column: 0 + }; + var map = new SourceMapGenerator(aArgs); + var sourceMappingActive = false; + var lastOriginalSource = null; + var lastOriginalLine = null; + var lastOriginalColumn = null; + var lastOriginalName = null; + this.walk(function (chunk, original) { + generated.code += chunk; + if (original.source !== null + && original.line !== null + && original.column !== null) { + if(lastOriginalSource !== original.source + || lastOriginalLine !== original.line + || lastOriginalColumn !== original.column + || lastOriginalName !== original.name) { + map.addMapping({ + source: original.source, + original: { + line: original.line, + column: original.column + }, + generated: { + line: generated.line, + column: generated.column + }, + name: original.name + }); + } + lastOriginalSource = original.source; + lastOriginalLine = original.line; + lastOriginalColumn = original.column; + lastOriginalName = original.name; + sourceMappingActive = true; + } else if (sourceMappingActive) { + map.addMapping({ + generated: { + line: generated.line, + column: generated.column + } + }); + lastOriginalSource = null; + sourceMappingActive = false; + } + for (var idx = 0, length = chunk.length; idx < length; idx++) { + if (chunk.charCodeAt(idx) === NEWLINE_CODE) { + generated.line++; + generated.column = 0; + // Mappings end at eol + if (idx + 1 === length) { + lastOriginalSource = null; + sourceMappingActive = false; + } else if (sourceMappingActive) { + map.addMapping({ + source: original.source, + original: { + line: original.line, + column: original.column + }, + generated: { + line: generated.line, + column: generated.column + }, + name: original.name + }); + } + } else { + generated.column++; + } + } + }); + this.walkSourceContents(function (sourceFile, sourceContent) { + map.setSourceContent(sourceFile, sourceContent); + }); + + return { code: generated.code, map: map }; +}; + +exports.SourceNode = SourceNode; + + +/***/ }), +/* 24 */ +/***/ (function(module, exports) { + +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +function assert(condition, message) { + if (!condition) { + throw new Error(`Assertion failure: ${message}`); + } +} + +module.exports = assert; + +/***/ }), +/* 25 */ +/***/ (function(module, exports, __webpack_require__) { + +let _resolveAndFetch = (() => { + var _ref = _asyncToGenerator(function* (generatedSource) { + // Fetch the sourcemap over the network and create it. + const { sourceMapURL, baseURL } = _resolveSourceMapURL(generatedSource); + + const fetched = yield networkRequest(sourceMapURL, { loadFromCache: false }); + + // Create the source map and fix it up. 
+ let map = new SourceMapConsumer(fetched.content, baseURL); + if (generatedSource.isWasm) { + map = new WasmRemap(map); + } + + return map; + }); + + return function _resolveAndFetch(_x) { + return _ref.apply(this, arguments); + }; +})(); + +function _asyncToGenerator(fn) { return function () { var gen = fn.apply(this, arguments); return new Promise(function (resolve, reject) { function step(key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { return Promise.resolve(value).then(function (value) { step("next", value); }, function (err) { step("throw", err); }); } } return step("next"); }); }; } + +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +const { networkRequest } = __webpack_require__(1); +const { getSourceMap, setSourceMap } = __webpack_require__(9); +const { WasmRemap } = __webpack_require__(14); +const { SourceMapConsumer } = __webpack_require__(10); + +function _resolveSourceMapURL(source) { + const { url = "", sourceMapURL = "" } = source; + + if (!url) { + // If the source doesn't have a URL, don't resolve anything. + return { sourceMapURL, baseURL: sourceMapURL }; + } + + const resolvedURL = new URL(sourceMapURL, url); + const resolvedString = resolvedURL.toString(); + + let baseURL = resolvedString; + // When the sourceMap is a data: URL, fall back to using the + // source's URL, if possible. + if (resolvedURL.protocol == "data:") { + baseURL = url; + } + + return { sourceMapURL: resolvedString, baseURL }; +} + +function fetchSourceMap(generatedSource) { + const existingRequest = getSourceMap(generatedSource.id); + + // If it has already been requested, return the request. Make sure + // to do this even if sourcemapping is turned off, because + // pretty-printing uses sourcemaps. + // + // An important behavior here is that if it's in the middle of + // requesting it, all subsequent calls will block on the initial + // request. + if (existingRequest) { + return existingRequest; + } + + if (!generatedSource.sourceMapURL) { + return Promise.resolve(null); + } + + // Fire off the request, set it in the cache, and return it. + const req = _resolveAndFetch(generatedSource); + // Make sure the cached promise does not reject, because we only + // want to report the error once. 
+  setSourceMap(generatedSource.id, req.catch(() => null)); +  return req; +} + +module.exports = { fetchSourceMap }; + +/***/ }) +/******/ ]); +}); \ No newline at end of file diff --git a/dom/base/nsContentUtils.cpp b/dom/base/nsContentUtils.cpp index 1ab2d92bfe91..06929a12093f 100644 --- a/dom/base/nsContentUtils.cpp +++ b/dom/base/nsContentUtils.cpp @@ -10103,12 +10103,12 @@ nsContentUtils::LookupCustomElementDefinition(nsIDocument* aDoc, return nullptr; } - nsCOMPtr<nsPIDOMWindowInner> window(aDoc->GetInnerWindow()); + nsPIDOMWindowInner* window = aDoc->GetInnerWindow(); if (!window) { return nullptr; } - RefPtr<CustomElementRegistry> registry(window->CustomElements()); + CustomElementRegistry* registry = window->CustomElements(); if (!registry) { return nullptr; } diff --git a/dom/media/test/crashtests/crashtests.list b/dom/media/test/crashtests/crashtests.list index 7cc122b82557..9f5c3cff7d50 100644 --- a/dom/media/test/crashtests/crashtests.list +++ b/dom/media/test/crashtests/crashtests.list @@ -95,7 +95,7 @@ load 1291702.html load 1378826.html load 1384248.html load 1389304.html -skip-if(webrender&&winWidget) load 1393272.webm # bug 1426199 for webrender +load 1393272.webm load 1411322.html load disconnect-wrong-destination.html load analyser-channels-1.html diff --git a/dom/plugins/ipc/PluginModuleParent.cpp b/dom/plugins/ipc/PluginModuleParent.cpp index ab2d1ff087b0..d528a2f58d5a 100644 --- a/dom/plugins/ipc/PluginModuleParent.cpp +++ b/dom/plugins/ipc/PluginModuleParent.cpp @@ -8,6 +8,7 @@ #include "base/process_util.h" #include "mozilla/Attributes.h" +#include "mozilla/AutoRestore.h" #include "mozilla/dom/ContentParent.h" #include "mozilla/dom/ContentChild.h" #include "mozilla/ipc/CrashReporterClient.h" diff --git a/dom/serviceworkers/ServiceWorkerRegistrar.cpp b/dom/serviceworkers/ServiceWorkerRegistrar.cpp index 953a87bd0991..c348145ea97b 100644 --- a/dom/serviceworkers/ServiceWorkerRegistrar.cpp +++ b/dom/serviceworkers/ServiceWorkerRegistrar.cpp @@ -789,7 +789,6 @@ public: ServiceWorkerRegistrarSaveDataRunnable() : Runnable("dom::ServiceWorkerRegistrarSaveDataRunnable") , mEventTarget(GetCurrentThreadEventTarget()) - , mRetryCounter(0) { AssertIsOnBackgroundThread(); } @@ -800,33 +799,21 @@ public: RefPtr<ServiceWorkerRegistrar> service = ServiceWorkerRegistrar::Get(); MOZ_ASSERT(service); - // If the save fails, try again once in case it was a temporary - // problem due to virus scanning, etc. - static const uint32_t kMaxSaveRetryCount = 1; - - nsresult rv = service->SaveData(); - if (NS_FAILED(rv) && mRetryCounter < kMaxSaveRetryCount) { - rv = GetCurrentThreadEventTarget()->Dispatch(this, NS_DISPATCH_NORMAL); - if (NS_SUCCEEDED(rv)) { - mRetryCounter += 1; - return NS_OK; - } - } + // If the save fails do nothing for now. + // Consider adding a retry mechanism again in bug 1452373.
+  Unused << service->SaveData(); RefPtr<Runnable> runnable = NewRunnableMethod("ServiceWorkerRegistrar::DataSaved", service, &ServiceWorkerRegistrar::DataSaved); - rv = mEventTarget->Dispatch(runnable, NS_DISPATCH_NORMAL); - if (NS_WARN_IF(NS_FAILED(rv))) { - return rv; - } + nsresult rv = mEventTarget->Dispatch(runnable, NS_DISPATCH_NORMAL); + NS_ENSURE_SUCCESS(rv, rv); return NS_OK; } private: nsCOMPtr<nsIEventTarget> mEventTarget; - uint32_t mRetryCounter; }; void diff --git a/editor/libeditor/EditorEventListener.cpp b/editor/libeditor/EditorEventListener.cpp index 5cfd6816a51a..41b7be197502 100644 --- a/editor/libeditor/EditorEventListener.cpp +++ b/editor/libeditor/EditorEventListener.cpp @@ -7,6 +7,7 @@ #include "EditorEventListener.h" #include "mozilla/Assertions.h" // for MOZ_ASSERT, etc. +#include "mozilla/AutoRestore.h" #include "mozilla/ContentEvents.h" // for InternalFocusEvent #include "mozilla/EditorBase.h" // for EditorBase, etc. #include "mozilla/EventListenerManager.h" // for EventListenerManager diff --git a/gfx/layers/wr/WebRenderCommandBuilder.cpp b/gfx/layers/wr/WebRenderCommandBuilder.cpp index a55882138721..eb7a37c764cf 100644 --- a/gfx/layers/wr/WebRenderCommandBuilder.cpp +++ b/gfx/layers/wr/WebRenderCommandBuilder.cpp @@ -7,6 +7,7 @@ #include "WebRenderCommandBuilder.h" #include "BasicLayers.h" +#include "mozilla/AutoRestore.h" #include "mozilla/gfx/2D.h" #include "mozilla/gfx/Types.h" #include "mozilla/gfx/DrawEventRecorder.h" diff --git a/gfx/thebes/gfxFont.cpp b/gfx/thebes/gfxFont.cpp index 6882eaf24027..a981e09f5a0c 100644 --- a/gfx/thebes/gfxFont.cpp +++ b/gfx/thebes/gfxFont.cpp @@ -4166,19 +4166,6 @@ gfxFontStyle::gfxFontStyle(uint8_t aStyle, uint16_t aWeight, int16_t aStretch, } } -int8_t -gfxFontStyle::ComputeWeight() const -{ - int8_t baseWeight = (weight + 50) / 100; - - if (baseWeight < 0) - baseWeight = 0; - if (baseWeight > 9) - baseWeight = 9; - - return baseWeight; -} - void gfxFontStyle::AdjustForSubSuperscript(int32_t aAppUnitsPerDevPixel) { diff --git a/gfx/thebes/gfxFont.h b/gfx/thebes/gfxFont.h index f82e07693787..4889b8d890e5 100644 --- a/gfx/thebes/gfxFont.h +++ b/gfx/thebes/gfxFont.h @@ -189,8 +189,6 @@ struct gfxFontStyle { nsRefPtrHashKey<nsAtom>::HashKey(language); } - int8_t ComputeWeight() const; - // Adjust this style to simulate sub/superscript (as requested in the // variantSubSuper field) using size and baselineOffset instead.
void AdjustForSubSuperscript(int32_t aAppUnitsPerDevPixel); diff --git a/gfx/thebes/gfxFontEntry.cpp b/gfx/thebes/gfxFontEntry.cpp index 6ab035902ea8..cc7c76f73e35 100644 --- a/gfx/thebes/gfxFontEntry.cpp +++ b/gfx/thebes/gfxFontEntry.cpp @@ -1307,8 +1307,7 @@ gfxFontFamily::FindAllFontsForStyle(const gfxFontStyle& aFontStyle, aNeedsSyntheticBold = false; - int8_t baseWeight = aFontStyle.ComputeWeight(); - bool wantBold = baseWeight >= 6; + bool wantBold = aFontStyle.weight >= 600; gfxFontEntry *fe = nullptr; // If the family has only one face, we simply return it; no further @@ -1502,7 +1501,7 @@ CalcStyleMatch(gfxFontEntry *aFontEntry, const gfxFontStyle *aStyle) } // measure of closeness of weight to the desired value - rank += 9 - DeprecatedAbs(aFontEntry->Weight() / 100 - aStyle->ComputeWeight()); + rank += 9 - Abs((aFontEntry->Weight() - aStyle->weight) / 100); } else { // if no font to match, prefer non-bold, non-italic fonts if (aFontEntry->IsUpright()) { diff --git a/gfx/thebes/gfxTextRun.cpp b/gfx/thebes/gfxTextRun.cpp index 4fcaa98346d1..ad41d704d375 100644 --- a/gfx/thebes/gfxTextRun.cpp +++ b/gfx/thebes/gfxTextRun.cpp @@ -3444,7 +3444,7 @@ gfxFontGroup::WhichSystemFontSupportsChar(uint32_t aCh, uint32_t aNextCh, gfxPlatformFontList::PlatformFontList()-> SystemFindFontForChar(aCh, aNextCh, aRunScript, &mStyle); if (fe) { - bool wantBold = mStyle.ComputeWeight() >= 6; + bool wantBold = mStyle.weight >= 600; return fe->FindOrMakeFont(&mStyle, wantBold && !fe->IsBold()); } diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp index 315ce01fe063..d291cbe3e0d8 100644 --- a/js/src/builtin/TestingFunctions.cpp +++ b/js/src/builtin/TestingFunctions.cpp @@ -1241,6 +1241,23 @@ GetSavedFrameCount(JSContext* cx, unsigned argc, Value* vp) return true; } +static bool +ClearSavedFrames(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + + js::SavedStacks& savedStacks = cx->compartment()->savedStacks(); + if (savedStacks.initialized()) + savedStacks.clear(); + + for (ActivationIterator iter(cx); !iter.done(); ++iter) { + iter->clearLiveSavedFrameCache(); + } + + args.rval().setUndefined(); + return true; +} + static bool SaveStack(JSContext* cx, unsigned argc, Value* vp) { @@ -5170,6 +5187,12 @@ static const JSFunctionSpecWithHelp TestingFunctions[] = { " Return the number of SavedFrame instances stored in this compartment's\n" " SavedStacks cache."), + JS_FN_HELP("clearSavedFrames", ClearSavedFrames, 0, 0, +"clearSavedFrames()", +" Empty the current compartment's cache of SavedFrame objects, so that\n" +" subsequent stack captures allocate fresh objects to represent frames.\n" +" Clear the current stack's LiveSavedFrameCaches."), + JS_FN_HELP("saveStack", SaveStack, 0, 0, "saveStack([maxDepth [, compartment]])", " Capture a stack. If 'maxDepth' is given, capture at most 'maxDepth' number\n" diff --git a/js/src/jit-test/tests/saved-stacks/bug-1445973-quick.js b/js/src/jit-test/tests/saved-stacks/bug-1445973-quick.js new file mode 100644 index 000000000000..e689b5a594ed --- /dev/null +++ b/js/src/jit-test/tests/saved-stacks/bug-1445973-quick.js @@ -0,0 +1,56 @@ +// |jit-test| --no-baseline +// +// For background, see the comments for LiveSavedFrameCache in js/src/vm/Stack.h. +// +// The cache would like to assert that, assuming the cache hasn't been +// completely flushed due to a compartment mismatch, if a stack frame's +// hasCachedSavedFrame bit is set, then that frame does indeed have an entry in +// the cache. 
+// +// When LiveSavedFrameCache::find finds an entry whose frame address matches, +// but whose pc does not match, it removes that entry from the cache. Usually, a +// fresh entry for that frame will be pushed into the cache in short order as we +// rebuild the SavedFrame chain, but if the creation of the SavedFrame fails due +// to OOM, then we are left with no cache entry for that frame. +// +// The fix for 1445973 is simply to clear the frame's bit when we remove the +// cache entry for a pc mismatch. Previously the code neglected to do this, but +// usually got away with it because the cache would be re-populated. OOM fuzzing +// interrupted the code at the proper place and revealed the crash, but did so +// with a test that took 90s to run. This test runs in a fraction of a second. + +if (!('oomTest' in this)) + quit(); + +function f() { + // Ensure that we will try to allocate fresh SavedFrame objects. + clearSavedFrames(); + + // Ensure that all frames have their hasCachedSavedFrame bits set. + saveStack(); + + try { + // Capture the stack again. The entry for this frame will be removed due to + // a pc mismatch. The OOM must occur here, causing the cache not to be + // repopulated. + saveStack(); + } catch (e) { } + + // Capture the stack a third time. This will see that f's frame has its bit + // set, even though it has no entry in the cache. + saveStack(); +} + +// This ensures that there is a frame below f's in the same Activation, so that +// the assertion doesn't get skipped because the LiveSavedFrameCache is entirely +// empty, to handle caches flushed by compartment mismatches. +function g() { f(); } + +// Call all the code once, to ensure that everything has been delazified. When +// different calls to g perform different amounts of allocation, oomTest's +// simple strategy for choosing which allocation should fail can neglect to hit +// the SavedFrame creation. This is also why we disable the baseline compiler in +// the test metaline. +g(); + +oomTest(g); diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h index ffab4cf11b34..2d72f2b8e428 100644 --- a/js/src/jit/JitFrames.h +++ b/js/src/jit/JitFrames.h @@ -374,6 +374,9 @@ class CommonFrameLayout void setHasCachedSavedFrame() { descriptor_ |= HASCACHEDSAVEDFRAME_BIT; } + void clearHasCachedSavedFrame() { + descriptor_ &= ~HASCACHEDSAVEDFRAME_BIT; + } uint8_t* returnAddress() const { return returnAddress_; } diff --git a/js/src/jit/RematerializedFrame.h b/js/src/jit/RematerializedFrame.h index 0b279ea7503a..abce558d08d1 100644 --- a/js/src/jit/RematerializedFrame.h +++ b/js/src/jit/RematerializedFrame.h @@ -190,6 +190,10 @@ class RematerializedFrame hasCachedSavedFrame_ = true; } + void clearHasCachedSavedFrame() { + hasCachedSavedFrame_ = false; + } + unsigned numFormalArgs() const { return isFunctionFrame() ? callee()->nargs() : 0; } diff --git a/js/src/vm/SavedStacks.cpp b/js/src/vm/SavedStacks.cpp index 25d08e0bcc4a..7870de951090 100644 --- a/js/src/vm/SavedStacks.cpp +++ b/js/src/vm/SavedStacks.cpp @@ -149,6 +149,11 @@ LiveSavedFrameCache::find(JSContext* cx, FramePtr& framePtr, const jsbytecode* p // this frame for that to happen, but this frame's bit is set. if (pc != frames->back().pc) { frames->popBack(); + + // Since we've removed this entry from the cache, clear the bit on its + // frame. Usually we'll repopulate the cache anyway, but if there's an + // OOM that might not happen. 
+      framePtr.clearHasCachedSavedFrame(); frame.set(nullptr); return; } diff --git a/js/src/vm/Stack-inl.h b/js/src/vm/Stack-inl.h index 6602ac25c6ad..bf2056e458c2 100644 --- a/js/src/vm/Stack-inl.h +++ b/js/src/vm/Stack-inl.h @@ -1037,6 +1037,16 @@ LiveSavedFrameCache::FramePtr::setHasCachedSavedFrame() { ptr.match(SetHasCachedMatcher()); } +struct LiveSavedFrameCache::FramePtr::ClearHasCachedMatcher { + template <typename Frame> + void match(Frame* f) { f->clearHasCachedSavedFrame(); } +}; + +inline void +LiveSavedFrameCache::FramePtr::clearHasCachedSavedFrame() { + ptr.match(ClearHasCachedMatcher()); +} + } /* namespace js */ #endif /* vm_Stack_inl_h */ diff --git a/js/src/vm/Stack.h b/js/src/vm/Stack.h index f782a324beff..8856650521ea 100644 --- a/js/src/vm/Stack.h +++ b/js/src/vm/Stack.h @@ -777,6 +777,9 @@ class InterpreterFrame void setHasCachedSavedFrame() { flags_ |= HAS_CACHED_SAVED_FRAME; } + void clearHasCachedSavedFrame() { + flags_ &= ~HAS_CACHED_SAVED_FRAME; + } public: void trace(JSTracer* trc, Value* sp, jsbytecode* pc); @@ -1195,7 +1198,7 @@ struct DefaultHasher { // recently. When we find a cache hit, we check the entry's SavedFrame's // compartment against the current compartment; if they do not match, we flush // the entire cache. This means that it is not always true that, if a frame's -// bit it set, it must have an entry in the cache. But we can still assert +// bit is set, it must have an entry in the cache. But we can still assert // that, if a frame's bit is set and the cache is not completely empty, the // frame will have an entry. When the cache is flushed, it will be repopulated // immediately with the new capture's frames. @@ -1248,6 +1251,7 @@ class LiveSavedFrameCache struct HasCachedMatcher; struct SetHasCachedMatcher; + struct ClearHasCachedMatcher; public: // If iter's frame is of a type that can be cached, construct a FramePtr @@ -1259,6 +1263,7 @@ class LiveSavedFrameCache inline bool hasCachedSavedFrame() const; inline void setHasCachedSavedFrame(); + inline void clearHasCachedSavedFrame(); // Return true if this FramePtr refers to an interpreter frame. inline bool isInterpreterFrame() const { return ptr.is<InterpreterFrame*>(); } diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h index 3da45cfec887..f43177d77649 100644 --- a/js/src/wasm/WasmTypes.h +++ b/js/src/wasm/WasmTypes.h @@ -2011,6 +2011,7 @@ class DebugFrame bool hasCachedSavedFrame() const { return hasCachedSavedFrame_; } void setHasCachedSavedFrame() { hasCachedSavedFrame_ = true; } + void clearHasCachedSavedFrame() { hasCachedSavedFrame_ = false; } // DebugFrame is accessed directly by JIT code.
diff --git a/layout/base/AccessibleCaretManager.cpp b/layout/base/AccessibleCaretManager.cpp index 76af734383db..1aa1eddb9f33 100644 --- a/layout/base/AccessibleCaretManager.cpp +++ b/layout/base/AccessibleCaretManager.cpp @@ -10,6 +10,7 @@ #include "AccessibleCaretEventHub.h" #include "AccessibleCaretLogger.h" #include "mozilla/AsyncEventDispatcher.h" +#include "mozilla/AutoRestore.h" #include "mozilla/dom/Element.h" #include "mozilla/dom/MouseEventBinding.h" #include "mozilla/dom/NodeFilterBinding.h" diff --git a/layout/base/PresShell.cpp b/layout/base/PresShell.cpp index bafb1899925c..3a5422bffd6a 100644 --- a/layout/base/PresShell.cpp +++ b/layout/base/PresShell.cpp @@ -11,6 +11,7 @@ #include "mozilla/dom/FontFaceSet.h" #include "mozilla/ArrayUtils.h" #include "mozilla/Attributes.h" +#include "mozilla/AutoRestore.h" #include "mozilla/StyleSheetInlines.h" #include "mozilla/EventDispatcher.h" #include "mozilla/EventStateManager.h" diff --git a/layout/base/RestyleLogging.h b/layout/base/RestyleLogging.h deleted file mode 100644 index fffa963a683b..000000000000 --- a/layout/base/RestyleLogging.h +++ /dev/null @@ -1,48 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ -/* vim: set ts=8 sts=2 et sw=2 tw=80: */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -/** - * Macros used to log restyle events. - */ - -#ifndef mozilla_RestyleLogging_h -#define mozilla_RestyleLogging_h - -#include "mozilla/AutoRestore.h" - -#ifdef DEBUG -#endif - -#ifdef RESTYLE_LOGGING -#define LOG_RESTYLE_VAR2(prefix_, suffix_) prefix_##suffix_ -#define LOG_RESTYLE_VAR(prefix_, suffix_) LOG_RESTYLE_VAR2(prefix_, suffix_) -#define LOG_RESTYLE_DEPTH LOG_RESTYLE_VAR(restyle_depth_, __LINE__) -#define LOG_RESTYLE_IF(object_, cond_, message_, ...) \ - PR_BEGIN_MACRO \ - if (object_->ShouldLogRestyle() && (cond_)) { \ - nsCString line; \ - for (int32_t LOG_RESTYLE_VAR(rs_depth_, __LINE__) = 0; \ - LOG_RESTYLE_VAR(rs_depth_, __LINE__) < object_->LoggingDepth(); \ - LOG_RESTYLE_VAR(rs_depth_, __LINE__)++) { \ - line.AppendLiteral(" "); \ - } \ - line.AppendPrintf(message_, ##__VA_ARGS__); \ - printf_stderr("%s\n", line.get()); \ - } \ - PR_END_MACRO -#define LOG_RESTYLE(message_, ...) \ - LOG_RESTYLE_IF(this, true, message_, ##__VA_ARGS__) -// Beware that LOG_RESTYLE_INDENT is two statements not wrapped in a block. -#define LOG_RESTYLE_INDENT() \ - AutoRestore LOG_RESTYLE_VAR(ar_depth_, __LINE__)(LoggingDepth()); \ - ++LoggingDepth(); -#else -#define LOG_RESTYLE_IF(cond_, message_, ...) /* nothing */ -#define LOG_RESTYLE(message_, ...) 
/* nothing */ -#define LOG_RESTYLE_INDENT() /* nothing */ -#endif - -#endif /* mozilla_RestyleLogging_h */ diff --git a/layout/base/RestyleManager.h b/layout/base/RestyleManager.h index 9ab8af29273c..b6f7faae61dd 100644 --- a/layout/base/RestyleManager.h +++ b/layout/base/RestyleManager.h @@ -7,6 +7,7 @@ #ifndef mozilla_RestyleManager_h #define mozilla_RestyleManager_h +#include "mozilla/AutoRestore.h" #include "mozilla/EventStates.h" #include "mozilla/Maybe.h" #include "mozilla/OverflowChangedTracker.h" diff --git a/layout/base/RestyleTrackerInlines.h b/layout/base/RestyleTrackerInlines.h deleted file mode 100644 index 67375b335794..000000000000 --- a/layout/base/RestyleTrackerInlines.h +++ /dev/null @@ -1,24 +0,0 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ -/* vim: set ts=8 sts=2 et sw=2 tw=80: */ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#ifndef mozilla_RestyleTrackerInlines_h -#define mozilla_RestyleTrackerInlines_h - -#ifdef RESTYLE_LOGGING -bool -mozilla::RestyleTracker::ShouldLogRestyle() -{ - return mRestyleManager->ShouldLogRestyle(); -} - -int32_t& -mozilla::RestyleTracker::LoggingDepth() -{ - return mRestyleManager->LoggingDepth(); -} -#endif - -#endif // !defined(mozilla_RestyleTrackerInlines_h) diff --git a/layout/base/crashtests/crashtests.list b/layout/base/crashtests/crashtests.list index 4f9ab59a9bd9..affd8355f95c 100644 --- a/layout/base/crashtests/crashtests.list +++ b/layout/base/crashtests/crashtests.list @@ -383,7 +383,7 @@ load 629035-1.html load 629908-1.html load 635329.html load 636229-1.html -skip-if(webrender&&winWidget) == 640272.html 640272-ref.html # bug 1426199 for webrender +== 640272.html 640272-ref.html load 645193.html load 645572-1.html load 650475.xhtml diff --git a/layout/base/moz.build b/layout/base/moz.build index 606066fa6b4d..a677b49866b1 100644 --- a/layout/base/moz.build +++ b/layout/base/moz.build @@ -79,7 +79,6 @@ EXPORTS.mozilla += [ 'GeometryUtils.h', 'OverflowChangedTracker.h', 'PresShell.h', - 'RestyleLogging.h', 'RestyleManager.h', 'ShapeUtils.h', 'StaticPresData.h', diff --git a/layout/base/nsPresContext.cpp b/layout/base/nsPresContext.cpp index 6be2651da94f..121634e68c9a 100644 --- a/layout/base/nsPresContext.cpp +++ b/layout/base/nsPresContext.cpp @@ -265,9 +265,6 @@ nsPresContext::nsPresContext(nsIDocument* aDocument, nsPresContextType aType) mQuirkSheetAdded(false), mNeedsPrefUpdate(false), mHadNonBlankPaint(false) -#ifdef RESTYLE_LOGGING - , mRestyleLoggingEnabled(false) -#endif #ifdef DEBUG , mInitialized(false) #endif @@ -944,10 +941,6 @@ nsPresContext::Init(nsDeviceContext* aDeviceContext) mEventManager->SetPresContext(this); -#ifdef RESTYLE_LOGGING - mRestyleLoggingEnabled = GeckoRestyleManager::RestyleLoggingInitiallyEnabled(); -#endif - #ifdef DEBUG mInitialized = true; #endif diff --git a/layout/base/nsPresContext.h b/layout/base/nsPresContext.h index 2a51de0cba02..c3b0574b0682 100644 --- a/layout/base/nsPresContext.h +++ b/layout/base/nsPresContext.h @@ -43,7 +43,6 @@ #include "nsThreadUtils.h" #include "ScrollbarStyles.h" #include "nsIMessageManager.h" -#include "mozilla/RestyleLogging.h" #include "Units.h" #include "prenv.h" #include "mozilla/StaticPresData.h" @@ -1259,14 +1258,6 @@ public: */ bool MayHavePaintEventListenerInSubDocument(); -#ifdef RESTYLE_LOGGING - // Controls for whether debug information about 
restyling in this - // document should be output. - bool RestyleLoggingEnabled() const { return mRestyleLoggingEnabled; } - void StartRestyleLogging() { mRestyleLoggingEnabled = true; } - void StopRestyleLogging() { mRestyleLoggingEnabled = false; } -#endif - void InvalidatePaintedLayers(); protected: @@ -1504,11 +1495,6 @@ protected: // Has NotifyNonBlankPaint been called on this PresContext? unsigned mHadNonBlankPaint : 1; -#ifdef RESTYLE_LOGGING - // Should we output debug information about restyling for this document? - unsigned mRestyleLoggingEnabled : 1; -#endif - #ifdef DEBUG unsigned mInitialized : 1; #endif diff --git a/layout/generic/nsGridContainerFrame.cpp b/layout/generic/nsGridContainerFrame.cpp index 5d60a84f3074..44bb50b9ed07 100644 --- a/layout/generic/nsGridContainerFrame.cpp +++ b/layout/generic/nsGridContainerFrame.cpp @@ -12,6 +12,7 @@ #include #include #include "gfxContext.h" +#include "mozilla/AutoRestore.h" #include "mozilla/ComputedStyle.h" #include "mozilla/CSSAlignUtils.h" #include "mozilla/CSSOrderAwareFrameIterator.h" diff --git a/layout/style/ComputedStyle.h b/layout/style/ComputedStyle.h index 7a5cd46ceb21..edc993635822 100644 --- a/layout/style/ComputedStyle.h +++ b/layout/style/ComputedStyle.h @@ -13,7 +13,6 @@ #include #include "mozilla/ArenaObjectID.h" #include "mozilla/Assertions.h" -#include "mozilla/RestyleLogging.h" #include "mozilla/ServoTypes.h" #include "mozilla/ServoUtils.h" #include "mozilla/StyleComplexColor.h" diff --git a/taskcluster/docker/periodic-updates/scripts/periodic_file_updates.sh b/taskcluster/docker/periodic-updates/scripts/periodic_file_updates.sh index 28940f56cb0e..71aa710324cc 100755 --- a/taskcluster/docker/periodic-updates/scripts/periodic_file_updates.sh +++ b/taskcluster/docker/periodic-updates/scripts/periodic_file_updates.sh @@ -408,6 +408,17 @@ function push_repo { then return 1 fi + + # Clean up older review requests + # Turn Needs Review D624: No bug, Automated HSTS ... 
+ # into D624 + for diff in $($ARC list | grep "Needs Review" | grep -E "Automated HSTS|Automated HPKP|Automated blocklist" | awk 'match($0, /D[0-9]+[^: ]/, arr) { print arr[0] }') + do + echo "Removing old request $diff" + # There is no 'arc abandon', see bug 1452082 + echo '{"transactions": [{"type":"abandon"}], "objectIdentifier": "'"${diff}"'"}' | arc call-conduit differential.revision.edit + done + $ARC diff --verbatim --reviewers "${REVIEWERS}" } diff --git a/taskcluster/docker/periodic-updates/setup.sh b/taskcluster/docker/periodic-updates/setup.sh index 12561af75942..813721a65383 100755 --- a/taskcluster/docker/periodic-updates/setup.sh +++ b/taskcluster/docker/periodic-updates/setup.sh @@ -15,20 +15,20 @@ useradd -d /home/worker -s /bin/bash -m worker apt-get update -q apt-get install -y --no-install-recommends \ - arcanist=0~git20170812-1 \ - bzip2=1.0.6-8.1 \ + arcanist \ + bzip2 \ ca-certificates \ curl \ - jq=1.5+dfsg-2 \ - libdbus-glib-1-2=0.110-2 \ - libgtk-3-0=3.22.28-1ubuntu3 \ - libxml2-utils=2.9.4+dfsg1-6.1ubuntu1 \ - libxt6=1:1.1.5-1 \ + jq \ + libdbus-glib-1-2 \ + libgtk-3-0 \ + libxml2-utils \ + libxt6 \ python \ - python3=3.6.4-1 \ - shellcheck=0.4.6-1 \ - unzip=6.0-21ubuntu1 \ - wget=1.19.4-1ubuntu2 \ + python3 \ + shellcheck \ + unzip \ + wget \ apt-get clean diff --git a/toolkit/components/extensions/test/xpcshell/test_ext_redirects.js b/toolkit/components/extensions/test/xpcshell/test_ext_redirects.js index 63104677deda..1a137173c814 100644 --- a/toolkit/components/extensions/test/xpcshell/test_ext_redirects.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_redirects.js @@ -5,6 +5,8 @@ ChromeUtils.defineModuleGetter(this, "TestUtils", "resource://testing-common/TestUtils.jsm"); Cu.importGlobalProperties(["XMLHttpRequest"]); +PromiseTestUtils.whitelistRejectionsGlobally(/Message manager disconnected/); + const server = createHttpServer(); const gServerUrl = `http://localhost:${server.identity.primaryPort}`; diff --git a/widget/windows/KeyboardLayout.cpp b/widget/windows/KeyboardLayout.cpp index 2f7c43695f44..ff28a2011eff 100644 --- a/widget/windows/KeyboardLayout.cpp +++ b/widget/windows/KeyboardLayout.cpp @@ -6,6 +6,7 @@ #include "mozilla/Logging.h" #include "mozilla/ArrayUtils.h" +#include "mozilla/AutoRestore.h" #include "mozilla/DebugOnly.h" #include "mozilla/MouseEvents.h" #include "mozilla/MiscEvents.h" diff --git a/widget/windows/nsWindow.cpp b/widget/windows/nsWindow.cpp index 6975d9890c22..7a1d39c8d76f 100644 --- a/widget/windows/nsWindow.cpp +++ b/widget/windows/nsWindow.cpp @@ -58,6 +58,7 @@ #include "gfxEnv.h" #include "gfxPlatform.h" #include "gfxPrefs.h" +#include "mozilla/AutoRestore.h" #include "mozilla/Logging.h" #include "mozilla/MathAlgorithms.h" #include "mozilla/MiscEvents.h"