diff --git a/internal/driverutil/operation.go b/internal/driverutil/operation.go index 32704312ff..e37cba5903 100644 --- a/internal/driverutil/operation.go +++ b/internal/driverutil/operation.go @@ -28,4 +28,5 @@ const ( ListIndexesOp = "listIndexes" // ListIndexesOp is the name for listing indexes ListDatabasesOp = "listDatabases" // ListDatabasesOp is the name for listing databases UpdateOp = "update" // UpdateOp is the name for updating + BulkWriteOp = "bulkWrite" // BulkWriteOp is the name for client-level bulk write ) diff --git a/mongo/bulk_write.go b/mongo/bulk_write.go index 40f1181e0e..402497b13e 100644 --- a/mongo/bulk_write.go +++ b/mongo/bulk_write.go @@ -164,8 +164,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (operation.InsertResult, error) { docs := make([]bsoncore.Document, len(batch.models)) - var i int - for _, model := range batch.models { + for i, model := range batch.models { converted := model.(*InsertOneModel) doc, err := marshal(converted.Document, bw.collection.bsonOpts, bw.collection.registry) if err != nil { @@ -177,7 +176,6 @@ func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (opera } docs[i] = doc - i++ } op := operation.NewInsert(docs...). diff --git a/mongo/client.go b/mongo/client.go index 0ce6d2e24b..aacf7b49f0 100644 --- a/mongo/client.go +++ b/mongo/client.go @@ -73,14 +73,15 @@ type Client struct { logger *logger.Logger // client-side encryption fields - keyVaultClientFLE *Client - keyVaultCollFLE *Collection - mongocryptdFLE *mongocryptdClient - cryptFLE driver.Crypt - metadataClientFLE *Client - internalClientFLE *Client - encryptedFieldsMap map[string]interface{} - authenticator driver.Authenticator + isAutoEncryptionSet bool + keyVaultClientFLE *Client + keyVaultCollFLE *Collection + mongocryptdFLE *mongocryptdClient + cryptFLE driver.Crypt + metadataClientFLE *Client + internalClientFLE *Client + encryptedFieldsMap map[string]interface{} + authenticator driver.Authenticator } // Connect creates a new Client and then initializes it using the Connect method. This is equivalent to calling @@ -194,6 +195,7 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { } // AutoEncryptionOptions if clientOpt.AutoEncryptionOptions != nil { + client.isAutoEncryptionSet = true if err := client.configureAutoEncryption(clientOpt); err != nil { return nil, err } @@ -424,8 +426,6 @@ func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) return nil, replaceErrors(err) } - // Writes are not retryable on standalones, so let operation determine whether to retry - sess.RetryWrite = false sess.RetryRead = c.retryReads return &sessionImpl{ @@ -851,6 +851,81 @@ func (c *Client) createBaseCursorOptions() driver.CursorOptions { } } +// BulkWrite performs a client-level bulk write operation. +func (c *Client) BulkWrite(ctx context.Context, models *ClientWriteModels, + opts ...*options.ClientBulkWriteOptions) (*ClientBulkWriteResult, error) { + // TODO: Remove once DRIVERS-2888 is implemented. + if c.isAutoEncryptionSet { + return nil, errors.New("bulkWrite does not currently support automatic encryption") + } + bwo := options.MergeClientBulkWriteOptions(opts...) 
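A short sketch of how the Client.BulkWrite API added by this patch is expected to be called from application code; the helper name exampleClientBulkWrite, the "db"/"coll" namespace, and the logging are illustrative only, and the fragment assumes the usual context, log, bson, mongo, and options imports.

func exampleClientBulkWrite(ctx context.Context, client *mongo.Client) {
    // Build a mixed set of write models; each Append* call records the target
    // database and collection alongside the model.
    models := (&mongo.ClientWriteModels{}).
        AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{Document: bson.D{{"x", 1}}}).
        AppendDeleteOne("db", "coll", &mongo.ClientDeleteOneModel{Filter: bson.D{{"x", 1}}})

    result, err := client.BulkWrite(ctx, models, options.ClientBulkWrite().SetOrdered(false))
    if err != nil {
        // Per-model failures surface as a ClientBulkWriteException.
        if bwe, ok := err.(mongo.ClientBulkWriteException); ok {
            log.Printf("write errors: %d, partial result: %+v", len(bwe.WriteErrors), bwe.PartialResult)
        }
        return
    }
    log.Printf("inserted: %d, deleted: %d", result.InsertedCount, result.DeletedCount)
}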
+ + if ctx == nil { + ctx = context.Background() + } + + sess := sessionFromContext(ctx) + if sess == nil && c.sessionPool != nil { + sess = session.NewImplicitClientSession(c.sessionPool, c.id) + defer sess.EndSession() + } + + err := c.validSession(sess) + if err != nil { + return nil, err + } + + transactionRunning := sess.TransactionRunning() + wc := c.writeConcern + if transactionRunning { + wc = nil + } + if bwo.WriteConcern != nil { + if transactionRunning { + return nil, errors.New("cannot set write concern after starting a transaction") + } + wc = bwo.WriteConcern + } + acknowledged := writeconcern.AckWrite(wc) + if !acknowledged { + if bwo.Ordered == nil || *bwo.Ordered { + return nil, errors.New("cannot request unacknowledged write concern and ordered writes") + } + sess = nil + } + + writeSelector := description.CompositeSelector([]description.ServerSelector{ + description.WriteSelector(), + description.LatencySelector(c.localThreshold), + }) + selector := makePinnedSelector(sess, writeSelector) + + op := clientBulkWrite{ + models: models.models, + ordered: bwo.Ordered, + bypassDocumentValidation: bwo.BypassDocumentValidation, + comment: bwo.Comment, + let: bwo.Let, + session: sess, + client: c, + selector: selector, + writeConcern: wc, + } + if bwo.VerboseResults == nil || !(*bwo.VerboseResults) { + op.errorsOnly = true + } else if !acknowledged { + return nil, errors.New("cannot request unacknowledged write concern and verbose results") + } + if err = op.execute(ctx); err != nil { + return nil, replaceErrors(err) + } + var results *ClientBulkWriteResult + if acknowledged { + results = &op.result + } + return results, nil +} + // newLogger will use the LoggerOptions to create an internal logger and publish // messages using a LogSink. func newLogger(opts *options.LoggerOptions) (*logger.Logger, error) { diff --git a/mongo/client_bulk_write.go b/mongo/client_bulk_write.go new file mode 100644 index 0000000000..8a187ee1c4 --- /dev/null +++ b/mongo/client_bulk_write.go @@ -0,0 +1,708 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package mongo + +import ( + "context" + "errors" + "io" + "strconv" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal/driverutil" + "go.mongodb.org/mongo-driver/mongo/description" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/writeconcern" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver" + "go.mongodb.org/mongo-driver/x/mongo/driver/session" + "go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage" +) + +const ( + database = "admin" +) + +type clientBulkWrite struct { + models []clientWriteModel + errorsOnly bool + ordered *bool + bypassDocumentValidation *bool + comment interface{} + let interface{} + session *session.Client + client *Client + selector description.ServerSelector + writeConcern *writeconcern.WriteConcern + + result ClientBulkWriteResult +} + +func (bw *clientBulkWrite) execute(ctx context.Context) error { + if len(bw.models) == 0 { + return ErrEmptySlice + } + for _, m := range bw.models { + if m.model == nil { + return ErrNilDocument + } + } + batches := &modelBatches{ + session: bw.session, + client: bw.client, + ordered: bw.ordered == nil || *bw.ordered, + models: bw.models, + result: &bw.result, + retryMode: driver.RetryOnce, + } + err := driver.Operation{ + CommandFn: bw.newCommand(), + ProcessResponseFn: batches.processResponse, + Client: bw.session, + Clock: bw.client.clock, + RetryMode: &batches.retryMode, + Type: driver.Write, + Batches: batches, + CommandMonitor: bw.client.monitor, + Database: database, + Deployment: bw.client.deployment, + Selector: bw.selector, + WriteConcern: bw.writeConcern, + Crypt: bw.client.cryptFLE, + ServerAPI: bw.client.serverAPI, + Timeout: bw.client.timeout, + Logger: bw.client.logger, + Authenticator: bw.client.authenticator, + Name: driverutil.BulkWriteOp, + }.Execute(ctx) + var exception *ClientBulkWriteException + switch tt := err.(type) { + case CommandError: + exception = &ClientBulkWriteException{ + TopLevelError: &WriteError{ + Code: int(tt.Code), + Message: tt.Message, + Raw: tt.Raw, + }, + } + default: + if errors.Is(err, driver.ErrUnacknowledgedWrite) { + err = nil + } + } + if len(batches.writeConcernErrors) > 0 || len(batches.writeErrors) > 0 { + if exception == nil { + exception = new(ClientBulkWriteException) + } + exception.WriteConcernErrors = batches.writeConcernErrors + exception.WriteErrors = batches.writeErrors + } + if exception != nil { + var hasSuccess bool + if batches.ordered { + _, ok := batches.writeErrors[0] + hasSuccess = !ok + } else { + hasSuccess = len(batches.writeErrors) < len(bw.models) + } + if hasSuccess { + exception.PartialResult = batches.result + } + return *exception + } + return err +} + +func (bw *clientBulkWrite) newCommand() func([]byte, description.SelectedServer) ([]byte, error) { + return func(dst []byte, desc description.SelectedServer) ([]byte, error) { + dst = bsoncore.AppendInt32Element(dst, "bulkWrite", 1) + + dst = bsoncore.AppendBooleanElement(dst, "errorsOnly", bw.errorsOnly) + if bw.bypassDocumentValidation != nil && (desc.WireVersion != nil && desc.WireVersion.Includes(4)) { + dst = bsoncore.AppendBooleanElement(dst, "bypassDocumentValidation", *bw.bypassDocumentValidation) + } + if bw.comment != nil { + comment, err := marshalValue(bw.comment, bw.client.bsonOpts, bw.client.registry) 
+ if err != nil { + return nil, err + } + dst = bsoncore.AppendValueElement(dst, "comment", comment) + } + dst = bsoncore.AppendBooleanElement(dst, "ordered", bw.ordered == nil || *bw.ordered) + if bw.let != nil { + let, err := marshal(bw.let, bw.client.bsonOpts, bw.client.registry) + if err != nil { + return nil, err + } + dst = bsoncore.AppendDocumentElement(dst, "let", let) + } + return dst, nil + } +} + +type cursorInfo struct { + Ok bool + Idx int32 + Code *int32 + Errmsg *string + ErrInfo bson.Raw + N int32 + NModified *int32 + Upserted *struct { + ID interface{} `bson:"_id"` + } +} + +func (cur *cursorInfo) extractError() *WriteError { + if cur.Ok { + return nil + } + err := &WriteError{ + Index: int(cur.Idx), + Details: cur.ErrInfo, + } + if cur.Code != nil { + err.Code = int(*cur.Code) + } + if cur.Errmsg != nil { + err.Message = *cur.Errmsg + } + return err +} + +type modelBatches struct { + session *session.Client + client *Client + + ordered bool + models []clientWriteModel + + offset int + + retryMode driver.RetryMode // RetryNone by default + cursorHandlers []func(*cursorInfo, bson.Raw) bool + newIDMap map[int]interface{} + + result *ClientBulkWriteResult + writeConcernErrors []WriteConcernError + writeErrors map[int]WriteError +} + +func (mb *modelBatches) IsOrdered() *bool { + return &mb.ordered +} + +func (mb *modelBatches) AdvanceBatches(n int) { + mb.offset += n + if mb.offset > len(mb.models) { + mb.offset = len(mb.models) + } +} + +func (mb *modelBatches) Size() int { + if mb.offset > len(mb.models) { + return 0 + } + return len(mb.models) - mb.offset +} + +func (mb *modelBatches) AppendBatchSequence(dst []byte, maxCount, maxDocSize, totalSize int) (int, []byte, error) { + fn := functionSet{ + appendStart: func(dst []byte, identifier string) (int32, []byte) { + var idx int32 + dst = wiremessage.AppendMsgSectionType(dst, wiremessage.DocumentSequence) + idx, dst = bsoncore.ReserveLength(dst) + dst = append(dst, identifier...) + dst = append(dst, 0x00) + return idx, dst + }, + appendDocument: func(dst []byte, _ string, doc []byte) []byte { + dst = append(dst, doc...) 
+ return dst + }, + updateLength: func(dst []byte, idx, length int32) []byte { + dst = bsoncore.UpdateLength(dst, idx, length) + return dst + }, + } + return mb.appendBatches(fn, dst, maxCount, maxDocSize, totalSize) +} + +func (mb *modelBatches) AppendBatchArray(dst []byte, maxCount, maxDocSize, totalSize int) (int, []byte, error) { + fn := functionSet{ + appendStart: bsoncore.AppendArrayElementStart, + appendDocument: bsoncore.AppendDocumentElement, + updateLength: func(dst []byte, idx, _ int32) []byte { + dst, _ = bsoncore.AppendArrayEnd(dst, idx) + return dst + }, + } + return mb.appendBatches(fn, dst, maxCount, maxDocSize, totalSize) +} + +type functionSet struct { + appendStart func([]byte, string) (int32, []byte) + appendDocument func([]byte, string, []byte) []byte + updateLength func([]byte, int32, int32) []byte +} + +func (mb *modelBatches) appendBatches(fn functionSet, dst []byte, maxCount, maxDocSize, totalSize int) (int, []byte, error) { + if mb.Size() == 0 { + return 0, dst, io.EOF + } + + mb.cursorHandlers = mb.cursorHandlers[:0] + mb.newIDMap = make(map[int]interface{}) + + nsMap := make(map[string]int) + getNsIndex := func(namespace string) (int, bool) { + v, ok := nsMap[namespace] + if ok { + return v, ok + } + nsIdx := len(nsMap) + nsMap[namespace] = nsIdx + return nsIdx, ok + } + + canRetry := true + checkSize := true + + l := len(dst) + + opsIdx, dst := fn.appendStart(dst, "ops") + nsIdx, nsDst := fn.appendStart(nil, "nsInfo") + + totalSize -= 1000 + size := len(dst) + len(nsDst) + var n int + for i := mb.offset; i < len(mb.models); i++ { + if n == maxCount { + break + } + + ns := mb.models[i].namespace + nsIdx, exists := getNsIndex(ns) + + var doc bsoncore.Document + var err error + switch model := mb.models[i].model.(type) { + case *ClientInsertOneModel: + checkSize = false + mb.cursorHandlers = append(mb.cursorHandlers, mb.appendInsertResult) + var id interface{} + id, doc, err = (&clientInsertDoc{ + namespace: nsIdx, + document: model.Document, + sizeLimit: maxDocSize, + }).marshal(mb.client.bsonOpts, mb.client.registry) + if err != nil { + break + } + mb.newIDMap[i] = id + case *ClientUpdateOneModel: + mb.cursorHandlers = append(mb.cursorHandlers, mb.appendUpdateResult) + doc, err = (&clientUpdateDoc{ + namespace: nsIdx, + filter: model.Filter, + update: model.Update, + hint: model.Hint, + arrayFilters: model.ArrayFilters, + collation: model.Collation, + upsert: model.Upsert, + multi: false, + checkDollarKey: true, + }).marshal(mb.client.bsonOpts, mb.client.registry) + case *ClientUpdateManyModel: + canRetry = false + mb.cursorHandlers = append(mb.cursorHandlers, mb.appendUpdateResult) + doc, err = (&clientUpdateDoc{ + namespace: nsIdx, + filter: model.Filter, + update: model.Update, + hint: model.Hint, + arrayFilters: model.ArrayFilters, + collation: model.Collation, + upsert: model.Upsert, + multi: true, + checkDollarKey: true, + }).marshal(mb.client.bsonOpts, mb.client.registry) + case *ClientReplaceOneModel: + checkSize = false + mb.cursorHandlers = append(mb.cursorHandlers, mb.appendUpdateResult) + doc, err = (&clientUpdateDoc{ + namespace: nsIdx, + filter: model.Filter, + update: model.Replacement, + hint: model.Hint, + arrayFilters: nil, + collation: model.Collation, + upsert: model.Upsert, + multi: false, + checkDollarKey: false, + sizeLimit: maxDocSize, + }).marshal(mb.client.bsonOpts, mb.client.registry) + case *ClientDeleteOneModel: + mb.cursorHandlers = append(mb.cursorHandlers, mb.appendDeleteResult) + doc, err = (&clientDeleteDoc{ + namespace: nsIdx, 
+ filter: model.Filter, + collation: model.Collation, + hint: model.Hint, + multi: false, + }).marshal(mb.client.bsonOpts, mb.client.registry) + case *ClientDeleteManyModel: + canRetry = false + mb.cursorHandlers = append(mb.cursorHandlers, mb.appendDeleteResult) + doc, err = (&clientDeleteDoc{ + namespace: nsIdx, + filter: model.Filter, + collation: model.Collation, + hint: model.Hint, + multi: true, + }).marshal(mb.client.bsonOpts, mb.client.registry) + default: + mb.cursorHandlers = append(mb.cursorHandlers, nil) + } + if err != nil { + return 0, nil, err + } + length := len(doc) + if maxDocSize > 0 && length > maxDocSize+16*1024 { + return 0, nil, driver.ErrDocumentTooLarge + } + if !exists { + length += len(ns) + } + size += length + if size >= totalSize { + break + } + + dst = fn.appendDocument(dst, strconv.Itoa(n), doc) + if !exists { + idx, doc := bsoncore.AppendDocumentStart(nil) + doc = bsoncore.AppendStringElement(doc, "ns", ns) + doc, _ = bsoncore.AppendDocumentEnd(doc, idx) + nsDst = fn.appendDocument(nsDst, strconv.Itoa(n), doc) + } + n++ + } + if n == 0 { + return 0, dst[:l], nil + } + + dst = fn.updateLength(dst, opsIdx, int32(len(dst[opsIdx:]))) + nsDst = fn.updateLength(nsDst, nsIdx, int32(len(nsDst[nsIdx:]))) + dst = append(dst, nsDst...) + if checkSize && maxDocSize > 0 && len(dst)-l > maxDocSize+16*1024 { + return 0, nil, driver.ErrDocumentTooLarge + } + + mb.retryMode = driver.RetryNone + if mb.client.retryWrites && canRetry { + mb.retryMode = driver.RetryOnce + } + return n, dst, nil +} + +func (mb *modelBatches) processResponse(ctx context.Context, resp bsoncore.Document, info driver.ResponseInfo) error { + var writeCmdErr driver.WriteCommandError + if errors.As(info.Error, &writeCmdErr) && writeCmdErr.WriteConcernError != nil { + wce := convertDriverWriteConcernError(writeCmdErr.WriteConcernError) + if wce != nil { + mb.writeConcernErrors = append(mb.writeConcernErrors, *wce) + } + } + if len(resp) == 0 { + return nil + } + var res struct { + Ok bool + Cursor bsoncore.Document + NDeleted int32 + NInserted int32 + NMatched int32 + NModified int32 + NUpserted int32 + NErrors int32 + Code int32 + Errmsg string + } + err := bson.UnmarshalWithRegistry(mb.client.registry, resp, &res) + if err != nil { + return err + } + if !res.Ok { + return ClientBulkWriteException{ + TopLevelError: &WriteError{ + Code: int(res.Code), + Message: res.Errmsg, + Raw: bson.Raw(resp), + }, + WriteConcernErrors: mb.writeConcernErrors, + WriteErrors: mb.writeErrors, + PartialResult: mb.result, + } + } + + mb.result.DeletedCount += int64(res.NDeleted) + mb.result.InsertedCount += int64(res.NInserted) + mb.result.MatchedCount += int64(res.NMatched) + mb.result.ModifiedCount += int64(res.NModified) + mb.result.UpsertedCount += int64(res.NUpserted) + + var cursorRes driver.CursorResponse + cursorRes, err = driver.NewCursorResponse(res.Cursor, info) + if err != nil { + return err + } + var bCursor *driver.BatchCursor + bCursor, err = driver.NewBatchCursor(cursorRes, mb.session, mb.client.clock, + driver.CursorOptions{ + CommandMonitor: mb.client.monitor, + Crypt: mb.client.cryptFLE, + ServerAPI: mb.client.serverAPI, + MarshalValueEncoderFn: newEncoderFn(mb.client.bsonOpts, mb.client.registry), + }, + ) + if err != nil { + return err + } + var cursor *Cursor + cursor, err = newCursor(bCursor, mb.client.bsonOpts, mb.client.registry) + if err != nil { + return err + } + defer cursor.Close(ctx) + + ok := true + for cursor.Next(ctx) { + var cur cursorInfo + err = cursor.Decode(&cur) + if err != nil { + 
return err + } + if int(cur.Idx) >= len(mb.cursorHandlers) { + continue + } + ok = mb.cursorHandlers[int(cur.Idx)](&cur, cursor.Current) && ok + } + err = cursor.Err() + if err != nil { + return err + } + if mb.ordered && (writeCmdErr.WriteConcernError != nil || !ok || !res.Ok || res.NErrors > 0) { + return ClientBulkWriteException{ + WriteConcernErrors: mb.writeConcernErrors, + WriteErrors: mb.writeErrors, + PartialResult: mb.result, + } + } + return nil +} + +func (mb *modelBatches) appendDeleteResult(cur *cursorInfo, raw bson.Raw) bool { + idx := int(cur.Idx) + mb.offset + if err := cur.extractError(); err != nil { + err.Raw = raw + if mb.writeErrors == nil { + mb.writeErrors = make(map[int]WriteError) + } + mb.writeErrors[idx] = *err + return false + } + + if mb.result.DeleteResults == nil { + mb.result.DeleteResults = make(map[int]ClientDeleteResult) + } + mb.result.DeleteResults[idx] = ClientDeleteResult{int64(cur.N)} + + return true +} + +func (mb *modelBatches) appendInsertResult(cur *cursorInfo, raw bson.Raw) bool { + idx := int(cur.Idx) + mb.offset + if err := cur.extractError(); err != nil { + err.Raw = raw + if mb.writeErrors == nil { + mb.writeErrors = make(map[int]WriteError) + } + mb.writeErrors[idx] = *err + return false + } + + if mb.result.InsertResults == nil { + mb.result.InsertResults = make(map[int]ClientInsertResult) + } + mb.result.InsertResults[idx] = ClientInsertResult{mb.newIDMap[idx]} + + return true +} + +func (mb *modelBatches) appendUpdateResult(cur *cursorInfo, raw bson.Raw) bool { + idx := int(cur.Idx) + mb.offset + if err := cur.extractError(); err != nil { + err.Raw = raw + if mb.writeErrors == nil { + mb.writeErrors = make(map[int]WriteError) + } + mb.writeErrors[idx] = *err + return false + } + + if mb.result.UpdateResults == nil { + mb.result.UpdateResults = make(map[int]ClientUpdateResult) + } + result := ClientUpdateResult{ + MatchedCount: int64(cur.N), + } + if cur.NModified != nil { + result.ModifiedCount = int64(*cur.NModified) + } + if cur.Upserted != nil { + result.UpsertedID = cur.Upserted.ID + } + mb.result.UpdateResults[idx] = result + + return true +} + +type clientInsertDoc struct { + namespace int + document interface{} + + sizeLimit int +} + +func (d *clientInsertDoc) marshal(bsonOpts *options.BSONOptions, registry *bsoncodec.Registry) (interface{}, bsoncore.Document, error) { + uidx, doc := bsoncore.AppendDocumentStart(nil) + + doc = bsoncore.AppendInt32Element(doc, "insert", int32(d.namespace)) + f, err := marshal(d.document, bsonOpts, registry) + if err != nil { + return nil, nil, err + } + if d.sizeLimit > 0 && len(f) > d.sizeLimit { + return nil, nil, driver.ErrDocumentTooLarge + } + var id interface{} + f, id, err = ensureID(f, primitive.NilObjectID, bsonOpts, registry) + if err != nil { + return nil, nil, err + } + doc = bsoncore.AppendDocumentElement(doc, "document", f) + doc, err = bsoncore.AppendDocumentEnd(doc, uidx) + return id, doc, err +} + +type clientUpdateDoc struct { + namespace int + filter interface{} + update interface{} + hint interface{} + arrayFilters *options.ArrayFilters + collation *options.Collation + upsert *bool + multi bool + checkDollarKey bool + + sizeLimit int +} + +func (d *clientUpdateDoc) marshal(bsonOpts *options.BSONOptions, registry *bsoncodec.Registry) (bsoncore.Document, error) { + uidx, doc := bsoncore.AppendDocumentStart(nil) + + doc = bsoncore.AppendInt32Element(doc, "update", int32(d.namespace)) + + f, err := marshal(d.filter, bsonOpts, registry) + if err != nil { + return nil, err + } + doc = 
bsoncore.AppendDocumentElement(doc, "filter", f) + + u, err := marshalUpdateValue(d.update, bsonOpts, registry, d.checkDollarKey) + if err != nil { + return nil, err + } + if d.sizeLimit > 0 && len(u.Data) > d.sizeLimit { + return nil, driver.ErrDocumentTooLarge + } + doc = bsoncore.AppendValueElement(doc, "updateMods", u) + doc = bsoncore.AppendBooleanElement(doc, "multi", d.multi) + + if d.arrayFilters != nil { + reg := registry + if d.arrayFilters.Registry != nil { + reg = d.arrayFilters.Registry + } + arr, err := marshalValue(d.arrayFilters.Filters, bsonOpts, reg) + if err != nil { + return nil, err + } + doc = bsoncore.AppendArrayElement(doc, "arrayFilters", arr.Data) + } + + if d.collation != nil { + doc = bsoncore.AppendDocumentElement(doc, "collation", bsoncore.Document(d.collation.ToDocument())) + } + + if d.upsert != nil { + doc = bsoncore.AppendBooleanElement(doc, "upsert", *d.upsert) + } + + if d.hint != nil { + if isUnorderedMap(d.hint) { + return nil, ErrMapForOrderedArgument{"hint"} + } + hintVal, err := marshalValue(d.hint, bsonOpts, registry) + if err != nil { + return nil, err + } + doc = bsoncore.AppendValueElement(doc, "hint", hintVal) + } + + return bsoncore.AppendDocumentEnd(doc, uidx) +} + +type clientDeleteDoc struct { + namespace int + filter interface{} + collation *options.Collation + hint interface{} + multi bool +} + +func (d *clientDeleteDoc) marshal(bsonOpts *options.BSONOptions, registry *bsoncodec.Registry) (bsoncore.Document, error) { + didx, doc := bsoncore.AppendDocumentStart(nil) + + doc = bsoncore.AppendInt32Element(doc, "delete", int32(d.namespace)) + + f, err := marshal(d.filter, bsonOpts, registry) + if err != nil { + return nil, err + } + doc = bsoncore.AppendDocumentElement(doc, "filter", f) + doc = bsoncore.AppendBooleanElement(doc, "multi", d.multi) + + if d.collation != nil { + doc = bsoncore.AppendDocumentElement(doc, "collation", d.collation.ToDocument()) + } + if d.hint != nil { + if isUnorderedMap(d.hint) { + return nil, ErrMapForOrderedArgument{"hint"} + } + hintVal, err := marshalValue(d.hint, bsonOpts, registry) + if err != nil { + return nil, err + } + doc = bsoncore.AppendValueElement(doc, "hint", hintVal) + } + return bsoncore.AppendDocumentEnd(doc, didx) +} diff --git a/mongo/client_bulk_write_models.go b/mongo/client_bulk_write_models.go new file mode 100644 index 0000000000..526e209e50 --- /dev/null +++ b/mongo/client_bulk_write_models.go @@ -0,0 +1,329 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package mongo + +import ( + "fmt" + + "go.mongodb.org/mongo-driver/mongo/options" +) + +// ClientWriteModels is a struct that can be used in a client-level BulkWrite operation. +type ClientWriteModels struct { + models []clientWriteModel +} +type clientWriteModel struct { + namespace string + model interface{} +} + +// AppendInsertOne appends ClientInsertOneModels. +func (m *ClientWriteModels) AppendInsertOne(database, collection string, models ...*ClientInsertOneModel) *ClientWriteModels { + if m == nil { + m = &ClientWriteModels{} + } + for _, model := range models { + m.models = append(m.models, clientWriteModel{ + namespace: fmt.Sprintf("%s.%s", database, collection), + model: model, + }) + } + return m +} + +// AppendUpdateOne appends ClientUpdateOneModels. 
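The Append* helpers in this file store each target as a single "database.collection" string. When appendBatches later assembles the bulkWrite command, distinct namespaces are numbered in order of first use and written once to nsInfo, and every op refers to its namespace by that index. A rough sketch from the caller's side, with hypothetical collection names and field values:

models := (&mongo.ClientWriteModels{}).
    AppendInsertOne("db", "authors", &mongo.ClientInsertOneModel{Document: bson.D{{"name", "a"}}}).
    AppendUpdateMany("db", "books", &mongo.ClientUpdateManyModel{
        Filter: bson.D{{"inPrint", true}},
        Update: bson.D{{"$set", bson.D{{"audited", true}}}},
    }).
    AppendInsertOne("db", "authors", &mongo.ClientInsertOneModel{Document: bson.D{{"name", "b"}}})
// The resulting command should carry two nsInfo entries ("db.authors" and "db.books"),
// with the three ops referencing them roughly as insert: 0, update: 1, insert: 0.
_ = models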
+func (m *ClientWriteModels) AppendUpdateOne(database, collection string, models ...*ClientUpdateOneModel) *ClientWriteModels { + if m == nil { + m = &ClientWriteModels{} + } + for _, model := range models { + m.models = append(m.models, clientWriteModel{ + namespace: fmt.Sprintf("%s.%s", database, collection), + model: model, + }) + } + return m +} + +// AppendUpdateMany appends ClientUpdateManyModels. +func (m *ClientWriteModels) AppendUpdateMany(database, collection string, models ...*ClientUpdateManyModel) *ClientWriteModels { + if m == nil { + m = &ClientWriteModels{} + } + for _, model := range models { + m.models = append(m.models, clientWriteModel{ + namespace: fmt.Sprintf("%s.%s", database, collection), + model: model, + }) + } + return m +} + +// AppendReplaceOne appends ClientReplaceOneModels. +func (m *ClientWriteModels) AppendReplaceOne(database, collection string, models ...*ClientReplaceOneModel) *ClientWriteModels { + if m == nil { + m = &ClientWriteModels{} + } + for _, model := range models { + m.models = append(m.models, clientWriteModel{ + namespace: fmt.Sprintf("%s.%s", database, collection), + model: model, + }) + } + return m +} + +// AppendDeleteOne appends ClientDeleteOneModels. +func (m *ClientWriteModels) AppendDeleteOne(database, collection string, models ...*ClientDeleteOneModel) *ClientWriteModels { + if m == nil { + m = &ClientWriteModels{} + } + for _, model := range models { + m.models = append(m.models, clientWriteModel{ + namespace: fmt.Sprintf("%s.%s", database, collection), + model: model, + }) + } + return m +} + +// AppendDeleteMany appends ClientDeleteManyModels. +func (m *ClientWriteModels) AppendDeleteMany(database, collection string, models ...*ClientDeleteManyModel) *ClientWriteModels { + if m == nil { + m = &ClientWriteModels{} + } + for _, model := range models { + m.models = append(m.models, clientWriteModel{ + namespace: fmt.Sprintf("%s.%s", database, collection), + model: model, + }) + } + return m +} + +// ClientInsertOneModel is used to insert a single document in a client-level BulkWrite operation. +type ClientInsertOneModel struct { + Document interface{} +} + +// SetDocument specifies the document to be inserted. The document cannot be nil. If it does not have an _id field when +// transformed into BSON, one will be added automatically to the marshalled document. The original document will not be +// modified. +func (iom *ClientInsertOneModel) SetDocument(doc interface{}) *ClientInsertOneModel { + iom.Document = doc + return iom +} + +// ClientUpdateOneModel is used to update at most one document in a client-level BulkWrite operation. +type ClientUpdateOneModel struct { + Collation *options.Collation + Upsert *bool + Filter interface{} + Update interface{} + ArrayFilters *options.ArrayFilters + Hint interface{} +} + +// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index +// specification as a document. The default value is nil, which means that no hint will be sent. +func (uom *ClientUpdateOneModel) SetHint(hint interface{}) *ClientUpdateOneModel { + uom.Hint = hint + return uom +} + +// SetFilter specifies a filter to use to select the document to update. The filter must be a document containing query +// operators. It cannot be nil. If the filter matches multiple documents, one will be selected from the matching +// documents. 
+func (uom *ClientUpdateOneModel) SetFilter(filter interface{}) *ClientUpdateOneModel { + uom.Filter = filter + return uom +} + +// SetUpdate specifies the modifications to be made to the selected document. The value must be a document containing +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. +func (uom *ClientUpdateOneModel) SetUpdate(update interface{}) *ClientUpdateOneModel { + uom.Update = update + return uom +} + +// SetArrayFilters specifies a set of filters to determine which elements should be modified when updating an array +// field. +func (uom *ClientUpdateOneModel) SetArrayFilters(filters options.ArrayFilters) *ClientUpdateOneModel { + uom.ArrayFilters = &filters + return uom +} + +// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be +// used. +func (uom *ClientUpdateOneModel) SetCollation(collation *options.Collation) *ClientUpdateOneModel { + uom.Collation = collation + return uom +} + +// SetUpsert specifies whether or not a new document should be inserted if no document matching the filter is found. If +// an upsert is performed, the _id of the upserted document can be retrieved from the UpdateResults field of the +// ClientBulkWriteResult. +func (uom *ClientUpdateOneModel) SetUpsert(upsert bool) *ClientUpdateOneModel { + uom.Upsert = &upsert + return uom +} + +// ClientUpdateManyModel is used to update multiple documents in a client-level BulkWrite operation. +type ClientUpdateManyModel struct { + Collation *options.Collation + Upsert *bool + Filter interface{} + Update interface{} + ArrayFilters *options.ArrayFilters + Hint interface{} +} + +// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index +// specification as a document. The default value is nil, which means that no hint will be sent. +func (umm *ClientUpdateManyModel) SetHint(hint interface{}) *ClientUpdateManyModel { + umm.Hint = hint + return umm +} + +// SetFilter specifies a filter to use to select documents to update. The filter must be a document containing query +// operators. It cannot be nil. +func (umm *ClientUpdateManyModel) SetFilter(filter interface{}) *ClientUpdateManyModel { + umm.Filter = filter + return umm +} + +// SetUpdate specifies the modifications to be made to the selected documents. The value must be a document containing +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. +func (umm *ClientUpdateManyModel) SetUpdate(update interface{}) *ClientUpdateManyModel { + umm.Update = update + return umm +} + +// SetArrayFilters specifies a set of filters to determine which elements should be modified when updating an array +// field. +func (umm *ClientUpdateManyModel) SetArrayFilters(filters options.ArrayFilters) *ClientUpdateManyModel { + umm.ArrayFilters = &filters + return umm +} + +// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be +// used. +func (umm *ClientUpdateManyModel) SetCollation(collation *options.Collation) *ClientUpdateManyModel { + umm.Collation = collation + return umm +} + +// SetUpsert specifies whether or not a new document should be inserted if no document matching the filter is found. If +// an upsert is performed, the _id of the upserted document can be retrieved from the UpdateResults field of the +// ClientBulkWriteResult. 
+func (umm *ClientUpdateManyModel) SetUpsert(upsert bool) *ClientUpdateManyModel { + umm.Upsert = &upsert + return umm +} + +// ClientReplaceOneModel is used to replace at most one document in a client-level BulkWrite operation. +type ClientReplaceOneModel struct { + Collation *options.Collation + Upsert *bool + Filter interface{} + Replacement interface{} + Hint interface{} +} + +// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index +// specification as a document. The default value is nil, which means that no hint will be sent. +func (rom *ClientReplaceOneModel) SetHint(hint interface{}) *ClientReplaceOneModel { + rom.Hint = hint + return rom +} + +// SetFilter specifies a filter to use to select the document to replace. The filter must be a document containing query +// operators. It cannot be nil. If the filter matches multiple documents, one will be selected from the matching +// documents. +func (rom *ClientReplaceOneModel) SetFilter(filter interface{}) *ClientReplaceOneModel { + rom.Filter = filter + return rom +} + +// SetReplacement specifies a document that will be used to replace the selected document. It cannot be nil and cannot +// contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). +func (rom *ClientReplaceOneModel) SetReplacement(rep interface{}) *ClientReplaceOneModel { + rom.Replacement = rep + return rom +} + +// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be +// used. +func (rom *ClientReplaceOneModel) SetCollation(collation *options.Collation) *ClientReplaceOneModel { + rom.Collation = collation + return rom +} + +// SetUpsert specifies whether or not the replacement document should be inserted if no document matching the filter is +// found. If an upsert is performed, the _id of the upserted document can be retrieved from the UpdateResults field of the +// BulkWriteResult. +func (rom *ClientReplaceOneModel) SetUpsert(upsert bool) *ClientReplaceOneModel { + rom.Upsert = &upsert + return rom +} + +// ClientDeleteOneModel is used to delete at most one document in a client-level BulkWriteOperation. +type ClientDeleteOneModel struct { + Filter interface{} + Collation *options.Collation + Hint interface{} +} + +// SetFilter specifies a filter to use to select the document to delete. The filter must be a document containing query +// operators. It cannot be nil. If the filter matches multiple documents, one will be selected from the matching +// documents. +func (dom *ClientDeleteOneModel) SetFilter(filter interface{}) *ClientDeleteOneModel { + dom.Filter = filter + return dom +} + +// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be +// used. +func (dom *ClientDeleteOneModel) SetCollation(collation *options.Collation) *ClientDeleteOneModel { + dom.Collation = collation + return dom +} + +// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index +// specification as a document. The default value is nil, which means that no hint will be sent. +func (dom *ClientDeleteOneModel) SetHint(hint interface{}) *ClientDeleteOneModel { + dom.Hint = hint + return dom +} + +// ClientDeleteManyModel is used to delete multiple documents in a client-level BulkWrite operation. 
+type ClientDeleteManyModel struct {
+    Filter    interface{}
+    Collation *options.Collation
+    Hint      interface{}
+}
+
+// SetFilter specifies a filter to use to select documents to delete. The filter must be a document containing query
+// operators. It cannot be nil.
+func (dmm *ClientDeleteManyModel) SetFilter(filter interface{}) *ClientDeleteManyModel {
+    dmm.Filter = filter
+    return dmm
+}
+
+// SetCollation specifies a collation to use for string comparisons. The default is nil, meaning no collation will be
+// used.
+func (dmm *ClientDeleteManyModel) SetCollation(collation *options.Collation) *ClientDeleteManyModel {
+    dmm.Collation = collation
+    return dmm
+}
+
+// SetHint specifies the index to use for the operation. This should either be the index name as a string or the index
+// specification as a document. The default value is nil, which means that no hint will be sent.
+func (dmm *ClientDeleteManyModel) SetHint(hint interface{}) *ClientDeleteManyModel {
+    dmm.Hint = hint
+    return dmm
+}
diff --git a/mongo/client_bulk_write_test.go b/mongo/client_bulk_write_test.go
new file mode 100644
index 0000000000..8287f6acbb
--- /dev/null
+++ b/mongo/client_bulk_write_test.go
@@ -0,0 +1,66 @@
+// Copyright (C) MongoDB, Inc. 2024-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+    "testing"
+
+    "go.mongodb.org/mongo-driver/bson"
+    "go.mongodb.org/mongo-driver/internal/assert"
+    "go.mongodb.org/mongo-driver/internal/require"
+)
+
+func TestBatches(t *testing.T) {
+    t.Run("test advancing", func(t *testing.T) {
+        batches := &modelBatches{
+            models: make([]clientWriteModel, 2),
+        }
+        batches.AdvanceBatches(3)
+        size := batches.Size()
+        assert.Equal(t, 0, size, "expected: %d, got: %d", 0, size)
+    })
+    t.Run("test appendBatches", func(t *testing.T) {
+        client, err := NewClient()
+        require.NoError(t, err, "NewClient error: %v", err)
+        batches := &modelBatches{
+            client: client,
+            models: []clientWriteModel{
+                {"ns0", nil},
+                {"ns1", &ClientInsertOneModel{
+                    Document: bson.D{{"foo", 42}},
+                }},
+                {"ns2", &ClientReplaceOneModel{
+                    Filter:      bson.D{{"foo", "bar"}},
+                    Replacement: bson.D{{"foo", "baz"}},
+                }},
+                {"ns1", &ClientDeleteOneModel{
+                    Filter: bson.D{{"qux", "quux"}},
+                }},
+            },
+            offset: 1,
+            result: &ClientBulkWriteResult{},
+        }
+        var n int
+        n, _, err = batches.AppendBatchSequence(nil, 4, 16_000, 16_000)
+        require.NoError(t, err, "AppendBatchSequence error: %v", err)
+        assert.Equal(t, 3, n, "expected %d models appended, got: %d", 3, n)
+
+        _ = batches.cursorHandlers[0](&cursorInfo{Ok: true, Idx: 0}, nil)
+        _ = batches.cursorHandlers[1](&cursorInfo{Ok: true, Idx: 1}, nil)
+        _ = batches.cursorHandlers[2](&cursorInfo{Ok: true, Idx: 2}, nil)
+
+        ins, ok := batches.result.InsertResults[1]
+        assert.True(t, ok, "expected an insert result")
+        assert.NotNil(t, ins.InsertedID, "expected an ID")
+
+        _, ok = batches.result.UpdateResults[2]
+        assert.True(t, ok, "expected an update result")
+
+        _, ok = batches.result.DeleteResults[3]
+        assert.True(t, ok, "expected a delete result")
+    })
+}
diff --git a/mongo/errors.go b/mongo/errors.go
index d92c9ca9bd..5340d632cc 100644
--- a/mongo/errors.go
+++ b/mongo/errors.go
@@ -609,6 +609,56 @@ func (bwe BulkWriteException) HasErrorCodeWithMessage(code int, message string)
 // serverError implements the ServerError interface.
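This errors.go hunk introduces ClientBulkWriteException. A sketch of how calling code might unpack it; the helper name inspectBulkWriteError and the log output are illustrative, and the fragment assumes the log and mongo imports.

func inspectBulkWriteError(err error) {
    bwe, ok := err.(mongo.ClientBulkWriteException)
    if !ok {
        return
    }
    if bwe.TopLevelError != nil {
        log.Printf("top-level error %d: %s", bwe.TopLevelError.Code, bwe.TopLevelError.Message)
    }
    // WriteErrors is keyed by the index of the failed model in the original input.
    for idx, we := range bwe.WriteErrors {
        log.Printf("model %d failed: %s", idx, we.Message)
    }
    for _, wce := range bwe.WriteConcernErrors {
        log.Printf("write concern error: %s", wce.Message)
    }
    if bwe.PartialResult != nil {
        log.Printf("inserted before the failure: %d", bwe.PartialResult.InsertedCount)
    }
}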
func (bwe BulkWriteException) serverError() {} +// ClientBulkWriteException is the error type returned by ClientBulkWrite operations. +type ClientBulkWriteException struct { + // A top-level error that occurred when attempting to communicate with the server + // or execute the bulk write. This value may not be populated if the exception was + // thrown due to errors occurring on individual writes. + TopLevelError *WriteError + + // The write concern errors that occurred. + WriteConcernErrors []WriteConcernError + + // The write errors that occurred during individual operation execution. + // This map will contain at most one entry if the bulk write was ordered. + WriteErrors map[int]WriteError + + // The results of any successful operations that were performed before the error + // was encountered. + PartialResult *ClientBulkWriteResult +} + +// Error implements the error interface. +func (bwe ClientBulkWriteException) Error() string { + causes := make([]string, 0, 4) + if bwe.TopLevelError != nil { + causes = append(causes, "top level error: "+bwe.TopLevelError.Error()) + } + if len(bwe.WriteConcernErrors) > 0 { + errs := make([]error, len(bwe.WriteConcernErrors)) + for i := 0; i < len(bwe.WriteConcernErrors); i++ { + errs[i] = bwe.WriteConcernErrors[i] + } + causes = append(causes, "write concern errors: "+joinBatchErrors(errs)) + } + if len(bwe.WriteErrors) > 0 { + errs := make([]error, 0, len(bwe.WriteErrors)) + for _, v := range bwe.WriteErrors { + errs = append(errs, v) + } + causes = append(causes, "write errors: "+joinBatchErrors(errs)) + } + if bwe.PartialResult != nil { + causes = append(causes, fmt.Sprintf("result: %v", *bwe.PartialResult)) + } + + message := "bulk write exception: " + if len(causes) == 0 { + return message + "no causes" + } + return "bulk write exception: " + strings.Join(causes, ", ") +} + // returnResult is used to determine if a function calling processWriteError should return // the result or return nil. 
Since the processWriteError function is used by many different // methods, both *One and *Many, we need a way to differentiate if the method should return diff --git a/mongo/integration/client_side_encryption_test.go b/mongo/integration/client_side_encryption_test.go index abff32ff62..ff4281a970 100644 --- a/mongo/integration/client_side_encryption_test.go +++ b/mongo/integration/client_side_encryption_test.go @@ -396,7 +396,7 @@ func TestClientSideEncryptionCustomCrypt(t *testing.T) { "expected 0 calls to DecryptExplicit, got %v", cc.numDecryptExplicitCalls) assert.Equal(mt, cc.numCloseCalls, 0, "expected 0 calls to Close, got %v", cc.numCloseCalls) - assert.Equal(mt, cc.numBypassAutoEncryptionCalls, 2, + assert.Equal(mt, cc.numBypassAutoEncryptionCalls, 1, "expected 2 calls to BypassAutoEncryption, got %v", cc.numBypassAutoEncryptionCalls) }) } diff --git a/mongo/integration/crud_prose_test.go b/mongo/integration/crud_prose_test.go index 00a1f0ff36..7e29df07f4 100644 --- a/mongo/integration/crud_prose_test.go +++ b/mongo/integration/crud_prose_test.go @@ -10,14 +10,20 @@ import ( "bytes" "context" "errors" + "os" + "strings" "testing" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/internal/assert" + "go.mongodb.org/mongo-driver/internal/require" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/integration/mtest" "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver" ) func TestWriteErrorsWithLabels(t *testing.T) { @@ -408,3 +414,550 @@ func TestErrorsCodeNamePropagated(t *testing.T) { assert.Equal(mt, expectedCodeName, wce.Name, "expected code name %q, got %q", expectedCodeName, wce.Name) }) } + +func TestClientBulkWrite(t *testing.T) { + mtOpts := mtest.NewOptions().MinServerVersion("8.0").AtlasDataLake(false).ClientType(mtest.Pinned) + mt := mtest.New(t, mtOpts) + + mt.Run("bulkWrite batch splits a writeModels input with greater than maxWriteBatchSize operations", func(mt *mtest.T) { + var opsCnt []int + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + if e.CommandName == "bulkWrite" { + var c struct { + Ops []bson.D + } + err := bson.Unmarshal(e.Command, &c) + require.NoError(mt, err) + opsCnt = append(opsCnt, len(c.Ops)) + } + }, + } + mt.ResetClient(options.Client().SetMonitor(monitor)) + var hello struct { + MaxWriteBatchSize int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + models := &mongo.ClientWriteModels{} + numModels := hello.MaxWriteBatchSize + 1 + for i := 0; i < numModels; i++ { + models. 
+ AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", "b"}}, + }) + } + result, err := mt.Client.BulkWrite(context.Background(), models) + require.NoError(mt, err, "BulkWrite error: %v", err) + assert.Equal(mt, numModels, int(result.InsertedCount), "expected InsertedCount: %d, got %d", numModels, result.InsertedCount) + require.Len(mt, opsCnt, 2, "expected %d bulkWrite commands, got: %d", 2, len(opsCnt)) + assert.Equal(mt, numModels-1, opsCnt[0], "expected %d firstEvent.command.ops, got: %d", numModels-1, opsCnt[0]) + assert.Equal(mt, 1, opsCnt[1], "expected %d secondEvent.command.ops, got: %d", 1, opsCnt[1]) + }) + + mt.Run("bulkWrite batch splits when an ops payload exceeds maxMessageSizeBytes", func(mt *mtest.T) { + var opsCnt []int + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + if e.CommandName == "bulkWrite" { + var c struct { + Ops []bson.D + } + err := bson.Unmarshal(e.Command, &c) + require.NoError(mt, err) + opsCnt = append(opsCnt, len(c.Ops)) + } + }, + } + mt.ResetClient(options.Client().SetMonitor(monitor)) + var hello struct { + MaxBsonObjectSize int + MaxMessageSizeBytes int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + models := &mongo.ClientWriteModels{} + numModels := hello.MaxMessageSizeBytes/hello.MaxBsonObjectSize + 1 + for i := 0; i < numModels; i++ { + models. + AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", strings.Repeat("b", hello.MaxBsonObjectSize-500)}}, + }) + } + result, err := mt.Client.BulkWrite(context.Background(), models) + require.NoError(mt, err, "BulkWrite error: %v", err) + assert.Equal(mt, numModels, int(result.InsertedCount), "expected InsertedCount: %d, got: %d", numModels, result.InsertedCount) + require.Len(mt, opsCnt, 2, "expected %d bulkWrite commands, got: %d", 2, len(opsCnt)) + assert.Equal(mt, numModels-1, opsCnt[0], "expected %d firstEvent.command.ops, got: %d", numModels-1, opsCnt[0]) + assert.Equal(mt, 1, opsCnt[1], "expected %d secondEvent.command.ops, got: %d", 1, opsCnt[1]) + }) + + mt.Run("bulkWrite collects WriteConcernErrors across batches", func(mt *mtest.T) { + var eventCnt int + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + if e.CommandName == "bulkWrite" { + eventCnt++ + } + }, + } + mt.ResetClient(options.Client().SetRetryWrites(false).SetMonitor(monitor)) + var hello struct { + MaxWriteBatchSize int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + + mt.SetFailPoint(mtest.FailPoint{ + ConfigureFailPoint: "failCommand", + Mode: mtest.FailPointMode{ + Times: 2, + }, + Data: mtest.FailPointData{ + FailCommands: []string{"bulkWrite"}, + WriteConcernError: &mtest.WriteConcernErrorData{ + Code: 91, + Errmsg: "Replication is being shut down", + }, + }, + }) + + models := &mongo.ClientWriteModels{} + numModels := hello.MaxWriteBatchSize + 1 + for i := 0; i < numModels; i++ { + models. 
+ AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", "b"}}, + }) + } + _, err = mt.Client.BulkWrite(context.Background(), models) + require.Error(mt, err, "expected a BulkWrite error") + bwe, ok := err.(mongo.ClientBulkWriteException) + require.True(mt, ok, "expected a BulkWriteException, got %T: %v", err, err) + assert.Len(mt, bwe.WriteConcernErrors, 2, "expected %d writeConcernErrors, got: %d", 2, len(bwe.WriteConcernErrors)) + require.NotNil(mt, bwe.PartialResult) + assert.Equal(mt, numModels, int(bwe.PartialResult.InsertedCount), + "expected InsertedCount: %d, got: %d", numModels, bwe.PartialResult.InsertedCount) + require.Equal(mt, 2, eventCnt, "expected %d bulkWrite commands, got: %d", 2, eventCnt) + }) + + mt.Run("bulkWrite handles individual WriteErrors across batches", func(mt *mtest.T) { + var eventCnt int + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + if e.CommandName == "bulkWrite" { + eventCnt++ + } + }, + } + + mt.ResetClient(options.Client()) + var hello struct { + MaxWriteBatchSize int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + + coll := mt.CreateCollection(mtest.Collection{DB: "db", Name: "coll"}, false) + err = coll.Drop(context.Background()) + require.NoError(mt, err, "Drop error: %v", err) + _, err = coll.InsertOne(context.Background(), bson.D{{"_id", 1}}) + require.NoError(mt, err, "InsertOne error: %v", err) + + models := &mongo.ClientWriteModels{} + numModels := hello.MaxWriteBatchSize + 1 + for i := 0; i < numModels; i++ { + models. + AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"_id", 1}}, + }) + } + + mt.Run("unordered", func(mt *mtest.T) { + eventCnt = 0 + mt.ResetClient(options.Client().SetMonitor(monitor)) + _, err := mt.Client.BulkWrite(context.Background(), models, options.ClientBulkWrite().SetOrdered(false)) + require.Error(mt, err, "expected a BulkWrite error") + bwe, ok := err.(mongo.ClientBulkWriteException) + require.True(mt, ok, "expected a BulkWriteException, got %T: %v", err, err) + assert.Len(mt, bwe.WriteErrors, numModels, "expected %d writeErrors, got %d", numModels, len(bwe.WriteErrors)) + require.Equal(mt, 2, eventCnt, "expected %d bulkWrite commands, got: %d", 2, eventCnt) + }) + mt.Run("ordered", func(mt *mtest.T) { + eventCnt = 0 + mt.ResetClient(options.Client().SetMonitor(monitor)) + _, err := mt.Client.BulkWrite(context.Background(), models, options.ClientBulkWrite().SetOrdered(true)) + require.Error(mt, err, "expected a BulkWrite error") + bwe, ok := err.(mongo.ClientBulkWriteException) + require.True(mt, ok, "expected a BulkWriteException, got %T: %v", err, err) + assert.Len(mt, bwe.WriteErrors, 1, "expected %d writeErrors, got: %d", 1, len(bwe.WriteErrors)) + require.Equal(mt, 1, eventCnt, "expected %d bulkWrite commands, got: %d", 1, eventCnt) + }) + }) + + mt.Run("bulkWrite handles a cursor requiring a getMore", func(mt *mtest.T) { + var getMoreCalled int + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + if e.CommandName == "getMore" { + getMoreCalled++ + } + }, + } + mt.ResetClient(options.Client().SetMonitor(monitor)) + var hello struct { + MaxBsonObjectSize int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + + coll := mt.CreateCollection(mtest.Collection{DB: "db", 
Name: "coll"}, false) + err = coll.Drop(context.Background()) + require.NoError(mt, err, "Drop error: %v", err) + + upsert := true + models := (&mongo.ClientWriteModels{}). + AppendUpdateOne("db", "coll", &mongo.ClientUpdateOneModel{ + Filter: bson.D{{"_id", strings.Repeat("a", hello.MaxBsonObjectSize/2)}}, + Update: bson.D{{"$set", bson.D{{"x", 1}}}}, + Upsert: &upsert, + }). + AppendUpdateOne("db", "coll", &mongo.ClientUpdateOneModel{ + Filter: bson.D{{"_id", strings.Repeat("b", hello.MaxBsonObjectSize/2)}}, + Update: bson.D{{"$set", bson.D{{"x", 1}}}}, + Upsert: &upsert, + }) + result, err := mt.Client.BulkWrite(context.Background(), models, options.ClientBulkWrite().SetVerboseResults(true)) + require.NoError(mt, err, "BulkWrite error: %v", err) + assert.Equal(mt, int64(2), result.UpsertedCount, "expected InsertedCount: %d, got: %d", 2, result.UpsertedCount) + assert.Len(mt, result.UpdateResults, 2, "expected %d UpdateResults, got: %d", 2, len(result.UpdateResults)) + assert.Equal(mt, 1, getMoreCalled, "expected %d getMore call, got: %d", 1, getMoreCalled) + }) + + mt.RunOpts("bulkWrite handles a cursor requiring a getMore within a transaction", + mtest.NewOptions().MinServerVersion("8.0").AtlasDataLake(false).ClientType(mtest.Pinned). + Topologies(mtest.ReplicaSet, mtest.Sharded, mtest.LoadBalanced, mtest.ShardedReplicaSet), + func(mt *mtest.T) { + var getMoreCalled int + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + if e.CommandName == "getMore" { + getMoreCalled++ + } + }, + } + mt.ResetClient(options.Client().SetMonitor(monitor)) + var hello struct { + MaxBsonObjectSize int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + + coll := mt.CreateCollection(mtest.Collection{DB: "db", Name: "coll"}, false) + err = coll.Drop(context.Background()) + require.NoError(mt, err, "Drop error: %v", err) + + session, err := mt.Client.StartSession() + require.NoError(mt, err, "StartSession error: %v", err) + defer session.EndSession(context.Background()) + + upsert := true + models := (&mongo.ClientWriteModels{}). + AppendUpdateOne("db", "coll", &mongo.ClientUpdateOneModel{ + Filter: bson.D{{"_id", strings.Repeat("a", hello.MaxBsonObjectSize/2)}}, + Update: bson.D{{"$set", bson.D{{"x", 1}}}}, + Upsert: &upsert, + }). 
+ AppendUpdateOne("db", "coll", &mongo.ClientUpdateOneModel{ + Filter: bson.D{{"_id", strings.Repeat("b", hello.MaxBsonObjectSize/2)}}, + Update: bson.D{{"$set", bson.D{{"x", 1}}}}, + Upsert: &upsert, + }) + result, err := session.WithTransaction(context.Background(), func(ctx mongo.SessionContext) (interface{}, error) { + return mt.Client.BulkWrite(ctx, models, options.ClientBulkWrite().SetVerboseResults(true)) + }) + require.NoError(mt, err, "BulkWrite error: %v", err) + cbwResult, ok := result.(*mongo.ClientBulkWriteResult) + require.True(mt, ok, "expected a ClientBulkWriteResult, got %T", result) + assert.Equal(mt, int64(2), cbwResult.UpsertedCount, "expected InsertedCount: %d, got: %d", 2, cbwResult.UpsertedCount) + assert.Len(mt, cbwResult.UpdateResults, 2, "expected %d UpdateResults, got: %d", 2, len(cbwResult.UpdateResults)) + assert.Equal(mt, 1, getMoreCalled, "expected %d getMore call, got: %d", 1, getMoreCalled) + }) + + mt.Run("bulkWrite handles a getMore error", func(mt *mtest.T) { + var getMoreCalled int + var killCursorsCalled int + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + switch e.CommandName { + case "getMore": + getMoreCalled++ + case "killCursors": + killCursorsCalled++ + } + }, + } + mt.ResetClient(options.Client().SetMonitor(monitor)) + var hello struct { + MaxBsonObjectSize int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + + mt.SetFailPoint(mtest.FailPoint{ + ConfigureFailPoint: "failCommand", + Mode: mtest.FailPointMode{ + Times: 1, + }, + Data: mtest.FailPointData{ + FailCommands: []string{"getMore"}, + ErrorCode: 8, + }, + }) + + coll := mt.CreateCollection(mtest.Collection{DB: "db", Name: "coll"}, false) + err = coll.Drop(context.Background()) + require.NoError(mt, err, "Drop error: %v", err) + + upsert := true + models := (&mongo.ClientWriteModels{}). + AppendUpdateOne("db", "coll", &mongo.ClientUpdateOneModel{ + Filter: bson.D{{"_id", strings.Repeat("a", hello.MaxBsonObjectSize/2)}}, + Update: bson.D{{"$set", bson.D{{"x", 1}}}}, + Upsert: &upsert, + }). 
+ AppendUpdateOne("db", "coll", &mongo.ClientUpdateOneModel{ + Filter: bson.D{{"_id", strings.Repeat("b", hello.MaxBsonObjectSize/2)}}, + Update: bson.D{{"$set", bson.D{{"x", 1}}}}, + Upsert: &upsert, + }) + _, err = mt.Client.BulkWrite(context.Background(), models, options.ClientBulkWrite().SetVerboseResults(true)) + assert.Error(mt, err, "expected a BulkWrite error") + bwe, ok := err.(mongo.ClientBulkWriteException) + require.True(mt, ok, "expected a BulkWriteException, got %T: %v", err, err) + require.NotNil(mt, bwe.TopLevelError) + assert.Equal(mt, 8, bwe.TopLevelError.Code, "expected top level error code: %d, got; %d", 8, bwe.TopLevelError.Code) + require.NotNil(mt, bwe.PartialResult) + assert.Equal(mt, int64(2), bwe.PartialResult.UpsertedCount, "expected UpsertedCount: %d, got: %d", 2, bwe.PartialResult.UpsertedCount) + assert.Len(mt, bwe.PartialResult.UpdateResults, 1, "expected %d UpdateResults, got: %d", 1, len(bwe.PartialResult.UpdateResults)) + assert.Equal(mt, 1, getMoreCalled, "expected %d getMore call, got: %d", 1, getMoreCalled) + assert.Equal(mt, 1, killCursorsCalled, "expected %d killCursors call, got: %d", 1, killCursorsCalled) + }) + + mt.Run("bulkWrite returns error for unacknowledged too-large insert", func(mt *mtest.T) { + mt.ResetClient(options.Client()) + var hello struct { + MaxBsonObjectSize int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + mt.Run("insert", func(mt *mtest.T) { + models := (&mongo.ClientWriteModels{}). + AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", strings.Repeat("b", hello.MaxBsonObjectSize)}}, + }) + _, err := mt.Client.BulkWrite(context.Background(), models, options.ClientBulkWrite().SetOrdered(false).SetWriteConcern(writeconcern.Unacknowledged())) + require.EqualError(mt, err, driver.ErrDocumentTooLarge.Error()) + }) + mt.Run("replace", func(mt *mtest.T) { + models := (&mongo.ClientWriteModels{}). + AppendReplaceOne("db", "coll", &mongo.ClientReplaceOneModel{ + Filter: bson.D{}, + Replacement: bson.D{{"a", strings.Repeat("b", hello.MaxBsonObjectSize)}}, + }) + _, err := mt.Client.BulkWrite(context.Background(), models, options.ClientBulkWrite().SetOrdered(false).SetWriteConcern(writeconcern.Unacknowledged())) + require.EqualError(mt, err, driver.ErrDocumentTooLarge.Error()) + }) + }) + + mt.Run("bulkWrite batch splits when the addition of a new namespace exceeds the maximum message size", func(mt *mtest.T) { + type cmd struct { + Ops []bson.D + NsInfo []struct { + Ns string + } + } + var bwCmd []cmd + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + if e.CommandName == "bulkWrite" { + var c cmd + err := bson.Unmarshal(e.Command, &c) + require.NoError(mt, err, "Unmarshal error: %v", err) + bwCmd = append(bwCmd, c) + } + }, + } + mt.ResetClient(options.Client()) + var hello struct { + MaxBsonObjectSize int + MaxMessageSizeBytes int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + + newModels := func() (int, *mongo.ClientWriteModels) { + maxBsonObjectSize := hello.MaxBsonObjectSize + opsBytes := hello.MaxMessageSizeBytes - 1122 + numModels := opsBytes / maxBsonObjectSize + + models := &mongo.ClientWriteModels{} + n := numModels + for i := 0; i < n; i++ { + models. 
+ AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", strings.Repeat("b", maxBsonObjectSize-57)}}, + }) + } + if remainderBytes := opsBytes % maxBsonObjectSize; remainderBytes > 217 { + n++ + models. + AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", strings.Repeat("b", remainderBytes-57)}}, + }) + } + return n, models + } + mt.Run("no batch-splitting required", func(mt *mtest.T) { + bwCmd = bwCmd[:0] + mt.ResetClient(options.Client().SetMonitor(monitor)) + + numModels, models := newModels() + models.AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", "b"}}, + }) + result, err := mt.Client.BulkWrite(context.Background(), models) + require.NoError(mt, err, "BulkWrite error: %v", err) + assert.Equal(mt, numModels+1, int(result.InsertedCount), "expected insertedCound: %d, got: %d", numModels+1, result.InsertedCount) + require.Len(mt, bwCmd, 1, "expected %d bulkWrite call, got: %d", 1, len(bwCmd)) + + assert.Len(mt, bwCmd[0].Ops, numModels+1, "expected %d ops, got: %d", numModels+1, len(bwCmd[0].Ops)) + require.Len(mt, bwCmd[0].NsInfo, 1, "expected %d nsInfo, got: %d", 1, len(bwCmd[0].NsInfo)) + assert.Equal(mt, "db.coll", bwCmd[0].NsInfo[0].Ns, "expected namespace: %s, got: %s", "db.coll", bwCmd[0].NsInfo[0].Ns) + }) + mt.Run("batch-splitting required", func(mt *mtest.T) { + bwCmd = bwCmd[:0] + mt.ResetClient(options.Client().SetMonitor(monitor)) + + coll := strings.Repeat("c", 200) + numModels, models := newModels() + models.AppendInsertOne("db", coll, &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", "b"}}, + }) + result, err := mt.Client.BulkWrite(context.Background(), models) + require.NoError(mt, err, "BulkWrite error: %v", err) + assert.Equal(mt, numModels+1, int(result.InsertedCount), "expected insertedCound: %d, got: %d", numModels+1, result.InsertedCount) + require.Len(mt, bwCmd, 2, "expected %d bulkWrite calls, got: %d", 2, len(bwCmd)) + + assert.Len(mt, bwCmd[0].Ops, numModels, "expected %d ops, got: %d", numModels, len(bwCmd[0].Ops)) + require.Len(mt, bwCmd[0].NsInfo, 1, "expected %d nsInfo, got: %d", 1, len(bwCmd[0].NsInfo)) + assert.Equal(mt, "db.coll", bwCmd[0].NsInfo[0].Ns, "expected namespace: %s, got: %s", "db.coll", bwCmd[0].NsInfo[0].Ns) + + assert.Len(mt, bwCmd[1].Ops, 1, "expected %d ops, got: %d", 1, len(bwCmd[1].Ops)) + require.Len(mt, bwCmd[1].NsInfo, 1, "expected %d nsInfo, got: %d", 1, len(bwCmd[1].NsInfo)) + assert.Equal(mt, "db."+coll, bwCmd[1].NsInfo[0].Ns, "expected namespace: %s, got: %s", "db."+coll, bwCmd[1].NsInfo[0].Ns) + }) + }) + + mt.Run("bulkWrite returns an error if no operations can be added to ops", func(mt *mtest.T) { + mt.ResetClient(options.Client()) + var hello struct { + MaxMessageSizeBytes int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + mt.Run("document too large", func(mt *mtest.T) { + models := (&mongo.ClientWriteModels{}). + AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", strings.Repeat("b", hello.MaxMessageSizeBytes)}}, + }) + _, err := mt.Client.BulkWrite(context.Background(), models) + require.EqualError(mt, err, driver.ErrDocumentTooLarge.Error()) + }) + mt.Run("namespace too large", func(mt *mtest.T) { + models := (&mongo.ClientWriteModels{}). 
+ AppendInsertOne("db", strings.Repeat("c", hello.MaxMessageSizeBytes), &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", "b"}}, + }) + _, err := mt.Client.BulkWrite(context.Background(), models) + require.EqualError(mt, err, driver.ErrDocumentTooLarge.Error()) + }) + }) + + mt.Run("bulkWrite returns an error if auto-encryption is configured", func(mt *mtest.T) { + if os.Getenv("DOCKER_RUNNING") != "" { + mt.Skip("skipping test in docker environment") + } + + autoEncryptionOpts := options.AutoEncryption(). + SetKeyVaultNamespace("db.coll"). + SetKmsProviders(map[string]map[string]interface{}{ + "aws": { + "accessKeyId": "foo", + "secretAccessKey": "bar", + }, + }) + mt.ResetClient(options.Client().SetAutoEncryptionOptions(autoEncryptionOpts)) + models := (&mongo.ClientWriteModels{}). + AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", "b"}}, + }) + _, err := mt.Client.BulkWrite(context.Background(), models) + require.ErrorContains(mt, err, "bulkWrite does not currently support automatic encryption") + }) + + mt.Run("bulkWrite with unacknowledged write concern uses w:0 for all batches", func(mt *mtest.T) { + type cmd struct { + Ops []bson.D + WriteConcern struct { + W interface{} + } + } + var bwCmd []cmd + monitor := &event.CommandMonitor{ + Started: func(_ context.Context, e *event.CommandStartedEvent) { + if e.CommandName == "bulkWrite" { + var c cmd + err := bson.Unmarshal(e.Command, &c) + require.NoError(mt, err, "Unmarshal error: %v", err) + + bwCmd = append(bwCmd, c) + } + }, + } + mt.ResetClient(options.Client().SetMonitor(monitor)) + var hello struct { + MaxBsonObjectSize int + MaxMessageSizeBytes int + } + err := mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + + coll := mt.CreateCollection(mtest.Collection{DB: "db", Name: "coll"}, false) + err = coll.Drop(context.Background()) + require.NoError(mt, err, "Drop error: %v", err) + + numModels := hello.MaxMessageSizeBytes/hello.MaxBsonObjectSize + 1 + models := &mongo.ClientWriteModels{} + for i := 0; i < numModels; i++ { + models. 
+ AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", strings.Repeat("b", hello.MaxBsonObjectSize-500)}}, + }) + } + result, err := mt.Client.BulkWrite(context.Background(), models, options.ClientBulkWrite().SetOrdered(false).SetWriteConcern(writeconcern.Unacknowledged())) + require.NoError(mt, err, "BulkWrite error: %v", err) + assert.Nil(mt, result, "expected a nil result, got: %v", result) + require.Len(mt, bwCmd, 2, "expected %d bulkWrite calls, got: %d", 2, len(bwCmd)) + + assert.Len(mt, bwCmd[0].Ops, numModels-1, "expected %d ops, got: %d", numModels-1, len(bwCmd[0].Ops)) + assert.Equal(mt, int32(0), bwCmd[0].WriteConcern.W, "expected writeConcern: %d, got: %v", 0, bwCmd[0].WriteConcern.W) + + assert.Len(mt, bwCmd[1].Ops, 1, "expected %d ops, got: %d", 1, len(bwCmd[1].Ops)) + assert.Equal(mt, int32(0), bwCmd[1].WriteConcern.W, "expected writeConcern: %d, got: %v", 0, bwCmd[1].WriteConcern.W) + + n, err := coll.CountDocuments(context.Background(), bson.D{}) + require.NoError(mt, err, "CountDocuments error: %v", err) + assert.Equal(mt, numModels, int(n), "expected %d documents, got: %d", numModels, n) + }) +} diff --git a/mongo/integration/csot_prose_test.go b/mongo/integration/csot_prose_test.go index c8ddfd68df..775bd7037b 100644 --- a/mongo/integration/csot_prose_test.go +++ b/mongo/integration/csot_prose_test.go @@ -16,6 +16,7 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/internal/assert" "go.mongodb.org/mongo-driver/internal/integtest" + "go.mongodb.org/mongo-driver/internal/require" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/integration/mtest" "go.mongodb.org/mongo-driver/mongo/options" @@ -160,4 +161,57 @@ func TestCSOTProse(t *testing.T) { "expected ping to fail within 150ms") }) }) + mt.RunOpts("11. multi-batch bulkWrites", mtest.NewOptions().MinServerVersion("8.0"). + AtlasDataLake(false).Topologies(mtest.Single), func(mt *mtest.T) { + coll := mt.CreateCollection(mtest.Collection{DB: "db", Name: "coll"}, false) + err := coll.Drop(context.Background()) + require.NoError(mt, err, "Drop error: %v", err) + + mt.SetFailPoint(mtest.FailPoint{ + ConfigureFailPoint: "failCommand", + Mode: mtest.FailPointMode{ + Times: 2, + }, + Data: mtest.FailPointData{ + FailCommands: []string{"bulkWrite"}, + BlockConnection: true, + BlockTimeMS: 1010, + }, + }) + + var hello struct { + MaxBsonObjectSize int + MaxMessageSizeBytes int + } + err = mt.DB.RunCommand(context.Background(), bson.D{{"hello", 1}}).Decode(&hello) + require.NoError(mt, err, "Hello error: %v", err) + + models := &mongo.ClientWriteModels{} + n := hello.MaxMessageSizeBytes/hello.MaxBsonObjectSize + 1 + for i := 0; i < n; i++ { + models. + AppendInsertOne("db", "coll", &mongo.ClientInsertOneModel{ + Document: bson.D{{"a", strings.Repeat("b", hello.MaxBsonObjectSize-500)}}, + }) + } + + var cnt int + cm := &event.CommandMonitor{ + Started: func(_ context.Context, evt *event.CommandStartedEvent) { + if evt.CommandName == "bulkWrite" { + cnt++ + } + }, + } + cliOptions := options.Client(). + SetTimeout(2 * time.Second). + SetMonitor(cm). 
+			ApplyURI(mtest.ClusterURI())
+		integtest.AddTestServerAPIVersion(cliOptions)
+		cli, err := mongo.Connect(context.Background(), cliOptions)
+		require.NoError(mt, err, "Connect error: %v", err)
+		_, err = cli.BulkWrite(context.Background(), models)
+		assert.ErrorContains(mt, err, "context deadline exceeded", "expected a timeout error, got: %v", err)
+		assert.Equal(mt, 2, cnt, "expected bulkWrite calls: %d, got: %d", 2, cnt)
+	})
 }
diff --git a/mongo/integration/unified/client_operation_execution.go b/mongo/integration/unified/client_operation_execution.go
index 5a69e77b1e..6e18af44c9 100644
--- a/mongo/integration/unified/client_operation_execution.go
+++ b/mongo/integration/unified/client_operation_execution.go
@@ -8,9 +8,13 @@ package unified
 import (
 	"context"
+	"errors"
 	"fmt"
+	"strconv"
+	"strings"
 	"time"
 
+	"go.mongodb.org/mongo-driver/bson"
 	"go.mongodb.org/mongo-driver/bson/primitive"
 	"go.mongodb.org/mongo-driver/internal/bsonutil"
 	"go.mongodb.org/mongo-driver/mongo"
@@ -167,3 +171,329 @@ func executeListDatabases(ctx context.Context, operation *operation, nameOnly bo
 		Build()
 	return newDocumentResult(raw, nil), nil
 }
+
+func executeClientBulkWrite(ctx context.Context, operation *operation) (*operationResult, error) {
+	client, err := entities(ctx).client(operation.Object)
+	if err != nil {
+		return nil, err
+	}
+
+	writeModels := &mongo.ClientWriteModels{}
+	opts := options.ClientBulkWrite()
+
+	elems, err := operation.Arguments.Elements()
+	if err != nil {
+		return nil, err
+	}
+	for _, elem := range elems {
+		key := elem.Key()
+		val := elem.Value()
+
+		switch key {
+		case "models":
+			models, err := val.Array().Values()
+			if err != nil {
+				return nil, err
+			}
+			for _, m := range models {
+				model := m.Document().Index(0)
+				err = appendClientBulkWriteModel(model.Key(), model.Value().Document(), writeModels)
+				if err != nil {
+					return nil, err
+				}
+			}
+		case "bypassDocumentValidation":
+			opts.SetBypassDocumentValidation(val.Boolean())
+		case "comment":
+			opts.SetComment(val)
+		case "let":
+			opts.SetLet(val.Document())
+		case "ordered":
+			opts.SetOrdered(val.Boolean())
+		case "verboseResults":
+			opts.SetVerboseResults(val.Boolean())
+		case "writeConcern":
+			var wc writeConcern
+			err := bson.Unmarshal(val.Value, &wc)
+			if err != nil {
+				return nil, err
+			}
+			c, err := wc.toWriteConcernOption()
+			if err != nil {
+				return nil, err
+			}
+			opts.SetWriteConcern(c)
+		default:
+			return nil, fmt.Errorf("unrecognized bulkWrite option %q", key)
+		}
+	}
+
+	res, err := client.BulkWrite(ctx, writeModels, opts)
+	if res == nil {
+		var bwe mongo.ClientBulkWriteException
+		if !errors.As(err, &bwe) || bwe.PartialResult == nil {
+			return newDocumentResult(emptyCoreDocument, err), nil
+		}
+		res = bwe.PartialResult
+	}
+	rawBuilder := bsoncore.NewDocumentBuilder().
+		AppendInt64("deletedCount", res.DeletedCount).
+		AppendInt64("insertedCount", res.InsertedCount).
+		AppendInt64("matchedCount", res.MatchedCount).
+		AppendInt64("modifiedCount", res.ModifiedCount).
+		AppendInt64("upsertedCount", res.UpsertedCount)
+
+	var resBuilder *bsoncore.DocumentBuilder
+
+	resBuilder = bsoncore.NewDocumentBuilder()
+	for k, v := range res.DeleteResults {
+		resBuilder.AppendDocument(strconv.Itoa(k),
+			bsoncore.NewDocumentBuilder().
+				AppendInt64("deletedCount", v.DeletedCount).
+ Build(), + ) + } + rawBuilder.AppendDocument("deleteResults", resBuilder.Build()) + + resBuilder = bsoncore.NewDocumentBuilder() + for k, v := range res.InsertResults { + t, d, err := bson.MarshalValue(v.InsertedID) + if err != nil { + return nil, err + } + resBuilder.AppendDocument(strconv.Itoa(k), + bsoncore.NewDocumentBuilder(). + AppendValue("insertedId", bsoncore.Value{Type: t, Data: d}). + Build(), + ) + } + rawBuilder.AppendDocument("insertResults", resBuilder.Build()) + + resBuilder = bsoncore.NewDocumentBuilder() + for k, v := range res.UpdateResults { + b := bsoncore.NewDocumentBuilder(). + AppendInt64("matchedCount", v.MatchedCount). + AppendInt64("modifiedCount", v.ModifiedCount) + if v.UpsertedID != nil { + t, d, err := bson.MarshalValue(v.UpsertedID) + if err != nil { + return nil, err + } + b.AppendValue("upsertedId", bsoncore.Value{Type: t, Data: d}) + } + resBuilder.AppendDocument(strconv.Itoa(k), b.Build()) + } + rawBuilder.AppendDocument("updateResults", resBuilder.Build()) + + return newDocumentResult(rawBuilder.Build(), err), nil +} + +func appendClientBulkWriteModel(key string, value bson.Raw, model *mongo.ClientWriteModels) error { + switch key { + case "insertOne": + namespace, m, err := createClientInsertOneModel(value) + if err != nil { + return err + } + ns := strings.SplitN(namespace, ".", 2) + model.AppendInsertOne(ns[0], ns[1], m) + case "updateOne": + namespace, m, err := createClientUpdateOneModel(value) + if err != nil { + return err + } + ns := strings.SplitN(namespace, ".", 2) + model.AppendUpdateOne(ns[0], ns[1], m) + case "updateMany": + namespace, m, err := createClientUpdateManyModel(value) + if err != nil { + return err + } + ns := strings.SplitN(namespace, ".", 2) + model.AppendUpdateMany(ns[0], ns[1], m) + case "replaceOne": + namespace, m, err := createClientReplaceOneModel(value) + if err != nil { + return err + } + ns := strings.SplitN(namespace, ".", 2) + model.AppendReplaceOne(ns[0], ns[1], m) + case "deleteOne": + namespace, m, err := createClientDeleteOneModel(value) + if err != nil { + return err + } + ns := strings.SplitN(namespace, ".", 2) + model.AppendDeleteOne(ns[0], ns[1], m) + case "deleteMany": + namespace, m, err := createClientDeleteManyModel(value) + if err != nil { + return err + } + ns := strings.SplitN(namespace, ".", 2) + model.AppendDeleteMany(ns[0], ns[1], m) + } + return nil +} + +func createClientInsertOneModel(value bson.Raw) (string, *mongo.ClientInsertOneModel, error) { + var v struct { + Namespace string + Document bson.Raw + } + err := bson.Unmarshal(value, &v) + if err != nil { + return "", nil, err + } + return v.Namespace, &mongo.ClientInsertOneModel{ + Document: v.Document, + }, nil +} + +func createClientUpdateOneModel(value bson.Raw) (string, *mongo.ClientUpdateOneModel, error) { + var v struct { + Namespace string + Filter bson.Raw + Update interface{} + ArrayFilters []interface{} + Collation *options.Collation + Hint *bson.RawValue + Upsert *bool + } + err := bson.Unmarshal(value, &v) + if err != nil { + return "", nil, err + } + var hint interface{} + if v.Hint != nil { + hint, err = createHint(*v.Hint) + if err != nil { + return "", nil, err + } + } + model := &mongo.ClientUpdateOneModel{ + Filter: v.Filter, + Update: v.Update, + Collation: v.Collation, + Hint: hint, + Upsert: v.Upsert, + } + if len(v.ArrayFilters) > 0 { + model.ArrayFilters = &options.ArrayFilters{Filters: v.ArrayFilters} + } + return v.Namespace, model, nil + +} + +func createClientUpdateManyModel(value bson.Raw) (string, 
*mongo.ClientUpdateManyModel, error) { + var v struct { + Namespace string + Filter bson.Raw + Update interface{} + ArrayFilters []interface{} + Collation *options.Collation + Hint *bson.RawValue + Upsert *bool + } + err := bson.Unmarshal(value, &v) + if err != nil { + return "", nil, err + } + var hint interface{} + if v.Hint != nil { + hint, err = createHint(*v.Hint) + if err != nil { + return "", nil, err + } + } + model := &mongo.ClientUpdateManyModel{ + Filter: v.Filter, + Update: v.Update, + Collation: v.Collation, + Hint: hint, + Upsert: v.Upsert, + } + if len(v.ArrayFilters) > 0 { + model.ArrayFilters = &options.ArrayFilters{Filters: v.ArrayFilters} + } + return v.Namespace, model, nil +} + +func createClientReplaceOneModel(value bson.Raw) (string, *mongo.ClientReplaceOneModel, error) { + var v struct { + Namespace string + Filter bson.Raw + Replacement bson.Raw + Collation *options.Collation + Hint *bson.RawValue + Upsert *bool + } + err := bson.Unmarshal(value, &v) + if err != nil { + return "", nil, err + } + var hint interface{} + if v.Hint != nil { + hint, err = createHint(*v.Hint) + if err != nil { + return "", nil, err + } + } + return v.Namespace, &mongo.ClientReplaceOneModel{ + Filter: v.Filter, + Replacement: v.Replacement, + Collation: v.Collation, + Hint: hint, + Upsert: v.Upsert, + }, nil +} + +func createClientDeleteOneModel(value bson.Raw) (string, *mongo.ClientDeleteOneModel, error) { + var v struct { + Namespace string + Filter bson.Raw + Collation *options.Collation + Hint *bson.RawValue + } + err := bson.Unmarshal(value, &v) + if err != nil { + return "", nil, err + } + var hint interface{} + if v.Hint != nil { + hint, err = createHint(*v.Hint) + if err != nil { + return "", nil, err + } + } + return v.Namespace, &mongo.ClientDeleteOneModel{ + Filter: v.Filter, + Collation: v.Collation, + Hint: hint, + }, nil +} + +func createClientDeleteManyModel(value bson.Raw) (string, *mongo.ClientDeleteManyModel, error) { + var v struct { + Namespace string + Filter bson.Raw + Collation *options.Collation + Hint *bson.RawValue + } + err := bson.Unmarshal(value, &v) + if err != nil { + return "", nil, err + } + var hint interface{} + if v.Hint != nil { + hint, err = createHint(*v.Hint) + if err != nil { + return "", nil, err + } + } + return v.Namespace, &mongo.ClientDeleteManyModel{ + Filter: v.Filter, + Collation: v.Collation, + Hint: hint, + }, nil +} diff --git a/mongo/integration/unified/error.go b/mongo/integration/unified/error.go index 0edc79428a..66b357f58d 100644 --- a/mongo/integration/unified/error.go +++ b/mongo/integration/unified/error.go @@ -19,15 +19,22 @@ import ( // expectedError represents an error that is expected to occur during a test. This type ignores the "isError" field in // test files because it is always true if it is specified, so the runner can simply assert that an error occurred. 
 type expectedError struct {
-	IsClientError  *bool          `bson:"isClientError"`
-	IsTimeoutError *bool          `bson:"isTimeoutError"`
-	ErrorSubstring *string        `bson:"errorContains"`
-	Code           *int32         `bson:"errorCode"`
-	CodeName       *string        `bson:"errorCodeName"`
-	IncludedLabels []string       `bson:"errorLabelsContain"`
-	OmittedLabels  []string       `bson:"errorLabelsOmit"`
-	ExpectedResult *bson.RawValue `bson:"expectResult"`
-	ErrorResponse  *bson.Raw      `bson:"errorResponse"`
+	IsClientError      *bool                            `bson:"isClientError"`
+	IsTimeoutError     *bool                            `bson:"isTimeoutError"`
+	ErrorSubstring     *string                          `bson:"errorContains"`
+	Code               *int32                           `bson:"errorCode"`
+	CodeName           *string                          `bson:"errorCodeName"`
+	IncludedLabels     []string                         `bson:"errorLabelsContain"`
+	OmittedLabels      []string                         `bson:"errorLabelsOmit"`
+	ExpectedResult     *bson.RawValue                   `bson:"expectResult"`
+	ErrorResponse      *bson.Raw                        `bson:"errorResponse"`
+	WriteErrors        map[int]clientBulkWriteException `bson:"writeErrors"`
+	WriteConcernErrors []clientBulkWriteException       `bson:"writeConcernErrors"`
+}
+
+type clientBulkWriteException struct {
+	Code    *int    `bson:"code"`
+	Message *string `bson:"message"`
 }
 
 // verifyOperationError compares the expected error to the actual operation result. If the expected parameter is nil,
@@ -140,6 +147,40 @@ func verifyOperationError(ctx context.Context, expected *expectedError, result *
 			return fmt.Errorf("error response comparison error: %w", err)
 		}
 	}
+	if expected.WriteErrors != nil {
+		var exception mongo.ClientBulkWriteException
+		if !errors.As(result.Err, &exception) {
+			return fmt.Errorf("expected a ClientBulkWriteException, got %T: %v", result.Err, result.Err)
+		}
+		if len(expected.WriteErrors) != len(exception.WriteErrors) {
+			return fmt.Errorf("expected errors: %v, got: %v", expected.WriteErrors, exception.WriteErrors)
+		}
+		for k, e := range expected.WriteErrors {
+			if e.Code != nil && *e.Code != exception.WriteErrors[k].Code {
+				return fmt.Errorf("expected errors: %v, got: %v", expected.WriteErrors, exception.WriteErrors)
+			}
+			if e.Message != nil && *e.Message != exception.WriteErrors[k].Message {
+				return fmt.Errorf("expected errors: %v, got: %v", expected.WriteErrors, exception.WriteErrors)
+			}
+		}
+	}
+	if expected.WriteConcernErrors != nil {
+		var exception mongo.ClientBulkWriteException
+		if !errors.As(result.Err, &exception) {
+			return fmt.Errorf("expected a ClientBulkWriteException, got %T: %v", result.Err, result.Err)
+		}
+		if len(expected.WriteConcernErrors) != len(exception.WriteConcernErrors) {
+			return fmt.Errorf("expected errors: %v, got: %v", expected.WriteConcernErrors, exception.WriteConcernErrors)
+		}
+		for i, e := range expected.WriteConcernErrors {
+			if e.Code != nil && *e.Code != exception.WriteConcernErrors[i].Code {
+				return fmt.Errorf("expected errors: %v, got: %v", expected.WriteConcernErrors, exception.WriteConcernErrors)
+			}
+			if e.Message != nil && *e.Message != exception.WriteConcernErrors[i].Message {
+				return fmt.Errorf("expected errors: %v, got: %v", expected.WriteConcernErrors, exception.WriteConcernErrors)
+			}
+		}
+	}
 	return nil
 }
 
@@ -182,6 +223,11 @@ func extractErrorDetails(err error) (errorDetails, bool) {
 			details.raw = we.Raw
 		}
 		details.labels = converted.Labels
+	case mongo.ClientBulkWriteException:
+		if converted.TopLevelError != nil {
+			details.raw = converted.TopLevelError.Raw
+			details.codes = append(details.codes, int32(converted.TopLevelError.Code))
+		}
 	default:
 		return errorDetails{}, false
 	}
diff --git a/mongo/integration/unified/operation.go
b/mongo/integration/unified/operation.go index 59aa36ae8c..dc0bbcbb62 100644 --- a/mongo/integration/unified/operation.go +++ b/mongo/integration/unified/operation.go @@ -126,6 +126,8 @@ func (op *operation) run(ctx context.Context, loopDone <-chan struct{}) (*operat return executeListDatabases(ctx, op, false) case "listDatabaseNames": return executeListDatabases(ctx, op, true) + case "clientBulkWrite": + return executeClientBulkWrite(ctx, op) // Database operations case "createCollection": diff --git a/mongo/integration/unified/schema_version.go b/mongo/integration/unified/schema_version.go index 9aec89a18d..c3d02068a3 100644 --- a/mongo/integration/unified/schema_version.go +++ b/mongo/integration/unified/schema_version.go @@ -16,7 +16,7 @@ import ( var ( supportedSchemaVersions = map[int]string{ - 1: "1.17", + 1: "1.21", } ) diff --git a/mongo/options/clientbulkwriteoptions.go b/mongo/options/clientbulkwriteoptions.go new file mode 100644 index 0000000000..7c460e47ce --- /dev/null +++ b/mongo/options/clientbulkwriteoptions.go @@ -0,0 +1,116 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package options + +import ( + "go.mongodb.org/mongo-driver/mongo/writeconcern" +) + +// ClientBulkWriteOptions represents options that can be used to configure a client-level BulkWrite operation. +type ClientBulkWriteOptions struct { + // If true, writes executed as part of the operation will opt out of document-level validation on the server. The + // default value is false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information + // about document validation. + BypassDocumentValidation *bool + + // A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace + // the operation. The default value is nil, which means that no comment will be included in the logs. + Comment interface{} + + // If true, no writes will be executed after one fails. The default value is true. + Ordered *bool + + // Specifies parameters for all update and delete commands in the BulkWrite. This must be a document mapping + // parameter names to values. Values must be constant or closed expressions that do not reference document fields. + // Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} + + // The write concern to use for this bulk write. + WriteConcern *writeconcern.WriteConcern + + // Whether detailed results for each successful operation should be included in the returned BulkWriteResult. + VerboseResults *bool +} + +// ClientBulkWrite creates a new *ClientBulkWriteOptions instance. +func ClientBulkWrite() *ClientBulkWriteOptions { + return &ClientBulkWriteOptions{ + Ordered: &DefaultOrdered, + } +} + +// SetComment sets the value for the Comment field. +func (b *ClientBulkWriteOptions) SetComment(comment interface{}) *ClientBulkWriteOptions { + b.Comment = comment + return b +} + +// SetOrdered sets the value for the Ordered field. +func (b *ClientBulkWriteOptions) SetOrdered(ordered bool) *ClientBulkWriteOptions { + b.Ordered = &ordered + return b +} + +// SetBypassDocumentValidation sets the value for the BypassDocumentValidation field. 
+func (b *ClientBulkWriteOptions) SetBypassDocumentValidation(bypass bool) *ClientBulkWriteOptions {
+	b.BypassDocumentValidation = &bypass
+	return b
+}
+
+// SetLet sets the value for the Let field. Let specifies parameters for all update and delete commands in the ClientBulkWrite.
+// This must be a document mapping parameter names to values. Values must be constant or closed expressions that do not
+// reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var").
+func (b *ClientBulkWriteOptions) SetLet(let interface{}) *ClientBulkWriteOptions {
+	b.Let = &let
+	return b
+}
+
+// SetWriteConcern sets the value for the WriteConcern field.
+func (b *ClientBulkWriteOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *ClientBulkWriteOptions {
+	b.WriteConcern = wc
+	return b
+}
+
+// SetVerboseResults sets the value for the VerboseResults field.
+func (b *ClientBulkWriteOptions) SetVerboseResults(verboseResults bool) *ClientBulkWriteOptions {
+	b.VerboseResults = &verboseResults
+	return b
+}
+
+// MergeClientBulkWriteOptions combines the given ClientBulkWriteOptions instances into a single
+// ClientBulkWriteOptions in a last-one-wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeClientBulkWriteOptions(opts ...*ClientBulkWriteOptions) *ClientBulkWriteOptions {
+	b := ClientBulkWrite()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Comment != nil {
+			b.Comment = opt.Comment
+		}
+		if opt.Ordered != nil {
+			b.Ordered = opt.Ordered
+		}
+		if opt.BypassDocumentValidation != nil {
+			b.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+		if opt.Let != nil {
+			b.Let = opt.Let
+		}
+		if opt.WriteConcern != nil {
+			b.WriteConcern = opt.WriteConcern
+		}
+		if opt.VerboseResults != nil {
+			b.VerboseResults = opt.VerboseResults
+		}
+	}
+
+	return b
+}
diff --git a/mongo/results.go b/mongo/results.go
index 2dbaf2af62..f7658306f5 100644
--- a/mongo/results.go
+++ b/mongo/results.go
@@ -14,6 +14,51 @@ import (
 	"go.mongodb.org/mongo-driver/x/mongo/driver/operation"
 )
 
+// ClientBulkWriteResult is the result type returned by a client-level BulkWrite operation.
+type ClientBulkWriteResult struct {
+	// The number of documents inserted.
+	InsertedCount int64
+
+	// The number of documents matched by filters in update and replace operations.
+	MatchedCount int64
+
+	// The number of documents modified by update and replace operations.
+	ModifiedCount int64
+
+	// The number of documents deleted.
+	DeletedCount int64
+
+	// The number of documents upserted by update and replace operations.
+	UpsertedCount int64
+
+	// A map of operation index to the _id of each inserted document.
+	InsertResults map[int]ClientInsertResult
+
+	// A map of operation index to the result of each update or replace operation.
+	UpdateResults map[int]ClientUpdateResult
+
+	// A map of operation index to the result of each delete operation.
+	DeleteResults map[int]ClientDeleteResult
+}
+
+// ClientInsertResult is the result type returned by a client-level bulk write of InsertOne operation.
+type ClientInsertResult struct {
+	// The _id of the inserted document. A value generated by the driver will be of type primitive.ObjectID.
+	InsertedID interface{}
+}
+
+// ClientUpdateResult is the result type returned by a client-level bulk write of UpdateOne, UpdateMany, or ReplaceOne operation.
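+//
+// A minimal usage sketch (illustrative only; it assumes a connected *mongo.Client named "client" and a
+// context.Context named "ctx"). Per-operation results such as ClientUpdateResult are exposed through the
+// ClientBulkWriteResult maps, keyed by the index of the corresponding write model:
+//
+//	models := (&mongo.ClientWriteModels{}).
+//		AppendUpdateOne("db", "coll", &mongo.ClientUpdateOneModel{
+//			Filter: bson.D{{"_id", 1}},
+//			Update: bson.D{{"$set", bson.D{{"x", 1}}}},
+//		})
+//	res, err := client.BulkWrite(ctx, models, options.ClientBulkWrite().SetVerboseResults(true))
+//	if err == nil && res != nil {
+//		_ = res.UpdateResults[0].ModifiedCount
+//	}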
+type ClientUpdateResult struct { + MatchedCount int64 // The number of documents matched by the filter. + ModifiedCount int64 // The number of documents modified by the operation. + UpsertedID interface{} // The _id field of the upserted document, or nil if no upsert was done. +} + +// ClientDeleteResult is the result type returned by a client-level bulk write DeleteOne and DeleteMany operation. +type ClientDeleteResult struct { + DeletedCount int64 // The number of documents deleted. +} + // BulkWriteResult is the result type returned by a BulkWrite operation. type BulkWriteResult struct { // The number of documents inserted. diff --git a/testdata/command-monitoring/unacknowledged-client-bulkWrite.json b/testdata/command-monitoring/unacknowledged-client-bulkWrite.json new file mode 100644 index 0000000000..61bb00726c --- /dev/null +++ b/testdata/command-monitoring/unacknowledged-client-bulkWrite.json @@ -0,0 +1,220 @@ +{ + "description": "unacknowledged-client-bulkWrite", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ], + "uriOptions": { + "w": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "tests": [ + { + "description": "A successful mixed client bulkWrite", + "operations": [ + { + "object": "client", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "command-monitoring-tests.test", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "command-monitoring-tests.test", + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ], + "ordered": false + }, + "expectResult": { + "insertedCount": { + "$$unsetOrMatches": 0 + }, + "upsertedCount": { + "$$unsetOrMatches": 0 + }, + "matchedCount": { + "$$unsetOrMatches": 0 + }, + "modifiedCount": { + "$$unsetOrMatches": 0 + }, + "deletedCount": { + "$$unsetOrMatches": 0 + }, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 3 + }, + "updateMods": { + "$set": { + "x": 333 + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "command-monitoring-tests.test" + } + ] + } + } + }, + { + 
"commandSucceededEvent": { + "commandName": "bulkWrite", + "reply": { + "ok": 1, + "nInserted": { + "$$exists": false + }, + "nMatched": { + "$$exists": false + }, + "nModified": { + "$$exists": false + }, + "nUpserted": { + "$$exists": false + }, + "nDeleted": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/testdata/command-monitoring/unacknowledged-client-bulkWrite.yml b/testdata/command-monitoring/unacknowledged-client-bulkWrite.yml new file mode 100644 index 0000000000..2d54525953 --- /dev/null +++ b/testdata/command-monitoring/unacknowledged-client-bulkWrite.yml @@ -0,0 +1,111 @@ +description: "unacknowledged-client-bulkWrite" + +schemaVersion: "1.7" + +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + uriOptions: + w: 0 + - database: + id: &database database + client: *client + databaseName: &databaseName command-monitoring-tests + - collection: + id: &collection collection + database: *database + collectionName: &collectionName test + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "command-monitoring-tests.test" + +tests: + - description: 'A successful mixed client bulkWrite' + operations: + - object: *client + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 3 } + update: { $set: { x: 333 } } + ordered: false + expectResult: + insertedCount: + $$unsetOrMatches: 0 + upsertedCount: + $$unsetOrMatches: 0 + matchedCount: + $$unsetOrMatches: 0 + modifiedCount: + $$unsetOrMatches: 0 + deletedCount: + $$unsetOrMatches: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + # Force completion of the w:0 write by executing a find on the same connection + - object: *collection + name: find + arguments: + filter: {} + expectResult: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 333 } + - { _id: 4, x: 44 } + + expectEvents: + - + client: *client + ignoreExtraEvents: true + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: false + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 3 } + updateMods: { $set: { x: 333 } } + multi: false + nsInfo: + - ns: *namespace + - commandSucceededEvent: + commandName: bulkWrite + reply: + ok: 1 + nInserted: { $$exists: false } + nMatched: { $$exists: false } + nModified: { $$exists: false } + nUpserted: { $$exists: false } + nDeleted: { $$exists: false } diff --git a/testdata/command-monitoring/unacknowledgedBulkWrite.json b/testdata/command-monitoring/unacknowledgedBulkWrite.json index 4c16d6df11..61bb00726c 100644 --- a/testdata/command-monitoring/unacknowledgedBulkWrite.json +++ b/testdata/command-monitoring/unacknowledgedBulkWrite.json @@ -1,15 +1,25 @@ { - "description": "unacknowledgedBulkWrite", - "schemaVersion": "1.0", + "description": "unacknowledged-client-bulkWrite", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], "createEntities": [ { "client": { "id": "client", + "useMultipleMongoses": false, 
"observeEvents": [ "commandStartedEvent", "commandSucceededEvent", "commandFailedEvent" - ] + ], + "uriOptions": { + "w": 0 + } } }, { @@ -23,12 +33,7 @@ "collection": { "id": "collection", "database": "database", - "collectionName": "test", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } + "collectionName": "test" } } ], @@ -40,64 +45,171 @@ { "_id": 1, "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 } ] } ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, "tests": [ { - "description": "A successful unordered bulk write with an unacknowledged write concern", + "description": "A successful mixed client bulkWrite", "operations": [ { - "name": "bulkWrite", - "object": "collection", + "object": "client", + "name": "clientBulkWrite", "arguments": { - "requests": [ + "models": [ { "insertOne": { + "namespace": "command-monitoring-tests.test", "document": { - "_id": "unorderedBulkWriteInsertW0", + "_id": 4, "x": 44 } } + }, + { + "updateOne": { + "namespace": "command-monitoring-tests.test", + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } } ], "ordered": false + }, + "expectResult": { + "insertedCount": { + "$$unsetOrMatches": 0 + }, + "upsertedCount": { + "$$unsetOrMatches": 0 + }, + "matchedCount": { + "$$unsetOrMatches": 0 + }, + "modifiedCount": { + "$$unsetOrMatches": 0 + }, + "deletedCount": { + "$$unsetOrMatches": 0 + }, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } } + }, + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 44 + } + ] } ], "expectEvents": [ { "client": "client", + "ignoreExtraEvents": true, "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { - "insert": "test", - "documents": [ + "bulkWrite": 1, + "errorsOnly": true, + "ordered": false, + "ops": [ { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 3 + }, + "updateMods": { + "$set": { + "x": 333 + } + }, + "multi": false } ], - "ordered": false, - "writeConcern": { - "w": 0 - } - }, - "commandName": "insert", - "databaseName": "command-monitoring-tests" + "nsInfo": [ + { + "ns": "command-monitoring-tests.test" + } + ] + } } }, { "commandSucceededEvent": { + "commandName": "bulkWrite", "reply": { "ok": 1, - "n": { + "nInserted": { + "$$exists": false + }, + "nMatched": { + "$$exists": false + }, + "nModified": { + "$$exists": false + }, + "nUpserted": { + "$$exists": false + }, + "nDeleted": { "$$exists": false } - }, - "commandName": "insert" + } } } ] diff --git a/testdata/command-monitoring/unacknowledgedBulkWrite.yml b/testdata/command-monitoring/unacknowledgedBulkWrite.yml index d7c8ce0d0c..2d54525953 100644 --- a/testdata/command-monitoring/unacknowledgedBulkWrite.yml +++ b/testdata/command-monitoring/unacknowledgedBulkWrite.yml @@ -1,14 +1,21 @@ -description: "unacknowledgedBulkWrite" +description: "unacknowledged-client-bulkWrite" -schemaVersion: "1.0" +schemaVersion: "1.7" + +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid createEntities: - client: id: &client client + useMultipleMongoses: false observeEvents: - commandStartedEvent - commandSucceededEvent - commandFailedEvent + 
uriOptions: + w: 0 - database: id: &database database client: *client @@ -17,39 +24,88 @@ createEntities: id: &collection collection database: *database collectionName: &collectionName test - collectionOptions: - writeConcern: { w: 0 } initialData: - collectionName: *collectionName databaseName: *databaseName documents: - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "command-monitoring-tests.test" tests: - - description: "A successful unordered bulk write with an unacknowledged write concern" + - description: 'A successful mixed client bulkWrite' operations: - - name: bulkWrite - object: *collection + - object: *client + name: clientBulkWrite arguments: - requests: + models: - insertOne: - document: { _id: "unorderedBulkWriteInsertW0", x: 44 } + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 3 } + update: { $set: { x: 333 } } ordered: false + expectResult: + insertedCount: + $$unsetOrMatches: 0 + upsertedCount: + $$unsetOrMatches: 0 + matchedCount: + $$unsetOrMatches: 0 + modifiedCount: + $$unsetOrMatches: 0 + deletedCount: + $$unsetOrMatches: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + # Force completion of the w:0 write by executing a find on the same connection + - object: *collection + name: find + arguments: + filter: {} + expectResult: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 333 } + - { _id: 4, x: 44 } + expectEvents: - - client: *client + - + client: *client + ignoreExtraEvents: true events: - commandStartedEvent: + commandName: bulkWrite + databaseName: admin command: - insert: *collectionName - documents: - - { _id: "unorderedBulkWriteInsertW0", x: 44 } + bulkWrite: 1 + errorsOnly: true ordered: false - writeConcern: { w: 0 } - commandName: insert - databaseName: *databaseName + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 3 } + updateMods: { $set: { x: 333 } } + multi: false + nsInfo: + - ns: *namespace - commandSucceededEvent: + commandName: bulkWrite reply: ok: 1 - n: { $$exists: false } - commandName: insert + nInserted: { $$exists: false } + nMatched: { $$exists: false } + nModified: { $$exists: false } + nUpserted: { $$exists: false } + nDeleted: { $$exists: false } diff --git a/testdata/crud/unified/client-bulkWrite-delete-options.json b/testdata/crud/unified/client-bulkWrite-delete-options.json new file mode 100644 index 0000000000..d9987897dc --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-delete-options.json @@ -0,0 +1,268 @@ +{ + "description": "client bulkWrite delete options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulk write delete with 
collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + }, + { + "description": "client bulk write delete with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "hint": "_id_", + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-delete-options.yml b/testdata/crud/unified/client-bulkWrite-delete-options.yml new file mode 100644 index 0000000000..9297838535 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-delete-options.yml @@ -0,0 +1,137 @@ +description: "client bulkWrite delete options" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 
} + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + collation: &collation { "locale": "simple" } + hint: &hint _id_ + +tests: + - description: "client bulk write delete with collation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + collation: *collation + - deleteMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + collation: *collation + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 3 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 1: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - delete: 0 + filter: { _id: 1 } + collation: *collation + multi: false + - delete: 0 + filter: { _id: { $gt: 1 } } + collation: *collation + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] + - description: "client bulk write delete with hint" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + hint: *hint + - deleteMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + hint: *hint + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 3 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 1: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - delete: 0 + filter: { _id: 1 } + hint: *hint + multi: false + - delete: 0 + filter: { _id: { $gt: 1 } } + hint: *hint + multi: true + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] diff --git a/testdata/crud/unified/client-bulkWrite-errorResponse.json b/testdata/crud/unified/client-bulkWrite-errorResponse.json new file mode 100644 index 0000000000..b828aad3b9 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-errorResponse.json @@ -0,0 +1,69 @@ +{ + "description": "client bulkWrite errorResponse", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite operations support errorResponse assertions", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-errorResponse.yml 
b/testdata/crud/unified/client-bulkWrite-errorResponse.yml new file mode 100644 index 0000000000..d63010afc7 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-errorResponse.yml @@ -0,0 +1,38 @@ +description: "client bulkWrite errorResponse" +schemaVersion: "1.12" +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + useMultipleMongoses: false # Avoid setting fail points with multiple mongoses + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite operations support errorResponse assertions" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ bulkWrite ] + errorCode: &errorCode 8 # UnknownError + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1 } + expectError: + errorCode: *errorCode + errorResponse: + code: *errorCode diff --git a/testdata/crud/unified/client-bulkWrite-errors.json b/testdata/crud/unified/client-bulkWrite-errors.json new file mode 100644 index 0000000000..015bd95c99 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-errors.json @@ -0,0 +1,513 @@ +{ + "description": "client bulkWrite errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "writeConcernErrorCode": 91, + "writeConcernErrorMessage": "Replication is being shut down", + "undefinedVarCode": 17276 + }, + "tests": [ + { + "description": "an individual operation fails during an ordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "an individual operation fails during an unordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + 
"namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "2": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "detailed results are omitted from error when verboseResults is false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a top-level failure occurs during a bulkWrite", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "x": 1 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "errorCode": 8 + } + } + ] + }, + { + "description": "a bulk write with only errors does not report a partial result", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": {} + }, + "writeErrors": { + "0": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a write concern error occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true + }, + 
"expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 10 + } + }, + "updateResults": {}, + "deleteResults": {} + }, + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ] + }, + { + "description": "an empty list of write models is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [], + "verboseResults": true + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Requesting unacknowledged write with verboseResults is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true, + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and verbose results" + } + } + ] + }, + { + "description": "Requesting unacknowledged write with ordered is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and ordered writes" + } + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-errors.yml b/testdata/crud/unified/client-bulkWrite-errors.yml new file mode 100644 index 0000000000..79c0496161 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-errors.yml @@ -0,0 +1,270 @@ +description: "client bulkWrite errors" +schemaVersion: "1.21" +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + uriOptions: + retryWrites: false + useMultipleMongoses: false # Target a single mongos with failpoint + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + writeConcernErrorCode: &writeConcernErrorCode 91 + writeConcernErrorMessage: &writeConcernErrorMessage "Replication is being shut down" + undefinedVarCode: &undefinedVarCode 17276 # Use of an undefined variable + +tests: + - description: "an individual operation fails during an ordered bulkWrite" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 1 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + writeErrors: 
+ 1: + code: *undefinedVarCode + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "an individual operation fails during an unordered bulkWrite" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + ordered: false + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 2 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 2: + deletedCount: 1 + writeErrors: + 1: + code: *undefinedVarCode + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 2, x: 22 } + - description: "detailed results are omitted from error when verboseResults is false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: false + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 1 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + writeErrors: + 1: + code: *undefinedVarCode + - description: "a top-level failure occurs during a bulkWrite" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - bulkWrite + errorCode: 8 # UnknownError + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { x: 1 } + verboseResults: true + expectError: + errorCode: 8 + - description: "a bulk write with only errors does not report a partial result" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + verboseResults: true + expectError: + expectResult: + $$unsetOrMatches: {} # Empty or nonexistent result when no successful writes occurred + writeErrors: + 0: + code: *undefinedVarCode + - description: "a write concern error occurs during a bulkWrite" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - bulkWrite + writeConcernError: + code: *writeConcernErrorCode + errmsg: *writeConcernErrorMessage + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 10 } + verboseResults: true + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 10 + updateResults: {} + deleteResults: {} + writeConcernErrors: + - code: *writeConcernErrorCode + message: *writeConcernErrorMessage + - description: "an empty list of write models is a client-side error" + 
operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: [] + verboseResults: true + expectError: + isClientError: true + - description: "Requesting unacknowledged write with verboseResults is a client-side error" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 10 } + verboseResults: true + ordered: false + writeConcern: { w: 0 } + expectError: + isClientError: true + errorContains: "Cannot request unacknowledged write concern and verbose results" + - description: "Requesting unacknowledged write with ordered is a client-side error" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 10 } + # Omit `ordered` option. Defaults to true. + writeConcern: { w: 0 } + expectError: + isClientError: true + errorContains: "Cannot request unacknowledged write concern and ordered writes" diff --git a/testdata/crud/unified/client-bulkWrite-mixed-namespaces.json b/testdata/crud/unified/client-bulkWrite-mixed-namespaces.json new file mode 100644 index 0000000000..55f0618923 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-mixed-namespaces.json @@ -0,0 +1,315 @@ +{ + "description": "client bulkWrite with mixed namespaces", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "db1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "_yamlAnchors": { + "db0Coll0Namespace": "db0.coll0", + "db0Coll1Namespace": "db0.coll1", + "db1Coll2Namespace": "db1.coll2" + }, + "tests": [ + { + "description": "client bulkWrite with mixed namespaces", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 1 + } + } + }, + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 2 + } + } + }, + { + "updateOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 45 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + 
"deletedCount": 2, + "insertResults": { + "0": { + "insertedId": 1 + }, + "1": { + "insertedId": 2 + } + }, + "updateResults": { + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "5": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + }, + "4": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1 + } + }, + { + "insert": 0, + "document": { + "_id": 2 + } + }, + { + "update": 1, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 2, + "filter": { + "_id": 3 + }, + "multi": false + }, + { + "delete": 1, + "filter": { + "_id": 2 + }, + "multi": false + }, + { + "update": 2, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 45 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "db0.coll0" + }, + { + "ns": "db0.coll1" + }, + { + "ns": "db1.coll2" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 4, + "x": 45 + } + ] + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-mixed-namespaces.yml b/testdata/crud/unified/client-bulkWrite-mixed-namespaces.yml new file mode 100644 index 0000000000..9788bce8c5 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-mixed-namespaces.yml @@ -0,0 +1,147 @@ +description: "client bulkWrite with mixed namespaces" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name db0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + - collection: + id: &collection1 collection1 + database: *database0 + collectionName: &collection1Name coll1 + - database: + id: &database1 database1 + client: *client0 + databaseName: &database1Name db1 + - collection: + id: &collection2 collection2 + database: *database1 + collectionName: &collection2Name coll2 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] + - databaseName: *database0Name + collectionName: *collection1Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - databaseName: *database1Name + collectionName: *collection2Name + documents: + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + +_yamlAnchors: + db0Coll0Namespace: &db0Coll0Namespace "db0.coll0" + db0Coll1Namespace: &db0Coll1Namespace "db0.coll1" + db1Coll2Namespace: &db1Coll2Namespace "db1.coll2" + +tests: + - description: "client bulkWrite with mixed namespaces" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *db0Coll0Namespace + document: { _id: 1 } + - insertOne: + namespace: *db0Coll0Namespace + document: { _id: 2 } + - updateOne: + namespace: *db0Coll1Namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - 
deleteOne: + namespace: *db1Coll2Namespace + filter: { _id: 3 } + - deleteOne: + namespace: *db0Coll1Namespace + filter: { _id: 2 } + - replaceOne: + namespace: *db1Coll2Namespace + filter: { _id: 4 } + replacement: { x: 45 } + verboseResults: true + expectResult: + insertedCount: 2 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 2 + insertResults: + 0: + insertedId: 1 + 1: + insertedId: 2 + updateResults: + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 5: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + 4: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + bulkWrite: 1 + ops: + - insert: 0 + document: { _id: 1 } + - insert: 0 + document: { _id: 2 } + - update: 1 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - delete: 2 + filter: { _id: 3 } + multi: false + - delete: 1 + filter: { _id: 2 } + multi: false + - update: 2 + filter: { _id: 4 } + updateMods: { x: 45 } + multi: false + nsInfo: + - ns: *db0Coll0Namespace + - ns: *db0Coll1Namespace + - ns: *db1Coll2Namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1 } + - { _id: 2 } + - databaseName: *database0Name + collectionName: *collection1Name + documents: + - { _id: 1, x: 12 } + - databaseName: *database1Name + collectionName: *collection2Name + documents: + - { _id: 4, x: 45 } diff --git a/testdata/crud/unified/client-bulkWrite-options.json b/testdata/crud/unified/client-bulkWrite-options.json new file mode 100644 index 0000000000..708fe4e85b --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-options.json @@ -0,0 +1,716 @@ +{ + "description": "client bulkWrite top-level options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "writeConcernClient", + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "comment": { + "bulk": "write" + }, + "let": { + "id1": 1, + "id2": 2 + }, + "writeConcern": { + "w": "majority" + } + }, + "tests": [ + { + "description": "client bulkWrite comment", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "comment": { + "bulk": "write" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": 
false, + "ordered": true, + "comment": { + "bulk": "write" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": true, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite let", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "let": { + "id1": 1, + "id2": 2 + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "1": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "let": { + "id1": 1, + "id2": 2 + }, + "ops": [ + { + "update": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation: false is sent", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + 
"document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": false, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite inherits writeConcern from client", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": 1 + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern option overrides client writeConcern", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + 
"0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-options.yml b/testdata/crud/unified/client-bulkWrite-options.yml new file mode 100644 index 0000000000..e0cbe747b3 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-options.yml @@ -0,0 +1,351 @@ +description: "client bulkWrite top-level options" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - client: + id: &writeConcernClient writeConcernClient + uriOptions: + &clientWriteConcern { w: 1 } + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + comment: &comment { bulk: "write" } + let: &let { id1: 1, id2: 2 } + writeConcern: &majorityWriteConcern { w: "majority" } + +tests: + - description: "client bulkWrite comment" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + comment: *comment + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + comment: *comment + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite bypassDocumentValidation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + bypassDocumentValidation: true + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + bypassDocumentValidation: true + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client 
bulkWrite let" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id1" ] + update: + $inc: { x: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] + let: *let + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 1 + modifiedCount: 1 + deletedCount: 1 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 1: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + let: *let + ops: + - update: 0 + filter: + $expr: + $eq: [ "$_id", "$$id1" ] + updateMods: { $inc: { x: 1 } } + multi: false + - delete: 0 + filter: + $expr: + $eq: [ "$_id", "$$id2" ] + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, x: 12 } + - description: "client bulkWrite bypassDocumentValidation: false is sent" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + bypassDocumentValidation: false + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + bypassDocumentValidation: false + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite writeConcern" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + writeConcern: *majorityWriteConcern + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: *majorityWriteConcern + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + - description: "client bulkWrite inherits writeConcern from client" + operations: + - object: *writeConcernClient + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *writeConcernClient + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: { w: 1 } + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + - description: "client 
bulkWrite writeConcern option overrides client writeConcern" + operations: + - object: *writeConcernClient + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + writeConcern: *majorityWriteConcern + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *writeConcernClient + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: *majorityWriteConcern + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace diff --git a/testdata/crud/unified/client-bulkWrite-ordered.json b/testdata/crud/unified/client-bulkWrite-ordered.json new file mode 100644 index 0000000000..6fb10d992f --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-ordered.json @@ -0,0 +1,291 @@ +{ + "description": "client bulkWrite with ordered option", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with ordered: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite with ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + 
"command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-ordered.yml b/testdata/crud/unified/client-bulkWrite-ordered.yml new file mode 100644 index 0000000000..48aa8ad40a --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-ordered.yml @@ -0,0 +1,153 @@ +description: "client bulkWrite with ordered option" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite with ordered: false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + ordered: false + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: false + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - description: "client bulkWrite with ordered: true" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + ordered: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + 
insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - description: "client bulkWrite defaults to ordered: true" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } diff --git a/testdata/crud/unified/client-bulkWrite-partialResults.json b/testdata/crud/unified/client-bulkWrite-partialResults.json new file mode 100644 index 0000000000..1b75e37834 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-partialResults.json @@ -0,0 +1,540 @@ +{ + "description": "client bulkWrite partial results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "newDocument": { + "_id": 2, + "x": 22 + } + }, + "tests": [ + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": 
"crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is unset when all operations fail during an unordered bulk write", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + 
"expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "1": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-partialResults.yml b/testdata/crud/unified/client-bulkWrite-partialResults.yml new file mode 100644 index 0000000000..1cda7318f8 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-partialResults.yml @@ -0,0 +1,262 @@ +description: "client bulkWrite partial results" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - &existingDocument { _id: 1, x: 11 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + 
newDocument: &newDocument { _id: 2, x: 22 } + +tests: + - description: "partialResult is unset when first operation fails during an ordered bulk write (verbose)" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: *existingDocument + - insertOne: + namespace: *namespace + document: *newDocument + ordered: true + verboseResults: true + expectError: + expectResult: + $$unsetOrMatches: + insertedCount: { $$exists: false } + upsertedCount: { $$exists: false } + matchedCount: { $$exists: false } + modifiedCount: { $$exists: false } + deletedCount: { $$exists: false } + insertResults: { $$exists: false } + updateResults: { $$exists: false } + deleteResults: { $$exists: false } + - description: "partialResult is unset when first operation fails during an ordered bulk write (summary)" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: *existingDocument + - insertOne: + namespace: *namespace + document: *newDocument + ordered: true + verboseResults: false + expectError: + expectResult: + $$unsetOrMatches: + insertedCount: { $$exists: false } + upsertedCount: { $$exists: false } + matchedCount: { $$exists: false } + modifiedCount: { $$exists: false } + deletedCount: { $$exists: false } + insertResults: { $$exists: false } + updateResults: { $$exists: false } + deleteResults: { $$exists: false } + - description: "partialResult is set when second operation fails during an ordered bulk write (verbose)" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: *newDocument + - insertOne: + namespace: *namespace + document: *existingDocument + ordered: true + verboseResults: true + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 2 + updateResults: {} + deleteResults: {} + - description: "partialResult is set when second operation fails during an ordered bulk write (summary)" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: *newDocument + - insertOne: + namespace: *namespace + document: *existingDocument + ordered: true + verboseResults: false + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + - description: "partialResult is unset when all operations fail during an unordered bulk write" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: *existingDocument + - insertOne: + namespace: *namespace + document: *existingDocument + ordered: false + expectError: + expectResult: + $$unsetOrMatches: + insertedCount: { $$exists: false } + upsertedCount: { $$exists: false } + matchedCount: { $$exists: false } + modifiedCount: { $$exists: false } + deletedCount: { $$exists: false } + insertResults: { $$exists: false } + updateResults: { $$exists: false } + deleteResults: { $$exists: false } + - description: "partialResult is set when first operation fails during an unordered bulk write (verbose)" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + 
document: *existingDocument + - insertOne: + namespace: *namespace + document: *newDocument + ordered: false + verboseResults: true + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 1: + insertedId: 2 + updateResults: {} + deleteResults: {} + - description: "partialResult is set when first operation fails during an unordered bulk write (summary)" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: *existingDocument + - insertOne: + namespace: *namespace + document: *newDocument + ordered: false + verboseResults: false + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + - description: "partialResult is set when second operation fails during an unordered bulk write (verbose)" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: *newDocument + - insertOne: + namespace: *namespace + document: *existingDocument + ordered: false + verboseResults: true + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 2 + updateResults: {} + deleteResults: {} + - description: "partialResult is set when second operation fails during an unordered bulk write (summary)" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: *newDocument + - insertOne: + namespace: *namespace + document: *existingDocument + ordered: false + verboseResults: false + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} diff --git a/testdata/crud/unified/client-bulkWrite-results.json b/testdata/crud/unified/client-bulkWrite-results.json new file mode 100644 index 0000000000..accf5a9cbf --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-results.json @@ -0,0 +1,833 @@ +{ + "description": "client bulkWrite results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with verboseResults: true returns detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { 
+ "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with verboseResults: false omits detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { 
+ "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to verboseResults: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + 
"x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-results.yml b/testdata/crud/unified/client-bulkWrite-results.yml new file mode 100644 index 0000000000..86cb5346ae --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-results.yml @@ -0,0 +1,312 @@ +description: "client bulkWrite results" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite with verboseResults: true returns detailed results" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + 0: + insertedId: 8 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 3: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 4 + deleteResults: + 4: + deletedCount: 1 + 5: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: 
{ x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: "client bulkWrite with verboseResults: false omits detailed results" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: false + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: "client bulkWrite defaults to verboseResults: false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + 
filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } diff --git a/testdata/crud/unified/client-bulkWrite-update-options.json b/testdata/crud/unified/client-bulkWrite-update-options.json new file mode 100644 index 0000000000..ce6241c681 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-update-options.json @@ -0,0 +1,949 @@ +{ + "description": "client bulkWrite update options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulkWrite update with arrayFilters", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + 
"updateMods": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 4, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "namespace": 
"crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_", + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_", + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_", + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with upsert", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 6 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 5 + }, + "1": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 6 + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 5 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 6 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true, + "multi": false + } + ], + 
"nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 5, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 6, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-update-options.yml b/testdata/crud/unified/client-bulkWrite-update-options.yml new file mode 100644 index 0000000000..c5cc20d480 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-update-options.yml @@ -0,0 +1,338 @@ +description: "client bulkWrite update options" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, array: [ 1, 2, 3 ] } + - { _id: 2, array: [ 1, 2, 3 ] } + - { _id: 3, array: [ 1, 2, 3 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + collation: &collation { "locale": "simple" } + hint: &hint _id_ + +tests: + - description: "client bulkWrite update with arrayFilters" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $set: + array.$[i]: 4 + arrayFilters: [ i: { $gte: 2 } ] + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: + $set: + array.$[i]: 5 + arrayFilters: [ i: { $gte: 2 } ] + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $set: + array.$[i]: 4 + arrayFilters: [ i: { $gte: 2 } ] + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: + $set: + array.$[i]: 5 + arrayFilters: [ i: { $gte: 2 } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 4, 4 ] } + - { _id: 2, array: [ 1, 5, 5 ] } + - { _id: 3, array: [ 1, 5, 5 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + - description: "client bulkWrite update with collation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $set: { array: [ 1, 2, 4 ] } } + collation: *collation + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $set: { 
array: [ 1, 2, 5 ] } } + collation: *collation + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { array: [ 1, 2, 6 ] } + collation: *collation + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 4 + modifiedCount: 4 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + collation: *collation + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $set: { array: [ 1, 2, 5 ] } } + collation: *collation + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { array: [ 1, 2, 6 ] } + collation: *collation + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 4 ] } + - { _id: 2, array: [ 1, 2, 5 ] } + - { _id: 3, array: [ 1, 2, 5 ] } + - { _id: 4, array: [ 1, 2, 6 ] } + - description: "client bulkWrite update with hint" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $set: { array: [ 1, 2, 4 ] } } + hint: *hint + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $set: { array: [ 1, 2, 5 ] } } + hint: *hint + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { array: [ 1, 2, 6 ] } + hint: *hint + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 4 + modifiedCount: 4 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + hint: *hint + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $set: { array: [ 1, 2, 5 ] } } + hint: *hint + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { array: [ 1, 2, 6 ] } + hint: *hint + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 4 ] } + - { _id: 2, array: [ 1, 2, 5 ] } + - { _id: 3, array: [ 1, 2, 5 ] } + - { _id: 4, array: [ 1, 2, 6 ] } + - description: "client bulkWrite update with upsert" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 5 } + update: { $set: { array: [ 1, 2, 4 ] } } + upsert: true + - replaceOne: + namespace: *namespace + filter: { _id: 6 } + replacement: { array: [ 1, 2, 6 ] } + upsert: 
true + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 2 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 5 + 1: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 6 + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 5 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + upsert: true + multi: false + - update: 0 + filter: { _id: 6 } + updateMods: { array: [ 1, 2, 6 ] } + upsert: true + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 3 ] } + - { _id: 2, array: [ 1, 2, 3 ] } + - { _id: 3, array: [ 1, 2, 3 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + - { _id: 5, array: [ 1, 2, 4 ] } + - { _id: 6, array: [ 1, 2, 6 ] } diff --git a/testdata/crud/unified/client-bulkWrite-update-pipeline.json b/testdata/crud/unified/client-bulkWrite-update-pipeline.json new file mode 100644 index 0000000000..9dba5ee6c5 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-update-pipeline.json @@ -0,0 +1,258 @@ +{ + "description": "client bulkWrite update pipeline", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ 
+ { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": {}, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": {}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-update-pipeline.yml b/testdata/crud/unified/client-bulkWrite-update-pipeline.yml new file mode 100644 index 0000000000..c90e93b47c --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-update-pipeline.yml @@ -0,0 +1,133 @@ +description: "client bulkWrite update pipeline" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - {_id: 1, x: 1} + - {_id: 2, x: 2} + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite updateOne with pipeline" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + - $addFields: + foo: 1 + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 1 + modifiedCount: 1 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { "$$exists": false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + - $addFields: + foo: 1 + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - {_id: 1, x: 1, foo: 1} + - {_id: 2, x: 2 } + + - description: "client bulkWrite updateMany with pipeline" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: {} + update: + - $addFields: + foo: 1 + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { "$$exists": false } + deleteResults: {} + 
expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { } + updateMods: + - $addFields: + foo: 1 + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - {_id: 1, x: 1, foo: 1} + - {_id: 2, x: 2, foo: 1} diff --git a/testdata/crud/unified/client-bulkWrite-update-validation.json b/testdata/crud/unified/client-bulkWrite-update-validation.json new file mode 100644 index 0000000000..617e711338 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-update-validation.json @@ -0,0 +1,216 @@ +{ + "description": "client-bulkWrite-update-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/testdata/crud/unified/client-bulkWrite-update-validation.yml b/testdata/crud/unified/client-bulkWrite-update-validation.yml new 
file mode 100644 index 0000000000..478554c322 --- /dev/null +++ b/testdata/crud/unified/client-bulkWrite-update-validation.yml @@ -0,0 +1,79 @@ +description: "client-bulkWrite-update-validation" + +schemaVersion: "1.1" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: &initialData + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite replaceOne prohibits atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - replaceOne: + namespace: *namespace + filter: { _id: 1 } + replacement: { $set: { x: 22 } } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData + + - description: "client bulkWrite updateOne requires atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { x: 22 } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData + + - description: "client bulkWrite updateMany requires atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + update: { x: 44 } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData diff --git a/testdata/retryable-writes/unified/client-bulkWrite-clientErrors.json b/testdata/retryable-writes/unified/client-bulkWrite-clientErrors.json new file mode 100644 index 0000000000..d16e0c9c8d --- /dev/null +++ b/testdata/retryable-writes/unified/client-bulkWrite-clientErrors.json @@ -0,0 +1,351 @@ +{ + "description": "client bulkWrite retryable writes with client errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with one network error succeeds after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + 
"models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/testdata/retryable-writes/unified/client-bulkWrite-clientErrors.yml b/testdata/retryable-writes/unified/client-bulkWrite-clientErrors.yml new file mode 100644 index 0000000000..e5214b90f8 --- 
/dev/null +++ b/testdata/retryable-writes/unified/client-bulkWrite-clientErrors.yml @@ -0,0 +1,173 @@ +description: "client bulkWrite retryable writes with client errors" +schemaVersion: "1.21" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with one network error succeeds after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 4 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with two network errors fails after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 2 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectError: + isClientError: true + errorLabelsContain: ["RetryableWriteError"] # Error label added by driver. 
+ expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } diff --git a/testdata/retryable-writes/unified/client-bulkWrite-serverErrors.json b/testdata/retryable-writes/unified/client-bulkWrite-serverErrors.json new file mode 100644 index 0000000000..f58c82bcc7 --- /dev/null +++ b/testdata/retryable-writes/unified/client-bulkWrite-serverErrors.json @@ -0,0 +1,873 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "clientRetryWritesFalse", + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { 
+ "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError", + "operations": [ + 
{ + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + 
"models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with retryWrites: false does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "clientRetryWritesFalse", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "clientRetryWritesFalse", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "clientRetryWritesFalse", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/testdata/retryable-writes/unified/client-bulkWrite-serverErrors.yml b/testdata/retryable-writes/unified/client-bulkWrite-serverErrors.yml new file mode 100644 index 0000000000..722e5cc8e0 --- /dev/null +++ b/testdata/retryable-writes/unified/client-bulkWrite-serverErrors.yml @@ -0,0 +1,413 @@ +description: "client bulkWrite retryable writes" +schemaVersion: "1.21" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - client: + id: &clientRetryWritesFalse clientRetryWritesFalse + uriOptions: + retryWrites: false + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with no multi: true operations succeeds after retryable top-level error" + operations: + - object: testRunner + name: failPoint + 
arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 222 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with multi: true operations fails after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace + - description: "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - 
object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - description: "client bulkWrite with multi: true operations fails after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + writeConcernErrors: + - code: 91 + message: "Replication is being shut down" + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace + - description: "client bulkWrite with retryWrites: false does not retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *clientRetryWritesFalse + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *clientRetryWritesFalse + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *clientRetryWritesFalse + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + 
bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace diff --git a/testdata/server-selection/logging/operation-id.json b/testdata/server-selection/logging/operation-id.json index 276e4b8d6d..72ebff60d8 100644 --- a/testdata/server-selection/logging/operation-id.json +++ b/testdata/server-selection/logging/operation-id.json @@ -47,6 +47,9 @@ } } ], + "_yamlAnchors": { + "namespace": "logging-tests.server-selection" + }, "tests": [ { "description": "Successful bulkWrite operation: log messages have operationIds", @@ -224,6 +227,192 @@ ] } ] + }, + { + "description": "Successful client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "Failed client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] } ] } diff --git a/testdata/server-selection/logging/operation-id.yml b/testdata/server-selection/logging/operation-id.yml index 
21be7b7d14..e5732893fa 100644 --- a/testdata/server-selection/logging/operation-id.yml +++ b/testdata/server-selection/logging/operation-id.yml @@ -1,4 +1,3 @@ - description: "operation-id" schemaVersion: "1.14" @@ -31,6 +30,9 @@ createEntities: - client: id: &failPointClient failPointClient +_yamlAnchors: + namespace: &namespace "logging-tests.server-selection" + tests: - description: "Successful bulkWrite operation: log messages have operationIds" operations: @@ -122,3 +124,100 @@ tests: message: "Server selection failed" operationId: { $$type: [int, long] } operation: insert + + - description: "Successful client bulkWrite operation: log messages have operationIds" + runOnRequirements: + - minServerVersion: "8.0" # required for bulkWrite command + serverless: forbid + operations: + # ensure we've discovered the server so it is immediately available + # and no extra "waiting for suitable server" messages are emitted. + # expected topology events reflect initial server discovery and server connect event. + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + topologyDescriptionChangedEvent: {} + count: 2 + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: *namespace + document: { x: 1 } + expectLogMessages: + - client: *client + messages: + - level: debug + component: serverSelection + data: + message: "Server selection started" + operationId: { $$type: [int, long] } + operation: bulkWrite + - level: debug + component: serverSelection + data: + message: "Server selection succeeded" + operationId: { $$type: [int, long] } + operation: bulkWrite + + - description: "Failed client bulkWrite operation: log messages have operationIds" + runOnRequirements: + - minServerVersion: "8.0" # required for bulkWrite command + serverless: forbid + operations: + # fail all hello/legacy hello commands for the main client. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: alwaysOn + data: + failCommands: ["hello", "ismaster"] + appName: *appName + closeConnection: true + # wait until we've marked the server unknown due + # to a failed heartbeat. + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + serverDescriptionChangedEvent: + newDescription: + type: Unknown + count: 1 + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: *namespace + document: { x: 1 } + expectError: + isClientError: true # server selection timeout + expectLogMessages: + - client: *client + messages: + - level: debug + component: serverSelection + data: + message: "Server selection started" + operationId: { $$type: [int, long] } + operation: bulkWrite + - level: info + component: serverSelection + data: + message: "Waiting for suitable server to become available" + operationId: { $$type: [int, long] } + operation: bulkWrite + - level: debug + component: serverSelection + data: + message: "Server selection failed" + operationId: { $$type: [int, long] } + operation: bulkWrite diff --git a/testdata/transactions/legacy/mongos-pin-auto-tests.py b/testdata/transactions/legacy/mongos-pin-auto-tests.py deleted file mode 100644 index 1072ec2907..0000000000 --- a/testdata/transactions/legacy/mongos-pin-auto-tests.py +++ /dev/null @@ -1,340 +0,0 @@ -import itertools -import sys - -# Require Python 3.7+ for ordered dictionaries so that the order of the -# generated tests remain the same. 
-# Usage: -# python3.7 mongos-pin-auto-tests.py > mongos-pin-auto.yml -if sys.version_info[:2] < (3, 7): - print('ERROR: This script requires Python >= 3.7, not:') - print(sys.version) - print('Usage: python3.7 mongos-pin-auto-tests.py > mongos-pin-auto.yml') - exit(1) - -HEADER = '''# Autogenerated tests that transient errors in a transaction unpin the session. -# See mongos-pin-auto-tests.py -runOn: - - - minServerVersion: "4.1.8" - topology: ["sharded"] - # serverless proxy doesn't append error labels to errors in transactions - # caused by failpoints (CLOUDP-88216) - serverless: "forbid" - -database_name: &database_name "transaction-tests" -collection_name: &collection_name "test" - -data: &data - - {_id: 1} - - {_id: 2} - -tests: - - description: remain pinned after non-transient Interrupted error on insertOne - useMultipleMongoses: true - operations: - - &startTransaction - name: startTransaction - object: session0 - - &initialCommand - name: insertOne - object: collection - arguments: - session: session0 - document: {_id: 3} - result: - insertedId: 3 - - name: targetedFailPoint - object: testRunner - arguments: - session: session0 - failPoint: - configureFailPoint: failCommand - mode: {times: 1} - data: - failCommands: ["insert"] - errorCode: 11601 - - name: insertOne - object: collection - arguments: - session: session0 - document: - _id: 4 - result: - errorLabelsOmit: ["TransientTransactionError", "UnknownTransactionCommitResult"] - errorCodeName: Interrupted - - &assertSessionPinned - name: assertSessionPinned - object: testRunner - arguments: - session: session0 - - &commitTransaction - name: commitTransaction - object: session0 - - expectations: - - command_started_event: - command: - insert: *collection_name - documents: - - _id: 3 - ordered: true - readConcern: - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: true - autocommit: false - writeConcern: - command_name: insert - database_name: *database_name - - command_started_event: - command: - insert: *collection_name - documents: - - _id: 4 - ordered: true - readConcern: - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: - autocommit: false - writeConcern: - command_name: insert - database_name: *database_name - - command_started_event: - command: - commitTransaction: 1 - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: - autocommit: false - writeConcern: - recoveryToken: 42 - command_name: commitTransaction - database_name: admin - - outcome: &outcome - collection: - data: - - {_id: 1} - - {_id: 2} - - {_id: 3} - - - description: unpin after transient error within a transaction - useMultipleMongoses: true - operations: - - &startTransaction - name: startTransaction - object: session0 - - &initialCommand - name: insertOne - object: collection - arguments: - session: session0 - document: - _id: 3 - result: - insertedId: 3 - - name: targetedFailPoint - object: testRunner - arguments: - session: session0 - failPoint: - configureFailPoint: failCommand - mode: { times: 1 } - data: - failCommands: ["insert"] - closeConnection: true - - name: insertOne - object: collection - arguments: - session: session0 - document: - _id: 4 - result: - errorLabelsContain: ["TransientTransactionError"] - errorLabelsOmit: ["UnknownTransactionCommitResult"] - # Session unpins from the first mongos after the insert error and - # abortTransaction succeeds immediately on any mongos. 
- - &assertSessionUnpinned - name: assertSessionUnpinned - object: testRunner - arguments: - session: session0 - - &abortTransaction - name: abortTransaction - object: session0 - - expectations: - - command_started_event: - command: - insert: *collection_name - documents: - - _id: 3 - ordered: true - readConcern: - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: true - autocommit: false - writeConcern: - command_name: insert - database_name: *database_name - - command_started_event: - command: - insert: *collection_name - documents: - - _id: 4 - ordered: true - readConcern: - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: - autocommit: false - writeConcern: - command_name: insert - database_name: *database_name - - command_started_event: - command: - abortTransaction: 1 - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: - autocommit: false - writeConcern: - recoveryToken: 42 - command_name: abortTransaction - database_name: admin - - outcome: &outcome - collection: - data: *data - - # The rest of the tests in this file test every operation type against - # multiple types of transient errors (connection and error code).''' - -TEMPLATE = ''' - - description: {test_name} {error_name} error on {op_name} {command_name} - useMultipleMongoses: true - operations: - - *startTransaction - - *initialCommand - - name: targetedFailPoint - object: testRunner - arguments: - session: session0 - failPoint: - configureFailPoint: failCommand - mode: {{times: 1}} - data: - failCommands: ["{command_name}"] - {error_data} - - name: {op_name} - object: {object_name} - arguments: - session: session0 - {op_args} - result: - {error_labels}: ["TransientTransactionError"] - - *{assertion} - - *abortTransaction - outcome: *outcome -''' - - -# Maps from op_name to (command_name, object_name, op_args) -OPS = { - # Write ops: - 'insertOne': ('insert', 'collection', r'document: {_id: 4}'), - 'insertMany': ('insert', 'collection', r'documents: [{_id: 4}, {_id: 5}]'), - 'updateOne': ('update', 'collection', r'''filter: {_id: 1} - update: {$inc: {x: 1}}'''), - 'replaceOne': ('update', 'collection', r'''filter: {_id: 1} - replacement: {y: 1}'''), - 'updateMany': ('update', 'collection', r'''filter: {_id: {$gte: 1}} - update: {$set: {z: 1}}'''), - 'deleteOne': ('delete', 'collection', r'filter: {_id: 1}'), - 'deleteMany': ('delete', 'collection', r'filter: {_id: {$gte: 1}}'), - 'findOneAndDelete': ('findAndModify', 'collection', r'filter: {_id: 1}'), - 'findOneAndUpdate': ('findAndModify', 'collection', r'''filter: {_id: 1} - update: {$inc: {x: 1}} - returnDocument: Before'''), - 'findOneAndReplace': ('findAndModify', 'collection', r'''filter: {_id: 1} - replacement: {y: 1} - returnDocument: Before'''), - # Bulk write insert/update/delete: - 'bulkWrite insert': ('insert', 'collection', r'''requests: - - name: insertOne - arguments: - document: {_id: 1}'''), - 'bulkWrite update': ('update', 'collection', r'''requests: - - name: updateOne - arguments: - filter: {_id: 1} - update: {$set: {x: 1}}'''), - 'bulkWrite delete': ('delete', 'collection', r'''requests: - - name: deleteOne - arguments: - filter: {_id: 1}'''), - # Read ops: - 'find': ('find', 'collection', r'filter: {_id: 1}'), - 'countDocuments': ('aggregate', 'collection', r'filter: {}'), - 'aggregate': ('aggregate', 'collection', r'pipeline: []'), - 'distinct': ('distinct', 'collection', r'fieldName: _id'), - # runCommand: - 'runCommand': ( - 'insert', - r'''database - command_name: insert''', # runCommand requires 
command_name. - r'''command: - insert: *collection_name - documents: - - _id : 1'''), -} - -# Maps from error_name to error_data. -NON_TRANSIENT_ERRORS = { - 'Interrupted': 'errorCode: 11601', -} - -# Maps from error_name to error_data. -TRANSIENT_ERRORS = { - 'connection': 'closeConnection: true', - 'ShutdownInProgress': 'errorCode: 91', -} - - -def create_pin_test(op_name, error_name): - test_name = 'remain pinned after non-transient' - assertion = 'assertSessionPinned' - error_labels = 'errorLabelsOmit' - command_name, object_name, op_args = OPS[op_name] - error_data = NON_TRANSIENT_ERRORS[error_name] - if op_name.startswith('bulkWrite'): - op_name = 'bulkWrite' - return TEMPLATE.format(**locals()) - - -def create_unpin_test(op_name, error_name): - test_name = 'unpin after transient' - assertion = 'assertSessionUnpinned' - error_labels = 'errorLabelsContain' - command_name, object_name, op_args = OPS[op_name] - error_data = TRANSIENT_ERRORS[error_name] - if op_name.startswith('bulkWrite'): - op_name = 'bulkWrite' - return TEMPLATE.format(**locals()) - -tests = [] -for op_name, error_name in itertools.product(OPS, NON_TRANSIENT_ERRORS): - tests.append(create_pin_test(op_name, error_name)) -for op_name, error_name in itertools.product(OPS, TRANSIENT_ERRORS): - tests.append(create_unpin_test(op_name, error_name)) - -print(HEADER) -print(''.join(tests)) diff --git a/testdata/transactions/unified/client-bulkWrite.json b/testdata/transactions/unified/client-bulkWrite.json new file mode 100644 index 0000000000..4a8d013f8d --- /dev/null +++ b/testdata/transactions/unified/client-bulkWrite.json @@ -0,0 +1,593 @@ +{ + "description": "client bulkWrite transactions", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client_with_wmajority", + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "session": { + "id": "session_with_wmajority", + "client": "client_with_wmajority" + } + } + ], + "_yamlAnchors": { + "namespace": "transaction-tests.coll0" + }, + "initialData": [ + { + "databaseName": "transaction-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite in a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + 
{ + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for client bulkWrite in transaction", + "operations": [ + { + "object": "session_with_wmajority", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "client_with_wmajority", + "name": "clientBulkWrite", + "arguments": { + "session": "session_with_wmajority", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectResult": { + 
"insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "session_with_wmajority", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client_with_wmajority", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with writeConcern in a transaction causes a transaction error", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "writeConcern": { + "w": 1 + }, + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot set write concern after starting a transaction" + } + } + ] + } + ] +} diff --git a/testdata/transactions/unified/client-bulkWrite.yml b/testdata/transactions/unified/client-bulkWrite.yml new file mode 100644 index 0000000000..d80e618728 --- /dev/null +++ b/testdata/transactions/unified/client-bulkWrite.yml @@ -0,0 +1,263 @@ +description: "client bulkWrite transactions" +schemaVersion: "1.4" # To support `serverless: forbid` +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + serverless: forbid + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name transaction-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + - session: + id: &session0 session0 + client: *client0 + - client: + id: &client_with_wmajority client_with_wmajority + uriOptions: + w: majority + observeEvents: + - commandStartedEvent + - session: + id: &session_with_wmajority session_with_wmajority + client: *client_with_wmajority + +_yamlAnchors: + namespace: &namespace "transaction-tests.coll0" + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 
} + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + +tests: + - description: "client bulkWrite in a transaction" + operations: + - object: *session0 + name: startTransaction + - object: *client0 + name: clientBulkWrite + arguments: + session: *session0 + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + 0: + insertedId: 8 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 3: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 4 + deleteResults: + 4: + deletedCount: 1 + 5: + deletedCount: 2 + - object: *session0 + name: commitTransaction + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: 'client writeConcern ignored for client bulkWrite in transaction' + operations: + - object: *session_with_wmajority + name: startTransaction + arguments: + writeConcern: + w: 1 + - object: *client_with_wmajority + name: clientBulkWrite + arguments: + session: *session_with_wmajority + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + - object: *session_with_wmajority + name: commitTransaction + expectEvents: + - + client: *client_with_wmajority + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + lsid: { $$sessionLsid: *session_with_wmajority } + txnNumber: 1 + startTransaction: true + autocommit: false + 
writeConcern: { $$exists: false } + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + nsInfo: + - ns: *namespace + - + commandStartedEvent: + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session_with_wmajority } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: + w: 1 + commandName: commitTransaction + databaseName: admin + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + - { _id: 8, x: 88 } + - description: "client bulkWrite with writeConcern in a transaction causes a transaction error" + operations: + - object: *session0 + name: startTransaction + - object: *client0 + name: clientBulkWrite + arguments: + session: *session0 + writeConcern: + w: 1 + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + expectError: + isClientError: true + errorContains: "Cannot set write concern after starting a transaction" diff --git a/testdata/transactions/unified/mongos-pin-auto-tests.py b/testdata/transactions/unified/mongos-pin-auto-tests.py new file mode 100644 index 0000000000..09306780fa --- /dev/null +++ b/testdata/transactions/unified/mongos-pin-auto-tests.py @@ -0,0 +1,353 @@ +import itertools +import sys + +# Require Python 3.7+ for ordered dictionaries so that the order of the +# generated tests remain the same. +# Usage: +# python3.7 mongos-pin-auto-tests.py > mongos-pin-auto.yml +if sys.version_info[:2] < (3, 7): + print('ERROR: This script requires Python >= 3.7, not:') + print(sys.version) + print('Usage: python3.7 mongos-pin-auto-tests.py > mongos-pin-auto.yml') + exit(1) + +HEADER = '''# Autogenerated tests that transient errors in a transaction unpin the session. 
+# See mongos-pin-auto-tests.py + +description: mongos-pin-auto + +schemaVersion: '1.4' + +runOnRequirements: + - minServerVersion: "4.1.8" + # Note: tests utilize targetedFailPoint, which is incompatible with + # load-balanced and useMultipleMongoses:true + topologies: [ sharded ] + # serverless proxy doesn't append error labels to errors in transactions + # caused by failpoints (CLOUDP-88216) + serverless: "forbid" + +createEntities: + - client: + id: &client0 client0 + useMultipleMongoses: true + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database_name transaction-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection_name test + - session: + id: &session0 session0 + client: *client0 + +initialData: + - collectionName: *collection_name + databaseName: *database_name + documents: &data + - { _id: 1 } + - { _id: 2 } + +tests: + - description: remain pinned after non-transient Interrupted error on insertOne + operations: + - &startTransaction + object: session0 + name: startTransaction + - &initialCommand + object: *collection0 + name: insertOne + arguments: + session: *session0 + document: { _id: 3 } + expectResult: { $$unsetOrMatches: { insertedId: { $$unsetOrMatches: 3 } } } + - object: testRunner + name: targetedFailPoint + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ "insert" ] + errorCode: 11601 + - object: *collection0 + name: insertOne + arguments: + session: *session0 + document: { _id: 4 } + expectError: + errorLabelsOmit: ["TransientTransactionError", "UnknownTransactionCommitResult"] + errorCodeName: Interrupted + - &assertSessionPinned + object: testRunner + name: assertSessionPinned + arguments: + session: *session0 + - &commitTransaction + object: *session0 + name: commitTransaction + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection_name + documents: + - { _id: 3 } + ordered: true + readConcern: { $$exists: false } + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + commandName: insert + databaseName: *database_name + - commandStartedEvent: + command: + insert: *collection_name + documents: + - { _id: 4 } + ordered: true + readConcern: { $$exists: false } + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + commandName: insert + databaseName: *database_name + - commandStartedEvent: + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + recoveryToken: { $$exists: true } + commandName: commitTransaction + databaseName: admin + outcome: + - collectionName: *collection_name + databaseName: *database_name + documents: + - { _id: 1 } + - { _id: 2 } + - { _id: 3 } + + - description: 'unpin after transient error within a transaction' + operations: + - *startTransaction + - *initialCommand + - object: testRunner + name: targetedFailPoint + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ "insert" ] + closeConnection: true + - object: *collection0 + name: insertOne + arguments: + session: 
*session0 + document: { _id: 4 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + errorLabelsOmit: ["UnknownTransactionCommitResult"] + - &assertSessionUnpinned + object: testRunner + name: assertSessionUnpinned + arguments: + session: *session0 + - &abortTransaction + object: *session0 + name: abortTransaction + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection_name + documents: + - { _id: 3 } + ordered: true + readConcern: { $$exists: false } + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + commandName: insert + databaseName: *database_name + - commandStartedEvent: + command: + insert: *collection_name + documents: + - { _id: 4 } + ordered: true + readConcern: { $$exists: false } + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + commandName: insert + databaseName: *database_name + - commandStartedEvent: + command: + abortTransaction: 1 + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + recoveryToken: { $$exists: true } + commandName: abortTransaction + databaseName: admin + outcome: &outcome + - collectionName: *collection_name + databaseName: *database_name + documents: *data + + # The rest of the tests in this file test every operation type against + # multiple types of transient errors (connection and error code).''' + +TEMPLATE = ''' + - description: {test_name} {error_name} error on {op_name} {command_name} + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {{times: 1}} + data: + failCommands: ["{command_name}"] + {error_data} + - name: {op_name} + object: {object_name} + arguments: + session: *session0 + {op_args} + expectError: + {error_labels}: ["TransientTransactionError"] + - *{assertion} + - *abortTransaction + outcome: *outcome +''' + + +# Maps from op_name to (command_name, object_name, op_args) +OPS = { + # Write ops: + 'insertOne': ('insert', '*collection0', r'document: { _id: 4 }'), + 'insertMany': ('insert', '*collection0', r'documents: [ { _id: 4 }, { _id: 5 } ]'), + 'updateOne': ('update', '*collection0', r'''filter: { _id: 1 } + update: { $inc: { x: 1 } }'''), + 'replaceOne': ('update', '*collection0', r'''filter: { _id: 1 } + replacement: { y: 1 }'''), + 'updateMany': ('update', '*collection0', r'''filter: { _id: { $gte: 1 } } + update: {$set: { z: 1 } }'''), + 'deleteOne': ('delete', '*collection0', r'filter: { _id: 1 }'), + 'deleteMany': ('delete', '*collection0', r'filter: { _id: { $gte: 1 } }'), + 'findOneAndDelete': ('findAndModify', '*collection0', r'filter: { _id: 1 }'), + 'findOneAndUpdate': ('findAndModify', '*collection0', r'''filter: { _id: 1 } + update: { $inc: { x: 1 } } + returnDocument: Before'''), + 'findOneAndReplace': ('findAndModify', '*collection0', r'''filter: { _id: 1 } + replacement: { y: 1 } + returnDocument: Before'''), + # Bulk write insert/update/delete: + 'bulkWrite insert': ('insert', '*collection0', r'''requests: + - insertOne: + document: { _id: 1 }'''), + 'bulkWrite update': ('update', '*collection0', r'''requests: + - updateOne: + filter: { _id: 1 } + update: { $set: { x: 1 } }'''), + 
'bulkWrite delete': ('delete', '*collection0', r'''requests: + - deleteOne: + filter: { _id: 1 }'''), + # Read ops: + 'find': ('find', '*collection0', r'filter: { _id: 1 }'), + 'countDocuments': ('aggregate', '*collection0', r'filter: {}'), + 'aggregate': ('aggregate', '*collection0', r'pipeline: []'), + 'distinct': ('distinct', '*collection0', r'''fieldName: _id + filter: {}'''), + # runCommand: + 'runCommand': ('insert', '*database0', r'''commandName: insert + command: + insert: *collection_name + documents: + - { _id : 1 }'''), + # clientBulkWrite: + 'clientBulkWrite': ('bulkWrite', '*client0', r'''models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 }'''), +} + +# Maps from error_name to error_data. +NON_TRANSIENT_ERRORS = { + 'Interrupted': 'errorCode: 11601', +} + +# Maps from error_name to error_data. +TRANSIENT_ERRORS = { + 'connection': 'closeConnection: true', + 'ShutdownInProgress': 'errorCode: 91', +} + + +def create_pin_test(op_name, error_name): + test_name = 'remain pinned after non-transient' + assertion = 'assertSessionPinned' + error_labels = 'errorLabelsOmit' + command_name, object_name, op_args = OPS[op_name] + error_data = NON_TRANSIENT_ERRORS[error_name] + if op_name.startswith('bulkWrite'): + op_name = 'bulkWrite' + test = TEMPLATE.format(**locals()) + if op_name == 'clientBulkWrite': + test += ' runOnRequirements:\n' + test += ' - minServerVersion: "8.0" # `bulkWrite` added to server 8.0"\n' + test += ' serverless: forbid\n' + return test + + +def create_unpin_test(op_name, error_name): + test_name = 'unpin after transient' + assertion = 'assertSessionUnpinned' + error_labels = 'errorLabelsContain' + command_name, object_name, op_args = OPS[op_name] + error_data = TRANSIENT_ERRORS[error_name] + if op_name.startswith('bulkWrite'): + op_name = 'bulkWrite' + test = TEMPLATE.format(**locals()) + if op_name == 'clientBulkWrite': + test += ' runOnRequirements:\n' + test += ' - minServerVersion: "8.0" # `bulkWrite` added to server 8.0"\n' + test += ' serverless: forbid\n' + return test + + + +tests = [] +for op_name, error_name in itertools.product(OPS, NON_TRANSIENT_ERRORS): + tests.append(create_pin_test(op_name, error_name)) +for op_name, error_name in itertools.product(OPS, TRANSIENT_ERRORS): + tests.append(create_unpin_test(op_name, error_name)) + +print(HEADER) +print(''.join(tests)) diff --git a/testdata/transactions/legacy/mongos-pin-auto.json b/testdata/transactions/unified/mongos-pin-auto.json similarity index 66% rename from testdata/transactions/legacy/mongos-pin-auto.json rename to testdata/transactions/unified/mongos-pin-auto.json index 037f212f49..27db520401 100644 --- a/testdata/transactions/legacy/mongos-pin-auto.json +++ b/testdata/transactions/unified/mongos-pin-auto.json @@ -1,48 +1,88 @@ { - "runOn": [ + "description": "mongos-pin-auto", + "schemaVersion": "1.4", + "runOnRequirements": [ { "minServerVersion": "4.1.8", - "topology": [ + "topologies": [ "sharded" ], "serverless": "forbid" } ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ + "createEntities": [ { - "_id": 1 + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } }, { - "_id": 2 + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], 
+ "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] } ], "tests": [ { "description": "remain pinned after non-transient Interrupted error on insertOne", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { - "name": "targetedFailPoint", "object": "testRunner", + "name": "targetedFailPoint", "arguments": { "session": "session0", "failPoint": { @@ -60,15 +100,15 @@ } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError", "UnknownTransactionCommitResult" @@ -77,85 +117,114 @@ } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "commitTransaction", - "object": "session0" + "object": "session0", + "name": "commitTransaction" } ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, + "expectEvents": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "commitTransaction", - "database_name": "admin" - } + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -167,32 +236,35 @@ } ] } - } + ] }, { "description": "unpin after transient error within a transaction", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { - "name": "targetedFailPoint", "object": "testRunner", + "name": "targetedFailPoint", "arguments": { "session": "session0", "failPoint": { @@ -210,15 +282,15 @@ } }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ], @@ -228,85 +300,114 @@ } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, + "expectEvents": [ { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } }, 
- "command_name": "abortTransaction", - "database_name": "admin" - } + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -315,27 +416,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on insertOne insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -359,34 +463,36 @@ }, { "name": "insertOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -395,27 +501,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on insertMany insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -439,7 +548,7 @@ }, { "name": "insertMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "documents": [ @@ -451,27 +560,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -480,27 +591,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on updateOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": 
{ - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -524,7 +638,7 @@ }, { "name": "updateOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -536,27 +650,29 @@ } } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -565,27 +681,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on replaceOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -609,7 +728,7 @@ }, { "name": "replaceOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -619,27 +738,29 @@ "y": 1 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -648,27 +769,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on updateMany update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -692,7 +816,7 @@ }, { "name": "updateMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -706,27 +830,29 @@ } } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -735,27 +861,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on deleteOne delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": 
"startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -779,34 +908,36 @@ }, { "name": "deleteOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -815,27 +946,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on deleteMany delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -859,7 +993,7 @@ }, { "name": "deleteMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -868,27 +1002,29 @@ } } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -897,27 +1033,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -941,34 +1080,36 @@ }, { "name": "findOneAndDelete", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -977,27 +1118,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient 
Interrupted error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1021,7 +1165,7 @@ }, { "name": "findOneAndUpdate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -1034,27 +1178,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1063,27 +1209,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1107,7 +1256,7 @@ }, { "name": "findOneAndReplace", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -1118,27 +1267,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1147,27 +1298,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on bulkWrite insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1191,13 +1345,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 1 } @@ -1205,27 +1358,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, 
{ - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1234,27 +1389,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on bulkWrite update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1278,13 +1436,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -1297,27 +1454,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1326,27 +1485,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on bulkWrite delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1370,13 +1532,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 1 } @@ -1384,27 +1545,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1413,27 +1576,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on find find", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1457,34 +1623,36 @@ }, { "name": "find", - 
"object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1493,27 +1661,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on countDocuments aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1537,32 +1708,34 @@ }, { "name": "countDocuments", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": {} }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1571,27 +1744,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on aggregate aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1615,32 +1791,34 @@ }, { "name": "aggregate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "pipeline": [] }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1649,27 +1827,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on distinct distinct", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + 
"expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1693,32 +1874,35 @@ }, { "name": "distinct", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", - "fieldName": "_id" + "fieldName": "_id", + "filter": {} }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1727,27 +1911,30 @@ } ] } - } + ] }, { "description": "remain pinned after non-transient Interrupted error on runCommand insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1771,10 +1958,10 @@ }, { "name": "runCommand", - "object": "database", - "command_name": "insert", + "object": "database0", "arguments": { "session": "session0", + "commandName": "insert", "command": { "insert": "test", "documents": [ @@ -1784,27 +1971,29 @@ ] } }, - "result": { + "expectError": { "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionPinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1813,27 +2002,30 @@ } ] } - } + ] }, { - "description": "unpin after transient connection error on insertOne insert", - "useMultipleMongoses": true, + "description": "remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite", "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1848,43 +2040,53 @@ }, "data": { "failCommands": [ - "insert" + "bulkWrite" ], - "closeConnection": true + "errorCode": 11601 } } } }, { - "name": "insertOne", - "object": "collection", + "name": "clientBulkWrite", + "object": "client0", "arguments": { "session": "session0", - "document": { - "_id": 4 - } + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] }, - "result": { - "errorLabelsContain": [ + "expectError": { + "errorLabelsOmit": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionPinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": 
"session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1893,27 +2095,120 @@ } ] } - } - }, - { + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient connection error on insertOne insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { "description": "unpin after transient ShutdownInProgress error on insertOne insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -1937,34 +2232,36 @@ }, { "name": "insertOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "document": { "_id": 4 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -1973,27 +2270,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on insertMany insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2017,7 +2317,7 @@ }, { "name": "insertMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "documents": [ @@ -2029,27 +2329,29 @@ } ] }, - "result": { + "expectError": { 
"errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2058,27 +2360,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on insertMany insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2102,7 +2407,7 @@ }, { "name": "insertMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "documents": [ @@ -2114,27 +2419,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2143,27 +2450,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on updateOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2187,7 +2497,7 @@ }, { "name": "updateOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2199,27 +2509,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2228,27 +2540,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on updateOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2272,7 +2587,7 @@ }, { 
"name": "updateOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2284,27 +2599,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2313,27 +2630,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on replaceOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2357,7 +2677,7 @@ }, { "name": "replaceOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2367,27 +2687,29 @@ "y": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2396,27 +2718,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on replaceOne update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2440,7 +2765,7 @@ }, { "name": "replaceOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2450,27 +2775,29 @@ "y": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2479,27 +2806,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on updateMany update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", 
"document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2523,7 +2853,7 @@ }, { "name": "updateMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2537,27 +2867,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2566,27 +2898,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on updateMany update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2610,7 +2945,7 @@ }, { "name": "updateMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2624,27 +2959,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2653,27 +2990,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on deleteOne delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2697,34 +3037,36 @@ }, { "name": "deleteOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2733,27 +3075,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on deleteOne delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" 
+ "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2777,34 +3122,36 @@ }, { "name": "deleteOne", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2813,27 +3160,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on deleteMany delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2857,7 +3207,7 @@ }, { "name": "deleteMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2866,27 +3216,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2895,27 +3247,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on deleteMany delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -2939,7 +3294,7 @@ }, { "name": "deleteMany", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -2948,27 +3303,29 @@ } } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -2977,27 +3334,30 @@ } ] } - } + ] }, { "description": 
"unpin after transient connection error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3021,34 +3381,36 @@ }, { "name": "findOneAndDelete", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3057,27 +3419,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3101,34 +3466,36 @@ }, { "name": "findOneAndDelete", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3137,27 +3504,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3181,7 +3551,7 @@ }, { "name": "findOneAndUpdate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -3194,27 +3564,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": 
"abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3223,27 +3595,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3267,7 +3642,7 @@ }, { "name": "findOneAndUpdate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -3280,27 +3655,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3309,27 +3686,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3353,7 +3733,7 @@ }, { "name": "findOneAndReplace", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -3364,27 +3744,29 @@ }, "returnDocument": "Before" }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3393,27 +3775,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3437,7 +3822,7 @@ }, { "name": "findOneAndReplace", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { @@ -3448,27 +3833,29 @@ }, "returnDocument": 
"Before" }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3477,27 +3864,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on bulkWrite insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3521,13 +3911,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 1 } @@ -3535,27 +3924,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3564,27 +3955,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on bulkWrite insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3608,13 +4002,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 1 } @@ -3622,27 +4015,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3651,27 +4046,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on bulkWrite update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { 
"session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3695,13 +4093,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -3714,27 +4111,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3743,27 +4142,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on bulkWrite update", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3787,13 +4189,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -3806,27 +4207,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3835,27 +4238,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on bulkWrite delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3879,13 +4285,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 1 } @@ -3893,27 +4298,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + 
"databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -3922,27 +4329,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on bulkWrite delete", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -3966,13 +4376,12 @@ }, { "name": "bulkWrite", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "requests": [ { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 1 } @@ -3980,27 +4389,29 @@ } ] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4009,27 +4420,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on find find", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4053,34 +4467,36 @@ }, { "name": "find", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4089,27 +4505,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on find find", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4133,34 +4552,36 @@ }, { "name": "find", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": { "_id": 1 } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": 
"session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4169,27 +4590,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on countDocuments aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4213,32 +4637,34 @@ }, { "name": "countDocuments", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": {} }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4247,27 +4673,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on countDocuments aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4291,32 +4720,34 @@ }, { "name": "countDocuments", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "filter": {} }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4325,27 +4756,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on aggregate aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4369,32 +4803,34 @@ }, { "name": "aggregate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "pipeline": [] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" 
] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4403,27 +4839,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on aggregate aggregate", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4447,32 +4886,34 @@ }, { "name": "aggregate", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", "pipeline": [] }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4481,27 +4922,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on distinct distinct", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4525,32 +4969,35 @@ }, { "name": "distinct", - "object": "collection", + "object": "collection0", "arguments": { "session": "session0", - "fieldName": "_id" + "fieldName": "_id", + "filter": {} }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4559,27 +5006,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on distinct distinct", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4603,32 +5053,35 @@ }, { "name": "distinct", - "object": "collection", + 
"object": "collection0", "arguments": { "session": "session0", - "fieldName": "_id" + "fieldName": "_id", + "filter": {} }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4637,27 +5090,30 @@ } ] } - } + ] }, { "description": "unpin after transient connection error on runCommand insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4681,10 +5137,10 @@ }, { "name": "runCommand", - "object": "database", - "command_name": "insert", + "object": "database0", "arguments": { "session": "session0", + "commandName": "insert", "command": { "insert": "test", "documents": [ @@ -4694,27 +5150,29 @@ ] } }, - "result": { + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { - "name": "assertSessionUnpinned", "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4723,27 +5181,30 @@ } ] } - } + ] }, { "description": "unpin after transient ShutdownInProgress error on runCommand insert", - "useMultipleMongoses": true, "operations": [ { - "name": "startTransaction", - "object": "session0" + "object": "session0", + "name": "startTransaction" }, { + "object": "collection0", "name": "insertOne", - "object": "collection", "arguments": { "session": "session0", "document": { "_id": 3 } }, - "result": { - "insertedId": 3 + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } } }, { @@ -4767,10 +5228,10 @@ }, { "name": "runCommand", - "object": "database", - "command_name": "insert", + "object": "database0", "arguments": { "session": "session0", + "commandName": "insert", "command": { "insert": "test", "documents": [ @@ -4780,27 +5241,220 @@ ] } }, - "result": { + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": 
{ + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { "errorLabelsContain": [ "TransientTransactionError" ] } }, { + "object": "testRunner", "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", "arguments": { "session": "session0" } }, { - "name": "abortTransaction", - "object": "session0" + "object": "session0", + "name": "abortTransaction" } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ { "_id": 1 }, @@ -4809,7 +5463,12 @@ } ] } - } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] } ] } diff --git a/testdata/transactions/legacy/mongos-pin-auto.yml b/testdata/transactions/unified/mongos-pin-auto.yml similarity index 67% rename from testdata/transactions/legacy/mongos-pin-auto.yml rename to testdata/transactions/unified/mongos-pin-auto.yml index 7e2e3e4453..db620c371b 100644 --- a/testdata/transactions/legacy/mongos-pin-auto.yml +++ b/testdata/transactions/unified/mongos-pin-auto.yml @@ -1,219 +1,220 @@ # Autogenerated tests that transient errors in a transaction unpin the session. 
# See mongos-pin-auto-tests.py -runOn: - - - minServerVersion: "4.1.8" - topology: ["sharded"] - # serverless proxy doesn't append error labels to errors in transactions - # caused by failpoints (CLOUDP-88216) - serverless: "forbid" -database_name: &database_name "transaction-tests" -collection_name: &collection_name "test" - -data: &data - - {_id: 1} - - {_id: 2} +description: mongos-pin-auto + +schemaVersion: '1.4' + +runOnRequirements: + - minServerVersion: "4.1.8" + # Note: tests utilize targetedFailPoint, which is incompatible with + # load-balanced and useMultipleMongoses:true + topologies: [ sharded ] + # serverless proxy doesn't append error labels to errors in transactions + # caused by failpoints (CLOUDP-88216) + serverless: "forbid" + +createEntities: + - client: + id: &client0 client0 + useMultipleMongoses: true + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database_name transaction-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection_name test + - session: + id: &session0 session0 + client: *client0 + +initialData: + - collectionName: *collection_name + databaseName: *database_name + documents: &data + - { _id: 1 } + - { _id: 2 } tests: - description: remain pinned after non-transient Interrupted error on insertOne - useMultipleMongoses: true operations: - &startTransaction - name: startTransaction object: session0 + name: startTransaction - &initialCommand + object: *collection0 name: insertOne - object: collection arguments: - session: session0 - document: {_id: 3} - result: - insertedId: 3 - - name: targetedFailPoint - object: testRunner + session: *session0 + document: { _id: 3 } + expectResult: { $$unsetOrMatches: { insertedId: { $$unsetOrMatches: 3 } } } + - object: testRunner + name: targetedFailPoint arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand - mode: {times: 1} + mode: { times: 1 } data: - failCommands: ["insert"] + failCommands: [ "insert" ] errorCode: 11601 - - name: insertOne - object: collection + - object: *collection0 + name: insertOne arguments: - session: session0 - document: - _id: 4 - result: + session: *session0 + document: { _id: 4 } + expectError: errorLabelsOmit: ["TransientTransactionError", "UnknownTransactionCommitResult"] errorCodeName: Interrupted - &assertSessionPinned - name: assertSessionPinned object: testRunner + name: assertSessionPinned arguments: - session: session0 + session: *session0 - &commitTransaction + object: *session0 name: commitTransaction - object: session0 - - expectations: - - command_started_event: - command: - insert: *collection_name - documents: - - _id: 3 - ordered: true - readConcern: - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: true - autocommit: false - writeConcern: - command_name: insert - database_name: *database_name - - command_started_event: - command: - insert: *collection_name - documents: - - _id: 4 - ordered: true - readConcern: - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: - autocommit: false - writeConcern: - command_name: insert - database_name: *database_name - - command_started_event: - command: - commitTransaction: 1 - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: - autocommit: false - writeConcern: - recoveryToken: 42 - command_name: commitTransaction - database_name: admin - - outcome: &outcome - collection: - data: - - {_id: 1} - - {_id: 2} - - {_id: 3} - - - 
description: unpin after transient error within a transaction - useMultipleMongoses: true + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection_name + documents: + - { _id: 3 } + ordered: true + readConcern: { $$exists: false } + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + commandName: insert + databaseName: *database_name + - commandStartedEvent: + command: + insert: *collection_name + documents: + - { _id: 4 } + ordered: true + readConcern: { $$exists: false } + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + commandName: insert + databaseName: *database_name + - commandStartedEvent: + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + recoveryToken: { $$exists: true } + commandName: commitTransaction + databaseName: admin + outcome: + - collectionName: *collection_name + databaseName: *database_name + documents: + - { _id: 1 } + - { _id: 2 } + - { _id: 3 } + + - description: 'unpin after transient error within a transaction' operations: - - &startTransaction - name: startTransaction - object: session0 - - &initialCommand - name: insertOne - object: collection - arguments: - session: session0 - document: - _id: 3 - result: - insertedId: 3 - - name: targetedFailPoint - object: testRunner + - *startTransaction + - *initialCommand + - object: testRunner + name: targetedFailPoint arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: { times: 1 } data: - failCommands: ["insert"] + failCommands: [ "insert" ] closeConnection: true - - name: insertOne - object: collection + - object: *collection0 + name: insertOne arguments: - session: session0 - document: - _id: 4 - result: + session: *session0 + document: { _id: 4 } + expectError: errorLabelsContain: ["TransientTransactionError"] errorLabelsOmit: ["UnknownTransactionCommitResult"] - # Session unpins from the first mongos after the insert error and - # abortTransaction succeeds immediately on any mongos. 
- &assertSessionUnpinned - name: assertSessionUnpinned object: testRunner + name: assertSessionUnpinned arguments: - session: session0 + session: *session0 - &abortTransaction + object: *session0 name: abortTransaction - object: session0 - - expectations: - - command_started_event: - command: - insert: *collection_name - documents: - - _id: 3 - ordered: true - readConcern: - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: true - autocommit: false - writeConcern: - command_name: insert - database_name: *database_name - - command_started_event: - command: - insert: *collection_name - documents: - - _id: 4 - ordered: true - readConcern: - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: - autocommit: false - writeConcern: - command_name: insert - database_name: *database_name - - command_started_event: - command: - abortTransaction: 1 - lsid: session0 - txnNumber: - $numberLong: "1" - startTransaction: - autocommit: false - writeConcern: - recoveryToken: 42 - command_name: abortTransaction - database_name: admin - + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection_name + documents: + - { _id: 3 } + ordered: true + readConcern: { $$exists: false } + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + commandName: insert + databaseName: *database_name + - commandStartedEvent: + command: + insert: *collection_name + documents: + - { _id: 4 } + ordered: true + readConcern: { $$exists: false } + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + commandName: insert + databaseName: *database_name + - commandStartedEvent: + command: + abortTransaction: 1 + lsid: { $$sessionLsid: *session0 } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + recoveryToken: { $$exists: true } + commandName: abortTransaction + databaseName: admin outcome: &outcome - collection: - data: *data + - collectionName: *collection_name + databaseName: *database_name + documents: *data # The rest of the tests in this file test every operation type against # multiple types of transient errors (connection and error code). 
- description: remain pinned after non-transient Interrupted error on insertOne insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -221,25 +222,24 @@ tests: failCommands: ["insert"] errorCode: 11601 - name: insertOne - object: collection + object: *collection0 arguments: - session: session0 - document: {_id: 4} - result: + session: *session0 + document: { _id: 4 } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on insertMany insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -247,25 +247,24 @@ tests: failCommands: ["insert"] errorCode: 11601 - name: insertMany - object: collection + object: *collection0 arguments: - session: session0 - documents: [{_id: 4}, {_id: 5}] - result: + session: *session0 + documents: [ { _id: 4 }, { _id: 5 } ] + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on updateOne update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -273,26 +272,25 @@ tests: failCommands: ["update"] errorCode: 11601 - name: updateOne - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - update: {$inc: {x: 1}} - result: + session: *session0 + filter: { _id: 1 } + update: { $inc: { x: 1 } } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on replaceOne update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -300,26 +298,25 @@ tests: failCommands: ["update"] errorCode: 11601 - name: replaceOne - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - replacement: {y: 1} - result: + session: *session0 + filter: { _id: 1 } + replacement: { y: 1 } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on updateMany update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -327,26 +324,25 @@ tests: failCommands: ["update"] errorCode: 11601 - name: updateMany - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: {$gte: 1}} - update: {$set: {z: 1}} - result: + session: *session0 + filter: { _id: { $gte: 1 } } + update: {$set: { z: 1 } } + expectError: errorLabelsOmit: ["TransientTransactionError"] 
- *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on deleteOne delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -354,25 +350,24 @@ tests: failCommands: ["delete"] errorCode: 11601 - name: deleteOne - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on deleteMany delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -380,25 +375,24 @@ tests: failCommands: ["delete"] errorCode: 11601 - name: deleteMany - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: {$gte: 1}} - result: + session: *session0 + filter: { _id: { $gte: 1 } } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on findOneAndDelete findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -406,25 +400,24 @@ tests: failCommands: ["findAndModify"] errorCode: 11601 - name: findOneAndDelete - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on findOneAndUpdate findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -432,27 +425,26 @@ tests: failCommands: ["findAndModify"] errorCode: 11601 - name: findOneAndUpdate - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - update: {$inc: {x: 1}} + session: *session0 + filter: { _id: 1 } + update: { $inc: { x: 1 } } returnDocument: Before - result: + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on findOneAndReplace findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -460,27 +452,26 @@ tests: failCommands: ["findAndModify"] errorCode: 11601 - name: findOneAndReplace - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - replacement: {y: 1} + session: *session0 + filter: { _id: 1 } + replacement: { y: 1 
} returnDocument: Before - result: + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on bulkWrite insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -488,28 +479,26 @@ tests: failCommands: ["insert"] errorCode: 11601 - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: insertOne - arguments: - document: {_id: 1} - result: + - insertOne: + document: { _id: 1 } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on bulkWrite update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -517,29 +506,27 @@ tests: failCommands: ["update"] errorCode: 11601 - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: updateOne - arguments: - filter: {_id: 1} - update: {$set: {x: 1}} - result: + - updateOne: + filter: { _id: 1 } + update: { $set: { x: 1 } } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on bulkWrite delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -547,28 +534,26 @@ tests: failCommands: ["delete"] errorCode: 11601 - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: deleteOne - arguments: - filter: {_id: 1} - result: + - deleteOne: + filter: { _id: 1 } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on find find - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -576,25 +561,24 @@ tests: failCommands: ["find"] errorCode: 11601 - name: find - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on countDocuments aggregate - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -602,25 +586,24 @@ tests: failCommands: ["aggregate"] errorCode: 11601 - name: countDocuments - object: collection + object: 
*collection0 arguments: - session: session0 + session: *session0 filter: {} - result: + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on aggregate aggregate - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -628,25 +611,24 @@ tests: failCommands: ["aggregate"] errorCode: 11601 - name: aggregate - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 pipeline: [] - result: + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on distinct distinct - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -654,25 +636,25 @@ tests: failCommands: ["distinct"] errorCode: 11601 - name: distinct - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 fieldName: _id - result: + filter: {} + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome - description: remain pinned after non-transient Interrupted error on runCommand insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -680,29 +662,58 @@ tests: failCommands: ["insert"] errorCode: 11601 - name: runCommand - object: database - command_name: insert + object: *database0 arguments: - session: session0 + session: *session0 + commandName: insert command: insert: *collection_name documents: - - _id : 1 - result: + - { _id : 1 } + expectError: errorLabelsOmit: ["TransientTransactionError"] - *assertSessionPinned - *abortTransaction outcome: *outcome + - description: remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + errorCode: 11601 + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsOmit: ["TransientTransactionError"] + - *assertSessionPinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + - description: unpin after transient connection error on insertOne insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -710,25 +721,24 @@ tests: failCommands: ["insert"] closeConnection: true - name: insertOne - object: collection + object: *collection0 arguments: - session: session0 - document: {_id: 4} - result: + 
session: *session0 + document: { _id: 4 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on insertOne insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -736,25 +746,24 @@ tests: failCommands: ["insert"] errorCode: 91 - name: insertOne - object: collection + object: *collection0 arguments: - session: session0 - document: {_id: 4} - result: + session: *session0 + document: { _id: 4 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on insertMany insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -762,25 +771,24 @@ tests: failCommands: ["insert"] closeConnection: true - name: insertMany - object: collection + object: *collection0 arguments: - session: session0 - documents: [{_id: 4}, {_id: 5}] - result: + session: *session0 + documents: [ { _id: 4 }, { _id: 5 } ] + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on insertMany insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -788,25 +796,24 @@ tests: failCommands: ["insert"] errorCode: 91 - name: insertMany - object: collection + object: *collection0 arguments: - session: session0 - documents: [{_id: 4}, {_id: 5}] - result: + session: *session0 + documents: [ { _id: 4 }, { _id: 5 } ] + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on updateOne update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -814,26 +821,25 @@ tests: failCommands: ["update"] closeConnection: true - name: updateOne - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - update: {$inc: {x: 1}} - result: + session: *session0 + filter: { _id: 1 } + update: { $inc: { x: 1 } } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on updateOne update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -841,26 +847,25 @@ tests: failCommands: ["update"] errorCode: 91 - name: updateOne - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - update: {$inc: {x: 1}} - result: + session: 
*session0 + filter: { _id: 1 } + update: { $inc: { x: 1 } } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on replaceOne update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -868,26 +873,25 @@ tests: failCommands: ["update"] closeConnection: true - name: replaceOne - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - replacement: {y: 1} - result: + session: *session0 + filter: { _id: 1 } + replacement: { y: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on replaceOne update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -895,26 +899,25 @@ tests: failCommands: ["update"] errorCode: 91 - name: replaceOne - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - replacement: {y: 1} - result: + session: *session0 + filter: { _id: 1 } + replacement: { y: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on updateMany update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -922,26 +925,25 @@ tests: failCommands: ["update"] closeConnection: true - name: updateMany - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: {$gte: 1}} - update: {$set: {z: 1}} - result: + session: *session0 + filter: { _id: { $gte: 1 } } + update: {$set: { z: 1 } } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on updateMany update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -949,26 +951,25 @@ tests: failCommands: ["update"] errorCode: 91 - name: updateMany - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: {$gte: 1}} - update: {$set: {z: 1}} - result: + session: *session0 + filter: { _id: { $gte: 1 } } + update: {$set: { z: 1 } } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on deleteOne delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -976,25 +977,24 @@ tests: failCommands: ["delete"] closeConnection: true - name: deleteOne - object: 
collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on deleteOne delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1002,25 +1002,24 @@ tests: failCommands: ["delete"] errorCode: 91 - name: deleteOne - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on deleteMany delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1028,25 +1027,24 @@ tests: failCommands: ["delete"] closeConnection: true - name: deleteMany - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: {$gte: 1}} - result: + session: *session0 + filter: { _id: { $gte: 1 } } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on deleteMany delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1054,25 +1052,24 @@ tests: failCommands: ["delete"] errorCode: 91 - name: deleteMany - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: {$gte: 1}} - result: + session: *session0 + filter: { _id: { $gte: 1 } } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on findOneAndDelete findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1080,25 +1077,24 @@ tests: failCommands: ["findAndModify"] closeConnection: true - name: findOneAndDelete - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on findOneAndDelete findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1106,25 +1102,24 @@ tests: failCommands: ["findAndModify"] errorCode: 91 - name: findOneAndDelete - object: collection + object: *collection0 arguments: - session: 
session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on findOneAndUpdate findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1132,27 +1127,26 @@ tests: failCommands: ["findAndModify"] closeConnection: true - name: findOneAndUpdate - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - update: {$inc: {x: 1}} + session: *session0 + filter: { _id: 1 } + update: { $inc: { x: 1 } } returnDocument: Before - result: + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on findOneAndUpdate findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1160,27 +1154,26 @@ tests: failCommands: ["findAndModify"] errorCode: 91 - name: findOneAndUpdate - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - update: {$inc: {x: 1}} + session: *session0 + filter: { _id: 1 } + update: { $inc: { x: 1 } } returnDocument: Before - result: + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on findOneAndReplace findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1188,27 +1181,26 @@ tests: failCommands: ["findAndModify"] closeConnection: true - name: findOneAndReplace - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - replacement: {y: 1} + session: *session0 + filter: { _id: 1 } + replacement: { y: 1 } returnDocument: Before - result: + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on findOneAndReplace findAndModify - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1216,27 +1208,26 @@ tests: failCommands: ["findAndModify"] errorCode: 91 - name: findOneAndReplace - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - replacement: {y: 1} + session: *session0 + filter: { _id: 1 } + replacement: { y: 1 } returnDocument: Before - result: + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on bulkWrite insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: 
session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1244,28 +1235,26 @@ tests: failCommands: ["insert"] closeConnection: true - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: insertOne - arguments: - document: {_id: 1} - result: + - insertOne: + document: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on bulkWrite insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1273,28 +1262,26 @@ tests: failCommands: ["insert"] errorCode: 91 - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: insertOne - arguments: - document: {_id: 1} - result: + - insertOne: + document: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on bulkWrite update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1302,29 +1289,27 @@ tests: failCommands: ["update"] closeConnection: true - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: updateOne - arguments: - filter: {_id: 1} - update: {$set: {x: 1}} - result: + - updateOne: + filter: { _id: 1 } + update: { $set: { x: 1 } } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on bulkWrite update - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1332,29 +1317,27 @@ tests: failCommands: ["update"] errorCode: 91 - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: updateOne - arguments: - filter: {_id: 1} - update: {$set: {x: 1}} - result: + - updateOne: + filter: { _id: 1 } + update: { $set: { x: 1 } } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on bulkWrite delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1362,28 +1345,26 @@ tests: failCommands: ["delete"] closeConnection: true - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: deleteOne - arguments: - filter: {_id: 1} - result: + - deleteOne: + filter: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - 
*abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on bulkWrite delete - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1391,28 +1372,26 @@ tests: failCommands: ["delete"] errorCode: 91 - name: bulkWrite - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 requests: - - name: deleteOne - arguments: - filter: {_id: 1} - result: + - deleteOne: + filter: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on find find - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1420,25 +1399,24 @@ tests: failCommands: ["find"] closeConnection: true - name: find - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on find find - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1446,25 +1424,24 @@ tests: failCommands: ["find"] errorCode: 91 - name: find - object: collection + object: *collection0 arguments: - session: session0 - filter: {_id: 1} - result: + session: *session0 + filter: { _id: 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on countDocuments aggregate - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1472,25 +1449,24 @@ tests: failCommands: ["aggregate"] closeConnection: true - name: countDocuments - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 filter: {} - result: + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on countDocuments aggregate - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1498,25 +1474,24 @@ tests: failCommands: ["aggregate"] errorCode: 91 - name: countDocuments - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 filter: {} - result: + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on aggregate aggregate - useMultipleMongoses: true operations: - 
*startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1524,25 +1499,24 @@ tests: failCommands: ["aggregate"] closeConnection: true - name: aggregate - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 pipeline: [] - result: + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on aggregate aggregate - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1550,25 +1524,24 @@ tests: failCommands: ["aggregate"] errorCode: 91 - name: aggregate - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 pipeline: [] - result: + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on distinct distinct - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1576,25 +1549,25 @@ tests: failCommands: ["distinct"] closeConnection: true - name: distinct - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 fieldName: _id - result: + filter: {} + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on distinct distinct - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1602,25 +1575,25 @@ tests: failCommands: ["distinct"] errorCode: 91 - name: distinct - object: collection + object: *collection0 arguments: - session: session0 + session: *session0 fieldName: _id - result: + filter: {} + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient connection error on runCommand insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1628,29 +1601,28 @@ tests: failCommands: ["insert"] closeConnection: true - name: runCommand - object: database - command_name: insert + object: *database0 arguments: - session: session0 + session: *session0 + commandName: insert command: insert: *collection_name documents: - - _id : 1 - result: + - { _id : 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome - description: unpin after transient ShutdownInProgress error on runCommand insert - useMultipleMongoses: true operations: - *startTransaction - *initialCommand - name: targetedFailPoint object: testRunner arguments: - session: session0 + session: *session0 
failPoint: configureFailPoint: failCommand mode: {times: 1} @@ -1658,17 +1630,76 @@ tests: failCommands: ["insert"] errorCode: 91 - name: runCommand - object: database - command_name: insert + object: *database0 arguments: - session: session0 + session: *session0 + commandName: insert command: insert: *collection_name documents: - - _id : 1 - result: + - { _id : 1 } + expectError: errorLabelsContain: ["TransientTransactionError"] - *assertSessionUnpinned - *abortTransaction outcome: *outcome + - description: unpin after transient connection error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + closeConnection: true + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + - *assertSessionUnpinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + + - description: unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + errorCode: 91 + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + - *assertSessionUnpinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" diff --git a/testdata/versioned-api/crud-api-version-1.json b/testdata/versioned-api/crud-api-version-1.json index a387d0587e..23ef59a6d9 100644 --- a/testdata/versioned-api/crud-api-version-1.json +++ b/testdata/versioned-api/crud-api-version-1.json @@ -50,7 +50,8 @@ }, "apiDeprecationErrors": true } - ] + ], + "namespace": "versioned-api-tests.test" }, "initialData": [ { @@ -426,6 +427,86 @@ } ] }, + { + "description": "client bulkWrite appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "versioned-api-tests.test", + "document": { + "_id": 6, + "x": 6 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 6 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 6, + "x": 6 + } + } + ], + "nsInfo": [ + { + "ns": "versioned-api-tests.test" + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, { "description": 
"countDocuments appends declared API version", "operations": [ diff --git a/testdata/versioned-api/crud-api-version-1.yml b/testdata/versioned-api/crud-api-version-1.yml index 50135c1458..01e0323420 100644 --- a/testdata/versioned-api/crud-api-version-1.yml +++ b/testdata/versioned-api/crud-api-version-1.yml @@ -34,6 +34,7 @@ _yamlAnchors: apiVersion: "1" apiStrict: { $$unsetOrMatches: false } apiDeprecationErrors: true + namespace: &namespace "versioned-api-tests.test" initialData: - collectionName: *collectionName @@ -155,6 +156,47 @@ tests: multi: { $$unsetOrMatches: false } upsert: true <<: *expectedApiVersion + + - description: "client bulkWrite appends declared API version" + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + serverless: forbid + operations: + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 6, x: 6 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 6 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 6, x: 6 } + nsInfo: + - { ns: *namespace } + <<: *expectedApiVersion - description: "countDocuments appends declared API version" operations: diff --git a/x/mongo/driver/batch_cursor.go b/x/mongo/driver/batch_cursor.go index 2aa0aca694..656aeeddb2 100644 --- a/x/mongo/driver/batch_cursor.go +++ b/x/mongo/driver/batch_cursor.go @@ -71,25 +71,29 @@ type CursorResponse struct { postBatchResumeToken bsoncore.Document } -// NewCursorResponse constructs a cursor response from the given response and -// server. If the provided database response does not contain a cursor, it -// returns ErrNoCursor. -// -// NewCursorResponse can be used within the ProcessResponse method for an operation. -func NewCursorResponse(info ResponseInfo) (CursorResponse, error) { - response := info.ServerResponse +// ExtractCursorDocument retrieves cursor document from a database response. If the +// provided response does not contain a cursor, it returns ErrNoCursor. +func ExtractCursorDocument(response bsoncore.Document) (bsoncore.Document, error) { cur, err := response.LookupErr("cursor") if errors.Is(err, bsoncore.ErrElementNotFound) { - return CursorResponse{}, ErrNoCursor + return nil, ErrNoCursor } if err != nil { - return CursorResponse{}, fmt.Errorf("error getting cursor from database response: %w", err) + return nil, fmt.Errorf("error getting cursor from database response: %w", err) } curDoc, ok := cur.DocumentOK() if !ok { - return CursorResponse{}, fmt.Errorf("cursor should be an embedded document but is BSON type %s", cur.Type) + return nil, fmt.Errorf("cursor should be an embedded document but is BSON type %s", cur.Type) } - elems, err := curDoc.Elements() + return curDoc, nil +} + +// NewCursorResponse constructs a cursor response from the given cursor document +// extracted from a database response. +// +// NewCursorResponse can be used within the ProcessResponse method for an operation. 
+func NewCursorResponse(response bsoncore.Document, info ResponseInfo) (CursorResponse, error) { + elems, err := response.Elements() if err != nil { return CursorResponse{}, fmt.Errorf("error getting elements from cursor: %w", err) } @@ -115,15 +119,17 @@ func NewCursorResponse(info ResponseInfo) (CursorResponse, error) { curresp.Database = database curresp.Collection = collection case "id": - curresp.ID, ok = elem.Value().Int64OK() + id, ok := elem.Value().Int64OK() if !ok { return CursorResponse{}, fmt.Errorf("id should be an int64 but it is a BSON %s", elem.Value().Type) } + curresp.ID = id case "postBatchResumeToken": - curresp.postBatchResumeToken, ok = elem.Value().DocumentOK() + token, ok := elem.Value().DocumentOK() if !ok { return CursorResponse{}, fmt.Errorf("post batch resume token should be a document but it is a BSON %s", elem.Value().Type) } + curresp.postBatchResumeToken = token } } @@ -393,8 +399,7 @@ func (bc *BatchCursor) getMore(ctx context.Context) { }, Database: bc.database, Deployment: bc.getOperationDeployment(), - ProcessResponseFn: func(info ResponseInfo) error { - response := info.ServerResponse + ProcessResponseFn: func(_ context.Context, response bsoncore.Document, _ ResponseInfo) error { id, ok := response.Lookup("cursor", "id").Int64OK() if !ok { return fmt.Errorf("cursor.id should be an int64 but is a BSON %s", response.Lookup("cursor", "id").Type) diff --git a/x/mongo/driver/batches.go b/x/mongo/driver/batches.go index be430afa15..0fd0a218a5 100644 --- a/x/mongo/driver/batches.go +++ b/x/mongo/driver/batches.go @@ -7,70 +7,114 @@ package driver import ( - "errors" + "io" + "strconv" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage" ) -// ErrDocumentTooLarge occurs when a document that is larger than the maximum size accepted by a -// server is passed to an insert command. -var ErrDocumentTooLarge = errors.New("an inserted document is too large") - // Batches contains the necessary information to batch split an operation. This is only used for write // operations. type Batches struct { Identifier string Documents []bsoncore.Document - Current []bsoncore.Document Ordered *bool -} - -// Valid returns true if Batches contains both an identifier and the length of Documents is greater -// than zero. -func (b *Batches) Valid() bool { return b != nil && b.Identifier != "" && len(b.Documents) > 0 } - -// ClearBatch clears the Current batch. This must be called before AdvanceBatch will advance to the -// next batch. -func (b *Batches) ClearBatch() { b.Current = b.Current[:0] } -// AdvanceBatch splits the next batch using maxCount and targetBatchSize. This method will do nothing if -// the current batch has not been cleared. We do this so that when this is called during execute we -// can call it without first needing to check if we already have a batch, which makes the code -// simpler and makes retrying easier. -// The maxDocSize parameter is used to check that any one document is not too large. If the first document is bigger -// than targetBatchSize but smaller than maxDocSize, a batch of size 1 containing that document will be created. -func (b *Batches) AdvanceBatch(maxCount, targetBatchSize, maxDocSize int) error { - if len(b.Current) > 0 { - return nil - } + offset int +} - if maxCount <= 0 { - maxCount = 1 +// AppendBatchSequence appends dst with document sequence of batches as long as the limits of max count, max +// document size, or total size allows. 
It returns the number of batches appended, the new appended slice, and +// any error raised. It returns the origenal input slice if nothing can be appends within the limits. +func (b *Batches) AppendBatchSequence(dst []byte, maxCount, maxDocSize, _ int) (int, []byte, error) { + if b.Size() == 0 { + return 0, dst, io.EOF } - - splitAfter := 0 - size := 0 - for i, doc := range b.Documents { - if i == maxCount { + l := len(dst) + var idx int32 + dst = wiremessage.AppendMsgSectionType(dst, wiremessage.DocumentSequence) + idx, dst = bsoncore.ReserveLength(dst) + dst = append(dst, b.Identifier...) + dst = append(dst, 0x00) + var size int + var n int + for i := b.offset; i < len(b.Documents); i++ { + if n == maxCount { break } + doc := b.Documents[i] if len(doc) > maxDocSize { - return ErrDocumentTooLarge + break } - if size+len(doc) > targetBatchSize { + size += len(doc) + if size > maxDocSize { break } + dst = append(dst, doc...) + n++ + } + if n == 0 { + return 0, dst[:l], nil + } + dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:]))) + return n, dst, nil +} +// AppendBatchArray appends dst with array of batches as long as the limits of max count, max document size, or +// total size allows. It returns the number of batches appended, the new appended slice, and any error raised. It +// returns the origenal input slice if nothing can be appends within the limits. +func (b *Batches) AppendBatchArray(dst []byte, maxCount, maxDocSize, _ int) (int, []byte, error) { + if b.Size() == 0 { + return 0, dst, io.EOF + } + l := len(dst) + aidx, dst := bsoncore.AppendArrayElementStart(dst, b.Identifier) + var size int + var n int + for i := b.offset; i < len(b.Documents); i++ { + if n == maxCount { + break + } + doc := b.Documents[i] + if len(doc) > maxDocSize { + break + } size += len(doc) - splitAfter++ + if size > maxDocSize { + break + } + dst = bsoncore.AppendDocumentElement(dst, strconv.Itoa(n), doc) + n++ + } + if n == 0 { + return 0, dst[:l], nil } + var err error + dst, err = bsoncore.AppendArrayEnd(dst, aidx) + if err != nil { + return 0, nil, err + } + return n, dst, nil +} + +// IsOrdered indicates if the batches are ordered. +func (b *Batches) IsOrdered() *bool { + return b.Ordered +} - // if there are no documents, take the first one. - // this can happen if there is a document that is smaller than maxDocSize but greater than targetBatchSize. - if splitAfter == 0 { - splitAfter = 1 +// AdvanceBatches advances the batches with the given input. +func (b *Batches) AdvanceBatches(n int) { + b.offset += n + if b.offset > len(b.Documents) { + b.offset = len(b.Documents) } +} - b.Current, b.Documents = b.Documents[:splitAfter], b.Documents[splitAfter:] - return nil +// Size returns the size of batches remained. 
+func (b *Batches) Size() int { + if b.offset > len(b.Documents) { + return 0 + } + return len(b.Documents) - b.offset } diff --git a/x/mongo/driver/batches_test.go b/x/mongo/driver/batches_test.go index 353368a33c..59c52f8514 100644 --- a/x/mongo/driver/batches_test.go +++ b/x/mongo/driver/batches_test.go @@ -9,129 +9,93 @@ package driver import ( "testing" - "github.com/google/go-cmp/cmp" "go.mongodb.org/mongo-driver/internal/assert" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" + "go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage" ) -func TestBatches(t *testing.T) { - t.Run("Valid", func(t *testing.T) { - testCases := []struct { - name string - batches *Batches - want bool - }{ - {"nil", nil, false}, - {"missing identifier", &Batches{}, false}, - {"no documents", &Batches{Identifier: "documents"}, false}, - {"valid", &Batches{Identifier: "documents", Documents: make([]bsoncore.Document, 5)}, true}, - } +func newTestBatches(t *testing.T) *Batches { + t.Helper() + return &Batches{ + Identifier: "foobar", + Documents: []bsoncore.Document{ + []byte("Lorem ipsum dolor sit amet"), + []byte("consectetur adipiscing elit"), + }, + } +} - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - want := tc.want - got := tc.batches.Valid() - if got != want { - t.Errorf("Did not get expected result from Valid. got %t; want %t", got, want) - } - }) - } - }) - t.Run("ClearBatch", func(t *testing.T) { - batches := &Batches{Identifier: "documents", Current: make([]bsoncore.Document, 2, 10)} - if len(batches.Current) != 2 { - t.Fatalf("Length of current batch should be 2, but is %d", len(batches.Current)) - } - batches.ClearBatch() - if len(batches.Current) != 0 { - t.Fatalf("Length of current batch should be 0, but is %d", len(batches.Current)) - } +func TestAdvancing(t *testing.T) { + batches := newTestBatches(t) + batches.AdvanceBatches(3) + size := batches.Size() + assert.Equal(t, 0, size, "expected Size(): %d, got: %d", 1, size) +} + +func TestAppendBatchSequence(t *testing.T) { + t.Run("Append 0", func(t *testing.T) { + batches := newTestBatches(t) + + got := []byte{42} + var n int + var err error + n, got, err = batches.AppendBatchSequence(got, 2, len(batches.Documents[0])-1, 0) + assert.NoError(t, err) + assert.Equal(t, 0, n) + + assert.Equal(t, []byte{42}, got) }) - t.Run("AdvanceBatch", func(t *testing.T) { - documents := make([]bsoncore.Document, 0) - for i := 0; i < 5; i++ { - doc := make(bsoncore.Document, 100) - documents = append(documents, doc) - } + t.Run("Append 1", func(t *testing.T) { + batches := newTestBatches(t) - testCases := []struct { - name string - batches *Batches - maxCount int - targetBatchSize int - maxDocSize int - err error - want *Batches - }{ - { - "current batch non-zero", - &Batches{Current: make([]bsoncore.Document, 2, 10)}, - 0, 0, 0, nil, - &Batches{Current: make([]bsoncore.Document, 2, 10)}, - }, - { - // all of the documents in the batch fit in targetBatchSize so the batch is created successfully - "documents fit in targetBatchSize", - &Batches{Documents: documents}, - 10, 600, 1000, nil, - &Batches{Documents: documents[:0], Current: documents[0:]}, - }, - { - // the first doc is bigger than targetBatchSize but smaller than maxDocSize so it is taken alone - "first document larger than targetBatchSize, smaller than maxDocSize", - &Batches{Documents: documents}, - 10, 5, 100, nil, - &Batches{Documents: documents[1:], Current: documents[:1]}, - }, - } + got := []byte{42} + var n int + var err error + n, got, err = 
batches.AppendBatchSequence(got, 2, len(batches.Documents[0]), 0) + assert.NoError(t, err) + assert.Equal(t, 1, n) - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - err := tc.batches.AdvanceBatch(tc.maxCount, tc.targetBatchSize, tc.maxDocSize) - if !cmp.Equal(err, tc.err, cmp.Comparer(compareErrors)) { - t.Errorf("Errors do not match. got %v; want %v", err, tc.err) - } - if !cmp.Equal(tc.batches, tc.want) { - t.Errorf("Batches is not in correct state after AdvanceBatch. got %v; want %v", tc.batches, tc.want) - } - }) - } + var idx int32 + dst := []byte{42} + dst = wiremessage.AppendMsgSectionType(dst, wiremessage.DocumentSequence) + idx, dst = bsoncore.ReserveLength(dst) + dst = append(dst, "foobar"...) + dst = append(dst, 0x00) + dst = append(dst, "Lorem ipsum dolor sit amet"...) + dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:]))) + assert.Equal(t, dst, got) + }) +} - t.Run("middle document larger than targetBatchSize, smaller than maxDocSize", func(t *testing.T) { - // a batch is made but one document is too big, so everything before it is taken. - // on the second call to AdvanceBatch, only the large document is taken +func TestAppendBatchArray(t *testing.T) { + t.Run("Append 0", func(t *testing.T) { + batches := newTestBatches(t) - middleLargeDoc := make([]bsoncore.Document, 0) - for i := 0; i < 5; i++ { - doc := make(bsoncore.Document, 100) - middleLargeDoc = append(middleLargeDoc, doc) - } - largeDoc := make(bsoncore.Document, 900) - middleLargeDoc[2] = largeDoc - batches := &Batches{Documents: middleLargeDoc} - maxCount := 10 - targetSize := 600 - maxDocSize := 1000 + got := []byte{42} + var n int + var err error + n, got, err = batches.AppendBatchArray(got, 2, len(batches.Documents[0])-1, 0) + assert.NoError(t, err) + assert.Equal(t, 0, n) - // first batch should take first 2 docs (size 100 each) - err := batches.AdvanceBatch(maxCount, targetSize, maxDocSize) - assert.Nil(t, err, "AdvanceBatch error: %v", err) - want := &Batches{Current: middleLargeDoc[:2], Documents: middleLargeDoc[2:]} - assert.Equal(t, want, batches, "expected batches %v, got %v", want, batches) + assert.Equal(t, []byte{42}, got) + }) + t.Run("Append 1", func(t *testing.T) { + batches := newTestBatches(t) - // second batch should take single large doc (size 900) - batches.ClearBatch() - err = batches.AdvanceBatch(maxCount, targetSize, maxDocSize) - assert.Nil(t, err, "AdvanceBatch error: %v", err) - want = &Batches{Current: middleLargeDoc[2:3], Documents: middleLargeDoc[3:]} - assert.Equal(t, want, batches, "expected batches %v, got %v", want, batches) + got := []byte{42} + var n int + var err error + n, got, err = batches.AppendBatchArray(got, 2, len(batches.Documents[0]), 0) + assert.NoError(t, err) + assert.Equal(t, 1, n) - // last batch should take last 2 docs (size 100 each) - batches.ClearBatch() - err = batches.AdvanceBatch(maxCount, targetSize, maxDocSize) - assert.Nil(t, err, "AdvanceBatch error: %v", err) - want = &Batches{Current: middleLargeDoc[3:], Documents: middleLargeDoc[:0]} - assert.Equal(t, want, batches, "expected batches %v, got %v", want, batches) - }) + var idx int32 + dst := []byte{42} + idx, dst = bsoncore.AppendArrayElementStart(dst, "foobar") + dst = bsoncore.AppendDocumentElement(dst, "0", []byte("Lorem ipsum dolor sit amet")) + dst, err = bsoncore.AppendArrayEnd(dst, idx) + assert.NoError(t, err) + assert.Equal(t, dst, got) }) } diff --git a/x/mongo/driver/operation.go b/x/mongo/driver/operation.go index ec6f69eca0..e491564c83 100644 --- 
a/x/mongo/driver/operation.go +++ b/x/mongo/driver/operation.go @@ -47,13 +47,16 @@ var ( ErrReplyDocumentMismatch = errors.New("number of documents returned does not match numberReturned field") // ErrNonPrimaryReadPref is returned when a read is attempted in a transaction with a non-primary read preference. ErrNonPrimaryReadPref = errors.New("read preference in a transaction must be primary") + // ErrDocumentTooLarge occurs when a document that is larger than the maximum size accepted by a + // server is passed to an insert command. + ErrDocumentTooLarge = errors.New("an inserted document is too large") // errDatabaseNameEmpty occurs when a database name is not provided. errDatabaseNameEmpty = errors.New("database name cannot be empty") ) const ( // maximum BSON object size when client side encryption is enabled - cryptMaxBsonObjectSize uint32 = 2097152 + cryptMaxBsonObjectSize int = 2097152 // minimum wire version necessary to use automatic encryption cryptMinWireVersion int32 = 8 // minimum wire version necessary to use read snapshots @@ -92,16 +95,17 @@ type opReply struct { // startedInformation keeps track of all of the information necessary for monitoring started events. type startedInformation struct { - cmd bsoncore.Document - requestID int32 - cmdName string - documentSequenceIncluded bool - connID string - driverConnectionID uint64 // TODO(GODRIVER-2824): change type to int64. - serverConnID *int64 - redacted bool - serviceID *primitive.ObjectID - serverAddress address.Address + cmd bsoncore.Document + requestID int32 + cmdName string + documentSequence []byte + processedBatches int + connID string + driverConnectionID uint64 // TODO(GODRIVER-2824): change type to int64. + serverConnID *int64 + redacted bool + serviceID *primitive.ObjectID + serverAddress address.Address } // finishedInformation keeps track of all of the information necessary for monitoring success and failure events. @@ -151,28 +155,27 @@ func (info finishedInformation) success() bool { // ResponseInfo contains the context required to parse a server response. type ResponseInfo struct { - ServerResponse bsoncore.Document Server Server Connection Connection ConnectionDescription description.Server CurrentIndex int + Error error } -func redactStartedInformationCmd(op Operation, info startedInformation) bson.Raw { +func redactStartedInformationCmd(info startedInformation) bson.Raw { var cmdCopy bson.Raw // Make a copy of the command. Redact if the command is security // sensitive and cannot be monitored. If there was a type 1 payload for // the current batch, convert it to a BSON array if !info.redacted { - cmdCopy = make([]byte, len(info.cmd)) - copy(cmdCopy, info.cmd) + cmdCopy = make([]byte, 0, len(info.cmd)) + cmdCopy = append(cmdCopy, info.cmd...) - if info.documentSequenceIncluded { + if len(info.documentSequence) > 0 { // remove 0 byte at end cmdCopy = cmdCopy[:len(info.cmd)-1] - cmdCopy = op.addBatchArray(cmdCopy) - + cmdCopy = append(cmdCopy, info.documentSequence...) // add back 0 byte and update length cmdCopy, _ = bsoncore.AppendDocumentEnd(cmdCopy, 0) } @@ -218,7 +221,7 @@ type Operation struct { // ProcessResponseFn is called after a response to the command is returned. The server is // provided for types like Cursor that are required to run subsequent commands using the same // server. 
- ProcessResponseFn func(ResponseInfo) error + ProcessResponseFn func(context.Context, bsoncore.Document, ResponseInfo) error // Selector is the server selector that's used during both initial server selection and // subsequent selection for retries. Depending on the Deployment implementation, the @@ -276,7 +279,13 @@ type Operation struct { // has more documents than can fit in a single command. This should only be specified for // commands that are batch compatible. For more information, please refer to the definition of // Batches. - Batches *Batches + Batches interface { + AppendBatchSequence(dst []byte, maxCount int, maxDocSize int, totalSize int) (int, []byte, error) + AppendBatchArray(dst []byte, maxCount int, maxDocSize int, totalSize int) (int, []byte, error) + IsOrdered() *bool + AdvanceBatches(n int) + Size() int + } // Legacy sets the legacy type for this operation. There are only 3 types that require legacy // support: find, getMore, and killCursors. For more information about LegacyOperationKind, @@ -545,12 +554,12 @@ func (op Operation) Execute(ctx context.Context) error { retries = -1 } } - } - // If context is a Timeout context, automatically set retries to -1 (infinite) if retrying is - // enabled. - retryEnabled := op.RetryMode != nil && op.RetryMode.Enabled() - if csot.IsTimeoutContext(ctx) && retryEnabled { - retries = -1 + + // If context is a Timeout context, automatically set retries to -1 (infinite) if retrying is + // enabled. + if csot.IsTimeoutContext(ctx) && op.RetryMode.Enabled() { + retries = -1 + } } var srvr Server @@ -559,7 +568,6 @@ func (op Operation) Execute(ctx context.Context) error { var operationErr WriteCommandError var prevErr error var prevIndefiniteErr error - batching := op.Batches.Valid() retrySupported := false first := true currIndex := 0 @@ -598,7 +606,7 @@ func (op Operation) Execute(ctx context.Context) error { if conn != nil { // If we are dealing with a sharded cluster, then mark the failed server // as "deprioritized". - if desc := conn.Description; desc != nil && op.Deployment.Kind() == description.Sharded { + if op.Deployment.Kind() == description.Sharded { deprioritizedServers = []description.Server{conn.Description()} } @@ -685,14 +693,10 @@ func (op Operation) Execute(ctx context.Context) error { // Calling IncrementTxnNumber() for server descriptions or topologies that do not // support retries (e.g. standalone topologies) will cause server errors. Only do this // check for the first attempt to keep retried writes in the same transaction. - if retrySupported && op.RetryMode != nil && op.Type == Write && op.Client != nil { - op.Client.RetryWrite = false - if op.RetryMode.Enabled() { - op.Client.RetryWrite = true - if !op.Client.Committing && !op.Client.Aborting { - op.Client.IncrementTxnNumber() - } - } + retryEnabled := op.RetryMode != nil && op.RetryMode.Enabled() + needToIncrease := op.Client != nil && !op.Client.Committing && !op.Client.Aborting + if retrySupported && op.Type == Write && retryEnabled && needToIncrease { + op.Client.IncrementTxnNumber() } first = false @@ -711,30 +715,14 @@ func (op Operation) Execute(ctx context.Context) error { desc := description.SelectedServer{Server: conn.Description(), Kind: op.Deployment.Kind()} - if batching { - targetBatchSize := desc.MaxDocumentSize - maxDocSize := desc.MaxDocumentSize - if op.shouldEncrypt() { - // For client-side encryption, we want the batch to be split at 2 MiB instead of 16MiB. 
- // If there's only one document in the batch, it can be up to 16MiB, so we set target batch size to - // 2MiB but max document size to 16MiB. This will allow the AdvanceBatch call to create a batch - // with a single large document. - targetBatchSize = cryptMaxBsonObjectSize - } - - err = op.Batches.AdvanceBatch(int(desc.MaxBatchCount), int(targetBatchSize), int(maxDocSize)) - if err != nil { - // TODO(GODRIVER-982): Should we also be returning operationErr? - return err - } - } - + var moreToCome bool var startedInfo startedInformation - *wm, startedInfo, err = op.createWireMessage(ctx, maxTimeMS, (*wm)[:0], desc, conn, requestID) + *wm, moreToCome, startedInfo, err = op.createWireMessage(ctx, maxTimeMS, (*wm)[:0], desc, conn, requestID) if err != nil { return err } + retryEnabled := op.RetryMode != nil && op.RetryMode.Enabled() // set extra data and send event if possible startedInfo.connID = conn.ID() @@ -756,9 +744,6 @@ func (op Operation) Execute(ctx context.Context) error { op.publishStartedEvent(ctx, startedInfo) - // get the moreToCome flag information before we compress - moreToCome := wiremessage.IsMsgMoreToCome(*wm) - // compress wiremessage if allowed if compressor, ok := conn.(Compressor); ok && op.canCompress(startedInfo.cmdName) { b := memoryPool.Get().(*[]byte) @@ -827,7 +812,6 @@ func (op Operation) Execute(ctx context.Context) error { // TODO(GODRIVER-2579): When refactoring the "Execute" method, consider creating a separate method for the // error handling logic below. This will remove the necessity of the "checkError" goto label. checkError: - var perr error switch tt := err.(type) { case WriteCommandError: if e := err.(WriteCommandError); retrySupported && op.Type == Write && e.UnsupportedStorageEngine() { @@ -848,7 +832,7 @@ func (op Operation) Execute(ctx context.Context) error { // If retries are supported for the current operation on the first server description, // the error is considered retryable, and there are retries remaining (negative retries // means retry indefinitely), then retry the operation. - if retrySupported && retryableErr && retries != 0 { + if retrySupported && retryEnabled && retryableErr && retries != 0 { if op.Client != nil && op.Client.Committing { // Apply majority write concern for retries op.Client.UpdateCommitTransactionWriteConcern() @@ -871,25 +855,26 @@ func (op Operation) Execute(ctx context.Context) error { // If the operation isn't being retried, process the response if op.ProcessResponseFn != nil { info := ResponseInfo{ - ServerResponse: res, Server: srvr, Connection: conn, ConnectionDescription: desc.Server, CurrentIndex: currIndex, + Error: tt, } - _ = op.ProcessResponseFn(info) - } - - if batching && len(tt.WriteErrors) > 0 && currIndex > 0 { - for i := range tt.WriteErrors { - tt.WriteErrors[i].Index += int64(currIndex) - } + _ = op.ProcessResponseFn(ctx, res, info) } // If batching is enabled and either ordered is the default (which is true) or // explicitly set to true and we have write errors, return the errors. - if batching && (op.Batches.Ordered == nil || *op.Batches.Ordered) && len(tt.WriteErrors) > 0 { - return tt + if op.Batches != nil && len(tt.WriteErrors) > 0 { + if currIndex > 0 { + for i := range tt.WriteErrors { + tt.WriteErrors[i].Index += int64(currIndex) + } + } + if isOrdered := op.Batches.IsOrdered(); isOrdered == nil || *isOrdered { + return tt + } } if op.Client != nil && op.Client.Committing && tt.WriteConcernError != nil { // When running commitTransaction we return WriteConcernErrors as an Error. 
@@ -967,7 +952,7 @@ func (op Operation) Execute(ctx context.Context) error { // If retries are supported for the current operation on the first server description, // the error is considered retryable, and there are retries remaining (negative retries // means retry indefinitely), then retry the operation. - if retrySupported && retryableErr && retries != 0 { + if retrySupported && retryEnabled && retryableErr && retries != 0 { if op.Client != nil && op.Client.Committing { // Apply majority write concern for retries op.Client.UpdateCommitTransactionWriteConcern() @@ -990,13 +975,13 @@ func (op Operation) Execute(ctx context.Context) error { // If the operation isn't being retried, process the response if op.ProcessResponseFn != nil { info := ResponseInfo{ - ServerResponse: res, Server: srvr, Connection: conn, ConnectionDescription: desc.Server, CurrentIndex: currIndex, + Error: tt, } - _ = op.ProcessResponseFn(info) + _ = op.ProcessResponseFn(ctx, res, info) } if op.Client != nil && op.Client.Committing && (retryableErr || tt.Code == 50) { @@ -1010,27 +995,27 @@ func (op Operation) Execute(ctx context.Context) error { } if op.ProcessResponseFn != nil { info := ResponseInfo{ - ServerResponse: res, Server: srvr, Connection: conn, ConnectionDescription: desc.Server, CurrentIndex: currIndex, + Error: tt, + } + perr := op.ProcessResponseFn(ctx, res, info) + if perr != nil { + return perr } - perr = op.ProcessResponseFn(info) - } - if perr != nil { - return perr } default: if op.ProcessResponseFn != nil { info := ResponseInfo{ - ServerResponse: res, Server: srvr, Connection: conn, ConnectionDescription: desc.Server, CurrentIndex: currIndex, + Error: tt, } - _ = op.ProcessResponseFn(info) + _ = op.ProcessResponseFn(ctx, res, info) } return err } @@ -1038,23 +1023,22 @@ func (op Operation) Execute(ctx context.Context) error { // If we're batching and there are batches remaining, advance to the next batch. This isn't // a retry, so increment the transaction number, reset the retries number, and don't set // server or connection to nil to continue using the same connection. - if batching && len(op.Batches.Documents) > 0 { + if op.Batches != nil && op.Batches.Size() > startedInfo.processedBatches { // If retries are supported for the current operation on the current server description, // the session isn't nil, and client retries are enabled, increment the txn number. // Calling IncrementTxnNumber() for server descriptions or topologies that do not // support retries (e.g. standalone topologies) will cause server errors. - if retrySupported && op.Client != nil && op.RetryMode != nil { - if op.RetryMode.Enabled() { - op.Client.IncrementTxnNumber() - } + if retrySupported && op.Client != nil && retryEnabled { + op.Client.IncrementTxnNumber() + // Reset the retries number for RetryOncePerCommand unless context is a Timeout context, in // which case retries should remain as -1 (as many times as possible). 
if *op.RetryMode == RetryOncePerCommand && !csot.IsTimeoutContext(ctx) { retries = 1 } } - currIndex += len(op.Batches.Current) - op.Batches.ClearBatch() + currIndex += startedInfo.processedBatches + op.Batches.AdvanceBatches(startedInfo.processedBatches) continue } break @@ -1211,25 +1195,13 @@ func (Operation) decompressWireMessage(wm []byte) (wiremessage.OpCode, []byte, e return opcode, uncompressed, nil } -func (op Operation) addBatchArray(dst []byte) []byte { - aidx, dst := bsoncore.AppendArrayElementStart(dst, op.Batches.Identifier) - for i, doc := range op.Batches.Current { - dst = bsoncore.AppendDocumentElement(dst, strconv.Itoa(i), doc) - } - dst, _ = bsoncore.AppendArrayEnd(dst, aidx) - return dst -} - func (op Operation) createLegacyHandshakeWireMessage( maxTimeMS uint64, dst []byte, desc description.SelectedServer, -) ([]byte, startedInformation, error) { - var info startedInformation + cmdFn func([]byte, description.SelectedServer) ([]byte, error), +) ([]byte, []byte, error) { flags := op.secondaryOK(desc) - var wmindex int32 - info.requestID = wiremessage.NextRequestID() - wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpQuery) dst = wiremessage.AppendQueryFlags(dst, flags) dollarCmd := [...]byte{'.', '$', 'c', 'm', 'd'} @@ -1244,35 +1216,31 @@ func (op Operation) createLegacyHandshakeWireMessage( wrapper := int32(-1) rp, err := op.createReadPref(desc, true) if err != nil { - return dst, info, err + return dst, nil, err } if len(rp) > 0 { wrapper, dst = bsoncore.AppendDocumentStart(dst) dst = bsoncore.AppendHeader(dst, bsontype.EmbeddedDocument, "$query") } idx, dst := bsoncore.AppendDocumentStart(dst) - dst, err = op.CommandFn(dst, desc) + dst, err = cmdFn(dst, desc) if err != nil { - return dst, info, err - } - - if op.Batches != nil && len(op.Batches.Current) > 0 { - dst = op.addBatchArray(dst) + return dst, nil, err } dst, err = op.addReadConcern(dst, desc) if err != nil { - return dst, info, err + return dst, nil, err } dst, err = op.addWriteConcern(dst, desc) if err != nil { - return dst, info, err + return dst, nil, err } - dst, err = op.addSession(dst, desc) + dst, err = op.addSession(dst, desc, false) if err != nil { - return dst, info, err + return dst, nil, err } dst = op.addClusterTime(dst, desc) @@ -1284,66 +1252,59 @@ func (op Operation) createLegacyHandshakeWireMessage( } dst, _ = bsoncore.AppendDocumentEnd(dst, idx) - // Command monitoring only reports the document inside $query - info.cmd = dst[idx:] if len(rp) > 0 { + idx = wrapper var err error dst = bsoncore.AppendDocumentElement(dst, "$readPreference", rp) - dst, err = bsoncore.AppendDocumentEnd(dst, wrapper) + dst, err = bsoncore.AppendDocumentEnd(dst, idx) if err != nil { - return dst, info, err + return dst, nil, err } } - return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), info, nil + return dst, dst[idx:], nil } func (op Operation) createMsgWireMessage( - ctx context.Context, maxTimeMS uint64, dst []byte, desc description.SelectedServer, conn Connection, - requestID int32, -) ([]byte, startedInformation, error) { - var info startedInformation + cmdFn func([]byte, description.SelectedServer) ([]byte, error), +) ([]byte, []byte, error) { var flags wiremessage.MsgFlag - var wmindex int32 - // We set the MoreToCome bit if we have a write concern, it's unacknowledged, and we either - // aren't batching or we are encoding the last batch. 
- if op.WriteConcern != nil && !writeconcern.AckWrite(op.WriteConcern) && (op.Batches == nil || len(op.Batches.Documents) == 0) { - flags = wiremessage.MoreToCome - } // Set the ExhaustAllowed flag if the connection supports streaming. This will tell the server that it can // respond with the MoreToCome flag and then stream responses over this connection. if streamer, ok := conn.(StreamerConnection); ok && streamer.SupportsStreaming() { - flags |= wiremessage.ExhaustAllowed + flags = wiremessage.ExhaustAllowed } - - info.requestID = requestID - wmindex, dst = wiremessage.AppendHeaderStart(dst, info.requestID, 0, wiremessage.OpMsg) dst = wiremessage.AppendMsgFlags(dst, flags) // Body dst = wiremessage.AppendMsgSectionType(dst, wiremessage.SingleDocument) idx, dst := bsoncore.AppendDocumentStart(dst) - dst, err := op.addCommandFields(ctx, dst, desc) + var err error + dst, err = cmdFn(dst, desc) if err != nil { - return dst, info, err + return dst, nil, err } dst, err = op.addReadConcern(dst, desc) if err != nil { - return dst, info, err + return dst, nil, err } dst, err = op.addWriteConcern(dst, desc) if err != nil { - return dst, info, err + return dst, nil, err } - dst, err = op.addSession(dst, desc) + retryWrite := false + if op.retryable(conn.Description()) && op.RetryMode != nil && op.RetryMode.Enabled() { + retryWrite = true + } + dst, err = op.addSession(dst, desc, retryWrite) if err != nil { - return dst, info, err + return dst, nil, err } dst = op.addClusterTime(dst, desc) @@ -1357,34 +1318,15 @@ func (op Operation) createMsgWireMessage( dst = bsoncore.AppendStringElement(dst, "$db", op.Database) rp, err := op.createReadPref(desc, false) if err != nil { - return dst, info, err + return dst, nil, err } if len(rp) > 0 { dst = bsoncore.AppendDocumentElement(dst, "$readPreference", rp) } dst, _ = bsoncore.AppendDocumentEnd(dst, idx) - // The command document for monitoring shouldn't include the type 1 payload as a document sequence - info.cmd = dst[idx:] - - // add batch as a document sequence if auto encryption is not enabled - // if auto encryption is enabled, the batch will already be an array in the command document - if !op.shouldEncrypt() && op.Batches != nil && len(op.Batches.Current) > 0 { - info.documentSequenceIncluded = true - dst = wiremessage.AppendMsgSectionType(dst, wiremessage.DocumentSequence) - idx, dst = bsoncore.ReserveLength(dst) - - dst = append(dst, op.Batches.Identifier...) - dst = append(dst, 0x00) - - for _, doc := range op.Batches.Current { - dst = append(dst, doc...) - } - - dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:]))) - } - return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), info, nil + return dst, dst[idx:], nil } // isLegacyHandshake returns True if the operation is the first message of @@ -1402,46 +1344,167 @@ func (op Operation) createWireMessage( desc description.SelectedServer, conn Connection, requestID int32, -) ([]byte, startedInformation, error) { - if isLegacyHandshake(op, desc) { - return op.createLegacyHandshakeWireMessage(maxTimeMS, dst, desc) - } +) ([]byte, bool, startedInformation, error) { + var info startedInformation + var wmindex int32 + var err error - return op.createMsgWireMessage(ctx, maxTimeMS, dst, desc, conn, requestID) -} + unacknowledged := op.WriteConcern != nil && !writeconcern.AckWrite(op.WriteConcern) -// addCommandFields adds the fields for a command to the wire message in dst. This assumes that the start of the document -// has already been added and does not add the final 0 byte. 
-func (op Operation) addCommandFields(ctx context.Context, dst []byte, desc description.SelectedServer) ([]byte, error) { - if !op.shouldEncrypt() { - return op.CommandFn(dst, desc) + fIdx := -1 + isLegacy := isLegacyHandshake(op, desc) + switch { + case isLegacy: + cmdFn := func(dst []byte, desc description.SelectedServer) ([]byte, error) { + info.processedBatches, dst, err = op.addLegacyCommandFields(dst, desc) + return dst, err + } + requestID := wiremessage.NextRequestID() + wmindex, dst = wiremessage.AppendHeaderStart(dst, requestID, 0, wiremessage.OpQuery) + dst, info.cmd, err = op.createLegacyHandshakeWireMessage(maxTimeMS, dst, desc, cmdFn) + case op.shouldEncrypt(): + if desc.WireVersion.Max < cryptMinWireVersion { + return dst, false, info, errors.New("auto-encryption requires a MongoDB version of 4.2") + } + cmdFn := func(dst []byte, desc description.SelectedServer) ([]byte, error) { + info.processedBatches, dst, err = op.addEncryptCommandFields(ctx, dst, desc) + return dst, err + } + wmindex, dst = wiremessage.AppendHeaderStart(dst, requestID, 0, wiremessage.OpMsg) + fIdx = len(dst) + dst, info.cmd, err = op.createMsgWireMessage(maxTimeMS, dst, desc, conn, cmdFn) + default: + wmindex, dst = wiremessage.AppendHeaderStart(dst, requestID, 0, wiremessage.OpMsg) + fIdx = len(dst) + + batchOffset := -1 + switch op.Batches.(type) { + case *Batches: + dst, info.cmd, err = op.createMsgWireMessage(maxTimeMS, dst, desc, conn, op.CommandFn) + if err == nil && op.Batches != nil { + batchOffset = len(dst) + info.processedBatches, dst, err = op.Batches.AppendBatchSequence(dst, + int(desc.MaxBatchCount), int(desc.MaxDocumentSize), int(desc.MaxDocumentSize), + ) + if err != nil { + break + } + if info.processedBatches == 0 { + err = ErrDocumentTooLarge + } + } + default: + var batches []byte + if op.Batches != nil { + maxDocSize := -1 + if unacknowledged { + maxDocSize = int(desc.MaxDocumentSize) + } + info.processedBatches, batches, err = op.Batches.AppendBatchSequence(batches, + int(desc.MaxBatchCount), maxDocSize, int(desc.MaxMessageSize), + ) + if err != nil { + break + } + if info.processedBatches == 0 { + err = ErrDocumentTooLarge + break + } + } + dst, info.cmd, err = op.createMsgWireMessage(maxTimeMS, dst, desc, conn, op.CommandFn) + if err == nil && len(batches) > 0 { + batchOffset = len(dst) + dst = append(dst, batches...) + } + } + if err == nil && batchOffset > 0 { + for b := dst[batchOffset:]; len(b) > 0; /* nothing */ { + var seq []byte + var ok bool + seq, b, ok = wiremessage.DocumentSequenceToArray(b) + if !ok { + break + } + info.documentSequence = append(info.documentSequence, seq...) + } + } + } + if err != nil { + return nil, false, info, err } - if desc.WireVersion.Max < cryptMinWireVersion { - return dst, errors.New("auto-encryption requires a MongoDB version of 4.2") + var moreToCome bool + // We set the MoreToCome bit if we have a write concern, it's unacknowledged, and we either + // aren't batching or we are encoding the last batch. 
+ batching := op.Batches != nil && op.Batches.Size() > info.processedBatches + if fIdx > 0 && unacknowledged && !batching { + dst[fIdx] |= byte(wiremessage.MoreToCome) + moreToCome = true } + info.requestID = requestID + return bsoncore.UpdateLength(dst, wmindex, int32(len(dst[wmindex:]))), moreToCome, info, nil +} - // create temporary command document - cidx, cmdDst := bsoncore.AppendDocumentStart(nil) +func (op Operation) addEncryptCommandFields(ctx context.Context, dst []byte, desc description.SelectedServer) (int, []byte, error) { + idx, cmdDst := bsoncore.AppendDocumentStart(nil) var err error + // create temporary command document cmdDst, err = op.CommandFn(cmdDst, desc) if err != nil { - return dst, err + return 0, nil, err } - // use a BSON array instead of a type 1 payload because mongocryptd will convert to arrays regardless - if op.Batches != nil && len(op.Batches.Current) > 0 { - cmdDst = op.addBatchArray(cmdDst) + var n int + if op.Batches != nil { + if maxBatchCount := int(desc.MaxBatchCount); maxBatchCount > 1 { + n, cmdDst, err = op.Batches.AppendBatchArray(cmdDst, maxBatchCount, cryptMaxBsonObjectSize, cryptMaxBsonObjectSize) + if err != nil { + return 0, nil, err + } + } + if n == 0 { + maxDocumentSize := int(desc.MaxDocumentSize) + n, cmdDst, err = op.Batches.AppendBatchArray(cmdDst, 1, maxDocumentSize, maxDocumentSize) + if err != nil { + return 0, nil, err + } + if n == 0 { + return 0, nil, ErrDocumentTooLarge + } + } + } + cmdDst, err = bsoncore.AppendDocumentEnd(cmdDst, idx) + if err != nil { + return 0, nil, err } - cmdDst, _ = bsoncore.AppendDocumentEnd(cmdDst, cidx) - // encrypt the command encrypted, err := op.Crypt.Encrypt(ctx, op.Database, cmdDst) if err != nil { - return dst, err + return 0, nil, err } // append encrypted command to original destination, removing the first 4 bytes (length) and final byte (terminator) dst = append(dst, encrypted[4:len(encrypted)-1]...) - return dst, nil + return n, dst, nil +} + +func (op Operation) addLegacyCommandFields(dst []byte, desc description.SelectedServer) (int, []byte, error) { + var err error + dst, err = op.CommandFn(dst, desc) + if err != nil { + return 0, nil, err + } + if op.Batches == nil { + return 0, dst, nil + } + var n int + maxDocumentSize := int(desc.MaxDocumentSize) + n, dst, err = op.Batches.AppendBatchArray(dst, int(desc.MaxBatchCount), maxDocumentSize, maxDocumentSize) + if err != nil { + return 0, nil, err + } + if n == 0 { + return 0, nil, ErrDocumentTooLarge + } + return n, dst, nil } // addServerAPI adds the relevant fields for server API specification to the wire message in dst. 
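The refactor above replaces the old ClearBatch/AdvanceBatch flow: createWireMessage appends as many documents as the limits allow, records the count in startedInfo.processedBatches, and Execute later calls AdvanceBatches to skip past them before encoding the next message. The following is a minimal, self-contained sketch of that append/advance cycle using the exported Batches type, with deliberately small, illustrative limits (the real values come from the selected server description, and the encoded section would be embedded in an OP_MSG):

package main

import (
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
	"go.mongodb.org/mongo-driver/x/mongo/driver"
)

func main() {
	// Two small documents to split across wire messages.
	docs := []bsoncore.Document{
		bsoncore.NewDocumentBuilder().AppendInt32("x", 1).Build(),
		bsoncore.NewDocumentBuilder().AppendInt32("x", 2).Build(),
	}
	batches := &driver.Batches{Identifier: "documents", Documents: docs}

	const maxCount, maxDocSize = 1, 1024 // illustrative limits, not server values

	for batches.Size() > 0 {
		// Encode the next document-sequence section; n is how many documents fit.
		n, section, err := batches.AppendBatchSequence(nil, maxCount, maxDocSize, 0)
		if err != nil {
			log.Fatal(err)
		}
		if n == 0 {
			// Nothing fit, so the next document exceeds maxDocSize.
			log.Fatal(driver.ErrDocumentTooLarge)
		}
		fmt.Printf("would send %d document(s) in a %d-byte sequence\n", n, len(section))

		// Operation.Execute performs the round trip here, then advances past
		// the documents that were just encoded.
		batches.AdvanceBatches(n)
	}
}

The same cycle applies to AppendBatchArray, which the auto-encryption and legacy-handshake paths use because those commands carry the batch as a BSON array rather than an OP_MSG document sequence.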
@@ -1532,7 +1595,7 @@ func (op Operation) addWriteConcern(dst []byte, desc description.SelectedServer) return append(bsoncore.AppendHeader(dst, t, "writeConcern"), data...), nil } -func (op Operation) addSession(dst []byte, desc description.SelectedServer) ([]byte, error) { +func (op Operation) addSession(dst []byte, desc description.SelectedServer, retryWrite bool) ([]byte, error) { client := op.Client // If the operation is defined for an explicit session but the server @@ -1550,7 +1613,7 @@ func (op Operation) addSession(dst []byte, desc description.SelectedServer) ([]b dst = bsoncore.AppendDocumentElement(dst, "lsid", client.SessionID) var addedTxnNumber bool - if op.Type == Write && client.RetryWrite { + if op.Type == Write && retryWrite { addedTxnNumber = true dst = bsoncore.AppendInt64Element(dst, "txnNumber", op.Client.TxnNumber) } @@ -1977,7 +2040,7 @@ func (op Operation) publishStartedEvent(ctx context.Context, info startedInforma if op.canLogCommandMessage() { host, port, _ := net.SplitHostPort(info.serverAddress.String()) - redactedCmd := redactStartedInformationCmd(op, info).String() + redactedCmd := redactStartedInformationCmd(info).String() formattedCmd := logger.FormatMessage(redactedCmd, op.Logger.MaxDocumentLength) op.Logger.Print(logger.LevelDebug, @@ -2000,7 +2063,7 @@ func (op Operation) publishStartedEvent(ctx context.Context, info startedInforma if op.canPublishStartedEvent() { started := &event.CommandStartedEvent{ - Command: redactStartedInformationCmd(op, info), + Command: redactStartedInformationCmd(info), DatabaseName: op.Database, CommandName: info.cmdName, RequestID: int64(info.requestID), diff --git a/x/mongo/driver/operation/abort_transaction.go b/x/mongo/driver/operation/abort_transaction.go index aeee533533..c3104cdb90 100644 --- a/x/mongo/driver/operation/abort_transaction.go +++ b/x/mongo/driver/operation/abort_transaction.go @@ -41,9 +41,8 @@ func NewAbortTransaction() *AbortTransaction { return &AbortTransaction{} } -func (at *AbortTransaction) processResponse(driver.ResponseInfo) error { - var err error - return err +func (at *AbortTransaction) processResponse(context.Context, bsoncore.Document, driver.ResponseInfo) error { + return nil } // Execute runs this operations and returns an error if the operation did not execute successfully. 
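The cursor-producing operations below (aggregate, command, find, and the rest) all move to the same two-step pattern: driver.ExtractCursorDocument pulls the embedded cursor document out of the raw reply, and driver.NewCursorResponse then builds the CursorResponse from that document plus the ResponseInfo. A small illustration of the first step against a fabricated reply (the document is hand-built here purely for demonstration):

package main

import (
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
	"go.mongodb.org/mongo-driver/x/mongo/driver"
)

func main() {
	// Fabricate the shape of a cursor-bearing reply:
	// {cursor: {id: 0, ns: "db.coll", firstBatch: [...]}, ok: 1}
	firstBatch := bsoncore.NewArrayBuilder().
		AppendDocument(bsoncore.NewDocumentBuilder().AppendInt32("_id", 1).Build()).
		Build()
	cursor := bsoncore.NewDocumentBuilder().
		AppendInt64("id", 0).
		AppendString("ns", "db.coll").
		AppendArray("firstBatch", firstBatch).
		Build()
	reply := bsoncore.NewDocumentBuilder().
		AppendDocument("cursor", cursor).
		AppendInt32("ok", 1).
		Build()

	// Returns the embedded "cursor" document, or driver.ErrNoCursor if the
	// reply does not carry one.
	curDoc, err := driver.ExtractCursorDocument(reply)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(curDoc.String())

	// Inside a processResponse hook, curDoc would then be passed to
	// driver.NewCursorResponse together with the received driver.ResponseInfo.
}

The operations below differ only in which struct field receives the resulting CursorResponse.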
diff --git a/x/mongo/driver/operation/aggregate.go b/x/mongo/driver/operation/aggregate.go index df6b8fa9dd..b95a8205af 100644 --- a/x/mongo/driver/operation/aggregate.go +++ b/x/mongo/driver/operation/aggregate.go @@ -79,10 +79,12 @@ func (a *Aggregate) ResultCursorResponse() driver.CursorResponse { return a.result } -func (a *Aggregate) processResponse(info driver.ResponseInfo) error { - var err error - - a.result, err = driver.NewCursorResponse(info) +func (a *Aggregate) processResponse(_ context.Context, resp bsoncore.Document, info driver.ResponseInfo) error { + curDoc, err := driver.ExtractCursorDocument(resp) + if err != nil { + return err + } + a.result, err = driver.NewCursorResponse(curDoc, info) return err } diff --git a/x/mongo/driver/operation/command.go b/x/mongo/driver/operation/command.go index 64c98ba19a..42de03a49e 100644 --- a/x/mongo/driver/operation/command.go +++ b/x/mongo/driver/operation/command.go @@ -82,11 +82,15 @@ func (c *Command) Execute(ctx context.Context) error { CommandFn: func(dst []byte, _ description.SelectedServer) ([]byte, error) { return append(dst, c.command[4:len(c.command)-1]...), nil }, - ProcessResponseFn: func(info driver.ResponseInfo) error { - c.resultResponse = info.ServerResponse + ProcessResponseFn: func(_ context.Context, resp bsoncore.Document, info driver.ResponseInfo) error { + c.resultResponse = resp if c.createCursor { - cursorRes, err := driver.NewCursorResponse(info) + curDoc, err := driver.ExtractCursorDocument(resp) + if err != nil { + return err + } + cursorRes, err := driver.NewCursorResponse(curDoc, info) if err != nil { return err } diff --git a/x/mongo/driver/operation/commit_transaction.go b/x/mongo/driver/operation/commit_transaction.go index 6b402bdf63..fda31da542 100644 --- a/x/mongo/driver/operation/commit_transaction.go +++ b/x/mongo/driver/operation/commit_transaction.go @@ -42,9 +42,8 @@ func NewCommitTransaction() *CommitTransaction { return &CommitTransaction{} } -func (ct *CommitTransaction) processResponse(driver.ResponseInfo) error { - var err error - return err +func (ct *CommitTransaction) processResponse(context.Context, bsoncore.Document, driver.ResponseInfo) error { + return nil } // Execute runs this operations and returns an error if the operation did not execute successfully. diff --git a/x/mongo/driver/operation/count.go b/x/mongo/driver/operation/count.go index eaafc9a244..7ef3f549e3 100644 --- a/x/mongo/driver/operation/count.go +++ b/x/mongo/driver/operation/count.go @@ -99,9 +99,9 @@ func NewCount() *Count { // Result returns the result of executing this operation. 
func (c *Count) Result() CountResult { return c.result } -func (c *Count) processResponse(info driver.ResponseInfo) error { +func (c *Count) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - c.result, err = buildCountResult(info.ServerResponse) + c.result, err = buildCountResult(resp) return err } diff --git a/x/mongo/driver/operation/create.go b/x/mongo/driver/operation/create.go index 4878e2c777..73a02feb4d 100644 --- a/x/mongo/driver/operation/create.go +++ b/x/mongo/driver/operation/create.go @@ -56,7 +56,7 @@ func NewCreate(collectionName string) *Create { } } -func (c *Create) processResponse(driver.ResponseInfo) error { +func (c *Create) processResponse(context.Context, bsoncore.Document, driver.ResponseInfo) error { return nil } diff --git a/x/mongo/driver/operation/create_indexes.go b/x/mongo/driver/operation/create_indexes.go index 464c1762de..ae73d49e5e 100644 --- a/x/mongo/driver/operation/create_indexes.go +++ b/x/mongo/driver/operation/create_indexes.go @@ -93,9 +93,9 @@ func NewCreateIndexes(indexes bsoncore.Document) *CreateIndexes { // Result returns the result of executing this operation. func (ci *CreateIndexes) Result() CreateIndexesResult { return ci.result } -func (ci *CreateIndexes) processResponse(info driver.ResponseInfo) error { +func (ci *CreateIndexes) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - ci.result, err = buildCreateIndexesResult(info.ServerResponse) + ci.result, err = buildCreateIndexesResult(resp) return err } diff --git a/x/mongo/driver/operation/create_search_indexes.go b/x/mongo/driver/operation/create_search_indexes.go index 8185d27fe1..180858b3ec 100644 --- a/x/mongo/driver/operation/create_search_indexes.go +++ b/x/mongo/driver/operation/create_search_indexes.go @@ -93,9 +93,9 @@ func NewCreateSearchIndexes(indexes bsoncore.Document) *CreateSearchIndexes { // Result returns the result of executing this operation. func (csi *CreateSearchIndexes) Result() CreateSearchIndexesResult { return csi.result } -func (csi *CreateSearchIndexes) processResponse(info driver.ResponseInfo) error { +func (csi *CreateSearchIndexes) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - csi.result, err = buildCreateSearchIndexesResult(info.ServerResponse) + csi.result, err = buildCreateSearchIndexesResult(resp) return err } diff --git a/x/mongo/driver/operation/delete.go b/x/mongo/driver/operation/delete.go index 4b520a5480..04308cc239 100644 --- a/x/mongo/driver/operation/delete.go +++ b/x/mongo/driver/operation/delete.go @@ -81,8 +81,8 @@ func NewDelete(deletes ...bsoncore.Document) *Delete { // Result returns the result of executing this operation. 
func (d *Delete) Result() DeleteResult { return d.result } -func (d *Delete) processResponse(info driver.ResponseInfo) error { - dr, err := buildDeleteResult(info.ServerResponse) +func (d *Delete) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { + dr, err := buildDeleteResult(resp) d.result.N += dr.N return err } @@ -92,31 +92,30 @@ func (d *Delete) Execute(ctx context.Context) error { if d.deployment == nil { return errors.New("the Delete operation must have a Deployment set before Execute can be called") } - batches := &driver.Batches{ - Identifier: "deletes", - Documents: d.deletes, - Ordered: d.ordered, - } return driver.Operation{ CommandFn: d.command, ProcessResponseFn: d.processResponse, - Batches: batches, - RetryMode: d.retry, - Type: driver.Write, - Client: d.session, - Clock: d.clock, - CommandMonitor: d.monitor, - Crypt: d.crypt, - Database: d.database, - Deployment: d.deployment, - Selector: d.selector, - WriteConcern: d.writeConcern, - ServerAPI: d.serverAPI, - Timeout: d.timeout, - Logger: d.logger, - Name: driverutil.DeleteOp, - Authenticator: d.authenticator, + Batches: &driver.Batches{ + Identifier: "deletes", + Documents: d.deletes, + Ordered: d.ordered, + }, + RetryMode: d.retry, + Type: driver.Write, + Client: d.session, + Clock: d.clock, + CommandMonitor: d.monitor, + Crypt: d.crypt, + Database: d.database, + Deployment: d.deployment, + Selector: d.selector, + WriteConcern: d.writeConcern, + ServerAPI: d.serverAPI, + Timeout: d.timeout, + Logger: d.logger, + Name: driverutil.DeleteOp, + Authenticator: d.authenticator, }.Execute(ctx) } diff --git a/x/mongo/driver/operation/distinct.go b/x/mongo/driver/operation/distinct.go index 0c39027e76..882b4c558e 100644 --- a/x/mongo/driver/operation/distinct.go +++ b/x/mongo/driver/operation/distinct.go @@ -77,9 +77,9 @@ func NewDistinct(key string, query bsoncore.Document) *Distinct { // Result returns the result of executing this operation. func (d *Distinct) Result() DistinctResult { return d.result } -func (d *Distinct) processResponse(info driver.ResponseInfo) error { +func (d *Distinct) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - d.result, err = buildDistinctResult(info.ServerResponse) + d.result, err = buildDistinctResult(resp) return err } diff --git a/x/mongo/driver/operation/drop_collection.go b/x/mongo/driver/operation/drop_collection.go index 5a32c2f8d4..b1c18d4083 100644 --- a/x/mongo/driver/operation/drop_collection.go +++ b/x/mongo/driver/operation/drop_collection.go @@ -79,9 +79,9 @@ func NewDropCollection() *DropCollection { // Result returns the result of executing this operation. func (dc *DropCollection) Result() DropCollectionResult { return dc.result } -func (dc *DropCollection) processResponse(info driver.ResponseInfo) error { +func (dc *DropCollection) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - dc.result, err = buildDropCollectionResult(info.ServerResponse) + dc.result, err = buildDropCollectionResult(resp) return err } diff --git a/x/mongo/driver/operation/drop_indexes.go b/x/mongo/driver/operation/drop_indexes.go index a22496b1e8..1171d81937 100644 --- a/x/mongo/driver/operation/drop_indexes.go +++ b/x/mongo/driver/operation/drop_indexes.go @@ -74,9 +74,9 @@ func NewDropIndexes(index any) *DropIndexes { // Result returns the result of executing this operation. 
func (di *DropIndexes) Result() DropIndexesResult { return di.result } -func (di *DropIndexes) processResponse(info driver.ResponseInfo) error { +func (di *DropIndexes) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - di.result, err = buildDropIndexesResult(info.ServerResponse) + di.result, err = buildDropIndexesResult(resp) return err } diff --git a/x/mongo/driver/operation/drop_search_index.go b/x/mongo/driver/operation/drop_search_index.go index 94e4ddfb0d..55d675a84f 100644 --- a/x/mongo/driver/operation/drop_search_index.go +++ b/x/mongo/driver/operation/drop_search_index.go @@ -69,9 +69,9 @@ func NewDropSearchIndex(index string) *DropSearchIndex { // Result returns the result of executing this operation. func (dsi *DropSearchIndex) Result() DropSearchIndexResult { return dsi.result } -func (dsi *DropSearchIndex) processResponse(info driver.ResponseInfo) error { +func (dsi *DropSearchIndex) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - dsi.result, err = buildDropSearchIndexResult(info.ServerResponse) + dsi.result, err = buildDropSearchIndexResult(resp) return err } diff --git a/x/mongo/driver/operation/end_sessions.go b/x/mongo/driver/operation/end_sessions.go index 8b24b3d8c2..eaf03d2ced 100644 --- a/x/mongo/driver/operation/end_sessions.go +++ b/x/mongo/driver/operation/end_sessions.go @@ -39,9 +39,8 @@ func NewEndSessions(sessionIDs bsoncore.Document) *EndSessions { } } -func (es *EndSessions) processResponse(driver.ResponseInfo) error { - var err error - return err +func (es *EndSessions) processResponse(context.Context, bsoncore.Document, driver.ResponseInfo) error { + return nil } // Execute runs this operations and returns an error if the operation did not execute successfully. diff --git a/x/mongo/driver/operation/find.go b/x/mongo/driver/operation/find.go index c71b7d755e..e8f7640e94 100644 --- a/x/mongo/driver/operation/find.go +++ b/x/mongo/driver/operation/find.go @@ -80,9 +80,12 @@ func (f *Find) Result(opts driver.CursorOptions) (*driver.BatchCursor, error) { return driver.NewBatchCursor(f.result, f.session, f.clock, opts) } -func (f *Find) processResponse(info driver.ResponseInfo) error { - var err error - f.result, err = driver.NewCursorResponse(info) +func (f *Find) processResponse(_ context.Context, resp bsoncore.Document, info driver.ResponseInfo) error { + curDoc, err := driver.ExtractCursorDocument(resp) + if err != nil { + return err + } + f.result, err = driver.NewCursorResponse(curDoc, info) return err } diff --git a/x/mongo/driver/operation/find_and_modify.go b/x/mongo/driver/operation/find_and_modify.go index ea365ccb23..76f34b9255 100644 --- a/x/mongo/driver/operation/find_and_modify.go +++ b/x/mongo/driver/operation/find_and_modify.go @@ -114,10 +114,10 @@ func NewFindAndModify(query bsoncore.Document) *FindAndModify { // Result returns the result of executing this operation. 
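Find, like the other cursor-returning operations in this patch, now handles a reply in two steps: pull the `cursor` subdocument out of the raw response, then build the `driver.CursorResponse` from that subdocument plus the connection info. The sketch below shows the reply shape involved and the lookup that `driver.ExtractCursorDocument` is presumably doing; the helper itself is added elsewhere in this patch, so treat this as an illustration of the shape, not its implementation. Same assumed `bsoncore` import as above.

```go
// buildFakeCursorReply assembles the shape of a cursor-bearing command reply:
// { cursor: { id: <int64>, ns: <string>, firstBatch: [...] }, ok: 1 }.
func buildFakeCursorReply() bsoncore.Document {
	cursor := bsoncore.NewDocumentBuilder().
		AppendInt64("id", 0).
		AppendString("ns", "db.coll").
		AppendArray("firstBatch", bsoncore.NewArrayBuilder().Build()).
		Build()
	return bsoncore.NewDocumentBuilder().
		AppendDocument("cursor", cursor).
		AppendDouble("ok", 1).
		Build()
}

// cursorDocument mirrors the lookup ExtractCursorDocument is expected to
// perform before NewCursorResponse consumes the subdocument.
func cursorDocument(resp bsoncore.Document) (bsoncore.Document, bool) {
	v, err := resp.LookupErr("cursor")
	if err != nil {
		return nil, false
	}
	return v.DocumentOK()
}
```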
func (fam *FindAndModify) Result() FindAndModifyResult { return fam.result } -func (fam *FindAndModify) processResponse(info driver.ResponseInfo) error { +func (fam *FindAndModify) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - fam.result, err = buildFindAndModifyResult(info.ServerResponse) + fam.result, err = buildFindAndModifyResult(resp) return err } diff --git a/x/mongo/driver/operation/hello.go b/x/mongo/driver/operation/hello.go index 60c99f063d..77086f8dc6 100644 --- a/x/mongo/driver/operation/hello.go +++ b/x/mongo/driver/operation/hello.go @@ -586,8 +586,8 @@ func (h *Hello) createOperation() driver.Operation { CommandFn: h.command, Database: "admin", Deployment: h.d, - ProcessResponseFn: func(info driver.ResponseInfo) error { - h.res = info.ServerResponse + ProcessResponseFn: func(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { + h.res = resp return nil }, ServerAPI: h.serverAPI, @@ -610,8 +610,8 @@ func (h *Hello) GetHandshakeInformation(ctx context.Context, _ address.Address, CommandFn: h.handshakeCommand, Deployment: deployment, Database: "admin", - ProcessResponseFn: func(info driver.ResponseInfo) error { - h.res = info.ServerResponse + ProcessResponseFn: func(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { + h.res = resp return nil }, ServerAPI: h.serverAPI, diff --git a/x/mongo/driver/operation/insert.go b/x/mongo/driver/operation/insert.go index a65a4895f0..7f7c6a5453 100644 --- a/x/mongo/driver/operation/insert.go +++ b/x/mongo/driver/operation/insert.go @@ -80,8 +80,8 @@ func NewInsert(documents ...bsoncore.Document) *Insert { // Result returns the result of executing this operation. func (i *Insert) Result() InsertResult { return i.result } -func (i *Insert) processResponse(info driver.ResponseInfo) error { - ir, err := buildInsertResult(info.ServerResponse) +func (i *Insert) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { + ir, err := buildInsertResult(resp) i.result.N += ir.N return err } @@ -91,31 +91,30 @@ func (i *Insert) Execute(ctx context.Context) error { if i.deployment == nil { return errors.New("the Insert operation must have a Deployment set before Execute can be called") } - batches := &driver.Batches{ - Identifier: "documents", - Documents: i.documents, - Ordered: i.ordered, - } return driver.Operation{ CommandFn: i.command, ProcessResponseFn: i.processResponse, - Batches: batches, - RetryMode: i.retry, - Type: driver.Write, - Client: i.session, - Clock: i.clock, - CommandMonitor: i.monitor, - Crypt: i.crypt, - Database: i.database, - Deployment: i.deployment, - Selector: i.selector, - WriteConcern: i.writeConcern, - ServerAPI: i.serverAPI, - Timeout: i.timeout, - Logger: i.logger, - Name: driverutil.InsertOp, - Authenticator: i.authenticator, + Batches: &driver.Batches{ + Identifier: "documents", + Documents: i.documents, + Ordered: i.ordered, + }, + RetryMode: i.retry, + Type: driver.Write, + Client: i.session, + Clock: i.clock, + CommandMonitor: i.monitor, + Crypt: i.crypt, + Database: i.database, + Deployment: i.deployment, + Selector: i.selector, + WriteConcern: i.writeConcern, + ServerAPI: i.serverAPI, + Timeout: i.timeout, + Logger: i.logger, + Name: driverutil.InsertOp, + Authenticator: i.authenticator, }.Execute(ctx) } diff --git a/x/mongo/driver/operation/list_collections.go b/x/mongo/driver/operation/list_collections.go index 1e39f5bfbe..701f7ea01e 100644 --- 
a/x/mongo/driver/operation/list_collections.go +++ b/x/mongo/driver/operation/list_collections.go @@ -55,9 +55,12 @@ func (lc *ListCollections) Result(opts driver.CursorOptions) (*driver.BatchCurso return driver.NewBatchCursor(lc.result, lc.session, lc.clock, opts) } -func (lc *ListCollections) processResponse(info driver.ResponseInfo) error { - var err error - lc.result, err = driver.NewCursorResponse(info) +func (lc *ListCollections) processResponse(_ context.Context, resp bsoncore.Document, info driver.ResponseInfo) error { + curDoc, err := driver.ExtractCursorDocument(resp) + if err != nil { + return err + } + lc.result, err = driver.NewCursorResponse(curDoc, info) return err } diff --git a/x/mongo/driver/operation/listDatabases.go b/x/mongo/driver/operation/list_databases.go similarity index 98% rename from x/mongo/driver/operation/listDatabases.go rename to x/mongo/driver/operation/list_databases.go index 3df171e37a..37371d49cb 100644 --- a/x/mongo/driver/operation/listDatabases.go +++ b/x/mongo/driver/operation/list_databases.go @@ -135,10 +135,10 @@ func NewListDatabases(filter bsoncore.Document) *ListDatabases { // Result returns the result of executing this operation. func (ld *ListDatabases) Result() ListDatabasesResult { return ld.result } -func (ld *ListDatabases) processResponse(info driver.ResponseInfo) error { +func (ld *ListDatabases) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - ld.result, err = buildListDatabasesResult(info.ServerResponse) + ld.result, err = buildListDatabasesResult(resp) return err } diff --git a/x/mongo/driver/operation/list_indexes.go b/x/mongo/driver/operation/list_indexes.go index 433344f307..a9cf200779 100644 --- a/x/mongo/driver/operation/list_indexes.go +++ b/x/mongo/driver/operation/list_indexes.go @@ -54,10 +54,12 @@ func (li *ListIndexes) Result(opts driver.CursorOptions) (*driver.BatchCursor, e return driver.NewBatchCursor(li.result, clientSession, clock, opts) } -func (li *ListIndexes) processResponse(info driver.ResponseInfo) error { - var err error - - li.result, err = driver.NewCursorResponse(info) +func (li *ListIndexes) processResponse(_ context.Context, resp bsoncore.Document, info driver.ResponseInfo) error { + curDoc, err := driver.ExtractCursorDocument(resp) + if err != nil { + return err + } + li.result, err = driver.NewCursorResponse(curDoc, info) return err } diff --git a/x/mongo/driver/operation/update.go b/x/mongo/driver/operation/update.go index 1070e7ca70..2c9c9cba28 100644 --- a/x/mongo/driver/operation/update.go +++ b/x/mongo/driver/operation/update.go @@ -124,8 +124,8 @@ func NewUpdate(updates ...bsoncore.Document) *Update { // Result returns the result of executing this operation. 
func (u *Update) Result() UpdateResult { return u.result } -func (u *Update) processResponse(info driver.ResponseInfo) error { - ur, err := buildUpdateResult(info.ServerResponse) +func (u *Update) processResponse(_ context.Context, resp bsoncore.Document, info driver.ResponseInfo) error { + ur, err := buildUpdateResult(resp) u.result.N += ur.N u.result.NModified += ur.NModified @@ -144,31 +144,30 @@ func (u *Update) Execute(ctx context.Context) error { if u.deployment == nil { return errors.New("the Update operation must have a Deployment set before Execute can be called") } - batches := &driver.Batches{ - Identifier: "updates", - Documents: u.updates, - Ordered: u.ordered, - } return driver.Operation{ CommandFn: u.command, ProcessResponseFn: u.processResponse, - Batches: batches, - RetryMode: u.retry, - Type: driver.Write, - Client: u.session, - Clock: u.clock, - CommandMonitor: u.monitor, - Database: u.database, - Deployment: u.deployment, - Selector: u.selector, - WriteConcern: u.writeConcern, - Crypt: u.crypt, - ServerAPI: u.serverAPI, - Timeout: u.timeout, - Logger: u.logger, - Name: driverutil.UpdateOp, - Authenticator: u.authenticator, + Batches: &driver.Batches{ + Identifier: "updates", + Documents: u.updates, + Ordered: u.ordered, + }, + RetryMode: u.retry, + Type: driver.Write, + Client: u.session, + Clock: u.clock, + CommandMonitor: u.monitor, + Database: u.database, + Deployment: u.deployment, + Selector: u.selector, + WriteConcern: u.writeConcern, + Crypt: u.crypt, + ServerAPI: u.serverAPI, + Timeout: u.timeout, + Logger: u.logger, + Name: driverutil.UpdateOp, + Authenticator: u.authenticator, }.Execute(ctx) } diff --git a/x/mongo/driver/operation/update_search_index.go b/x/mongo/driver/operation/update_search_index.go index c63e048f21..60fc0d4c04 100644 --- a/x/mongo/driver/operation/update_search_index.go +++ b/x/mongo/driver/operation/update_search_index.go @@ -71,9 +71,9 @@ func NewUpdateSearchIndex(index string, definition bsoncore.Document) *UpdateSea // Result returns the result of executing this operation. func (usi *UpdateSearchIndex) Result() UpdateSearchIndexResult { return usi.result } -func (usi *UpdateSearchIndex) processResponse(info driver.ResponseInfo) error { +func (usi *UpdateSearchIndex) processResponse(_ context.Context, resp bsoncore.Document, _ driver.ResponseInfo) error { var err error - usi.result, err = buildUpdateSearchIndexResult(info.ServerResponse) + usi.result, err = buildUpdateSearchIndexResult(resp) return err } diff --git a/x/mongo/driver/operation_exhaust.go b/x/mongo/driver/operation_exhaust.go index e0879de316..db1bd881e9 100644 --- a/x/mongo/driver/operation_exhaust.go +++ b/x/mongo/driver/operation_exhaust.go @@ -25,10 +25,9 @@ func (op Operation) ExecuteExhaust(ctx context.Context, conn StreamerConnection) if op.ProcessResponseFn != nil { // Server, ConnectionDescription, and CurrentIndex are unused in this mode. 
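Note that the write operations keep `+=` rather than `=` when recording results: `processResponse` runs once per batch when a large `updates`/`documents`/`deletes` payload is split, so per-batch counts are summed into the final result. A toy illustration of that accumulation over synthetic batch replies (field names as in the server's update reply; same assumed `bsoncore` import as above):

```go
// accumulate sums per-batch reply counts the way Update.processResponse does
// across the batches of a single logical update.
func accumulate(batchReplies []bsoncore.Document) (n, nModified int64) {
	for _, reply := range batchReplies {
		if v, err := reply.LookupErr("n"); err == nil {
			if i, ok := v.AsInt64OK(); ok {
				n += i
			}
		}
		if v, err := reply.LookupErr("nModified"); err == nil {
			if i, ok := v.AsInt64OK(); ok {
				nModified += i
			}
		}
	}
	return n, nModified
}
```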
info := ResponseInfo{ - ServerResponse: res, - Connection: conn, + Connection: conn, } - if err = op.ProcessResponseFn(info); err != nil { + if err = op.ProcessResponseFn(ctx, res, info); err != nil { return err } } diff --git a/x/mongo/driver/session/client_session.go b/x/mongo/driver/session/client_session.go index eff27bfe33..5403f49c20 100644 --- a/x/mongo/driver/session/client_session.go +++ b/x/mongo/driver/session/client_session.go @@ -112,7 +112,6 @@ type Client struct { RetryingCommit bool Committing bool Aborting bool - RetryWrite bool RetryRead bool Snapshot bool diff --git a/x/mongo/driver/wiremessage/wiremessage.go b/x/mongo/driver/wiremessage/wiremessage.go index 987ae16c08..64f50e7836 100644 --- a/x/mongo/driver/wiremessage/wiremessage.go +++ b/x/mongo/driver/wiremessage/wiremessage.go @@ -16,6 +16,7 @@ package wiremessage import ( "bytes" "encoding/binary" + "strconv" "strings" "sync/atomic" @@ -422,6 +423,39 @@ func ReadMsgSectionRawDocumentSequence(src []byte) (identifier string, data []by return identifier, rem, rest, true } +// DocumentSequenceToArray converts a document sequence in byte slice to an array. +func DocumentSequenceToArray(src []byte) (dst, rem []byte, ok bool) { + stype, rem, ok := ReadMsgSectionType(src) + if !ok || stype != DocumentSequence { + return nil, src, false + } + var identifier string + var ret []byte + identifier, rem, ret, ok = ReadMsgSectionRawDocumentSequence(rem) + if !ok { + return nil, src, false + } + + aidx, dst := bsoncore.AppendArrayElementStart(nil, identifier) + i := 0 + for { + var doc bsoncore.Document + doc, rem, ok = bsoncore.ReadDocument(rem) + if !ok { + break + } + dst = bsoncore.AppendDocumentElement(dst, strconv.Itoa(i), doc) + i++ + } + if len(rem) > 0 { + return nil, src, false + } + + dst, _ = bsoncore.AppendArrayEnd(dst, aidx) + + return dst, ret, true +} + // ReadMsgChecksum reads a checksum from src. func ReadMsgChecksum(src []byte) (checksum uint32, rem []byte, ok bool) { i32, rem, ok := readi32(src)
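The new `wiremessage.DocumentSequenceToArray` re-encodes an OP_MSG kind-1 (document sequence) section as a single BSON array element keyed by the sequence identifier, returning the leftover bytes and an `ok` flag. A usage sketch that hand-assembles a kind-1 section and converts it; `appendDocumentSequence` is a hypothetical helper written for this example (the driver builds these sections internally), and the import paths again assume the `go.mongodb.org/mongo-driver` module layout.

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
	"go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage"
)

// appendDocumentSequence hand-assembles an OP_MSG kind-1 section: one
// section-type byte, an int32 size (covering the size field, the identifier
// cstring, and the documents), the identifier cstring, then the documents.
func appendDocumentSequence(dst []byte, identifier string, docs ...bsoncore.Document) []byte {
	var payload []byte
	payload = append(payload, identifier...)
	payload = append(payload, 0x00)
	for _, doc := range docs {
		payload = append(payload, doc...)
	}
	dst = wiremessage.AppendMsgSectionType(dst, wiremessage.DocumentSequence)
	dst = bsoncore.AppendInt32(dst, int32(4+len(payload)))
	return append(dst, payload...)
}

func main() {
	d0 := bsoncore.NewDocumentBuilder().AppendString("insert", "coll").Build()
	d1 := bsoncore.NewDocumentBuilder().AppendString("update", "coll").Build()

	section := appendDocumentSequence(nil, "ops", d0, d1)

	arr, rem, ok := wiremessage.DocumentSequenceToArray(section)
	fmt.Println(ok, len(rem))          // true 0: the whole section was consumed
	fmt.Println(bsoncore.Element(arr)) // "ops" rendered as a BSON array element
}
```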