
Node.js File Upload (Express 4, MongoDB, GridFS, GridFS-Stream)



In this article we cover Node.js file upload in detail and answer related questions about Express 4, MongoDB, GridFS, and GridFS-Stream. We also provide example source code for com.mongodb.client.gridfs.GridFSBuckets, com.mongodb.client.gridfs.GridFSBucket, com.mongodb.client.gridfs.model.GridFSFile, and com.mongodb.client.gridfs.model.GridFSUploadOptions.

Contents:

1. Node.js File Upload (Express 4, MongoDB, GridFS, GridFS-Stream)
2. Example source code for com.mongodb.client.gridfs.GridFSBuckets
3. Example source code for com.mongodb.client.gridfs.GridFSBucket
4. Example source code for com.mongodb.client.gridfs.model.GridFSFile
5. Example source code for com.mongodb.client.gridfs.model.GridFSUploadOptions

Node.js File Upload (Express 4, MongoDB, GridFS, GridFS-Stream)

I am trying to set up a file API in my node.js application. My goal is to be able to write the file stream straight into GridFS without first storing the file on disk. My create code seems to be working: I am able to save file uploads into GridFS. The problem is reading the file back. When I try to download a saved file through a web browser window, I see the file contents wrapped with the following:

------WebKitFormBoundarye38W9pfG1wiA100l
Content-Disposition: form-data; name="file"; filename="myfile.txt"
Content-Type: text/javascript

***File contents here***

------WebKitFormBoundarye38W9pfG1wiA100l--

So my question is: what can I do to strip the boundary information from the file stream before saving it to GridFS? Here is the code I am using:

'use strict';

var mongoose = require('mongoose');
var _ = require('lodash');
var Grid = require('gridfs-stream');
Grid.mongo = mongoose.mongo;
var gfs = new Grid(mongoose.connection.db);

// I think this works. I see the file record in fs.files
exports.create = function(req, res) {
    var fileId = new mongoose.Types.ObjectId();
    var writeStream = gfs.createWriteStream({
        _id: fileId,
        filename: req.query.name,
        mode: 'w',
        content_type: req.query.type,
        metadata: {
            uploadedBy: req.user._id,
        }
    });
    writeStream.on('finish', function() {
        return res.status(200).send({
            message: fileId.toString()
        });
    });
    req.pipe(writeStream);
};

// File data is returned, but it's wrapped with
// WebKitFormBoundary and has headers.
exports.read = function(req, res) {
    gfs.findOne({ _id: req.params.id }, function (err, file) {
        if (err) return res.status(400).send(err);
        // With this commented out, my browser will prompt
        // me to download the raw file where I can see the
        // webkit boundary and request headers
        //res.writeHead(200, { 'Content-Type': file.contentType });
        var readstream = gfs.createReadStream({
            _id: req.params.id
            // I also tried this way:
            //_id: file._id
        });
        readstream.pipe(res);
    });
};

By the way, I am not currently using any middleware for these routes, but I am open to doing so. I just don't want the file to hit disk before it is sent to GridFS.
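For reference, here is a minimal sketch of why the raw pipe misbehaves (the /raw-echo route is hypothetical, purely for debugging): Express does not parse multipart/form-data itself, so req carries the complete multipart body, boundary lines and part headers included, and piping req straight into a GridFS write stream stores that framing along with the file contents.

// Hypothetical debugging route: buffer the request and log exactly what
// Express receives for a multipart POST.
app.post('/raw-echo', function(req, res) {
    var chunks = [];
    req.on('data', function(chunk) { chunks.push(chunk); });
    req.on('end', function() {
        // For a multipart upload this prints the ------WebKitFormBoundary...
        // framing around the file contents -- the same bytes that end up
        // in GridFS when req is piped directly into the write stream.
        console.log(Buffer.concat(chunks).toString('utf8'));
        res.sendStatus(204);
    });
});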

EDIT:

Per @fardjad, I added the node-multiparty module for multipart/form-data parsing, and that approach worked. However, when I download an uploaded file and compare it (as text) with the original, there are many encoding differences, and the downloaded file will not open. Here is my latest attempt.

'use strict';

var mongoose = require('mongoose');
var _ = require('lodash');
var multiparty = require('multiparty');
var Grid = require('gridfs-stream');
Grid.mongo = mongoose.mongo;
var gfs = new Grid(mongoose.connection.db);

exports.create = function(req, res) {
    var form = new multiparty.Form();
    var fileId = new mongoose.Types.ObjectId();
    form.on('error', function(err) {
        console.log('Error parsing form: ' + err.stack);
    });
    form.on('part', function(part) {
        if (part.filename) {
            var writeStream = gfs.createWriteStream({
                _id: fileId,
                filename: part.filename,
                mode: 'w',
                content_type: part.headers['content-type'],
                metadata: {
                    uploadedBy: req.user._id,
                }
            });
            part.pipe(writeStream);
        }
    });
    // Close emitted after form parsed
    form.on('close', function() {
        return res.status(200).send({
            message: fileId.toString()
        });
    });
    // Parse req
    form.parse(req);
};

exports.read = function(req, res) {
    gfs.findOne({ _id: req.params.id }, function (err, file) {
        if (err) return res.status(400).send(err);
        res.writeHead(200, { 'Content-Type': file.contentType });
        var readstream = gfs.createReadStream({
            _id: req.params.id
        });
        readstream.pipe(res);
    });
};
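A quick way to check whether the stored bytes really differ from the original is to hash both ends of the round trip. Below is a minimal sketch under stated assumptions: the original file is still on disk at a known path, and someFileId is the _id returned by the upload (both names are hypothetical).

var crypto = require('crypto');
var fs = require('fs');

// Hash any readable stream and report the hex digest.
function hashStream(stream, callback) {
    var hash = crypto.createHash('md5');
    stream.on('data', function(chunk) { hash.update(chunk); });
    stream.on('end', function() { callback(hash.digest('hex')); });
}

// Compare the original file on disk with what GridFS returns.
// If the digests differ, bytes were altered somewhere in the pipeline.
hashStream(fs.createReadStream('/tmp/original.txt'), function(diskDigest) {
    hashStream(gfs.createReadStream({ _id: someFileId }), function(gridDigest) {
        console.log('disk:', diskDigest, 'gridfs:', gridDigest,
            diskDigest === gridDigest ? 'MATCH' : 'MISMATCH');
    });
});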

FINAL EDIT:

This is a simple implementation I copied from another developer and modified. It works for me. (I am still trying to figure out why it did not work in my original Express app; something seems to be interfering.)

https://gist.github.com/pos1tron/094ac862c9d116096572

var Busboy = require('busboy');      // 0.2.9
var express = require('express');    // 4.12.3
var mongo = require('mongodb');      // 2.0.31
var Grid = require('gridfs-stream'); // 1.1.1

var app = express();
var server = app.listen(9002);

var db = new mongo.Db('test', new mongo.Server('127.0.0.1', 27017));
var gfs;
db.open(function(err, db) {
  if (err) throw err;
  gfs = Grid(db, mongo);
});

app.post('/file', function(req, res) {
  var busboy = new Busboy({ headers: req.headers });
  var fileId = new mongo.ObjectId();
  busboy.on('file', function(fieldname, file, filename, encoding, mimetype) {
    console.log('got file', filename, mimetype, encoding);
    var writeStream = gfs.createWriteStream({
      _id: fileId,
      filename: filename,
      mode: 'w',
      content_type: mimetype,
    });
    file.pipe(writeStream);
  }).on('finish', function() {
    // show a link to the uploaded file
    res.writeHead(200, {'content-type': 'text/html'});
    res.end('<a href="/file/' + fileId.toString() + '">download file</a>');
  });
  req.pipe(busboy);
});

app.get('/', function(req, res) {
  // show a file upload form
  res.writeHead(200, {'content-type': 'text/html'});
  res.end(
    '<form action="/file" enctype="multipart/form-data" method="post">'+
    '<input type="file" name="file"><br>'+
    '<input type="submit" value="Upload">'+
    '</form>'
  );
});

app.get('/file/:id', function(req, res) {
  gfs.findOne({ _id: req.params.id }, function (err, file) {
    if (err) return res.status(400).send(err);
    if (!file) return res.status(404).send('');
    res.set('Content-Type', file.contentType);
    res.set('Content-Disposition', 'attachment; filename="' + file.filename + '"');
    var readstream = gfs.createReadStream({
      _id: file._id
    });
    readstream.on("error", function(err) {
      console.log("Got error while processing stream " + err.message);
      res.end();
    });
    readstream.pipe(res);
  });
});

Answer 1


See my comment on the issue you created on GitHub. I ran into the same problem, and I managed to debug it. I narrowed it down to the point where I was confident the problem was an Express middleware modifying the request. I disabled my middleware one by one until I found the unlikely culprit: connect-livereload.

I commented out app.use(require('connect-livereload')()); and the problem was solved. I believe it was injecting the livereload script into the response (a binary image file).
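If you want to keep livereload for your HTML pages, a gentler fix may be to exclude the binary routes instead of removing the middleware entirely: connect-livereload documents an ignore option listing paths it should not rewrite. A minimal sketch, assuming file downloads are served under /file as in the code above:

// Keep livereload for HTML pages but skip the download routes, so the
// livereload <script> tag is never injected into binary file responses.
app.use(require('connect-livereload')({
    ignore: ['/file']
}));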


Example source code for com.mongodb.client.gridfs.GridFSBuckets

Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * Create a file in GridFS with the given filename and write
 * some random data to it.
 * @param filename the name of the file to create
 * @param size the number of random bytes to write
 * @param vertx the Vert.x instance
 * @param handler a handler that will be called when the file
 * has been written
 */
private void prepareData(String filename,int size,Vertx vertx,Handler<AsyncResult<String>> handler) {
  vertx.<String>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      try (GridFSUploadStream os = gridFS.openUploadStream(filename)) {
        for (int i = 0; i < size; ++i) {
          os.write((byte)(i & 0xFF));
        }
      }
    }
    f.complete(filename);
  },handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void validateAfterStoreAdd(TestContext context,String path,Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);

      GridFSFindIterable files = gridFS.find();

      GridFSFile file = files.first();
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      gridFS.downloadToStream(file.getFilename(),baos);
      String contents = new String(baos.toByteArray(),StandardCharsets.UTF_8);
      context.assertEquals(CHUNK_CONTENT,contents);
    }
    f.complete();
  },handler);
}
Project: reactive-hamster    File: ResumeTest.java
@Test
public void testWriteReplacePage() throws IOException,ClassNotFoundException {
    HamsterPage page = createTestPage(testUIEngine);        
    Document mo=testCollection.createNew();
    TestComponent c = new TestComponent();
    c.m=mo;
    c.test="test";
    page.addComponent(c);
    testUIEngine.initDB(db,GridFSBuckets.create(db));
    testUIEngine.persistPage(page);
    HamsterPage p2= testUIEngine.resumePage( page.getId());
    assertNotNull(p2);
    TestComponent t=(TestComponent) p2.components.get(0);
    assertNotNull(t);
    assertEquals("test",t.test);
    assertNotNull(t.m);
    assertTrue(t.m == mo);
}
Project: opensearchserver    File: MongoDbCrawlCache.java
@Override
public void init(String configString) throws IOException {
    rwl.w.lock();
    try {
        closeNoLock();
        final MongoClientURI connectionString = new MongoClientURI(configString);
        mongoClient = new MongoClient(connectionString);
        final MongoDatabase database = mongoClient.getDatabase(
                connectionString.getDatabase() == null ? DEFAULT_DATABASE : connectionString.getDatabase());
        metaCollection = database.getCollection(META_COLLECTION);
        metaCollection.createIndex(Document.parse("{\"uri\":1}"));
        indexedCollection = database.getCollection(INDEXED_COLLECTION);
        indexedCollection.createIndex(Document.parse("{\"uri\":1}"));
        contentGrid = GridFSBuckets.create(database);
    } finally {
        rwl.w.unlock();
    }
}
Project: awplab-core    File: LogAdminProvider.java
private void deleteLog(Date olderThan) {
    MongoCollection<Log> logCollection = mongoService.getMongoClient().getDatabase(database).getCollection(collection,Log.class);
    Bson filter = Filters.lt("timeStamp",olderThan);
    logCollection.find(filter).forEach((Block<? super Log>) log -> {
        log.getLogFiles().forEach(logFile -> {
            GridFSBucket gridFSBucket = GridFSBuckets.create(mongoService.getMongoClient().getDatabase(database),logFile.getBucket());
            gridFSBucket.delete(logFile.getFileObjectId());
        });
    });
    DeleteResult deleteResult = logCollection.deleteMany(filter);
}
Project: awplab-core    File: BucketStreamResource.java
public BucketStreamResource(MongoDatabase database,String bucket,ObjectId objectId) {
    super((StreamSource) () -> {
        GridFSBucket gridFSBucket = GridFSBuckets.create(database,bucket);
        return gridFSBucket.openDownloadStream(objectId);
    },gridFSFile(database,bucket,objectId).getFilename());

    this.database = database;
    this.bucket = bucket;
    this.objectId = objectId;
}
Project: awplab-core    File: LogFile.java
public void save(MongoDatabase database) throws IOException {
    if (temporaryFile == null) return;

    try (FileInputStream fileInputStream = new FileInputStream(temporaryFile)) {
        ObjectId objectId = GridFSBuckets.create(database,bucket).uploadFromStream(temporaryFile.getName(),fileInputStream);
        this.setFileObjectId(objectId);
        this.setBucket(bucket);
    }
    finally {
        temporaryFile.delete(); // java.io.File has no close(); remove the temp file instead
    }

    temporaryFile = null;

}
Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * Connect to MongoDB and get the GridFS chunk size
 * @param vertx the Vert.x instance
 * @param handler a handler that will be called with the chunk size
 */
private void getChunkSize(Vertx vertx,Handler<AsyncResult<Integer>> handler) {
  vertx.<Integer>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      f.complete(gridFS.getChunkSizeBytes());
    }
  },handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void prepareData(TestContext context,Handler<AsyncResult<String>> handler) {
  String filename = PathUtils.join(path,ID);
  vertx.<String>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      byte[] contents = CHUNK_CONTENT.getBytes(StandardCharsets.UTF_8);
      gridFS.uploadFromStream(filename,new ByteArrayInputStream(contents));
      f.complete(filename);
    }
  },handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void validateAfterStoreDelete(TestContext context,Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);

      GridFSFindIterable files = gridFS.find();
      context.assertTrue(Iterables.isEmpty(files));
    }
    f.complete();
  },handler);
}
Project: reactive-hamster    File: UITest.java
@Override
public UIEngine createTestEngine() {
    if(testEngine !=null) {
        testEngine.destroy();
    }
    loader = new HamsterLoader();
    UIEngine engine = createUITestEngine();
    engine.initDB(db,GridFSBuckets.create(db));
    loader.setEngine(engine);
    testEngine=engine;
    CometProcessor.setEngine(engine);  
    return engine;
}
Project: reactive-hamster    File: ExampleChatEngine.java
protected void initDB(String dbname) {
    //set up the persistence layer
    //Connect to the local MongoDB instance
    MongoClient m = new MongoClient();
    //get the DB with the given Name
    MongoDatabase chatDB = m.getDatabase(dbname);
    //initialize our collections
    DocumentCollections.init(this,chatDB);
    //set up GridFs for storing files
    GridFSBucket fs = GridFSBuckets.create(chatDB,"persistedPages");
        //the base class UIEngine needs the gridFS for
    //persisting sessions
    super.initDB(chatDB,fs);

}
Project: mandrel    File: MongoBlobStore.java
public MongoBlobStore(TaskContext context,MongoClient mongoClient,String databaseName,String bucketName,int batchSize) {
    super(context);
    this.mongoClient = mongoClient;
    this.databaseName = databaseName;
    this.bucket = GridFSBuckets.create(mongoClient.getDatabase(databaseName),bucketName);
    this.batchSize = batchSize;
    this.mapper = new ObjectMapper();
}
Project: restheart    File: GridFsDAO.java
@Override
public OperationResult deleteFile(
        final Database db,
        final String dbName,
        final String bucketName,
        final BsonValue fileId,
        final String requestEtag,
        final boolean checkEtag) {
    final String bucket = extractBucketName(bucketName);

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            db.getDatabase(dbName), bucket);

    GridFSFile file = gridFSBucket
            .find(eq("_id",fileId))
            .limit(1).iterator().tryNext();

    if (file == null) {
        return new OperationResult(HttpStatus.SC_NOT_FOUND);
    } else if (checkEtag) {
        Object oldEtag = file.getMetadata().get("_etag");

        if (oldEtag != null) {
            if (requestEtag == null) {
                return new OperationResult(HttpStatus.SC_CONFLICT,oldEtag);
            } else if (!Objects.equals(oldEtag.toString(),requestEtag)) {
                return new OperationResult(
                        HttpStatus.SC_PRECONDITION_FAILED, oldEtag);
            }
        }
    }

    gridFSBucket.delete(fileId);

    return new OperationResult(HttpStatus.SC_NO_CONTENT);
}
Project: restheart    File: GetFileBinaryHandler.java
@Override
public void handleRequest(
        HttpServerExchange exchange,RequestContext context)
        throws Exception {
    if (context.isInError()) {
        next(exchange,context);
        return;
    }

    LOGGER.trace("GET " + exchange.getRequestURL());
    final String bucket = extractBucketName(context.getCollectionName());

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            MongoDBClientSingleton.getInstance().getClient()
                    .getDatabase(context.getDBName()), bucket);

    GridFSFile dbsfile = gridFSBucket
            .find(eq("_id",context.getDocumentId()))
            .limit(1).iterator().tryNext();

    if (dbsfile == null) {
        fileNotFound(context,exchange);
    } else if (!checkEtag(exchange,dbsfile)) {
        sendBinaryContent(context,gridFSBucket,dbsfile,exchange);
    }

    next(exchange,context);
}
Project: otus-api    File: FileStoreBucket.java
@PostConstruct
public void setUp() {
    fileStore = GridFSBuckets.create(db,FILESTORE);
}
Project: mongo-obj-framework    File: Smof.java
public void loadBucket(String bucketName) {
    final GridFSBucket bucket = GridFSBuckets.create(database,bucketName);
    dispatcher.put(bucketName,bucket);
}
Project: awplab-core    File: LogFile.java
@BeanCodecKey(ignore = true)
public GridFSFile getGridFSFile(MongoDatabase mongoDatabase) {
    return GridFSBuckets.create(mongoDatabase,bucket).find(Filters.eq("_id",fileObjectId)).first();
}
Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * The actual test method. Creates a temporary file with random contents. Writes
 * <code>size</code> bytes to it and reads it again through
 * {@link MongoDBChunkReadStream}. Finally,checks if the file has been read correctly.
 * @param size the number of bytes to write/read
 * @param chunkSize the GridFS chunk size
 * @param vertx the Vert.x instance
 * @param context the current test context
 */
private void doRead(int size,int chunkSize,TestContext context) {
  Async async = context.async();

  // create a test file in GridFS
  prepareData("test_" + size + ".bin",size,vertx,context.asyncAssertSuccess(filename -> {
    // connect to GridFS
    com.mongodb.async.client.MongoClient client = createAsyncClient();
    com.mongodb.async.client.MongoDatabase db =
        client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
    com.mongodb.async.client.gridfs.GridFSBucket gridfs =
        com.mongodb.async.client.gridfs.GridFSBuckets.create(db);

    // open the test file
    GridFSDownloadStream is = gridfs.openDownloadStream(filename);
    MongoDBChunkReadStream rs = new MongoDBChunkReadStream(is, chunkSize, vertx.getOrCreateContext());

    // read from the test file
    rs.exceptionHandler(context::fail);

    int[] pos = { 0 };

    rs.endHandler(v -> {
      // the file has been completely read
      rs.close();
      context.assertEquals(size,pos[0]);
      async.complete();
    });

    rs.handler(buf -> {
      // check number of read bytes
      if (size - pos[0] > chunkSize) {
        context.assertEquals(chunkSize,buf.length());
      } else {
        context.assertEquals(size - pos[0],buf.length());
      }

      // check file contents
      for (int i = pos[0]; i < pos[0] + buf.length(); ++i) {
        context.assertEquals((byte)(i & 0xFF),buf.getByte(i - pos[0]));
      }

      pos[0] += buf.length();
    });
  }));
}
Project: eds-starter6-mongodb    File: MongoDb.java
public GridFSBucket createBucket(String bucketName) {
    return GridFSBuckets.create(this.getMongoDatabase(),bucketName);
}
Project: lumongo    File: MongoDocumentStorage.java
private GridFSBucket createGridFSConnection() {
    MongoDatabase db = mongoClient.getDatabase(database);
    return GridFSBuckets.create(db,ASSOCIATED_FILES);
}
Project: awplab-core    File: BucketStreamResource.java
public static GridFSFile gridFSFile(MongoDatabase database, String bucket, ObjectId objectId) {
    return GridFSBuckets.create(database, bucket).find(Filters.eq("_id", objectId)).first();

}


Example source code for com.mongodb.client.gridfs.GridFSBucket

Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * Create a file in GridFS with the given filename and write
 * some random data to it.
 * @param filename the name of the file to create
 * @param size the number of random bytes to write
 * @param vertx the Vert.x instance
 * @param handler a handler that will be called when the file
 * has been written
 */
private void prepareData(String filename,int size,Vertx vertx,Handler<AsyncResult<String>> handler) {
  vertx.<String>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      try (GridFSUploadStream os = gridFS.openUploadStream(filename)) {
        for (int i = 0; i < size; ++i) {
          os.write((byte)(i & 0xFF));
        }
      }
    }
    f.complete(filename);
  },handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void validateAfterStoreAdd(TestContext context,String path,Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);

      GridFSFindIterable files = gridFS.find();

      GridFSFile file = files.first();
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      gridFS.downloadToStream(file.getFilename(),baos);
      String contents = new String(baos.toByteArray(),StandardCharsets.UTF_8);
      context.assertEquals(CHUNK_CONTENT,contents);
    }
    f.complete();
  },handler);
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public InputStream getAssociatedDocumentStream(String uniqueId,String fileName) {
    GridFSBucket gridFS = createGridFSConnection();
    GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName))).first();

    if (file == null) {
        return null;
    }

    InputStream is = gridFS.openDownloadStream(file.getObjectId());

    Document metadata = file.getMetadata();
    if (metadata.containsKey(COMPRESSED_FLAG)) {
        boolean compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
        if (compressed) {
            is = new InflaterInputStream(is);
        }
    }

    return is;
}
Project: mongo-obj-framework    File: SmofGridStreamManagerImpl.java
private void uploadStream(SmofGridRef ref,String name,InputStream stream) {
    final String bucketName = ref.getBucketName();
    final ObjectId id;
    final GridFSBucket bucket;
    Preconditions.checkNotNull(bucketName,"No bucket specified");
    final GridFSUploadOptions options = new GridFSUploadOptions().metadata(ref.getMetadata());
    bucket = pool.getBucket(bucketName);
    id = bucket.uploadFromStream(name,stream,options);
    ref.setId(id);
}
Project: mongo-obj-framework    File: SmofGridStreamManagerImpl.java
@Override
public InputStream download(SmofGridRef ref) {
    final String bucketName = ref.getBucketName();
    final ObjectId id = ref.getId();
    Preconditions.checkArgument(id != null,"No download source found");
    Preconditions.checkArgument(bucketName != null,"No bucket specified");
    final GridFSBucket bucket = pool.getBucket(bucketName);
    return bucket.openDownloadStream(id);
}
Project: mongo-obj-framework    File: SmofGridStreamManagerImpl.java
@Override
public void drop(SmofGridRef ref) {
    final String bucketName = ref.getBucketName();
    final ObjectId id = ref.getId();
    Preconditions.checkArgument(id != null, "No download source found");
    Preconditions.checkArgument(bucketName != null, "No bucket specified");
    final GridFSBucket bucket = pool.getBucket(bucketName);
    bucket.delete(id);
}
Project: awplab-core    File: LogAdminProvider.java
private void deleteLog(Date olderThan) {
    MongoCollection<Log> logCollection = mongoService.getMongoClient().getDatabase(database).getCollection(collection,Log.class);
    Bson filter = Filters.lt("timeStamp",olderThan);
    logCollection.find(filter).forEach((Block<? super Log>) log -> {
        log.getLogFiles().forEach(logFile -> {
            GridFSBucket gridFSBucket = GridFSBuckets.create(mongoService.getMongoClient().getDatabase(database),logFile.getBucket());
            gridFSBucket.delete(logFile.getFileObjectId());
        });
    });
    DeleteResult deleteResult = logCollection.deleteMany(filter);
}
Project: awplab-core    File: BucketStreamResource.java
public BucketStreamResource(MongoDatabase database,String bucket,ObjectId objectId) {
    super((StreamSource) () -> {
        GridFSBucket gridFSBucket = GridFSBuckets.create(database,bucket);
        return gridFSBucket.openDownloadStream(objectId);
    },gridFSFile(database,bucket,objectId).getFilename());

    this.database = database;
    this.bucket = bucket;
    this.objectId = objectId;
}
Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * Connect to MongoDB and get the GridFS chunk size
 * @param vertx the Vert.x instance
 * @param handler a handler that will be called with the chunk size
 */
private void getChunkSize(Vertx vertx,Handler<AsyncResult<Integer>> handler) {
  vertx.<Integer>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      f.complete(gridFS.getChunkSizeBytes());
    }
  },handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void prepareData(TestContext context,Handler<AsyncResult<String>> handler) {
  String filename = PathUtils.join(path,ID);
  vertx.<String>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      byte[] contents = CHUNK_CONTENT.getBytes(StandardCharsets.UTF_8);
      gridFS.uploadFromStream(filename,new ByteArrayInputStream(contents));
      f.complete(filename);
    }
  },handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void validateAfterStoreDelete(TestContext context,Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);

      GridFSFindIterable files = gridFS.find();
      context.assertTrue(Iterables.isEmpty(files));
    }
    f.complete();
  },handler);
}
Project: reactive-hamster    File: ExampleChatEngine.java
protected void initDB(String dbname) {
    //set up the persistence layer
    //Connect to the local MongoDB instance
    MongoClient m = new MongoClient();
    //get the DB with the given Name
    MongoDatabase chatDB = m.getDatabase(dbname);
    //initialize our collections
    DocumentCollections.init(this,chatDB);
    //set up GridFs for storing files
    GridFSBucket fs = GridFSBuckets.create(chatDB,"persistedPages");
        //the base class UIEngine needs the gridFS for
    //persisting sessions
    super.initDB(chatDB,fs);

}
Project: restheart    File: GridFsDAO.java
@Override
public OperationResult deleteFile(
        final Database db,
        final String dbName,
        final String bucketName,
        final BsonValue fileId,
        final String requestEtag,
        final boolean checkEtag) {
    final String bucket = extractBucketName(bucketName);

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            db.getDatabase(dbName), bucket);

    GridFSFile file = gridFSBucket
            .find(eq("_id",fileId))
            .limit(1).iterator().tryNext();

    if (file == null) {
        return new OperationResult(HttpStatus.SC_NOT_FOUND);
    } else if (checkEtag) {
        Object oldEtag = file.getMetadata().get("_etag");

        if (oldEtag != null) {
            if (requestEtag == null) {
                return new OperationResult(HttpStatus.SC_CONFLICT,oldEtag);
            } else if (!Objects.equals(oldEtag.toString(),requestEtag)) {
                return new OperationResult(
                        HttpStatus.SC_PRECONDITION_FAILED, oldEtag);
            }
        }
    }

    gridFSBucket.delete(fileId);

    return new OperationResult(HttpStatus.SC_NO_CONTENT);
}
Project: restheart    File: GetFileBinaryHandler.java
@Override
public void handleRequest(
        HttpServerExchange exchange,RequestContext context)
        throws Exception {
    if (context.isInError()) {
        next(exchange,context);
        return;
    }

    LOGGER.trace("GET " + exchange.getRequestURL());
    final String bucket = extractBucketName(context.getCollectionName());

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            MongoDBClientSingleton.getInstance().getClient()
                    .getDatabase(context.getDBName()), bucket);

    GridFSFile dbsfile = gridFSBucket
            .find(eq("_id",context.getDocumentId()))
            .limit(1).iterator().tryNext();

    if (dbsfile == null) {
        fileNotFound(context,exchange);
    } else if (!checkEtag(exchange,dbsfile)) {
        sendBinaryContent(context,gridFSBucket,dbsfile,exchange);
    }

    next(exchange,context);
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public void deleteAllDocuments() {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.drop();

    MongoDatabase db = mongoClient.getDatabase(database);
    MongoCollection<Document> coll = db.getCollection(rawCollectionName);
    coll.deleteMany(new Document());
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public List<AssociatedDocument> getAssociatedDocuments(String uniqueId,FetchType fetchType) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    List<AssociatedDocument> assocDocs = new ArrayList<>();
    if (!FetchType.NONE.equals(fetchType)) {
        GridFSFindIterable files = gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId));
        for (GridFSFile file : files) {
            AssociatedDocument ad = loadGridFSToAssociatedDocument(gridFS,file,fetchType);
            assocDocs.add(ad);
        }

    }
    return assocDocs;
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public AssociatedDocument getAssociatedDocument(String uniqueId,String fileName,FetchType fetchType) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    if (!FetchType.NONE.equals(fetchType)) {
        GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName))).first();
        if (null != file) {
            return loadGridFSToAssociatedDocument(gridFS,fetchType);
        }
    }
    return null;
}
Project: lumongo    File: MongoDocumentStorage.java
private AssociatedDocument loadGridFSToAssociatedDocument(GridFSBucket gridFS,GridFSFile file,FetchType fetchType) throws IOException {
    AssociatedDocument.Builder aBuilder = AssociatedDocument.newBuilder();
    aBuilder.setFilename(file.getFilename());
    // "Metadata" in Metadata.newBuilder() below is the protobuf message type;
    // the GridFS file's metadata document is the local variable "metadata".
    Document metadata = file.getMetadata();

    boolean compressed = false;
    if (metadata.containsKey(COMPRESSED_FLAG)) {
        compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
    }

    long timestamp = (long) metadata.remove(TIMESTAMP);

    aBuilder.setCompressed(compressed);
    aBuilder.setTimestamp(timestamp);

    aBuilder.setDocumentUniqueId((String) metadata.remove(DOCUMENT_UNIQUE_ID_KEY));
    for (String field : metadata.keySet()) {
        aBuilder.addMetadata(Metadata.newBuilder().setKey(field).setValue((String) metadata.get(field)));
    }
    }

    if (FetchType.FULL.equals(fetchType)) {

        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        gridFS.downloadToStream(file.getObjectId(), byteArrayOutputStream);
        byte[] bytes = byteArrayOutputStream.toByteArray();
        if (null != bytes) {
            if (compressed) {
                bytes = CommonCompression.uncompressZlib(bytes);
            }
            aBuilder.setDocument(ByteString.copyFrom(bytes));
        }
    }
    aBuilder.setIndexName(indexName);
    return aBuilder.build();
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public List<String> getAssociatedFilenames(String uniqueId) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    ArrayList<String> fileNames = new ArrayList<>();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
            .forEach((Consumer<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> fileNames.add(gridFSFile.getFilename()));

    return fileNames;
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public void deleteAssociatedDocument(String uniqueId,String fileName) {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName)))
            .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getObjectId()));

}
Project: mongo-obj-framework    File: SmofGridStreamManagerImpl.java
@Override
public GridFSFile loadFileMetadata(SmofGridRef ref) {
    final GridFSBucket bucket = pool.getBucket(ref.getBucketName());
    return bucket.find(Filters.eq(Element.ID,ref.getId())).first();
}
Project: mongo-obj-framework    File: Smof.java
public void loadBucket(String bucketName) {
    final GridFSBucket bucket = GridFSBuckets.create(database,bucketName);
    dispatcher.put(bucketName,bucket);
}
Project: mongo-obj-framework    File: SmofDispatcherImpl.java
@Override
   public void put(String bucketName,GridFSBucket bucket) {
    collections.put(bucketName,bucket);
}
Project: mongo-obj-framework    File: SmofDispatcherImpl.java
@Override
   public void dropBucket(String bucketName) {
    final GridFSBucket bucket = collections.getBucket(bucketName);
    bucket.drop();
    collections.dropBucket(bucketName);
}
Project: mongo-obj-framework    File: CollectionsPoolImpl.java
@Override
   public void put(String bucketName,GridFSBucket bucket) {
    fsBuckets.put(bucketName,bucket);
}
Project: mongo-obj-framework    File: CollectionsPoolImpl.java
@Override
   public GridFSBucket getBucket(String bucketName) {
    return fsBuckets.get(bucketName);
}
Project: mongo-obj-framework    File: CollectionsPoolImpl.java
@Override
   public void dropAllBuckets() {
    fsBuckets.values().forEach(GridFSBucket::drop);
    fsBuckets.clear();
}
Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * The actual test method. Creates a temporary file with random contents. Writes
 * <code>size</code> bytes to it and reads it again through
 * {@link MongoDBChunkReadStream}. Finally,checks if the file has been read correctly.
 * @param size the number of bytes to write/read
 * @param chunkSize the GridFS chunk size
 * @param vertx the Vert.x instance
 * @param context the current test context
 */
private void doRead(int size,int chunkSize,TestContext context) {
  Async async = context.async();

  // create a test file in GridFS
  prepareData("test_" + size + ".bin",size,vertx,context.asyncAssertSuccess(filename -> {
    // connect to GridFS
    com.mongodb.async.client.MongoClient client = createAsyncClient();
    com.mongodb.async.client.MongoDatabase db =
        client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
    com.mongodb.async.client.gridfs.GridFSBucket gridfs =
        com.mongodb.async.client.gridfs.GridFSBuckets.create(db);

    // open the test file
    GridFSDownloadStream is = gridfs.openDownloadStream(filename);
    MongoDBChunkReadStream rs = new MongoDBChunkReadStream(is, chunkSize, vertx.getOrCreateContext());

    // read from the test file
    rs.exceptionHandler(context::fail);

    int[] pos = { 0 };

    rs.endHandler(v -> {
      // the file has been completely read
      rs.close();
      context.assertEquals(size,pos[0]);
      async.complete();
    });

    rs.handler(buf -> {
      // check number of read bytes
      if (size - pos[0] > chunkSize) {
        context.assertEquals(chunkSize,buf.length());
      } else {
        context.assertEquals(size - pos[0],buf.length());
      }

      // check file contents
      for (int i = pos[0]; i < pos[0] + buf.length(); ++i) {
        context.assertEquals((byte)(i & 0xFF),buf.getByte(i - pos[0]));
      }

      pos[0] += buf.length();
    });
  }));
}
Project: reactive-hamster    File: UIEngine.java
public void initDB(MongoDatabase db,GridFSBucket gridFS) {
    this.gridFS = gridFS;
    persistedPages = new PersistedPages(engine,db,gridFS);
    super.initDB(db);
}
Project: reactive-hamster    File: PersistedPages.java
public PersistedPages(UIEngine engine,MongoDatabase db,GridFSBucket gridFS) {
    super(engine,"persistedPages");
    this.gridFS = gridFS;
    ensureIndex(false,true,USERID,CREATION_TIME);
}
Project: eds-starter6-mongodb    File: MongoDb.java
public GridFSBucket createBucket(String bucketName) {
    return GridFSBuckets.create(this.getMongoDatabase(),bucketName);
}
Project: restheart    File: GetFileBinaryHandler.java
private void sendBinaryContent(
        final RequestContext context,
        final GridFSBucket gridFSBucket,
        final GridFSFile file,
        final HttpServerExchange exchange)
        throws IOException {
    LOGGER.trace("Filename = {}",file.getFilename());
    LOGGER.trace("Content length = {}",file.getLength());

    if (file.getMetadata() != null
            && file.getMetadata().get("contentType") != null) {
        exchange.getResponseHeaders().put(Headers.CONTENT_TYPE,
                file.getMetadata().get("contentType").toString());
    } else {
        exchange.getResponseHeaders().put(
                Headers.CONTENT_TYPE,APPLICATION_OCTET_STREAM);
    }

    exchange.getResponseHeaders().put(
            Headers.CONTENT_LENGTH,file.getLength());

    exchange.getResponseHeaders().put(
            Headers.CONTENT_DISPOSITION, String.format("inline; filename=\"%s\"", extractFilename(file)));

    exchange.getResponseHeaders().put(
            Headers.CONTENT_TRANSFER_ENCODING,CONTENT_TRANSFER_ENCODING_BINARY);

    ResponseHelper.injectEtagHeader(exchange,file.getMetadata());

    context.setResponseStatusCode(HttpStatus.SC_OK);

    gridFSBucket.downloadToStream(
            file.getId(), exchange.getOutputStream());
}
Project: lumongo    File: MongoDocumentStorage.java
private GridFSBucket createGridFSConnection() {
    MongoDatabase db = mongoClient.getDatabase(database);
    return GridFSBuckets.create(db,ASSOCIATED_FILES);
}
Project: lumongo    File: MongoDocumentStorage.java
public void getAssociatedDocuments(OutputStream outputStream, Document filter) throws IOException {
    Charset charset = Charset.forName("UTF-8");

    GridFSBucket gridFS = createGridFSConnection();
    GridFSFindIterable gridFSFiles = gridFS.find(filter);
    outputStream.write("{\n".getBytes(charset));
    outputStream.write(" \"associatedDocs\": [\n".getBytes(charset));

    boolean first = true;
    for (GridFSFile gridFSFile : gridFSFiles) {
        if (first) {
            first = false;
        }
        else {
            outputStream.write(",\n".getBytes(charset));
        }

        Document metadata = gridFSFile.getMetadata();

        String uniqueId = metadata.getString(DOCUMENT_UNIQUE_ID_KEY);
        String uniqueIdKeyValue = "  { \"uniqueId\": \"" + uniqueId + "\",";
        outputStream.write(uniqueIdKeyValue.getBytes(charset));

        String filename = gridFSFile.getFilename();
        String filenameKeyValue = "\"filename\": \"" + filename + "\",";
        outputStream.write(filenameKeyValue.getBytes(charset));

        Date uploadDate = gridFSFile.getUploadDate();
        String uploadDateKeyValue = "\"uploadDate\": {\"$date\":" + uploadDate.getTime() + "}";
        outputStream.write(uploadDateKeyValue.getBytes(charset));

        metadata.remove(TIMESTAMP);
        metadata.remove(COMPRESSED_FLAG);
        metadata.remove(DOCUMENT_UNIQUE_ID_KEY);
        metadata.remove(FILE_UNIQUE_ID_KEY);

        if (!metadata.isEmpty()) {
            String metaJson = metadata.toJson();
            String metaString = ",\"meta\": " + metaJson;
            outputStream.write(metaString.getBytes(charset));
        }

        outputStream.write(" }".getBytes(charset));

    }
    outputStream.write("\n ]\n}".getBytes(charset));
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public void deleteAssociatedDocuments(String uniqueId) {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
            .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getObjectId()));
}
Project: mongo-obj-framework    File: CollectionsPool.java
void put(String bucketName,GridFSBucket bucket);
Project: mongo-obj-framework    File: CollectionsPool.java
GridFSBucket getBucket(String bucketName);
Project: mongo-obj-framework    File: SmofDispatcher.java
void put(String bucketName,GridFSBucket bucket);
Project: reactive-hamster    File: FileSystemProvider.java
public abstract GridFSBucket getFileSystem();


Example source code for com.mongodb.client.gridfs.model.GridFSFile

Project: georocket    File: MongoDBStoreTest.java
@Override
protected void validateAfterStoreAdd(TestContext context,Vertx vertx,String path,Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);

      GridFSFindIterable files = gridFS.find();

      GridFSFile file = files.first();
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      gridFS.downloadToStream(file.getFilename(),baos);
      String contents = new String(baos.toByteArray(),StandardCharsets.UTF_8);
      context.assertEquals(CHUNK_CONTENT,contents);
    }
    f.complete();
  },handler);
}
Project: reactive-hamster    File: PersistedPages.java
public boolean checkAndCleanup(String userId,String fileName) {
    List<Document> l = query(new Query().equals(USERID,userId).addSortCriteria(CREATION_TIME,false));
    if (l.size() >= maxPersistedPagesPerUser) {
        Document oldest = l.iterator().next();
        if ((System.currentTimeMillis() - oldest.get(CREATION_TIME).getTime()) < minimumDelay) {
            //there have been too many page persistences for this user in a short time, so don't persist
            return false;
        } else {
            //clean up oldest to free space for new persisted page
            gridFS.find(Filters.eq("filename",oldest.get(FILENAME))).forEach(new Block<GridFSFile>() {
                @Override
                public void apply(GridFSFile file) {
                    gridFS.delete(file.getObjectId());
                }
            });
            oldest.delete();
        }
    }
    //create new entry
    Document newOne = createNew();
    newOne.set(USERID,userId);
    newOne.set(FILENAME,fileName);
    newOne.set(CREATION_TIME,new Date());
    newOne.writeToDatabase(false);
    return true;
}
Project: mandrel    File: MongoBlobStore.java
@Override
public void byPages(int pageSize,Callback callback) {
    MongoCursor<GridFSFile> cursor = bucket.find().iterator();
    boolean loop = true;
    try {
        while (loop) {
            List<GridFSFile> files = new ArrayList<>(batchSize);
            int i = 0;
            while (cursor.hasNext() && i < batchSize) {
                files.add(cursor.next());
                i++;
            }
            loop = callback.on(files.stream().map(file -> bucket.openDownloadStream(file.getObjectId())).map(fromFile).collect(Collectors.toList()));
        }
    } finally {
        cursor.close();
    }
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public InputStream getAssociatedDocumentStream(String uniqueId,String fileName) {
    GridFSBucket gridFS = createGridFSConnection();
    GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName))).first();

    if (file == null) {
        return null;
    }

    InputStream is = gridFS.openDownloadStream(file.getObjectId());

    Document metadata = file.getMetadata();
    if (metadata.containsKey(COMPRESSED_FLAG)) {
        boolean compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
        if (compressed) {
            is = new InflaterInputStream(is);
        }
    }

    return is;
}
Project: otus-api    File: FileStoreBucket.java
public Document findMetadata(String oid) {
    GridFSFile first = fileStore.find(eq("_id",new ObjectId(oid))).first();
    if(first == null) {
        return null;
    } else {
        return first.getMetadata();
    }
}
Project: mongo-obj-framework    File: ObjectParser.java
private SmofGridRef toSmofGridRef(BsonDocument refBson) {
    final String bucketName = refBson.getString("bucket").getValue();
    final ObjectId id = refBson.getObjectId("id").getValue();
    final SmofGridRef ref = SmofGridRefFactory.newFromDB(id,bucketName);
    final GridFSFile file = dispatcher.loadMetadata(ref);
    ref.putMetadata(file.getMetadata());
    return ref;
}
Project: mongo-obj-framework    File: SmofGridStreamManagerTest.java
@Test
public final void testMetadata() throws IOException {
    final Document metadata = new Document("randomkey", 45);
    ref.putMetadata(metadata);
    streamManager.uploadFile(ref);
    ref.putMetadata(new Document());
    final GridFSFile file = streamManager.loadFileMetadata(ref);
    assertEquals(metadata, file.getMetadata());
}
Project: reactive-hamster    File: FileAttribute.java
public void deleteFile(Document mo) {
        String filename = mo.get(this);
        if (filename != null) {
            fileSystemProvider.getFileSystem().find(Filters.eq("filename",filename)).forEach(new Block<GridFSFile>() {
                @Override
                public void apply(GridFSFile file) {
                    fileSystemProvider.getFileSystem().delete(file.getObjectId());
                }
            });
            mo.getDataObject().remove(this.getName());
//            mo.writetoDatabase(false);
        }
    }
Project: mongo-java-driver-rx    File: GridFSFindObservableImpl.java
@Override
public Observable<GridFSFile> first() {
    return RxObservables.create(Observables.observe(new Block<SingleResultCallback<GridFSFile>>(){
        @Override
        public void apply(final SingleResultCallback<GridFSFile> callback) {
            wrapped.first(callback);
        }
    }),observableAdapter);
}
Project: mongo-java-driver-rx    File: GridFSDownloadStreamImpl.java
@Override
public Observable<GridFSFile> getGridFSFile() {
    return RxObservables.create(Observables.observe(new Block<SingleResultCallback<GridFSFile>>() {
        @Override
        public void apply(final SingleResultCallback<GridFSFile> callback) {
            wrapped.getGridFSFile(callback);
        }
    }),observableAdapter);
}
Project: mongo-java-driver-reactivestreams    File: GridFSDownloadStreamImpl.java
@Override
public Publisher<GridFSFile> getGridFSFile() {
    return new ObservableToPublisher<GridFSFile>(observe(new Block<SingleResultCallback<GridFSFile>>() {
        @Override
        public void apply(final SingleResultCallback<GridFSFile> callback) {
            wrapped.getGridFSFile(callback);
        }
    }));
}
Project: mongo-java-driver-reactivestreams    File: GridFSFindPublisherImpl.java
@Override
public Publisher<GridFSFile> first() {
    return new ObservableToPublisher<GridFSFile>(observe(new Block<SingleResultCallback<GridFSFile>>(){
        @Override
        public void apply(final SingleResultCallback<GridFSFile> callback) {
            wrapped.first(callback);
        }
    }));
}
Project: restheart    File: GridFsDAO.java
@Override
public OperationResult deleteFile(
        final Database db,
        final String dbName,
        final String bucketName,
        final BsonValue fileId,
        final String requestEtag,
        final boolean checkEtag) {
    final String bucket = extractBucketName(bucketName);

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            db.getDatabase(dbName), bucket);

    GridFSFile file = gridFSBucket
            .find(eq("_id",fileId))
            .limit(1).iterator().tryNext();

    if (file == null) {
        return new OperationResult(HttpStatus.SC_NOT_FOUND);
    } else if (checkEtag) {
        Object oldEtag = file.getMetadata().get("_etag");

        if (oldEtag != null) {
            if (requestEtag == null) {
                return new OperationResult(HttpStatus.SC_CONFLICT,oldEtag);
            } else if (!Objects.equals(oldEtag.toString(),requestEtag)) {
                return new OperationResult(
                        HttpStatus.SC_PRECONDITION_FAILED, oldEtag);
            }
        }
    }

    gridFSBucket.delete(fileId);

    return new OperationResult(HttpStatus.SC_NO_CONTENT);
}
Project: restheart    File: GetFileBinaryHandler.java
@Override
public void handleRequest(
        HttpServerExchange exchange,RequestContext context)
        throws Exception {
    if (context.isInError()) {
        next(exchange,context);
        return;
    }

    LOGGER.trace("GET " + exchange.getRequestURL());
    final String bucket = extractBucketName(context.getCollectionName());

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            MongoDBClientSingleton.getInstance().getClient()
                    .getDatabase(context.getDBName()), bucket);

    GridFSFile dbsfile = gridFSBucket
            .find(eq("_id",context.getDocumentId()))
            .limit(1).iterator().tryNext();

    if (dbsfile == null) {
        fileNotFound(context,exchange);
    } else if (!checkEtag(exchange,dbsfile)) {
        sendBinaryContent(context,gridFSBucket,dbsfile,exchange);
    }

    next(exchange,context);
}
Project: restheart    File: GetFileBinaryHandler.java
private boolean checkEtag(HttpServerExchange exchange,GridFSFile dbsfile) {
    if (dbsfile != null) {
        Object etag;

        if (dbsfile.getMetadata() != null
                && dbsfile.getMetadata().containsKey("_etag")) {
            etag = dbsfile.getMetadata().get("_etag");
        } else {
            etag = null;
        }

        if (etag != null && etag instanceof ObjectId) {
            ObjectId _etag = (ObjectId) etag;

            BsonObjectId __etag = new BsonObjectId(_etag);

            // in case the request contains the IF_NONE_MATCH header with the current etag value,
            // just return 304 NOT_MODIFIED code
            if (RequestHelper.checkReadEtag(exchange,__etag)) {
                exchange.setStatusCode(HttpStatus.SC_NOT_MODIFIED);
                exchange.endExchange();
                return true;
            }
        }
    }

    return false;
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public List<AssociatedDocument> getAssociatedDocuments(String uniqueId,FetchType fetchType) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    List<AssociatedDocument> assocDocs = new ArrayList<>();
    if (!FetchType.NONE.equals(fetchType)) {
        GridFSFindIterable files = gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId));
        for (GridFSFile file : files) {
            AssociatedDocument ad = loadGridFSToAssociatedDocument(gridFS,file,fetchType);
            assocDocs.add(ad);
        }

    }
    return assocDocs;
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public AssociatedDocument getAssociatedDocument(String uniqueId,String fileName,FetchType fetchType) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    if (!FetchType.NONE.equals(fetchType)) {
        GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName))).first();
        if (null != file) {
            return loadGridFSToAssociatedDocument(gridFS,fetchType);
        }
    }
    return null;
}
Project: lumongo    File: MongoDocumentStorage.java
private AssociatedDocument loadGridFSToAssociatedDocument(GridFSBucket gridFS,GridFSFile file,FetchType fetchType) throws IOException {
    AssociatedDocument.Builder aBuilder = AssociatedDocument.newBuilder();
    aBuilder.setFilename(file.getFilename());
    // "Metadata" in Metadata.newBuilder() below is the protobuf message type;
    // the GridFS file's metadata document is the local variable "metadata".
    Document metadata = file.getMetadata();

    boolean compressed = false;
    if (metadata.containsKey(COMPRESSED_FLAG)) {
        compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
    }

    long timestamp = (long) metadata.remove(TIMESTAMP);

    aBuilder.setCompressed(compressed);
    aBuilder.setTimestamp(timestamp);

    aBuilder.setDocumentUniqueId((String) metadata.remove(DOCUMENT_UNIQUE_ID_KEY));
    for (String field : metadata.keySet()) {
        aBuilder.addMetadata(Metadata.newBuilder().setKey(field).setValue((String) metadata.get(field)));
    }
    }

    if (FetchType.FULL.equals(fetchType)) {

        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        gridFS.downloadToStream(file.getObjectId(), byteArrayOutputStream);
        byte[] bytes = byteArrayOutputStream.toByteArray();
        if (null != bytes) {
            if (compressed) {
                bytes = CommonCompression.uncompressZlib(bytes);
            }
            aBuilder.setDocument(ByteString.copyFrom(bytes));
        }
    }
    aBuilder.setIndexName(indexName);
    return aBuilder.build();
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public List<String> getAssociatedFilenames(String uniqueId) throws Exception {
    GridFSBucket gridFS = createGridFSConnection();
    ArrayList<String> fileNames = new ArrayList<>();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
            .forEach((Consumer<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> fileNames.add(gridFSFile.getFilename()));

    return fileNames;
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public void deleteAssociatedDocument(String uniqueId,String fileName) {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName)))
            .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getObjectId()));

}
Project: opensearchserver    File: MongoDbCrawlCache.java
@Override
public long flush(long expiration) throws IOException {
    rwl.r.lock();
    try {
        final Bson filter = expiration == 0 ? Filters.exists("uri") : Filters.lt("_id",new ObjectId(new Date(expiration)));
        indexedCollection.deleteMany(filter);
        for (GridFSFile f : contentGrid.find(filter))
            contentGrid.delete(f.getObjectId());
        long l = metaCollection.deleteMany(filter).getDeletedCount();
        return l;
    } finally {
        rwl.r.unlock();
    }
}
Project: mongo-obj-framework    File: SmofGridStreamManagerImpl.java
@Override
public GridFSFile loadFileMetadata(SmofGridRef ref) {
    final GridFSBucket bucket = pool.getBucket(ref.getBucketName());
    return bucket.find(Filters.eq(Element.ID,ref.getId())).first();
}
Project: mongo-obj-framework    File: SmofDispatcherImpl.java
@Override
   public GridFSFile loadMetadata(SmofGridRef ref) {
    return streamManager.loadFileMetadata(ref);
}
Project: awplab-core    File: BucketStreamResource.java
public GridFSFile gridFSFile() {
    return gridFSFile(database,bucket,objectId);
}
Project: awplab-core    File: LogFile.java
@BeanCodecKey(ignore = true)
public GridFSFile getGridFSFile(MongoDatabase mongoDatabase) {
    return GridFSBuckets.create(mongoDatabase,bucket).find(Filters.eq("_id",fileObjectId)).first();
}
Project: mongo-java-driver-rx    File: GridFSFindObservableImpl.java
@Override
public Observable<GridFSFile> toObservable() {
    return RxObservables.create(Observables.observe(wrapped),observableAdapter);
}
Project: mongo-java-driver-rx    File: GridFSFindObservableImpl.java
@Override
public Subscription subscribe(final Subscriber<? super GridFSFile> s) {
    return toObservable().subscribe(s);
}
Project: mongo-java-driver-reactivestreams    File: GridFSFindPublisherImpl.java
@Override
public void subscribe(final Subscriber<? super GridFSFile> s) {
    new ObservableToPublisher<GridFSFile>(observe(wrapped)).subscribe(s);
}
Project: restheart    File: GetFileBinaryHandler.java
private void sendBinaryContent(
        final RequestContext context,
        final GridFSBucket gridFSBucket,
        final GridFSFile file,
        final HttpServerExchange exchange)
        throws IOException {
    LOGGER.trace("Filename = {}",file.getFilename());
    LOGGER.trace("Content length = {}",file.getLength());

    if (file.getMetadata() != null
            && file.getMetadata().get("contentType") != null) {
        exchange.getResponseHeaders().put(Headers.CONTENT_TYPE,
                file.getMetadata().get("contentType").toString());
    } else {
        exchange.getResponseHeaders().put(
                Headers.CONTENT_TYPE,APPLICATION_OCTET_STREAM);
    }

    exchange.getResponseHeaders().put(
            Headers.CONTENT_LENGTH,file.getLength());

    exchange.getResponseHeaders().put(
            Headers.CONTENT_disPOSITION,String.format("inline; filename=\"%s\"",extractFilename(file)));

    exchange.getResponseHeaders().put(
            Headers.CONTENT_TRANSFER_ENCODING,CONTENT_TRANSFER_ENCODING_BINARY);

    ResponseHelper.injectEtagHeader(exchange,file.getMetadata());

    context.setResponseStatusCode(HttpStatus.SC_OK);

    gridFSBucket.downloadToStream(
            file.getId(),exchange.getoutputStream());
}
Project: restheart    File: GetFileBinaryHandler.java
private String extractFilename(final GridFSFile dbsfile) {
    return dbsfile.getFilename() != null
            ? dbsfile.getFilename()
            : dbsfile.getId().toString();
}
Project: lumongo    File: MongoDocumentStorage.java
public void getAssociatedDocuments(OutputStream outputstream, Document filter) throws IOException {
    Charset charset = Charset.forName("UTF-8");

    GridFSBucket gridFS = createGridFSConnection();
    GridFSFindIterable gridFSFiles = gridFS.find(filter);
    outputstream.write("{\n".getBytes(charset));
    outputstream.write(" \"associatedDocs\": [\n".getBytes(charset));

    boolean first = true;
    for (GridFSFile gridFSFile : gridFSFiles) {
        if (first) {
            first = false;
        }
        else {
            outputstream.write(",\n".getBytes(charset));
        }

        Document metadata = gridFSFile.getMetadata();

        String uniqueId = metadata.getString(DOCUMENT_UNIQUE_ID_KEY);
        String uniqueIdKeyValue = "  { \"uniqueId\": \"" + uniqueId + "\",";
        outputstream.write(uniqueIdKeyValue.getBytes(charset));

        String filename = gridFSFile.getFilename();
        String filenameKeyValue = "\"filename\": \"" + filename + "\",";
        outputstream.write(filenameKeyValue.getBytes(charset));

        Date uploadDate = gridFSFile.getUploadDate();
        String uploadDateKeyValue = "\"uploadDate\": {\"$date\":" + uploadDate.getTime() + "}";
        outputstream.write(uploadDateKeyValue.getBytes(charset));

        metadata.remove(TIMESTAMP);
        metadata.remove(COMPRESSED_FLAG);
        metadata.remove(DOCUMENT_UNIQUE_ID_KEY);
        metadata.remove(FILE_UNIQUE_ID_KEY);

        if (!metadata.isEmpty()) {
            String metaJson = metadata.toJson();
            String metaString = ",\"meta\": " + metaJson;
            outputstream.write(metaString.getBytes(charset));
        }

        outputstream.write(" }".getBytes(charset));

    }
    outputstream.write("\n ]\n}".getBytes(charset));
}
Project: lumongo    File: MongoDocumentStorage.java
@Override
public void deleteAssociatedDocuments(String uniqueId) {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
            .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getObjectId()));
}
Project: awplab-core    File: BucketStreamResource.java
public static GridFSFile gridFSFile(MongoDatabase database, String bucket, ObjectId objectId) {
    return GridFSBuckets.create(database, bucket).find(Filters.eq("_id", objectId)).first();
}
Project: mongo-java-driver-rx    File: GridFSDownloadStream.java
/**
 * Gets the corresponding {@link GridFSFile} for the file being downloaded
 *
 * @return an observable with a single element, the corresponding GridFSFile for the file being downloaded
 */
Observable<GridFSFile> getGridFSFile();
Project: mongo-java-driver-rx    File: GridFSFindObservable.java
/**
 * Helper to return an observable limited to the first result from the query.
 *
 * @return an observable with a single element
 */
Observable<GridFSFile> first();
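Under the hood, first() is just a find against the bucket's files collection capped at one result; the synchronous equivalent makes that concrete. A minimal sketch, assuming a locally running mongod and the 3.x+ synchronous Java driver; the database, bucket, and file id are illustrative:

import com.mongodb.client.MongoClients;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.model.GridFSFile;
import com.mongodb.client.model.Filters;
import org.bson.types.ObjectId;

public class FileMetadataSketch {
    public static void main(String[] args) {
        GridFSBucket bucket = GridFSBuckets.create(
                MongoClients.create("mongodb://localhost:27017").getDatabase("test"));

        ObjectId fileId = new ObjectId("507f1f77bcf86cd799439011"); // illustrative id

        // find() queries the <bucket>.files collection; first() caps the result at one document.
        GridFSFile file = bucket.find(Filters.eq("_id", fileId)).first();
        if (file != null) {
            System.out.println(file.getFilename() + " (" + file.getLength() + " bytes), uploaded "
                    + file.getUploadDate() + ", metadata: " + file.getMetadata());
        }
    }
}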
Project: mongo-java-driver-reactivestreams    File: GridFSDownloadStream.java
/**
 * Gets the corresponding {@link GridFSFile} for the file being downloaded
 *
 * @return a publisher with a single element, the corresponding GridFSFile for the file being downloaded
 */
Publisher<GridFSFile> getGridFSFile();
Project: mongo-java-driver-reactivestreams    File: GridFSFindPublisher.java
/**
 * Helper to return a publisher limited to the first result from the query.
 *
 * @return a publisher with a single element
 */
Publisher<GridFSFile> first();
Project: mongo-obj-framework    File: SmofGridStreamManager.java
GridFSFile loadFileMetadata(SmofGridRef ref);
Project: mongo-obj-framework    File: SmofDispatcher.java
GridFSFile loadMetadata(SmofGridRef ref);

Example source code for com.mongodb.client.gridfs.model.GridFSUploadOptions


Project: mandrel    File: MongoBlobStore.java
@Override
public Uri putBlob(Uri uri, Blob blob) {
    GridFSUploadOptions options = new GridFSUploadOptions();

    Document document = JsonBsonCodec.toBson(mapper, blob.getMetadata());
    options.metadata(document);

    GridFSUploadStream file = bucket.openUploadStream(uri.toString(), options);
    try {
        IoUtils.copy(blob.getPayload().openStream(), file);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    file.close();

    return Uri.create("mongodb://" + databaseName + "/" + bucket.getBucketName() + "/" + file.getFileId().toString());
}
Project: opensearchserver    File: MongoDbCrawlCache.java
@Override
public InputStream store(DownloadItem downloadItem) throws IOException, JSONException {
    rwl.r.lock();
    try {
        final URI uri = downloadItem.getUri();
        if (!uri.equals(this.uri))
            throw new IOException("The URI does not match: " + uri + " / " + this.uri);

        final Document newDocument = Document.parse(downloadItem.getMetaAsJson());
        newDocument.put("uri", uriString);

        final BsonValue id = metaCollection.replaceOne(eq("uri", uriString), newDocument, UPSERT)
                .getUpsertedId();

        final GridFSUploadOptions options = new GridFSUploadOptions().metadata(new Document("_id", id));

        contentGrid.uploadFromStream(id, uriString, downloadItem.getContentInputStream(), options);

        return contentGrid.openDownloadStream(id);
    } finally {
        rwl.r.unlock();
    }
}
Project: mongo-obj-framework    File: SmofGridStreamManagerImpl.java
private void uploadStream(SmofGridRef ref, String name, InputStream stream) {
    final String bucketName = ref.getBucketName();
    final ObjectId id;
    final GridFSBucket bucket;
    Preconditions.checkNotNull(bucketName, "No bucket specified");
    final GridFSUploadOptions options = new GridFSUploadOptions().metadata(ref.getMetadata());
    bucket = pool.getBucket(bucketName);
    id = bucket.uploadFromStream(name, stream, options);
    ref.setId(id);
}
Project: mongo-java-driver-rx    File: GridFSBucketImpl.java
@Override
public Observable<ObjectId> uploadFromStream(final String filename, final AsyncInputStream source,
                                             final GridFSUploadOptions options) {
    return RxObservables.create(Observables.observe(new Block<SingleResultCallback<ObjectId>>() {
        @Override
        public void apply(final SingleResultCallback<ObjectId> callback) {
            wrapped.uploadFromStream(filename, toCallbackAsyncInputStream(source), options, callback);
        }
    }), observableAdapter);
}
Project: mongo-java-driver-rx    File: GridFSBucketImpl.java
@Override
public Observable<Success> uploadFromStream(final BsonValue id, final String filename, final AsyncInputStream source,
                                            final GridFSUploadOptions options) {
    return RxObservables.create(Observables.observe(new Block<SingleResultCallback<Success>>() {
        @Override
        public void apply(final SingleResultCallback<Success> callback) {
            wrapped.uploadFromStream(id, filename, toCallbackAsyncInputStream(source), options, voidToSuccessCallback(callback));
        }
    }), observableAdapter);
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public Publisher<ObjectId> uploadFromStream(final String filename, final AsyncInputStream source,
                                            final GridFSUploadOptions options) {
    return new ObservableToPublisher<ObjectId>(observe(new Block<SingleResultCallback<ObjectId>>() {
        @Override
        public void apply(final SingleResultCallback<ObjectId> callback) {
            wrapped.uploadFromStream(filename, toCallbackAsyncInputStream(source), options, callback);
        }
    }));
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public Publisher<Success> uploadFromStream(final BsonValue id, final String filename, final AsyncInputStream source,
                                           final GridFSUploadOptions options) {
    return new ObservableToPublisher<Success>(observe(new Block<SingleResultCallback<Success>>() {
        @Override
        public void apply(final SingleResultCallback<Success> callback) {
            wrapped.uploadFromStream(id, filename, toCallbackAsyncInputStream(source), options, voidToSuccessCallback(callback));
        }
    }));
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public Publisher<ObjectId> uploadFromStream(final ClientSession clientSession, final String filename,
                                            final AsyncInputStream source, final GridFSUploadOptions options) {
    return new ObservableToPublisher<ObjectId>(observe(new Block<SingleResultCallback<ObjectId>>() {
        @Override
        public void apply(final SingleResultCallback<ObjectId> callback) {
            wrapped.uploadFromStream(clientSession, filename, toCallbackAsyncInputStream(source), options, callback);
        }
    }));
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public Publisher<Success> uploadFromStream(final ClientSession clientSession, final BsonValue id, final String filename,
                                           final AsyncInputStream source, final GridFSUploadOptions options) {
    return new ObservableToPublisher<Success>(observe(new Block<SingleResultCallback<Success>>() {
        @Override
        public void apply(final SingleResultCallback<Success> callback) {
            wrapped.uploadFromStream(clientSession, id, filename, toCallbackAsyncInputStream(source), options, voidToSuccessCallback(callback));
        }
    }));
}
Project: lumongo    File: MongoDocumentStorage.java
private GridFSUploadOptions getGridFSUploadOptions(String uniqueId, String fileName, boolean compress, long timestamp,
        Map<String, String> metadataMap) {
    Document metadata = new Document();
    if (metadataMap != null) {
        for (String key : metadataMap.keySet()) {
            metadata.put(key, metadataMap.get(key));
        }
    }
    metadata.put(TIMESTAMP, timestamp);
    metadata.put(COMPRESSED_FLAG, compress);
    metadata.put(DOCUMENT_UNIQUE_ID_KEY, uniqueId);
    metadata.put(FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName));

    return new GridFSUploadOptions().chunkSizeBytes(1024).metadata(metadata);
}
Project: otus-api    File: FileStoreBucket.java
public FileStoreBucket() {
    gridFSUploadOptions = new GridFSUploadOptions();
    gridFSUploadOptions.chunkSizeBytes(CHUNK_SIZE_BYTES);
}
Project: mongo-java-driver-rx    File: GridFSBucketImpl.java
@Override
public GridFSUploadStream openUploadStream(final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(filename, options), observableAdapter);
}
Project: mongo-java-driver-rx    File: GridFSBucketImpl.java
@Override
public GridFSUploadStream openUploadStream(final BsonValue id, final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(id, filename, options), observableAdapter);
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public GridFSUploadStream openUploadStream(final String filename) {
    return openUploadStream(filename, new GridFSUploadOptions());
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public GridFSUploadStream openUploadStream(final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(filename, options));
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public GridFSUploadStream openUploadStream(final BsonValue id, final String filename) {
    return openUploadStream(id, filename, new GridFSUploadOptions());
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public GridFSUploadStream openUploadStream(final BsonValue id, final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(id, filename, options));
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public GridFSUploadStream openUploadStream(final ClientSession clientSession, final String filename) {
    return openUploadStream(clientSession, filename, new GridFSUploadOptions());
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public GridFSUploadStream openUploadStream(final ClientSession clientSession, final String filename,
                                           final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(clientSession, filename, options));
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public Publisher<ObjectId> uploadFromStream(final String filename, final AsyncInputStream source) {
    return uploadFromStream(filename, source, new GridFSUploadOptions());
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public Publisher<Success> uploadFromStream(final BsonValue id, final String filename, final AsyncInputStream source) {
    return uploadFromStream(id, filename, source, new GridFSUploadOptions());
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public Publisher<ObjectId> uploadFromStream(final ClientSession clientSession, final String filename,
                                            final AsyncInputStream source) {
    return uploadFromStream(clientSession, filename, source, new GridFSUploadOptions());
}
Project: mongo-java-driver-reactivestreams    File: GridFSBucketImpl.java
@Override
public Publisher<Success> uploadFromStream(final ClientSession clientSession, final BsonValue id,
                                           final String filename, final AsyncInputStream source) {
    return uploadFromStream(clientSession, id, filename, source, new GridFSUploadOptions());
}
Project: mongo-java-driver-rx    File: GridFSBucket.java
/**
 * Opens an AsyncOutputStream that the application can write the contents of the file to.
 * <p>
 * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When
 * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection
 * document is created in the files collection.
 * </p>
 *
 * @param filename the filename for the stream
 * @param options  the GridFSUploadOptions
 * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the
 * application will write the contents.
 */
GridFSUploadStream openUploadStream(String filename, GridFSUploadOptions options);
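The contract described in that javadoc is easiest to see with the synchronous driver, whose openUploadStream behaves the same way: chunks are uploaded as you write, and the files collection document only appears on close(). A minimal sketch, assuming a locally running mongod and the 3.x+ synchronous Java driver; the bucket name, file name, and contents are illustrative:

import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.GridFSUploadStream;
import com.mongodb.client.gridfs.model.GridFSUploadOptions;
import org.bson.Document;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class OpenUploadStreamSketch {
    public static void main(String[] args) throws IOException {
        MongoDatabase db = MongoClients.create("mongodb://localhost:27017").getDatabase("test");
        GridFSBucket bucket = GridFSBuckets.create(db, "uploads"); // bucket name is illustrative

        GridFSUploadOptions options = new GridFSUploadOptions()
                .chunkSizeBytes(255 * 1024) // the driver default
                .metadata(new Document("contentType", "text/plain"));

        // Writes are buffered into chunk documents; close() creates the files collection document.
        GridFSUploadStream stream = bucket.openUploadStream("hello.txt", options);
        stream.write("hello gridfs".getBytes(StandardCharsets.UTF_8));
        stream.close();

        System.out.println("uploaded file id: " + stream.getObjectId());
    }
}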
Project: mongo-java-driver-rx    File: GridFSBucket.java
/**
 * Opens an AsyncOutputStream that the application can write the contents of the file to.
 * <p>
 * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When
 * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection
 * document is created in the files collection.
 * </p>
 *
 * @param id the custom id value of the file
 * @param filename the filename for the stream
 * @param options  the GridFSUploadOptions
 * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the
 * application will write the contents.
 */
GridFSUploadStream openUploadStream(BsonValue id, String filename, GridFSUploadOptions options);
Project: mongo-java-driver-rx    File: GridFSBucket.java
/**
 * Uploads the contents of the given {@code AsyncInputStream} to a GridFS bucket.
 * <p>
 * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
 * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
 * </p>
 *
 * @param filename the filename for the stream
 * @param source   the Stream providing the file data
 * @param options  the GridFSUploadOptions
 * @return an observable with a single element, the ObjectId of the uploaded file.
 */
Observable<ObjectId> uploadFromStream(String filename, AsyncInputStream source, GridFSUploadOptions options);
Project: mongo-java-driver-rx    File: GridFSBucket.java
/**
 * Uploads the contents of the given {@code AsyncInputStream} to a GridFS bucket.
 * <p>
 * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
 * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
 * </p>
 *
 * @param id       the custom id value of the file
 * @param filename the filename for the stream
 * @param source   the Stream providing the file data
 * @param options  the GridFSUploadOptions
 * @return an observable with a single element, representing the successful upload of the source.
 */
Observable<Success> uploadFromStream(BsonValue id, String filename, AsyncInputStream source, GridFSUploadOptions options);
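To make the two upload shapes concrete, here is a minimal synchronous sketch of uploadFromStream with a caller-supplied id, again assuming the 3.x+ synchronous driver; the id, filename, and metadata values are illustrative. Because the caller already knows the id, there is no ObjectId to return, which is why the rx variant completes with Success instead:

import com.mongodb.client.MongoClients;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.model.GridFSUploadOptions;
import org.bson.BsonString;
import org.bson.Document;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class UploadFromStreamSketch {
    public static void main(String[] args) {
        GridFSBucket bucket = GridFSBuckets.create(
                MongoClients.create("mongodb://localhost:27017").getDatabase("test"), "uploads");

        InputStream source = new ByteArrayInputStream(
                "file body".getBytes(StandardCharsets.UTF_8));
        GridFSUploadOptions options = new GridFSUploadOptions()
                .metadata(new Document("uploadedBy", "editor")); // illustrative metadata

        // With a caller-supplied id the method returns nothing: the id is already known.
        bucket.uploadFromStream(new BsonString("my-custom-id"), "notes.txt", source, options);
    }
}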
Project: mongo-java-driver-reactivestreams    File: GridFSBucket.java
/**
 * Opens an AsyncOutputStream that the application can write the contents of the file to.
 * <p>
 * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When
 * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection
 * document is created in the files collection.
 * </p>
 *
 * @param clientSession the client session with which to associate this operation
 * @param filename the filename for the stream
 * @param options  the GridFSUploadOptions
 * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the
 * application will write the contents.
 * @mongodb.server.release 3.6
 * @since 1.7
 */
GridFSUploadStream openUploadStream(ClientSession clientSession, String filename, GridFSUploadOptions options);
Project: mongo-java-driver-reactivestreams    File: GridFSBucket.java
/**
 * Opens an AsyncOutputStream that the application can write the contents of the file to.
 * <p>
 * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When
 * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection
 * document is created in the files collection.
 * </p>
 *
 * @param clientSession the client session with which to associate this operation
 * @param id the custom id value of the file
 * @param filename the filename for the stream
 * @param options  the GridFSUploadOptions
 * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the
 * application will write the contents.
 * @mongodb.server.release 3.6
 * @since 1.7
 */
GridFSUploadStream openUploadStream(ClientSession clientSession, BsonValue id, String filename, GridFSUploadOptions options);
Project: mongo-java-driver-reactivestreams    File: GridFSBucket.java
/**
 * Uploads the contents of the given {@code AsyncInputStream} to a GridFS bucket.
 * <p>
 * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
 * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
 * </p>
 *
 * @param filename the filename for the stream
 * @param source   the Stream providing the file data
 * @param options  the GridFSUploadOptions
 * @return a publisher with a single element, the ObjectId of the uploaded file.
 */
Publisher<ObjectId> uploadFromStream(String filename, AsyncInputStream source, GridFSUploadOptions options);
Project: mongo-java-driver-reactivestreams    File: GridFSBucket.java
/**
 * Uploads the contents of the given {@code AsyncInputStream} to a GridFS bucket.
 * <p>
 * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
 * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
 * </p>
 *
 * @param id       the custom id value of the file
 * @param filename the filename for the stream
 * @param source   the Stream providing the file data
 * @param options  the GridFSUploadOptions
 * @return a publisher with a single element, representing the successful upload of the source.
 */
Publisher<Success> uploadFromStream(BsonValue id, String filename, AsyncInputStream source, GridFSUploadOptions options);
Project: mongo-java-driver-reactivestreams    File: GridFSBucket.java
/**
 * Uploads the contents of the given {@code AsyncInputStream} to a GridFS bucket.
 * <p>
 * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
 * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
 * </p>
 *
 * @param clientSession the client session with which to associate this operation
 * @param filename the filename for the stream
 * @param source   the Stream providing the file data
 * @param options  the GridFSUploadOptions
 * @return a publisher with a single element, the ObjectId of the uploaded file.
 * @mongodb.server.release 3.6
 * @since 1.7
 */
Publisher<ObjectId> uploadFromStream(ClientSession clientSession, String filename, AsyncInputStream source, GridFSUploadOptions options);
Project: mongo-java-driver-reactivestreams    File: GridFSBucket.java
/**
 * Uploads the contents of the given {@code AsyncInputStream} to a GridFS bucket.
 * <p>
 * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
 * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
 * </p>
 *
 * @param clientSession the client session with which to associate this operation
 * @param id       the custom id value of the file
 * @param filename the filename for the stream
 * @param source   the Stream providing the file data
 * @param options  the GridFSUploadOptions
 * @return a publisher with a single element, representing the successful upload of the source.
 * @mongodb.server.release 3.6
 * @since 1.7
 */
Publisher<Success> uploadFromStream(ClientSession clientSession, BsonValue id, String filename, AsyncInputStream source, GridFSUploadOptions options);
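These publishers are cold: nothing is uploaded until a Subscriber subscribes and requests an element. A minimal sketch of driving the reactive-streams variant, assuming the 1.x reactive driver; AsyncStreamHelper.toAsyncInputStream is the byte-array helper I would expect in com.mongodb.reactivestreams.client.gridfs.helpers, and all names are illustrative:

import com.mongodb.reactivestreams.client.MongoClients;
import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;
import com.mongodb.reactivestreams.client.gridfs.helpers.AsyncStreamHelper;
import org.bson.types.ObjectId;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

import java.nio.charset.StandardCharsets;

public class ReactiveUploadSketch {
    public static void main(String[] args) throws InterruptedException {
        GridFSBucket bucket = GridFSBuckets.create(
                MongoClients.create("mongodb://localhost:27017").getDatabase("test"));

        Publisher<ObjectId> upload = bucket.uploadFromStream("hello.txt",
                AsyncStreamHelper.toAsyncInputStream("hello".getBytes(StandardCharsets.UTF_8)));

        // Nothing is sent to the server until the subscriber requests an element.
        upload.subscribe(new Subscriber<ObjectId>() {
            @Override public void onSubscribe(Subscription s) { s.request(1); }
            @Override public void onNext(ObjectId id) { System.out.println("uploaded: " + id); }
            @Override public void onError(Throwable t) { t.printStackTrace(); }
            @Override public void onComplete() { System.out.println("done"); }
        });

        Thread.sleep(2000); // crude wait for the async upload in a throwaway demo
    }
}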

This wraps up our discussion of Node.js file upload with Express 4, MongoDB, GridFS, and GridFS-Stream. Thank you for reading. For more on the example source code for com.mongodb.client.gridfs.GridFSBuckets, com.mongodb.client.gridfs.GridFSBucket, com.mongodb.client.gridfs.model.GridFSFile, and com.mongodb.client.gridfs.model.GridFSUploadOptions, you can search this site.
