In this article we cover Node.js file upload in detail and answer related questions about Express 4, MongoDB, GridFS and GridFS-Stream. In addition, we include example source code for com.mongodb.client.gridfs.GridFSBuckets, com.mongodb.client.gridfs.GridFSBucket, com.mongodb.client.gridfs.model.GridFSFile and com.mongodb.client.gridfs.model.GridFSUploadOptions.
Contents:
- Node.js file upload (Express 4, MongoDB, GridFS, GridFS-Stream)
- Example source code for com.mongodb.client.gridfs.GridFSBuckets
- Example source code for com.mongodb.client.gridfs.GridFSBucket
- Example source code for com.mongodb.client.gridfs.model.GridFSFile
- Example source code for com.mongodb.client.gridfs.model.GridFSUploadOptions
Node.js file upload (Express 4, MongoDB, GridFS, GridFS-Stream)
I am trying to set up a file API in my node.js application. My goal is to write the file stream straight into GridFS without first storing the file on disk. The create code seems to work: I can save uploads to GridFS. The problem is reading the files back. When I download a saved file through a web browser window, I see the file contents wrapped like this:

------WebKitFormBoundarye38W9pfG1wiA100l
Content-Disposition: form-data; name="file"; filename="myfile.txt"
Content-Type: text/javascript

***File contents here***

------WebKitFormBoundarye38W9pfG1wiA100l--

So my question is: what do I need to do to strip the boundary information out of the file stream before it is saved to GridFS? Here is the code I am using:
'use strict';

var mongoose = require('mongoose');
var _ = require('lodash');
var Grid = require('gridfs-stream');
Grid.mongo = mongoose.mongo;
var gfs = new Grid(mongoose.connection.db);

// I think this works. I see the file record in fs.files
exports.create = function(req, res) {
  var fileId = new mongoose.Types.ObjectId();

  var writeStream = gfs.createWriteStream({
    _id: fileId,
    filename: req.query.name,
    mode: 'w',
    content_type: req.query.type,
    metadata: {
      uploadedBy: req.user._id,
    }
  });

  writeStream.on('finish', function() {
    return res.status(200).send({
      message: fileId.toString()
    });
  });

  req.pipe(writeStream);
};

// File data is returned, but it's wrapped with
// WebKitFormBoundary and has headers.
exports.read = function(req, res) {
  gfs.findOne({ _id: req.params.id }, function (err, file) {
    if (err) return res.status(400).send(err);

    // With this commented out, my browser will prompt
    // me to download the raw file where I can see the
    // webkit boundary and request headers
    //res.writeHead(200, { 'Content-Type': file.contentType });

    var readstream = gfs.createReadStream({
      _id: req.params.id
      // I also tried this way:
      //_id: file._id
    });

    readstream.pipe(res);
  });
};
As an aside, I am not currently using any middleware for these routes, but I am open to doing so. I just do not want the file to hit disk before it is sent to GridFS.
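(Not part of the original question.) If middleware is acceptable, one common approach is to let a multipart parser strip the boundary framing before the data reaches GridFS. The sketch below assumes multer 1.x with in-memory storage, so the upload still never touches disk; the createWithMulter export and the form field name "file" are illustrative, and gfs is the same gridfs-stream instance created above.

var multer = require('multer'); // assumed dependency, not in the original code
var upload = multer({ storage: multer.memoryStorage() }); // buffer in memory, never on disk

// Multer parses the multipart body, so req.file.buffer contains only the
// file bytes, without the WebKitFormBoundary framing or part headers.
exports.createWithMulter = [upload.single('file'), function(req, res) {
  var fileId = new mongoose.Types.ObjectId();
  var writeStream = gfs.createWriteStream({
    _id: fileId,
    filename: req.file.originalname,
    mode: 'w',
    content_type: req.file.mimetype
  });
  writeStream.on('finish', function() {
    res.status(200).send({ message: fileId.toString() });
  });
  writeStream.end(req.file.buffer); // write the parsed file contents to GridFS
}];

Mounted with, for example, app.post('/files', exports.createWithMulter); Express 4 accepts an array of handlers, so the multer middleware runs before the route body.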
Edit:

Per @fardjad, I added the node-multiparty module for multipart/form-data parsing, and that approach works. However, when I download an uploaded file and compare it (as text) with the original, there are many encoding differences, and the downloaded file will not open. Here is my latest attempt:
'use strict';

var mongoose = require('mongoose');
var _ = require('lodash');
var multiparty = require('multiparty');
var Grid = require('gridfs-stream');
Grid.mongo = mongoose.mongo;
var gfs = new Grid(mongoose.connection.db);

exports.create = function(req, res) {
  var form = new multiparty.Form();
  var fileId = new mongoose.Types.ObjectId();

  form.on('error', function(err) {
    console.log('Error parsing form: ' + err.stack);
  });

  form.on('part', function(part) {
    if (part.filename) {
      var writeStream = gfs.createWriteStream({
        _id: fileId,
        filename: part.filename,
        mode: 'w',
        content_type: part.headers['content-type'],
        metadata: {
          uploadedBy: req.user._id,
        }
      })
      part.pipe(writeStream);
    }
  });

  // Close emitted after form parsed
  form.on('close', function() {
    return res.status(200).send({
      message: fileId.toString()
    });
  });

  // Parse req
  form.parse(req);
};

exports.read = function(req, res) {
  gfs.findOne({ _id: req.params.id }, function (err, file) {
    if (err) return res.status(400).send(err);

    res.writeHead(200, { 'Content-Type': file.contentType });

    var readstream = gfs.createReadStream({
      _id: req.params.id
    });

    readstream.pipe(res);
  });
};
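One detail worth noting about this attempt (a hedged observation, not the cause the author eventually found, which appears in the answer below): the response is sent on the form's close event, which only means multiparty has finished parsing; the GridFS write stream may still be flushing its last chunk. A safer pattern is to respond from the write stream's own finish event, as the very first version of exports.create already did:

form.on('part', function(part) {
  if (part.filename) {
    var writeStream = gfs.createWriteStream({
      _id: fileId,
      filename: part.filename,
      mode: 'w',
      content_type: part.headers['content-type']
    });
    // Reply only after GridFS has persisted every chunk,
    // not merely after multiparty has finished parsing the request.
    writeStream.on('finish', function() {
      res.status(200).send({ message: fileId.toString() });
    });
    part.pipe(writeStream);
  }
});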
Final edit:

Here is a simple implementation that I copied from another developer and modified. It works for me. (I am still trying to figure out why it did not work in my original Express app; something seemed to be interfering.)
https://gist.github.com/pos1tron/094ac862c9d116096572
var Busboy = require('busboy');   // 0.2.9
var express = require('express'); // 4.12.3
var mongo = require('mongodb');   // 2.0.31
var Grid = require('gridfs-stream'); // 1.1.1

var app = express();
var server = app.listen(9002);

var db = new mongo.Db('test', new mongo.Server('127.0.0.1', 27017));
var gfs;

db.open(function(err, db) {
  if (err) throw err;
  gfs = Grid(db, mongo);
});

app.post('/file', function(req, res) {
  var busboy = new Busboy({ headers: req.headers });
  var fileId = new mongo.ObjectId();

  busboy.on('file', function(fieldname, file, filename, encoding, mimetype) {
    console.log('got file', filename, mimetype, encoding);
    var writeStream = gfs.createWriteStream({
      _id: fileId,
      filename: filename,
      mode: 'w',
      content_type: mimetype,
    });
    file.pipe(writeStream);
  }).on('finish', function() {
    // show a link to the uploaded file
    res.writeHead(200, {'content-type': 'text/html'});
    res.end('<a href="/file/' + fileId.toString() + '">download file</a>');
  });

  req.pipe(busboy);
});

app.get('/', function(req, res) {
  // show a file upload form
  res.writeHead(200, {'content-type': 'text/html'});
  res.end(
    '<form action="/file" enctype="multipart/form-data" method="post">'+
    '<input type="file" name="file"><br>'+
    '<input type="submit" value="Upload">'+
    '</form>'
  );
});

app.get('/file/:id', function(req, res) {
  gfs.findOne({ _id: req.params.id }, function (err, file) {
    if (err) return res.status(400).send(err);
    if (!file) return res.status(404).send('');

    res.set('Content-Type', file.contentType);
    res.set('Content-Disposition', 'attachment; filename="' + file.filename + '"');

    var readstream = gfs.createReadStream({
      _id: file._id
    });

    readstream.on("error", function(err) {
      console.log("Got error while processing stream " + err.message);
      res.end();
    });

    readstream.pipe(res);
  });
});
Answer 1

See my comment on the issue you created on GitHub. I ran into the same problem, but I managed to debug it. I narrowed it down to the point where I was sure some Express middleware was modifying the request. I disabled the middleware one by one until I found the unlikely culprit: connect-livereload.

I commented out app.use(require('connect-livereload')()); and the problem went away. I believe it was injecting the livereload script into the response (a binary image file).
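If connect-livereload is still wanted for the HTML pages, a possible compromise (a sketch only; the ignore option and its matching rules should be verified against the installed connect-livereload version) is to keep the injector away from the binary file routes:

// Assumed route prefix '/file' for the GridFS download API.
app.use(require('connect-livereload')({
  ignore: [/^\/file\//] // do not inject the livereload snippet into file downloads
}));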
Example source code for com.mongodb.client.gridfs.GridFSBuckets
/** * Create a file in GridFS with the given filename and write * some random data to it. * @param filename the name of the file to create * @param size the number of random bytes to write * @param vertx the Vert.x instance * @param handler a handler that will be called when the file * has been written */ private void prepareData(String filename,int size,Vertx vertx,Handler<AsyncResult<String>> handler) { vertx.<String>executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); try (GridFsuploadStream os = gridFS.openUploadStream(filename)) { for (int i = 0; i < size; ++i) { os.write((byte)(i & 0xFF)); } } } f.complete(filename); },handler); }
@Override protected void validateAfterStoreAdd(TestContext context,String path,Handler<AsyncResult<Void>> handler) { vertx.executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); GridFSFindIterable files = gridFS.find(); GridFSFile file = files.first(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); gridFS.downloadToStream(file.getFilename(),baos); String contents = new String(baos.toByteArray(),StandardCharsets.UTF_8); context.assertEquals(CHUNK_CONTENT,contents); } f.complete(); },handler); }
@Test public void testWriteReplacePage() throws IOException,ClassNotFoundException { HamsterPage page = createTestPage(testUIEngine); Document mo=testCollection.createNew(); TestComponent c = new TestComponent(); c.m=mo; c.test="test"; page.addComponent(c); testUIEngine.initDB(db,GridFSBuckets.create(db)); testUIEngine.persistPage(page); HamsterPage p2= testUIEngine.resumePage( page.getId()); assertNotNull(p2); TestComponent t=(TestComponent) p2.components.get(0); assertNotNull(t); assertEquals("test",t.test); assertNotNull(t.m); assertTrue(t.m == mo); }
@Override public void init(String configString) throws IOException { rwl.w.lock(); try { closeNoLock(); final MongoClientURI connectionString = new MongoClientURI(configString); mongoClient = new MongoClient(connectionString); final MongoDatabase database = mongoClient.getDatabase( connectionString.getDatabase() == null ? DEFAULT_DATABASE : connectionString.getDatabase()); MetaCollection = database.getCollection(Meta_COLLECTION); MetaCollection.createIndex(Document.parse("{\"uri\":1}")); indexedCollection = database.getCollection(INDEXED_COLLECTION); indexedCollection.createIndex(Document.parse("{\"uri\":1}")); contentGrid = GridFSBuckets.create(database); } finally { rwl.w.unlock(); } }
private void deleteLog(Date olderThan) { MongoCollection<Log> logCollection = mongoService.getMongoClient().getDatabase(database).getCollection(collection,Log.class); Bson filter = Filters.lt("timeStamp",olderThan); logCollection.find(filter).forEach((Block<? super Log>) log -> { log.getLogFiles().forEach(logFile -> { GridFSBucket gridFSBucket = GridFSBuckets.create(mongoService.getMongoClient().getDatabase(database),logFile.getBucket()); gridFSBucket.delete(logFile.getFileObjectId()); }); }); DeleteResult deleteResult = logCollection.deleteMany(filter); }
public BucketStreamResource(MongoDatabase database,String bucket,ObjectId objectId) { super((StreamSource) () -> { GridFSBucket gridFSBucket = GridFSBuckets.create(database,bucket); return gridFSBucket.openDownloadStream(objectId); },gridFSFile(database,bucket,objectId).getFilename()); this.database = database; this.bucket = bucket; this.objectId = objectId; }
public void save(MongoDatabase database) throws IOException { if (temporaryFile == null) return; try (FileInputStream fileInputStream = new FileInputStream(temporaryFile)) { ObjectId objectId = GridFSBuckets.create(database,bucket).uploadFromStream(temporaryFile.getName(),fileInputStream); this.setFileObjectId(objectId); this.setBucket(bucket); } finally { temporaryFile.close(); } temporaryFile = null; }
/** * Connect to MongoDB and get the GridFS chunk size * @param vertx the Vert.x instance * @param handler a handler that will be called with the chunk size */ private void getChunkSize(Vertx vertx,Handler<AsyncResult<Integer>> handler) { vertx.<Integer>executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); f.complete(gridFS.getChunkSizeBytes()); } },handler); }
@Override protected void prepareData(TestContext context,Handler<AsyncResult<String>> handler) { String filename = PathUtils.join(path,ID); vertx.<String>executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); byte[] contents = CHUNK_CONTENT.getBytes(StandardCharsets.UTF_8); gridFS.uploadFromStream(filename,new ByteArrayInputStream(contents)); f.complete(filename); } },handler); }
@Override protected void validateAfterStoreDelete(TestContext context,Handler<AsyncResult<Void>> handler) { vertx.executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); GridFSFindIterable files = gridFS.find(); context.assertTrue(Iterables.isEmpty(files)); } f.complete(); },handler); }
@Override public UIEngine createTestEngine() { if(testEngine !=null) { testEngine.destroy(); } loader = new HamsterLoader(); UIEngine engine = createUITestEngine(); engine.initDB(db,GridFSBuckets.create(db)); loader.setEngine(engine); testEngine=engine; CometProcessor.setEngine(engine); return engine; }
protected void initDB(String dbname) { //set up the persistence layer //Connect to the local MongoDB instance MongoClient m = new MongoClient(); //get the DB with the given Name MongoDatabase chatDB = m.getDatabase(dbname); //initialize our collections DocumentCollections.init(this,chatDB); //set up GridFs for storing files GridFSBucket fs = GridFSBuckets.create(chatDB,"persistedPages"); //the base class UIEngine needs the gridFS for //persisting sessions super.initDB(chatDB,fs); }
public MongoBlobStore(TaskContext context,MongoClient mongoClient,String databaseName,String bucketName,int batchSize) { super(context); this.mongoClient = mongoClient; this.databaseName = databaseName; this.bucket = GridFSBuckets.create(mongoClient.getDatabase(databaseName),bucketName); this.batchSize = batchSize; this.mapper = new ObjectMapper(); }
@Override public OperationResult deleteFile( final Database db,final String dbname,final String bucketName,final BsonValue fileId,final String requestEtag,final boolean checkEtag) { final String bucket = extractBucketName(bucketName); GridFSBucket gridFSBucket = GridFSBuckets.create( db.getDatabase(dbname),bucket); GridFSFile file = gridFSBucket .find(eq("_id",fileId)) .limit(1).iterator().tryNext(); if (file == null) { return new OperationResult(HttpStatus.SC_NOT_FOUND); } else if (checkEtag) { Object oldEtag = file.getMetadata().get("_etag"); if (oldEtag != null) { if (requestEtag == null) { return new OperationResult(HttpStatus.SC_CONFLICT,oldEtag); } else if (!Objects.equals(oldEtag.toString(),requestEtag)) { return new OperationResult( HttpStatus.SC_PRECONDITION_Failed,oldEtag); } } } gridFSBucket.delete(fileId); return new OperationResult(HttpStatus.SC_NO_CONTENT); }
@Override public void handleRequest( HttpServerExchange exchange,RequestContext context) throws Exception { if (context.isInError()) { next(exchange,context); return; } LOGGER.trace("GET " + exchange.getRequestURL()); final String bucket = extractBucketName(context.getCollectionName()); GridFSBucket gridFSBucket = GridFSBuckets.create( MongoDBClientSingleton.getInstance().getClient() .getDatabase(context.getdbname()),bucket); GridFSFile dbsfile = gridFSBucket .find(eq("_id",context.getDocumentId())) .limit(1).iterator().tryNext(); if (dbsfile == null) { fileNotFound(context,exchange); } else if (!checkEtag(exchange,dbsfile)) { sendBinaryContent(context,gridFSBucket,dbsfile,exchange); } next(exchange,context); }
@PostConstruct
public void setUp() {
    fileStore = GridFSBuckets.create(db, FILESTORE);
}
public void loadBucket(String bucketName) {
    final GridFSBucket bucket = GridFSBuckets.create(database, bucketName);
    dispatcher.put(bucketName, bucket);
}
@BeanCodecKey(ignore = true)
public GridFSFile getGridFSFile(MongoDatabase mongoDatabase) {
    return GridFSBuckets.create(mongoDatabase, bucket).find(Filters.eq("_id", fileObjectId)).first();
}
/** * The actual test method. Creates a temporary file with random contents. Writes * <code>size</code> bytes to it and reads it again through * {@link MongoDBChunkReadStream}. Finally,checks if the file has been read correctly. * @param size the number of bytes to write/read * @param chunkSize the GridFS chunk size * @param vertx the Vert.x instance * @param context the current test context */ private void doRead(int size,int chunkSize,TestContext context) { Async async = context.async(); // create a test file in GridFS prepareData("test_" + size + ".bin",size,vertx,context.asyncAssertSuccess(filename -> { // connect to GridFS com.mongodb.async.client.MongoClient client = createAsyncclient(); com.mongodb.async.client.MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); com.mongodb.async.client.gridfs.GridFSBucket gridfs = com.mongodb.async.client.gridfs.GridFSBuckets.create(db); // open the test file GridFSDownloadStream is = gridfs.openDownloadStream(filename); MongoDBChunkReadStream rs = new MongoDBChunkReadStream(is,chunkSize,vertx.getorCreateContext()); // read from the test file rs.exceptionHandler(context::fail); int[] pos = { 0 }; rs.endHandler(v -> { // the file has been completely read rs.close(); context.assertEquals(size,pos[0]); async.complete(); }); rs.handler(buf -> { // check number of read bytes if (size - pos[0] > chunkSize) { context.assertEquals(chunkSize,buf.length()); } else { context.assertEquals(size - pos[0],buf.length()); } // check file contents for (int i = pos[0]; i < pos[0] + buf.length(); ++i) { context.assertEquals((byte)(i & 0xFF),buf.getByte(i - pos[0])); } pos[0] += buf.length(); }); })); }
public GridFSBucket createBucket(String bucketName) {
    return GridFSBuckets.create(this.getMongoDatabase(), bucketName);
}
private GridFSBucket createGridFSConnection() {
    MongoDatabase db = mongoClient.getDatabase(database);
    return GridFSBuckets.create(db, ASSOCIATED_FILES);
}
public static GridFSFile gridFSFile(MongoDatabase database, String bucket, ObjectId objectId) {
    return GridFSBuckets.create(database, bucket).find(Filters.eq("_id", objectId)).first();
}
Example source code for com.mongodb.client.gridfs.GridFSBucket
/** * Create a file in GridFS with the given filename and write * some random data to it. * @param filename the name of the file to create * @param size the number of random bytes to write * @param vertx the Vert.x instance * @param handler a handler that will be called when the file * has been written */ private void prepareData(String filename,int size,Vertx vertx,Handler<AsyncResult<String>> handler) { vertx.<String>executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); try (GridFsuploadStream os = gridFS.openUploadStream(filename)) { for (int i = 0; i < size; ++i) { os.write((byte)(i & 0xFF)); } } } f.complete(filename); },handler); }
@Override protected void validateAfterStoreAdd(TestContext context,String path,Handler<AsyncResult<Void>> handler) { vertx.executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); GridFSFindIterable files = gridFS.find(); GridFSFile file = files.first(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); gridFS.downloadToStream(file.getFilename(),baos); String contents = new String(baos.toByteArray(),StandardCharsets.UTF_8); context.assertEquals(CHUNK_CONTENT,contents); } f.complete(); },handler); }
@Override public InputStream getAssociatedDocumentStream(String uniqueId,String fileName) { GridFSBucket gridFS = createGridFSConnection(); GridFSFile file = gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + FILE_UNIQUE_ID_KEY,getGridFsId(uniqueId,fileName))).first(); if (file == null) { return null; } InputStream is = gridFS.openDownloadStream(file.getobjectId()); ; Document Metadata = file.getMetadata(); if (Metadata.containsKey(COMpressed_FLAG)) { boolean compressed = (boolean) Metadata.remove(COMpressed_FLAG); if (compressed) { is = new InflaterInputStream(is); } } return is; }
private void uploadStream(SmofGridRef ref, String name, InputStream stream) {
    final String bucketName = ref.getBucketName();
    final ObjectId id;
    final GridFSBucket bucket;
    Preconditions.checkNotNull(bucketName, "No bucket specified");
    final GridFSUploadOptions options = new GridFSUploadOptions().metadata(ref.getMetadata());
    bucket = pool.getBucket(bucketName);
    id = bucket.uploadFromStream(name, stream, options);
    ref.setId(id);
}
@Override
public InputStream download(SmofGridRef ref) {
    final String bucketName = ref.getBucketName();
    final ObjectId id = ref.getId();
    Preconditions.checkArgument(id != null, "No download source found");
    Preconditions.checkArgument(bucketName != null, "No bucket specified");
    final GridFSBucket bucket = pool.getBucket(bucketName);
    return bucket.openDownloadStream(id);
}
@Override
public void drop(SmofGridRef ref) {
    final String bucketName = ref.getBucketName();
    final ObjectId id = ref.getId();
    Preconditions.checkArgument(id != null, "No bucket specified");
    final GridFSBucket bucket = pool.getBucket(bucketName);
    bucket.delete(id);
}
private void deleteLog(Date olderThan) { MongoCollection<Log> logCollection = mongoService.getMongoClient().getDatabase(database).getCollection(collection,Log.class); Bson filter = Filters.lt("timeStamp",olderThan); logCollection.find(filter).forEach((Block<? super Log>) log -> { log.getLogFiles().forEach(logFile -> { GridFSBucket gridFSBucket = GridFSBuckets.create(mongoService.getMongoClient().getDatabase(database),logFile.getBucket()); gridFSBucket.delete(logFile.getFileObjectId()); }); }); DeleteResult deleteResult = logCollection.deleteMany(filter); }
public BucketStreamResource(MongoDatabase database,String bucket,ObjectId objectId) { super((StreamSource) () -> { GridFSBucket gridFSBucket = GridFSBuckets.create(database,bucket); return gridFSBucket.openDownloadStream(objectId); },gridFSFile(database,bucket,objectId).getFilename()); this.database = database; this.bucket = bucket; this.objectId = objectId; }
/** * Connect to MongoDB and get the GridFS chunk size * @param vertx the Vert.x instance * @param handler a handler that will be called with the chunk size */ private void getChunkSize(Vertx vertx,Handler<AsyncResult<Integer>> handler) { vertx.<Integer>executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); f.complete(gridFS.getChunkSizeBytes()); } },handler); }
@Override protected void prepareData(TestContext context,Handler<AsyncResult<String>> handler) { String filename = PathUtils.join(path,ID); vertx.<String>executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); byte[] contents = CHUNK_CONTENT.getBytes(StandardCharsets.UTF_8); gridFS.uploadFromStream(filename,new ByteArrayInputStream(contents)); f.complete(filename); } },handler); }
@Override protected void validateAfterStoreDelete(TestContext context,Handler<AsyncResult<Void>> handler) { vertx.executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); GridFSFindIterable files = gridFS.find(); context.assertTrue(Iterables.isEmpty(files)); } f.complete(); },handler); }
protected void initDB(String dbname) { //set up the persistence layer //Connect to the local MongoDB instance MongoClient m = new MongoClient(); //get the DB with the given Name MongoDatabase chatDB = m.getDatabase(dbname); //initialize our collections DocumentCollections.init(this,chatDB); //set up GridFs for storing files GridFSBucket fs = GridFSBuckets.create(chatDB,"persistedPages"); //the base class UIEngine needs the gridFS for //persisting sessions super.initDB(chatDB,fs); }
@Override public OperationResult deleteFile( final Database db,final String dbname,final String bucketName,final BsonValue fileId,final String requestEtag,final boolean checkEtag) { final String bucket = extractBucketName(bucketName); GridFSBucket gridFSBucket = GridFSBuckets.create( db.getDatabase(dbname),bucket); GridFSFile file = gridFSBucket .find(eq("_id",fileId)) .limit(1).iterator().tryNext(); if (file == null) { return new OperationResult(HttpStatus.SC_NOT_FOUND); } else if (checkEtag) { Object oldEtag = file.getMetadata().get("_etag"); if (oldEtag != null) { if (requestEtag == null) { return new OperationResult(HttpStatus.SC_CONFLICT,oldEtag); } else if (!Objects.equals(oldEtag.toString(),requestEtag)) { return new OperationResult( HttpStatus.SC_PRECONDITION_Failed,oldEtag); } } } gridFSBucket.delete(fileId); return new OperationResult(HttpStatus.SC_NO_CONTENT); }
@Override public void handleRequest( HttpServerExchange exchange,RequestContext context) throws Exception { if (context.isInError()) { next(exchange,context); return; } LOGGER.trace("GET " + exchange.getRequestURL()); final String bucket = extractBucketName(context.getCollectionName()); GridFSBucket gridFSBucket = GridFSBuckets.create( MongoDBClientSingleton.getInstance().getClient() .getDatabase(context.getdbname()),bucket); GridFSFile dbsfile = gridFSBucket .find(eq("_id",context.getDocumentId())) .limit(1).iterator().tryNext(); if (dbsfile == null) { fileNotFound(context,exchange); } else if (!checkEtag(exchange,dbsfile)) { sendBinaryContent(context,gridFSBucket,dbsfile,exchange); } next(exchange,context); }
@Override public void deleteallDocuments() { GridFSBucket gridFS = createGridFSConnection(); gridFS.drop(); MongoDatabase db = mongoClient.getDatabase(database); MongoCollection<Document> coll = db.getCollection(rawCollectionName); coll.deleteMany(new Document()); }
@Override public List<AssociatedDocument> getAssociatedDocuments(String uniqueId,FetchType fetchType) throws Exception { GridFSBucket gridFS = createGridFSConnection(); List<AssociatedDocument> assocDocs = new ArrayList<>(); if (!FetchType.NONE.equals(fetchType)) { GridFSFindIterable files = gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + DOCUMENT_UNIQUE_ID_KEY,uniqueId)); for (GridFSFile file : files) { AssociatedDocument ad = loadGridFSToAssociatedDocument(gridFS,file,fetchType); assocDocs.add(ad); } } return assocDocs; }
@Override public AssociatedDocument getAssociatedDocument(String uniqueId,String fileName,FetchType fetchType) throws Exception { GridFSBucket gridFS = createGridFSConnection(); if (!FetchType.NONE.equals(fetchType)) { GridFSFile file = gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + FILE_UNIQUE_ID_KEY,fileName))).first(); if (null != file) { return loadGridFSToAssociatedDocument(gridFS,fetchType); } } return null; }
private AssociatedDocument loadGridFSToAssociatedDocument(GridFSBucket gridFS,GridFSFile file,FetchType fetchType) throws IOException { AssociatedDocument.Builder aBuilder = AssociatedDocument.newBuilder(); aBuilder.setFilename(file.getFilename()); Document Metadata = file.getMetadata(); boolean compressed = false; if (Metadata.containsKey(COMpressed_FLAG)) { compressed = (boolean) Metadata.remove(COMpressed_FLAG); } long timestamp = (long) Metadata.remove(TIMESTAMP); aBuilder.setCompressed(compressed); aBuilder.setTimestamp(timestamp); aBuilder.setDocumentUniqueId((String) Metadata.remove(DOCUMENT_UNIQUE_ID_KEY)); for (String field : Metadata.keySet()) { aBuilder.addMetadata(Metadata.newBuilder().setKey(field).setValue((String) Metadata.get(field))); } if (FetchType.FULL.equals(fetchType)) { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); gridFS.downloadToStream(file.getobjectId(),byteArrayOutputStream); byte[] bytes = byteArrayOutputStream.toByteArray(); if (null != bytes) { if (compressed) { bytes = CommonCompression.uncompressZlib(bytes); } aBuilder.setDocument(ByteString.copyFrom(bytes)); } } aBuilder.setIndexName(indexName); return aBuilder.build(); }
@Override public List<String> getAssociatedFilenames(String uniqueId) throws Exception { GridFSBucket gridFS = createGridFSConnection(); ArrayList<String> fileNames = new ArrayList<>(); gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + DOCUMENT_UNIQUE_ID_KEY,uniqueId)) .forEach((Consumer<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> fileNames.add(gridFSFile.getFilename())); return fileNames; }
@Override public void deleteAssociatedDocument(String uniqueId,String fileName) { GridFSBucket gridFS = createGridFSConnection(); gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + FILE_UNIQUE_ID_KEY,fileName))) .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getobjectId())); }
@Override
public GridFSFile loadFileMetadata(SmofGridRef ref) {
    final GridFSBucket bucket = pool.getBucket(ref.getBucketName());
    return bucket.find(Filters.eq(Element.ID, ref.getId())).first();
}
public void loadBucket(String bucketName) {
    final GridFSBucket bucket = GridFSBuckets.create(database, bucketName);
    dispatcher.put(bucketName, bucket);
}
@Override
public void put(String bucketName, GridFSBucket bucket) {
    collections.put(bucketName, bucket);
}
@Override
public void dropBucket(String bucketName) {
    final GridFSBucket bucket = collections.getBucket(bucketName);
    bucket.drop();
    collections.dropBucket(bucketName);
}
@Override
public void put(String bucketName, GridFSBucket bucket) {
    fsBuckets.put(bucketName, bucket);
}
@Override
public GridFSBucket getBucket(String bucketName) {
    return fsBuckets.get(bucketName);
}
@Override
public void dropAllBuckets() {
    fsBuckets.values().forEach(GridFSBucket::drop);
    fsBuckets.clear();
}
/** * The actual test method. Creates a temporary file with random contents. Writes * <code>size</code> bytes to it and reads it again through * {@link MongoDBChunkReadStream}. Finally,checks if the file has been read correctly. * @param size the number of bytes to write/read * @param chunkSize the GridFS chunk size * @param vertx the Vert.x instance * @param context the current test context */ private void doRead(int size,int chunkSize,TestContext context) { Async async = context.async(); // create a test file in GridFS prepareData("test_" + size + ".bin",size,vertx,context.asyncAssertSuccess(filename -> { // connect to GridFS com.mongodb.async.client.MongoClient client = createAsyncclient(); com.mongodb.async.client.MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); com.mongodb.async.client.gridfs.GridFSBucket gridfs = com.mongodb.async.client.gridfs.GridFSBuckets.create(db); // open the test file GridFSDownloadStream is = gridfs.openDownloadStream(filename); MongoDBChunkReadStream rs = new MongoDBChunkReadStream(is,chunkSize,vertx.getorCreateContext()); // read from the test file rs.exceptionHandler(context::fail); int[] pos = { 0 }; rs.endHandler(v -> { // the file has been completely read rs.close(); context.assertEquals(size,pos[0]); async.complete(); }); rs.handler(buf -> { // check number of read bytes if (size - pos[0] > chunkSize) { context.assertEquals(chunkSize,buf.length()); } else { context.assertEquals(size - pos[0],buf.length()); } // check file contents for (int i = pos[0]; i < pos[0] + buf.length(); ++i) { context.assertEquals((byte)(i & 0xFF),buf.getByte(i - pos[0])); } pos[0] += buf.length(); }); })); }
public void initDB(MongoDatabase db, GridFSBucket gridFS) {
    this.gridFS = gridFS;
    persistedPages = new PersistedPages(engine, db, gridFS);
    super.initDB(db);
}
public PersistedPages(UIEngine engine, MongoDatabase db, GridFSBucket gridFS) {
    super(engine, "persistedPages");
    this.gridFS = gridFS;
    ensureIndex(false, true, USERID, CREATION_TIME);
}
public GridFSBucket createBucket(String bucketName) {
    return GridFSBuckets.create(this.getMongoDatabase(), bucketName);
}
private void sendBinaryContent( final RequestContext context,final GridFSBucket gridFSBucket,final GridFSFile file,final HttpServerExchange exchange) throws IOException { LOGGER.trace("Filename = {}",file.getFilename()); LOGGER.trace("Content length = {}",file.getLength()); if (file.getMetadata() != null && file.getMetadata().get("contentType") != null) { exchange.getResponseHeaders().put(Headers.CONTENT_TYPE,file.getMetadata().get("contentType").toString()); } else if (file.getMetadata() != null && file.getMetadata().get("contentType") != null) { exchange.getResponseHeaders().put(Headers.CONTENT_TYPE,file.getMetadata().get("contentType").toString()); } else { exchange.getResponseHeaders().put( Headers.CONTENT_TYPE,APPLICATION_OCTET_STREAM); } exchange.getResponseHeaders().put( Headers.CONTENT_LENGTH,file.getLength()); exchange.getResponseHeaders().put( Headers.CONTENT_disPOSITION,String.format("inline; filename=\"%s\"",extractFilename(file))); exchange.getResponseHeaders().put( Headers.CONTENT_TRANSFER_ENCODING,CONTENT_TRANSFER_ENCODING_BINARY); ResponseHelper.injectEtagHeader(exchange,file.getMetadata()); context.setResponseStatusCode(HttpStatus.SC_OK); gridFSBucket.downloadToStream( file.getId(),exchange.getoutputStream()); }
private GridFSBucket createGridFSConnection() { MongoDatabase db = mongoClient.getDatabase(database); return GridFSBuckets.create(db,ASSOCIATED_FILES); }
public void getAssociatedDocuments(OutputStream outputstream,Document filter) throws IOException { Charset charset = Charset.forName("UTF-8"); GridFSBucket gridFS = createGridFSConnection(); GridFSFindIterable gridFSFiles = gridFS.find(filter); outputstream.write("{\n".getBytes(charset)); outputstream.write(" \"associatedDocs\": [\n".getBytes(charset)); boolean first = true; for (GridFSFile gridFSFile : gridFSFiles) { if (first) { first = false; } else { outputstream.write(",\n".getBytes(charset)); } Document Metadata = gridFSFile.getMetadata(); String uniqueId = Metadata.getString(DOCUMENT_UNIQUE_ID_KEY); String uniquieIdkeyvalue = " { \"uniqueId\": \"" + uniqueId + "\","; outputstream.write(uniquieIdkeyvalue.getBytes(charset)); String filename = gridFSFile.getFilename(); String filenamekeyvalue = "\"filename\": \"" + filename + "\","; outputstream.write(filenamekeyvalue.getBytes(charset)); Date uploadDate = gridFSFile.getUploadDate(); String uploadDatekeyvalue = "\"uploadDate\": {\"$date\":" + uploadDate.getTime() + "}"; outputstream.write(uploadDatekeyvalue.getBytes(charset)); Metadata.remove(TIMESTAMP); Metadata.remove(COMpressed_FLAG); Metadata.remove(DOCUMENT_UNIQUE_ID_KEY); Metadata.remove(FILE_UNIQUE_ID_KEY); if (!Metadata.isEmpty()) { String MetaJson = Metadata.toJson(); String MetaString = ",\"Meta\": " + MetaJson; outputstream.write(MetaString.getBytes(charset)); } outputstream.write(" }".getBytes(charset)); } outputstream.write("\n ]\n}".getBytes(charset)); }
@Override public void deleteAssociatedDocuments(String uniqueId) { GridFSBucket gridFS = createGridFSConnection(); gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + DOCUMENT_UNIQUE_ID_KEY,uniqueId)) .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getobjectId())); }
void put(String bucketName, GridFSBucket bucket);
GridFSBucket getBucket(String bucketName);
void put(String bucketName, GridFSBucket bucket);
public abstract GridFSBucket getFileSystem();
Example source code for com.mongodb.client.gridfs.model.GridFSFile
@Override protected void validateAfterStoreAdd(TestContext context,Vertx vertx,String path,Handler<AsyncResult<Void>> handler) { vertx.executeBlocking(f -> { try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) { MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_dbnAME); GridFSBucket gridFS = GridFSBuckets.create(db); GridFSFindIterable files = gridFS.find(); GridFSFile file = files.first(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); gridFS.downloadToStream(file.getFilename(),baos); String contents = new String(baos.toByteArray(),StandardCharsets.UTF_8); context.assertEquals(CHUNK_CONTENT,contents); } f.complete(); },handler); }
public boolean checkAndCleanup(String userId,String fileName) { List<Document> l = query(new Query().equals(USERID,userId).addSortCriteria(CREATION_TIME,false)); if (l.size() >= maxPersistedPagesPerUser) { Document oldest = l.iterator().next(); if ((System.currentTimeMillis() - oldest.get(CREATION_TIME).getTime()) < minimumDelay) { //there have been to many page persistences for this user in a short time,so don't persist return false; } else { //clean up oldest to free space for new persisted page gridFS.find(Filters.eq("filename",oldest.get(FILENAME))).forEach(new Block<GridFSFile>() { @Override public void apply(GridFSFile file) { gridFS.delete(file.getobjectId()); } }); oldest.delete(); } } //create new entry Document newOne = createNew(); newOne.set(USERID,userId); newOne.set(FILENAME,fileName); newOne.set(CREATION_TIME,new Date()); newOne.writetoDatabase(false); return true; }
@Override public void byPages(int pageSize,Callback callback) { MongoCursor<GridFSFile> cursor = bucket.find().iterator(); boolean loop = true; try { while (loop) { List<GridFSFile> files = new ArrayList<>(batchSize); int i = 0; while (cursor.hasNext() && i < batchSize) { files.add(cursor.next()); i++; } loop = callback.on(files.stream().map(file -> bucket.openDownloadStream(file.getobjectId())).map(fromFile).collect(Collectors.toList())); } } finally { cursor.close(); } }
@Override public InputStream getAssociatedDocumentStream(String uniqueId,String fileName) { GridFSBucket gridFS = createGridFSConnection(); GridFSFile file = gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + FILE_UNIQUE_ID_KEY,getGridFsId(uniqueId,fileName))).first(); if (file == null) { return null; } InputStream is = gridFS.openDownloadStream(file.getobjectId()); ; Document Metadata = file.getMetadata(); if (Metadata.containsKey(COMpressed_FLAG)) { boolean compressed = (boolean) Metadata.remove(COMpressed_FLAG); if (compressed) { is = new InflaterInputStream(is); } } return is; }
public Document findMetadata(String oid) {
    GridFSFile first = fileStore.find(eq("_id", new ObjectId(oid))).first();
    if (first == null) {
        return null;
    } else {
        return first.getMetadata();
    }
}
private SmofGridRef toSmofGridRef(BsonDocument refBson) {
    final String bucketName = refBson.getString("bucket").getValue();
    final ObjectId id = refBson.getObjectId("id").getValue();
    final SmofGridRef ref = SmofGridRefFactory.newFromDB(id, bucketName);
    final GridFSFile file = dispatcher.loadMetadata(ref);
    ref.putMetadata(file.getMetadata());
    return ref;
}
@Test public final void testMetadata() throws IOException { final Document Metadata = new Document("randomkey",45); ref.putMetadata(Metadata); streamManager.uploadFile(ref); ref.putMetadata(new Document()); final GridFSFile file = streamManager.loadFileMetadata(ref); assertEquals(Metadata,file.getMetadata()); }
public void deleteFile(Document mo) { String filename = mo.get(this); if (filename != null) { fileSystemProvider.getFileSystem().find(Filters.eq("filename",filename)).forEach(new Block<GridFSFile>() { @Override public void apply(GridFSFile file) { fileSystemProvider.getFileSystem().delete(file.getobjectId()); } }); mo.getDataObject().remove(this.getName()); // mo.writetoDatabase(false); } }
@Override public Observable<GridFSFile> first() { return RxObservables.create(Observables.observe(new Block<SingleResultCallback<GridFSFile>>(){ @Override public void apply(final SingleResultCallback<GridFSFile> callback) { wrapped.first(callback); } }),observableAdapter); }
@Override public Observable<GridFSFile> getGridFSFile() { return RxObservables.create(Observables.observe(new Block<SingleResultCallback<GridFSFile>>() { @Override public void apply(final SingleResultCallback<GridFSFile> callback) { wrapped.getGridFSFile(callback); } }),observableAdapter); }
@Override public Publisher<GridFSFile> getGridFSFile() { return new ObservabletoPublisher<GridFSFile>(observe(new Block<SingleResultCallback<GridFSFile>>() { @Override public void apply(final SingleResultCallback<GridFSFile> callback) { wrapped.getGridFSFile(callback); } })); }
@Override public Publisher<GridFSFile> first() { return new ObservabletoPublisher<GridFSFile>(observe(new Block<SingleResultCallback<GridFSFile>>(){ @Override public void apply(final SingleResultCallback<GridFSFile> callback) { wrapped.first(callback); } })); }
@Override public OperationResult deleteFile( final Database db,final String dbname,final String bucketName,final BsonValue fileId,final String requestEtag,final boolean checkEtag) { final String bucket = extractBucketName(bucketName); GridFSBucket gridFSBucket = GridFSBuckets.create( db.getDatabase(dbname),bucket); GridFSFile file = gridFSBucket .find(eq("_id",fileId)) .limit(1).iterator().tryNext(); if (file == null) { return new OperationResult(HttpStatus.SC_NOT_FOUND); } else if (checkEtag) { Object oldEtag = file.getMetadata().get("_etag"); if (oldEtag != null) { if (requestEtag == null) { return new OperationResult(HttpStatus.SC_CONFLICT,oldEtag); } else if (!Objects.equals(oldEtag.toString(),requestEtag)) { return new OperationResult( HttpStatus.SC_PRECONDITION_Failed,oldEtag); } } } gridFSBucket.delete(fileId); return new OperationResult(HttpStatus.SC_NO_CONTENT); }
@Override public void handleRequest( HttpServerExchange exchange,RequestContext context) throws Exception { if (context.isInError()) { next(exchange,context); return; } LOGGER.trace("GET " + exchange.getRequestURL()); final String bucket = extractBucketName(context.getCollectionName()); GridFSBucket gridFSBucket = GridFSBuckets.create( MongoDBClientSingleton.getInstance().getClient() .getDatabase(context.getdbname()),bucket); GridFSFile dbsfile = gridFSBucket .find(eq("_id",context.getDocumentId())) .limit(1).iterator().tryNext(); if (dbsfile == null) { fileNotFound(context,exchange); } else if (!checkEtag(exchange,dbsfile)) { sendBinaryContent(context,gridFSBucket,dbsfile,exchange); } next(exchange,context); }
private boolean checkEtag(HttpServerExchange exchange,GridFSFile dbsfile) { if (dbsfile != null) { Object etag; if (dbsfile.getMetadata() != null && dbsfile.getMetadata().containsKey("_etag")) { etag = dbsfile.getMetadata().get("_etag"); } else { etag = null; } if (etag != null && etag instanceof ObjectId) { ObjectId _etag = (ObjectId) etag; BsonObjectId __etag = new BsonObjectId(_etag); // in case the request contains the IF_NONE_MATCH header with the current etag value,// just return 304 NOT_MODIFIED code if (RequestHelper.checkReadEtag(exchange,__etag)) { exchange.setStatusCode(HttpStatus.SC_NOT_MODIFIED); exchange.endExchange(); return true; } } } return false; }
@Override public List<AssociatedDocument> getAssociatedDocuments(String uniqueId,FetchType fetchType) throws Exception { GridFSBucket gridFS = createGridFSConnection(); List<AssociatedDocument> assocDocs = new ArrayList<>(); if (!FetchType.NONE.equals(fetchType)) { GridFSFindIterable files = gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + DOCUMENT_UNIQUE_ID_KEY,uniqueId)); for (GridFSFile file : files) { AssociatedDocument ad = loadGridFSToAssociatedDocument(gridFS,file,fetchType); assocDocs.add(ad); } } return assocDocs; }
@Override public AssociatedDocument getAssociatedDocument(String uniqueId,String fileName,FetchType fetchType) throws Exception { GridFSBucket gridFS = createGridFSConnection(); if (!FetchType.NONE.equals(fetchType)) { GridFSFile file = gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + FILE_UNIQUE_ID_KEY,fileName))).first(); if (null != file) { return loadGridFSToAssociatedDocument(gridFS,fetchType); } } return null; }
private AssociatedDocument loadGridFSToAssociatedDocument(GridFSBucket gridFS,GridFSFile file,FetchType fetchType) throws IOException { AssociatedDocument.Builder aBuilder = AssociatedDocument.newBuilder(); aBuilder.setFilename(file.getFilename()); Document Metadata = file.getMetadata(); boolean compressed = false; if (Metadata.containsKey(COMpressed_FLAG)) { compressed = (boolean) Metadata.remove(COMpressed_FLAG); } long timestamp = (long) Metadata.remove(TIMESTAMP); aBuilder.setCompressed(compressed); aBuilder.setTimestamp(timestamp); aBuilder.setDocumentUniqueId((String) Metadata.remove(DOCUMENT_UNIQUE_ID_KEY)); for (String field : Metadata.keySet()) { aBuilder.addMetadata(Metadata.newBuilder().setKey(field).setValue((String) Metadata.get(field))); } if (FetchType.FULL.equals(fetchType)) { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); gridFS.downloadToStream(file.getobjectId(),byteArrayOutputStream); byte[] bytes = byteArrayOutputStream.toByteArray(); if (null != bytes) { if (compressed) { bytes = CommonCompression.uncompressZlib(bytes); } aBuilder.setDocument(ByteString.copyFrom(bytes)); } } aBuilder.setIndexName(indexName); return aBuilder.build(); }
@Override public List<String> getAssociatedFilenames(String uniqueId) throws Exception { GridFSBucket gridFS = createGridFSConnection(); ArrayList<String> fileNames = new ArrayList<>(); gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + DOCUMENT_UNIQUE_ID_KEY,uniqueId)) .forEach((Consumer<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> fileNames.add(gridFSFile.getFilename())); return fileNames; }
@Override public void deleteAssociatedDocument(String uniqueId,String fileName) { GridFSBucket gridFS = createGridFSConnection(); gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + FILE_UNIQUE_ID_KEY,fileName))) .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getobjectId())); }
@Override public long flush(long expiration) throws IOException { rwl.r.lock(); try { final Bson filter = expiration == 0 ? Filters.exists("uri") : Filters.lt("_id",new ObjectId(new Date(expiration))); indexedCollection.deleteMany(filter); for (GridFSFile f : contentGrid.find(filter)) contentGrid.delete(f.getobjectId()); long l = MetaCollection.deleteMany(filter).getDeletedCount(); return l; } finally { rwl.r.unlock(); } }
@Override
public GridFSFile loadFileMetadata(SmofGridRef ref) {
    final GridFSBucket bucket = pool.getBucket(ref.getBucketName());
    return bucket.find(Filters.eq(Element.ID, ref.getId())).first();
}
@Override
public GridFSFile loadMetadata(SmofGridRef ref) {
    return streamManager.loadFileMetadata(ref);
}
public GridFSFile gridFSFile() {
    return gridFSFile(database, bucket, objectId);
}
@BeanCodecKey(ignore = true)
public GridFSFile getGridFSFile(MongoDatabase mongoDatabase) {
    return GridFSBuckets.create(mongoDatabase, bucket).find(Filters.eq("_id", fileObjectId)).first();
}
@Override
public Observable<GridFSFile> toObservable() {
    return RxObservables.create(Observables.observe(wrapped), observableAdapter);
}
@Override
public Subscription subscribe(final Subscriber<? super GridFSFile> s) {
    return toObservable().subscribe(s);
}
@Override
public void subscribe(final Subscriber<? super GridFSFile> s) {
    new ObservableToPublisher<GridFSFile>(observe(wrapped)).subscribe(s);
}
private void sendBinaryContent( final RequestContext context,final GridFSBucket gridFSBucket,final GridFSFile file,final HttpServerExchange exchange) throws IOException { LOGGER.trace("Filename = {}",file.getFilename()); LOGGER.trace("Content length = {}",file.getLength()); if (file.getMetadata() != null && file.getMetadata().get("contentType") != null) { exchange.getResponseHeaders().put(Headers.CONTENT_TYPE,file.getMetadata().get("contentType").toString()); } else if (file.getMetadata() != null && file.getMetadata().get("contentType") != null) { exchange.getResponseHeaders().put(Headers.CONTENT_TYPE,file.getMetadata().get("contentType").toString()); } else { exchange.getResponseHeaders().put( Headers.CONTENT_TYPE,APPLICATION_OCTET_STREAM); } exchange.getResponseHeaders().put( Headers.CONTENT_LENGTH,file.getLength()); exchange.getResponseHeaders().put( Headers.CONTENT_disPOSITION,String.format("inline; filename=\"%s\"",extractFilename(file))); exchange.getResponseHeaders().put( Headers.CONTENT_TRANSFER_ENCODING,CONTENT_TRANSFER_ENCODING_BINARY); ResponseHelper.injectEtagHeader(exchange,file.getMetadata()); context.setResponseStatusCode(HttpStatus.SC_OK); gridFSBucket.downloadToStream( file.getId(),exchange.getoutputStream()); }
private String extractFilename(final GridFSFile dbsfile) {
    return dbsfile.getFilename() != null
            ? dbsfile.getFilename()
            : dbsfile.getId().toString();
}
public void getAssociatedDocuments(OutputStream outputstream,Document filter) throws IOException { Charset charset = Charset.forName("UTF-8"); GridFSBucket gridFS = createGridFSConnection(); GridFSFindIterable gridFSFiles = gridFS.find(filter); outputstream.write("{\n".getBytes(charset)); outputstream.write(" \"associatedDocs\": [\n".getBytes(charset)); boolean first = true; for (GridFSFile gridFSFile : gridFSFiles) { if (first) { first = false; } else { outputstream.write(",\n".getBytes(charset)); } Document Metadata = gridFSFile.getMetadata(); String uniqueId = Metadata.getString(DOCUMENT_UNIQUE_ID_KEY); String uniquieIdkeyvalue = " { \"uniqueId\": \"" + uniqueId + "\","; outputstream.write(uniquieIdkeyvalue.getBytes(charset)); String filename = gridFSFile.getFilename(); String filenamekeyvalue = "\"filename\": \"" + filename + "\","; outputstream.write(filenamekeyvalue.getBytes(charset)); Date uploadDate = gridFSFile.getUploadDate(); String uploadDatekeyvalue = "\"uploadDate\": {\"$date\":" + uploadDate.getTime() + "}"; outputstream.write(uploadDatekeyvalue.getBytes(charset)); Metadata.remove(TIMESTAMP); Metadata.remove(COMpressed_FLAG); Metadata.remove(DOCUMENT_UNIQUE_ID_KEY); Metadata.remove(FILE_UNIQUE_ID_KEY); if (!Metadata.isEmpty()) { String MetaJson = Metadata.toJson(); String MetaString = ",\"Meta\": " + MetaJson; outputstream.write(MetaString.getBytes(charset)); } outputstream.write(" }".getBytes(charset)); } outputstream.write("\n ]\n}".getBytes(charset)); }
@Override public void deleteAssociatedDocuments(String uniqueId) { GridFSBucket gridFS = createGridFSConnection(); gridFS.find(new Document(ASSOCIATED_MetaDATA + "." + DOCUMENT_UNIQUE_ID_KEY,uniqueId)) .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getobjectId())); }
public static GridFSFile gridFSFile(MongoDatabase database, String bucket, ObjectId objectId) {
    return GridFSBuckets.create(database, bucket).find(Filters.eq("_id", objectId)).first();
}
/** * Gets the corresponding {@link GridFSFile} for the file being downloaded * * @return an observable with a single element,the corresponding GridFSFile for the file being downloaded */ Observable<GridFSFile> getGridFSFile();
/** * Helper to return an observable limited first from the query. * * @return an observable with a single element */ Observable<GridFSFile> first();
/** * Gets the corresponding {@link GridFSFile} for the file being downloaded * * @return a publisher with a single element,the corresponding GridFSFile for the file being downloaded */ Publisher<GridFSFile> getGridFSFile();
/** * Helper to return a publisher limited first from the query. * * @return a publisher with a single element */ Publisher<GridFSFile> first();
GridFSFile loadFileMetadata(SmofGridRef ref);
GridFSFile loadMetadata(SmofGridRef ref);
Example source code for com.mongodb.client.gridfs.model.GridFSUploadOptions
@Override public Uri putBlob(Uri uri,Blob blob) { GridFsuploadOptions options = new GridFsuploadOptions(); Document document = JsonBsonCodec.toBson(mapper,blob.getMetadata()); options.Metadata(document); GridFsuploadStream file = bucket.openUploadStream(uri.toString(),options); try { IoUtils.copy(blob.getPayload().openStream(),file); } catch (IOException e) { throw Throwables.propagate(e); } file.close(); return Uri.create("mongodb://" + databaseName + "/" + bucket.getBucketName() + "/" + file.getFileId().toString()); }
@Override public InputStream store(DownloadItem downloadItem) throws IOException,JSONException { rwl.r.lock(); try { final URI uri = downloadItem.getUri(); if (!uri.equals(this.uri)) throw new IOException("The URI does not match: " + uri + " / " + this.uri); final Document newDocument = Document.parse(downloadItem.getMetaAsJson()); newDocument.put("uri",uriString); final BsonValue id = MetaCollection.replaceOne(eq("uri",uriString),newDocument,UPSERT) .getUpsertedId(); final GridFsuploadOptions options = new GridFsuploadOptions().Metadata(new Document("_id",id)); contentGrid.uploadFromStream(id,uriString,downloadItem.getContentInputStream(),options); return contentGrid.openDownloadStream(id); } finally { rwl.r.unlock(); } }
private void uploadStream(SmofGridRef ref,String name,InputStream stream) { final String bucketName = ref.getBucketName(); final ObjectId id; final GridFSBucket bucket; Preconditions.checkNotNull(bucketName,"No bucket specified"); final GridFsuploadOptions options = new GridFsuploadOptions().Metadata(ref.getMetadata()); bucket = pool.getBucket(bucketName); id = bucket.uploadFromStream(name,stream,options); ref.setId(id); }
@Override public Observable<ObjectId> uploadFromStream(final String filename,final AsyncInputStream source,final GridFsuploadOptions options) { return RxObservables.create(Observables.observe(new Block<SingleResultCallback<ObjectId>>() { @Override public void apply(final SingleResultCallback<ObjectId> callback) { wrapped.uploadFromStream(filename,toCallbackAsyncInputStream(source),options,callback); } }),observableAdapter); }
@Override public Observable<Success> uploadFromStream(final BsonValue id,final String filename,final GridFsuploadOptions options) { return RxObservables.create(Observables.observe(new Block<SingleResultCallback<Success>>() { @Override public void apply(final SingleResultCallback<Success> callback) { wrapped.uploadFromStream(id,filename,voidToSuccessCallback(callback)); } }),observableAdapter); }
@Override public Publisher<ObjectId> uploadFromStream(final String filename,final GridFsuploadOptions options) { return new ObservabletoPublisher<ObjectId>(observe(new Block<SingleResultCallback<ObjectId>>() { @Override public void apply(final SingleResultCallback<ObjectId> callback) { wrapped.uploadFromStream(filename,callback); } })); }
@Override public Publisher<Success> uploadFromStream(final BsonValue id,final GridFsuploadOptions options) { return new ObservabletoPublisher<Success>(observe(new Block<SingleResultCallback<Success>>() { @Override public void apply(final SingleResultCallback<Success> callback) { wrapped.uploadFromStream(id,voidToSuccessCallback(callback)); } })); }
@Override public Publisher<ObjectId> uploadFromStream(final ClientSession clientSession,final GridFsuploadOptions options) { return new ObservabletoPublisher<ObjectId>(observe(new Block<SingleResultCallback<ObjectId>>() { @Override public void apply(final SingleResultCallback<ObjectId> callback) { wrapped.uploadFromStream(clientSession,callback); } })); }
@Override public Publisher<Success> uploadFromStream(final ClientSession clientSession,final BsonValue id,final GridFsuploadOptions options) { return new ObservabletoPublisher<Success>(observe(new Block<SingleResultCallback<Success>>() { @Override public void apply(final SingleResultCallback<Success> callback) { wrapped.uploadFromStream(clientSession,id,voidToSuccessCallback(callback)); } })); }
private GridFsuploadOptions getGridFsuploadOptions(String uniqueId,String fileName,boolean compress,long timestamp,Map<String,String> MetadataMap) { Document Metadata = new Document(); if (MetadataMap != null) { for (String key : MetadataMap.keySet()) { Metadata.put(key,MetadataMap.get(key)); } } Metadata.put(TIMESTAMP,timestamp); Metadata.put(COMpressed_FLAG,compress); Metadata.put(DOCUMENT_UNIQUE_ID_KEY,uniqueId); Metadata.put(FILE_UNIQUE_ID_KEY,getGridFsId(uniqueId,fileName)); return new GridFsuploadOptions().chunkSizeBytes(1024).Metadata(Metadata); }
public FileStoreBucket() {
    gridFsUploadOptions = new GridFSUploadOptions();
    gridFsUploadOptions.chunkSizeBytes(CHUNK_SIZE_BYTES);
}
@Override
public GridFSUploadStream openUploadStream(final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(filename, options), observableAdapter);
}
@Override
public GridFSUploadStream openUploadStream(final BsonValue id, final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(id, filename, options), observableAdapter);
}
@Override
public GridFSUploadStream openUploadStream(final String filename) {
    return openUploadStream(filename, new GridFSUploadOptions());
}

@Override
public GridFSUploadStream openUploadStream(final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(filename, options));
}

@Override
public GridFSUploadStream openUploadStream(final BsonValue id, final String filename) {
    return openUploadStream(id, filename, new GridFSUploadOptions());
}

@Override
public GridFSUploadStream openUploadStream(final BsonValue id, final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(id, filename, options));
}

@Override
public GridFSUploadStream openUploadStream(final ClientSession clientSession, final String filename) {
    return openUploadStream(clientSession, filename, new GridFSUploadOptions());
}

@Override
public GridFSUploadStream openUploadStream(final ClientSession clientSession, final String filename, final GridFSUploadOptions options) {
    return new GridFSUploadStreamImpl(wrapped.openUploadStream(clientSession, filename, options));
}
@Override
public Publisher<ObjectId> uploadFromStream(final String filename, final AsyncInputStream source) {
    return uploadFromStream(filename, source, new GridFSUploadOptions());
}

@Override
public Publisher<Success> uploadFromStream(final BsonValue id, final String filename, final AsyncInputStream source) {
    return uploadFromStream(id, filename, source, new GridFSUploadOptions());
}

@Override
public Publisher<ObjectId> uploadFromStream(final ClientSession clientSession, final String filename, final AsyncInputStream source) {
    return uploadFromStream(clientSession, filename, source, new GridFSUploadOptions());
}

@Override
public Publisher<Success> uploadFromStream(final ClientSession clientSession, final BsonValue id, final String filename, final AsyncInputStream source) {
    return uploadFromStream(clientSession, id, filename, source, new GridFSUploadOptions());
}
/** * Opens a AsyncOutputStream that the application can write the contents of the file to. * <p> * As the application writes the contents to the returned Stream,the contents are uploaded as chunks in the chunks collection. When * the application signals it is done writing the contents of the file by calling close on the returned Stream,a files collection * document is created in the files collection. * </p> * * @param filename the filename for the stream * @param options the GridFsuploadOptions * @return the GridFsuploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the * application will write the contents. */ GridFsuploadStream openUploadStream(String filename,GridFsuploadOptions options);
/** * Opens a AsyncOutputStream that the application can write the contents of the file to. * <p> * As the application writes the contents to the returned Stream,a files collection * document is created in the files collection. * </p> * * @param id the custom id value of the file * @param filename the filename for the stream * @param options the GridFsuploadOptions * @return the GridFsuploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the * application will write the contents. */ GridFsuploadStream openUploadStream(BsonValue id,String filename,GridFsuploadOptions options);
/** * Uploads the contents of the given {@code AsyncInputStream} to a GridFS bucket. * <p> * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded,it creates a files collection document for {@code filename} in the files collection. * </p> * * @param filename the filename for the stream * @param source the Stream providing the file data * @param options the GridFsuploadOptions * @return an observable with a single element,the ObjectId of the uploaded file. */ Observable<ObjectId> uploadFromStream(String filename,AsyncInputStream source,it creates a files collection document for {@code filename} in the files collection. * </p> * * @param id the custom id value of the file * @param filename the filename for the stream * @param source the Stream providing the file data * @param options the GridFsuploadOptions * @return an observable with a single element,representing when the successful upload of the source. */ Observable<Success> uploadFromStream(BsonValue id,GridFsuploadOptions options);
/** * Opens a AsyncOutputStream that the application can write the contents of the file to. * <p> * As the application writes the contents to the returned Stream,a files collection * document is created in the files collection. * </p> * * @param clientSession the client session with which to associate this operation * @param filename the filename for the stream * @param options the GridFsuploadOptions * @return the GridFsuploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the * application will write the contents. * @mongodb.server.release 3.6 * @since 1.7 */ GridFsuploadStream openUploadStream(ClientSession clientSession,a files collection * document is created in the files collection. * </p> * * @param clientSession the client session with which to associate this operation * @param id the custom id value of the file * @param filename the filename for the stream * @param options the GridFsuploadOptions * @return the GridFsuploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the * application will write the contents. * @mongodb.server.release 3.6 * @since 1.7 */ GridFsuploadStream openUploadStream(ClientSession clientSession,BsonValue id,GridFsuploadOptions options);
/** * Uploads the contents of the given {@code AsyncInputStream} to a GridFS bucket. * <p> * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the * chunks have been uploaded,it creates a files collection document for {@code filename} in the files collection. * </p> * * @param filename the filename for the stream * @param source the Stream providing the file data * @param options the GridFsuploadOptions * @return a publisher with a single element,the ObjectId of the uploaded file. */ Publisher<ObjectId> uploadFromStream(String filename,it creates a files collection document for {@code filename} in the files collection. * </p> * * @param id the custom id value of the file * @param filename the filename for the stream * @param source the Stream providing the file data * @param options the GridFsuploadOptions * @return a publisher with a single element,representing when the successful upload of the source. */ Publisher<Success> uploadFromStream(BsonValue id,it creates a files collection document for {@code filename} in the files collection. * </p> * * @param clientSession the client session with which to associate this operation * @param filename the filename for the stream * @param source the Stream providing the file data * @param options the GridFsuploadOptions * @return a publisher with a single element,the ObjectId of the uploaded file. * @mongodb.server.release 3.6 * @since 1.7 */ Publisher<ObjectId> uploadFromStream(ClientSession clientSession,it creates a files collection document for {@code filename} in the files collection. * </p> * * @param clientSession the client session with which to associate this operation * @param id the custom id value of the file * @param filename the filename for the stream * @param source the Stream providing the file data * @param options the GridFsuploadOptions * @return a publisher with a single element,representing when the successful upload of the source. * @mongodb.server.release 3.6 * @since 1.7 */ Publisher<Success> uploadFromStream(ClientSession clientSession,GridFsuploadOptions options);
That wraps up our discussion of Node.js file upload with Express 4, MongoDB, GridFS and GridFS-Stream. Thank you for reading. For more on the example source code for com.mongodb.client.gridfs.GridFSBuckets, com.mongodb.client.gridfs.GridFSBucket, com.mongodb.client.gridfs.model.GridFSFile and com.mongodb.client.gridfs.model.GridFSUploadOptions, see the sections above or search this site.