Java Code Examples for com.mongodb.client.gridfs.GridFSBucket
The following examples show how to use com.mongodb.client.gridfs.GridFSBucket.
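Before the individual examples, here is a minimal, self-contained sketch of the core GridFSBucket operations (create a bucket, upload, download, delete). It assumes the synchronous Java driver (mongodb-driver-sync); the connection string, database name, class name, and file name are placeholders, not values taken from the examples below.
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import org.bson.types.ObjectId;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

public class GridFSQuickStart {
  public static void main(String[] args) {
    // Placeholder connection string and database name.
    try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
      MongoDatabase db = client.getDatabase("test");

      // GridFSBuckets.create(db) uses the default "fs" bucket;
      // GridFSBuckets.create(db, "name") selects a custom bucket.
      GridFSBucket bucket = GridFSBuckets.create(db);

      // Upload: streams the bytes into chunks and returns the new file's ObjectId.
      byte[] data = "hello gridfs".getBytes(StandardCharsets.UTF_8);
      ObjectId fileId = bucket.uploadFromStream("hello.txt", new ByteArrayInputStream(data));

      // Download: writes the stored chunks back into the given OutputStream.
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      bucket.downloadToStream(fileId, out);
      System.out.println(new String(out.toByteArray(), StandardCharsets.UTF_8));

      // Delete: removes the files entry and all of its chunks.
      bucket.delete(fileId);
    }
  }
}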
Example 1
private void prepareData(String filename, int size, Vertx vertx,
    Handler<AsyncResult<String>> handler) {
  vertx.<String>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      try (GridFSUploadStream os = gridFS.openUploadStream(filename)) {
        for (int i = 0; i < size; ++i) {
          os.write((byte)(i & 0xFF));
        }
      }
    }
    f.complete(filename);
  }, handler);
}
Example 2
@Override
protected void validateAfterStoreAdd(TestContext context, Vertx vertx,
    String path, Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      GridFSFindIterable files = gridFS.find();
      GridFSFile file = files.first();
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      gridFS.downloadToStream(file.getFilename(), baos);
      String contents = new String(baos.toByteArray(), StandardCharsets.UTF_8);
      context.assertEquals(CHUNK_CONTENT, contents);
    }
    f.complete();
  }, handler);
}
Example 3
@Override
public InputStream getAssociatedDocumentStream(String uniqueId, String fileName) {
  GridFSBucket gridFS = createGridFSConnection();
  GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName))).first();
  if (file == null) {
    return null;
  }
  InputStream is = gridFS.openDownloadStream(file.getObjectId());
  Document metadata = file.getMetadata();
  if (metadata.containsKey(COMPRESSED_FLAG)) {
    boolean compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
    if (compressed) {
      is = new InflaterInputStream(is);
    }
  }
  return is;
}
Example 4
private void uploadStream(SmofGridRef ref, String name, InputStream stream) {
  final String bucketName = ref.getBucketName();
  final ObjectId id;
  final GridFSBucket bucket;
  Preconditions.checkNotNull(bucketName, "No bucket specified");
  final GridFSUploadOptions options = new GridFSUploadOptions().metadata(ref.getMetadata());
  bucket = pool.getBucket(bucketName);
  id = bucket.uploadFromStream(name, stream, options);
  ref.setId(id);
}
Example 5
@Override
public InputStream download(SmofGridRef ref) {
  final String bucketName = ref.getBucketName();
  final ObjectId id = ref.getId();
  Preconditions.checkArgument(id != null, "No download source found");
  Preconditions.checkArgument(bucketName != null, "No bucket specified");
  final GridFSBucket bucket = pool.getBucket(bucketName);
  return bucket.openDownloadStream(id);
}
Example 6
@Override
public void drop(SmofGridRef ref) {
  final String bucketName = ref.getBucketName();
  final ObjectId id = ref.getId();
  Preconditions.checkArgument(id != null, "No download source found");
  Preconditions.checkArgument(bucketName != null, "No bucket specified");
  final GridFSBucket bucket = pool.getBucket(bucketName);
  bucket.delete(id);
}
Example 7
private void deleteLog(Date olderThan) {
  MongoCollection<Log> logCollection = mongoService.getMongoClient().getDatabase(database).getCollection(collection, Log.class);
  Bson filter = Filters.lt("timeStamp", olderThan);
  logCollection.find(filter).forEach((Block<? super Log>) log -> {
    log.getLogFiles().forEach(logFile -> {
      GridFSBucket gridFSBucket = GridFSBuckets.create(mongoService.getMongoClient().getDatabase(database), logFile.getBucket());
      gridFSBucket.delete(logFile.getFileObjectId());
    });
  });
  DeleteResult deleteResult = logCollection.deleteMany(filter);
}
Example 8
public BucketStreamResource(MongoDatabase database, String bucket, ObjectId objectId) {
  super((StreamSource) () -> {
    GridFSBucket gridFSBucket = GridFSBuckets.create(database, bucket);
    return gridFSBucket.openDownloadStream(objectId);
  }, gridFSFile(database, bucket, objectId).getFilename());
  this.database = database;
  this.bucket = bucket;
  this.objectId = objectId;
}
Example 9
private void getChunkSize(Vertx vertx, Handler<AsyncResult<Integer>> handler) {
  vertx.<Integer>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      f.complete(gridFS.getChunkSizeBytes());
    }
  }, handler);
}
Example 10
@Override
protected void prepareData(TestContext context, Vertx vertx, String path,
    Handler<AsyncResult<String>> handler) {
  String filename = PathUtils.join(path, ID);
  vertx.<String>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      byte[] contents = CHUNK_CONTENT.getBytes(StandardCharsets.UTF_8);
      gridFS.uploadFromStream(filename, new ByteArrayInputStream(contents));
      f.complete(filename);
    }
  }, handler);
}
Example 11
@Override
protected void validateAfterStoreDelete(TestContext context, Vertx vertx,
    String path, Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      GridFSFindIterable files = gridFS.find();
      context.assertTrue(Iterables.isEmpty(files));
    }
    f.complete();
  }, handler);
}
Example 12
protected void initDB(String dbName) {
  MongoClient m = new MongoClient();
  MongoDatabase chatDB = m.getDatabase(dbName);
  DocumentCollections.init(this, chatDB);
  GridFSBucket fs = GridFSBuckets.create(chatDB, "persistedPages");
  super.initDB(chatDB, fs);
}
Example 13
@Override
public OperationResult deleteFile(
    final Database db,
    final String dbName,
    final String bucketName,
    final BsonValue fileId,
    final String requestEtag,
    final boolean checkEtag) {
  final String bucket = extractBucketName(bucketName);
  GridFSBucket gridFSBucket = GridFSBuckets.create(
      db.getDatabase(dbName),
      bucket);
  GridFSFile file = gridFSBucket
      .find(eq("_id", fileId))
      .limit(1).iterator().tryNext();
  if (file == null) {
    return new OperationResult(HttpStatus.SC_NOT_FOUND);
  } else if (checkEtag) {
    Object oldEtag = file.getMetadata().get("_etag");
    if (oldEtag != null) {
      if (requestEtag == null) {
        return new OperationResult(HttpStatus.SC_CONFLICT, oldEtag);
      } else if (!Objects.equals(oldEtag.toString(), requestEtag)) {
        return new OperationResult(
            HttpStatus.SC_PRECONDITION_FAILED, oldEtag);
      }
    }
  }
  gridFSBucket.delete(fileId);
  return new OperationResult(HttpStatus.SC_NO_CONTENT);
}
Example 14
@Override
public void handleRequest(
    HttpServerExchange exchange,
    RequestContext context)
    throws Exception {
  if (context.isInError()) {
    next(exchange, context);
    return;
  }
  LOGGER.trace("GET " + exchange.getRequestURL());
  final String bucket = extractBucketName(context.getCollectionName());
  GridFSBucket gridFSBucket = GridFSBuckets.create(
      MongoDBClientSingleton.getInstance().getClient()
          .getDatabase(context.getDBName()),
      bucket);
  GridFSFile dbsfile = gridFSBucket
      .find(eq("_id", context.getDocumentId()))
      .limit(1).iterator().tryNext();
  if (dbsfile == null) {
    fileNotFound(context, exchange);
  } else if (!checkEtag(exchange, dbsfile)) {
    sendBinaryContent(context, gridFSBucket, dbsfile, exchange);
  }
  next(exchange, context);
}
Example 15
@Override
public void deleteAllDocuments() {
  GridFSBucket gridFS = createGridFSConnection();
  gridFS.drop();
  MongoDatabase db = mongoClient.getDatabase(database);
  MongoCollection<Document> coll = db.getCollection(rawCollectionName);
  coll.deleteMany(new Document());
}
Example 16
@Override
public List<AssociatedDocument> getAssociatedDocuments(String uniqueId, FetchType fetchType) throws Exception {
  GridFSBucket gridFS = createGridFSConnection();
  List<AssociatedDocument> assocDocs = new ArrayList<>();
  if (!FetchType.NONE.equals(fetchType)) {
    GridFSFindIterable files = gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId));
    for (GridFSFile file : files) {
      AssociatedDocument ad = loadGridFSToAssociatedDocument(gridFS, file, fetchType);
      assocDocs.add(ad);
    }
  }
  return assocDocs;
}
Example 17
@Override
public AssociatedDocument getAssociatedDocument(String uniqueId, String fileName, FetchType fetchType) throws Exception {
  GridFSBucket gridFS = createGridFSConnection();
  if (!FetchType.NONE.equals(fetchType)) {
    GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName))).first();
    if (null != file) {
      return loadGridFSToAssociatedDocument(gridFS, file, fetchType);
    }
  }
  return null;
}
Example 18
private AssociatedDocument loadGridFSToAssociatedDocument(GridFSBucket gridFS, GridFSFile file, FetchType fetchType) throws IOException {
  AssociatedDocument.Builder aBuilder = AssociatedDocument.newBuilder();
  aBuilder.setFilename(file.getFilename());
  Document metadata = file.getMetadata();
  boolean compressed = false;
  if (metadata.containsKey(COMPRESSED_FLAG)) {
    compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
  }
  long timestamp = (long) metadata.remove(TIMESTAMP);
  aBuilder.setCompressed(compressed);
  aBuilder.setTimestamp(timestamp);
  aBuilder.setDocumentUniqueId((String) metadata.remove(DOCUMENT_UNIQUE_ID_KEY));
  for (String field : metadata.keySet()) {
    aBuilder.addMetadata(Metadata.newBuilder().setKey(field).setValue((String) metadata.get(field)));
  }
  if (FetchType.FULL.equals(fetchType)) {
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    gridFS.downloadToStream(file.getObjectId(), byteArrayOutputStream);
    byte[] bytes = byteArrayOutputStream.toByteArray();
    if (null != bytes) {
      if (compressed) {
        bytes = CommonCompression.uncompressZlib(bytes);
      }
      aBuilder.setDocument(ByteString.copyFrom(bytes));
    }
  }
  aBuilder.setIndexName(indexName);
  return aBuilder.build();
}
Example 19
@Override
public List<String> getAssociatedFilenames(String uniqueId) throws Exception {
  GridFSBucket gridFS = createGridFSConnection();
  ArrayList<String> fileNames = new ArrayList<>();
  gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
      .forEach((Consumer<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> fileNames.add(gridFSFile.getFilename()));
  return fileNames;
}
Example 20
@Override
public void deleteAssociatedDocument(String uniqueId, String fileName) {
  GridFSBucket gridFS = createGridFSConnection();
  gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY, getGridFsId(uniqueId, fileName)))
      .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getObjectId()));
}
Example 21
@Override
public GridFSFile loadFileMetadata(SmofGridRef ref) {
  final GridFSBucket bucket = pool.getBucket(ref.getBucketName());
  return bucket.find(Filters.eq(Element.ID, ref.getId())).first();
}
Example 22
public void loadBucket(String bucketName) {
  final GridFSBucket bucket = GridFSBuckets.create(database, bucketName);
  dispatcher.put(bucketName, bucket);
}
Example 23
@Override
public void put(String bucketName, GridFSBucket bucket) {
  collections.put(bucketName, bucket);
}
Example 24
@Override
public void dropBucket(String bucketName) {
  final GridFSBucket bucket = collections.getBucket(bucketName);
  bucket.drop();
  collections.dropBucket(bucketName);
}
Example 25
@Override
public void put(String bucketName, GridFSBucket bucket) {
  fsBuckets.put(bucketName, bucket);
}
Example 26
@Override
public GridFSBucket getBucket(String bucketName) {
  return fsBuckets.get(bucketName);
}
Example 27
@Override
public void dropAllBuckets() {
  fsBuckets.values().forEach(GridFSBucket::drop);
  fsBuckets.clear();
}
Example 28
private void doRead(int size, int chunkSize, Vertx vertx, TestContext context) {
  Async async = context.async();
  prepareData("test_" + size + ".bin", size, vertx, context.asyncAssertSuccess(filename -> {
    com.mongodb.async.client.MongoClient client = createAsyncClient();
    com.mongodb.async.client.MongoDatabase db =
        client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
    com.mongodb.async.client.gridfs.GridFSBucket gridfs =
        com.mongodb.async.client.gridfs.GridFSBuckets.create(db);
    GridFSDownloadStream is = gridfs.openDownloadStream(filename);
    MongoDBChunkReadStream rs = new MongoDBChunkReadStream(is, size, chunkSize,
        vertx.getOrCreateContext());
    rs.exceptionHandler(context::fail);
    int[] pos = { 0 };
    rs.endHandler(v -> {
      rs.close();
      context.assertEquals(size, pos[0]);
      async.complete();
    });
    rs.handler(buf -> {
      if (size - pos[0] > chunkSize) {
        context.assertEquals(chunkSize, buf.length());
      } else {
        context.assertEquals(size - pos[0], buf.length());
      }
      for (int i = pos[0]; i < pos[0] + buf.length(); ++i) {
        context.assertEquals((byte)(i & 0xFF), buf.getByte(i - pos[0]));
      }
      pos[0] += buf.length();
    });
  }));
}
Example 29
public void initDB(MongoDatabase db, GridFSBucket gridFS) {
  this.gridFS = gridFS;
  persistedPages = new PersistedPages(engine, db, gridFS);
  super.initDB(db);
}
Example 30
public PersistedPages(UIEngine engine, MongoDatabase db, GridFSBucket gridFS) {
  super(engine, db, "persistedPages");
  this.gridFS = gridFS;
  ensureIndex(false, true, USERID, CREATION_TIME);
}
Example 31
public GridFSBucket createBucket(String bucketName) {
  return GridFSBuckets.create(this.getMongoDatabase(), bucketName);
}
Example 32
private void sendBinaryContent(
    final RequestContext context,
    final GridFSBucket gridFSBucket,
    final GridFSFile file,
    final HttpServerExchange exchange)
    throws IOException {
  LOGGER.trace("Filename = {}", file.getFilename());
  LOGGER.trace("Content length = {}", file.getLength());
  if (file.getMetadata() != null
      && file.getMetadata().get("contentType") != null) {
    exchange.getResponseHeaders().put(Headers.CONTENT_TYPE,
        file.getMetadata().get("contentType").toString());
  } else {
    exchange.getResponseHeaders().put(
        Headers.CONTENT_TYPE,
        APPLICATION_OCTET_STREAM);
  }
  exchange.getResponseHeaders().put(
      Headers.CONTENT_LENGTH,
      file.getLength());
  exchange.getResponseHeaders().put(
      Headers.CONTENT_DISPOSITION,
      String.format("inline; filename=\"%s\"",
          extractFilename(file)));
  exchange.getResponseHeaders().put(
      Headers.CONTENT_TRANSFER_ENCODING,
      CONTENT_TRANSFER_ENCODING_BINARY);
  ResponseHelper.injectEtagHeader(exchange, file.getMetadata());
  context.setResponseStatusCode(HttpStatus.SC_OK);
  gridFSBucket.downloadToStream(
      file.getId(),
      exchange.getOutputStream());
}
Example 33
private GridFSBucket createGridFSConnection() {
  MongoDatabase db = mongoClient.getDatabase(database);
  return GridFSBuckets.create(db, ASSOCIATED_FILES);
}
Example 34
public void getAssociatedDocuments(OutputStream outputstream, Document filter) throws IOException {
  Charset charset = Charset.forName("UTF-8");
  GridFSBucket gridFS = createGridFSConnection();
  GridFSFindIterable gridFSFiles = gridFS.find(filter);
  outputstream.write("{\n".getBytes(charset));
  outputstream.write(" \"associatedDocs\": [\n".getBytes(charset));
  boolean first = true;
  for (GridFSFile gridFSFile : gridFSFiles) {
    if (first) {
      first = false;
    } else {
      outputstream.write(",\n".getBytes(charset));
    }
    Document metadata = gridFSFile.getMetadata();
    String uniqueId = metadata.getString(DOCUMENT_UNIQUE_ID_KEY);
    String uniqueIdKeyValue = " { \"uniqueId\": \"" + uniqueId + "\", ";
    outputstream.write(uniqueIdKeyValue.getBytes(charset));
    String filename = gridFSFile.getFilename();
    String filenameKeyValue = "\"filename\": \"" + filename + "\", ";
    outputstream.write(filenameKeyValue.getBytes(charset));
    Date uploadDate = gridFSFile.getUploadDate();
    String uploadDateKeyValue = "\"uploadDate\": {\"$date\":" + uploadDate.getTime() + "}";
    outputstream.write(uploadDateKeyValue.getBytes(charset));
    metadata.remove(TIMESTAMP);
    metadata.remove(COMPRESSED_FLAG);
    metadata.remove(DOCUMENT_UNIQUE_ID_KEY);
    metadata.remove(FILE_UNIQUE_ID_KEY);
    if (!metadata.isEmpty()) {
      String metaJson = metadata.toJson();
      String metaString = ", \"meta\": " + metaJson;
      outputstream.write(metaString.getBytes(charset));
    }
    outputstream.write(" }".getBytes(charset));
  }
  outputstream.write("\n ]\n}".getBytes(charset));
}
Example 35
@Override
public void deleteAssociatedDocuments(String uniqueId) {
  GridFSBucket gridFS = createGridFSConnection();
  gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
      .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile -> gridFS.delete(gridFSFile.getObjectId()));
}
Example 36
void put(String bucketName, GridFSBucket bucket);
Example 37
GridFSBucket getBucket(String bucketName);
Example 38
void put(String bucketName, GridFSBucket bucket);
Example 39
public abstract GridFSBucket getFileSystem();
That’s all. Thank you!