Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut

// Add to double buffer.
omClientResponse = new OMBucketDeleteResponse(omResponse.build(),
volumeName, bucketName, omVolumeArgs.copyObject());
volumeName, bucketName, omVolumeArgs.copyObject(), transactionLogIndex);
} catch (IOException | InvalidPathException ex) {
success = false;
exception = ex;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
.setOpenVersion(openVersion).build())
.setCmdType(CreateFile);
omClientResponse = new OMFileCreateResponse(omResponse.build(),
omKeyInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
omKeyInfo, missingParentInfos, clientID, omBucketInfo.copyObject(),
isRecursive, isOverWrite);

result = Result.SUCCESS;
} catch (IOException | InvalidPathException ex) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
.setCmdType(Type.CreateFile);
omClientResponse = new OMFileCreateResponseWithFSO(omResponse.build(),
omFileInfo, missingParentInfos, clientID,
omBucketInfo.copyObject(), volumeId);
omBucketInfo.copyObject(), volumeId,
isRecursive, isOverWrite);

result = Result.SUCCESS;
} catch (IOException | InvalidPathException ex) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -342,7 +342,8 @@ private OMClientResponse renameKey(OmKeyInfo toKeyParent, String toKeyName,
OMClientResponse omClientResponse = new OMKeyRenameResponseWithFSO(
omResponse.setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
dbFromKey, dbToKey, fromKeyParent, toKeyParent, fromKeyValue,
omBucketInfo, isRenameDirectory, getBucketLayout());
omBucketInfo, isRenameDirectory, getBucketLayout(),
fromKeyName, toKeyName);
return omClientResponse;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,12 @@
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

/**
* Response for CreateBucket request.
Expand Down Expand Up @@ -80,12 +82,27 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
omVolumeArgs);
}

// Add to completed requests table.
omMetadataManager.getCompletedRequestInfoTable()
.putWithBatch(batchOperation, omBucketInfo.getUpdateID(),
getCompletedRequestInfo(omBucketInfo.getUpdateID()));
}

/**
 * Returns the bucket info captured for this CreateBucket response.
 *
 * @return the {@link OmBucketInfo}, or {@code null} (per the annotation,
 *         e.g. for responses built without a bucket — confirm with callers).
 */
@Nullable
public OmBucketInfo getOmBucketInfo() {
  return omBucketInfo;
}

/**
 * Builds the {@link OmCompletedRequestInfo} record persisted for a
 * successful CreateBucket transaction.
 *
 * @param trxnLogIndex Ratis transaction log index of the request.
 * @return populated completed-request record.
 */
protected OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  // NOTE(review): wall-clock time is sampled at double-buffer flush, so the
  // stored creationTime may differ across OM replicas/replays — confirm
  // determinism is not required for this table.
  final long recordedAt = System.currentTimeMillis();
  return OmCompletedRequestInfo.newBuilder()
      .setCmdType(Type.CreateBucket)
      .setTrxLogIndex(trxnLogIndex)
      .setCreationTime(recordedAt)
      .setVolumeName(omBucketInfo.getVolumeName())
      .setBucketName(omBucketInfo.getBucketName())
      .setOpArgs(new OmCompletedRequestInfo.OperationArgs.NoArgs())
      .build();
}
}

Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,12 @@
import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

/**
* Response for DeleteBucket request.
Expand All @@ -38,13 +40,15 @@ public final class OMBucketDeleteResponse extends OMClientResponse {
private String volumeName;
private String bucketName;
private final OmVolumeArgs omVolumeArgs;
private final long updateID;

/**
 * Response for a successful DeleteBucket request.
 *
 * @param volumeName volume containing the deleted bucket.
 * @param bucketName name of the deleted bucket.
 * @param volumeArgs updated volume args (quota bookkeeping).
 * @param updateID   transaction log index recorded with the completed request.
 */
public OMBucketDeleteResponse(@Nonnull OMResponse omResponse,
    String volumeName, String bucketName, OmVolumeArgs volumeArgs,
    long updateID) {
  super(omResponse);
  this.updateID = updateID;
  this.omVolumeArgs = volumeArgs;
  this.bucketName = bucketName;
  this.volumeName = volumeName;
}

public OMBucketDeleteResponse(@Nonnull OMResponse omResponse,
Expand All @@ -53,6 +57,7 @@ public OMBucketDeleteResponse(@Nonnull OMResponse omResponse,
this.volumeName = volumeName;
this.bucketName = bucketName;
this.omVolumeArgs = null;
this.updateID = 0;
}

/**
Expand All @@ -63,6 +68,7 @@ public OMBucketDeleteResponse(@Nonnull OMResponse omResponse) {
super(omResponse);
checkStatusNotOK();
this.omVolumeArgs = null;
this.updateID = 0;
}

@Override
Expand All @@ -79,6 +85,12 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()),
omVolumeArgs);
}

// Add to completed requests table.
if (omVolumeArgs != null) {
omMetadataManager.getCompletedRequestInfoTable()
.putWithBatch(batchOperation, updateID, getCompletedRequestInfo(updateID));
}
}

public String getVolumeName() {
Expand All @@ -89,5 +101,14 @@ public String getBucketName() {
return bucketName;
}

/**
 * Builds the {@link OmCompletedRequestInfo} record persisted for a
 * successful DeleteBucket transaction.
 *
 * @param trxnLogIndex Ratis transaction log index of the request.
 * @return populated completed-request record.
 */
protected OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  // NOTE(review): creationTime is taken at flush time and may differ across
  // OM replicas — confirm determinism is not required here.
  final long recordedAt = System.currentTimeMillis();
  return OmCompletedRequestInfo.newBuilder()
      .setCmdType(Type.DeleteBucket)
      .setTrxLogIndex(trxnLogIndex)
      .setCreationTime(recordedAt)
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setOpArgs(new OmCompletedRequestInfo.OperationArgs.NoArgs())
      .build();
}
}

Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,13 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.key.OmKeyResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand Down Expand Up @@ -92,10 +94,28 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
bucketInfo.getVolumeName(), bucketInfo.getBucketName());
omMetadataManager.getBucketTable().putWithBatch(batchOperation,
bucketKey, bucketInfo);

// Add to completed requests table.
omMetadataManager.getCompletedRequestInfoTable()
.putWithBatch(batchOperation, dirKeyInfo.getUpdateID(),
getCompletedRequestInfo(dirKeyInfo.getUpdateID()));

} else if (Result.DIRECTORY_ALREADY_EXISTS == result) {
// When directory already exists, we don't add it to cache. And it is
// not an error, in this case dirKeyInfo will be null.
LOG.debug("Directory already exists. addToDBBatch is a no-op");
}
}

/**
 * Builds the {@link OmCompletedRequestInfo} record persisted for a
 * successful CreateDirectory transaction.
 *
 * @param trxnLogIndex Ratis transaction log index of the request.
 * @return populated completed-request record.
 */
protected OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  // dirKeyInfo is null when the directory already existed (see the
  // DIRECTORY_ALREADY_EXISTS branch), so this must only be invoked on the
  // successful-creation path.
  final long recordedAt = System.currentTimeMillis();
  return OmCompletedRequestInfo.newBuilder()
      .setCmdType(Type.CreateDirectory)
      .setTrxLogIndex(trxnLogIndex)
      .setCreationTime(recordedAt)
      .setVolumeName(dirKeyInfo.getVolumeName())
      .setBucketName(dirKeyInfo.getBucketName())
      .setKeyName(dirKeyInfo.getKeyName())
      .setOpArgs(new OmCompletedRequestInfo.OperationArgs.NoArgs())
      .build();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -24,23 +24,32 @@
import java.util.List;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

/**
* Response for crate file request.
*/
@CleanupTableInfo(cleanupTables = {KEY_TABLE, OPEN_KEY_TABLE})
public class OMFileCreateResponse extends OMKeyCreateResponse {

private boolean isRecursive;
private boolean isOverWrite;

/**
 * Response for a successful CreateFile request.
 *
 * @param isRecursive whether recursive parent creation was requested.
 * @param isOverWrite whether overwrite of an existing key was allowed.
 */
public OMFileCreateResponse(@Nonnull OMResponse omResponse,
    @Nonnull OmKeyInfo omKeyInfo, @Nonnull List<OmKeyInfo> parentKeyInfos,
    long openKeySessionID, @Nonnull OmBucketInfo omBucketInfo,
    boolean isRecursive, boolean isOverWrite) {
  super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID, omBucketInfo);
  this.isOverWrite = isOverWrite;
  this.isRecursive = isRecursive;
}

/**
Expand All @@ -53,4 +62,13 @@ public OMFileCreateResponse(@Nonnull OMResponse omResponse, @Nonnull
checkStatusNotOK();
}

/** Command type recorded for this response's completed-request entry. */
@Override
public Type getOperationType() {
  return Type.CreateFile;
}

/**
 * Supplies the CreateFile-specific operation arguments (recursive and
 * overwrite flags) stored with the completed-request record.
 */
@Override
public OmCompletedRequestInfo.OperationArgs getCompletedRequestInfoArgs() {
  return new OmCompletedRequestInfo.OperationArgs.CreateFileArgs(isRecursive, isOverWrite);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -42,15 +42,29 @@
BUCKET_TABLE})
public class OMFileCreateResponseWithFSO extends OMFileCreateResponse {

private static final boolean DEFAULT_IS_RECURSIVE = false;
private static final boolean DEFAULT_IS_OVERWRITE = false;

private List<OmDirectoryInfo> parentDirInfos;
private long volumeId;

/**
 * Convenience constructor that defaults the CreateFile flags
 * (recursive = false, overwrite = false).
 *
 * Fix: dropped the {@code @Nonnull} annotation from {@code long volumeId} —
 * it is meaningless on a primitive (primitives can never be null) and is
 * flagged by static analyzers.
 */
public OMFileCreateResponseWithFSO(@Nonnull OMResponse omResponse,
    @Nonnull OmKeyInfo omKeyInfo,
    @Nonnull List<OmDirectoryInfo> parentDirInfos, long openKeySessionID,
    @Nonnull OmBucketInfo omBucketInfo, long volumeId) {
  this(omResponse, omKeyInfo, parentDirInfos, openKeySessionID,
      omBucketInfo, volumeId, DEFAULT_IS_RECURSIVE,
      DEFAULT_IS_OVERWRITE);
}

/**
 * Full constructor.
 *
 * Fix: dropped the {@code @Nonnull} annotation from {@code long volumeId} —
 * it is meaningless on a primitive and is flagged by static analyzers.
 *
 * @param isRecursive whether recursive parent creation was requested.
 * @param isOverWrite whether overwrite of an existing key was allowed.
 */
@SuppressWarnings("checkstyle:ParameterNumber")
public OMFileCreateResponseWithFSO(@Nonnull OMResponse omResponse,
    @Nonnull OmKeyInfo omKeyInfo,
    @Nonnull List<OmDirectoryInfo> parentDirInfos, long openKeySessionID,
    @Nonnull OmBucketInfo omBucketInfo, long volumeId,
    boolean isRecursive, boolean isOverWrite) {
  // NOTE(review): an empty list (not parentDirInfos) is deliberately passed
  // to the superclass; parentDirInfos is kept in this class — confirm
  // intended for the FSO layout.
  super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID,
      omBucketInfo, isRecursive, isOverWrite);
  this.parentDirInfos = parentDirInfos;
  this.volumeId = volumeId;
}
Expand Down Expand Up @@ -95,6 +109,11 @@ public void addToDBBatch(OMMetadataManager omMetadataMgr,

OMFileRequest.addToOpenFileTable(omMetadataMgr, batchOp, getOmKeyInfo(),
getOpenKeySessionID(), volumeId, getOmBucketInfo().getObjectID());

// Add to completed requests table.
omMetadataMgr.getCompletedRequestInfoTable()
.putWithBatch(batchOp, getOmKeyInfo().getUpdateID(),
getCompletedRequestInfo(getOmKeyInfo().getUpdateID()));
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,12 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

/**
* Response for CommitKey request.
Expand Down Expand Up @@ -105,6 +107,11 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
omMetadataManager.getBucketTable().putWithBatch(batchOperation,
omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
omBucketInfo.getBucketName()), omBucketInfo);

// Add to completed requests table.
omMetadataManager.getCompletedRequestInfoTable()
.putWithBatch(batchOperation, omKeyInfo.getUpdateID(),
getCompletedRequestInfo(omKeyInfo.getUpdateID()));
}

protected String getOpenKeyName() {
Expand Down Expand Up @@ -154,4 +161,16 @@ protected boolean isHSync() {
public OmKeyInfo getNewOpenKeyInfo() {
return newOpenKeyInfo;
}

/**
 * Builds the {@link OmCompletedRequestInfo} record persisted for a
 * successful CommitKey transaction.
 *
 * @param trxnLogIndex Ratis transaction log index of the request.
 * @return populated completed-request record.
 */
protected OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  // NOTE(review): creationTime is taken at flush time and may differ across
  // OM replicas — confirm determinism is not required here.
  final long recordedAt = System.currentTimeMillis();
  return OmCompletedRequestInfo.newBuilder()
      .setCmdType(Type.CommitKey)
      .setTrxLogIndex(trxnLogIndex)
      .setCreationTime(recordedAt)
      .setVolumeName(omKeyInfo.getVolumeName())
      .setBucketName(omKeyInfo.getBucketName())
      .setKeyName(omKeyInfo.getKeyName())
      .setOpArgs(new OmCompletedRequestInfo.OperationArgs.NoArgs())
      .build();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,11 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand Down Expand Up @@ -101,6 +103,11 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), openKeySessionID);
omMetadataManager.getOpenKeyTable(getBucketLayout())
.putWithBatch(batchOperation, openKey, omKeyInfo);

// Add to completed requests table.
omMetadataManager.getCompletedRequestInfoTable()
.putWithBatch(batchOperation, omKeyInfo.getUpdateID(),
getCompletedRequestInfo(omKeyInfo.getUpdateID()));
}

protected long getOpenKeySessionID() {
Expand All @@ -114,5 +121,25 @@ protected OmKeyInfo getOmKeyInfo() {
/** Returns the bucket info captured for this key-create response. */
protected OmBucketInfo getOmBucketInfo() {
  return omBucketInfo;
}

/**
 * Command type recorded in the completed-request record. Defaults to
 * CreateKey; protected so subclasses can report their own operation.
 */
protected Type getOperationType() {
  return Type.CreateKey;
}

/**
 * Operation-specific arguments stored with the completed-request record.
 * Defaults to no arguments; protected so subclasses can supply their own.
 */
protected OmCompletedRequestInfo.OperationArgs getCompletedRequestInfoArgs() {
  return new OmCompletedRequestInfo.OperationArgs.NoArgs();
}

/**
 * Builds the {@link OmCompletedRequestInfo} record persisted for a
 * successful key-create transaction. Uses the overridable
 * {@code getOperationType()} / {@code getCompletedRequestInfoArgs()} hooks
 * so subclasses record their own command type and arguments.
 *
 * @param trxnLogIndex Ratis transaction log index of the request.
 * @return populated completed-request record.
 */
protected OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  // NOTE(review): creationTime is taken at flush time and may differ across
  // OM replicas — confirm determinism is not required here.
  final long recordedAt = System.currentTimeMillis();
  return OmCompletedRequestInfo.newBuilder()
      .setCmdType(getOperationType())
      .setTrxLogIndex(trxnLogIndex)
      .setCreationTime(recordedAt)
      .setVolumeName(omKeyInfo.getVolumeName())
      .setBucketName(omKeyInfo.getBucketName())
      .setKeyName(omKeyInfo.getKeyName())
      .setOpArgs(getCompletedRequestInfoArgs())
      .build();
}
}

Loading
Loading