// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "storage/browser/blob/blob_url_request_job.h"
#include <algorithm>
#include <limits>
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/files/file_util_proxy.h"
#include "base/format_macros.h"
#include "base/message_loop/message_loop.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/disk_cache.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_util.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_status.h"
#include "storage/browser/fileapi/file_stream_reader.h"
#include "storage/browser/fileapi/file_system_context.h"
#include "storage/browser/fileapi/file_system_url.h"
#include "storage/common/data_element.h"

namespace storage {
namespace {

bool IsFileType(DataElement::Type type) {
switch (type) {
case DataElement::TYPE_FILE:
case DataElement::TYPE_FILE_FILESYSTEM:
return true;
default:
return false;
}
}
} // namespace
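
// A BlobURLRequestJob answers a blob: URL request by streaming the items
// recorded in the BlobDataSnapshot (in-memory bytes, files, filesystem files
// and disk cache entries) as a single HTTP-style response.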
BlobURLRequestJob::BlobURLRequestJob(
net::URLRequest* request,
net::NetworkDelegate* network_delegate,
scoped_ptr<BlobDataSnapshot> blob_data,
storage::FileSystemContext* file_system_context,
base::SingleThreadTaskRunner* file_task_runner)
: net::URLRequestJob(request, network_delegate),
blob_data_(blob_data.Pass()),
file_system_context_(file_system_context),
file_task_runner_(file_task_runner),
total_size_(0),
remaining_bytes_(0),
pending_get_file_info_count_(0),
current_item_index_(0),
current_item_offset_(0),
error_(false),
byte_range_set_(false),
weak_factory_(this) {
TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest", this, "uuid",
blob_data_ ? blob_data_->uuid() : "NotFound");
DCHECK(file_task_runner_.get());
}
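
// The real work happens in DidStart(); it is posted as a task so that the
// job never notifies its consumer synchronously from within Start().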
void BlobURLRequestJob::Start() {
// Continue asynchronously.
base::MessageLoop::current()->PostTask(
FROM_HERE,
base::Bind(&BlobURLRequestJob::DidStart, weak_factory_.GetWeakPtr()));
}
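
// Cancels the job: drops the current file reader and invalidates outstanding
// weak pointers so no in-flight completion callback fires afterwards.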
void BlobURLRequestJob::Kill() {
DeleteCurrentFileReader();
net::URLRequestJob::Kill();
weak_factory_.InvalidateWeakPtrs();
}
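
// Fills |dest| with as much data as is synchronously available. Returns true
// with |*bytes_read| set when the read completed (0 meaning end of data or a
// previously recorded error); returns false when a file or disk cache read
// went asynchronous (IO_PENDING) or failed outright.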
bool BlobURLRequestJob::ReadRawData(net::IOBuffer* dest,
int dest_size,
int* bytes_read) {
DCHECK_NE(dest_size, 0);
DCHECK(bytes_read);
DCHECK_GE(remaining_bytes_, 0);
// Bail out immediately if we have already encountered an error.
if (error_) {
*bytes_read = 0;
return true;
}
if (remaining_bytes_ < dest_size)
dest_size = static_cast<int>(remaining_bytes_);
// If we should copy zero bytes because |remaining_bytes_| is zero,
// short-circuit here.
if (!dest_size) {
*bytes_read = 0;
return true;
}
// Keep track of the buffer.
DCHECK(!read_buf_.get());
read_buf_ = new net::DrainableIOBuffer(dest, dest_size);
return ReadLoop(bytes_read);
}

bool BlobURLRequestJob::GetMimeType(std::string* mime_type) const {
if (!response_info_)
return false;
return response_info_->headers->GetMimeType(mime_type);
}

void BlobURLRequestJob::GetResponseInfo(net::HttpResponseInfo* info) {
if (response_info_)
*info = *response_info_;
}

int BlobURLRequestJob::GetResponseCode() const {
if (!response_info_)
return -1;
return response_info_->headers->response_code();
}
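
// Extracts an optional byte range from the request headers. For example,
// "Range: bytes=0-1023" parses into a single net::HttpByteRange covering the
// first 1024 bytes; requests carrying several ranges are rejected below.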
void BlobURLRequestJob::SetExtraRequestHeaders(
const net::HttpRequestHeaders& headers) {
std::string range_header;
if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
// We only care about the "Range" header here.
std::vector<net::HttpByteRange> ranges;
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
if (ranges.size() == 1) {
byte_range_set_ = true;
byte_range_ = ranges[0];
} else {
// We don't support multiple ranges in a single URL request, because that
// would require multipart encoding here.
// TODO(jianli): Support multipart byte range requests.
NotifyFailure(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE);
}
}
}
}

BlobURLRequestJob::~BlobURLRequestJob() {
STLDeleteValues(&index_to_reader_);
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest", this, "uuid",
blob_data_ ? blob_data_->uuid() : "NotFound");
}

void BlobURLRequestJob::DidStart() {
current_file_chunk_number_ = 0;
error_ = false;
// We only support GET requests, per the spec.
if (request()->method() != "GET") {
NotifyFailure(net::ERR_METHOD_NOT_SUPPORTED);
return;
}
// If the blob data is not present, bail out.
if (!blob_data_) {
NotifyFailure(net::ERR_FILE_NOT_FOUND);
return;
}
CountSize();
}
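
// Records the length of item |index| and folds it into |total_size_|,
// failing the request if the running total would overflow a signed 64-bit
// integer.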
bool BlobURLRequestJob::AddItemLength(size_t index, int64 item_length) {
if (item_length > kint64max - total_size_) {
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::CountSize", this, "uuid",
blob_data_->uuid());
NotifyFailure(net::ERR_FAILED);
return false;
}
// Cache the size and add it to the total size.
DCHECK_LT(index, item_length_list_.size());
item_length_list_[index] = item_length;
total_size_ += item_length;
return true;
}
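
// Computes the total blob size. Memory-backed item lengths are known
// immediately; file-backed items resolve their lengths asynchronously through
// their FileStreamReaders, tracked by |pending_get_file_info_count_|.
// DidCountSize() runs once every length is known.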
bool BlobURLRequestJob::CountSize() {
TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::CountSize", this, "uuid",
blob_data_->uuid());
pending_get_file_info_count_ = 0;
total_size_ = 0;
const auto& items = blob_data_->items();
item_length_list_.resize(items.size());
for (size_t i = 0; i < items.size(); ++i) {
const BlobDataItem& item = *items.at(i);
if (IsFileType(item.type())) {
++pending_get_file_info_count_;
storage::FileStreamReader* const reader = GetFileStreamReader(i);
if (!reader) {
NotifyFailure(net::ERR_FAILED);
return false;
}
if (!reader->GetLength(
base::Bind(&BlobURLRequestJob::DidGetFileItemLength,
weak_factory_.GetWeakPtr(), i))) {
NotifyFailure(net::ERR_FILE_NOT_FOUND);
return false;
}
continue;
}
if (!AddItemLength(i, item.length()))
return false;
}
if (pending_get_file_info_count_ == 0)
DidCountSize(net::OK);
return true;
}

void BlobURLRequestJob::DidCountSize(int error) {
DCHECK(!error_);
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::CountSize", this, "uuid",
blob_data_->uuid());
// If an error occurred, bail out.
if (error != net::OK) {
NotifyFailure(error);
return;
}
// Apply the range requirement.
if (!byte_range_.ComputeBounds(total_size_)) {
NotifyFailure(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE);
return;
}
remaining_bytes_ = base::checked_cast<int64>(
byte_range_.last_byte_position() - byte_range_.first_byte_position() + 1);
DCHECK_GE(remaining_bytes_, 0);
// Do the seek at the beginning of the request.
if (byte_range_.first_byte_position())
Seek(byte_range_.first_byte_position());
NotifySuccess();
}
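
// Completion callback for a file item's GetLength() call. An item length of
// uint64 max is the "rest of the file" sentinel, in which case the length is
// clamped to whatever the file holds past the item's offset.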
void BlobURLRequestJob::DidGetFileItemLength(size_t index, int64 result) {
// Do nothing if we have encountered an error.
if (error_)
return;
if (result == net::ERR_UPLOAD_FILE_CHANGED) {
NotifyFailure(net::ERR_FILE_NOT_FOUND);
return;
} else if (result < 0) {
NotifyFailure(result);
return;
}
const auto& items = blob_data_->items();
DCHECK_LT(index, items.size());
const BlobDataItem& item = *items.at(index);
DCHECK(IsFileType(item.type()));
uint64 file_length = result;
uint64 item_offset = item.offset();
uint64 item_length = item.length();
if (item_offset > file_length) {
NotifyFailure(net::ERR_FILE_NOT_FOUND);
return;
}
uint64 max_length = file_length - item_offset;
// If the item length is undefined, use the file length resolved just now, at
// request time.
if (item_length == std::numeric_limits<uint64>::max()) {
item_length = max_length;
} else if (item_length > max_length) {
NotifyFailure(net::ERR_FILE_NOT_FOUND);
return;
}
if (!AddItemLength(index, item_length))
return;
if (--pending_get_file_info_count_ == 0)
DidCountSize(net::OK);
}
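
// Positions the job at |offset| bytes into the blob: whole items before the
// requested range are skipped, and the remainder becomes the offset into the
// first in-range item. A file-backed first item gets a fresh reader opened at
// the adjusted offset.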
void BlobURLRequestJob::Seek(int64 offset) {
// Skip the initial items that are not in the range.
const auto& items = blob_data_->items();
for (current_item_index_ = 0;
current_item_index_ < items.size() &&
offset >= item_length_list_[current_item_index_];
++current_item_index_) {
offset -= item_length_list_[current_item_index_];
}
// Set the offset to jump to within the first item in the range.
current_item_offset_ = offset;
if (offset == 0)
return;
// Adjust the offset of the first stream if it is a file.
const BlobDataItem& item = *items.at(current_item_index_);
if (IsFileType(item.type())) {
DeleteCurrentFileReader();
CreateFileStreamReader(current_item_index_, offset);
}
}
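
// Reads from the current item, dispatching on its type. Returns false when
// the read failed or went asynchronous; returns true when it completed
// synchronously (possibly after merely advancing past an exhausted item).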
bool BlobURLRequestJob::ReadItem() {
// Are we done reading all the blob data?
if (remaining_bytes_ == 0)
return true;
const auto& items = blob_data_->items();
// If we have run out of items but still expect more data to read, bail out,
// since something is wrong.
if (current_item_index_ >= items.size()) {
NotifyFailure(net::ERR_FAILED);
return false;
}
// Compute the number of bytes to read for the current item.
int bytes_to_read = ComputeBytesToRead();
// If there is nothing to read for the current item, advance to the next one.
if (bytes_to_read == 0) {
AdvanceItem();
return true;
}
// Do the reading.
const BlobDataItem& item = *items.at(current_item_index_);
if (item.type() == DataElement::TYPE_BYTES)
return ReadBytesItem(item, bytes_to_read);
if (item.type() == DataElement::TYPE_DISK_CACHE_ENTRY)
return ReadDiskCacheEntryItem(item, bytes_to_read);
if (!IsFileType(item.type())) {
NOTREACHED();
return false;
}
storage::FileStreamReader* const reader =
GetFileStreamReader(current_item_index_);
if (!reader) {
NotifyFailure(net::ERR_FAILED);
return false;
}
return ReadFileItem(reader, bytes_to_read);
}

void BlobURLRequestJob::AdvanceItem() {
// Close the file if the current item is a file.
DeleteCurrentFileReader();
// Advance to the next item.
current_item_index_++;
current_item_offset_ = 0;
}

void BlobURLRequestJob::AdvanceBytesRead(int result) {
DCHECK_GT(result, 0);
// Have we finished reading the current item?
current_item_offset_ += result;
if (current_item_offset_ == item_length_list_[current_item_index_])
AdvanceItem();
// Deduct the bytes just read from the remaining total.
remaining_bytes_ -= result;
DCHECK_GE(remaining_bytes_, 0);
// Adjust the read buffer.
read_buf_->DidConsume(result);
DCHECK_GE(read_buf_->BytesRemaining(), 0);
}

bool BlobURLRequestJob::ReadBytesItem(const BlobDataItem& item,
int bytes_to_read) {
TRACE_EVENT1("Blob", "BlobRequest::ReadBytesItem", "uuid",
blob_data_->uuid());
DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
memcpy(read_buf_->data(),
item.bytes() + item.offset() + current_item_offset_,
bytes_to_read);
AdvanceBytesRead(bytes_to_read);
return true;
}
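
// Issues a read against |reader|. A non-negative result means the read
// completed synchronously and can be consumed right away; ERR_IO_PENDING
// means DidReadFile() will run later; any other result is a failure.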
bool BlobURLRequestJob::ReadFileItem(FileStreamReader* reader,
int bytes_to_read) {
DCHECK(!GetStatus().is_io_pending())
<< "Can't begin IO while another IO operation is pending.";
DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
DCHECK(reader);
int chunk_number = current_file_chunk_number_++;
TRACE_EVENT_ASYNC_BEGIN1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
blob_data_->uuid());
const int result =
reader->Read(read_buf_.get(), bytes_to_read,
base::Bind(&BlobURLRequestJob::DidReadFile,
weak_factory_.GetWeakPtr(), chunk_number));
if (result >= 0) {
AdvanceBytesRead(result);
return true;
}
if (result == net::ERR_IO_PENDING)
SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
else
NotifyFailure(result);
return false;
}

void BlobURLRequestJob::DidReadFile(int chunk_number, int result) {
DCHECK(GetStatus().is_io_pending())
<< "Asynchronous IO completed while IO wasn't pending?";
TRACE_EVENT_ASYNC_END1("Blob", "BlobRequest::ReadFileItem", this, "uuid",
blob_data_->uuid());
if (result <= 0) {
NotifyFailure(result);
return;
}
SetStatus(net::URLRequestStatus());  // Clear the IO_PENDING status.
AdvanceBytesRead(result);
// Continue reading.
int bytes_read = 0;
if (ReadLoop(&bytes_read))
NotifyReadComplete(bytes_read);
}

void BlobURLRequestJob::DeleteCurrentFileReader() {
IndexToReaderMap::iterator found = index_to_reader_.find(current_item_index_);
if (found != index_to_reader_.end() && found->second) {
delete found->second;
index_to_reader_.erase(found);
}
}

bool BlobURLRequestJob::ReadDiskCacheEntryItem(const BlobDataItem& item,
int bytes_to_read) {
DCHECK(!GetStatus().is_io_pending())
<< "Can't begin IO while another IO operation is pending.";
DCHECK_GE(read_buf_->BytesRemaining(), bytes_to_read);
const int result = item.disk_cache_entry()->ReadData(
item.disk_cache_stream_index(), current_item_offset_, read_buf_.get(),
bytes_to_read, base::Bind(&BlobURLRequestJob::DidReadDiskCacheEntry,
weak_factory_.GetWeakPtr()));
if (result >= 0) {
AdvanceBytesRead(result);
return true;
}
if (result == net::ERR_IO_PENDING)
SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0));
else
NotifyFailure(result);
return false;
}

void BlobURLRequestJob::DidReadDiskCacheEntry(int result) {
DCHECK(GetStatus().is_io_pending())
<< "Asynchronous IO completed while IO wasn't pending?";
if (result <= 0) {
NotifyFailure(result);
return;
}
SetStatus(net::URLRequestStatus());
AdvanceBytesRead(result);
int bytes_read = 0;
if (ReadLoop(&bytes_read))
NotifyReadComplete(bytes_read);
}

int BlobURLRequestJob::BytesReadCompleted() {
int bytes_read = read_buf_->BytesConsumed();
read_buf_ = NULL;
return bytes_read;
}
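
// The next read size is the smallest of: what is left of the current item,
// the space left in |read_buf_|, the bytes left in the requested range, and
// INT_MAX (the result must fit in an int).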
int BlobURLRequestJob::ComputeBytesToRead() const {
int64 current_item_length = item_length_list_[current_item_index_];
int64 item_remaining = current_item_length - current_item_offset_;
int64 buf_remaining = read_buf_->BytesRemaining();
int64 max_remaining = std::numeric_limits<int>::max();
int64 min = std::min(std::min(std::min(item_remaining,
buf_remaining),
remaining_bytes_),
max_remaining);
return static_cast<int>(min);
}
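
// Pumps reads until the buffer is full or the range is exhausted. Returns
// true with |*bytes_read| set when everything completed synchronously;
// returns false if a read failed or is still pending.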
bool BlobURLRequestJob::ReadLoop(int* bytes_read) {
// Read until we encounter an error or the data is not immediately available.
while (remaining_bytes_ > 0 && read_buf_->BytesRemaining() > 0) {
if (!ReadItem())
return false;
}
*bytes_read = BytesReadCompleted();
return true;
}

void BlobURLRequestJob::NotifySuccess() {
net::HttpStatusCode status_code = net::HTTP_OK;
if (byte_range_set_ && byte_range_.IsValid())
status_code = net::HTTP_PARTIAL_CONTENT;
HeadersCompleted(status_code);
}
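
// Maps a net error to an HTTP status and completes the headers with it; for
// example, ERR_FILE_NOT_FOUND surfaces as a 404 response. If headers have
// already been sent, the request is simply failed instead.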
void BlobURLRequestJob::NotifyFailure(int error_code) {
error_ = true;
// If we have already returned the headers on success, we can't change them
// now. Instead, we just error out.
if (response_info_) {
NotifyDone(net::URLRequestStatus(net::URLRequestStatus::FAILED,
error_code));
return;
}
net::HttpStatusCode status_code = net::HTTP_INTERNAL_SERVER_ERROR;
switch (error_code) {
case net::ERR_ACCESS_DENIED:
status_code = net::HTTP_FORBIDDEN;
break;
case net::ERR_FILE_NOT_FOUND:
status_code = net::HTTP_NOT_FOUND;
break;
case net::ERR_METHOD_NOT_SUPPORTED:
status_code = net::HTTP_METHOD_NOT_ALLOWED;
break;
case net::ERR_REQUEST_RANGE_NOT_SATISFIABLE:
status_code = net::HTTP_REQUESTED_RANGE_NOT_SATISFIABLE;
break;
case net::ERR_FAILED:
break;
default:
DCHECK(false);
break;
}
HeadersCompleted(status_code);
}
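
// Builds the response headers. HttpResponseHeaders is given the raw,
// '\0'-terminated status line (e.g. "HTTP/1.1 206 Partial Content"); for
// successful responses the Content-Length, Content-Range, Content-Type and
// Content-Disposition headers are then appended one by one.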
void BlobURLRequestJob::HeadersCompleted(net::HttpStatusCode status_code) {
std::string status("HTTP/1.1 ");
status.append(base::IntToString(status_code));
status.append(" ");
status.append(net::GetHttpReasonPhrase(status_code));
status.append("\0\0", 2);
net::HttpResponseHeaders* headers = new net::HttpResponseHeaders(status);
if (status_code == net::HTTP_OK || status_code == net::HTTP_PARTIAL_CONTENT) {
std::string content_length_header(net::HttpRequestHeaders::kContentLength);
content_length_header.append(": ");
content_length_header.append(base::Int64ToString(remaining_bytes_));
headers->AddHeader(content_length_header);
if (status_code == net::HTTP_PARTIAL_CONTENT) {
DCHECK(byte_range_set_);
DCHECK(byte_range_.IsValid());
std::string content_range_header(net::HttpResponseHeaders::kContentRange);
content_range_header.append(": bytes ");
content_range_header.append(base::StringPrintf(
"%" PRId64 "-%" PRId64,
byte_range_.first_byte_position(), byte_range_.last_byte_position()));
content_range_header.append("/");
content_range_header.append(base::StringPrintf("%" PRId64, total_size_));
headers->AddHeader(content_range_header);
}
if (!blob_data_->content_type().empty()) {
std::string content_type_header(net::HttpRequestHeaders::kContentType);
content_type_header.append(": ");
content_type_header.append(blob_data_->content_type());
headers->AddHeader(content_type_header);
}
if (!blob_data_->content_disposition().empty()) {
std::string content_disposition_header("Content-Disposition: ");
content_disposition_header.append(blob_data_->content_disposition());
headers->AddHeader(content_disposition_header);
}
}
response_info_.reset(new net::HttpResponseInfo());
response_info_->headers = headers;
set_expected_content_size(remaining_bytes_);
NotifyHeadersComplete();
}
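
// Returns the cached FileStreamReader for item |index|, lazily creating one
// positioned at the item's start if none exists yet. Returns nullptr for
// non-file items or when a reader cannot be created.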
FileStreamReader* BlobURLRequestJob::GetFileStreamReader(size_t index) {
const auto& items = blob_data_->items();
DCHECK_LT(index, items.size());
const BlobDataItem& item = *items.at(index);
if (!IsFileType(item.type()))
return nullptr;
if (index_to_reader_.find(index) == index_to_reader_.end()) {
if (!CreateFileStreamReader(index, 0))
return nullptr;
}
DCHECK(index_to_reader_[index]);
return index_to_reader_[index];
}
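
// Creates a reader for a file-backed item, shifted by |additional_offset| to
// honor range requests: local files get a FileStreamReader directly, while
// filesystem-backed items go through the FileSystemContext.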
bool BlobURLRequestJob::CreateFileStreamReader(size_t index,
int64 additional_offset) {
const auto& items = blob_data_->items();
DCHECK_LT(index, items.size());
const BlobDataItem& item = *items.at(index);
DCHECK(IsFileType(item.type()));
DCHECK_EQ(0U, index_to_reader_.count(index));
FileStreamReader* reader = nullptr;
switch (item.type()) {
case DataElement::TYPE_FILE:
reader = FileStreamReader::CreateForLocalFile(
file_task_runner_.get(), item.path(),
item.offset() + additional_offset, item.expected_modification_time());
DCHECK(reader);
index_to_reader_[index] = reader;
return true;
case DataElement::TYPE_FILE_FILESYSTEM:
reader = file_system_context_
->CreateFileStreamReader(
storage::FileSystemURL(file_system_context_->CrackURL(
item.filesystem_url())),
item.offset() + additional_offset,
item.length() == std::numeric_limits<uint64>::max()
? storage::kMaximumLength
: item.length() - additional_offset,
item.expected_modification_time())
.release();
if (reader) {
index_to_reader_[index] = reader;
return true;
}
// The file stream reader may not be obtainable if the file is on an
// isolated file system that has been unmounted.
return false;
default:
break;
}
NOTREACHED();
return false;
}

} // namespace storage