Commit 428d01b5 authored by Ivan.Shulga, committed by Alexander Trofimov

linux: comment out Chromium base assertions and diagnostics (DCHECK/CHECK, DVLOG/DLOG/PLOG, NOTREACHED, leak annotations, test-friend declarations) and a few Skia/gfx includes for the Linux build

git-svn-id: svn://fileserver/activex/AVS/Sources/TeamlabOffice/trunk/ServerComponents@56553 954022d7-b5bf-4e40-9824-e11837661b57
parent 53cf16b3
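This commit silences Chromium base diagnostics by commenting out each DCHECK/CHECK/DVLOG/DLOG/NOTREACHED call site individually across the files below. For comparison only, a less invasive approach would be to neutralize the macros once in a force-included header. The sketch below is a hypothetical illustration, not something this commit contains: the file name linux_logging_shim.h and the logging_shim namespace are invented here, and any earlier definitions coming from base/logging.h would have to be excluded or #undef'd before these take effect.

// linux_logging_shim.h -- hypothetical no-op replacements for the diagnostics
// disabled throughout this commit. Each macro expands to a sink object that
// swallows the streamed message, so call sites such as
//   NOTREACHED() << "unknown json type";
// still compile but emit nothing. Conditions and arguments are not evaluated,
// which matches the effect of commenting the calls out.
#ifndef LINUX_LOGGING_SHIM_H_
#define LINUX_LOGGING_SHIM_H_

namespace logging_shim {

// Accepts anything streamed into a disabled diagnostic and discards it.
class NullStream {
 public:
  template <typename T>
  NullStream& operator<<(const T&) { return *this; }
};

}  // namespace logging_shim

#define SHIM_SINK() ::logging_shim::NullStream()

#define DCHECK(condition) SHIM_SINK()
#define DCHECK_EQ(a, b)   SHIM_SINK()
#define DCHECK_GE(a, b)   SHIM_SINK()
#define DCHECK_GT(a, b)   SHIM_SINK()
#define DCHECK_LE(a, b)   SHIM_SINK()
#define DCHECK_LT(a, b)   SHIM_SINK()
#define NOTREACHED()      SHIM_SINK()
#define DVLOG(verbosity)  SHIM_SINK()
#define DLOG(severity)    SHIM_SINK()
#define DPLOG(severity)   SHIM_SINK()
#define PLOG(severity)    SHIM_SINK()

// The commit also disables CHECK/CHECK_EQ/CHECK_GT and the leak-annotation
// macros; those would follow the same pattern if this approach were used.

#endif  // LINUX_LOGGING_SHIM_H_

Since the build is driven by the qmake/QtCreator project files touched at the top of this diff, such a header could in principle be injected with the compiler's -include flag (for example via QMAKE_CXXFLAGS); that is a design alternative, not part of this change.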
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE QtCreatorProject>
<!-- Written by QtCreator 3.1.1, 2014-06-04T19:20:32. -->
<!-- Written by QtCreator 3.1.1, 2014-06-09T18:27:08. -->
<qtcreator>
<data>
<variable>ProjectExplorer.Project.ActiveTarget</variable>
......
......@@ -33,12 +33,12 @@ const int32 kExtendedASCIIStart = 0x80;
class DictionaryHiddenRootValue : public base::DictionaryValue {
public:
DictionaryHiddenRootValue(std::string* json, Value* root) : json_(json) {
DCHECK(root->IsType(Value::TYPE_DICTIONARY));
//DCHECK(root->IsType(Value::TYPE_DICTIONARY));
DictionaryValue::Swap(static_cast<DictionaryValue*>(root));
}
virtual void Swap(DictionaryValue* other) OVERRIDE {
DVLOG(1) << "Swap()ing a DictionaryValue inefficiently.";
//DVLOG(1) << "Swap()ing a DictionaryValue inefficiently.";
// First deep copy to convert JSONStringValue to std::string and swap that
// copy with |other|, which contains the new contents of |this|.
......@@ -61,7 +61,7 @@ class DictionaryHiddenRootValue : public base::DictionaryValue {
if (!out)
return DictionaryValue::RemoveWithoutPathExpansion(key, out);
DVLOG(1) << "Remove()ing from a DictionaryValue inefficiently.";
//DVLOG(1) << "Remove()ing from a DictionaryValue inefficiently.";
// Otherwise, remove the value while it's still "owned" by this and copy it
// to convert any JSONStringValues to std::string.
......@@ -84,12 +84,12 @@ class DictionaryHiddenRootValue : public base::DictionaryValue {
class ListHiddenRootValue : public base::ListValue {
public:
ListHiddenRootValue(std::string* json, Value* root) : json_(json) {
DCHECK(root->IsType(Value::TYPE_LIST));
//DCHECK(root->IsType(Value::TYPE_LIST));
ListValue::Swap(static_cast<ListValue*>(root));
}
virtual void Swap(ListValue* other) OVERRIDE {
DVLOG(1) << "Swap()ing a ListValue inefficiently.";
//DVLOG(1) << "Swap()ing a ListValue inefficiently.";
// First deep copy to convert JSONStringValue to std::string and swap that
// copy with |other|, which contains the new contents of |this|.
......@@ -108,7 +108,7 @@ class ListHiddenRootValue : public base::ListValue {
if (!out)
return ListValue::Remove(index, out);
DVLOG(1) << "Remove()ing from a ListValue inefficiently.";
//DVLOG(1) << "Remove()ing from a ListValue inefficiently.";
// Otherwise, remove the value while it's still "owned" by this and copy it
// to convert any JSONStringValues to std::string.
......@@ -168,7 +168,7 @@ class StackMarker {
public:
explicit StackMarker(int* depth) : depth_(depth) {
++(*depth_);
DCHECK_LE(*depth_, kStackMaxDepth);
//DCHECK_LE(*depth_, kStackMaxDepth);
}
~StackMarker() {
--(*depth_);
......@@ -300,8 +300,8 @@ JSONParser::StringBuilder::~StringBuilder() {
}
void JSONParser::StringBuilder::Append(const char& c) {
DCHECK_GE(c, 0);
DCHECK_LT(c, 128);
//DCHECK_GE(c, 0);
//DCHECK_LT(c, 128);
if (string_)
string_->push_back(c);
......@@ -310,7 +310,7 @@ void JSONParser::StringBuilder::Append(const char& c) {
}
void JSONParser::StringBuilder::AppendString(const std::string& str) {
DCHECK(string_);
//DCHECK(string_);
string_->append(str);
}
......@@ -343,14 +343,14 @@ inline bool JSONParser::CanConsume(int length) {
}
const char* JSONParser::NextChar() {
DCHECK(CanConsume(1));
//DCHECK(CanConsume(1));
++index_;
++pos_;
return pos_;
}
void JSONParser::NextNChars(int n) {
DCHECK(CanConsume(n));
//DCHECK(CanConsume(n));
index_ += n;
pos_ += n;
}
......@@ -781,7 +781,7 @@ bool JSONParser::DecodeUTF16(std::string* dest_string) {
CBU8_APPEND_UNSAFE(code_unit8, offset, code_point);
} else {
// Not a surrogate.
DCHECK(CBU16_IS_SINGLE(code_unit16_high));
//DCHECK(CBU16_IS_SINGLE(code_unit16_high));
CBU8_APPEND_UNSAFE(code_unit8, offset, code_unit16_high);
}
......
......@@ -254,13 +254,13 @@ class BASE_EXPORT_PRIVATE JSONParser {
int error_column_;
friend class JSONParserTest;
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, NextChar);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeDictionary);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeList);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeString);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeLiterals);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeNumbers);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ErrorMessages);
//FRIEND_TEST_ALL_PREFIXES(JSONParserTest, NextChar);
//FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeDictionary);
//FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeList);
//FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeString);
//FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeLiterals);
//FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeNumbers);
//FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ErrorMessages);
DISALLOW_COPY_AND_ASSIGN(JSONParser);
};
......
......@@ -90,7 +90,7 @@ std::string JSONReader::ErrorCodeToString(JsonParseError error_code) {
case JSON_UNQUOTED_DICTIONARY_KEY:
return kUnquotedDictionaryKey;
default:
NOTREACHED();
//NOTREACHED();
return std::string();
}
}
......
......@@ -58,7 +58,7 @@ JSONWriter::JSONWriter(bool escape, bool omit_binary_values,
omit_double_type_preservation_(omit_double_type_preservation),
pretty_print_(pretty_print),
json_string_(json) {
DCHECK(json);
//DCHECK(json);
}
void JSONWriter::BuildJSONString(const Value* const node, int depth) {
......@@ -71,7 +71,7 @@ void JSONWriter::BuildJSONString(const Value* const node, int depth) {
{
bool value;
bool result = node->GetAsBoolean(&value);
DCHECK(result);
//DCHECK(result);
json_string_->append(value ? "true" : "false");
break;
}
......@@ -80,7 +80,7 @@ void JSONWriter::BuildJSONString(const Value* const node, int depth) {
{
int value;
bool result = node->GetAsInteger(&value);
DCHECK(result);
//DCHECK(result);
base::StringAppendF(json_string_, "%d", value);
break;
}
......@@ -89,7 +89,7 @@ void JSONWriter::BuildJSONString(const Value* const node, int depth) {
{
double value;
bool result = node->GetAsDouble(&value);
DCHECK(result);
//DCHECK(result);
if (omit_double_type_preservation_ &&
value <= kint64max &&
value >= kint64min &&
......@@ -122,7 +122,7 @@ void JSONWriter::BuildJSONString(const Value* const node, int depth) {
{
std::string value;
bool result = node->GetAsString(&value);
DCHECK(result);
//DCHECK(result);
if (escape_) {
JsonDoubleQuote(UTF8ToUTF16(value), true, json_string_);
} else {
......@@ -141,7 +141,7 @@ void JSONWriter::BuildJSONString(const Value* const node, int depth) {
for (size_t i = 0; i < list->GetSize(); ++i) {
const Value* value = NULL;
bool result = list->Get(i, &value);
DCHECK(result);
//DCHECK(result);
if (omit_binary_values_ && value->GetType() == Value::TYPE_BINARY) {
continue;
......@@ -208,13 +208,14 @@ void JSONWriter::BuildJSONString(const Value* const node, int depth) {
case Value::TYPE_BINARY:
{
if (!omit_binary_values_) {
NOTREACHED() << "Cannot serialize binary value.";
//NOTREACHED() << "Cannot serialize binary value.";
}
break;
}
default:
NOTREACHED() << "unknown json type";
//NOTREACHED() << "unknown json type";
break;
}
}
......
......@@ -13,12 +13,12 @@ WeakReference::Flag::Flag() : is_valid_(true) {
void WeakReference::Flag::Invalidate() {
// The flag being invalidated with a single ref implies that there are no
// weak pointers in existence. Allow deletion on another thread in this case.
DCHECK(thread_checker_.CalledOnValidThread() || HasOneRef());
//DCHECK(thread_checker_.CalledOnValidThread() || HasOneRef());
is_valid_ = false;
}
bool WeakReference::Flag::IsValid() const {
DCHECK(thread_checker_.CalledOnValidThread());
//DCHECK(thread_checker_.CalledOnValidThread());
return is_valid_;
}
......
......@@ -45,7 +45,7 @@ bool ReadHistogramArguments(PickleIterator* iter,
!iter->ReadInt(declared_max) ||
!iter->ReadUInt64(bucket_count) ||
!iter->ReadUInt32(range_checksum)) {
DLOG(ERROR) << "Pickle error decoding Histogram: " << *histogram_name;
//DLOG(ERROR) << "Pickle error decoding Histogram: " << *histogram_name;
return false;
}
......@@ -56,13 +56,13 @@ bool ReadHistogramArguments(PickleIterator* iter,
*declared_max < *declared_min ||
INT_MAX / sizeof(HistogramBase::Count) <= *bucket_count ||
*bucket_count < 2) {
DLOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
//DLOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
return false;
}
// We use the arguments to find or create the local version of the histogram
// in this process, so we need to clear the IPC flag.
DCHECK(*flags & HistogramBase::kIPCSerializationSourceFlag);
//DCHECK(*flags & HistogramBase::kIPCSerializationSourceFlag);
*flags &= ~HistogramBase::kIPCSerializationSourceFlag;
return true;
......@@ -91,7 +91,7 @@ HistogramBase* Histogram::FactoryGet(const string& name,
int32 flags) {
bool valid_arguments =
InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
DCHECK(valid_arguments);
//DCHECK(valid_arguments);
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
if (!histogram) {
......@@ -109,8 +109,8 @@ HistogramBase* Histogram::FactoryGet(const string& name,
StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
}
DCHECK_EQ(HISTOGRAM, histogram->GetHistogramType());
CHECK(histogram->HasConstructionArguments(minimum, maximum, bucket_count));
//DCHECK_EQ(HISTOGRAM, histogram->GetHistogramType());
//CHECK(histogram->HasConstructionArguments(minimum, maximum, bucket_count));
return histogram;
}
......@@ -145,7 +145,7 @@ void Histogram::InitializeBucketRanges(Sample minimum,
Sample maximum,
size_t bucket_count,
BucketRanges* ranges) {
DCHECK_EQ(ranges->size(), bucket_count + 1);
//DCHECK_EQ(ranges->size(), bucket_count + 1);
double log_max = log(static_cast<double>(maximum));
double log_ratio;
double log_next;
......@@ -197,7 +197,7 @@ int Histogram::FindCorruption(const HistogramSamples& samples) const {
if (delta > kCommonRaceBasedCountMismatch)
inconsistencies |= COUNT_HIGH_ERROR;
} else {
DCHECK_GT(0, delta);
//DCHECK_GT(0, delta);
UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
if (-delta > kCommonRaceBasedCountMismatch)
inconsistencies |= COUNT_LOW_ERROR;
......@@ -221,16 +221,16 @@ bool Histogram::InspectConstructionArguments(const string& name,
size_t* bucket_count) {
// Defensive code for backward compatibility.
if (*minimum < 1) {
DVLOG(1) << "Histogram: " << name << " has bad minimum: " << *minimum;
//DVLOG(1) << "Histogram: " << name << " has bad minimum: " << *minimum;
*minimum = 1;
}
if (*maximum >= kSampleType_MAX) {
DVLOG(1) << "Histogram: " << name << " has bad maximum: " << *maximum;
//DVLOG(1) << "Histogram: " << name << " has bad maximum: " << *maximum;
*maximum = kSampleType_MAX - 1;
}
if (*bucket_count >= kBucketCount_MAX) {
DVLOG(1) << "Histogram: " << name << " has bad bucket_count: "
<< *bucket_count;
//DVLOG(1) << "Histogram: " << name << " has bad bucket_count: "
// << *bucket_count;
*bucket_count = kBucketCount_MAX - 1;
}
......@@ -255,8 +255,8 @@ bool Histogram::HasConstructionArguments(Sample minimum,
}
void Histogram::Add(int value) {
DCHECK_EQ(0, ranges(0));
DCHECK_EQ(kSampleType_MAX, ranges(bucket_count_));
//DCHECK_EQ(0, ranges(0));
//DCHECK_EQ(kSampleType_MAX, ranges(bucket_count_));
if (value > kSampleType_MAX - 1)
value = kSampleType_MAX - 1;
......@@ -290,7 +290,7 @@ void Histogram::WriteAscii(string* output) const {
}
bool Histogram::SerializeInfoImpl(Pickle* pickle) const {
DCHECK(bucket_ranges()->HasValidChecksum());
//DCHECK(bucket_ranges()->HasValidChecksum());
return pickle->WriteString(histogram_name()) &&
pickle->WriteInt(flags()) &&
pickle->WriteInt(declared_min()) &&
......@@ -326,7 +326,7 @@ bool Histogram::PrintEmptyBucket(size_t index) const {
// buckets), so we need this to make it possible to see what is going on and
// not have 0-graphical-height buckets.
double Histogram::GetBucketSize(Count current, size_t i) const {
DCHECK_GT(ranges(i + 1), ranges(i));
//DCHECK_GT(ranges(i + 1), ranges(i));
static const double kTransitionWidth = 5;
double denominator = ranges(i + 1) - ranges(i);
if (denominator > kTransitionWidth)
......@@ -436,7 +436,7 @@ void Histogram::WriteAsciiImpl(bool graph_it,
output->append(newline);
past += current;
}
DCHECK_EQ(sample_count, past);
//DCHECK_EQ(sample_count, past);
}
double Histogram::GetPeakBucketSize(const SampleVector& samples) const {
......@@ -457,7 +457,7 @@ void Histogram::WriteAsciiHeader(const SampleVector& samples,
histogram_name().c_str(),
sample_count);
if (0 == sample_count) {
DCHECK_EQ(samples.sum(), 0);
//DCHECK_EQ(samples.sum(), 0);
} else {
double average = static_cast<float>(samples.sum()) / sample_count;
......@@ -539,7 +539,7 @@ HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
const DescriptionPair descriptions[]) {
bool valid_arguments = Histogram::InspectConstructionArguments(
name, &minimum, &maximum, &bucket_count);
DCHECK(valid_arguments);
//DCHECK(valid_arguments);
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
if (!histogram) {
......@@ -566,8 +566,8 @@ HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
}
DCHECK_EQ(LINEAR_HISTOGRAM, histogram->GetHistogramType());
CHECK(histogram->HasConstructionArguments(minimum, maximum, bucket_count));
//DCHECK_EQ(LINEAR_HISTOGRAM, histogram->GetHistogramType());
//CHECK(histogram->HasConstructionArguments(minimum, maximum, bucket_count));
return histogram;
}
......@@ -584,7 +584,7 @@ LinearHistogram::LinearHistogram(const string& name,
}
double LinearHistogram::GetBucketSize(Count current, size_t i) const {
DCHECK_GT(ranges(i + 1), ranges(i));
//DCHECK_GT(ranges(i + 1), ranges(i));
// Adjacent buckets with different widths would have "surprisingly" many (few)
// samples in a histogram if we didn't normalize this way.
double denominator = ranges(i + 1) - ranges(i);
......@@ -608,7 +608,7 @@ void LinearHistogram::InitializeBucketRanges(Sample minimum,
Sample maximum,
size_t bucket_count,
BucketRanges* ranges) {
DCHECK_EQ(ranges->size(), bucket_count + 1);
//DCHECK_EQ(ranges->size(), bucket_count + 1);
double min = minimum;
double max = maximum;
size_t i;
......@@ -665,7 +665,7 @@ HistogramBase* BooleanHistogram::FactoryGet(const string& name, int32 flags) {
StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
}
DCHECK_EQ(BOOLEAN_HISTOGRAM, histogram->GetHistogramType());
//DCHECK_EQ(BOOLEAN_HISTOGRAM, histogram->GetHistogramType());
return histogram;
}
......@@ -706,7 +706,7 @@ HistogramBase* BooleanHistogram::DeserializeInfoImpl(PickleIterator* iter) {
HistogramBase* CustomHistogram::FactoryGet(const string& name,
const vector<Sample>& custom_ranges,
int32 flags) {
CHECK(ValidateCustomRanges(custom_ranges));
//CHECK(ValidateCustomRanges(custom_ranges));
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
if (!histogram) {
......@@ -724,7 +724,7 @@ HistogramBase* CustomHistogram::FactoryGet(const string& name,
StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
}
DCHECK_EQ(histogram->GetHistogramType(), CUSTOM_HISTOGRAM);
//DCHECK_EQ(histogram->GetHistogramType(), CUSTOM_HISTOGRAM);
return histogram;
}
......
......@@ -155,8 +155,6 @@ class Lock;
base::subtle::Release_Store(&atomic_histogram_pointer, \
reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
} \
DCHECK_EQ(histogram_pointer->histogram_name(), \
std::string(constant_histogram_name)); \
histogram_pointer->histogram_add_method_invocation; \
} while (0)
......
......@@ -32,7 +32,9 @@ std::string HistogramTypeToString(HistogramType type) {
case SPARSE_HISTOGRAM:
return "SPARSE_HISTOGRAM";
default:
NOTREACHED();
//NOTREACHED();
break;
}
return "UNKNOWN";
}
......@@ -64,8 +66,8 @@ void DeserializeHistogramAndAddSamples(PickleIterator* iter) {
return;
if (histogram->flags() & base::HistogramBase::kIPCSerializationSourceFlag) {
DVLOG(1) << "Single process mode, histogram observed and not copied: "
<< histogram->histogram_name();
//DVLOG(1) << "Single process mode, histogram observed and not copied: "
// << histogram->histogram_name();
return;
}
histogram->AddSamplesFromPickle(iter);
......
......@@ -44,7 +44,7 @@ class BASE_EXPORT_PRIVATE SampleVector : public HistogramSamples {
virtual size_t GetBucketIndex(HistogramBase::Sample value) const;
private:
FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
//FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
std::vector<HistogramBase::Count> counts_;
......
......@@ -54,7 +54,7 @@ HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
// Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
// twice if (lock_ == NULL) || (!histograms_).
if (lock_ == NULL) {
ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
//ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
return histogram;
}
......@@ -69,7 +69,7 @@ HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
HistogramMap::iterator it = histograms_->find(name);
if (histograms_->end() == it) {
(*histograms_)[name] = histogram;
ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
//ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
++number_of_histograms_;
histogram_to_return = histogram;
} else if (histogram == it->second) {
......@@ -89,17 +89,17 @@ HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
// static
const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
const BucketRanges* ranges) {
DCHECK(ranges->HasValidChecksum());
//DCHECK(ranges->HasValidChecksum());
scoped_ptr<const BucketRanges> ranges_deleter;
if (lock_ == NULL) {
ANNOTATE_LEAKING_OBJECT_PTR(ranges);
//ANNOTATE_LEAKING_OBJECT_PTR(ranges);
return ranges;
}
base::AutoLock auto_lock(*lock_);
if (ranges_ == NULL) {
ANNOTATE_LEAKING_OBJECT_PTR(ranges);
//ANNOTATE_LEAKING_OBJECT_PTR(ranges);
return ranges;
}
......@@ -108,7 +108,7 @@ const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
if (ranges_->end() == ranges_it) {
// Add a new matching list to map.
checksum_matching_list = new list<const BucketRanges*>();
ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
//ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
(*ranges_)[ranges->checksum()] = checksum_matching_list;
} else {
checksum_matching_list = ranges_it->second;
......@@ -228,7 +228,7 @@ void StatisticsRecorder::GetHistograms(Histograms* output) {
for (HistogramMap::iterator it = histograms_->begin();
histograms_->end() != it;
++it) {
DCHECK_EQ(it->first, it->second->histogram_name());
//DCHECK_EQ(it->first, it->second->histogram_name());
output->push_back(it->second);
}
}
......@@ -290,7 +290,7 @@ void StatisticsRecorder::GetSnapshot(const std::string& query,
// of main(), and hence it is not thread safe. It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
DCHECK(!histograms_);
//DCHECK(!histograms_);
if (lock_ == NULL) {
// This will leak on purpose. It's the only way to make sure we won't race
// against the static uninitialization of the module while one of our
......@@ -304,22 +304,22 @@ StatisticsRecorder::StatisticsRecorder() {
histograms_ = new HistogramMap;
ranges_ = new RangesMap;
if (VLOG_IS_ON(1))
AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
//if (VLOG_IS_ON(1))
// AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
}
// static
void StatisticsRecorder::DumpHistogramsToVlog(void* instance) {
DCHECK(VLOG_IS_ON(1));
//DCHECK(VLOG_IS_ON(1));
StatisticsRecorder* me = reinterpret_cast<StatisticsRecorder*>(instance);
string output;
me->WriteGraph(std::string(), &output);
VLOG(1) << output;
//VLOG(1) << output;
}
StatisticsRecorder::~StatisticsRecorder() {
DCHECK(histograms_ && ranges_ && lock_);
//DCHECK(histograms_ && ranges_ && lock_);
// Clean up.
scoped_ptr<HistogramMap> histograms_deleter;
......
......@@ -64,7 +64,7 @@ base::Closure RunLoop::QuitClosure() {
}
bool RunLoop::BeforeRun() {
DCHECK(!run_called_);
//DCHECK(!run_called_);
run_called_ = true;
// Allow Quit to be called before Run.
......
......@@ -21,12 +21,12 @@ ConditionVariable::ConditionVariable(Lock* user_lock)
#endif
{
int rv = pthread_cond_init(&condition_, NULL);
DCHECK_EQ(0, rv);
//DCHECK_EQ(0, rv);
}
ConditionVariable::~ConditionVariable() {
int rv = pthread_cond_destroy(&condition_);
DCHECK_EQ(0, rv);
//DCHECK_EQ(0, rv);
}
void ConditionVariable::Wait() {
......@@ -35,7 +35,7 @@ void ConditionVariable::Wait() {
user_lock_->CheckHeldAndUnmark();
#endif
int rv = pthread_cond_wait(&condition_, user_mutex_);
DCHECK_EQ(0, rv);
//DCHECK_EQ(0, rv);
#if !defined(NDEBUG)
user_lock_->CheckUnheldAndMark();
#endif
......@@ -55,13 +55,13 @@ void ConditionVariable::TimedWait(const TimeDelta& max_time) {
Time::kNanosecondsPerMicrosecond;
abstime.tv_sec += abstime.tv_nsec / Time::kNanosecondsPerSecond;
abstime.tv_nsec %= Time::kNanosecondsPerSecond;
DCHECK_GE(abstime.tv_sec, now.tv_sec); // Overflow paranoia
//DCHECK_GE(abstime.tv_sec, now.tv_sec); // Overflow paranoia
#if !defined(NDEBUG)
user_lock_->CheckHeldAndUnmark();
#endif
int rv = pthread_cond_timedwait(&condition_, user_mutex_, &abstime);
DCHECK(rv == 0 || rv == ETIMEDOUT);
//DCHECK(rv == 0 || rv == ETIMEDOUT);
#if !defined(NDEBUG)
user_lock_->CheckUnheldAndMark();
#endif
......@@ -69,12 +69,12 @@ void ConditionVariable::TimedWait(const TimeDelta& max_time) {
void ConditionVariable::Broadcast() {
int rv = pthread_cond_broadcast(&condition_);
DCHECK_EQ(0, rv);
//DCHECK_EQ(0, rv);
}
void ConditionVariable::Signal() {
int rv = pthread_cond_signal(&condition_);
DCHECK_EQ(0, rv);
//DCHECK_EQ(0, rv);
}
} // namespace base
......@@ -16,13 +16,13 @@ LockImpl::LockImpl() {
// In debug, setup attributes for lock error checking.
pthread_mutexattr_t mta;
int rv = pthread_mutexattr_init(&mta);
DCHECK_EQ(rv, 0);
//DCHECK_EQ(rv, 0);
rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
DCHECK_EQ(rv, 0);
//DCHECK_EQ(rv, 0);
rv = pthread_mutex_init(&os_lock_, &mta);
DCHECK_EQ(rv, 0);
//DCHECK_EQ(rv, 0);
rv = pthread_mutexattr_destroy(&mta);
DCHECK_EQ(rv, 0);
//DCHECK_EQ(rv, 0);
#else
// In release, go with the default lock attributes.
pthread_mutex_init(&os_lock_, NULL);
......@@ -31,23 +31,23 @@ LockImpl::LockImpl() {
LockImpl::~LockImpl() {
int rv = pthread_mutex_destroy(&os_lock_);
DCHECK_EQ(rv, 0);
//DCHECK_EQ(rv, 0);
}
bool LockImpl::Try() {
int rv = pthread_mutex_trylock(&os_lock_);
DCHECK(rv == 0 || rv == EBUSY);
//DCHECK(rv == 0 || rv == EBUSY);
return rv == 0;
}
void LockImpl::Lock() {
int rv = pthread_mutex_lock(&os_lock_);
DCHECK_EQ(rv, 0);
//DCHECK_EQ(rv, 0);
}
void LockImpl::Unlock() {
int rv = pthread_mutex_unlock(&os_lock_);
DCHECK_EQ(rv, 0);
//DCHECK_EQ(rv, 0);
}
} // namespace internal
......
......@@ -154,7 +154,7 @@ class SyncWaiter : public WaitableEvent::Waiter {
void WaitableEvent::Wait() {
bool result = TimedWait(TimeDelta::FromSeconds(-1));
DCHECK(result) << "TimedWait() should never fail with infinite timeout";
//DCHECK(result) << "TimedWait() should never fail with infinite timeout";
}
bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
......@@ -226,7 +226,7 @@ cmp_fst_addr(const std::pair<WaitableEvent*, unsigned> &a,
size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
size_t count) {
base::ThreadRestrictions::AssertWaitAllowed();
DCHECK(count) << "Cannot wait on no events";
//DCHECK(count) << "Cannot wait on no events";
// We need to acquire the locks in a globally consistent order. Thus we sort
// the array of waitables by address. We actually sort pairs so that we can
......@@ -236,7 +236,7 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
for (size_t i = 0; i < count; ++i)
waitables.push_back(std::make_pair(raw_waitables[i], i));
DCHECK_EQ(count, waitables.size());
//DCHECK_EQ(count, waitables.size());
sort(waitables.begin(), waitables.end(), cmp_fst_addr);
......@@ -244,7 +244,7 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
// address, we can check this cheaply by comparing pairs of consecutive
// elements.
for (size_t i = 0; i < waitables.size() - 1; ++i) {
DCHECK(waitables[i].first != waitables[i+1].first);
//DCHECK(waitables[i].first != waitables[i+1].first);
}
SyncWaiter sw;
......
......@@ -29,7 +29,7 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
PostTaskAndReplyTaskRunner::PostTaskAndReplyTaskRunner(
TaskRunner* destination) : destination_(destination) {
DCHECK(destination_);
//DCHECK(destination_);
}
bool PostTaskAndReplyTaskRunner::PostTask(
......
......@@ -20,7 +20,7 @@ base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle> >
// static
scoped_refptr<SingleThreadTaskRunner> ThreadTaskRunnerHandle::Get() {
ThreadTaskRunnerHandle* current = lazy_tls_ptr.Pointer()->Get();
DCHECK(current);
//DCHECK(current);
return current->task_runner_;
}
......@@ -32,14 +32,14 @@ bool ThreadTaskRunnerHandle::IsSet() {
ThreadTaskRunnerHandle::ThreadTaskRunnerHandle(
const scoped_refptr<SingleThreadTaskRunner>& task_runner)
: task_runner_(task_runner) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!lazy_tls_ptr.Pointer()->Get());
//DCHECK(task_runner_->BelongsToCurrentThread());
//DCHECK(!lazy_tls_ptr.Pointer()->Get());
lazy_tls_ptr.Pointer()->Set(this);
}
ThreadTaskRunnerHandle::~ThreadTaskRunnerHandle() {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
//DCHECK(task_runner_->BelongsToCurrentThread());
//DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
lazy_tls_ptr.Pointer()->Set(NULL);
}
......
......@@ -52,7 +52,7 @@ void SetCurrentThreadPriority(ThreadPriority priority) {
#if defined(OS_LINUX) || defined(OS_ANDROID)
switch (priority) {
case kThreadPriority_Normal:
NOTREACHED() << "Don't reset priority as not all processes can.";
//NOTREACHED() << "Don't reset priority as not all processes can.";
break;
case kThreadPriority_RealtimeAudio:
#if defined(OS_LINUX)
......@@ -62,7 +62,9 @@ void SetCurrentThreadPriority(ThreadPriority priority) {
// in the process. Setting this priority will only succeed if the user
// has been granted permission to adjust nice values on the system.
if (setpriority(PRIO_PROCESS, PlatformThread::CurrentId(), kNiceSetting))
DVLOG(1) << "Failed to set nice value of thread to " << kNiceSetting;
{
//DVLOG(1) << "Failed to set nice value of thread to " << kNiceSetting;
}
#elif defined(OS_ANDROID)
JNIEnv* env = base::android::AttachCurrentThread();
Java_ThreadUtils_setThreadPriorityAudio(env, PlatformThread::CurrentId());
......@@ -80,7 +82,9 @@ void* ThreadFunc(void* params) {
// where they were created. This sets all threads to the default.
// TODO(epenner): Move thread priorities to base. (crbug.com/170549)
if (setpriority(PRIO_PROCESS, PlatformThread::CurrentId(), 0))
DVLOG(1) << "Failed to reset initial thread nice value to zero.";
{
//DVLOG(1) << "Failed to reset initial thread nice value to zero.";
}
#endif
ThreadParams* thread_params = static_cast<ThreadParams*>(params);
PlatformThread::Delegate* delegate = thread_params->delegate;
......@@ -160,7 +164,7 @@ bool CreateThread(size_t stack_size, bool joinable,
success = !err;
if (!success) {
errno = err;
PLOG(ERROR) << "pthread_create";
//PLOG(ERROR) << "pthread_create";
}
pthread_attr_destroy(&attributes);
......@@ -234,7 +238,9 @@ void PlatformThread::SetName(const char* name) {
int err = prctl(PR_SET_NAME, name);
// We expect EPERM failures in sandboxed processes, just ignore those.
if (err < 0 && errno != EPERM)
DPLOG(ERROR) << "prctl(PR_SET_NAME)";
{
//DPLOG(ERROR) << "prctl(PR_SET_NAME)";
}
}
#elif defined(OS_MACOSX)
// Mac is implemented in platform_thread_mac.mm.
......
......@@ -72,7 +72,7 @@ void ThreadIdNameManager::RemoveName(PlatformThreadId id) {
AutoLock locked(lock_);
ThreadIdToInternedNameIterator iter = thread_id_to_interned_name_.find(id);
DCHECK((iter != thread_id_to_interned_name_.end()));
//DCHECK((iter != thread_id_to_interned_name_.end()));
thread_id_to_interned_name_.erase(iter);
}
......
......@@ -15,13 +15,13 @@ namespace internal {
// static
void ThreadLocalPlatform::AllocateSlot(SlotType& slot) {
int error = pthread_key_create(&slot, NULL);
CHECK_EQ(error, 0);
//CHECK_EQ(error, 0);
}
// static
void ThreadLocalPlatform::FreeSlot(SlotType& slot) {
int error = pthread_key_delete(slot);
DCHECK_EQ(0, error);
//DCHECK_EQ(0, error);
}
// static
......@@ -32,7 +32,7 @@ void* ThreadLocalPlatform::GetValueFromSlot(SlotType& slot) {
// static
void ThreadLocalPlatform::SetValueInSlot(SlotType& slot, void* value) {
int error = pthread_setspecific(slot, value);
DCHECK_EQ(error, 0);
//DCHECK_EQ(error, 0);
}
} // namespace internal
......
......@@ -15,10 +15,10 @@ ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
}
bool ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
DCHECK(!initialized_);
//DCHECK(!initialized_);
int error = pthread_key_create(&key_, destructor);
if (error) {
NOTREACHED();
//NOTREACHED();
return false;
}
......@@ -27,23 +27,27 @@ bool ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
}
void ThreadLocalStorage::StaticSlot::Free() {
DCHECK(initialized_);
//DCHECK(initialized_);
int error = pthread_key_delete(key_);
if (error)
NOTREACHED();
{
//NOTREACHED();
}
initialized_ = false;
}
void* ThreadLocalStorage::StaticSlot::Get() const {
DCHECK(initialized_);
//DCHECK(initialized_);
return pthread_getspecific(key_);
}
void ThreadLocalStorage::StaticSlot::Set(void* value) {
DCHECK(initialized_);
//DCHECK(initialized_);
int error = pthread_setspecific(key_, value);
if (error)
NOTREACHED();
{
//NOTREACHED();
}
}
} // namespace base
......@@ -87,8 +87,8 @@ time_t Time::ToTimeT() const {
return std::numeric_limits<time_t>::max();
}
if (std::numeric_limits<int64>::max() - kTimeTToMicrosecondsOffset <= us_) {
DLOG(WARNING) << "Overflow when converting base::Time with internal " <<
"value " << us_ << " to time_t.";
//DLOG(WARNING) << "Overflow when converting base::Time with internal " <<
// "value " << us_ << " to time_t.";
return std::numeric_limits<time_t>::max();
}
return (us_ - kTimeTToMicrosecondsOffset) / kMicrosecondsPerSecond;
......@@ -160,7 +160,7 @@ Time Time::LocalMidnight() const {
bool Time::FromStringInternal(const char* time_string,
bool is_local,
Time* parsed_time) {
DCHECK((time_string != NULL) && (parsed_time != NULL));
//DCHECK((time_string != NULL) && (parsed_time != NULL));
if (time_string[0] == '\0')
return false;
......
......@@ -110,8 +110,8 @@ Time Time::Now() {
struct timeval tv;
struct timezone tz = { 0, 0 }; // UTC
if (gettimeofday(&tv, &tz) != 0) {
DCHECK(0) << "Could not determine time of day";
LOG_ERRNO(ERROR) << "Call to gettimeofday failed.";
//DCHECK(0) << "Could not determine time of day";
//LOG_ERRNO(ERROR) << "Call to gettimeofday failed.";
// Return null instead of uninitialized |tv| value, which contains random
// garbage data. This may result in the crash seen in crbug.com/147570.
return Time();
......@@ -237,7 +237,7 @@ TimeTicks TimeTicks::Now() {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
NOTREACHED() << "clock_gettime(CLOCK_MONOTONIC) failed.";
//NOTREACHED() << "clock_gettime(CLOCK_MONOTONIC) failed.";
return TimeTicks();
}
......@@ -292,8 +292,8 @@ TimeTicks TimeTicks::NowFromSystemTraceTime() {
// static
Time Time::FromTimeVal(struct timeval t) {
DCHECK_LT(t.tv_usec, static_cast<int>(Time::kMicrosecondsPerSecond));
DCHECK_GE(t.tv_usec, 0);
//DCHECK_LT(t.tv_usec, static_cast<int>(Time::kMicrosecondsPerSecond));
//DCHECK_GE(t.tv_usec, 0);
if (t.tv_usec == 0 && t.tv_sec == 0)
return Time();
if (t.tv_usec == static_cast<suseconds_t>(Time::kMicrosecondsPerSecond) - 1 &&
......
......@@ -92,7 +92,7 @@ void DeathData::RecordDeath(const int32 queue_duration,
// don't clamp count_... but that should be inconsequentially likely).
// We ignore the fact that we correlated our selection of a sample to the run
// and queue times (i.e., we used them to generate random_number).
CHECK_GT(count_, 0);
//CHECK_GT(count_, 0);
if (0 == (random_number % count_)) {
queue_duration_sample_ = queue_duration;
run_duration_sample_ = run_duration;
......@@ -238,7 +238,7 @@ ThreadData::ThreadData(const std::string& suggested_name)
next_retired_worker_(NULL),
worker_thread_number_(0),
incarnation_count_for_pool_(-1) {
DCHECK_GE(suggested_name.size(), 0u);
//DCHECK_GE(suggested_name.size(), 0u);
thread_name_ = suggested_name;
PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
}
......@@ -248,7 +248,7 @@ ThreadData::ThreadData(int thread_number)
next_retired_worker_(NULL),
worker_thread_number_(thread_number),
incarnation_count_for_pool_(-1) {
CHECK_GT(thread_number, 0);
//CHECK_GT(thread_number, 0);
base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
}
......@@ -263,7 +263,7 @@ void ThreadData::PushToHeadOfList() {
random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
random_number_ ^= (Now() - TrackedTime()).InMilliseconds();
DCHECK(!next_);
//DCHECK(!next_);
base::AutoLock lock(*list_lock_.Pointer());
incarnation_count_for_pool_ = incarnation_counter_;
next_ = all_thread_data_list_head_;
......@@ -314,10 +314,10 @@ ThreadData* ThreadData::Get() {
// If we can't find a previously used instance, then we have to create one.
if (!worker_thread_data) {
DCHECK_GT(worker_thread_number, 0);
//DCHECK_GT(worker_thread_number, 0);
worker_thread_data = new ThreadData(worker_thread_number);
}
DCHECK_GT(worker_thread_data->worker_thread_number_, 0);
//DCHECK_GT(worker_thread_data->worker_thread_number_, 0);
tls_index_.Set(worker_thread_data);
return worker_thread_data;
......@@ -325,7 +325,7 @@ ThreadData* ThreadData::Get() {
// static
void ThreadData::OnThreadTermination(void* thread_data) {
DCHECK(thread_data); // TLS should *never* call us with a NULL.
//DCHECK(thread_data); // TLS should *never* call us with a NULL.
// We must NOT do any allocations during this callback. There is a chance
// that the allocator is no longer active on this thread.
if (!kTrackAllTaskObjects)
......@@ -346,7 +346,7 @@ void ThreadData::OnThreadTerminationCleanup() {
}
// We must NOT do any allocations during this callback.
// Using the simple linked lists avoids all allocations.
DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
//DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
this->next_retired_worker_ = first_retired_worker_;
first_retired_worker_ = this;
}
......@@ -428,7 +428,7 @@ void ThreadData::TallyADeath(const Births& birth,
if (!kTrackParentChildLinks)
return;
if (!parent_stack_.empty()) { // We might get turned off.
DCHECK_EQ(parent_stack_.top(), &birth);
//DCHECK_EQ(parent_stack_.top(), &birth);
parent_stack_.pop();
}
}
......@@ -681,13 +681,13 @@ bool ThreadData::Initialize() {
// Perform the "real" TLS initialization now, and leave it intact through
// process termination.
if (!tls_index_.initialized()) { // Testing may have initialized this.
DCHECK_EQ(status_, UNINITIALIZED);
//DCHECK_EQ(status_, UNINITIALIZED);
tls_index_.Initialize(&ThreadData::OnThreadTermination);
if (!tls_index_.initialized())
return false;
} else {
// TLS was initialized for us earlier.
DCHECK_EQ(status_, DORMANT_DURING_TESTS);
//DCHECK_EQ(status_, DORMANT_DURING_TESTS);
}
// Incarnation counter is only significant to testing, as it otherwise will
......@@ -701,14 +701,14 @@ bool ThreadData::Initialize() {
if (!kTrackParentChildLinks &&
kInitialStartupState == PROFILING_CHILDREN_ACTIVE)
status_ = PROFILING_ACTIVE;
DCHECK(status_ != UNINITIALIZED);
//DCHECK(status_ != UNINITIALIZED);
return true;
}
// static
bool ThreadData::InitializeAndSetTrackingStatus(Status status) {
DCHECK_GE(status, DEACTIVATED);
DCHECK_LE(status, PROFILING_CHILDREN_ACTIVE);
//DCHECK_GE(status, DEACTIVATED);
//DCHECK_LE(status, PROFILING_CHILDREN_ACTIVE);
if (!Initialize()) // No-op if already initialized.
return false; // Not compiled in.
......@@ -751,7 +751,7 @@ TrackedTime ThreadData::NowForEndOfRun() {
// static
void ThreadData::SetAlternateTimeSource(NowFunction* now_function) {
DCHECK(now_function);
//DCHECK(now_function);
if (kAllowAlternateTimeSourceHandling)
now_function_ = now_function;
}
......@@ -774,7 +774,7 @@ void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
// caller should tell us how many thread shutdowns should have taken place by
// now.
return; // TODO(jar): until this is working on XP, don't run the real test.
CHECK_GT(cleanup_count_, major_threads_shutdown_count);
//CHECK_GT(cleanup_count_, major_threads_shutdown_count);
}
// static
......@@ -793,7 +793,7 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
// To be clean, break apart the retired worker list (though we leak them).
while (first_retired_worker_) {
ThreadData* worker = first_retired_worker_;
CHECK_GT(worker->worker_thread_number_, 0);
//CHECK_GT(worker->worker_thread_number_, 0);
first_retired_worker_ = worker->next_retired_worker_;
worker->next_retired_worker_ = NULL;
}
......
......@@ -22,7 +22,7 @@
#include "ui/base/gtk/gtk_signal.h"
#include "ui/base/gtk/scoped_gobject.h"
#include "ui/base/x/x11_util.h"
#include "ui/gfx/canvas.h"
//#include "ui/gfx/canvas.h"
#include "ui/gfx/gtk_util.h"
#include "ui/gfx/size.h"
......
......@@ -11,8 +11,8 @@
#include "base/basictypes.h"
#include "base/command_line.h"
#include "base/memory/scoped_ptr.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkUnPreMultiply.h"
//#include "third_party/skia/include/core/SkBitmap.h"
//#include "third_party/skia/include/core/SkUnPreMultiply.h"
#include "ui/gfx/rect.h"
namespace {
......@@ -83,6 +83,7 @@ void GdkInitFromCommandLine(const CommandLine& command_line) {
CommonInitFromCommandLine(command_line, gdk_init);
}
/*
GdkPixbuf* GdkPixbufFromSkBitmap(const SkBitmap& bitmap) {
if (bitmap.isNull())
return NULL;
......@@ -125,7 +126,7 @@ GdkPixbuf* GdkPixbufFromSkBitmap(const SkBitmap& bitmap) {
return pixbuf;
}
*/
void SubtractRectanglesFromRegion(GdkRegion* region,
const std::vector<Rect>& cutouts) {
for (size_t i = 0; i < cutouts.size(); ++i) {
......
......@@ -30,7 +30,7 @@ UI_EXPORT void GdkInitFromCommandLine(const CommandLine& command_line);
// Convert and copy a SkBitmap to a GdkPixbuf. NOTE: this uses BGRAToRGBA, so
// it is an expensive operation. The returned GdkPixbuf will have a refcount of
// 1, and the caller is responsible for unrefing it when done.
UI_EXPORT GdkPixbuf* GdkPixbufFromSkBitmap(const SkBitmap& bitmap);
//UI_EXPORT GdkPixbuf* GdkPixbufFromSkBitmap(const SkBitmap& bitmap);
// Modify the given region by subtracting the given rectangles.
UI_EXPORT void SubtractRectanglesFromRegion(GdkRegion* region,
......