ConcurrentDictionary源码概读
ConcurrentDictionary的数据结构主要由Tables和Node组成,其中Tables包括桶(Node,节点)数组、局部锁(Local lock)、每个锁保护的元素数量(PerLock)。Node包含用户实际操作的key和value,以及为实现链表数据结构的下一个节点(Next Node)的引用和当前节点key的原始(未取正)散列值。以及其它一些标识。
private class Tables
{
    /// <summary>
    /// Head of the singly-linked node list for each bucket.
    /// </summary>
    internal readonly Node[] m_buckets;

    /// <summary>
    /// Lock array; each lock guards a stripe (section) of the table.
    /// </summary>
    internal readonly object[] m_locks;

    /// <summary>
    /// Number of elements guarded by each lock.
    /// </summary>
    internal volatile int[] m_countPerLock;

    /// <summary>
    /// Equality comparer used for the keys.
    /// </summary>
    internal readonly IEqualityComparer<TKey> m_comparer;

    internal Tables(Node[] buckets, object[] locks, int[] countPerLock, IEqualityComparer<TKey> comparer)
    {
        m_buckets = buckets;
        m_locks = locks;
        m_countPerLock = countPerLock;
        m_comparer = comparer;
    }
}

private class Node
{
    internal TKey m_key;            // the user's key
    internal TValue m_value;        // the user's value
    internal volatile Node m_next;  // next node in this bucket's linked list
    internal int m_hashcode;        // full (sign bit not yet masked) hash code of the key

    internal Node(TKey key, TValue value, int hashcode, Node next)
    {
        m_key = key;
        m_value = value;
        m_next = next;
        m_hashcode = hashcode;
    }
}
当new一个ConcurrentDictionary时,默认调用无参构造函数,给定默认的并发数量(Environment.ProcessorCount)、默认的键比较器、默认的容量(桶的初始容量,为31),该容量是经过权衡得到的,不能被较小的素数(2、3、5、7)整除。之后再处理容量与并发数的关系、容量与锁的关系以及每个锁的最大元素数。将桶、锁对象、锁保护的计数封装在一个对象中,并初始化。
/// <summary>
/// Initializes a new, empty instance of the ConcurrentDictionary class with the default
/// concurrency level, the default initial capacity, and the default key comparer.
/// </summary>
public ConcurrentDictionary() :
    this(DefaultConcurrencyLevel, DEFAULT_CAPACITY, true, EqualityComparer<TKey>.Default) { }

/// <summary>
/// The internal constructor that the parameterless constructor ultimately delegates to.
/// </summary>
/// <param name="concurrencyLevel">Estimated number of threads that will update the dictionary concurrently.</param>
/// <param name="capacity">Initial number of buckets.</param>
/// <param name="growLockArray">Whether to dynamically grow the striped-lock array.</param>
/// <param name="comparer">Key equality comparer.</param>
internal ConcurrentDictionary(int concurrencyLevel, int capacity, bool growLockArray, IEqualityComparer<TKey> comparer)
{
    if (concurrencyLevel < 1)
    {
        throw new ArgumentOutOfRangeException("concurrencyLevel", GetResource("ConcurrentDictionary_ConcurrencyLevelMustBePositive"));
    }
    if (capacity < 0)
    {
        throw new ArgumentOutOfRangeException("capacity", GetResource("ConcurrentDictionary_CapacityMustNotBeNegative"));
    }
    if (comparer == null) throw new ArgumentNullException("comparer");

    // The capacity should be at least as large as the concurrency level;
    // otherwise some lock objects would guard no buckets and be wasted.
    if (capacity < concurrencyLevel)
    {
        capacity = concurrencyLevel;
    }

    // One lock object per unit of expected concurrency.
    object[] locks = new object[concurrencyLevel];
    for (int i = 0; i < locks.Length; i++)
    {
        locks[i] = new object();
    }

    // Number of elements guarded by each lock.
    int[] countPerLock = new int[locks.Length];

    // The buckets: each slot holds the head of a singly-linked list of nodes.
    Node[] buckets = new Node[capacity];

    // Wrap buckets, locks and per-lock counts in a single Tables object so the
    // dictionary's internal state can be read/replaced in one atomic operation.
    m_tables = new Tables(buckets, locks, countPerLock, comparer);

    // Whether to dynamically grow the striped-lock array.
    m_growLockArray = growLockArray;

    // Maximum (expected) number of elements per lock before a resize is
    // triggered; by default the nodes are divided evenly among the locks.
    m_budget = buckets.Length / locks.Length;
}
当调用TryAdd时,实际调用的是内部公共方法TryAddInternal。如果存在key,则始终返回false,如果updateIfExists为true,则更新value,如果不存在key,则始终返回true,并且添加value。详细解读见代码。
/// <summary>
/// Attempts to add the specified key and value to the dictionary.
/// </summary>
/// <param name="key">The key of the element to add.</param>
/// <param name="value">The value of the element to add. Can be a null reference for reference types.</param>
/// <returns>true if the key/value pair was added successfully; otherwise false.</returns>
/// Exceptions:
// T:System.ArgumentNullException:
// key is null.
// T:System.OverflowException:
// The dictionary already contains the maximum number of elements (System.Int32.MaxValue).
public bool TryAdd(TKey key, TValue value)
{
    if (key == null) throw new ArgumentNullException("key");
    TValue dummy;
    return TryAddInternal(key, value, false, true, out dummy);
}

/// <summary>
/// Shared internal implementation for inserting and updating.
/// If the key exists: always returns false, and updates the value only when updateIfExists is true.
/// If the key does not exist: adds the value and always returns true.
/// </summary>
[SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
private bool TryAddInternal(TKey key, TValue value, bool updateIfExists, bool acquireLock, out TValue resultingValue)
{
    while (true)
    {
        // Bucket index and lock index for this key.
        int bucketNo, lockNo;
        int hashcode;

        Tables tables = m_tables;
        IEqualityComparer<TKey> comparer = tables.m_comparer;
        hashcode = comparer.GetHashCode(key);
        GetBucketAndLockNo(hashcode, out bucketNo, out lockNo, tables.m_buckets.Length, tables.m_locks.Length);

        bool resizeDesired = false;
        bool lockTaken = false;
#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
        bool resizeDueToCollisions = false;
#endif // !FEATURE_CORECLR
#endif
        try
        {
            if (acquireLock)
                // Acquire the lock object at index lockNo. Different keys hash to
                // different lock objects, which reduces lock contention.
                Monitor.Enter(tables.m_locks[lockNo], ref lockTaken);

            // If the tables were swapped before we took the lock, we may not be
            // holding the right lock; retry against the current tables.
            if (tables != m_tables)
            {
                continue;
            }

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            int collisionCount = 0;
#endif // !FEATURE_CORECLR
#endif

            // Try to find this key in the bucket.
            Node prev = null;
            for (Node node = tables.m_buckets[bucketNo]; node != null; node = node.m_next)
            {
                Assert((prev == null && node == tables.m_buckets[bucketNo]) || prev.m_next == node);

                // The key already exists.
                if (comparer.Equals(node.m_key, key))
                {
                    // Update the value in place if the caller allows updating.
                    if (updateIfExists)
                    {
                        // If TValue can be written atomically, assign directly.
                        if (s_isValueWriteAtomic)
                        {
                            node.m_value = value;
                        }
                        // Otherwise replace the whole node with a new one, so that
                        // concurrent lock-free readers never observe a torn write.
                        else
                        {
                            // The new node points at the old node's successor.
                            Node newNode = new Node(node.m_key, value, hashcode, node.m_next);
                            if (prev == null)
                            {
                                tables.m_buckets[bucketNo] = newNode;
                            }
                            else
                            {
                                // Link the previous node to the new node, completing
                                // the replacement in the singly-linked list.
                                prev.m_next = newNode;
                            }
                        }
                        resultingValue = value;
                    }
                    else
                    {
                        resultingValue = node.m_value;
                    }
                    return false;
                }
                // When the loop ends, prev is the last node (node.m_next == null).
                prev = node;
#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
                collisionCount++;
#endif // !FEATURE_CORECLR
#endif
            }

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            if (collisionCount > HashHelpers.HashCollisionThreshold && HashHelpers.IsWellKnownEqualityComparer(comparer))
            {
                resizeDesired = true;
                resizeDueToCollisions = true;
            }
#endif // !FEATURE_CORECLR
#endif

            // Key not found: insert a new node at the head of the bucket with a
            // volatile write so readers see a fully-initialized node.
            Volatile.Write<Node>(ref tables.m_buckets[bucketNo], new Node(key, value, hashcode, tables.m_buckets[bucketNo]));
            checked
            {
                // Count the element against lock lockNo, throwing on overflow.
                tables.m_countPerLock[lockNo]++;
            }

            //
            // If the number of elements guarded by this lock has exceeded the budget, resize the bucket table.
            // It is also possible that GrowTable will increase the budget but won't resize the bucket table.
            // That happens if the bucket table is found to be poorly utilized due to a bad hash function.
            //
            if (tables.m_countPerLock[lockNo] > m_budget)
            {
                resizeDesired = true;
            }
        }
        finally
        {
            if (lockTaken)
                // Release the lock at index lockNo.
                Monitor.Exit(tables.m_locks[lockNo]);
        }

        //
        // The fact that we got here means that we just performed an insertion. If necessary, we will grow the table.
        //
        // Concurrency notes:
        // - Notice that we are not holding any locks when calling GrowTable. This is necessary to prevent deadlocks.
        // - As a result, it is possible that GrowTable will be called unnecessarily. But, GrowTable will obtain lock 0
        //   and then verify that the table we passed to it as the argument is still the current table.
        //
        if (resizeDesired)
        {
#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            if (resizeDueToCollisions)
            {
                GrowTable(tables, (IEqualityComparer<TKey>)HashHelpers.GetRandomizedEqualityComparer(comparer), true, m_keyRehashCount);
            }
            else
#endif // !FEATURE_CORECLR
            {
                GrowTable(tables, tables.m_comparer, false, m_keyRehashCount);
            }
#else
            GrowTable(tables, tables.m_comparer, false, m_keyRehashCount);
#endif
        }

        resultingValue = value;
        return true;
    }
}
需要特别指出的是,ConcurrentDictionary 在查找键值对时,先用散列值定位到桶,然后对桶内链表中的每个节点直接调用 comparer.Equals(node.m_key, key) 进行比较;而 Dictionary 在调用 Equals 之前还会先比较节点中缓存的散列值,作为快速路径,两者都通过即判定相等。另外,默认比较器是 EqualityComparer&lt;TKey&gt;.Default:对未重写 Equals 的引用类型表现为引用相等,但对重写了 Equals 的类型(如 string)则是值相等,并非总是引用比较。
/// <summary>
/// Attempts to get the value associated with the specified key from the <see
/// cref="ConcurrentDictionary{TKey,TValue}"/>.
/// </summary>
/// <param name="key">The key of the value to get.</param>
/// <param name="value">When this method returns, <paramref name="value"/> contains the object from
/// the
/// <see cref="ConcurrentDictionary{TKey,TValue}"/> with the specified key or the default value of
/// <typeparamref name="TValue"/>, if the operation failed.</param>
/// <returns>true if the key was found in the <see cref="ConcurrentDictionary{TKey,TValue}"/>;
/// otherwise, false.</returns>
/// <exception cref="T:System.ArgumentNullException"><paramref name="key"/> is a null reference
/// (Nothing in Visual Basic).</exception>
[SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
public bool TryGetValue(TKey key, out TValue value)
{
    if (key == null) throw new ArgumentNullException("key");

    int bucketNo, lockNoUnused;

    // Capture m_tables in a local: the field is replaced with a new Tables
    // instance on every table resize.
    Tables tables = m_tables;
    IEqualityComparer<TKey> comparer = tables.m_comparer;
    GetBucketAndLockNo(comparer.GetHashCode(key), out bucketNo, out lockNoUnused, tables.m_buckets.Length, tables.m_locks.Length);

    // Reading requires no lock. The Volatile.Read ensures that loads of the
    // fields of the node are not reordered before the load from buckets[bucketNo].
    for (Node n = Volatile.Read<Node>(ref tables.m_buckets[bucketNo]); n != null; n = n.m_next)
    {
        // Key comparison is delegated entirely to the table's comparer.
        if (comparer.Equals(n.m_key, key))
        {
            value = n.m_value;
            return true;
        }
    }

    value = default(TValue);
    return false;
}
其它一些需要知道的内容,比如默认并发数、如何为指定key计算桶号和锁号等
#if !FEATURE_CORECLR
[NonSerialized]
#endif
private volatile Tables m_tables; // Internal tables of the dictionary

// NOTE: this is only used for compat reasons to serialize the comparer.
// This should not be accessed from anywhere else outside of the serialization methods.
internal IEqualityComparer<TKey> m_comparer;

#if !FEATURE_CORECLR
[NonSerialized]
#endif
private readonly bool m_growLockArray; // Whether to dynamically increase the size of the striped lock

// How many times we resized because of collisions.
// This is used to make sure we don't resize the dictionary because of multi-threaded Add() calls
// that generate collisions. GrowTable() should be the only place that changes this.
#if !FEATURE_CORECLR
// The field should have been marked as NonSerialized, but because we shipped it without that attribute in 4.5.1
// we can't add it back without breaking compat. To maximize compat we are going to keep the OptionalField attribute.
// This will prevent cases where the field was not serialized.
[OptionalField]
#endif
private int m_keyRehashCount;

#if !FEATURE_CORECLR
[NonSerialized]
#endif
private int m_budget; // The maximum number of elements per lock before a resize operation is triggered

#if !FEATURE_CORECLR // These fields are not used in CoreCLR
private KeyValuePair<TKey, TValue>[] m_serializationArray; // Used for custom serialization
private int m_serializationConcurrencyLevel; // used to save the concurrency level in serialization
private int m_serializationCapacity; // used to save the capacity in serialization
#endif

// The default capacity, i.e. the initial # of buckets. When choosing this value, we are making
// a trade-off between the size of a very small dictionary, and the number of resizes when
// constructing a large dictionary. Also, the capacity should not be divisible by a small prime.
private const int DEFAULT_CAPACITY = 31;

// The maximum size of the striped lock that will not be exceeded when locks are automatically
// added as the dictionary grows. However, the user is allowed to exceed this limit by passing
// a concurrency level larger than MAX_LOCK_NUMBER into the constructor.
private const int MAX_LOCK_NUMBER = 1024;

private const int PROCESSOR_COUNT_REFRESH_INTERVAL_MS = 30000; // How often to refresh the count, in milliseconds.
private static volatile int s_processorCount; // The last count seen.
private static volatile int s_lastProcessorCountRefreshTicks; // The last time we refreshed.

/// <summary>
/// Gets the number of available processors, refreshed at most once per
/// PROCESSOR_COUNT_REFRESH_INTERVAL_MS milliseconds.
/// </summary>
private static int ProcessorCount
{
    get
    {
        int now = Environment.TickCount;
        int procCount = s_processorCount;
        if (procCount == 0 || (now - s_lastProcessorCountRefreshTicks) >= PROCESSOR_COUNT_REFRESH_INTERVAL_MS)
        {
            s_processorCount = procCount = Environment.ProcessorCount;
            s_lastProcessorCountRefreshTicks = now;
        }

        Contract.Assert(procCount > 0 && procCount <= 64,
            "Processor count not within the expected range (1 - 64).");

        return procCount;
    }
}

// Whether TValue is a type that can be written atomically (i.e., with no danger of torn reads)
private static readonly bool s_isValueWriteAtomic = IsValueWriteAtomic();

/// <summary>
/// The number of concurrent writes for which to optimize by default.
/// </summary>
private static int DefaultConcurrencyLevel
{
    get { return ProcessorCount; }
}
/// <summary>
/// Replaces the bucket table with a larger one. To prevent multiple threads from resizing the
/// table as a result of races, the Tables instance that holds the table of buckets deemed too
/// small is passed in as an argument to GrowTable(). GrowTable() obtains a lock, and then checks
/// whether the Tables instance has been replaced in the meantime or not.
/// The <paramref name="rehashCount"/> will be used to ensure that we don't do two subsequent resizes
/// because of a collision.
/// </summary>
private void GrowTable(Tables tables, IEqualityComparer<TKey> newComparer, bool regenerateHashKeys, int rehashCount)
{
    int locksAcquired = 0;
    try
    {
        // The thread that first obtains m_locks[0] will be the one doing the resize operation
        AcquireLocks(0, 1, ref locksAcquired);

        if (regenerateHashKeys && rehashCount == m_keyRehashCount)
        {
            // This method is called with regenerateHashKeys==true when we detected
            // more than HashHelpers.HashCollisionThreshold collisions when adding a new element.
            // In that case we are in the process of switching to another (randomized) comparer
            // and we have to re-hash all the keys in the table.
            // We are only going to do this if we did not just rehash the entire table while waiting for the lock.
            tables = m_tables;
        }
        else
        {
            // If we don't require a regeneration of hash keys we want to make sure we don't do work when
            // we don't have to.
            if (tables != m_tables)
            {
                // We assume that since the table reference is different, it was already resized (or the budget
                // was adjusted). If we ever decide to do table shrinking, or replace the table for other reasons,
                // we will have to revisit this logic.
                return;
            }

            // Compute the (approx.) total size. Use an Int64 accumulation variable to avoid an overflow.
            long approxCount = 0;
            for (int i = 0; i < tables.m_countPerLock.Length; i++)
            {
                approxCount += tables.m_countPerLock[i];
            }

            //
            // If the bucket array is too empty, double the budget instead of resizing the table.
            //
            if (approxCount < tables.m_buckets.Length / 4)
            {
                m_budget = 2 * m_budget;
                if (m_budget < 0)
                {
                    // The doubling overflowed; pin the budget at the maximum.
                    m_budget = int.MaxValue;
                }
                return;
            }
        }

        // Compute the new table size. We find the smallest integer larger than twice the previous table size, and not divisible by
        // 2, 3, 5 or 7. We can consider a different table-sizing policy in the future.
        int newLength = 0;
        bool maximizeTableSize = false;
        try
        {
            checked
            {
                // Double the size of the buckets table and add one, so that we have an odd integer.
                newLength = tables.m_buckets.Length * 2 + 1;

                // Now, we only need to check odd integers, and find the first that is not divisible
                // by 3, 5 or 7.
                while (newLength % 3 == 0 || newLength % 5 == 0 || newLength % 7 == 0)
                {
                    newLength += 2;
                }

                Assert(newLength % 2 != 0);

                if (newLength > Array.MaxArrayLength)
                {
                    maximizeTableSize = true;
                }
            }
        }
        catch (OverflowException)
        {
            maximizeTableSize = true;
        }

        if (maximizeTableSize)
        {
            newLength = Array.MaxArrayLength;

            // We want to make sure that GrowTable will not be called again, since table is at the maximum size.
            // To achieve that, we set the budget to int.MaxValue.
            //
            // (There is one special case that would allow GrowTable() to be called in the future:
            // calling Clear() on the ConcurrentDictionary will shrink the table and lower the budget.)
            m_budget = int.MaxValue;
        }

        // Now acquire all other locks for the table
        AcquireLocks(1, tables.m_locks.Length, ref locksAcquired);

        object[] newLocks = tables.m_locks;

        // Add more locks
        if (m_growLockArray && tables.m_locks.Length < MAX_LOCK_NUMBER)
        {
            newLocks = new object[tables.m_locks.Length * 2];
            Array.Copy(tables.m_locks, newLocks, tables.m_locks.Length);
            for (int i = tables.m_locks.Length; i < newLocks.Length; i++)
            {
                newLocks[i] = new object();
            }
        }

        Node[] newBuckets = new Node[newLength];
        int[] newCountPerLock = new int[newLocks.Length];

        // Copy all data into a new table, creating new nodes for all elements
        for (int i = 0; i < tables.m_buckets.Length; i++)
        {
            Node current = tables.m_buckets[i];
            while (current != null)
            {
                Node next = current.m_next;
                int newBucketNo, newLockNo;
                int nodeHashCode = current.m_hashcode;

                if (regenerateHashKeys)
                {
                    // Recompute the hash from the key
                    nodeHashCode = newComparer.GetHashCode(current.m_key);
                }

                GetBucketAndLockNo(nodeHashCode, out newBucketNo, out newLockNo, newBuckets.Length, newLocks.Length);

                newBuckets[newBucketNo] = new Node(current.m_key, current.m_value, nodeHashCode, newBuckets[newBucketNo]);

                checked
                {
                    newCountPerLock[newLockNo]++;
                }
                current = next;
            }
        }

        // If this resize regenerated the hashkeys, increment the count
        if (regenerateHashKeys)
        {
            // We use unchecked here because we don't want to throw an exception if
            // an overflow happens
            unchecked
            {
                m_keyRehashCount++;
            }
        }

        // Adjust the budget
        m_budget = Math.Max(1, newBuckets.Length / newLocks.Length);

        // Replace tables with the new versions
        m_tables = new Tables(newBuckets, newLocks, newCountPerLock, newComparer);
    }
    finally
    {
        // Release all locks that we took earlier
        ReleaseLocks(0, locksAcquired);
    }
}

/// <summary>
/// Computes the bucket number and lock number for the given key hash code.
/// </summary>
/// <param name="hashcode">The key's hash code.</param>
/// <param name="bucketNo">Receives the bucket index.</param>
/// <param name="lockNo">Receives the lock index.</param>
/// <param name="bucketCount">Total number of buckets.</param>
/// <param name="lockCount">Total number of locks.</param>
private void GetBucketAndLockNo(int hashcode, out int bucketNo, out int lockNo, int bucketCount, int lockCount)
{
    // Mask off the sign bit so the modulo result is always non-negative
    // and strictly less than bucketCount.
    bucketNo = (hashcode & 0x7fffffff) % bucketCount;

    // Buckets whose index is congruent modulo lockCount share the same (striped) lock.
    lockNo = bucketNo % lockCount;

    Assert(bucketNo >= 0 && bucketNo < bucketCount);
    Assert(lockNo >= 0 && lockNo < lockCount);
}

/// <summary>
/// Determines whether type TValue can be written atomically.
/// </summary>
private static bool IsValueWriteAtomic()
{
    Type valueType = typeof(TValue);

    //
    // Section 12.6.6 of ECMA CLI explains which types can be read and written atomically without
    // the risk of tearing.
    //
    // See http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-335.pdf
    //
    if (valueType.IsClass)
    {
        // Reference writes are always atomic.
        return true;
    }
    switch (Type.GetTypeCode(valueType))
    {
        case TypeCode.Boolean:
        case TypeCode.Byte:
        case TypeCode.Char:
        case TypeCode.Int16:
        case TypeCode.Int32:
        case TypeCode.SByte:
        case TypeCode.Single:
        case TypeCode.UInt16:
        case TypeCode.UInt32:
            return true;

        case TypeCode.Int64:
        case TypeCode.Double:
        case TypeCode.UInt64:
            // 64-bit values are only written atomically on a 64-bit platform.
            return IntPtr.Size == 8;

        default:
            return false;
    }
}
ConcurrentDictionary源码概读的更多相关文章
- Spark 源码浅读-SparkSubmit
Spark 源码浅读-任务提交SparkSubmit main方法 main方法主要用于初始化日志,然后接着调用doSubmit方法. override def main(args: Array[St ...
- spring-cloud-square源码速读(spring-cloud-square-okhttp篇)
欢迎访问我的GitHub https://github.com/zq2599/blog_demos 内容:所有原创文章分类汇总及配套源码,涉及Java.Docker.Kubernetes.DevOPS ...
- spring-cloud-square源码速读(retrofit + okhttp篇)
欢迎访问我的GitHub 这里分类和汇总了欣宸的全部原创(含配套源码):https://github.com/zq2599/blog_demos spring-cloud-square系列文章 五分钟 ...
- Handlebars模板引擎中的each嵌套及源码浅读
若显示效果不佳,可移步到愚安的小窝 Handlebars模板引擎作为时下最流行的模板引擎之一,已然在开发中为我们提供了无数便利.作为一款无语义的模板引擎,Handlebars只提供极少的helper函 ...
- [源码分析]读写锁ReentrantReadWriteLock
一.简介 读写锁. 读锁之间是共享的. 写锁是独占的. 首先声明一点: 我在分析源码的时候, 把jdk源码复制出来进行中文的注释, 有时还进行编译调试什么的, 为了避免和jdk原生的类混淆, 我在类前 ...
- 【源码分析】HashMap源码再读-基于Java8
最近工作不是太忙,准备再读读一些源码,想来想去,还是先从JDK的源码读起吧,毕竟很久不去读了,很多东西都生疏了.当然,还是先从炙手可热的HashMap,每次读都会有一些收获.当然,JDK8对HashM ...
- AQS源码泛读,梳理设计流程(jdk8)
一.AQS介绍 AQS(AbstractQueuedSynchronizer)抽象队列同步器,属于多线程编程的基本工具:JDK对其定义得很详细,并提供了多种常用的工具类(重入锁,读写锁,信号量,Cyc ...
- MyBatis 之源码浅读
环境简介与入口 记录一下尝试阅读Mybatis源码的过程,这篇笔记是我一边读,一遍记录下来的,虽然内容也不多,对Mybatis整体的架构体系也没有摸的很清楚,起码也能把这个过程整理下来,这也是我比较喜 ...
- 读源码【读mybatis的源码的思路】
✿ 需要掌握的编译器知识 ★ 编译器为eclipse为例子 调试准备工作(步骤:Window -> Show View ->...): □ 打开调试断点Breakpoint: □ 打开变量 ...
随机推荐
- 微信小程序 左右联动菜单
根据左侧列表,联动跳右侧内容. 效果如图: wxml代码: <view class="page"> <!-- 左侧导航 --> <view class ...
- 学习SpringBoot整合SSM三大框架源码之SpringBoot
Spring Boot源码剖析 一.Spring Boot 项目的启动入口流程分析 Spring Boot项目的启动入口main线程上有一个@SpringBootApplication( @Confi ...
- elasticsearch 安装head
git clone https://github.com/mobz/elasticsearch-head.git yum install nodejs npm install 修改Elasticsea ...
- 阿里云ECS服务器 java JDK安装和配置 mysql安装和配置
最近配置了一下阿里云ecs服务的服务器环境,主要对java jdk环境的安装和配置,以及数据库mysql的安装和配置,趁着热乎,记录一下! 服务器用的系统是ubuntu_16_04_64的,版本16. ...
- RHCE\RHCSA
加油,老杨,所有的事情坚持到最后都是最好的,之所以现在觉得不好,是因为还没有坚持到最后,终于考过了,哈哈哈,下一个目标OCP
- confluence导出PDF格式文件不显示中文解决
由于confluence导出PDF格式文件需要应用字体文件,下载字体文件在confluence管理员界面安装即可. 我这里使用从simhei.ttf楷体,可以从windowns主机里下载(c:/win ...
- libtool
[从网上摘录的,忘了从哪摘的了] libtool常见于autoconf/automake,单独用的例子很少,所以我想仔细研究一下,为将来兄弟们看起来方便. 一.libtool的作用offer a ...
- 【转帖】PostgreSQL之 使用扩展Extension
PostgreSQL之 使用扩展Extension https://www.cnblogs.com/lnlvinso/p/11042677.html 挺好的文章.自己之前没有系统学习过 扩展.. 目前 ...
- Django Rest Framework 安装
1. 环境要求 Python (3.5, 3.6, 3.7): 查看 python版本:python -V Django (1.11, 2.0, 2.1, 2.2) 查看django版本:pip li ...
- Excel2016 保存\复制 卡死问题解决
遇到的问题: 工作中经常碰到一些Excel表, 复制一行, 再粘贴要等5s以上才能显示成功. 保存一下文档, 也会出现页面白屏卡死的情况, 经过网上多个帖子进行操作依旧无解, 最后找到了自己的方法得以 ...