Java 7 vs Java 8 HashMap (de)serialization

Java 7 vs Java 8 HashMap (de)serialization — tags: java, serialization, hashmap

In Java 7, writeObject is fail-fast because it iterates the entry set (for-each):

private void writeObject(java.io.ObjectOutputStream s)
    throws IOException
{
    Iterator<Map.Entry<K,V>> i =
        (size > 0) ? entrySet0().iterator() : null;
    // Write out the threshold, loadfactor, and any hidden stuff
    s.defaultWriteObject();
    // Write out number of buckets
    s.writeInt(table.length);
    // Write out size (number of Mappings)
    s.writeInt(size);
    // Write out keys and values (alternating)
    if (size > 0) {
        for (Map.Entry<K,V> e : entrySet0()) {
            s.writeObject(e.getKey());
            s.writeObject(e.getValue());
        }
    }
}
In Java 8, writeObject uses a plain for loop instead and is no longer fail-fast:

/**
 * Serializes this map: default fields first, then the bucket count and
 * the mapping count, then the entries via internalWriteEntries.
 */
private void writeObject(java.io.ObjectOutputStream s)
    throws IOException {
    // Capture the bucket count before anything is written.
    int bucketCount = capacity();
    s.defaultWriteObject();      // threshold, loadFactor, any hidden fields
    s.writeInt(bucketCount);     // number of buckets
    s.writeInt(size);            // number of mappings
    internalWriteEntries(s);     // keys and values, alternating
}

// Called only from writeObject, to ensure compatible ordering.
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
    Node<K,V>[] currentTable = table;
    if (size > 0 && currentTable != null) {
        // Walk every bucket chain, emitting key then value for each node.
        for (Node<K,V> bucket : currentTable) {
            for (Node<K,V> node = bucket; node != null; node = node.next) {
                s.writeObject(node.key);
                s.writeObject(node.value);
            }
        }
    }
}

/*
 * Reconstitutes the map from the stream: reads the default fields,
 * ignores the serialized bucket count, re-derives the table size from
 * the mapping count and load factor, and re-inserts every key/value
 * pair. The exact arithmetic and statement order here define the
 * serialized-form compatibility contract — do not reorder.
 *
 * NOTE(review): reinitialize(), tableSizeFor(), putVal() and hash()
 * are defined elsewhere in this class; their behavior is assumed, not
 * shown here.
 */
private void readObject(java.io.ObjectInputStream s)
    throws IOException, ClassNotFoundException {
    // Read in the threshold (ignored), loadfactor, and any hidden stuff
    s.defaultReadObject();
    reinitialize();
    // Reject corrupted or hostile streams carrying an unusable load factor.
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
        throw new InvalidObjectException("Illegal load factor: " +
                                         loadFactor);
    s.readInt();                // Read and ignore number of buckets
    int mappings = s.readInt(); // Read number of mappings (size)
    if (mappings < 0)
        throw new InvalidObjectException("Illegal mappings count: " +
                                         mappings);
    else if (mappings > 0) { // (if zero, use defaults)
        // Size the table using given load factor only if within
        // range of 0.25...4.0
        float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
        float fc = (float)mappings / lf + 1.0f;
        // Clamp the computed capacity into
        // [DEFAULT_INITIAL_CAPACITY, MAXIMUM_CAPACITY].
        int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                   DEFAULT_INITIAL_CAPACITY :
                   (fc >= MAXIMUM_CAPACITY) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor((int)fc));
        float ft = (float)cap * lf;
        // Saturate the threshold if cap*lf would exceed MAXIMUM_CAPACITY.
        threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                     (int)ft : Integer.MAX_VALUE);
        @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
        table = tab;

        // Read the keys and values, and put the mappings in the HashMap
        for (int i = 0; i < mappings; i++) {
            @SuppressWarnings("unchecked")
                K key = (K) s.readObject();
            @SuppressWarnings("unchecked")
                V value = (V) s.readObject();
            putVal(hash(key), key, value, false, false);
        }
    }
}
(The Java 8 writeObject, internalWriteEntries and readObject implementations were duplicated here in a garbled machine-translated form; see the code listings above for the actual source.)
In Java 8 it is at least fail-safe, if we set aside the problem I pointed out here ().

It may be best to answer both questions together — do we know why Java changed writeObject in this way?

Again, I know HashMap is not thread-safe, but this change could cause it to:

  • write the correct size, but more key/value pairs (not in insertion order)
  • write the correct size, but fewer key/value pairs
  • write the correct size, but no elements at all

  • cases 2 and 3 will always cause deserialization to fail

    I can't seem to find the implementation of internalWriteEntries(). — Added. @biziclop I think the reason is the same as the following: perhaps someone decided that as long as we're not writing a thread-safe class, we shouldn't go through the motions of thread safety. — As I just commented there: why does it matter? Every implementation and every version may do things differently, as long as they honor the contract defined by the Javadoc. Nobody said writeObject should be fail-fast, and nobody should rely on fail-fast behavior. — I'm not arguing here, @Andreas. All I'm trying to do is find a reason, if there is one. The new implementation is worse (because it is neither fail-safe nor fail-fast), and the resizing change makes it even less so than before. — It doesn't have to be fail-safe, fail-fast, or whatever it was before, so those were never considerations when it was recoded. The new implementation is not worse; it is actually much better, because.
    /**
     * Serializes this map: default fields first, then the bucket count
     * and the mapping count, then the entries via internalWriteEntries.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException {
        // Capture the bucket count before anything is written.
        int bucketCount = capacity();
        s.defaultWriteObject();      // threshold, loadFactor, hidden fields
        s.writeInt(bucketCount);     // number of buckets
        s.writeInt(size);            // number of mappings
        internalWriteEntries(s);     // keys and values, alternating
    }
    
    // Called only from writeObject, to ensure compatible ordering.
    void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
        Node<K,V>[] currentTable = table;
        if (size > 0 && currentTable != null) {
            // Walk every bucket chain, emitting key then value per node.
            for (Node<K,V> bucket : currentTable) {
                for (Node<K,V> node = bucket; node != null; node = node.next) {
                    s.writeObject(node.key);
                    s.writeObject(node.value);
                }
            }
        }
    }
    
    /*
     * Reconstitutes the map from the stream: reads the default fields,
     * ignores the serialized bucket count, re-derives the table size
     * from the mapping count and load factor, and re-inserts every
     * key/value pair. The exact arithmetic and statement order define
     * the serialized-form compatibility contract — do not reorder.
     *
     * NOTE(review): reinitialize(), tableSizeFor(), putVal() and hash()
     * are defined elsewhere in this class; their behavior is assumed,
     * not shown here.
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        reinitialize();
        // Reject corrupted or hostile streams with an unusable load factor.
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        s.readInt();                // Read and ignore number of buckets
        int mappings = s.readInt(); // Read number of mappings (size)
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);
        else if (mappings > 0) { // (if zero, use defaults)
            // Size the table using given load factor only if within
            // range of 0.25...4.0
            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
            float fc = (float)mappings / lf + 1.0f;
            // Clamp the computed capacity into
            // [DEFAULT_INITIAL_CAPACITY, MAXIMUM_CAPACITY].
            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                       DEFAULT_INITIAL_CAPACITY :
                       (fc >= MAXIMUM_CAPACITY) ?
                       MAXIMUM_CAPACITY :
                       tableSizeFor((int)fc));
            float ft = (float)cap * lf;
            // Saturate the threshold if cap*lf would exceed MAXIMUM_CAPACITY.
            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                         (int)ft : Integer.MAX_VALUE);
            @SuppressWarnings({"rawtypes","unchecked"})
                Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
            table = tab;
    
            // Read the keys and values, and put the mappings in the HashMap
            for (int i = 0; i < mappings; i++) {
                @SuppressWarnings("unchecked")
                    K key = (K) s.readObject();
                @SuppressWarnings("unchecked")
                    V value = (V) s.readObject();
                putVal(hash(key), key, value, false, false);
            }
        }
    }