Python/Julia neural network coding speed the same as PyPy

I wrote some neural network code in Python and then rewrote it in Julia. The pure Python code runs in about 7 seconds, while the Julia and PyPy versions both run in about 0.75 seconds.

sigmoid(z::Float64) = 1/(1 + exp(-z))
sigmoidPrime(z::Float64) = sigmoid(z) * (1 - sigmoid(z))

### Types ###

abstract AbstractNode

type Edge
    source::AbstractNode
    target::AbstractNode
    weight::Float64
    derivative::Float64
    augmented::Bool

    Edge(source::AbstractNode, target::AbstractNode) = new(source, target, randn(), 0.0, false)
end

type Node <: AbstractNode
    incomingEdges::Vector{Edge}
    outgoingEdges::Vector{Edge}
    activation::Float64
    activationPrime::Float64

    Node() = new([], [], -1.0, -1.0)
end

type InputNode <: AbstractNode
    index::Int
    incomingEdges::Vector{Edge}
    outgoingEdges::Vector{Edge}
    activation::Float64

    InputNode(index::Int) = new(index, [], [], -1.0)
end

type BiasNode <: AbstractNode
    incomingEdges::Vector{Edge}
    outgoingEdges::Vector{Edge}
    activation::Float64

    BiasNode() = new([], [], 1.0)
end

type Network
    inputNodes::Vector{InputNode}
    hiddenNodes::Vector{Node}
    outputNodes::Vector{Node}

    function Network(sizes::Array, bias::Bool=true)
        inputNodes = [InputNode(i) for i in 1:sizes[1]];
        hiddenNodes = [Node() for _ in 1:sizes[2]];
        outputNodes = [Node() for _ in 1:sizes[3]];

        for inputNode in inputNodes
            for node in hiddenNodes
                edge = Edge(inputNode, node);
                push!(inputNode.outgoingEdges, edge)
                push!(node.incomingEdges, edge)
            end
        end

        for node in hiddenNodes
            for outputNode in outputNodes
                edge = Edge(node, outputNode);
                push!(node.outgoingEdges, edge)
                push!(outputNode.incomingEdges, edge)
            end
        end

        if bias
            biasNode = BiasNode()
            for node in hiddenNodes
                edge = Edge(biasNode, node);
                push!(biasNode.outgoingEdges, edge)
                push!(node.incomingEdges, edge)
            end
        end

        new(inputNodes, hiddenNodes, outputNodes)
    end
end


### Methods ###

function evaluate(obj::Node, inputVector::Array)
    # an activation of -1.0 marks a node as not yet evaluated, so any
    # value above -0.5 means the cached activation can be reused
    if obj.activation > -0.5
        return obj.activation
    else
        weightedSum = sum([d.weight * evaluate(d.source, inputVector) for d in obj.incomingEdges])
        obj.activation = sigmoid(weightedSum)
        obj.activationPrime = sigmoidPrime(weightedSum)

        return obj.activation
    end
end

function evaluate(obj::InputNode, inputVector::Array)
    obj.activation = inputVector[obj.index]
    return obj.activation
end

function evaluate(obj::BiasNode, inputVector::Array)
    obj.activation = 1.0
    return obj.activation
end

function updateWeights(obj::AbstractNode, learningRate::Float64)
    # gradient-descent step; the augmented flag ensures each edge is
    # updated only once per pass over the graph
    for d in obj.incomingEdges
        if !d.augmented
            d.augmented = true
            d.weight -= learningRate * d.derivative
            updateWeights(d.source, learningRate)
            d.derivative = 0.0
        end
    end
end

function compute(obj::Network, inputVector::Array)
    output = [evaluate(node, inputVector) for node in obj.outputNodes]
    for node in obj.outputNodes
        clear(node)
    end
    return output
end

function clear(obj::AbstractNode)
    # note: InputNode and BiasNode have no incoming edges (and no
    # activationPrime field), so this loop body never runs for them
    for d in obj.incomingEdges
        obj.activation = -1.0
        obj.activationPrime = -1.0
        d.augmented = false
        clear(d.source)
    end
end

function propagateDerivatives(obj::AbstractNode, error::Float64)
    # chain rule: accumulate this edge's derivative, then pass the
    # error scaled by the edge weight further up the graph
    for d in obj.incomingEdges
        if !d.augmented
            d.augmented = true
            d.derivative += error * obj.activationPrime * d.source.activation
            propagateDerivatives(d.source, error * d.weight * obj.activationPrime)
        end
    end
end

function backpropagation(obj::Network, example::Array)
    output = [evaluate(node, example[1]) for node in obj.outputNodes]
    error = output - example[2]
    for (node, err) in zip(obj.outputNodes, error)
        propagateDerivatives(node, err)
    end

    for node in obj.outputNodes
        clear(node)
    end
end

function train(obj::Network, labeledExamples::Array, learningRate::Float64=0.7, iterations::Int=10000)
    for _ in 1:iterations
        for ex in labeledExamples
            backpropagation(obj, ex)
        end

        for node in obj.outputNodes
            updateWeights(node, learningRate)
        end

        for node in obj.outputNodes
            clear(node)
        end
    end
end


labeledExamples = Array[Array[[0,0,0], [0]],
                        Array[[0,0,1], [1]],
                        Array[[0,1,0], [0]],
                        Array[[0,1,1], [1]],
                        Array[[1,0,0], [0]],
                        Array[[1,0,1], [1]],
                        Array[[1,1,0], [1]],
                        Array[[1,1,1], [0]]];

neuralnetwork = Network([3,4,1])
@time train(neuralnetwork, labeledExamples)

This looks more like a code review than a question (there isn't a question mark anywhere), but I'll take a stab at it anyway. The only obvious potential performance problem is that you're allocating arrays via comprehensions in evaluate, compute, and backpropagation.
That weighted sum computation in evaluate would be more efficient as a for loop, and for the other two methods you may want to use pre-allocated arrays instead of comprehensions (sketches of both changes follow below). You can also use Julia's built-in profiler to see where your code spends most of its time; that may reveal some non-obvious hot spots that you can optimize further (a usage sketch appears after the next paragraph).
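
A minimal sketch of those two changes, assuming the type definitions from the question: evaluate accumulates the weighted sum in a scalar instead of building a temporary array, and a hypothetical compute! variant fills a caller-supplied buffer rather than allocating a fresh array on every call.

function evaluate(obj::Node, inputVector::Array)
    if obj.activation > -0.5
        return obj.activation
    else
        # accumulate in place; no temporary array is allocated
        weightedSum = 0.0
        for d in obj.incomingEdges
            weightedSum += d.weight * evaluate(d.source, inputVector)
        end
        obj.activation = sigmoid(weightedSum)
        obj.activationPrime = sigmoidPrime(weightedSum)
        return obj.activation
    end
end

# hypothetical variant of compute: the caller supplies the output buffer
function compute!(output::Vector{Float64}, obj::Network, inputVector::Array)
    for (i, node) in enumerate(obj.outputNodes)
        output[i] = evaluate(node, inputVector)
    end
    for node in obj.outputNodes
        clear(node)
    end
    return output
end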


As for the comparison with PyPy, it may well be that both Julia and PyPy do very well with this code, at or near C performance, in which case you wouldn't expect Julia to be much faster than PyPy, since both are already close to optimal. Comparing against a C implementation would be very informative, because it would show how much performance both Julia and PyPy are leaving on the table. Fortunately, this code looks like it would be fairly easy to port to C.
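
To act on the profiling suggestion above, a minimal usage sketch (in current Julia the sampling profiler lives in the Profile standard library; in older versions @profile was available without the import):

using Profile                                   # stdlib import; not needed on old Julia
neuralnetwork = Network([3,4,1])
@profile train(neuralnetwork, labeledExamples)  # run the training loop under the profiler
Profile.print()                                 # call-tree report of where time was spent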

This question might be a better fit for Code Review. Comprehensions are syntactically very nice; will they be optimized in future builds?

As for C, I'm learning Julia precisely because I don't want to learn C :-)

The point isn't that you should write all your code in C, but that if you want to know how good this code is as a benchmark, C is what you should compare against. If PyPy is as fast as C on this code, you can't reasonably expect Julia to be 10x faster.

Yes, I understand, but it isn't worth learning C just to test this code.

Comprehensions are very nice, but they do create new array objects; that's what the syntax means. In the future it may be possible to write sum(d.weight * evaluate(d.source, inputVector) for d in obj.incomingEdges) without allocating any array.

That's fair. It will have to remain a mystery.
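
For reference, the two forms contrasted in that last exchange, as they would appear inside evaluate (generator syntax did land in Julia 0.5, so the allocation-free version is now valid):

# original form: builds a temporary array, then sums it
weightedSum = sum([d.weight * evaluate(d.source, inputVector) for d in obj.incomingEdges])

# generator form: sum consumes the values lazily, no array is allocated
# (valid from Julia 0.5 onward)
weightedSum = sum(d.weight * evaluate(d.source, inputVector) for d in obj.incomingEdges)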