Thu, 02 May 2024 21:26:49 +0300
activation function trials
src/OpticalFlow.jl
src/PredictPDPS.jl
--- a/src/OpticalFlow.jl	Tue Apr 30 16:00:25 2024 +0300
+++ b/src/OpticalFlow.jl	Thu May 02 21:26:49 2024 +0300
@@ -205,9 +205,23 @@
 end
 
 function activation(x)
-    # return (1/(1 + exp(-(x - 1.5)))) # Works well with dual norm activation for lighthouse and brainphantom
-    # return 0.2/(1 + exp(-5(x-0.5))) + 0.3 # Works well with dual norm activation for lighthouse and brainphantom
-    return 1.0 - 0.5/(1+ exp(-100(x-0.7)))
+    return (1/(1 + exp(-10000(x - 0.05)))) # best for shepp logan
+    # return -abs(x-1)^1/5 + 1 # best for lighthouse and brainphantom
+    # return x^(5)
+    # return (1/(1 + exp(-1000(x - 0.075))))
+    # return 4*(x-0.5)^3 + 0.5
+    # return (1/(1 + exp(-1000(x - 0.05))))
+    # return -3(x-0.5)^2 + 0.75
+    # return (x-1)^51 + 1
+    # return -abs(x-1)^1/3 + 1
+    # return 16*(x-0.5)^5 + 0.5
+end
+
+function activationD(x)
+    # return (1/(1 + exp(-500(x - 0.1))))
+    # return 4*(x-0.5)^3 + 0.5
+    #return (1/(1 + exp(-1000(x - 0.1))))
+    return 1.0
 end
 
 # Experimental predictor for dual scaling based on activation
@@ -216,15 +230,13 @@
     oldx = copy(x)
     flow!(x, u; threads=(threads==:inner))
     C = similar(y)
-    # cc = abs.(x-oldx)
-    # cm = max(1e-12,maximum(cc))
-    # c = activation.(1 .- cc./ cm)
-    # C[1,:,:] .= c
-    # C[2,:,:] .= c
-    Δx .= activation.((sqrt.(abs.(y[1,:,:]).^2 + abs.(y[2,:,:]).^2))./0.25)
-    C[1,:,:] .= Δx
-    C[2,:,:] .= Δx
-    y .= C.*y
+    cc = abs.(x-oldx)
+    cm = max(1e-12,maximum(cc))
+    c = activation.(cc ./ cm)
+    C[1,:,:] .= c
+    C[2,:,:] .= c
+    # y .= C.*y
+    y .= y .- 0.75.*C.*y # 0.75 for brain phantom and shepp logan
 end
 
 function pdflow!(x, Δx, y, Δy, u, :: ZeroDual; threads=:none)
@@ -308,15 +320,18 @@
     Δx = warp(x, tform, axes(x), fillvalue=Flat())
     @. x = Δx
     C = similar(y)
-    # cc = abs.(x-oldx)
-    # cm = max(1e-12,maximum(cc))
-    # c = activation.(1 .- cc./ cm)
-    # C[1,:,:] .= c
-    # C[2,:,:] .= c
-    Δx .= activation.((sqrt.(abs.(y[1,:,:]).^2 + abs.(y[2,:,:]).^2))./0.25)
-    C[1,:,:] .= Δx
-    C[2,:,:] .= Δx
-    y .= C.*y
+    cc = abs.(x-oldx)
+    cm = max(1e-12,maximum(cc))
+    c = activation.(cc ./ cm)
+    C[1,:,:] .= c
+    C[2,:,:] .= c
+    # Δx .= activation.(sqrt.(abs.(y[1,:,:]).^2 + abs.(y[2,:,:]).^2))./0.25
+    # D = similar(y)
+    # D[1,:,:] .= Δx
+    # D[2,:,:] .= Δx
+    # y .= C.*y
+    # y .= y .- 1.0.*C.*D.*y
+    y .= y .- 1.0.*C.*y
 end
 
 # Method for rotation prediction (exploiting property of inverse rotation)
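Both modified predictors in this file now share the same dual-scaling step: the per-pixel magnitude of the primal update is normalised by its maximum, pushed through the steep sigmoid activation, and the resulting mask is used to shrink the dual variable. Below is a minimal standalone sketch of that step, assuming 2-component dual data of size 2×M×N; the helper name dual_scale! and the keyword factor are illustrative only (0.75 and 1.0 are the factors used in the two hunks above), not part of the package.

# Illustrative sketch only, not package code.
activation(x) = 1 / (1 + exp(-10000 * (x - 0.05)))   # near-step function around x ≈ 0.05

function dual_scale!(y, x, oldx; factor = 0.75)
    cc = abs.(x .- oldx)                  # magnitude of the primal update
    cm = max(1e-12, maximum(cc))          # guard against division by zero
    c  = activation.(cc ./ cm)            # ≈ 1 where the normalised change exceeds ≈ 0.05
    C  = similar(y)
    C[1, :, :] .= c                       # same weight for both dual components
    C[2, :, :] .= c
    y .= y .- factor .* C .* y            # shrink the dual where the primal moved
    return y
end

# Example on random data:
x, oldx = rand(64, 64), rand(64, 64)
y = rand(2, 64, 64)
dual_scale!(y, x, oldx)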
--- a/src/PredictPDPS.jl	Tue Apr 30 16:00:25 2024 +0300
+++ b/src/PredictPDPS.jl	Thu May 02 21:26:49 2024 +0300
@@ -55,7 +55,7 @@
        batchrun_denoising,
        batchrun_predictors,
        demo_denoising1, demo_denoising2, demo_denoising3,
-       demo_denoising4, demo_denoising5, demo_denoising6, demo_denoising7,
+       demo_denoising4, demo_denoising5, demo_denoising6, demo_denoising7, demo_denoising8,
        demo_petS1, demo_petS2, demo_petS3,
        demo_petS4, demo_petS5, demo_petS6,
        demo_petS7, demo_petB1, demo_petB2, demo_petB3,
@@ -222,6 +222,7 @@
 demo_denoising5 = () -> demo(denoising_experiments_pdps_known[5]) # Proximal (old)
 demo_denoising6 = () -> demo(denoising_experiments_pdps_known[6]) # Rotation
 demo_denoising7 = () -> demo(denoising_experiments_pdps_known[7]) # Zero dual
+demo_denoising8 = () -> demo(denoising_experiments_pdps_known[8])
 
 function batchrun_article(kwargs...)
     run_experiments(;experiments=experiments_all,
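The second file only exports and defines an eighth known-flow denoising demo entry, analogous to demo_denoising1 through demo_denoising7. Assuming the package environment is activated, it is invoked the same way as the existing entries:

using PredictPDPS
demo_denoising8()   # runs demo(denoising_experiments_pdps_known[8])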