# HG changeset patch
# User Neil Dizon
# Date 1714745836 -10800
# Node ID 429f8c86aea144b572014b37d8ddaff31d9dcdcd
# Parent b413b7df8cd6be3cb9060d3c2cd9a9f33a62b9a7
# Parent 56cc16c66b3935d90957231cec95365623fd2594
merge

diff -r 56cc16c66b39 -r 429f8c86aea1 src/OpticalFlow.jl
--- a/src/OpticalFlow.jl	Fri May 03 09:13:05 2024 -0500
+++ b/src/OpticalFlow.jl	Fri May 03 17:17:16 2024 +0300
@@ -198,15 +198,16 @@
     C = similar(y)
     cc = abs.(x-oldx)
     cm = max(flow.threshold,maximum(cc))
-    c = 1 .* (1 .- cc./ cm) .^flow.exponent
+    # c = 1 .* (1 .- cc./ cm) .^flow.exponent
+    c = 1.0 .- 0.75.*activation.(cc./cm)
     C[1,:,:] .= c
     C[2,:,:] .= c
     y .= C.*y
 end
 
-function activation(x)
-    return (1/(1 + exp(-10000(x - 0.05)))) # best for shepp logan
-    # return -abs(x-1)^1/5 + 1 # best for lighthouse and brainphantom
+function activation(x :: Real)
+    return (1/(1 + exp(-1000(x - 0.05)))) # best for shepp logan
+    #return -abs(x-1)^1/5 + 1 # best for lighthouse and brainphantom
     # return x^(5)
     # return (1/(1 + exp(-1000(x - 0.075))))
     # return 4*(x-0.5)^3 + 0.5
@@ -214,30 +215,24 @@
     # return -3(x-0.5)^2 + 0.75
     # return (x-1)^51 + 1
     # return -abs(x-1)^1/3 + 1
-    # return 16*(x-0.5)^5 + 0.5
-end
-
-function activationD(x)
-    # return (1/(1 + exp(-500(x - 0.1))))
-    # return 4*(x-0.5)^3 + 0.5
-    #return (1/(1 + exp(-1000(x - 0.1))))
-    return 1.0
+    # return 16*(x-0.5)^5 + 0.5
 end
 
-# Experimental predictor for dual scaling based on activation
-function pdflow!(x, Δx, y, Δy, u, flow :: ActivatedDual; threads=:none)
-    @assert(size(u)==(2,))
-    oldx = copy(x)
-    flow!(x, u; threads=(threads==:inner))
-    C = similar(y)
-    cc = abs.(x-oldx)
-    cm = max(1e-12,maximum(cc))
-    c = activation.(cc ./ cm)
-    C[1,:,:] .= c
-    C[2,:,:] .= c
-    # y .= C.*y
-    y .= y .- 0.75.*C.*y # 0.75 for brain phantom and shepp logan
-end
+
+# # Experimental predictor for dual scaling based on activation
+# function pdflow!(x, Δx, y, Δy, u, flow :: ActivatedDual; threads=:none)
+#     @assert(size(u)==(2,))
+#     oldx = copy(x)
+#     flow!(x, u; threads=(threads==:inner))
+#     C = similar(y)
+#     cc = abs.(x-oldx)
+#     cm = max(1e-12,maximum(cc))
+#     c = 1.0 .- 0.75.*activation.(cc./cm)
+#     C[1,:,:] .= c
+#     C[2,:,:] .= c
+#     y .= C.*y
+#     #y .= y .- C.*y # 0.75 for brain phantom and shepp logan
+# end
 
 function pdflow!(x, Δx, y, Δy, u, :: ZeroDual; threads=:none)
     @assert(size(u)==(2,))
@@ -306,33 +301,35 @@
     C = similar(y)
     cc = abs.(x-oldx)
     cm = max(flow.threshold,maximum(cc))
-    c = 1 .* (1 .- cc./ cm) .^flow.exponent
+    # c = 1 .* (1 .- cc./ cm) .^flow.exponent # Original dual scaling
+    # c = 1.0 .- 0.75.*activation.(cc./cm) # Best for brain phantom
+    c = 1.0 .- 1.0.*activation.(cc./cm) # Best for shepp logan phantom
     C[1,:,:] .= c
     C[2,:,:] .= c
     y .= C.*y
 end
 
-# Experimental predictor for dual scaling based on activation
-function petpdflow!(x, Δx, y, Δy, u, theta_known, flow :: ActivatedDual; threads=:none)
-    oldx = copy(x)
-    center_point = center(x) .+ u
-    tform = recenter(RotMatrix(theta_known[1]), center_point)
-    Δx = warp(x, tform, axes(x), fillvalue=Flat())
-    @. x = Δx
-    C = similar(y)
-    cc = abs.(x-oldx)
-    cm = max(1e-12,maximum(cc))
-    c = activation.(cc ./ cm)
-    C[1,:,:] .= c
-    C[2,:,:] .= c
-    # Δx .= activation.(sqrt.(abs.(y[1,:,:]).^2 + abs.(y[2,:,:]).^2))./0.25
-    # D = similar(y)
-    # D[1,:,:] .= Δx
-    # D[2,:,:] .= Δx
-    # y .= C.*y
-    # y .= y .- 1.0.*C.*D.*y
-    y .= y .- 1.0.*C.*y
-end
+# # Experimental predictor for dual scaling based on activation
+# function petpdflow!(x, Δx, y, Δy, u, theta_known, flow :: ActivatedDual; threads=:none)
+#     oldx = copy(x)
+#     center_point = center(x) .+ u
+#     tform = recenter(RotMatrix(theta_known[1]), center_point)
+#     Δx = warp(x, tform, axes(x), fillvalue=Flat())
+#     @. x = Δx
+#     C = similar(y)
+#     cc = abs.(x-oldx)
+#     cm = max(1e-12,maximum(cc))
+#     c = activation.(cc ./ cm)
+#     C[1,:,:] .= c
+#     C[2,:,:] .= c
+#     # Δx .= activation.(sqrt.(abs.(y[1,:,:]).^2 + abs.(y[2,:,:]).^2))./0.25
+#     # D = similar(y)
+#     # D[1,:,:] .= Δx
+#     # D[2,:,:] .= Δx
+#     # y .= C.*y
+#     # y .= y .- 1.0.*C.*D.*y
+#     #y .= y .- 1.0.*C.*y
+# end
 
 # Method for rotation prediction (exploiting property of inverse rotation)
 function petpdflow!(x, Δx, y, Δy, u, theta_known, flow :: Rotation; threads=:none)
diff -r 56cc16c66b39 -r 429f8c86aea1 src/PET/PET.jl
--- a/src/PET/PET.jl	Fri May 03 09:13:05 2024 -0500
+++ b/src/PET/PET.jl	Fri May 03 17:17:16 2024 +0300
@@ -122,8 +122,8 @@
                p_known₀_pets ⬿ (predictor=Rotation(),)),
     Experiment(AlgorithmNew, DisplacementConstant, shepplogan,
                p_known₀_pets ⬿ (predictor=ZeroDual(),)),
-    Experiment(AlgorithmNew, DisplacementConstant, shepplogan,
-               p_known₀_pets ⬿ (predictor=ActivatedDual(),)),
+    # Experiment(AlgorithmNew, DisplacementConstant, shepplogan,
+    #            p_known₀_pets ⬿ (predictor=ActivatedDual(),)),
 )
 
 const brainphantom_experiments_pdps_known = (
@@ -141,8 +141,8 @@
                p_known₀_petb ⬿ (predictor=Rotation(),)),
     Experiment(AlgorithmNew, DisplacementConstant, brainphantom,
                p_known₀_petb ⬿ (predictor=ZeroDual(),)),
-    Experiment(AlgorithmNew, DisplacementConstant, brainphantom,
-               p_known₀_petb ⬿ (predictor=ActivatedDual(),)),
+    # Experiment(AlgorithmNew, DisplacementConstant, brainphantom,
+    #            p_known₀_petb ⬿ (predictor=ActivatedDual(),)),
 )
 
 
diff -r 56cc16c66b39 -r 429f8c86aea1 src/PredictPDPS.jl
--- a/src/PredictPDPS.jl	Fri May 03 09:13:05 2024 -0500
+++ b/src/PredictPDPS.jl	Fri May 03 17:17:16 2024 +0300
@@ -55,7 +55,7 @@
        batchrun_denoising, batchrun_predictors,
        demo_denoising1, demo_denoising2, demo_denoising3,
-       demo_denoising4, demo_denoising5, demo_denoising6, demo_denoising7, demo_denoising8,
+       demo_denoising4, demo_denoising5, demo_denoising6, demo_denoising7, #demo_denoising8,
        demo_petS1, demo_petS2, demo_petS3,
        demo_petS4, demo_petS5, demo_petS6,
        demo_petS7,
        demo_petB1, demo_petB2, demo_petB3,
@@ -183,8 +183,8 @@
                p_known₀_denoising ⬿ (predictor=Rotation(),)),
     Experiment(AlgorithmNew, DisplacementConstant, lighthouse,
                p_known₀_denoising ⬿ (predictor=ZeroDual(),)),
-    Experiment(AlgorithmNew, DisplacementConstant, lighthouse,
-               p_known₀_denoising ⬿ (predictor=ActivatedDual(),)),
+    # Experiment(AlgorithmNew, DisplacementConstant, lighthouse,
+    #            p_known₀_denoising ⬿ (predictor=ActivatedDual(),)),
 )
 
 const denoising_experiments_all = Iterators.flatten((
@@ -222,7 +222,7 @@
 demo_denoising5 = () -> demo(denoising_experiments_pdps_known[5]) # Proximal (old)
 demo_denoising6 = () -> demo(denoising_experiments_pdps_known[6]) # Rotation
 demo_denoising7 = () -> demo(denoising_experiments_pdps_known[7]) # Zero dual
-demo_denoising8 = () -> demo(denoising_experiments_pdps_known[8])
+
 
 function batchrun_article(kwargs...)
     run_experiments(;experiments=experiments_all,
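
For reference, below is a minimal, self-contained Julia sketch (outside the patch) of the activation-based dual scaling that the hunks above switch to, i.e. c = 1.0 .- 0.75.*activation.(cc./cm). The scale_dual! name, the 64×64 image size, and the random data are illustrative placeholders only and not part of the package API; only the scaling arithmetic mirrors the patch.

# Steep sigmoid switch from the patch: ~0 for small relative change in the
# primal iterate, ~1 once the change exceeds roughly 5% of the maximum.
activation(x :: Real) = 1/(1 + exp(-1000(x - 0.05)))

# Dampen the dual variable y (2×m×n) where the primal image changed the most,
# mirroring the updated dual scaling in the pdflow!/petpdflow! hunks above.
function scale_dual!(y, x, oldx; weight=0.75, threshold=1e-12)
    cc = abs.(x .- oldx)              # magnitude of the primal update
    cm = max(threshold, maximum(cc))  # normaliser, guarded against zero
    c  = 1.0 .- weight .* activation.(cc ./ cm)
    C  = similar(y)
    C[1,:,:] .= c                     # same factor for both dual components
    C[2,:,:] .= c
    y .= C .* y
    return y
end

# Toy usage with random data standing in for the warped primal iterate.
oldx = rand(64, 64)
x    = oldx .+ 0.1 .* rand(64, 64)
y    = randn(2, 64, 64)
scale_dual!(y, x, oldx)

In the patch itself the weight is 0.75 in the denoising path and 1.0 in the PET path, with the 0.75 variant noted in a comment as best for the brain phantom.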