activation function for dual scaling

author      Neil Dizon <neil.dizon@helsinki.fi>
date        Fri, 03 May 2024 18:03:06 +0300
changeset   52:cb029cdb141a
parent      51:429f8c86aea1
child       53:a1ec1a15f3b0

activation function for dual scaling

src/OpticalFlow.jl
src/PET/PET.jl
src/PredictPDPS.jl
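
A rough sketch (not part of the changeset) of what this commit does to the
DualScaling predictor: the fixed exponent field is replaced by a user-supplied
activation function and a damping factor, and the dual variable is rescaled by
c = 1 - factor * activation(cc/cm). The helper name dual_scale below is
illustrative only; in the actual code this happens inside pdflow! and
petpdflow!.

# Sketch assumed from the diff below.
struct DualScaling
    activation :: Function
    factor :: Float64
    threshold :: Real
    DualScaling(a = x -> x, f = 1.0, t = 1e-12) = new(a, f, t)
end

# Hypothetical helper: damp the dual variable y according to how much the
# primal iterate moved under the flow.
function dual_scale(y, x, oldx, flow :: DualScaling)
    cc = abs.(x .- oldx)                      # per-pixel primal change
    cm = max(flow.threshold, maximum(cc))     # normaliser, kept away from zero
    c  = 1.0 .- flow.factor .* flow.activation.(cc ./ cm)
    C  = similar(y)
    C[1,:,:] .= c
    C[2,:,:] .= c
    return C .* y
end
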
--- a/src/OpticalFlow.jl	Fri May 03 17:17:16 2024 +0300
+++ b/src/OpticalFlow.jl	Fri May 03 18:03:06 2024 +0300
@@ -62,9 +62,10 @@
 # Struct for flow
 #################################
 struct DualScaling
-    exponent :: Integer
+    activation :: Function
+    factor :: Float64
     threshold :: Real
-    DualScaling(e = 50, t = 1e-12) = new(e, t)
+    DualScaling(a = x -> x, f = 1.0, t = 1e-12) = new(a, f, t)
 end
 
 struct Greedy end
@@ -198,8 +199,7 @@
     C = similar(y)
     cc = abs.(x-oldx)
     cm = max(flow.threshold,maximum(cc))
-    # c = 1 .* (1 .- cc./ cm) .^flow.exponent
-    c = 1.0 .- 0.75.*activation.(cc./cm)
+    c = 1.0 .- flow.factor.*flow.activation.(cc./cm)
     C[1,:,:] .= c
     C[2,:,:] .= c
     y .= C.*y
@@ -207,7 +207,7 @@
 
 function activation(x :: Real)
     return (1/(1 + exp(-1000(x - 0.05))))    # best for shepp logan
-    #return -abs(x-1)^1/5 + 1                  # best for lighthouse and brainphantom    
+    #return -abs(x-1)^1/5 + 1                # best for lighthouse and brainphantom    
     # return x^(5) 
     # return (1/(1 + exp(-1000(x - 0.075))))  
     # return 4*(x-0.5)^3 + 0.5  
@@ -219,21 +219,6 @@
 end
 
 
-# # Experimental predictor for dual scaling based on activation
-# function pdflow!(x, Δx, y, Δy, u, flow :: ActivatedDual; threads=:none)
-#     @assert(size(u)==(2,))
-#     oldx = copy(x)
-#     flow!(x, u; threads=(threads==:inner))
-#     C = similar(y)
-#     cc = abs.(x-oldx)
-#     cm = max(1e-12,maximum(cc))
-#     c = 1.0 .- 0.75.*activation.(cc./cm)
-#     C[1,:,:] .= c
-#     C[2,:,:] .= c
-#     y .= C.*y 
-#     #y .= y .- C.*y  # 0.75 for brain phantom and shepp logan
-# end
-
 function pdflow!(x, Δx, y, Δy, u, :: ZeroDual; threads=:none)
     @assert(size(u)==(2,))
     flow!(x, u; threads=(threads==:inner))
@@ -301,36 +286,12 @@
     C = similar(y)
     cc = abs.(x-oldx)
     cm = max(flow.threshold,maximum(cc))
-    # c = 1 .* (1 .- cc./ cm) .^flow.exponent   # Original dual scaling
-    # c = 1.0 .- 0.75.*activation.(cc./cm)      # Best for brain phantom
-    c = 1.0 .- 1.0.*activation.(cc./cm)         # Best for shepp logan phantom
+    c = 1.0 .- flow.factor.*flow.activation.(cc./cm)
     C[1,:,:] .= c
     C[2,:,:] .= c
     y .= C.*y
 end
 
-# # Experimental predictor for dual scaling based on activation
-# function petpdflow!(x, Δx, y, Δy, u, theta_known, flow :: ActivatedDual; threads=:none)
-#     oldx = copy(x)
-#     center_point = center(x) .+ u
-#     tform = recenter(RotMatrix(theta_known[1]), center_point)
-#     Δx = warp(x, tform, axes(x), fillvalue=Flat())
-#     @. x = Δx
-#     C = similar(y)
-#     cc = abs.(x-oldx)
-#     cm = max(1e-12,maximum(cc))
-#     c = activation.(cc ./ cm)
-#     C[1,:,:] .= c
-#     C[2,:,:] .= c
-#     # Δx .= activation.(sqrt.(abs.(y[1,:,:]).^2 + abs.(y[2,:,:]).^2))./0.25
-#     # D = similar(y)
-#     # D[1,:,:] .= Δx
-#     # D[2,:,:] .= Δx
-#     # y .= C.*y
-#     # y .= y .- 1.0.*C.*D.*y 
-#     #y .= y .- 1.0.*C.*y 
-# end
-
 # Method for rotation prediction (exploiting property of inverse rotation)
 function petpdflow!(x, Δx, y, Δy, u, theta_known, flow :: Rotation; threads=:none)
     @backgroundif (threads==:outer) begin
--- a/src/PET/PET.jl	Fri May 03 17:17:16 2024 +0300
+++ b/src/PET/PET.jl	Fri May 03 18:03:06 2024 +0300
@@ -109,7 +109,7 @@
 
 const shepplogan_experiments_pdps_known = (
     Experiment(AlgorithmNew, DisplacementConstant, shepplogan,
-               p_known₀_pets ⬿ (predictor=DualScaling(),)),
+               p_known₀_pets ⬿ (predictor=DualScaling(x -> (1/(1 + exp(-1000(x - 0.05)))), 1.0, 1e-12),)),
     Experiment(AlgorithmNew, DisplacementConstant, shepplogan,
                p_known₀_pets ⬿ (predictor=Greedy(),)),
     Experiment(AlgorithmNew, DisplacementConstant, shepplogan,
@@ -128,7 +128,7 @@
 
 const brainphantom_experiments_pdps_known = (
     Experiment(AlgorithmNew, DisplacementConstant, brainphantom,
-               p_known₀_petb ⬿ (predictor=DualScaling(),)),
+               p_known₀_petb ⬿ (predictor=DualScaling(x -> (-abs(x-1)^1/5 + 1), 0.75, 1e-12),)),
     Experiment(AlgorithmNew, DisplacementConstant, brainphantom,
                p_known₀_petb ⬿ (predictor=Greedy(),)),
     Experiment(AlgorithmNew, DisplacementConstant, brainphantom,
--- a/src/PredictPDPS.jl	Fri May 03 17:17:16 2024 +0300
+++ b/src/PredictPDPS.jl	Fri May 03 18:03:06 2024 +0300
@@ -170,7 +170,7 @@
 
 const denoising_experiments_pdps_known = (
     Experiment(AlgorithmNew, DisplacementConstant, lighthouse,
-               p_known₀_denoising ⬿ (predictor=DualScaling(),)),
+               p_known₀_denoising ⬿ (predictor=DualScaling(x -> (-abs(x-1)^1/5 + 1),0.75,1e-12),)),
     Experiment(AlgorithmNew, DisplacementConstant, lighthouse,
                p_known₀_denoising ⬿ (predictor=Greedy(),)),
     Experiment(AlgorithmNew, DisplacementConstant, lighthouse,
@@ -183,8 +183,6 @@
                p_known₀_denoising ⬿ (predictor=Rotation(),)),
     Experiment(AlgorithmNew, DisplacementConstant, lighthouse,
                p_known₀_denoising ⬿ (predictor=ZeroDual(),)),
-    # Experiment(AlgorithmNew, DisplacementConstant, lighthouse,
-    #            p_known₀_denoising ⬿ (predictor=ActivatedDual(),)),
 )
 
 const denoising_experiments_all = Iterators.flatten((
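
A usage note (assembled from the hunks above, not additional code in the
commit): the experiment definitions now pass the activation, factor and
threshold to DualScaling explicitly, e.g. a steep sigmoid for the Shepp-Logan
phantom and a flatter shape with factor 0.75 for the lighthouse and brain
phantom data. The pred_* names are illustrative only.

pred_shepplogan = DualScaling(x -> (1/(1 + exp(-1000(x - 0.05)))), 1.0, 1e-12)
pred_brain      = DualScaling(x -> (-abs(x-1)^1/5 + 1), 0.75, 1e-12)
pred_lighthouse = DualScaling(x -> (-abs(x-1)^1/5 + 1), 0.75, 1e-12)
# With no arguments, DualScaling() falls back to the identity activation
# with factor 1.0 and threshold 1e-12.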
