| 196 oldx = copy(x) |
196 oldx = copy(x) |
| 197 flow!(x, u; threads=(threads==:inner)) |
197 flow!(x, u; threads=(threads==:inner)) |
| 198 C = similar(y) |
198 C = similar(y) |
| 199 cc = abs.(x-oldx) |
199 cc = abs.(x-oldx) |
| 200 cm = max(flow.threshold,maximum(cc)) |
200 cm = max(flow.threshold,maximum(cc)) |
| 201 c = 1 .* (1 .- cc./ cm) .^flow.exponent |
201 # c = 1 .* (1 .- cc./ cm) .^flow.exponent |
| |
202 c = 1.0 .- 0.75.*activation.(cc./cm) |
| 202 C[1,:,:] .= c |
203 C[1,:,:] .= c |
| 203 C[2,:,:] .= c |
204 C[2,:,:] .= c |
| 204 y .= C.*y |
205 y .= C.*y |
| 205 end |
206 end |
| 206 |
207 |
"""
    activation(x::Real)

Steep logistic (sigmoid) weighting function centred at `x = 0.05`:

    1 / (1 + exp(-1000(x - 0.05)))

Maps a normalised change magnitude in `[0, 1]` to a weight in `(0, 1)`:
≈0 below the 0.05 threshold, exactly 0.5 at the threshold, ≈1 above it.
The gain 1000 and centre 0.05 were tuned empirically (best for the
Shepp-Logan phantom); alternative shapes tried during experimentation
(polynomials, shifted sigmoids) have been removed — see version history.
"""
function activation(x :: Real)
    return 1/(1 + exp(-1000(x - 0.05)))
end
"""
    activationD(x)

Companion to [`activation`](@ref) used for dual-step weighting.
Currently returns the constant `1.0` for every input; the earlier
experimental shapes (sigmoids, cubics) were removed — see version history.
"""
function activationD(x)
    return 1.0
end

# Experimental predictor for dual scaling based on `activation`:
# after advancing the primal `x` by the flow, damp the dual `y`
# most strongly where the primal changed most.
function pdflow!(x, Δx, y, Δy, u, flow :: ActivatedDual; threads=:none)
    @assert(size(u)==(2,))
    oldx = copy(x)                            # snapshot of primal before the flow step
    flow!(x, u; threads=(threads==:inner))    # advance primal in place by displacement u
    C = similar(y)
    cc = abs.(x-oldx)                         # per-pixel magnitude of primal change
    cm = max(1e-12,maximum(cc))               # normaliser; floor guards against division by zero
    c = activation.(cc ./ cm)                 # weight in (0,1): ≈1 where change is large
    # Same scalar weight applied to both components of the dual
    # (y is assumed (2, m, n); first axis indexes the dual components).
    C[1,:,:] .= c
    C[2,:,:] .= c
    y .= y .- 0.75.*C.*y                      # damp dual by up to 75% where x changed most
end
236 |
| 242 function pdflow!(x, Δx, y, Δy, u, :: ZeroDual; threads=:none) |
237 function pdflow!(x, Δx, y, Δy, u, :: ZeroDual; threads=:none) |
| 243 @assert(size(u)==(2,)) |
238 @assert(size(u)==(2,)) |
| 244 flow!(x, u; threads=(threads==:inner)) |
239 flow!(x, u; threads=(threads==:inner)) |
| 245 y .= 0.0 |
240 y .= 0.0 |
| 304 Δx = warp(x, tform, axes(x), fillvalue=Flat()) |
299 Δx = warp(x, tform, axes(x), fillvalue=Flat()) |
| 305 @. x = Δx |
300 @. x = Δx |
| 306 C = similar(y) |
301 C = similar(y) |
| 307 cc = abs.(x-oldx) |
302 cc = abs.(x-oldx) |
| 308 cm = max(flow.threshold,maximum(cc)) |
303 cm = max(flow.threshold,maximum(cc)) |
| 309 c = 1 .* (1 .- cc./ cm) .^flow.exponent |
304 # c = 1 .* (1 .- cc./ cm) .^flow.exponent # Original dual scaling |
| |
305 # c = 1.0 .- 0.75.*activation.(cc./cm) # Best for brain phantom |
| |
306 c = 1.0 .- 1.0.*activation.(cc./cm) # Best for shepp logan phantom |
| 310 C[1,:,:] .= c |
307 C[1,:,:] .= c |
| 311 C[2,:,:] .= c |
308 C[2,:,:] .= c |
| 312 y .= C.*y |
309 y .= C.*y |
| 313 end |
310 end |
| 314 |
311 |
| 315 # Experimental predictor for dual scaling based on activation |
312 # # Experimental predictor for dual scaling based on activation |
# Experimental PET predictor for dual scaling based on `activation`:
# rotate the primal `x` by the known angle about a displaced centre,
# then damp the dual `y` where the primal changed most.
function petpdflow!(x, Δx, y, Δy, u, theta_known, flow :: ActivatedDual; threads=:none)
    oldx = copy(x)                            # snapshot of primal before the rotation
    center_point = center(x) .+ u             # rotation centre shifted by displacement u
    tform = recenter(RotMatrix(theta_known[1]), center_point)
    Δx = warp(x, tform, axes(x), fillvalue=Flat())
    @. x = Δx                                 # write the warped image back into x in place
    C = similar(y)
    cc = abs.(x-oldx)                         # per-pixel magnitude of primal change
    cm = max(1e-12,maximum(cc))               # normaliser; floor guards against division by zero
    c = activation.(cc ./ cm)                 # weight in (0,1): ≈1 where change is large
    # Same scalar weight for both dual components (y assumed (2, m, n)).
    C[1,:,:] .= c
    C[2,:,:] .= c
    y .= y .- 1.0.*C.*y                       # fully suppress dual where x changed most
end
| 336 |
333 |
| 337 # Method for rotation prediction (exploiting property of inverse rotation) |
334 # Method for rotation prediction (exploiting property of inverse rotation) |
| 338 function petpdflow!(x, Δx, y, Δy, u, theta_known, flow :: Rotation; threads=:none) |
335 function petpdflow!(x, Δx, y, Δy, u, theta_known, flow :: Rotation; threads=:none) |
| 339 @backgroundif (threads==:outer) begin |
336 @backgroundif (threads==:outer) begin |
| 340 petflow!(x, Δx, u, theta_known; threads=(threads==:inner)) |
337 petflow!(x, Δx, u, theta_known; threads=(threads==:inner)) |