-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathheat_method.jl
More file actions
3917 lines (3226 loc) · 154 KB
/
heat_method.jl
File metadata and controls
3917 lines (3226 loc) · 154 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
### A Pluto.jl notebook ###
# v0.19.27
#> [frontmatter]
#> title = "Heat Method for Distance Computation"
#> date = "2023-09-01"
#> tags = ["blog", " "]
#> description = "Demonstration of the Heat Method"
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
# Pluto-compatible mock of the interactive `@bind` macro: when this notebook
# runs outside Pluto, bound variables receive the element's initial/default
# value instead of raising an error.
macro bind(def, element)
quote
# Query AbstractPlutoDingetjes (if loaded) for the element's initial value;
# otherwise fall back to a function that returns `missing`.
local iv = try Base.loaded_modules[Base.PkgId(Base.UUID("6e696c72-6542-2067-7265-42206c756150"), "AbstractPlutoDingetjes")].Bonds.initial_value catch; b -> missing; end
local el = $(esc(element))
# Prefer `Base.get(el)` when the element supports it; otherwise use `iv`.
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : iv(el)
el
end
end
# ╔═╡ dab8582c-e8e2-443f-b52c-ac716b2ca12e
using PlutoUI
# ╔═╡ 358c1832-06c0-4918-b766-8c1de98c21d3
begin
using DifferentialEquations , Plots, Eikonal, LinearAlgebra, SparseArrays
end
# ╔═╡ d79f5864-5193-4673-a593-1057ec15e927
begin
using PlyIO
using Arpack
using ColorSchemes
using WGLMakie
using JSServe
Page()
WGLMakie.JSServe.SERVER_CONFIGURATION.listen_port[] = 8089
end
# ╔═╡ 7dc7f1eb-3e1f-4b75-8e4c-e3018b0259d6
# Note to self: What is Gridap.jl?
# ╔═╡ edce4ccf-7be7-4b0d-98aa-c572eac3d0ad
md"""
# Introduction
This notebook provides an overview of the Heat Method by Crane et al. used for geodesic computation [^1]. First we describe limitations with standard discretizations and shortest path algorithms and show the intuition behind the Heat Method. Then we provide some discretization details relevant to meshes and finally provide a Julia implementation. Originally this notebook was going to use pure Julia, but I think there is some value in highlighting some of the flagship packages in the ecosystem.
"""
# ╔═╡ efa0e0ba-8b30-4f69-9bc8-bdd89ca5f61a
PlutoUI.TableOfContents(depth=5)
# ╔═╡ 94ca9000-947d-44d2-8a6d-f9177c476345
md"""### What are Geodesics?
What is the shortest path between points $A$ and $B$? It's likely you have heard this question posed in the context of algorithms, and invariably is applicable to so many domains such as planning. For those of you in CS, you were most likely peppered with questions on finding shortest paths for graphs. The graph model turns out to be a perfect fit to solve shortest paths on because of the discrete nature of a graph. It is easy to store representations of vertices and edges as well as the paths along the graph by simply tabulating a collection of discrete objects. But can we ask the same question in other domains besides graphs, ones which are in fact continuous?
Take a look below at several surfaces where you can imagine a small ant walking on. In all of them, intuitively the shortest path from $A$ to $B$ is the one denoted by the red curve. Sometimes it is simple to compute this shortest path. The flat sheet on the left is Euclidean space, and it is well known that the shortest path between any two points is the line crossing the two. What about the sphere on the right? Well the shortest paths there lie on the [great circle](https://en.wikipedia.org/wiki/Great_circle) of the sphere (think equator). Like on graphs, there may be several shortest paths connecting the points.
"""
# ╔═╡ ad43bc4b-3cc4-4960-bb46-eb6978620e61
let
# Sphere and bunny?
end
# ╔═╡ c235d925-3899-4cf7-9d9d-847ab81a7523
md"""
Now we see the problem is more general than graphs. When dealing with surfaces, the term [geodesic](https://en.wikipedia.org/wiki/Geodesic) has been coined to refer to shortest paths and is a central object in geometry. The question now is, given a surface and two points, what is the geodesic(s) connecting the two?
"""
# ╔═╡ ba6a8f2e-3e01-48c9-a07a-72fcde78e6f1
md"""
### Geodesics on Discretized Surfaces
There has been a concerted effort in the last two centuries to study geodesics on continuous, well-differentiable surfaces known as manifolds. While an interesting subject in its own right, there is not much focus on surfaces that arise naturally from computationally heavy domains such as graphics. By definition, our questions will be discrete in nature.
Often our surfaces can be represented as meshes or point clouds with a finite number of vertices and possibly edges connecting the vertices.
"""
# ╔═╡ 008abc67-4a20-4ba7-a7e0-a1922fd67387
# Add images of polygonal meshes + point clouds
# ╔═╡ 8c2aaf03-a48f-4241-9e78-3b7312ee71e2
md"""
### Issues with Discretization
Shortest path algorithms such as Dijkstra's seem to be the perfect fit for finding geodesics, right? At first this might seem true, but we have to remember that these algorithms answer a fundamentally different question - what is the shortest path along the edges of a graph? This means that all paths produced will necessarily follow the edges specified by the mesh. But why is this bad?
Let us consider again the square surface above where each side is unit length. Clearly the shortest path from $A$ to $B$ is the diagonal with length $\sqrt{2}$. Now how would a shortest path algorithm do here?
We need a graph to pass to our solver. So what if we simply use the four vertices and four sides of the square? Unfortunately we immediately run into problems - no matter how good the solver is, the optimal path from $A$ to $B$ is purportedly along the sides.
"""
# ╔═╡ 5a53c282-c0be-47b3-964f-2fedbfa25451
let
# Unit square with A and B at opposite corners; the two red sides show the
# length-2 "shortest" path a graph algorithm would report along the edges.
x = [0, 1, 1, 0, 0]
y = [0, 0, 1, 1, 0]
highlighted_side_x = [0, 1]
highlighted_side_y = [1, 1]
# Create the plot
Plots.plot(x, y, lw = 2, linecolor = :black, legend = false, xlims=(0,1), ylims=(0,1), size = (200,200), aspect_ratio=:equal, framestyle=:box, ticks=false, background_color=:white, margin=5*Plots.mm)
# Highlight the top side, then annotate the two endpoint corners.
Plots.plot!(highlighted_side_x, highlighted_side_y, seriestype = :shape, lw = 10, linecolor = :red)
annotate!(-0.05,-0.05, "A", fontsize=5)
annotate!(1.05,1.05, "B", fontsize=5)
# Highlight the left side to complete the along-the-edges path from A to B.
Plots.plot!([0,0], [0,1], lw=10, linecolor=:red)
end
# ╔═╡ a73d4efc-b9a7-4171-9bdc-c98e907dd2b7
md"""
Clearly our choice of the graph does not adequately describe the possible paths of a *continuous* surface, so what now? In many fields such as PDEs, numerical simulations commonly discretize continuous spaces into discrete ones, so we can try to use the same methods here.
What should the discretization be though? It turns out there is no obvious choice for discretizing the space. The figure below shows one possible strategy that cuts the square into smaller triangles.
"""
# ╔═╡ 6f2cdb0d-c00d-48ca-a2d0-ea462532895d
md"""Refine N: $(@bind refinement_n PlutoUI.Slider(2:2:16,8,true))"""
# ╔═╡ 97460bf6-ce4f-4210-9664-8c6c43a9a382
let
# Triangulated unit square at resolution `refinement_n` (bound to the slider).
# Red: a staircase path whose length stays 2 no matter how fine the grid;
# blue: the true diagonal geodesic of length sqrt(2).
dx = range(0,1,refinement_n+1)
Plots.plot()
Plots.xlims!((0,1))
Plots.ylims!((0,1))
Plots.plot!(ticks=false, framestyle=:box, size=(300, 300), legend=false, margin=5*Plots.mm)
# Grid lines of the refinement.
vline!(dx, line = :black, lw = 1)
hline!(dx, line = :black, lw=1)
# Staircase path: duplicate each coordinate and offset to alternate x/y steps.
xx = repeat(dx, inner=2)[2:end]
yy = repeat(dx, inner=2)[1:end-1]
Plots.plot!(xx,yy, color=:red, lw=3)
# Anti-diagonal triangulation edges (both halves of the square).
for x in dx
Plots.plot!([0,x], [x,0], color=:black)
end
for x in reverse(dx)
Plots.plot!([x,1], [1,x], color=:black)
end
# The actual geodesic between the corners.
Plots.plot!([0,1], [1,0], color=:blue, lw=3)
annotate!(-0.05, 1.05, "A", color = :black, fontsize = 5)
annotate!(-0.05, -.05, "B", color = :black, fontsize = 5)
annotate!(1.05, 1.05, "C", color = :black, fontsize = 5)
annotate!(1.05, -0.05, "D", color = :black, fontsize = 5)
end
# ╔═╡ 0526ff5b-952c-4cba-842b-1059c67c12e1
md"""
Like before, the partitioned square can be represented as a graph and running a shortest path algorithm will produce the two paths shown in red and blue. This strategy successfully recovers the geodesic from $A$ to $D$ because it just so happens that the graph has the edges to describe this path. Despite our initial successes, this choice leads to the path in red. A careful look shows that this path *still* has length $2$ despite refining the graph! Further discretization will yield **no** improvement for the path from $B$ to $C$.
Adding an edge from $B$ to $C$ of length $\sqrt{2}$ can fix the problem for this particular path, but this fails to address the more general case where the start and ending points lie in arbitrary places on the square. Of course, there is always the option to add many irregular edges to the graph to express more paths, but it will never be enough to precisely answer every query. The downsides are that more edges means longer runtimes and possibly larger memory consumption to represent all of the visited points.
Finally even if we were willing to take the hit in runtime and memory performance, this all assumes that we can compute the weights of the edges. In a square it is straightforward but asking the same question on more complicated surfaces becomes just as difficult as finding the geodesics themselves.
"""
# ╔═╡ f0eb3973-f9c4-41fc-8f38-3bcb71e76c7d
let
# Annulus (gray ring) with two marked points — a surface with a hole, where
# no simple discretization is obvious.
θ = range(0,2,100)
# Outer disk of radius 0.9 and inner disk of radius 0.5 traced by cos/sin pairs.
c1 = Plots.Shape([(0.9i,0.9j) for (i,j) in zip(cospi.(θ), sinpi.(θ))])
c2 = Plots.Shape([(0.5*i,0.5*j) for (i,j) in zip(cospi.(θ), sinpi.(θ))])
T = 1.0
Plots.plot(ticks=false, framestyle=:none, size=(200, 200), legend=false, xlims=(-T,T), ylims=(-T,T))
Plots.plot!(c1, color=:lightgray)
# Drawing the inner disk in white "cuts" the hole out of the ring.
Plots.plot!(c2, color=:white)
# NOTE(review): `colormap=:red` looks like it was meant to be `color=:red` for
# these scatter markers — confirm against the rendered notebook.
Plots.scatter!([0.6], [0.1], markersize=5, markerstrokewidth=0,colormap=:red)
Plots.scatter!([-0.7], [-0.4], markersize=5, markerstrokewidth=0, colormap=:red)
end
# ╔═╡ c8fc76f7-f900-460b-a6e3-a33a3386a8e0
Markdown.MD(Markdown.Admonition("warning", "Question", [md"""
Is there a good discretization for this surface? Even if one could be found, would it work well with shortest path algorithms?
"""]))
# ╔═╡ 19696716-d7ae-4ffd-8b73-399e5f02831b
md"""
# Intuition by Leveraging Heat
How do we use heat to produce a notion of distance? For now we will simplify the picture by working on square grids. Let's continue with the running example of a metal square surface that is initially $0^{\circ}$F everywhere. Now let us take a red-hot needle and touch the center of the plate. At $t=0$ only the center of the plate is hot - say $100^\circ$ F. After a while, the heat flows away from the concentrated center to the boundaries, and this process is governed according to a diffusion equation known as the [heat equation](https://en.wikipedia.org/wiki/Heat_equation) [^2]. Time elapsed means that more of the plate becomes warmer while the center slowly cools down.
"""
# ╔═╡ 0350b1d6-4245-4b86-8b45-be9c00a16c77
md"""t = $(@bind t_kernel PlutoUI.Slider(range(0,5,101), show_value=true, default=0.7))"""
# ╔═╡ ac43bbab-3014-4ece-b738-157e6367f734
md"""
Notably, the amount of heat at a given point on the plate is tied to how long it took for heat to diffuse from the center. Consider a point on the edge of the plate. We see that the temperature is consistently colder than the center, indicating that the point is quite far. Great. We have a notion of distance, but where do the geodesics come into play? By following where the heat flows away (e.g. decreasing temperatures), we can approximate the geodesics by finding paths that monotonically decrease in temperature. This is exactly what is happening when moving away from the center to the edge of the metal plate.
The next example shows a more complicated setup with *two* source points in a insulated system. In an insulated system, the heat remains on the plate. In the multisource problem, the distance of the geodesics is the shortest path from any of the source points. The plot on the right shows heat measurements for three different locations. Can you map each dot to the correct bar?
"""
# ╔═╡ 3a23522e-d7d6-4461-bd33-878e1c823ea6
md"""
t = $(@bind t_multi PlutoUI.Slider(range(0,1,101), show_value=true, default=0.02))
"""
# ╔═╡ 7564b349-5d51-44a0-b78a-654cd1bbfb61
md"""
There are two interesting insights from the bar plot. The first is confirmation of our previous observation - the furthest point from the sources is consistently colder than points closer to them. The second is that the quality of the measurements is time sensitive. For large $t$, the plate has reached pretty close to equilibrium at around ~$0.04$, and so it is tougher to say which points are truly farther or whether it is a fluke with how the heat disperses. However, for $t$ very close to $0$, the difference in measurements is striking and better reflects the range of geodesic lengths. Notice that $t=0$ is uninformative, forcing us to use $t>0$.
It seems that to get high quality measurements, we need to operate in a middle ground. On one extreme, we must avoid running the simulation for too long or else the surface will reach an equilibrium. On the other extreme, we need to run the simulation for some small non-zero time to begin diffusion. The next section formalizes this intuition and presents our first algorithm to compute geodesics."""
# ╔═╡ 96fdedc9-3adf-4e46-9a50-d0b38bd38caf
md"""
### Varadhan's Approximation
The relationship between heat flow and distance is captured by an elegant expression known as Varadhan's formula.
$$\lim\limits_{t\rightarrow 0^+} 4t \log h(t,x,y) = -d(x,y)^2$$
Here $d(x,y)$ is the geodesic distance between points $x$ and $y$. The function $h(t,x,y)$ is known as the *heat kernel*. The heat kernel describes how heat evolves on the surface if we initially placed one unit of heat at $x$. Then $h(t,x,y)$ refers to the amount of heat at $y$ at time $t$ when one unit of heat was placed at $x$. For example, Fig ? shows a heat kernel where $x=(0,0)$.
This formula gives us a direct way to estimate $d(x,y)$. First we place a unit of heat at the source $x$ and simulate for a short period of time. Then we measure the temperature at $y$ to find the value of $h(t,x,y)$. For small $t$, we can do a substitution to obtain $d(x,y)$ to get a decent approximation.
First let's write the formula.
"""
# ╔═╡ c89ce545-5229-418e-a174-e2e4eddc1115
"""
    varadhan_formula(t, heat)

Estimate geodesic distance from heat values via Varadhan's formula,
`d ≈ sqrt(-4t * log(h))`, applied elementwise to `heat`.
"""
function varadhan_formula(t, heat)
    return sqrt.(-4t .* log.(heat))
end
# ╔═╡ 2dad5bf2-ac9c-4767-ac37-3abd252f338a
md"""
Below we have defined $\texttt{heat\_solution}$ to be the heat kernel with initial unit heat at pixel $(7,9)$. Evaluating $\texttt{heat\_solution}$ at a time $t$ gives a matrix representing $h(t,(7,9),y)$. The variable $\texttt{t\_varadhan}$ corresponds to the value of the slider.
"""
# ╔═╡ c814f0d2-cbb0-4db9-9499-993d51f42356
@bind t_varadhan PlutoUI.Slider(range(0,0.1, 101), show_value=true, default=0.005)
# ╔═╡ e1c19aea-d671-4c01-8bcf-119e7abb295f
md"""
Not great but not bad. We do see that the estimates get moderately better for smaller $t$ though the relative error is still significant near the source. Why do these artifacts appear? First, we cannot push $t$ arbitrarily close to $0$. For extremely small values of $t$, the solver's numerical accuracy degrades (move the slider to the left to see numerical errors). This is not just a limitation of the differential equation solver. To simulate heat diffusion, the square was already discretized into an $N\times N$ grid with a single unit of heat placed at the cell $(7,9)$. ...
"""
# ╔═╡ 54c232ba-1175-40a3-b5a9-729450905e9f
md"""
### Eikonal Equation
Well, Varadhan's formula was a good first attempt, but unfortunately the fundamental inaccuracies due to numerical and discretization error put a hard limit on this approach. Instead of refining results from Varadhan's formula, the paper instead takes an alternative approach. From Section ??? we discussed that geodesics correspond to finding paths that cool down the further you travel. Crane et al. leverage this observation by relating it to the Eikonal equation:
$\vert \nabla u(x)\vert=1$ for $x\in \mathcal{M}$ with boundary condition $u(s)=0$ where $s$ is the source point. Here $\mathcal{M}$ represents the surface (e.g. square plate).
The Eikonal equation is a PDE used to describe shortest paths from a source point, and the solution $u$ represents the distance function from $s$. The boundary condition states that $u(x)$ must start at $0$ for the source point. More generally, the boundary condition can be expanded to include multiple source points or even a section of space. Below are some examples of solutions to the Eikonal equation with various sources [^3].
"""
# ╔═╡ dd03796a-0520-418a-88f3-f11547b05a19
let
# Solve the Eikonal equation with Eikonal.jl's fast-marching solver on a
# 1000×1000 grid with 4 random source points; contours are level sets of the
# resulting distance field.
tsize = 1000
solver = FastMarching(tsize, tsize)
# Unit propagation speed everywhere.
solver.v .= 1;
npoints = 4
for _ in 1:npoints
(i, j) = rand(1:tsize, 2)
init!(solver, (i, j))
end
march!(solver)
Plots.contour(1:(tsize+1), 1:(tsize+1), solver.t, levels=30,
aspect_ratio=1, c=:coolwarm, size=(300, 300), ticks=false, framestyle=:box,
title = "W", xlims=(1,(tsize+1)), ylims=(1,(tsize+1)))
end
# ╔═╡ 217ac117-5415-4938-a543-8ebe5cca7898
md"""
Why does $u(x)$ describe the distance function? A helpful analogy is to think how spilt water moves across a surface. Tiny water particles flow away from the source, forming a puddle. The particles at the boundary, or wavefront, consist of particles that have traveled the furthest from the source and had to have traveled at the same rate to get to where they are at now. Thus the boundary represents all the particles that are equidistant from the source; in other words, the boundary is a *level set* mapping $u$ to the same distance. We can imagine letting this puddle continue growing and the wavefront expanding away. This of course represents $u$ increasing.
"""
# ╔═╡ 17ca6c6a-d282-457d-961d-40275a01927a
md"""
So how does the analogy connect with the constraint $\vert \nabla u(x)\vert = 1$? Recall that $\nabla u(x)$ is the gradient and represents the direction of the largest change in distance at $x$. But since $u(x)$ is a distance function, it changes the most by increasing the distance itself, which means expanding the wavefront away from the origin as indicated by the arrows above. The speed of the wavefront is governed by the magnitude of $\nabla u(x)$. Notice that there is something funny going on with the units of $\nabla u(x)$. It basically represents the rate of change of the distance function with respect to the change in distance. Aha! That's where the $\vert\nabla u(x)\vert=1$ comes from. All it is trying to say is that "distance changes $1$ meter per meter"[^4]. Almost tautological...
So instead of evaluating Varadhan's formula, we evaluate the Eikonal equation and problem solved? Sorry to disappoint again, but unfortunately the Eikonal equation is a bit too difficult to solve accurately, even by simulation. This isn't to say that people don't try to solve it; they do and have succeeded in getting good estimates. Fast marching is a whole class of approximation algorithms that takes the wavefront analogy to the extreme. The problem is that many of these algorithms need to approximate the wavefront, which can be somewhat fickle. There are exact solvers, but even these incur large runtime costs, almost on the order of $O(n^3)$. Once again, the paper takes another turn to save the day.
"""
# ╔═╡ c912af64-147f-4ca5-a567-45f5c5e50303
md"""
# The Heat Method
Instead of solving the Eikonal equation directly, the Heat Method repurposes heat diffusion to solve it in three stages: simulating heat, computing gradients, and solving a Poisson equation. It makes use of the following observation on the solution to the heat equation.
"""
# ╔═╡ 3e920b70-d5b6-44f4-b257-e7568a458173
Markdown.MD(Markdown.Admonition("info", "Observation #3", [md"""
Let $h(x)$ be the heat kernel for source $s$ and small time $t$. Then $h(x)$ decreases approximately in the same directions as the distance function $u(x)$.
"""]))
# ╔═╡ d7ccc528-a811-4c31-8d64-fa2ce1e813cb
let
# Picture of two vector fields. One for heat, one for eikonal.
end
# ╔═╡ e56e6e62-5f38-467d-83bd-daaf4a968044
md"""
Observation #3 says that $\nabla h(x)$ is roughly parallel to $\nabla u(x)$. But since $\vert\nabla u(x)\vert =1$, normalizing the vector field $\nabla h(x)$ gives a good estimate for $\nabla u(x)$. Although we have $\nabla u(x)$ instead of $u(x)$, this is a good first step so let's write it out.
"""
# ╔═╡ 47e4da81-2c79-44bc-8d87-a9a0f2e45d4e
md"""
### Simulating Heat
On a flat surface, the heat flows according to the PDE [^5]
$$\Delta x = x_t$$
The operator $\Delta$ is the *Laplacian* and it usually takes the form $\Delta=\frac{\partial}{\partial x^2}+\frac{\partial}{\partial y^2}$ for $2$D surfaces. In the continuous setting, it is straightforward to compute the Laplacian of a function. In our setting, however, functions defined on the discretized structures (e.g. the grid or meshes) are not inherently continuous and instead are defined on a finite number of points. For example, the square grid defines functions on each of the $N\times N$ lattice points and can be represented as a $\mathbb{R}^{N\times N}$ vector. More generally discrete functions are $\mathbb{R}^{\vert V\vert}$ vectors where $\vert V \vert$ is the number of evaluable points. To adapt $\Delta$ for discrete functions, observe that $\Delta$ is a linear operator that maps scalar functions to new scalar functions. This means that finite version of $\Delta$ is simply a matrix mapping $\mathbb{R}^{\vert V\vert}$ to $\mathbb{R}^{\vert V\vert}$. Call this proposed matrix $L$.
Now defining the entries of $L$ is a whole ordeal on its own because it greatly depends on discretization and the domain (e.g. meshes, graphs, point clouds etc). Naturally, discretizing $\Delta$ means that $L$ loses some of the desirable properties of $\Delta$. Lots of effort has been put in finding good and robust Laplacians for different domains, but luckily for us the square grid has a fairly simple $L$ that is intuitive and works great.
"""
# ╔═╡ 47b00e38-83d1-4888-baee-662bd716827c
md"""
##### Laplacian for a Grid
Recall that the square grid defines discrete functions for $N\times N$ points. Let $f\in \mathbb{R}^{N\times N}$ be one such function. The goal is to approximate
$$\Delta=\frac{\partial }{\partial x^2}+\frac{\partial}{\partial y^2}$$, so we first discretize $$\frac{\partial}{\partial x^2}$$. Intuitively, the second derivative at a lattice point will depend on both neighbors on the $x$ axis. Expanding out the Taylor series confirms this:
$\begin{align}
f(a+dx,b)&=f(a,b) + \frac{\partial}{\partial x}f(x,y)dx+\frac{1}{2}\frac{\partial}{\partial x^2}(dx)^2+o((dx)^2)\\
f(a-dx, b) &= f(a,b) - \frac{\partial}{\partial x} f(x,y) dx + \frac{1}{2}\frac{\partial}{\partial x^2} (dx)^2 + o((dx)^2)\\
\end{align}$
Summing both expressions yields
$\begin{align}
f(a+dx,b)+f(a-dx,b) = 2f(a,b) + \frac{\partial}{\partial x^2} f(x,y) (dx)^2 + o((dx)^4)
\end{align}$
or
$$\frac{\partial}{\partial x^2} f(x,y) \approx\frac{[f(a+dx,b)-f(a,b)] + [f(a-dx,b) - f(a,b)]}{(dx)^2}$$
The same can be said for the derivative with respect to $y$:
$$\frac{\partial}{\partial y^2} f(x,y) \approx\frac{[f(a,b+dy)-f(a,b)] + [f(a,b-dy) - f(a,b)]}{(dy)^2}$$
Combining both derivatives yields our discrete Laplacian. Instead of using real-valued position $(a,b)$ we opt with indices representing the entries of a matrix. $L$ is then defined as
$\begin{align}
L_{ij} = &\frac{[f(i+1,j)-f(i,j)] + [f(i-1,j) - f(i,j)]}{(dx)^2}\\
&+\frac{[f(i,j+1)-f(i,j)]+[f(i,j-1)-f(i,j)]}{(dy)^2}\\
&=\frac{\sum_{k\in N(i,j)}[f(k)-f(i,j)]}{(dx)^2}
\end{align}$
where the last equality occurs when $dx=dy$. This is usually called the "five-point stencil" Laplacian since it requires a cross shape to evaluate.
To make sense of the boundary cells, the boundary conditions are used. There are two common ones, Dirichlet and Neumann. For Dirichlet, values at the boundary are fixed to $0$ (e.g. $f(s) = 0$) whereas for Neumann boundary conditions, the partial derivatives are fixed (e.g. $f_x(s) = c$).
First we define the Laplacian with Dirichlet boundary conditions.[^6]
"""
# ╔═╡ 387f2124-1802-405f-8d6e-3cfdcefe2f46
# In-place five-point-stencil Laplacian with Dirichlet (zero) boundary
# conditions, in the in-place ODE right-hand-side form `f!(du, u, p, t)`.
#
# Arguments:
#   Δu : output buffer, overwritten with α·(discrete Laplacian of u)
#   u  : current state (N1×N2 grid of values)
#   p  : tuple (α, Δx, Δy) — diffusivity and grid spacings
#   t  : time (unused; required by the ODE interface)
#
# The Dirichlet condition u ≡ 0 outside the grid is enforced by dropping
# out-of-range neighbor terms while keeping their -2u[i,j] contribution.
function laplacian_dirichlet(Δu,u,p,t)
    α, Δx, Δy = p
    N1, N2 = size(u)
    Δx² = Δx^2
    Δy² = Δy^2
    # Interior nodes: standard five-point stencil.
    for j in 2:(N2-1)
        for i in 2:(N1-1)
            Δu[i, j] =
                (u[i+1, j] + u[i-1, j] - 2u[i, j]) / Δx² +
                (u[i, j+1] + u[i, j-1] - 2u[i, j]) / Δy²
        end
    end
    # Left/right edges (j = 1, j = N2): one j-neighbor is the zero ghost cell.
    for i in 2:(N1 - 1)
        Δu[i, 1] = (u[i+1, 1] + u[i-1, 1] - 2u[i, 1])/Δx² + (u[i, 2] - 2u[i, 1])/Δy²
        Δu[i, N2] = (u[i+1, N2] + u[i-1, N2] - 2u[i, N2])/Δx² + (u[i, N2-1] - 2u[i, N2])/Δy²
    end
    # Top/bottom edges (i = 1, i = N1). Fixed: i-neighbors divide by Δx² and
    # j-neighbors by Δy², consistent with the interior stencil (the original
    # had the two denominators swapped, which mattered whenever Δx ≠ Δy).
    for j in 2:(N2-1)
        Δu[1, j] = (u[1, j+1] + u[1, j-1] - 2u[1, j])/Δy² + (u[2, j] - 2u[1, j])/Δx²
        Δu[N1, j] = (u[N1, j+1] + u[N1, j-1] - 2u[N1, j])/Δy² + (u[N1-1, j] - 2u[N1, j])/Δx²
    end
    # Corners: two ghost neighbors dropped, both -2u terms retained.
    Δu[1, 1] = (u[2, 1] - 2u[1, 1])/Δx² + (u[1, 2] - 2u[1, 1])/Δy²
    Δu[N1, 1] = (u[N1-1, 1] - 2u[N1, 1])/Δx² + (u[N1, 2] - 2u[N1, 1])/Δy²
    Δu[1, N2] = (u[2, N2] - 2u[1, N2])/Δx² + (u[1, N2-1] - 2u[1, N2])/Δy²
    # Fixed: the original used coefficient 1 (not 2) on u[N1,N2] in both terms,
    # inconsistent with the other three corners.
    Δu[N1, N2] = (u[N1-1, N2] - 2u[N1, N2])/Δx² + (u[N1, N2-1] - 2u[N1, N2])/Δy²
    Δu .*= α
    return Δu
end
# ╔═╡ 3ce304a9-3dfd-42f1-b3cf-308b6ce3cbac
md"""
The Laplacian with Neumann conditions is similar except now...
"""
# ╔═╡ ff09b1b2-3990-4bc7-8f6b-962e2ecfee3d
# In-place five-point-stencil Laplacian with Neumann (insulated, zero-flux)
# boundary conditions, in the in-place ODE right-hand-side form `f!(du, u, p, t)`.
#
# Arguments:
#   Δu : output buffer, overwritten with α·(discrete Laplacian of u)
#   u  : current state (n1×n2 grid of values)
#   p  : tuple (α, Δx, Δy) — diffusivity and grid spacings
#   t  : time (unused; required by the ODE interface)
#
# Zero flux across the boundary: the missing neighbor mirrors the boundary
# cell, so each boundary term reduces to a single first difference.
function laplacian_neumann(Δu, u, p, t)
    α, Δx, Δy = p
    Δx² = Δx^2
    Δy² = Δy^2
    n1, n2 = size(u)
    # internal nodes
    for j in 2:(n2 - 1)
        for i in 2:(n1 - 1)
            Δu[i, j] = (u[i+1, j] + u[i-1, j] - 2u[i, j])/Δx² + (u[i, j+1] + u[i, j-1] - 2u[i, j])/Δy²
        end
    end
    # left/right edges: one mirrored j-neighbor
    for i in 2:(n1 - 1)
        Δu[i, 1] = (u[i+1, 1] + u[i-1, 1] - 2u[i, 1])/Δx² + (u[i, 2] - u[i, 1])/Δy²
        Δu[i, n2] = (u[i+1, n2] + u[i-1, n2] - 2u[i, n2])/Δx² + (u[i, n2 - 1] - u[i, n2])/Δy²
    end
    # top/bottom edges: one mirrored i-neighbor
    for j in 2:(n2 - 1)
        Δu[1, j] = (u[1, j+1] + u[1, j-1] - 2u[1, j])/Δy² + (u[2, j] - u[1, j])/Δx²
        Δu[n1, j] = (u[n1, j+1] + u[n1, j-1] - 2u[n1, j])/Δy² + (u[n1-1, j] - u[n1, j])/Δx²
    end
    # corners: both neighbors mirrored
    Δu[1, 1] = (u[2, 1] - u[1, 1])/Δx² + (u[1, 2] - u[1, 1])/Δy²
    Δu[n1, 1] = (u[n1-1, 1] - u[n1, 1])/Δx² + (u[n1, 2] - u[n1, 1])/Δy²
    Δu[1, n2] = (u[2, n2] - u[1, n2])/Δx² + (u[1, n2-1] - u[1, n2])/Δy²
    Δu[n1, n2] = (u[n1-1, n2] - u[n1, n2])/Δx² + (u[n1, n2 - 1] - u[n1, n2])/Δy²
    # Fixed: the original `Δu *= α` rebound the LOCAL name to a newly allocated
    # array, so the caller's buffer was never scaled by α. Broadcasting `.*=`
    # mutates in place, matching laplacian_dirichlet.
    Δu .*= α
    return Δu
end
# ╔═╡ 0bdff4e8-27ad-46ef-b9b3-a146d774cc6d
let
# Insulated (Neumann) heat diffusion from two unit sources at opposite
# corners, evaluated at slider time `t_multi`; side-by-side heatmap and a bar
# chart of heat measured at three probe points A/B/C.
N = 31
u0 = zeros(N,N)
u0[1,1] = 1.0
u0[end,end] = 1.0
L = 1.0
dx = range(0,1,N)
dy = range(0,1,N)
p = (1, step(dx), step(dy))
problem = ODEProblem(laplacian_neumann, u0,(0, 5.0), p)
soln = solve(problem)
Plots.heatmap(dx, dy, soln(t_multi), aspect_ratio=:equal, xlims=(0,L), ylims=(0,L))
# Probe indices for the three green markers.
ii = [20, 9, 2]
jj = [20, 9, 20]
fig1 = Plots.scatter!(dx[ii],dy[jj], markersize=5, color=:green, markerstrokewidth=0, legend=false)
# Heat sampled at the probes at the chosen time.
measurements = [soln(t_multi)[i,j] for (i,j) in zip(ii,jj)]
fig2 = Plots.plot(Plots.bar(["A","B","C"], measurements), ylabel="Heat", margin=5*Plots.mm)
layout = @layout([a b{0.2w}])
Plots.plot(fig1, fig2, layout = layout, legend = false)
end
# ╔═╡ ca26a33f-17c3-4a91-b95c-75b83409705e
md"""
To illustrate how DiffEq.jl works, let's write a function computing the heat kernel. Let's give the option as well to use either Dirichlet or Neumann boundary conditions.
"""
# ╔═╡ cffa4dc5-c9a8-4277-b87b-5dd3a5eff858
# Solve the heat equation on an N×N grid of side length L with one unit of
# heat initially placed at grid index `x` (a tuple `(i, j)`).
# Returns the ODE solution object; evaluating it at a time yields the N×N
# heat field — i.e. the (discrete) heat kernel for source `x`.
#
# Arguments:
#   N  : grid resolution (N×N cells)
#   x  : index tuple of the source cell
#   L  : physical side length of the square domain
#   t  : final simulation time
#   bc : :dirichlet (absorbing boundary) or :neumann (insulated boundary)
#
# Cleanup: removed the unused local `dxy` and no longer shadows the parameter
# `x` with a coordinate range (the original reassigned `x` after using it).
function heat_kernel(N, x, L=1.0, t=1.0, bc=:dirichlet)
    u0 = zeros(N, N)
    u0[x...] = 1.0  # unit of heat at the source cell
    xs = range(0, L, N)
    p = (1.0, step(xs), step(xs))  # (α, Δx, Δy) on a uniform grid
    if bc == :dirichlet
        prob = ODEProblem(laplacian_dirichlet, u0, (0.0, t), p)
    elseif bc == :neumann
        prob = ODEProblem(laplacian_neumann, u0, (0.0, t), p)
    else
        error("BC not implemented")
    end
    return solve(prob)
end
# ╔═╡ e7080c15-ac7e-4106-8df4-65a668e39b83
let
# Heat kernel with the source at the center of the plate, shown at slider
# time `t_kernel`.
N = 31
mid = ceil(Int, N/2)
x = y = range(0,1,N)
L = 1.0
heat_sol = heat_kernel(N, (mid, mid), L, 5.0)
# Clamp the color range away from zero so the plot stays readable as the
# heat dissipates.
cfunc(x) = (0.0, max(maximum(x), 0.001))
Plots.heatmap(x,y,heat_sol(t_kernel), aspect_ratio=:equal, framestyle=:box, ticks=false, xlims=(0,L), ylims=(0,L), background_color_subplot=false, clims=cfunc, size=(400,400))
end
# ╔═╡ 2e4e7cf6-0034-4992-9ea0-f212b4111fc1
let
# Compare Varadhan-formula distance estimates against exact Euclidean
# distances from the source cell (7,9), plus their relative error.
N = 31
L = 0.5
mid = (7,9)
heat_solution = heat_kernel(N, (7,9), L, 5.0)
heat = heat_solution(t_varadhan)
dist_estimates = varadhan_formula(t_varadhan, heat)
# Compute true distances
# NOTE(review): this divides by N while the kernel was built on a domain of
# side L = 0.5 with spacing L/(N-1) — confirm the two length scales agree.
d(x,y) = sqrt((x-mid[1])^2 + (y-mid[2])^2)/N
dist_true = d.((1:N)', 1:N)
relative_error = abs.(dist_true - dist_estimates) ./ dist_true
fig1 = Plots.heatmap(1:N,1:N, heat, aspect_ratio=:equal, framestyle=:box, ticks=false, xlims=(1,N), ylims=(1,N), background_color_subplot=false, title="Heat")
fig2 = Plots.heatmap(1:N,1:N,dist_estimates, aspect_ratio=:equal, framestyle=:box, ticks=false, xlims=(1,N), ylims=(1,N), background_color_subplot=false, colormap=:viridis, title="Estimated Distance")
fig3 = Plots.heatmap(1:N,1:N,dist_true, aspect_ratio=:equal, framestyle=:box, ticks=false, xlims=(1,N), ylims=(1,N), background_color_subplot=false, colormap=:viridis, title="True Distance")
fig4 = Plots.heatmap(1:N,1:N,relative_error, aspect_ratio=:equal, framestyle=:box, ticks=false, xlims=(1,N), ylims=(1,N), background_color_subplot=false, colormap=:viridis, title="Relative Error")
Plots.plot(fig1, fig2, fig3, fig4, label=(1,2,3,4))
end
# ╔═╡ 2a77d78c-1b8a-4690-9024-46a6794d8efd
md"""
### Computing Gradients
The next step is to take the gradient of the $h$. The discrete gradient is defined similarly.
$\begin{align}
\frac{\partial}{\partial x}h(a+dx,b) = h(a,b)+\frac{\partial}{\partial x}h(x,y)dx+o((dx)^2)\\
\frac{\partial}{\partial x}h(a-dx,b) = h(a,b)-\frac{\partial}{\partial x}h(x,y)dx+o((dx)^2)
\end{align}$
Combining terms yields
$$\frac{\partial}{\partial x}h(x,y) \approx \frac{h(a+dx,b)-h(a-dx,b)}{2dx}$$
Generally, the gradient field is a vector field and there are several equivalent ways of representing it for discrete structures. Here, the vector field is defined as a $\mathbb{R}^{\vert V\vert\times 2}$ matrix.
"""
# ╔═╡ 7a7a67a7-7958-43bb-bf54-36b80ecdf1de
function ∇(h::Matrix)
    # Staggered-grid gradient of the scalar field `h`, assuming h ≡ 0 outside
    # the grid. Channel 1 holds backward differences along the second (column)
    # index, channel 2 along the first (row) index, so the result has one more
    # sample than `h` in each direction: size (N1+1) × (N2+1) × 2.
    n1, n2 = size(h)
    grad = zeros(n1 + 1, n2 + 1, 2)
    for col in 1:n2, row in 1:n1
        # Backward difference in the column direction (h[row, 0] ≡ 0).
        grad[row, col, 1] = col == 1 ? h[row, col] : h[row, col] - h[row, col - 1]
        # Backward difference in the row direction (h[0, col] ≡ 0).
        grad[row, col, 2] = row == 1 ? h[row, col] : h[row, col] - h[row - 1, col]
    end
    # One-sided differences on the far boundary (h ≡ 0 beyond the last sample).
    for row in 1:n1
        grad[row, n2 + 1, 1] = -h[row, n2]
    end
    for col in 1:n2
        grad[n1 + 1, col, 2] = -h[n1, col]
    end
    return grad
end
# ╔═╡ b89464c0-baaa-4571-a744-91d5480c6ae1
function normalize_field(grad)
    # Rescale every vector of the field (vectors are stored along dim 3) to
    # unit length. Zero vectors stay zero: their magnitude is replaced by 1
    # to avoid dividing by zero.
    magnitude = sqrt.(sum(abs2, grad; dims=3))
    magnitude[magnitude .== 0] .= 1
    return grad ./ magnitude
end
# ╔═╡ 160902db-e0b9-4d48-8fcb-41dbeaf429e8
begin
# Grid resolution and heat solution used by the gradient-field demo cells
# below: a 17×17 plate of side 1.0, source at vertex (16,16), conductivity
# 2.0, with Dirichlet boundary conditions.
N=17
h = heat_kernel(N, (16,16), 1.0, 2.0, :dirichlet)
# Return `nothing` so Pluto does not try to render the solution object.
nothing
end
# ╔═╡ 2415e55d-bed0-4050-893e-b6a7af00ef45
# Time slider (t ∈ [1e-6, 2], 101 steps) driving the gradient-field plot below.
@bind t_grad PlutoUI.Slider(range(1e-6,2,101), show_value=true, default=0.5)
# ╔═╡ c0c94bdd-a7d0-46c3-a61e-6c0d40d8a3c9
begin
# Plot the heat distribution at time t_grad with the normalized staggered-grid
# gradient field drawn on top as two quiver layers (one per component).
heat_grid = h(t_grad)
Plots.heatmap(1:N, 1:N, heat_grid, xlims=(1,N), ylims=(1,N),aspect_ratio=:equal, ticks=false)
# All (x,y) pairs of a rectangular grid, flattened column-major.
meshgrid(x, y) = (repeat(x, outer=length(y)), repeat(y, inner=length(x)))
heat_grad_grid = ∇(heat_grid)
heat_grad_normalized = normalize_field(heat_grad_grid)
# NOTE(review): `dd` appears unused in this cell — it may be read by another
# notebook cell; verify before removing.
dd = (1:(N+1))
# Arrow length divisor so neighbouring quivers do not overlap.
Z = 2
# Component 1 is staggered half a cell in x (see ∇'s layout); transpose so
# vec() flattens in plot order.
x, y = meshgrid((1:(N+1)).-0.5, (1:(N+1)))
tx = vec(heat_grad_normalized[:,:,1]')/Z
Plots.quiver!(x,y, quiver=(tx, zeros(size(tx))))
ty = vec(heat_grad_normalized[:,:,2]')/Z
# Component 2 is staggered half a cell in y.
x, y = meshgrid((1:(N+1)), (1:(N+1)).-0.5)
Plots.quiver!(x,y, quiver=(zeros(size(ty)), ty))
end
# ╔═╡ 346c06a8-c91f-4c56-9840-67f2f02bd8c9
function div_grid(grad, dx, dy)
    # Discrete divergence of a staggered-grid field as produced by ∇: forward
    # differences of both components, scaled by 1/dx² and 1/dy² so that
    # div_grid ∘ ∇ matches the Laplacian stencil built by `l`.
    n1 = size(grad, 1) - 1
    n2 = size(grad, 2) - 1
    result = zeros(n1, n2)
    for j in 1:n2, i in 1:n1
        ddx = (grad[i, j + 1, 1] - grad[i, j, 1]) / dx^2
        ddy = (grad[i + 1, j, 2] - grad[i, j, 2]) / dy^2
        result[i, j] = ddx + ddy
    end
    return result
end
# ╔═╡ 79d22285-1c69-495c-b05d-83d8c758ee46
function l(N,M, Δx, Δy)
    # Five-point finite-difference Laplacian of an N×M grid as a sparse
    # (N*M)×(N*M) matrix, with column-major vertex numbering and an implicit
    # zero (Dirichlet) boundary: every diagonal entry is -2/Δx² - 2/Δy² and
    # each in-grid neighbour contributes 1/Δx² (row direction) or 1/Δy²
    # (column direction).
    #
    # Fixes over the previous version:
    #  * linear index is (j-1)*N + i — the old (j-1)*M + i was only correct
    #    for square grids (N == M);
    #  * boundary rows and corners now use the same Δx²/Δy² weights as the
    #    interior (they were swapped, which was invisible only when Δx == Δy).
    idx(i, j) = (j - 1) * N + i
    Δx² = Δx^2
    Δy² = Δy^2
    Δ = spzeros(N * M, N * M)
    for j in 1:M, i in 1:N
        ii = idx(i, j)
        Δ[ii, ii] = -2/Δx² - 2/Δy²
        # Neighbours outside the grid are treated as zero (Dirichlet), so
        # they simply contribute nothing.
        i > 1 && (Δ[ii, idx(i - 1, j)] = 1/Δx²)
        i < N && (Δ[ii, idx(i + 1, j)] = 1/Δx²)
        j > 1 && (Δ[ii, idx(i, j - 1)] = 1/Δy²)
        j < M && (Δ[ii, idx(i, j + 1)] = 1/Δy²)
    end
    return Δ
end
# ╔═╡ a701e06d-553e-43b6-b36a-c68667bfd4b1
begin
# Recover the distance field from the normalized gradient by solving the
# Poisson problem L u = ∇⋅X on the N×N grid.
s = step(range(0,1,N))
L_test = l(N,N, s, s)
φ = div_grid(heat_grad_normalized, s,s)
dist_grid = L_test \ vec(φ)
# NOTE(review): the heatmap below shows `heat_grid`, not the freshly solved
# `dist_grid` — plotting reshape(dist_grid, N, N) was possibly intended here.
Plots.heatmap(1:N, 1:N, reshape(heat_grid, N,N), xlims=(1,N), ylims=(1,N),aspect_ratio=:equal, ticks=false)
# findall(x->x>0, φ)
end
# ╔═╡ 7c9b744e-72cd-449e-8228-a25b5c845233
md"""
### Solving the Poisson Equation
The final step is to transform the estimate of $\nabla u(x)$, $\vec{X}$, to an estimate of $u(x)$. To do so, we appeal to a general definition of the Laplacian using grad ($\nabla$) and div $(\nabla \cdot)$. The Laplacian is defined as
$$\Delta f= \nabla\cdot (\nabla f)$$
or "div grad of f". Verify that this definition is consistent when the Euclidean $\nabla$ and $\nabla \cdot$ are plugged in.
The right hand side of $\vec{X}=\nabla u(x)$ looks somewhat like the right hand side of the Laplacian. So applying $\nabla \cdot$ yields
$$\nabla\cdot \vec{X} = \nabla\cdot(\nabla u(x)) = \Delta u(x)$$
So solving this equation will yield $u(x)$, but is it any easier? In general no, but going back to the discrete setting simplifies this issue. Using our discrete operators so far, this amounts to solving
$$\nabla \cdot X = Lu$$
Notice that $\nabla\cdot X\in \mathbb{R}^{\vert V\vert}$, so as soon as we find a way to compute the "discrete div", the only thing left to do is to solve for $u$ as a system of linear equations!
"""
# ╔═╡ c3d64ff4-9d3e-44da-93f1-827b93042fc9
md"""
# Heat Method on Meshes
"""
# ╔═╡ 1bc67b2f-a1e9-4121-92cd-083b4ea9567b
Markdown.MD(Markdown.Admonition("danger", "", [md"""
**Warning**: I have tried my best but was unable to display the plots in this section for static viewing (what you are probably using right now). It seems that Pluto+WGLMakie is currently a bit brittle for static html files but hopefully this will be fixed soon. To my dismay, you will need to download and run Julia locally (or on Binder, though this can take forever). For consolation, I left screenshots for those of you viewing online and a little surprise for the astute reader.
"""]))
# ╔═╡ 8a65dd84-2098-4765-8912-4ed6d32a9e0a
md"""
Up to this point, the examples were only of square plates, but the most interesting problems arise in 3D with meshes and point clouds. Luckily the Heat Method carries over to this regime. One thing we need to look out for is how to do calculations analogous to those done on the square plate. For this, we need to propose discretizations for the operators.
"""
# ╔═╡ e48d51a3-debc-4339-84fe-20ee9613e808
let
	# Placeholder cell: intended to showcase some example meshes.
	# Feature some meshes
end
# ╔═╡ 9a0aa371-9fbf-493f-ba4e-cb0801c2d5ef
md"""
### Discretizing ∇, ∇⋅, and Δ
"""
# ╔═╡ 861049ba-49d9-4f8c-a186-f1f95b282904
md"""
##### Laplace-Beltrami
The first order of business is to discretize the Laplacian $\Delta$ for meshes. You might recall that the Laplacian in $\mathbb{R}^3$ is defined as
$$\Delta=\frac{\partial^2}{\partial x^2}+\frac{\partial^2}{\partial y^2}+\frac{\partial^2}{\partial z^2}$$
The problem is that this assumes that heat flows in a volume rather than a surface. To illustrate the difference, we plotted a sphere and a ball. The sphere is hollow and so the heat can only flow on its surface, whereas the ball conducts heat internally.
"""
# ╔═╡ 83f7c9c7-cf37-44f0-8e51-ea6596d83605
let
	# Placeholder cell: intended heat-flow simulation comparing a sphere and a ball.
	#simulation
end
# ╔═╡ 688712c4-b57f-49de-a1e9-3a3299eef60e
md"""
If the volumetric definition is used, then the heat values measured would correspond to geodesics that run *internal* to the mesh. Thus we need to use a different formulation of the Laplacian that handles surfaces of meshes well. For this, we introduce the [Laplace-Beltrami](https://en.wikipedia.org/wiki/Laplace–Beltrami_operator) operator.
The Laplace-Beltrami (referred also as the Laplacian for short) operator is the 2D analog for surfaces embedded $\mathbb{R}^3$. Why is that? When considering meshes, they are often assumed to be manifolds, meaning that at any given point, the surface looks locally like a flat plane. So at a local scale, heat diffusion looks approximately the same as if we were simulating a flat surface. The generalization is defined as
How would the discrete analog look for a mesh? A mesh consists of vertices and faces, and a function can be defined over its vertices or faces. Here, a heat function $u(v)$ is defined over the vertices $v\in \mathcal{M}$ which can be represented compactly as a $\mathbb{R}^{\vert V\vert}$ vector. So the Laplacian must take in a scalar field $f(v)\in \mathbb{R}^{\vert V\vert}$ and produce a new scalar field over the vertices. This implies that the shape of $\Delta$ is represented as a matrix, more precisely a $|V|\times |V|$ matrix. Like [convolution filters](https://en.wikipedia.org/wiki/Kernel_(image_processing)#Details), there are many proposed Laplacian matrices that seek to approximate the continuous Laplace-Beltrami operator. It turns out that no discrete Laplacian is able to satisfy all of the properties of the continuous one, so each matrix has its own pitfalls. Here we use a common one called the **cotangent Laplacian**.
"""
# ╔═╡ 92b945aa-b19f-4732-963b-a3e8b42a6b02
Markdown.MD(Markdown.Admonition("info", "Note", [md"""
By far the most popular discretization for the Laplace-Beltrami operator is the cotangent Laplacian. You will see it as the first choice in virtually all geometry processing papers. Its main weakness is that it is sensitive to changes in the mesh (e.g. remeshing or triangulation quality). Moreover, there is no guarantee that the entries are all positive.
"""]))
# ╔═╡ d5b9c28b-e9fc-4e04-bf05-5b0b93da804e
md"""
The cotangent Laplacian, $L$, is defined as
$L_{ij}=\begin{cases}
w_{ij} & \text{if }i\neq j\\
-\sum\limits_{k\neq i} w_{ik} & \text{if }i=j
\end{cases}$
where $w_{ij}=\cot \alpha_i + \cot \alpha_j$. The figure below defines $\alpha_i$ and $\alpha_j$ for a pair of opposing vertices.
"""
# ╔═╡ 5bcf708c-8dc2-4a2d-a284-c17db2ea8b9a
md"""
This is actually referred to as the *unweighted* version. The weighted version incorporates the vertex-based areas to weight the entries of $L$ [^7]. Vertices that amass a larger area need to be weighted down whereas vertices that cover little area are upweighted. Concretely, we define a mass matrix $A=\text{diag}(a)$ where $a\in\mathbb{R}^{\vert V\vert}$ are the vertex-based areas. The weighted cotangent Laplacian is then $A^{−1}L$.
"""
# ╔═╡ 3ed68b8f-db59-40fe-87ef-8df03f81f9df
md"""
##### Div and Grad
With the Laplacian defined, we now turn to finding manifold analogs of $\nabla$ and $\nabla\cdot$ while keeping faithful to Equation ?. It turns out that if we want to use the cotangent Laplacian for our choice of $\Delta$, then we need to be careful when defining $\nabla$ and $\nabla\cdot$. These operators need to satisfy the relationship $\nabla\cdot (\nabla u) = \Delta u$ (show yourself why it is true for Euclidean case).
Here we define a face-based gradient operator which takes a scalar field in $\mathbb{R}^{\vert V\vert}$ and produces vectors for each face of the mesh. The vectors lie in the plane of the faces by design. For every triangle, the gradient is
$$(\nabla u)_f = \frac{1}{2A_f}\sum\limits_{j=1}^3 u_j (\textbf{N} \times \textbf{e}_j)$$
I'm slightly abusing the notation - the sum is over the three vertices of the triangle. $u_j$ are the scalar values of the vertices and $\textbf{e}_j$ is the edge opposite to the vertex. See the Figure below.
Explain what the definition of gradient is.
With $\nabla$ defined, the corresponding choice of $\nabla\cdot$ should satisfy the relationship. The paper defines it as
$$(\nabla\cdot X)_v = \frac{1}{2}\sum\limits_{j} \cot\theta_1(e_2\cdot X_j) + \cot \theta_2(e_1\cdot X_j)$$
where $X$ is the vector field and $j$ is a sum over all the adjacent triangles to a vertex $v$. The angles $\theta_1$ and $\theta_2$ are the angles opposite of vertex $v$ on the triangle. The edges $e_1$ and $e_2$ emanate from $v$ and end at the opposite vertices. Take note of the ordering and see Figure .
"""
# ╔═╡ 3300a2ee-bc41-439a-8ec4-a981aab32a93
# Column-wise cross product: column i of the result is cross(x[:,i], y[:,i]).
multicross(x, y) = reduce(hcat, map(cross, eachcol(x), eachcol(y)))
# ╔═╡ 61f3e733-6527-43b9-97bd-08459e0878fc
# Dot products of corresponding slices of `x` and `y`, reduced along `dims`
# (by default columns, giving a 1×n row of per-column dot products).
function vdot(x, y; dims=1)
    return sum(x .* y, dims=dims)
end
# ╔═╡ 9dd097b6-5f82-4fbe-b0d1-86756b7747d2
# Assemble the (unweighted) cotangent Laplacian of a triangle mesh.
# V is 3×|V| (vertex positions as columns), F is 3×|F| (triangle vertex
# indices as columns). Returns a sparse |V|×|V| matrix of the form
# 0.5*(diag(rowsums) - W) with W_ij = cot αᵢ + cot αⱼ, where αᵢ, αⱼ are the
# two angles opposite edge (i,j).
function cotlaplacian(V,F)
nv = size(V,2)
nf = size(F,2)
#For all 3 shifts of the roles of triangle vertices
#to compute different cotangent weights
cots = zeros(nf, 3)
for perm in [(1,2,3), (2,3,1), (3,1,2)]
i, j, k = perm
# Edges from vertex k towards vertices i and j: the angle between them is
# the interior angle at k, which is opposite edge (i,j). cot = (u⋅v)/|u×v|.
u = V[:,F[i,:]] - V[:, F[k,:]]
v = V[:, F[j,:]] - V[:, F[k,:]]
cotAlpha = vec(vdot(u,v; dims=1)) ./ norm.(eachcol(multicross(u,v)))
# Stored in column i so it pairs with the directed edge (F[i,:], F[j,:]).
cots[:,i] = cotAlpha
end
I = F[1,:]; J = F[2,:]; K = F[3,:];
# One cotangent per directed edge; `sparse` sums duplicate (row,col) entries.
L = sparse([I;J;K], [J;K;I], [cots[:,1];cots[:,2];cots[:,3]], nv, nv)
# Symmetrize so entry (i,j) accumulates both opposite angles, cot αᵢ + cot αⱼ.
L = L + L'
rowsums = vec(sum(L,dims=2))
# Graph-Laplacian form (diagonal minus adjacency); the 0.5 gives the usual
# w_ij = (cot αᵢ + cot αⱼ)/2 convention.
L = spdiagm(0 => rowsums) - L
return 0.5 * L
end
# ╔═╡ f7914d06-c58e-4033-b895-9c069ec6eb4e
# Area-weighted face normals as a 3×|F| matrix: 0.5 * (e1 × e2) per triangle,
# so each column's magnitude is the triangle's area.
function face_area_normals(V, F)
    corners = V[:, F]                      # 3 × 3 × |F| corner positions
    e1 = corners[:, 2, :] - corners[:, 1, :]
    e2 = corners[:, 3, :] - corners[:, 1, :]
    return 0.5 * multicross(e1, e2)
end
# ╔═╡ dc7eece7-942a-491a-9eaa-033c19112d32
# Unit face normals as a 3×|F| matrix (area-weighted normals rescaled in place).
function face_normals(V, F)
    normals = face_area_normals(V, F)
    foreach(normalize!, eachcol(normals))
    return normals
end
# ╔═╡ 745e6b01-dfdc-4e41-a43b-8002cd5e8357
# Centroid of every face, returned as a 3×|F| matrix (mean of the 3 corners).
function face_centers(V, F)
    corner_sums = sum(V[:, F], dims=2)     # 3 × 1 × |F|
    return dropdims(corner_sums ./ 3; dims=2)
end
# ╔═╡ 321527d2-068c-4e85-8947-f6d1d6fe4fd3
# Area of every face: the magnitude of each area-weighted normal.
face_area(V, F) = [norm(col) for col in eachcol(face_area_normals(V, F))]
# ╔═╡ 05d16cc7-3f0a-426c-954a-63d840708777
"""
    vertex_area(V, F)

Barycentric vertex areas: each triangle contributes one third of its area to
each of its three vertices. `V` is 3×|V| (positions as columns) and `F` is
3×|F| (triangle vertex indices as columns). Returns a length-|V| vector.
"""
function vertex_area(V,F)
    areas = zeros(size(V)[2])
    for f in eachcol(F)
        T = V[:,f]
        x = T[:,1] - T[:,2]
        y = T[:,3] - T[:,2]
        # Triangle area = |x × y| / 2. (Replaces the hand-rolled
        # sqrt-of-sum-of-squares with the stdlib `norm`.)
        A = 0.5 * norm(cross(x, y))
        areas[f] .+= A
    end
    areas ./= 3
    return areas
end
# ╔═╡ f324cbad-1f68-4359-9ea4-8eb8f13b27e1
# Face-based gradient operator as a sparse 3|F| × |V| matrix. Applied to a
# per-vertex scalar field u it yields, per face f, the in-plane vector
# (∇u)_f = 1/(2A_f) Σ_j u_j (N × e_j), where e_j is the edge opposite vertex j.
# Rows 3k-2:3k hold the x/y/z components of the gradient on face k.
function face_grad(V,F)
A = face_area(V,F)
N = face_normals(V,F)
# Column (vertex) index for each of the 3 spatial rows of every face.
u = repeat(F[1,:], inner=3)
v = repeat(F[2,:], inner=3)
w = repeat(F[3,:], inner=3)
# Directed triangle edges; vw is opposite vertex F[1,:], wu opposite F[2,:],
# uv opposite F[3,:].
uv = V[:,F[2,:]] - V[:,F[1,:]]
vw = V[:,F[3,:]] - V[:,F[2,:]]
wu = V[:,F[1,:]] - V[:,F[3,:]]
J = 1:3*size(F,2)
# (N × opposite edge) / A for each vertex slot; the trailing /2 below
# completes the 1/(2A_f) factor.
G2 = cross.(eachcol(N), eachcol(wu)) ./ A
G1 = cross.(eachcol(N), eachcol(vw)) ./ A
G3 = cross.(eachcol(N), eachcol(uv)) ./ A
# Flatten the vectors-of-3-vectors into plain coefficient vectors.
G1 = collect(Iterators.flatten(G1))
G2 = collect(Iterators.flatten(G2))
G3 = collect(Iterators.flatten(G3))
g = sparse([J;J;J], [u;v;w], [G1; G2; G3], 3*size(F,2), size(V,2))
g ./= 2
return g
end
# ╔═╡ b7b393a9-e6f6-48ea-a4d5-d3e327f6bc18
# Discrete divergence operator as a sparse |V| × 3|F| matrix: for a face-based
# field X (flattened 3 components per face, matching face_grad's row layout),
# (∇⋅X)_v = 1/2 Σ_f [cot θ₁ (e₁ ⋅ X_f) + cot θ₂ (e₂ ⋅ X_f)] over the faces
# incident to v, with e₁,e₂ the edges leaving v and θ₁,θ₂ the opposite angles.
# NOTE: the name shadows Base.div inside the notebook.
function div(V,F)
# ∇⋅ is |V|×3|F|
uv = V[:,F[2,:]] - V[:,F[1,:]]
vw = V[:,F[3,:]] - V[:,F[2,:]]
wu = V[:,F[1,:]] - V[:,F[3,:]]
# Row k = cot of the interior angle at vertex k of every face:
# cot = (dot of the two edges meeting at k) / (norm of their cross product);
# the leading minus flips one edge so both point away from the vertex.
cotan = -[sum(uv.*wu; dims=1); sum(vw.*uv; dims=1); sum(wu.*vw; dims=1)]
s = [norm.(cross.(eachcol(uv), eachcol(wu)));;
norm.(cross.(eachcol(vw), eachcol(uv)));;
norm.(cross.(eachcol(wu), eachcol(vw)))]'
cotan ./= s
# Column (vertex) index for each of the 3 flattened components of every face.
u = repeat(F[1,:], inner=3)
v = repeat(F[2,:], inner=3)
w = repeat(F[3,:], inner=3)
J = 1:3*size(F,2)
# Per-vertex contribution vectors: cotangent-weighted combinations of the two
# edges emanating from that vertex (signs orient both edges outward).
A = vec(-cotan[2,:]' .* wu + cotan[3,:]' .* uv)
B = vec(cotan[1,:]' .* vw - cotan[3,:]' .* uv)
C = vec(-cotan[1,:]' .* vw + cotan[2,:]' .* wu)
∇ = sparse([u;v;w], [J;J;J], [A;B;C], size(V,2), 3*size(F,2))
# Final 1/2 factor of the divergence formula; this is also the return value.
∇ ./= 2
end
# ╔═╡ 1e1b6bed-9224-4968-afb6-6bbc9d635191
md"""
### Examples
Now for some examples. We use the PlyIO package to read in meshes.
"""
# ╔═╡ 7d9647bb-b0a1-4a21-a496-b43f2c61e7fe
# Easter-egg cell: an HTML button that toggles the visibility of a GIF.
html"""
<head>
<title>Toggle Online Image Button</title>
<style>
/* Initially hide the image */
#toggleImage {
display: none;
}
</style>
</head>
<body>
<button id="toggleButton">?</button>
<img id="toggleImage" src="https://media.tenor.com/6xwjsmMIAIoAAAAd/happy-happy-dog.gif" alt="Image">
<script>
// Get references to the button and the image
const toggleButton = document.getElementById('toggleButton');
const toggleImage = document.getElementById('toggleImage');
// Add a click event listener to the button
toggleButton.addEventListener('click', function() {
// Toggle the image's visibility
if (toggleImage.style.display === 'none') {
toggleImage.style.display = 'block';
} else {
toggleImage.style.display = 'none';
}
});
</script>
</body>
"""
# ╔═╡ b18c2afe-855c-4dd4-858d-f50a8cdd92fd
begin
# Download the Stanford bunny PLY and do some data manipulation.
bunny_file = download("https://raw.githubusercontent.com/naucoin/VTKData/master/Data/bunny.ply")
bunny_ply = load_ply(bunny_file)
# Vertices as a 3×|V| matrix; faces as 3×|F| (PLY is 0-indexed, Julia 1-indexed).
V = stack(Array(bunny_ply["vertex"][i]) for i in ["x", "y", "z"])'
F = stack(Array(bunny_ply["face"]["vertex_indices"])) .+ 1
# Collect vertices that no face references, then drop them from V.
not_in = Set{Int}()
n = size(V,2)
for i=1:n
!(i in F) && push!(not_in, i)
end
V = V[:,setdiff(1:n, not_in)]
n = size(V, 2)
# Compact the face indices: shift every surviving index down by the number of
# removed vertices below it (indices in `not_in` never occur in F).
F = map(F) do i
!(i in not_in) && return i - sum(i .> not_in)
return i
end
end
# ╔═╡ 860530d1-3f6c-4774-91be-01b7aec16f91
begin
# Set up the two Makie 3D axes (heat on the left, geodesic distance on the
# right) plus the Observables that the interactive cells below update.
# NOTE(review): `resolution` is deprecated in recent Makie in favour of
# `size` — confirm against the pinned Makie version.
fig = Figure(resolution=(2500,900))
ax1 = Axis3(fig[1,1], viewmode=:fit, aspect = (1, 1, 1), azimuth=0, elevation=0)
hidespines!(ax1)
hidedecorations!(ax1)
ax2 = Axis3(fig[1,2], viewmode=:fit, aspect= (1,1,1), azimuth=0, elevation =0)
hidespines!(ax2)
hidedecorations!(ax2)
# Discrete operators for the bunny mesh.
L = cotlaplacian(V,F)
A = vertex_area(V,F)
# Initial heat: a unit impulse at vertex 1.
u0 = zeros(size(L,1))
u0[1] = 1.0
heat = Observable(u0)
∇_bunny = face_grad(V,F);
Δ_bunny = div(V,F)
# Every 20th face center is used as an anchor for the gradient arrows.
anchor_points = face_centers(V,F)[:,1:20:end]
show_gradients = Observable(true)
heat_gradx = Observable(zeros(ceil(Int, size(F,2)/20)))
heat_grady = Observable(zeros(ceil(Int, size(F,2)/20)))
heat_gradz = Observable(zeros(ceil(Int, size(F,2)/20)))
geodesics = Observable(zeros(size(V,2)))
end
# ╔═╡ ffdbb945-0e85-409a-9bd7-a5224f2724f9
md"""
Drag the sliders and bunnies.
"""
# ╔═╡ cbc43250-5168-4211-a92f-4f99209b07a0
md"""t $(@bind t_bunny PlutoUI.Slider(1:200, default=10, show_value=true)) v1 $(@bind v1 PlutoUI.Slider(1:1000, default=488, show_value=true)) v2 $(@bind v2 PlutoUI.Slider(1:1000, default=488, show_value=true)) v3 $(@bind v3 PlutoUI.Slider(1:1000, default=488, show_value=true))"""
# ╔═╡ 450ad839-16ce-406e-9269-665dba06937e
md"""Show Gradient Field$(@bind has_grad PlutoUI.CheckBox())"""
# ╔═╡ 273a2353-32c6-4509-aa39-d94e45907000
let
# Draw the bunny twice: heat colours plus gradient arrows on ax1, geodesic
# distance (prism colormap) on ax2. Coordinate axes are permuted — presumably
# to orient the model upright, with the face winding flipped to match; confirm.
mesh!(ax1,V[[3,1,2],:]',F[[2,1,3],:]', color = heat)
arrows!(ax1, anchor_points[3,:],anchor_points[1,:],anchor_points[2,:], heat_gradz, heat_gradx, heat_grady; linewidth=0.001, arrowsize=0.001, lengthscale=0.001, arrowcolor=:red, linecolor=:red)
mesh!(ax2,V[[3,1,2],:]',F[[2,1,3],:]', color = geodesics, colormap=:prism)
fig
end
# ╔═╡ 92915d09-067f-4ea6-a6a9-0f519c9ea84d
md"""
# Appendix
"""
# ╔═╡ 7d9c0d8d-e52f-44b9-ae77-bbda953c498c
md"""
### Handwriting DiffEq.jl