forked from taichi-dev/taichi
-
Notifications
You must be signed in to change notification settings - Fork 0
/
async_advection.py
126 lines (95 loc) · 3 KB
/
async_advection.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import math
from utils import benchmark_async
import taichi as ti
# TODO: staggerred grid
@benchmark_async
def simple_advection(scale):
    """Benchmark semi-Lagrangian advection of an RGB image on an n x n grid.

    Builds a color field `x` initialized from the Taichi logo, a fixed
    rotational velocity field `v`, and repeatedly advects `x` through `v`
    using RK3 backtracing + bilinear sampling, timing the kernels with
    ``ti.benchmark``.

    Args:
        scale: benchmark size factor; the grid side doubles each time
            `scale` quadruples (see the `n` formula below).
            NOTE(review): supplied by the `benchmark_async` decorator from
            utils — presumably called with several scales; verify there.
    """
    # Grid resolution: 256 * 2**floor(log2(scale)/2), i.e. n grows as
    # roughly sqrt(scale) in powers of two.
    n = 256 * 2**int((math.log(scale, 2)) // 2)
    # x / new_x: RGB color fields (double-buffered); v: 2D velocity field.
    x = ti.Vector.field(3, dtype=ti.f32, shape=(n, n))
    new_x = ti.Vector.field(3, dtype=ti.f32, shape=(n, n))
    v = ti.Vector.field(2, dtype=ti.f32, shape=(n, n))
    dx = 1 / n
    inv_dx = 1 / dx
    dt = 0.01

    # Sample positions sit at cell centers (offset half a cell in each axis).
    # The file-level TODO suggests a staggered grid was planned but this
    # version uses the same collocated offset for every field.
    stagger = ti.Vector([0.5, 0.5])

    # NOTE(review): unused duplicate of `vec` below; kept for byte-identity.
    @ti.func
    def Vector2(x, y):
        return ti.Vector([x, y])

    @ti.kernel
    def init():
        # Rotational velocity about the grid center: (j/n - 0.5, 0.5 - i/n).
        for i, j in v:
            v[i, j] = ti.Vector([j / n - 0.5, 0.5 - i / n])

        # 4x4 supersampling of the logo into each cell; each of the 16
        # subsamples contributes ret/16 to all three color channels.
        for i, j in ti.ndrange(n * 4, n * 4):
            ret = ti.taichi_logo(ti.Vector([i, j]) / (n * 4))
            x[i // 4, j // 4][0] += ret / 16
            x[i // 4, j // 4][1] += ret / 16
            x[i // 4, j // 4][2] += ret / 16

    # Small constructor helper so kernels can build integer offset vectors.
    @ti.func
    def vec(x, y):
        return ti.Vector([x, y])

    @ti.func
    def clamp(p):
        # Clamp a world-space position into the valid sampling range so the
        # bilinear stencil below never indexes outside the field; 1e-4 keeps
        # the floor() of the upper bound strictly inside the last cell.
        for d in ti.static(range(p.n)):
            p[d] = min(1 - 1e-4 - dx + stagger[d] * dx,
                       max(p[d], stagger[d] * dx))
        return p

    @ti.func
    def sample_bilinear(x, p):
        # Bilinear interpolation of field `x` at world position `p`.
        # (`x` here shadows the outer color field on purpose: this helper is
        # used for both the color and velocity fields.)
        p = clamp(p)
        p_grid = p * inv_dx - stagger  # continuous grid-space coordinate
        I = ti.cast(ti.floor(p_grid), ti.i32)  # lower-left stencil corner
        f = p_grid - I  # fractional part: interpolation weights
        g = 1 - f
        # Weighted sum over the 2x2 stencil around I.
        return x[I] * (g[0] * g[1]) + x[I + vec(1, 0)] * (f[0] * g[1]) + x[
            I + vec(0, 1)] * (g[0] * f[1]) + x[I + vec(1, 1)] * (f[0] * f[1])

    @ti.func
    def velocity(p):
        # Velocity field sampled at an arbitrary world position.
        return sample_bilinear(v, p)

    # NOTE(review): sample_min/sample_max are unused in this benchmark
    # (typically used for a MacCormack clipping step); kept for byte-identity.
    @ti.func
    def sample_min(x, p):
        p = clamp(p)
        p_grid = p * inv_dx - stagger
        I = ti.cast(ti.floor(p_grid), ti.i32)
        return min(x[I], x[I + vec(1, 0)], x[I + vec(0, 1)], x[I + vec(1, 1)])

    @ti.func
    def sample_max(x, p):
        p = clamp(p)
        p_grid = p * inv_dx - stagger
        I = ti.cast(ti.floor(p_grid), ti.i32)
        return max(x[I], x[I + vec(1, 0)], x[I + vec(0, 1)], x[I + vec(1, 1)])

    @ti.func
    def backtrace(I, dt):  # RK3
        # Trace the characteristic backwards from cell I over one time step
        # using third-order Runge-Kutta (Ralston coefficients 2/9, 1/3, 4/9).
        p = (I + stagger) * dx
        v1 = velocity(p)
        p1 = p - 0.5 * dt * v1
        v2 = velocity(p1)
        p2 = p - 0.75 * dt * v2
        v3 = velocity(p2)
        p -= dt * (2 / 9 * v1 + 1 / 3 * v2 + 4 / 9 * v3)
        return p

    @ti.func
    def semi_lagrangian(x, new_x, dt):
        # Classic semi-Lagrangian step: sample the old field at the
        # backtraced position of every cell.
        for I in ti.grouped(x):
            new_x[I] = sample_bilinear(x, backtrace(I, dt))

    @ti.kernel
    def advect():
        # x(0)/x(1)/x(2) select the per-component scalar fields of the RGB
        # vector field, so each channel is advected independently.
        # NOTE(review): this is legacy Taichi component-access syntax —
        # confirm against the Taichi version this repo pins.
        semi_lagrangian(x(0), new_x(0), dt)
        semi_lagrangian(x(1), new_x(1), dt)
        semi_lagrangian(x(2), new_x(2), dt)

        # Copy the double buffer back into x.
        for I in ti.grouped(x):
            x[I] = new_x[I]

    init()

    def task():
        # One benchmark iteration = 10 advection steps.
        for i in range(10):
            advect()

    ti.benchmark(task, repeat=100)

    # Optional interactive view; disabled so the benchmark stays headless.
    visualize = False

    if visualize:
        gui = ti.GUI('Advection schemes', (n, n))

        for i in range(10):
            for _ in range(10):
                advect()
            gui.set_image(x.to_numpy())
            gui.show()
# Script entry point: run the advection benchmark directly.
if __name__ == "__main__":
    simple_advection()