19 #include <xmmintrin.h>
// Lightweight logging macros: prefix the message with a severity tag plus the
// source file and line, then stream the user expression into std::cout.
// `exp` may be a chain of <<-able terms (e.g. LogDebug("x = " << x << " y")),
// so it is intentionally NOT parenthesized.  No newline or flush is appended;
// callers are expected to supply "\n" / std::endl themselves.
// Fix: removed the stray pasted line-number tokens that preceded each
// directive and made them invalid preprocessor input.
#define LogDebug(exp) std::cout << "DEBUG: " << __FILE__ << ": " << __LINE__ << ": " << exp
#define LogError(exp) std::cout << "ERROR: " << __FILE__ << ": " << __LINE__ << ": " << exp
#define LogWarning(exp) std::cout << "WARNING: " << __FILE__ << ": " << __LINE__ << ": " << exp
28 using namespace Eigen;
29 using namespace SeamStress;
// NOTE(review): line-sampled fragment of a hitTriplet-style struct.  The
// constructor signature and the operator declarations fall in lines missing
// from this extraction, and the stray leading integers are paste artifacts
// (original source line numbers), not code.
// Constructor tail: binds the three hit ids, the owning track index, and the
// track's chi2 into one record.
34 unsigned int t,
float c) :
35 hit1(h1), hit2(h2), hit3(h3),
track(t), chi2(c) {
// operator<: lexicographic ordering on (hit1, hit2, hit3) — first on hit1,
// then hit2 among equal hit1, then hit3 among equal (hit1, hit2).  Sorting by
// this puts identical triplets adjacent for the duplicate sweep later in the
// file.
41 return (hit1 < other.
hit1)
42 || ((hit2 < other.
hit2) && (hit1 == other.
hit1))
43 || ((hit3 < other.
hit3) && (hit1 == other.
hit1)
44 && (hit2 == other.
hit2));
// operator==: equal iff all three hit ids match; track and chi2 are not
// compared.
48 return ((hit1 == other.
hit1) && (hit2 == other.
hit2)
49 && (hit3 == other.
hit3));
// Data members: three hit ids plus the index of the owning track candidate
// (chi2 member presumably declared on a missing line — TODO confirm).
52 unsigned int hit1, hit2, hit3,
track;
// NOTE(review): this span (original lines ~59-77) is textually identical to
// the triplet struct above (~34-52) — presumably a second triplet type for a
// different tracker variant; the distinguishing struct name falls on a line
// missing from this extraction.  TODO confirm against the full source; if the
// two really are identical, one definition should be shared.
// Constructor tail: binds hit ids, owning track index, and chi2.
59 unsigned int t,
float c) :
60 hit1(h1), hit2(h2), hit3(h3),
track(t), chi2(c) {
// operator<: lexicographic ordering on (hit1, hit2, hit3).
66 return (hit1 < other.
hit1)
67 || ((hit2 < other.
hit2) && (hit1 == other.
hit1))
68 || ((hit3 < other.
hit3) && (hit1 == other.
hit1)
69 && (hit2 == other.
hit2));
// operator==: equal iff all three hit ids match; track/chi2 not compared.
73 return ((hit1 == other.
hit1) && (hit2 == other.
hit2)
74 && (hit3 == other.
hit3));
// Data members: hit ids plus owning track index.
77 unsigned int hit1, hit2, hit3,
track;
82 vector<SimpleTrack3D>& , vector<bool>& usetrack,
83 vector<float>& next_best_chi2) {
85 vector<hitTriplet> trips;
86 for (
unsigned int i = 0; i < input.size(); ++i) {
87 for (
unsigned int h1 = 0;
h1 < input[i].hits.size(); ++
h1) {
88 for (
unsigned int h2 = (
h1 + 1);
h2 < input[i].hits.size(); ++
h2) {
89 for (
unsigned int h3 = (
h2 + 1);
h3 < input[i].hits.size(); ++
h3) {
94 input[i].
hits[
h3].get_id(), i, track_states[i].chi2));
100 if (trips.size() == 0) {
103 sort(trips.begin(), trips.end());
104 unsigned int pos = 0;
107 while (pos < trips.size()) {
108 unsigned int next_pos = pos + 1;
109 if (next_pos >= trips.size()) {
112 while (trips[pos] == trips[next_pos]) {
114 if (next_pos >= trips.size()) {
118 if ((next_pos - pos) > 1) {
119 float best_chi2 = trips[
pos].chi2;
120 float next_chi2 = trips[pos + 1].chi2;
121 unsigned int best_pos =
pos;
122 for (
unsigned int i = (pos + 1); i < next_pos; ++i) {
124 input[trips[best_pos].track].hits.size()) {
126 }
else if ((input[trips[i].
track].
hits.size() >
127 input[trips[best_pos].track].hits.size()) ||
128 (input[trips[i].
track].
hits.back().get_layer() >
129 input[trips[best_pos].track].hits.back().get_layer())) {
130 next_chi2 = best_chi2;
131 best_chi2 = trips[i].chi2;
135 if ((trips[i].chi2 < best_chi2) ||
136 (usetrack[trips[best_pos].track] ==
false)) {
137 next_chi2 = best_chi2;
138 best_chi2 = trips[i].chi2;
140 }
else if (trips[i].chi2 < next_chi2) {
141 next_chi2 = trips[i].chi2;
144 for (
unsigned int i = pos; i < next_pos; ++i) {
146 usetrack[trips[i].track] =
false;
148 next_best_chi2[trips[i].track] = next_chi2;
159 unsigned int n_k,
unsigned int n_dzdl,
164 vector<float>&
radius,
float Bfield)
165 :
HelixHough(n_phi, n_d, n_k, n_dzdl, n_z0, min_resolution, max_resolution,
167 fast_chi2_cut_par0(12.),
168 fast_chi2_cut_par1(0.),
171 chi2_removal_cut(1.),
181 reject_ghosts(
false),
190 vector<float> detector_material;
192 for (
unsigned int i = 0; i < radius.size(); ++i) {
195 for (
unsigned int i = 0; i < material.size(); ++i) {
197 sqrt(3. * material[i]));
198 detector_material.push_back(3. * material[i]);
204 float total_scatter_2 = 0.;
213 vector<SimpleHit3D> one_layer;
215 for (
unsigned int i = 0; i < 4; ++i) {
224 float Bfield,
bool parallel,
225 unsigned int num_threads)
227 fast_chi2_cut_par0(12.),
228 fast_chi2_cut_par1(0.),
231 chi2_removal_cut(1.),
241 reject_ghosts(
false),
248 nthreads(num_threads),
251 is_parallel(parallel),
255 vector<float> detector_material;
257 for (
unsigned int i = 0; i < radius.size(); ++i) {
260 for (
unsigned int i = 0; i < material.size(); ++i) {
262 1.41421356237309515 * 0.0136 * sqrt(3. * material[i]));
263 detector_material.push_back(3. * material[i]);
269 float total_scatter_2 = 0.;
278 vector<SimpleHit3D> one_layer;
280 for (
unsigned int i = 0; i < 4; ++i) {
286 Seamstress::init_vector(num_threads,
vss);
288 vssp =
new vector<Seamstress*>();
289 for (
unsigned int i = 0; i <
vss.size(); i++) {
295 vector<vector<unsigned int> > zoom_profile_new;
296 for (
unsigned int i = 1; i < zoom_profile.size(); ++i) {
297 zoom_profile_new.push_back(zoom_profile[i]);
300 for (
unsigned int i = 0; i <
nthreads; ++i) {
303 material, radius, Bfield));
319 for (
unsigned int i = 0; i <
vss.size(); i++) {
408 float x0 = track.
d * cos(track.
phi);
409 float y0 = track.
d * sin(track.
phi);
414 (1. + track.
kappa * track.
d) * cos(track.
phi) - track.
kappa * x0);
417 float cosphi = cos(phi2);
418 float sinphi = sin(phi2);
419 float tx = cosphi + track.
kappa * (x0 - vx);
420 float ty = sinphi + track.
kappa * (y0 - vy);
421 float dk = sqrt(tx * tx + ty * ty) - 1.;
422 if (track.
kappa == 0.) {
423 d_out = (x0 - vx) * cosphi + (y0 - vy) * sinphi;
425 d_out = dk / track.
kappa;
431 vector<SimpleTrack3D>& output) {
434 for (
unsigned int i = 0; i < input.size(); ++i) {
435 output.push_back(input[i]);
440 unsigned int nt = input.size();
441 vector<bool> usetrack;
442 usetrack.assign(input.size(),
true);
443 vector<float> next_best_chi2;
444 next_best_chi2.assign(input.size(), 99999.);
452 vector<HelixKalmanState> states_new;
454 for (
unsigned int i = 0; i < nt; ++i) {
455 if (usetrack[i] ==
true) {
460 output.push_back(input[i]);
461 output.back().index = (output.size() - 1);
471 for (
unsigned int i = 0; i < output.size(); ++i) {
482 for (
int h = (output[i].
hits.size() - 1);
h >= 0; --
h) {
484 float err_scale = 1.;
510 cout <<
"# fits = " <<
nfits << endl;
511 cout <<
"findTracks called " <<
findtracksiter <<
" times" << endl;
512 cout <<
"CAtime = " <<
CAtime << endl;
513 cout <<
"KALime = " <<
KALtime << endl;
518 vector<SimpleTrack3D>& tracks,
const HelixRange& range) {
529 unsigned int layer_mask[4] = { 0, 0, 0, 0 };
530 for (
unsigned int i = 0; i < hits.size(); ++i) {
531 if (hits[i].get_layer() < 32) {
532 layer_mask[0] = layer_mask[0] | (1 << hits[i].get_layer());
533 }
else if (hits[i].get_layer() < 64) {
534 layer_mask[1] = layer_mask[1] | (1 << (hits[i].get_layer() - 32));
535 }
else if (hits[i].get_layer() < 96) {
536 layer_mask[2] = layer_mask[2] | (1 << (hits[i].get_layer() - 64));
537 }
else if (hits[i].get_layer() < 128) {
538 layer_mask[3] = layer_mask[3] | (1 << (hits[i].get_layer() - 96));
541 unsigned int nlayers = __builtin_popcount(layer_mask[0])
542 + __builtin_popcount(layer_mask[1])
543 + __builtin_popcount(layer_mask[2])
544 + __builtin_popcount(layer_mask[3]);
550 float min_d,
float max_d,
float ,
float ,
float ,
551 float max_dzdl,
bool pairvoting) {
560 prev_p_inv = 3.33333333333333314e+02 * max_k * Bfield_inv
561 * sqrt(1. - max_dzdl * max_dzdl);
564 float total_scatter_2 = 0.;
569 total_scatter_2 += this_scatter * this_scatter;
571 float angle = p_inv * sqrt(total_scatter_2) * 1.0;
572 float dsize = 0.5 * (max_d - min_d);
574 float returnval = 0.;
575 if (pairvoting ==
false) {
576 if (angle_from_d > angle) {
579 returnval = (angle - angle_from_d);
589 float ,
float ,
float min_z0,
float max_z0,
float ,
590 float max_dzdl,
bool pairvoting) {
599 prev_p_inv = 3.33333333333333314e+02 * max_k * Bfield_inv
600 * sqrt(1. - max_dzdl * max_dzdl);
603 float total_scatter_2 = 0.;
608 total_scatter_2 += this_scatter * this_scatter;
610 float angle = p_inv * sqrt(total_scatter_2) * 1.0;
611 float z0size = 0.5 * (max_z0 - min_z0);
613 float returnval = 0.;
614 if (pairvoting ==
false) {
615 if (angle_from_z0 > angle) {
618 returnval = (angle - angle_from_z0);
631 float dphi = 2. * sqrt(state->
C(0, 0));
632 float dd = 2. * sqrt(state->
C(1, 1));
633 float dk = 2. * state->
C(2, 2);
634 float dz0 = 2. * sqrt(state->
C(3, 3));
635 float ddzdl = 2. * sqrt(state->
C(4, 4));
645 range.
min_d = seed.
d - dd;
646 range.
max_d = seed.
d + dd;
649 if (range.
min_k < 0.) {
663 vector<float> chi2_hit;
668 vector<float>& chi2_hit) {
671 vector<float> xyres_inv;
673 vector<float> zres_inv;
674 for (
unsigned int i = 0; i < track.
hits.size(); i++) {
680 (0.5 * sqrt(12.) * sqrt(track.
hits[i].get_size(0, 0)))
682 * sqrt(track.
hits[i].get_size(0, 0)))
684 * sqrt(track.
hits[i].get_size(1, 1)))
687 track.
hits[i].get_size(
689 xyres_inv.push_back(1. / xyres.back());
690 zres.push_back((0.5 * sqrt(12.) * sqrt(track.
hits[i].get_size(2, 2))));
691 zres_inv.push_back(1. / zres.back());
694 chi2_hit.resize(track.
hits.size(), 0.);
696 MatrixXf
y = MatrixXf::Zero(track.
hits.size(), 1);
697 for (
unsigned int i = 0; i < track.
hits.size(); i++) {
699 (pow(track.
hits[i].get_x(), 2) + pow(track.
hits[i].get_y(), 2));
700 y(i, 0) *= xyres_inv[i];
703 MatrixXf
X = MatrixXf::Zero(track.
hits.size(), 3);
704 for (
unsigned int i = 0; i < track.
hits.size(); i++) {
705 X(i, 0) = track.
hits[i].get_x();
706 X(i, 1) = track.
hits[i].get_y();
708 X(i, 0) *= xyres_inv[i];
709 X(i, 1) *= xyres_inv[i];
710 X(i, 2) *= xyres_inv[i];
713 MatrixXf Xt = X.transpose();
715 MatrixXf prod = Xt *
X;
717 MatrixXf Xty = Xt *
y;
718 MatrixXf beta = prod.ldlt().solve(Xty);
720 float cx = beta(0, 0) * 0.5;
721 float cy = beta(1, 0) * 0.5;
722 float r = sqrt(cx * cx + cy * cy - beta(2, 0));
724 float phi = atan2(cy, cx);
725 float d = sqrt(cx * cx + cy * cy) -
r;
728 MatrixXf diff = y - (X * beta);
729 MatrixXf chi2 = (diff.transpose()) * diff;
731 float dx = d * cos(phi);
732 float dy = d * sin(phi);
734 MatrixXf
y2 = MatrixXf::Zero(track.
hits.size(), 1);
735 for (
unsigned int i = 0; i < track.
hits.size(); i++) {
736 y2(i, 0) = track.
hits[i].get_z();
737 y2(i, 0) *= zres_inv[i];
740 MatrixXf
X2 = MatrixXf::Zero(track.
hits.size(), 2);
741 for (
unsigned int i = 0; i < track.
hits.size(); i++) {
743 pow(dx - track.
hits[i].get_x(), 2)
744 + pow(dy - track.
hits[i].get_y(), 2));
747 if (0.5 * k * D > 0.1) {
748 float v = 0.5 * k *
D;
752 s = 2. * asin(v) /
k;
754 float temp1 = k * D * 0.5;
756 float temp2 = D * 0.5;
761 s += (3. / 20.) *
temp2;
763 s += (5. / 56.) *
temp2;
769 X2(i, 0) *= zres_inv[i];
770 X2(i, 1) *= zres_inv[i];
773 MatrixXf Xt2 = X2.transpose();
774 MatrixXf prod2 = Xt2 *
X2;
776 MatrixXf Xty2 = Xt2 *
y2;
777 MatrixXf beta2 = prod2.ldlt().solve(Xty2);
779 MatrixXf diff2 = y2 - (X2 * beta2);
780 MatrixXf chi2_z = (diff2.transpose()) * diff2;
782 float z0 = beta2(1, 0);
783 float dzdl = beta2(0, 0) / sqrt(1. + beta2(0, 0) * beta2(0, 0));
791 if (track.
kappa != 0.) {
792 r = 1. / track.
kappa;
797 cx = (track.
d +
r) * cos(track.
phi);
798 cy = (track.
d +
r) * sin(track.
phi);
801 for (
unsigned int h = 0;
h < track.
hits.size();
h++) {
802 float dx1 = track.
hits[
h].get_x() - cx;
803 float dy1 = track.
hits[
h].get_y() - cy;
805 float dx2 = track.
hits[
h].get_x() + cx;
806 float dy2 = track.
hits[
h].get_y() + cy;
808 float xydiff1 = sqrt(dx1 * dx1 + dy1 * dy1) -
r;
809 float xydiff2 = sqrt(dx2 * dx2 + dy2 * dy2) -
r;
810 float xydiff = xydiff2;
811 if (fabs(xydiff1) < fabs(xydiff2)) {
815 float ls_xy = xyres[
h];
818 chi2_hit[
h] += xydiff * xydiff / (ls_xy * ls_xy);
819 chi2_hit[
h] += diff2(
h, 0) * diff2(
h, 0);
821 chi2_tot += chi2_hit[
h];
824 unsigned int deg_of_freedom = 2 * track.
hits.size() - 5;
826 return (chi2_tot) / ((float) (deg_of_freedom));
830 float* z1_a,
float* x2_a,
float* y2_a,
float* z2_a,
float* x3_a,
831 float* y3_a,
float* z3_a,
float* dx1_a,
float* dy1_a,
float* dz1_a,
832 float* dx2_a,
float* dy2_a,
float* dz2_a,
float* dx3_a,
float* dy3_a,
833 float* dz3_a,
float* kappa_a,
float* dkappa_a,
float* ux_mid_a,
834 float* uy_mid_a,
float* ux_end_a,
float* uy_end_a,
float* dzdl_1_a,
835 float* dzdl_2_a,
float* ddzdl_1_a,
float* ddzdl_2_a) {
836 static const __m128
two = { 2., 2., 2., 2. };
838 __m128
x1 = _mm_load_ps(x1_a);
839 __m128
x2 = _mm_load_ps(x2_a);
840 __m128
x3 = _mm_load_ps(x3_a);
841 __m128
y1 = _mm_load_ps(y1_a);
842 __m128
y2 = _mm_load_ps(y2_a);
843 __m128
y3 = _mm_load_ps(y3_a);
844 __m128
z1 = _mm_load_ps(z1_a);
845 __m128
z2 = _mm_load_ps(z2_a);
846 __m128 z3 = _mm_load_ps(z3_a);
848 __m128 dx1 = _mm_load_ps(dx1_a);
849 __m128 dx2 = _mm_load_ps(dx2_a);
850 __m128 dx3 = _mm_load_ps(dx3_a);
851 __m128 dy1 = _mm_load_ps(dy1_a);
852 __m128 dy2 = _mm_load_ps(dy2_a);
853 __m128 dy3 = _mm_load_ps(dy3_a);
854 __m128 dz1 = _mm_load_ps(dz1_a);
855 __m128
dz2 = _mm_load_ps(dz2_a);
856 __m128 dz3 = _mm_load_ps(dz3_a);
858 __m128 D12 = _mm_sub_ps(x2, x1);
859 D12 = _mm_mul_ps(D12, D12);
860 __m128
tmp1 = _mm_sub_ps(y2, y1);
861 tmp1 = _mm_mul_ps(tmp1, tmp1);
862 D12 = _mm_add_ps(D12, tmp1);
863 D12 = _vec_sqrt_ps(D12);
865 __m128 D23 = _mm_sub_ps(x3, x2);
866 D23 = _mm_mul_ps(D23, D23);
867 tmp1 = _mm_sub_ps(y3, y2);
868 tmp1 = _mm_mul_ps(tmp1, tmp1);
869 D23 = _mm_add_ps(D23, tmp1);
870 D23 = _vec_sqrt_ps(D23);
872 __m128 D31 = _mm_sub_ps(x1, x3);
873 D31 = _mm_mul_ps(D31, D31);
874 tmp1 = _mm_sub_ps(y1, y3);
875 tmp1 = _mm_mul_ps(tmp1, tmp1);
876 D31 = _mm_add_ps(D31, tmp1);
877 D31 = _vec_sqrt_ps(D31);
879 __m128
k = _mm_mul_ps(D12, D23);
880 k = _mm_mul_ps(k, D31);
882 tmp1 = (D12 + D23 + D31) * (D23 + D31 - D12) * (D12 + D31 - D23)
884 tmp1 = _vec_sqrt_ps(tmp1);
887 __m128
tmp2 = _mm_cmpgt_ps(tmp1,
zero);
888 tmp1 = _mm_and_ps(tmp2, k);
889 tmp2 = _mm_andnot_ps(tmp2,
zero);
890 k = _mm_xor_ps(tmp1, tmp2);
892 _mm_store_ps(kappa_a, k);
893 __m128 k_inv = _vec_rec_ps(k);
895 __m128 D12_inv = _vec_rec_ps(D12);
896 __m128 D23_inv = _vec_rec_ps(D23);
897 __m128 D31_inv = _vec_rec_ps(D31);
899 __m128 dr1 = dx1 * dx1 + dy1 * dy1;
900 dr1 = _vec_sqrt_ps(dr1);
901 __m128 dr2 = dx2 * dx2 + dy2 * dy2;
902 dr2 = _vec_sqrt_ps(dr2);
903 __m128 dr3 = dx3 * dx3 + dy3 * dy3;
904 dr3 = _vec_sqrt_ps(dr3);
906 __m128 dk1 = (dr1 + dr2) * D12_inv * D12_inv;
907 __m128 dk2 = (dr2 + dr3) * D23_inv * D23_inv;
908 __m128 dk = dk1 + dk2;
909 _mm_store_ps(dkappa_a, dk);
911 __m128 ux12 = (x2 -
x1) * D12_inv;
912 __m128 uy12 = (y2 -
y1) * D12_inv;
913 __m128 ux23 = (x3 -
x2) * D23_inv;
914 __m128 uy23 = (y3 -
y2) * D23_inv;
915 __m128 ux13 = (x3 -
x1) * D31_inv;
916 __m128 uy13 = (y3 -
y1) * D31_inv;
918 __m128 cosalpha = ux12 * ux13 + uy12 * uy13;
919 __m128 sinalpha = ux13 * uy12 - ux12 * uy13;
921 __m128 ux_mid = ux23 * cosalpha - uy23 * sinalpha;
922 __m128 uy_mid = ux23 * sinalpha + uy23 * cosalpha;
923 _mm_store_ps(ux_mid_a, ux_mid);
924 _mm_store_ps(uy_mid_a, uy_mid);
926 __m128 ux_end = ux23 * cosalpha + uy23 * sinalpha;
927 __m128 uy_end = uy23 * cosalpha - ux23 * sinalpha;
929 _mm_store_ps(ux_end_a, ux_end);
930 _mm_store_ps(uy_end_a, uy_end);
933 __m128
v =
one - sinalpha * sinalpha;
938 __m128 s2 = _vec_atan_ps(v);
941 tmp1 = _mm_cmpgt_ps(k,
zero);
942 tmp2 = _mm_and_ps(tmp1, s2);
943 tmp1 = _mm_andnot_ps(tmp1, D23);
944 s2 = _mm_xor_ps(tmp1, tmp2);
948 __m128 del_z_2 = z3 -
z2;
949 __m128 dzdl_2 = s2 * s2 + del_z_2 * del_z_2;
950 dzdl_2 = _vec_rsqrt_ps(dzdl_2);
952 __m128 ddzdl_2 = (dz2 + dz3) * D23_inv;
953 _mm_store_ps(dzdl_2_a, dzdl_2);
954 _mm_store_ps(ddzdl_2_a, ddzdl_2);
956 sinalpha = ux13 * uy23 - ux23 * uy13;
957 v =
one - sinalpha * sinalpha;
962 __m128
s1 = _vec_atan_ps(v);
965 tmp1 = _mm_cmpgt_ps(k,
zero);
966 tmp2 = _mm_and_ps(tmp1, s1);
967 tmp1 = _mm_andnot_ps(tmp1, D12);
968 s1 = _mm_xor_ps(tmp1, tmp2);
970 __m128 del_z_1 = z2 -
z1;
971 __m128 dzdl_1 = s1 * s1 + del_z_1 * del_z_1;
972 dzdl_1 = _vec_rsqrt_ps(dzdl_1);
974 __m128 ddzdl_1 = (dz1 +
dz2) * D12_inv;
975 _mm_store_ps(dzdl_1_a, dzdl_1);
976 _mm_store_ps(ddzdl_1_a, ddzdl_1);
980 float* z1_a,
float* x2_a,
float* y2_a,
float* z2_a,
float* x3_a,
981 float* y3_a,
float* z3_a,
float* dx1_a,
float* dy1_a,
float* dz1_a,
982 float* dx2_a,
float* dy2_a,
float* dz2_a,
float* dx3_a,
float* dy3_a,
983 float* dz3_a,
float* kappa_a,
float* dkappa_a,
float* ux_mid_a,
984 float* uy_mid_a,
float* ux_end_a,
float* uy_end_a,
float* dzdl_1_a,
985 float* dzdl_2_a,
float* ddzdl_1_a,
float* ddzdl_2_a,
float sinang_cut,
986 float cosang_diff_inv,
float* cur_kappa_a,
float* cur_dkappa_a,
987 float* cur_ux_a,
float* cur_uy_a,
float* cur_chi2_a,
float* chi2_a) {
988 static const __m128
two = { 2., 2., 2., 2. };
990 __m128
x1 = _mm_load_ps(x1_a);
991 __m128
x2 = _mm_load_ps(x2_a);
992 __m128
x3 = _mm_load_ps(x3_a);
993 __m128
y1 = _mm_load_ps(y1_a);
994 __m128
y2 = _mm_load_ps(y2_a);
995 __m128
y3 = _mm_load_ps(y3_a);
996 __m128
z1 = _mm_load_ps(z1_a);
997 __m128
z2 = _mm_load_ps(z2_a);
998 __m128 z3 = _mm_load_ps(z3_a);
1000 __m128 dx1 = _mm_load_ps(dx1_a);
1001 __m128 dx2 = _mm_load_ps(dx2_a);
1002 __m128 dx3 = _mm_load_ps(dx3_a);
1003 __m128 dy1 = _mm_load_ps(dy1_a);
1004 __m128 dy2 = _mm_load_ps(dy2_a);
1005 __m128 dy3 = _mm_load_ps(dy3_a);
1006 __m128 dz1 = _mm_load_ps(dz1_a);
1007 __m128
dz2 = _mm_load_ps(dz2_a);
1008 __m128 dz3 = _mm_load_ps(dz3_a);
1010 __m128 D12 = _mm_sub_ps(x2, x1);
1011 D12 = _mm_mul_ps(D12, D12);
1012 __m128
tmp1 = _mm_sub_ps(y2, y1);
1013 tmp1 = _mm_mul_ps(tmp1, tmp1);
1014 D12 = _mm_add_ps(D12, tmp1);
1015 D12 = _vec_sqrt_ps(D12);
1017 __m128 D23 = _mm_sub_ps(x3, x2);
1018 D23 = _mm_mul_ps(D23, D23);
1019 tmp1 = _mm_sub_ps(y3, y2);
1020 tmp1 = _mm_mul_ps(tmp1, tmp1);
1021 D23 = _mm_add_ps(D23, tmp1);
1022 D23 = _vec_sqrt_ps(D23);
1024 __m128 D31 = _mm_sub_ps(x1, x3);
1025 D31 = _mm_mul_ps(D31, D31);
1026 tmp1 = _mm_sub_ps(y1, y3);
1027 tmp1 = _mm_mul_ps(tmp1, tmp1);
1028 D31 = _mm_add_ps(D31, tmp1);
1029 D31 = _vec_sqrt_ps(D31);
1031 __m128
k = _mm_mul_ps(D12, D23);
1032 k = _mm_mul_ps(k, D31);
1034 tmp1 = (D12 + D23 + D31) * (D23 + D31 - D12) * (D12 + D31 - D23)
1035 * (D12 + D23 - D31);
1036 tmp1 = _vec_sqrt_ps(tmp1);
1039 __m128
tmp2 = _mm_cmpgt_ps(tmp1,
zero);
1040 tmp1 = _mm_and_ps(tmp2, k);
1041 tmp2 = _mm_andnot_ps(tmp2,
zero);
1042 k = _mm_xor_ps(tmp1, tmp2);
1044 _mm_store_ps(kappa_a, k);
1045 __m128 k_inv = _vec_rec_ps(k);
1047 __m128 D12_inv = _vec_rec_ps(D12);
1048 __m128 D23_inv = _vec_rec_ps(D23);
1049 __m128 D31_inv = _vec_rec_ps(D31);
1051 __m128 dr1 = dx1 * dx1 + dy1 * dy1;
1052 dr1 = _vec_sqrt_ps(dr1);
1053 __m128 dr2 = dx2 * dx2 + dy2 * dy2;
1054 dr2 = _vec_sqrt_ps(dr2);
1055 __m128 dr3 = dx3 * dx3 + dy3 * dy3;
1056 dr3 = _vec_sqrt_ps(dr3);
1058 __m128 dk1 = (dr1 + dr2) * D12_inv * D12_inv;
1059 __m128 dk2 = (dr2 + dr3) * D23_inv * D23_inv;
1060 __m128 dk = dk1 + dk2;
1061 _mm_store_ps(dkappa_a, dk);
1063 __m128 ux12 = (x2 -
x1) * D12_inv;
1064 __m128 uy12 = (y2 -
y1) * D12_inv;
1065 __m128 ux23 = (x3 -
x2) * D23_inv;
1066 __m128 uy23 = (y3 -
y2) * D23_inv;
1067 __m128 ux13 = (x3 -
x1) * D31_inv;
1068 __m128 uy13 = (y3 -
y1) * D31_inv;
1070 __m128 cosalpha = ux12 * ux13 + uy12 * uy13;
1071 __m128 sinalpha = ux13 * uy12 - ux12 * uy13;
1073 __m128 ux_mid = ux23 * cosalpha - uy23 * sinalpha;
1074 __m128 uy_mid = ux23 * sinalpha + uy23 * cosalpha;
1075 _mm_store_ps(ux_mid_a, ux_mid);
1076 _mm_store_ps(uy_mid_a, uy_mid);
1078 __m128 ux_end = ux23 * cosalpha + uy23 * sinalpha;
1079 __m128 uy_end = uy23 * cosalpha - ux23 * sinalpha;
1081 _mm_store_ps(ux_end_a, ux_end);
1082 _mm_store_ps(uy_end_a, uy_end);
1085 __m128
v =
one - sinalpha * sinalpha;
1086 v = _vec_sqrt_ps(v);
1090 __m128 s2 = _vec_atan_ps(v);
1093 tmp1 = _mm_cmpgt_ps(k,
zero);
1094 tmp2 = _mm_and_ps(tmp1, s2);
1095 tmp1 = _mm_andnot_ps(tmp1, D23);
1096 s2 = _mm_xor_ps(tmp1, tmp2);
1100 __m128 del_z_2 = z3 -
z2;
1101 __m128 dzdl_2 = s2 * s2 + del_z_2 * del_z_2;
1102 dzdl_2 = _vec_rsqrt_ps(dzdl_2);
1104 __m128 ddzdl_2 = (dz2 + dz3) * D23_inv;
1105 _mm_store_ps(dzdl_2_a, dzdl_2);
1106 _mm_store_ps(ddzdl_2_a, ddzdl_2);
1108 sinalpha = ux13 * uy23 - ux23 * uy13;
1109 v =
one - sinalpha * sinalpha;
1110 v = _vec_sqrt_ps(v);
1114 __m128
s1 = _vec_atan_ps(v);
1117 tmp1 = _mm_cmpgt_ps(k,
zero);
1118 tmp2 = _mm_and_ps(tmp1, s1);
1119 tmp1 = _mm_andnot_ps(tmp1, D12);
1120 s1 = _mm_xor_ps(tmp1, tmp2);
1122 __m128 del_z_1 = z2 -
z1;
1123 __m128 dzdl_1 = s1 * s1 + del_z_1 * del_z_1;
1124 dzdl_1 = _vec_rsqrt_ps(dzdl_1);
1126 __m128 ddzdl_1 = (dz1 +
dz2) * D12_inv;
1127 _mm_store_ps(dzdl_1_a, dzdl_1);
1128 _mm_store_ps(ddzdl_1_a, ddzdl_1);
1130 __m128 c_dk = _mm_load_ps(cur_dkappa_a);
1131 __m128 c_k = _mm_load_ps(cur_kappa_a);
1132 __m128 c_ux = _mm_load_ps(cur_ux_a);
1133 __m128 c_uy = _mm_load_ps(cur_uy_a);
1134 __m128 c_chi2 = _mm_load_ps(cur_chi2_a);
1135 __m128 sinang = _mm_load1_ps(&sinang_cut);
1136 __m128 cosdiff = _mm_load1_ps(&cosang_diff_inv);
1138 __m128 kdiff = c_k -
k;
1139 __m128 n_dk = c_dk + dk + sinang *
k;
1140 __m128 chi2_k = kdiff * kdiff / (n_dk * n_dk);
1141 __m128 cos_scatter = c_ux * ux_mid + c_uy * uy_mid;
1142 __m128 chi2_ang = (
one - cos_scatter) * (
one - cos_scatter) * cosdiff
1144 tmp1 = dzdl_1 * sinang;
1146 __m128 chi2_dzdl = (dzdl_1 - dzdl_2) / (ddzdl_1 + ddzdl_2 + tmp1);
1147 chi2_dzdl *= chi2_dzdl;
1150 __m128 n_chi2 = c_chi2 + chi2_ang + chi2_k + chi2_dzdl;
1151 _mm_store_ps(chi2_a, n_chi2);
1177 dummy_track.
d = 0.5 * (range.
min_d + range.
max_d);
1182 init_state.
nu = sqrt(dummy_track.
kappa);
1183 init_state.
phi = dummy_track.
phi;
1184 init_state.
d = dummy_track.
d;
1185 init_state.
dzdl = dummy_track.
dzdl;
1186 init_state.
z0 = dummy_track.
z0;
1188 init_state.
C = Matrix<float, 5, 5>::Zero(5, 5);
1190 init_state.
C(1, 1) = pow(range.
max_d - range.
min_d, 2.);
1191 init_state.
C(2, 2) = pow(10. * sqrt(range.
max_k - range.
min_k), 2.);
1194 init_state.
chi2 = 0.;
1196 init_state.
x_int = 0.;
1197 init_state.
y_int = 0.;
1198 init_state.
z_int = 0.;
1200 for (
unsigned int i = 0; i <
n_layers; ++i) {
1203 dummies[i].set_x(x);
1204 dummies[i].set_y(x);
1205 dummies[i].set_z(x);
1206 dummies[i].set_layer(i);
1283 vector<SimpleTrack3D>& tracks,
const HelixRange& ) {
1285 vector<TrackSegment>* cur_seg = &
segments1;
1286 vector<TrackSegment>* next_seg = &
segments2;
1287 unsigned int curseg_size = 0;
1288 unsigned int nextseg_size = 0;
1290 vector<TrackSegment> complete_segments;
1293 for (
unsigned int l = 0; l <
n_layers; ++l) {
1296 for (
unsigned int i = 0; i < hits.size(); ++i) {
1297 unsigned int min = (hits[i].get_layer() - allowed_missing);
1298 if (allowed_missing > (
unsigned int) hits[i].get_layer()) {
1301 for (
int l = min; l <= hits[i].get_layer(); l += 1) {
1305 for (
unsigned int l = 0; l <
n_layers; ++l) {
1315 gettimeofday(&t1, NULL);
1318 float cosang_diff_inv = 1. / cosang_diff;
1322 vector<float> inv_layer;
1323 inv_layer.assign(n_layers, 1.);
1324 for (
unsigned int l = 3; l <
n_layers; ++l) {
1325 inv_layer[l] = 1. / (((float) l) - 2.);
1328 unsigned int hit_counter = 0;
1369 unsigned int hit1[4];
1370 unsigned int hit2[4];
1371 unsigned int hit3[4];
1374 temp_segment.
hits.assign(n_layers, 0);
1376 for (
unsigned int i = 0, sizei =
layer_sorted[0].size(); i < sizei; ++i) {
1377 for (
unsigned int j = 0, sizej =
layer_sorted[1].size(); j < sizej;
1379 for (
unsigned int k = 0, sizek =
layer_sorted[2].size();
k < sizek;
1394 dx1_a[hit_counter] = 0.5 * sqrt(12.0)
1396 dy1_a[hit_counter] = 0.5 * sqrt(12.0)
1398 dz1_a[hit_counter] = 0.5 * sqrt(12.0)
1405 dx2_a[hit_counter] = 0.5 * sqrt(12.0)
1407 dy2_a[hit_counter] = 0.5 * sqrt(12.0)
1409 dz2_a[hit_counter] = 0.5 * sqrt(12.0)
1416 dx3_a[hit_counter] = 0.5 * sqrt(12.0)
1418 dy3_a[hit_counter] = 0.5 * sqrt(12.0)
1420 dz3_a[hit_counter] = 0.5 * sqrt(12.0)
1423 hit1[hit_counter] = i;
1424 hit2[hit_counter] = j;
1425 hit3[hit_counter] =
k;
1429 if (hit_counter == 4) {
1431 x3_a, y3_a, z3_a, dx1_a, dy1_a, dz1_a, dx2_a, dy2_a,
1432 dz2_a, dx3_a, dy3_a, dz3_a, kappa_a, dkappa_a,
1433 ux_mid_a, uy_mid_a, ux_end_a, uy_end_a, dzdl_1_a,
1434 dzdl_2_a, ddzdl_1_a, ddzdl_2_a);
1436 for (
unsigned int h = 0;
h < hit_counter; ++
h) {
1437 temp_segment.
chi2 = (dzdl_1_a[
h] - dzdl_2_a[
h])
1438 / (ddzdl_1_a[
h] + ddzdl_2_a[
h]
1439 + fabs(dzdl_1_a[
h] * sinang_cut));
1440 temp_segment.
chi2 *= temp_segment.
chi2;
1441 if (temp_segment.
chi2 > 2.0) {
1444 temp_segment.
ux = ux_end_a[
h];
1445 temp_segment.
uy = uy_end_a[
h];
1446 temp_segment.
kappa = kappa_a[
h];
1450 temp_segment.
dkappa = dkappa_a[
h];
1451 temp_segment.
hits[0] = hit1[
h];
1452 temp_segment.
hits[1] = hit2[
h];
1453 temp_segment.
hits[2] = hit3[
h];
1455 unsigned int outer_layer =
1457 if ((outer_layer - 2) > allowed_missing) {
1460 if ((n_layers - 3) <= allowed_missing) {
1461 complete_segments.push_back(temp_segment);
1463 if (next_seg->size() == nextseg_size) {
1464 next_seg->push_back(temp_segment);
1467 (*next_seg)[nextseg_size] = temp_segment;
1477 if (hit_counter != 0) {
1479 z3_a, dx1_a, dy1_a, dz1_a, dx2_a, dy2_a, dz2_a, dx3_a, dy3_a,
1480 dz3_a, kappa_a, dkappa_a, ux_mid_a, uy_mid_a, ux_end_a,
1481 uy_end_a, dzdl_1_a, dzdl_2_a, ddzdl_1_a, ddzdl_2_a);
1483 for (
unsigned int h = 0;
h < hit_counter; ++
h) {
1484 temp_segment.
chi2 = (dzdl_1_a[
h] - dzdl_2_a[
h])
1485 / (ddzdl_1_a[
h] + ddzdl_2_a[
h]
1486 + fabs(dzdl_1_a[
h] * sinang_cut));
1487 temp_segment.
chi2 *= temp_segment.
chi2;
1488 if (temp_segment.
chi2 > 2.0) {
1491 temp_segment.
ux = ux_end_a[
h];
1492 temp_segment.
uy = uy_end_a[
h];
1493 temp_segment.
kappa = kappa_a[
h];
1497 temp_segment.
dkappa = dkappa_a[
h];
1498 temp_segment.
hits[0] = hit1[
h];
1499 temp_segment.
hits[1] = hit2[
h];
1500 temp_segment.
hits[2] = hit3[
h];
1502 unsigned int outer_layer =
1504 if ((outer_layer - 2) > allowed_missing) {
1507 if ((n_layers - 3) <= allowed_missing) {
1508 complete_segments.push_back(temp_segment);
1510 if (next_seg->size() == nextseg_size) {
1511 next_seg->push_back(temp_segment);
1514 (*next_seg)[nextseg_size] = temp_segment;
1522 swap(cur_seg, next_seg);
1523 swap(curseg_size, nextseg_size);
1526 unsigned int whichseg[4];
1527 for (
unsigned int l = 3; l <
n_layers; ++l) {
1528 if (l == (n_layers - 1)) {
1529 easy_chi2_cut *= 0.25;
1532 for (
unsigned int i = 0, sizei = curseg_size; i < sizei; ++i) {
1533 for (
unsigned int j = 0, sizej =
layer_sorted[l].size(); j < sizej;
1535 if ((
layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_layer()
1541 layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_x();
1543 layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_y();
1545 layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_z();
1547 layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_x();
1549 layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_y();
1551 layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_z();
1556 dx1_a[hit_counter] =
1560 - 2]].get_size(0, 0));
1561 dy1_a[hit_counter] =
1565 - 2]].get_size(1, 1));
1566 dz1_a[hit_counter] =
1570 - 2]].get_size(2, 2));
1571 dx2_a[hit_counter] =
1575 - 1]].get_size(0, 0));
1576 dy2_a[hit_counter] =
1580 - 1]].get_size(1, 1));
1581 dz2_a[hit_counter] =
1585 - 1]].get_size(2, 2));
1586 dx3_a[hit_counter] = 0.5 * sqrt(12.0)
1588 dy3_a[hit_counter] = 0.5 * sqrt(12.0)
1590 dz3_a[hit_counter] = 0.5 * sqrt(12.0)
1593 cur_kappa_a[hit_counter] = (*cur_seg)[i].kappa;
1594 cur_dkappa_a[hit_counter] = (*cur_seg)[i].dkappa;
1595 cur_ux_a[hit_counter] = (*cur_seg)[i].ux;
1596 cur_uy_a[hit_counter] = (*cur_seg)[i].uy;
1597 cur_chi2_a[hit_counter] = (*cur_seg)[i].chi2;
1599 whichseg[hit_counter] = i;
1600 hit1[hit_counter] = j;
1603 if (hit_counter == 4) {
1605 x3_a, y3_a, z3_a, dx1_a, dy1_a, dz1_a, dx2_a, dy2_a,
1606 dz2_a, dx3_a, dy3_a, dz3_a, kappa_a, dkappa_a,
1607 ux_mid_a, uy_mid_a, ux_end_a, uy_end_a, dzdl_1_a,
1608 dzdl_2_a, ddzdl_1_a, ddzdl_2_a, sinang_cut,
1609 cosang_diff_inv, cur_kappa_a, cur_dkappa_a,
1610 cur_ux_a, cur_uy_a, cur_chi2_a, chi2_a);
1612 for (
unsigned int h = 0;
h < hit_counter; ++
h) {
1613 if ((chi2_a[
h]) * inv_layer[l] < easy_chi2_cut) {
1614 temp_segment.
chi2 = chi2_a[
h];
1615 temp_segment.
ux = ux_end_a[
h];
1616 temp_segment.
uy = uy_end_a[
h];
1617 temp_segment.
kappa = kappa_a[
h];
1621 temp_segment.
dkappa = dkappa_a[
h];
1622 for (
unsigned int ll = 0; ll < l; ++ll) {
1623 temp_segment.
hits[ll] =
1624 (*cur_seg)[whichseg[
h]].hits[ll];
1626 temp_segment.
hits[l] = hit1[
h];
1627 unsigned int outer_layer =
1629 temp_segment.
n_hits = l + 1;
1630 if ((n_layers - (l + 1)) <= allowed_missing) {
1631 complete_segments.push_back(temp_segment);
1633 if ((outer_layer - l) > allowed_missing) {
1636 if (next_seg->size() == nextseg_size) {
1637 next_seg->push_back(temp_segment);
1640 (*next_seg)[nextseg_size] = temp_segment;
1649 if (hit_counter != 0) {
1651 y3_a, z3_a, dx1_a, dy1_a, dz1_a, dx2_a, dy2_a, dz2_a, dx3_a,
1652 dy3_a, dz3_a, kappa_a, dkappa_a, ux_mid_a, uy_mid_a,
1653 ux_end_a, uy_end_a, dzdl_1_a, dzdl_2_a, ddzdl_1_a,
1654 ddzdl_2_a, sinang_cut, cosang_diff_inv, cur_kappa_a,
1655 cur_dkappa_a, cur_ux_a, cur_uy_a, cur_chi2_a, chi2_a);
1657 for (
unsigned int h = 0;
h < hit_counter; ++
h) {
1658 if ((chi2_a[
h]) * inv_layer[l] < easy_chi2_cut) {
1659 temp_segment.
chi2 = chi2_a[
h];
1660 temp_segment.
ux = ux_end_a[
h];
1661 temp_segment.
uy = uy_end_a[
h];
1662 temp_segment.
kappa = kappa_a[
h];
1667 temp_segment.
dkappa = dkappa_a[
h];
1668 for (
unsigned int ll = 0; ll < l; ++ll) {
1669 temp_segment.
hits[ll] =
1670 (*cur_seg)[whichseg[
h]].hits[ll];
1672 temp_segment.
hits[l] = hit1[
h];
1673 unsigned int outer_layer =
1675 temp_segment.
n_hits = l + 1;
1676 if ((n_layers - (l + 1)) <= allowed_missing) {
1677 complete_segments.push_back(temp_segment);
1679 if ((outer_layer - l) > allowed_missing) {
1682 if (next_seg->size() == nextseg_size) {
1683 next_seg->push_back(temp_segment);
1686 (*next_seg)[nextseg_size] = temp_segment;
1693 swap(cur_seg, next_seg);
1694 swap(curseg_size, nextseg_size);
1697 for (
unsigned int i = 0; i < complete_segments.size(); ++i) {
1698 if (cur_seg->size() == curseg_size) {
1699 cur_seg->push_back(complete_segments[i]);
1702 (*cur_seg)[curseg_size] = complete_segments[i];
1707 gettimeofday(&t2, NULL);
1708 time1 = ((double) (t1.tv_sec) + (double) (t1.tv_usec) / 1000000.);
1709 time2 = ((double) (t2.tv_sec) + (double) (t2.tv_usec) / 1000000.);
1710 CAtime += (time2 - time1);
1713 vector<SimpleHit3D> temp_hits;
1714 for (
unsigned int i = 0, sizei = curseg_size; i < sizei; ++i) {
1717 temp_comb.assign((*cur_seg)[i].n_hits, 0);
1718 for (
unsigned int l = 0; l < (*cur_seg)[i].n_hits; ++l) {
1723 if (it !=
combos.end()) {
1726 if (
combos.size() > 10000) {
1731 for (
unsigned int l = 0; l < (*cur_seg)[i].n_hits; ++l) {
1735 gettimeofday(&t1, NULL);
1743 float init_chi2 =
fitTrack(temp_track);
1750 gettimeofday(&t2, NULL);
1752 ((double) (t1.tv_sec) + (double) (t1.tv_usec) / 1000000.);
1754 ((double) (t2.tv_sec) + (double) (t2.tv_usec) / 1000000.);
1760 state.
phi = temp_track.
phi;
1761 if (state.
phi < 0.) {
1764 state.
d = temp_track.
d;
1767 state.
z0 = temp_track.
z0;
1769 state.
C = Matrix<float, 5, 5>::Zero(5, 5);
1770 state.
C(0, 0) = pow(0.01, 2.);
1771 state.
C(1, 1) = pow(0.01, 2.);
1772 state.
C(2, 2) = pow(0.01 * state.
nu, 2.);
1773 state.
C(3, 3) = pow(0.05, 2.);
1774 state.
C(4, 4) = pow(0.05, 2.);
1781 for (
unsigned int h = 0;
h < temp_track.
hits.size(); ++
h) {
1790 gettimeofday(&t2, NULL);
1791 time1 = ((double) (t1.tv_sec) + (double) (t1.tv_usec) / 1000000.);
1792 time2 = ((double) (t2.tv_sec) + (double) (t2.tv_usec) / 1000000.);
1795 if (!(temp_track.
kappa == temp_track.
kappa)) {
1804 if (state.
chi2 / (2. * ((
float) (temp_track.
hits.size())) - 5.)
1810 if (fabs(temp_track.
d) >
dca_cut) {
1828 tracks.push_back(temp_track);
1832 for (
unsigned int i = 0; i < temp_track.
hits.size(); ++i) {
1833 (*hit_used)[temp_track.
hits[i].get_id()] =
true;
1840 return ((
double) (x > 0.)) - ((double) (x < 0.));
1844 float&
x,
float&
y,
float&
z) {
1851 float hitx = seed.
hits.back().get_x();
1852 float hity = seed.
hits.back().get_y();
1856 float cosphi = cos(phi);
1857 float sinphi = sin(phi);
1861 float kd = (d * k + 1.);
1862 float kcx = kd * cosphi;
1863 float kcy = kd * sinphi;
1864 float kd_inv = 1. / kd;
1865 float R2 = rad_det * rad_det;
1866 float a = 0.5 * (k * R2 + (d * d * k + 2. *
d)) * kd_inv;
1867 float tmp1 = a * kd_inv;
1868 float P2x = kcx *
tmp1;
1869 float P2y = kcy *
tmp1;
1871 float h = sqrt(R2 - a * a);
1873 float ux = -kcy * kd_inv;
1874 float uy = kcx * kd_inv;
1876 float x1 = P2x + ux *
h;
1877 float y1 = P2y + uy *
h;
1878 float x2 = P2x - ux *
h;
1879 float y2 = P2y - uy *
h;
1880 float diff1 = (x1 - hitx) * (x1 - hitx) + (y1 - hity) * (y1 - hity);
1881 float diff2 = (x2 - hitx) * (x2 - hitx) + (y2 - hity) * (y2 - hity);
1883 if (diff1 < diff2) {
1888 x = P2x + signk * ux *
h;
1889 y = P2y + signk * uy *
h;
1891 double sign_dzdl =
sign(dzdl);
1893 double startx = d * cosphi;
1894 double starty = d * sinphi;
1895 double D = sqrt((startx - x) * (startx - x) + (starty - y) * (starty - y));
1897 double v = 0.5 * k *
D;
1900 if (v >= 0.999999) {
1903 double s = 2. * asin(v) /
k;
1906 double dz = sqrt(s * s * dzdl * dzdl / (1. - dzdl * dzdl));
1907 z = z0 + sign_dzdl *
dz;
1910 double temp1 = k * D * 0.5;
1912 double temp2 = D * 0.5;
1917 s += (3. / 20.) *
temp2;
1919 s += (5. / 56.) *
temp2;
1921 double dz = sqrt(s * s * dzdl * dzdl / (1. - dzdl * dzdl));
1922 z = z0 + sign_dzdl *
dz;
1927 unsigned int min_hits,
unsigned int ) {
1937 vector<SimpleHit3D>&
hits,
unsigned int ,
unsigned int ,
1938 vector<SimpleTrack3D>& ) {
1940 unsigned int pos = 0;
1941 while (pos < hits.size()) {
1942 for (
unsigned int i = 0; i <
nthreads; ++i) {
1943 if (pos >= hits.size()) {
1946 for (
unsigned int j = 0; j < hits_per_thread; ++j) {
1947 if (pos >= hits.size()) {
1955 for (
unsigned int i = 0; i <
nthreads; ++i) {
1965 for (
unsigned int b = 0;
b < nbins; ++
b) {
1968 for (
unsigned int i = 0; i <
nthreads; ++i) {
1980 unsigned int min_hits,
unsigned int max_hits,
1981 vector<SimpleTrack3D>& tracks) {
1985 for (
unsigned int i = 0; i <
nthreads; ++i) {
1998 for (
unsigned int i = 0; i <
nthreads; ++i) {
2007 for (
unsigned int i = 0; i <
nthreads; ++i) {
2016 for (
unsigned int i = 0; i <
nthreads; ++i) {
2026 vector<SimpleTrack3D> temp_tracks;
2027 for (
unsigned int i = 0; i <
nthreads; ++i) {
2028 vector<HelixKalmanState>* states =
2030 for (
unsigned int j = 0; j <
thread_tracks[i].size(); ++j) {
2039 unsigned long int w = (*((
unsigned long int*) arg));
2045 unsigned long int w = (*((
unsigned long int*) arg));