#include <xmmintrin.h>

using namespace Eigen;
using namespace SeamStress;
      : hit1(h1), hit2(h2), hit3(h3), track(t), chi2(c) {}

    return (hit1 < other.hit1) ||
           ((hit2 < other.hit2) && (hit1 == other.hit1)) ||
           ((hit3 < other.hit3) && (hit1 == other.hit1) && (hit2 == other.hit2));

    return ((hit1 == other.hit1) && (hit2 == other.hit2) && (hit3 == other.hit3));

  unsigned int hit1, hit2, hit3, track;
      : hit1(h1), hit2(h2), hit3(h3), track(t), chi2(c) {}

    return (hit1 < other.hit1) ||
           ((hit2 < other.hit2) && (hit1 == other.hit1)) ||
           ((hit3 < other.hit3) && (hit1 == other.hit1) && (hit2 == other.hit2));

    return ((hit1 == other.hit1) && (hit2 == other.hit2) && (hit3 == other.hit3));

  unsigned int hit1, hit2, hit3, track;
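// Note (added commentary, not in the original source): hitTriplet orders candidates
// by hit1, then hit2, then hit3, so std::sort groups identical three-hit
// combinations next to each other; operator== is what lets the ghost-rejection pass
// below spot triplets shared by more than one track candidate.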
                       vector<SimpleTrack3D>& ,
                       vector<bool>& usetrack,
                       vector<float>& next_best_chi2) {
  vector<hitTriplet> trips;
  for (unsigned int i = 0; i < input.size(); ++i) {
    for (unsigned int h1 = 0; h1 < input[i].hits.size(); ++h1) {
      for (unsigned int h2 = (h1 + 1); h2 < input[i].hits.size(); ++h2) {
        for (unsigned int h3 = (h2 + 1); h3 < input[i].hits.size(); ++h3) {
          if (cut_on_dca == false) {
                input[i].hits[h3].get_id(), i, track_states[i].chi2));
  if (trips.size() == 0) {
  sort(trips.begin(), trips.end());
  while (pos < trips.size()) {
    unsigned int next_pos = pos + 1;
    if (next_pos >= trips.size()) {
    while (trips[pos] == trips[next_pos]) {
      if (next_pos >= trips.size()) {
    if ((next_pos - pos) > 1) {
      float best_chi2 = trips[pos].chi2;
      float next_chi2 = trips[pos + 1].chi2;
      unsigned int best_pos = pos;
      for (unsigned int i = (pos + 1); i < next_pos; ++i) {
            input[trips[best_pos].track].hits.size()) {
        } else if ((input[trips[i].track].hits.size() >
                    input[trips[best_pos].track].hits.size()) ||
                   (input[trips[i].track].hits.back().get_layer() >
                    input[trips[best_pos].track].hits.back().get_layer())) {
          next_chi2 = best_chi2;
          best_chi2 = trips[i].chi2;
        if ((trips[i].chi2 < best_chi2) ||
            (usetrack[trips[best_pos].track] == false)) {
          next_chi2 = best_chi2;
          best_chi2 = trips[i].chi2;
        } else if (trips[i].chi2 < next_chi2) {
          next_chi2 = trips[i].chi2;
      for (unsigned int i = pos; i < next_pos; ++i) {
          usetrack[trips[i].track] = false;
          next_best_chi2[trips[i].track] = next_chi2;
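// Note (added commentary, not in the original source): the pass above is the ghost
// rejection. Every track candidate contributes all of its three-hit combinations as
// hitTriplets; after sorting, a triplet that occurs more than once is shared by
// several candidates. Among the sharers the code keeps the one with more hits (or a
// deeper outermost layer, or failing that the lowest chi2), flags the rest with
// usetrack[...] = false, and records the runner-up chi2 in next_best_chi2.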
                               unsigned int n_k, unsigned int n_dzdl,
                               vector<float>& radius, float Bfield)
    : HelixHough(n_phi, n_d, n_k, n_dzdl, n_z0, min_resolution, max_resolution,
      fast_chi2_cut_par0(12.),
      fast_chi2_cut_par1(0.),
      chi2_removal_cut(1.),
      reject_ghosts(false),
  vector<float> detector_material;
  for (unsigned int i = 0; i < radius.size(); ++i) {
  for (unsigned int i = 0; i < material.size(); ++i) {
        sqrt(3. * material[i]));
    detector_material.push_back(3. * material[i]);
  float total_scatter_2 = 0.;
  vector<SimpleHit3D> one_layer;
  for (unsigned int i = 0; i < 4; ++i) {
                               float Bfield, bool parallel,
                               unsigned int num_threads)
      fast_chi2_cut_par0(12.),
      fast_chi2_cut_par1(0.),
      chi2_removal_cut(1.),
      reject_ghosts(false),
      nthreads(num_threads),
      is_parallel(parallel),
  vector<float> detector_material;
  for (unsigned int i = 0; i < radius.size(); ++i) {
  for (unsigned int i = 0; i < material.size(); ++i) {
        sqrt(3. * material[i]));
    detector_material.push_back(3. * material[i]);
  float total_scatter_2 = 0.;
  vector<SimpleHit3D> one_layer;
  for (unsigned int i = 0; i < 4; ++i) {
  Seamstress::init_vector(num_threads, vss);
  vssp = new vector<Seamstress*>();
  for (unsigned int i = 0; i < vss.size(); i++) {
  vector<vector<unsigned int> > zoom_profile_new;
  for (unsigned int i = 1; i < zoom_profile.size(); ++i) {
    zoom_profile_new.push_back(zoom_profile[i]);
  for (unsigned int i = 0; i < nthreads; ++i) {
        material, radius, Bfield));
  for (unsigned int i = 0; i < vss.size(); i++) {
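// Note (added commentary, not in the original source): both constructors convert the
// per-layer material budget into multiple-scattering terms (the sqrt(3. * material[i])
// factors) and, in the parallel variant, stand up one tracker per worker on top of the
// SeamStress thread pool (Seamstress::init_vector, vssp), handing each a zoom profile
// that drops the first entry. The elided initialiser lines are not reproduced here, so
// details are inferred rather than verbatim.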
  float x0 = track.d * cos(track.phi);
  float y0 = track.d * sin(track.phi);
      atan2((1. + track.kappa * track.d) * sin(track.phi) - track.kappa * y0,
            (1. + track.kappa * track.d) * cos(track.phi) - track.kappa * x0);
  float cosphi = cos(phi2);
  float sinphi = sin(phi2);
  float tx = cosphi + track.kappa * (x0 - vx);
  float ty = sinphi + track.kappa * (y0 - vy);
  float dk = sqrt(tx * tx + ty * ty) - 1.;
  if (track.kappa == 0.) {
    d_out = (x0 - vx) * cosphi + (y0 - vy) * sinphi;
    d_out = dk / track.kappa;
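// Note (added commentary, not in the original source): this block evaluates the
// transverse distance of closest approach of the helix to the point (vx, vy):
// (x0, y0) is the point of closest approach to the origin, phi2 the local direction
// there, and d_out = (sqrt(tx*tx + ty*ty) - 1) / kappa, with the straight-line
// projection used in the kappa == 0 limit.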
                               vector<SimpleTrack3D>& output) {
    for (unsigned int i = 0; i < input.size(); ++i) {
      output.push_back(input[i]);
  unsigned int nt = input.size();
  vector<bool> usetrack;
  usetrack.assign(input.size(), true);
  vector<float> next_best_chi2;
  next_best_chi2.assign(input.size(), 99999.);
  vector<HelixKalmanState> states_new;
  for (unsigned int i = 0; i < nt; ++i) {
    if (usetrack[i] == true) {
      output.push_back(input[i]);
      output.back().index = (output.size() - 1);
  for (unsigned int i = 0; i < output.size(); ++i) {
    for (int h = (output[i].hits.size() - 1); h >= 0; --h) {
      float err_scale = 1.;
  cout << "# fits = " << nfits << endl;
  cout << "findTracks called " << findtracksiter << " times" << endl;
  cout << "CAtime = " << CAtime << endl;
  cout << "KALtime = " << KALtime << endl;
                               vector<SimpleTrack3D>& tracks,
  unsigned int layer_mask[4] = {0, 0, 0, 0};
  for (unsigned int i = 0; i < hits.size(); ++i) {
    if (hits[i].get_layer() < 32) {
      layer_mask[0] = layer_mask[0] | (1 << hits[i].get_layer());
    } else if (hits[i].get_layer() < 64) {
      layer_mask[1] = layer_mask[1] | (1 << (hits[i].get_layer() - 32));
    } else if (hits[i].get_layer() < 96) {
      layer_mask[2] = layer_mask[2] | (1 << (hits[i].get_layer() - 64));
    } else if (hits[i].get_layer() < 128) {
      layer_mask[3] = layer_mask[3] | (1 << (hits[i].get_layer() - 96));
  unsigned int nlayers =
      __builtin_popcount(layer_mask[0]) + __builtin_popcount(layer_mask[1]) +
      __builtin_popcount(layer_mask[2]) + __builtin_popcount(layer_mask[3]);
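// Note (added commentary, not in the original source): the four 32-bit words of
// layer_mask form a 128-layer bitset; setting one bit per hit layer and summing
// __builtin_popcount over the words counts how many distinct layers the hit list
// touches.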
                               float min_d, float max_d, float ,
                               float , float , float max_dzdl,
    prev_p_inv = 3.33333333333333314e+02 * max_k * Bfield_inv *
                 sqrt(1. - max_dzdl * max_dzdl);
  float total_scatter_2 = 0.;
    total_scatter_2 += this_scatter * this_scatter;
  float angle = p_inv * sqrt(total_scatter_2) * 1.0;
  float dsize = 0.5 * (max_d - min_d);
  float returnval = 0.;
  if (pairvoting == false) {
    if (angle_from_d > angle) {
      returnval = (angle - angle_from_d);
                                float , float , float min_z0,
                                float max_z0, float , float max_dzdl,
    prev_p_inv = 3.33333333333333314e+02 * max_k * Bfield_inv *
                 sqrt(1. - max_dzdl * max_dzdl);
  float total_scatter_2 = 0.;
    total_scatter_2 += this_scatter * this_scatter;
  float angle = p_inv * sqrt(total_scatter_2) * 1.0;
  float z0size = 0.5 * (max_z0 - min_z0);
  float returnval = 0.;
  if (pairvoting == false) {
    if (angle_from_z0 > angle) {
      returnval = (angle - angle_from_z0);
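// Note (added commentary, not in the original source): in both range checks above,
// 3.33333333333333314e+02 * max_k * Bfield_inv looks like the standard
// pT [GeV/c] = 0.003 * B [T] * R [cm] relation rearranged into an inverse momentum
// (the sqrt(1 - dzdl^2) factor converting transverse to total momentum), and the
// accumulated total_scatter_2 turns it into an allowed multiple-scattering angle that
// is compared against the angle subtended by the d (or z0) bin half-width.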
  float dphi = 2. * sqrt(state->C(0, 0));
  float dd = 2. * sqrt(state->C(1, 1));
  float dk = 2. * state->C(2, 2);
  float dz0 = 2. * sqrt(state->C(3, 3));
  float ddzdl = 2. * sqrt(state->C(4, 4));
  range.min_d = seed.d - dd;
  range.max_d = seed.d + dd;
  if (range.min_k < 0.) {
  vector<float> chi2_hit;
  vector<float> xyres_inv;
  vector<float> zres_inv;
  for (unsigned int i = 0; i < track.hits.size(); i++) {
    xyres.push_back(sqrt(
        (0.5*sqrt(12.)*sqrt(track.hits[i].get_size(0,0))) * (0.5*sqrt(12.)*sqrt(track.hits[i].get_size(0,0))) +
        (0.5*sqrt(12.)*sqrt(track.hits[i].get_size(1,1))) * (0.5*sqrt(12.)*sqrt(track.hits[i].get_size(1,1)))));
    xyres_inv.push_back(1. / xyres.back());
    zres.push_back((0.5*sqrt(12.)*sqrt(track.hits[i].get_size(2,2))));
    zres_inv.push_back(1. / zres.back());
  chi2_hit.resize(track.hits.size(), 0.);

  MatrixXf y = MatrixXf::Zero(track.hits.size(), 1);
  for (unsigned int i = 0; i < track.hits.size(); i++) {
    y(i, 0) = (pow(track.hits[i].get_x(), 2) + pow(track.hits[i].get_y(), 2));
    y(i, 0) *= xyres_inv[i];

  MatrixXf X = MatrixXf::Zero(track.hits.size(), 3);
  for (unsigned int i = 0; i < track.hits.size(); i++) {
    X(i, 0) = track.hits[i].get_x();
    X(i, 1) = track.hits[i].get_y();
    X(i, 0) *= xyres_inv[i];
    X(i, 1) *= xyres_inv[i];
    X(i, 2) *= xyres_inv[i];

  MatrixXf Xt = X.transpose();
  MatrixXf prod = Xt * X;
  MatrixXf Xty = Xt * y;
  MatrixXf beta = prod.ldlt().solve(Xty);

  float cx = beta(0, 0) * 0.5;
  float cy = beta(1, 0) * 0.5;
  float r = sqrt(cx * cx + cy * cy - beta(2, 0));

  float phi = atan2(cy, cx);
  float d = sqrt(cx * cx + cy * cy) - r;

  MatrixXf diff = y - (X * beta);
  MatrixXf chi2 = (diff.transpose()) * diff;

  float dx = d * cos(phi);
  float dy = d * sin(phi);

  MatrixXf y2 = MatrixXf::Zero(track.hits.size(), 1);
  for (unsigned int i = 0; i < track.hits.size(); i++) {
    y2(i, 0) = track.hits[i].get_z();
    y2(i, 0) *= zres_inv[i];

  MatrixXf X2 = MatrixXf::Zero(track.hits.size(), 2);
  for (unsigned int i = 0; i < track.hits.size(); i++) {
    float D = sqrt(pow(dx - track.hits[i].get_x(), 2) + pow(dy - track.hits[i].get_y(), 2));
    if (0.5 * k * D > 0.1) {
      float v = 0.5 * k * D;
      s = 2. * asin(v) / k;
      float temp1 = k * D * 0.5;
      float temp2 = D * 0.5;
      s += (3. / 20.) * temp2;
      s += (5. / 56.) * temp2;
    X2(i, 0) *= zres_inv[i];
    X2(i, 1) *= zres_inv[i];

  MatrixXf Xt2 = X2.transpose();
  MatrixXf prod2 = Xt2 * X2;
  MatrixXf Xty2 = Xt2 * y2;
  MatrixXf beta2 = prod2.ldlt().solve(Xty2);

  MatrixXf diff2 = y2 - (X2 * beta2);
  MatrixXf chi2_z = (diff2.transpose()) * diff2;

  float z0 = beta2(1, 0);
  float dzdl = beta2(0, 0) / sqrt(1. + beta2(0, 0) * beta2(0, 0));
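// Note (added commentary, not in the original source): fitTrack appears to perform
// two weighted linear least-squares fits. The first is a linearised (Kasa-style)
// circle fit, regressing x^2 + y^2 against x, y and a constant with
// inverse-resolution weights, so the circle centre is (cx, cy) = (beta0, beta1) / 2
// and phi, d follow from it. The second fits z against the arc length s along that
// circle, where s = 2 * asin(0.5 * k * D) / k for large arguments and a truncated
// series otherwise, giving z0 and dzdl = slope / sqrt(1 + slope^2). Both
// normal-equation systems are solved with Eigen's LDLT decomposition.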
  if (track.kappa != 0.) {
    r = 1. / track.kappa;
  cx = (track.d + r) * cos(track.phi);
  cy = (track.d + r) * sin(track.phi);
  for (unsigned int h = 0; h < track.hits.size(); h++) {
    float dx1 = track.hits[h].get_x() - cx;
    float dy1 = track.hits[h].get_y() - cy;
    float dx2 = track.hits[h].get_x() + cx;
    float dy2 = track.hits[h].get_y() + cy;
    float xydiff1 = sqrt(dx1 * dx1 + dy1 * dy1) - r;
    float xydiff2 = sqrt(dx2 * dx2 + dy2 * dy2) - r;
    float xydiff = xydiff2;
    if (fabs(xydiff1) < fabs(xydiff2)) {
    float ls_xy = xyres[h];
    chi2_hit[h] += xydiff * xydiff / (ls_xy * ls_xy);
    chi2_hit[h] += diff2(h, 0) * diff2(h, 0);
    chi2_tot += chi2_hit[h];
  unsigned int deg_of_freedom = 2 * track.hits.size() - 5;
  return (chi2_tot) / ((float)(deg_of_freedom));
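// Note (added commentary, not in the original source): the per-hit chi2 combines the
// radial residual to the fitted circle (both centre sign conventions are tried and
// the smaller residual kept) with the already-weighted z residual from the s-z fit,
// and the total is normalised by 2 * nhits - 5, i.e. two measurements per hit minus
// the five helix parameters.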
    float* x1_a, float* y1_a, float* z1_a, float* x2_a, float* y2_a,
    float* z2_a, float* x3_a, float* y3_a, float* z3_a, float* dx1_a,
    float* dy1_a, float* dz1_a, float* dx2_a, float* dy2_a, float* dz2_a,
    float* dx3_a, float* dy3_a, float* dz3_a, float* kappa_a, float* dkappa_a,
    float* ux_mid_a, float* uy_mid_a, float* ux_end_a, float* uy_end_a,
    float* dzdl_1_a, float* dzdl_2_a, float* ddzdl_1_a, float* ddzdl_2_a) {
  static const __m128 two = {2., 2., 2., 2.};

  __m128 x1 = _mm_load_ps(x1_a);
  __m128 x2 = _mm_load_ps(x2_a);
  __m128 x3 = _mm_load_ps(x3_a);
  __m128 y1 = _mm_load_ps(y1_a);
  __m128 y2 = _mm_load_ps(y2_a);
  __m128 y3 = _mm_load_ps(y3_a);
  __m128 z1 = _mm_load_ps(z1_a);
  __m128 z2 = _mm_load_ps(z2_a);
  __m128 z3 = _mm_load_ps(z3_a);

  __m128 dx1 = _mm_load_ps(dx1_a);
  __m128 dx2 = _mm_load_ps(dx2_a);
  __m128 dx3 = _mm_load_ps(dx3_a);
  __m128 dy1 = _mm_load_ps(dy1_a);
  __m128 dy2 = _mm_load_ps(dy2_a);
  __m128 dy3 = _mm_load_ps(dy3_a);
  __m128 dz1 = _mm_load_ps(dz1_a);
  __m128 dz2 = _mm_load_ps(dz2_a);
  __m128 dz3 = _mm_load_ps(dz3_a);

  __m128 D12 = _mm_sub_ps(x2, x1);
  D12 = _mm_mul_ps(D12, D12);
  __m128 tmp1 = _mm_sub_ps(y2, y1);
  tmp1 = _mm_mul_ps(tmp1, tmp1);
  D12 = _mm_add_ps(D12, tmp1);
  D12 = _vec_sqrt_ps(D12);

  __m128 D23 = _mm_sub_ps(x3, x2);
  D23 = _mm_mul_ps(D23, D23);
  tmp1 = _mm_sub_ps(y3, y2);
  tmp1 = _mm_mul_ps(tmp1, tmp1);
  D23 = _mm_add_ps(D23, tmp1);
  D23 = _vec_sqrt_ps(D23);

  __m128 D31 = _mm_sub_ps(x1, x3);
  D31 = _mm_mul_ps(D31, D31);
  tmp1 = _mm_sub_ps(y1, y3);
  tmp1 = _mm_mul_ps(tmp1, tmp1);
  D31 = _mm_add_ps(D31, tmp1);
  D31 = _vec_sqrt_ps(D31);

  __m128 k = _mm_mul_ps(D12, D23);
  k = _mm_mul_ps(k, D31);
  tmp1 = (D12 + D23 + D31) * (D23 + D31 - D12) * (D12 + D31 - D23) *
  tmp1 = _vec_sqrt_ps(tmp1);

  __m128 tmp2 = _mm_cmpgt_ps(tmp1, zero);
  tmp1 = _mm_and_ps(tmp2, k);
  tmp2 = _mm_andnot_ps(tmp2, zero);
  k = _mm_xor_ps(tmp1, tmp2);

  _mm_store_ps(kappa_a, k);
  __m128 k_inv = _vec_rec_ps(k);

  __m128 D12_inv = _vec_rec_ps(D12);
  __m128 D23_inv = _vec_rec_ps(D23);
  __m128 D31_inv = _vec_rec_ps(D31);

  __m128 dr1 = dx1 * dx1 + dy1 * dy1;
  dr1 = _vec_sqrt_ps(dr1);
  __m128 dr2 = dx2 * dx2 + dy2 * dy2;
  dr2 = _vec_sqrt_ps(dr2);
  __m128 dr3 = dx3 * dx3 + dy3 * dy3;
  dr3 = _vec_sqrt_ps(dr3);

  __m128 dk1 = (dr1 + dr2) * D12_inv * D12_inv;
  __m128 dk2 = (dr2 + dr3) * D23_inv * D23_inv;
  __m128 dk = dk1 + dk2;
  _mm_store_ps(dkappa_a, dk);

  __m128 ux12 = (x2 - x1) * D12_inv;
  __m128 uy12 = (y2 - y1) * D12_inv;
  __m128 ux23 = (x3 - x2) * D23_inv;
  __m128 uy23 = (y3 - y2) * D23_inv;
  __m128 ux13 = (x3 - x1) * D31_inv;
  __m128 uy13 = (y3 - y1) * D31_inv;

  __m128 cosalpha = ux12 * ux13 + uy12 * uy13;
  __m128 sinalpha = ux13 * uy12 - ux12 * uy13;

  __m128 ux_mid = ux23 * cosalpha - uy23 * sinalpha;
  __m128 uy_mid = ux23 * sinalpha + uy23 * cosalpha;
  _mm_store_ps(ux_mid_a, ux_mid);
  _mm_store_ps(uy_mid_a, uy_mid);

  __m128 ux_end = ux23 * cosalpha + uy23 * sinalpha;
  __m128 uy_end = uy23 * cosalpha - ux23 * sinalpha;
  _mm_store_ps(ux_end_a, ux_end);
  _mm_store_ps(uy_end_a, uy_end);

  __m128 v = one - sinalpha * sinalpha;
  __m128 s2 = _vec_atan_ps(v);

  tmp1 = _mm_cmpgt_ps(k, zero);
  tmp2 = _mm_and_ps(tmp1, s2);
  tmp1 = _mm_andnot_ps(tmp1, D23);
  s2 = _mm_xor_ps(tmp1, tmp2);

  __m128 del_z_2 = z3 - z2;
  __m128 dzdl_2 = s2 * s2 + del_z_2 * del_z_2;
  dzdl_2 = _vec_rsqrt_ps(dzdl_2);
  __m128 ddzdl_2 = (dz2 + dz3) * D23_inv;
  _mm_store_ps(dzdl_2_a, dzdl_2);
  _mm_store_ps(ddzdl_2_a, ddzdl_2);

  sinalpha = ux13 * uy23 - ux23 * uy13;
  v = one - sinalpha * sinalpha;
  __m128 s1 = _vec_atan_ps(v);

  tmp1 = _mm_cmpgt_ps(k, zero);
  tmp2 = _mm_and_ps(tmp1, s1);
  tmp1 = _mm_andnot_ps(tmp1, D12);
  s1 = _mm_xor_ps(tmp1, tmp2);

  __m128 del_z_1 = z2 - z1;
  __m128 dzdl_1 = s1 * s1 + del_z_1 * del_z_1;
  dzdl_1 = _vec_rsqrt_ps(dzdl_1);
  __m128 ddzdl_1 = (dz1 + dz2) * D12_inv;
  _mm_store_ps(dzdl_1_a, dzdl_1);
  _mm_store_ps(ddzdl_1_a, ddzdl_1);
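// Note (added commentary, not in the original source): calculate_kappa_tangents
// processes four hit triplets per call using SSE intrinsics. The curvature is taken
// from the circumcircle of the three (x, y) points: with chord lengths a = D12,
// b = D23, c = D31, Heron's relation gives 16*A^2 = (a+b+c)(-a+b+c)(a-b+c)(a+b-c),
// and the Menger curvature is kappa = 4*A / (a*b*c); the elided continuation lines
// evidently finish that quotient. The cmpgt/and/andnot/xor sequences act as
// branchless selects to zero out degenerate (collinear) cases, and dzdl appears to
// be formed as dz / sqrt(s^2 + dz^2) for each leg, with s the corresponding arc
// length estimate.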
    float* x1_a, float* y1_a, float* z1_a, float* x2_a, float* y2_a,
    float* z2_a, float* x3_a, float* y3_a, float* z3_a, float* dx1_a,
    float* dy1_a, float* dz1_a, float* dx2_a, float* dy2_a, float* dz2_a,
    float* dx3_a, float* dy3_a, float* dz3_a, float* kappa_a, float* dkappa_a,
    float* ux_mid_a, float* uy_mid_a, float* ux_end_a, float* uy_end_a,
    float* dzdl_1_a, float* dzdl_2_a, float* ddzdl_1_a, float* ddzdl_2_a,
    float sinang_cut, float cosang_diff_inv, float* cur_kappa_a,
    float* cur_dkappa_a, float* cur_ux_a, float* cur_uy_a, float* cur_chi2_a,
  static const __m128 two = {2., 2., 2., 2.};

  __m128 x1 = _mm_load_ps(x1_a);
  __m128 x2 = _mm_load_ps(x2_a);
  __m128 x3 = _mm_load_ps(x3_a);
  __m128 y1 = _mm_load_ps(y1_a);
  __m128 y2 = _mm_load_ps(y2_a);
  __m128 y3 = _mm_load_ps(y3_a);
  __m128 z1 = _mm_load_ps(z1_a);
  __m128 z2 = _mm_load_ps(z2_a);
  __m128 z3 = _mm_load_ps(z3_a);

  __m128 dx1 = _mm_load_ps(dx1_a);
  __m128 dx2 = _mm_load_ps(dx2_a);
  __m128 dx3 = _mm_load_ps(dx3_a);
  __m128 dy1 = _mm_load_ps(dy1_a);
  __m128 dy2 = _mm_load_ps(dy2_a);
  __m128 dy3 = _mm_load_ps(dy3_a);
  __m128 dz1 = _mm_load_ps(dz1_a);
  __m128 dz2 = _mm_load_ps(dz2_a);
  __m128 dz3 = _mm_load_ps(dz3_a);

  __m128 D12 = _mm_sub_ps(x2, x1);
  D12 = _mm_mul_ps(D12, D12);
  __m128 tmp1 = _mm_sub_ps(y2, y1);
  tmp1 = _mm_mul_ps(tmp1, tmp1);
  D12 = _mm_add_ps(D12, tmp1);
  D12 = _vec_sqrt_ps(D12);

  __m128 D23 = _mm_sub_ps(x3, x2);
  D23 = _mm_mul_ps(D23, D23);
  tmp1 = _mm_sub_ps(y3, y2);
  tmp1 = _mm_mul_ps(tmp1, tmp1);
  D23 = _mm_add_ps(D23, tmp1);
  D23 = _vec_sqrt_ps(D23);

  __m128 D31 = _mm_sub_ps(x1, x3);
  D31 = _mm_mul_ps(D31, D31);
  tmp1 = _mm_sub_ps(y1, y3);
  tmp1 = _mm_mul_ps(tmp1, tmp1);
  D31 = _mm_add_ps(D31, tmp1);
  D31 = _vec_sqrt_ps(D31);

  __m128 k = _mm_mul_ps(D12, D23);
  k = _mm_mul_ps(k, D31);
  tmp1 = (D12 + D23 + D31) * (D23 + D31 - D12) * (D12 + D31 - D23) *
  tmp1 = _vec_sqrt_ps(tmp1);

  __m128 tmp2 = _mm_cmpgt_ps(tmp1, zero);
  tmp1 = _mm_and_ps(tmp2, k);
  tmp2 = _mm_andnot_ps(tmp2, zero);
  k = _mm_xor_ps(tmp1, tmp2);

  _mm_store_ps(kappa_a, k);
  __m128 k_inv = _vec_rec_ps(k);

  __m128 D12_inv = _vec_rec_ps(D12);
  __m128 D23_inv = _vec_rec_ps(D23);
  __m128 D31_inv = _vec_rec_ps(D31);

  __m128 dr1 = dx1 * dx1 + dy1 * dy1;
  dr1 = _vec_sqrt_ps(dr1);
  __m128 dr2 = dx2 * dx2 + dy2 * dy2;
  dr2 = _vec_sqrt_ps(dr2);
  __m128 dr3 = dx3 * dx3 + dy3 * dy3;
  dr3 = _vec_sqrt_ps(dr3);

  __m128 dk1 = (dr1 + dr2) * D12_inv * D12_inv;
  __m128 dk2 = (dr2 + dr3) * D23_inv * D23_inv;
  __m128 dk = dk1 + dk2;
  _mm_store_ps(dkappa_a, dk);

  __m128 ux12 = (x2 - x1) * D12_inv;
  __m128 uy12 = (y2 - y1) * D12_inv;
  __m128 ux23 = (x3 - x2) * D23_inv;
  __m128 uy23 = (y3 - y2) * D23_inv;
  __m128 ux13 = (x3 - x1) * D31_inv;
  __m128 uy13 = (y3 - y1) * D31_inv;

  __m128 cosalpha = ux12 * ux13 + uy12 * uy13;
  __m128 sinalpha = ux13 * uy12 - ux12 * uy13;

  __m128 ux_mid = ux23 * cosalpha - uy23 * sinalpha;
  __m128 uy_mid = ux23 * sinalpha + uy23 * cosalpha;
  _mm_store_ps(ux_mid_a, ux_mid);
  _mm_store_ps(uy_mid_a, uy_mid);

  __m128 ux_end = ux23 * cosalpha + uy23 * sinalpha;
  __m128 uy_end = uy23 * cosalpha - ux23 * sinalpha;
  _mm_store_ps(ux_end_a, ux_end);
  _mm_store_ps(uy_end_a, uy_end);

  __m128 v = one - sinalpha * sinalpha;
  v = _vec_sqrt_ps(v);
  __m128 s2 = _vec_atan_ps(v);

  tmp1 = _mm_cmpgt_ps(k, zero);
  tmp2 = _mm_and_ps(tmp1, s2);
  tmp1 = _mm_andnot_ps(tmp1, D23);
  s2 = _mm_xor_ps(tmp1, tmp2);

  __m128 del_z_2 = z3 - z2;
  __m128 dzdl_2 = s2 * s2 + del_z_2 * del_z_2;
  dzdl_2 = _vec_rsqrt_ps(dzdl_2);
  __m128 ddzdl_2 = (dz2 + dz3) * D23_inv;
  _mm_store_ps(dzdl_2_a, dzdl_2);
  _mm_store_ps(ddzdl_2_a, ddzdl_2);

  sinalpha = ux13 * uy23 - ux23 * uy13;
  v = one - sinalpha * sinalpha;
  v = _vec_sqrt_ps(v);
  __m128 s1 = _vec_atan_ps(v);

  tmp1 = _mm_cmpgt_ps(k, zero);
  tmp2 = _mm_and_ps(tmp1, s1);
  tmp1 = _mm_andnot_ps(tmp1, D12);
  s1 = _mm_xor_ps(tmp1, tmp2);

  __m128 del_z_1 = z2 - z1;
  __m128 dzdl_1 = s1 * s1 + del_z_1 * del_z_1;
  dzdl_1 = _vec_rsqrt_ps(dzdl_1);
  __m128 ddzdl_1 = (dz1 + dz2) * D12_inv;
  _mm_store_ps(dzdl_1_a, dzdl_1);
  _mm_store_ps(ddzdl_1_a, ddzdl_1);

  __m128 c_dk = _mm_load_ps(cur_dkappa_a);
  __m128 c_k = _mm_load_ps(cur_kappa_a);
  __m128 c_ux = _mm_load_ps(cur_ux_a);
  __m128 c_uy = _mm_load_ps(cur_uy_a);
  __m128 c_chi2 = _mm_load_ps(cur_chi2_a);
  __m128 sinang = _mm_load1_ps(&sinang_cut);
  __m128 cosdiff = _mm_load1_ps(&cosang_diff_inv);

  __m128 kdiff = c_k - k;
  __m128 n_dk = c_dk + dk + sinang * k;
  __m128 chi2_k = kdiff * kdiff / (n_dk * n_dk);
  __m128 cos_scatter = c_ux * ux_mid + c_uy * uy_mid;
      (one - cos_scatter) * (one - cos_scatter) * cosdiff * cosdiff;
  tmp1 = dzdl_1 * sinang;
  __m128 chi2_dzdl = (dzdl_1 - dzdl_2) / (ddzdl_1 + ddzdl_2 + tmp1);
  chi2_dzdl *= chi2_dzdl;

  __m128 n_chi2 = c_chi2 + chi2_ang + chi2_k + chi2_dzdl;
  _mm_store_ps(chi2_a, n_chi2);
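// Note (added commentary, not in the original source): this overload repeats the
// triplet geometry above and additionally scores how well the new triplet continues
// an existing segment: chi2_k compares curvatures against their combined uncertainty
// (n_dk), the elided chi2_ang term apparently penalises the bend between the stored
// segment direction (cur_ux, cur_uy) and the new mid-direction via (1 - cos_scatter),
// and chi2_dzdl checks longitudinal-slope consistency; the sum is added to the
// running segment chi2 and written out through chi2_a.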
  dummy_track.d = 0.5 * (range.min_d + range.max_d);
  init_state.nu = sqrt(dummy_track.kappa);
  init_state.phi = dummy_track.phi;
  init_state.d = dummy_track.d;
  init_state.dzdl = dummy_track.dzdl;
  init_state.z0 = dummy_track.z0;
  init_state.C = Matrix<float, 5, 5>::Zero(5, 5);
  init_state.C(1, 1) = pow(range.max_d - range.min_d, 2.);
  init_state.C(2, 2) = pow(10. * sqrt(range.max_k - range.min_k), 2.);
  init_state.chi2 = 0.;
  init_state.x_int = 0.;
  init_state.y_int = 0.;
  init_state.z_int = 0.;
  for (unsigned int i = 0; i < n_layers; ++i) {
    dummies[i].set_x(x);
    dummies[i].set_y(x);
    dummies[i].set_z(x);
    dummies[i].set_layer(i);
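// Note (added commentary, not in the original source): the Kalman seed state is
// centred on the middle of the current Hough range, with nu = sqrt(kappa) as the
// curvature parameter actually carried by the filter, and a diagonal covariance sized
// from the widths of the d and k search windows; the dummy hits are per-layer
// placeholders.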
                               vector<SimpleTrack3D>& tracks,
  cout << "findTracksBySegments " << endl;
  vector<TrackSegment>* cur_seg = &segments1;
  vector<TrackSegment>* next_seg = &segments2;
  unsigned int curseg_size = 0;
  unsigned int nextseg_size = 0;
  vector<TrackSegment> complete_segments;
  for (unsigned int l = 0; l < n_layers; ++l) {
  for (unsigned int i = 0; i < hits.size(); ++i) {
    unsigned int min = (hits[i].get_layer() - allowed_missing);
    if (allowed_missing > (unsigned int) hits[i].get_layer()) {
    for (int l = min; l <= hits[i].get_layer(); l += 1) {
  for (unsigned int l = 0; l < n_layers; ++l) {
  gettimeofday(&t1, NULL);
  float cosang_diff_inv = 1. / cosang_diff;
  vector<float> inv_layer;
  inv_layer.assign(n_layers, 1.);
  for (unsigned int l = 3; l < n_layers; ++l) {
    inv_layer[l] = 1. / (((float)l) - 2.);
  unsigned int hit_counter = 0;
  unsigned int hit1[4];
  unsigned int hit2[4];
  unsigned int hit3[4];

  temp_segment.hits.assign(n_layers, 0);
  for (unsigned int i = 0, sizei = layer_sorted[0].size(); i < sizei; ++i) {
    for (unsigned int j = 0, sizej = layer_sorted[1].size(); j < sizej; ++j) {
      for (unsigned int k = 0, sizek = layer_sorted[2].size(); k < sizek; ++k) {
        dx1_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[0][i].get_size(0,0));
        dy1_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[0][i].get_size(1,1));
        dz1_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[0][i].get_size(2,2));
        dx2_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[1][j].get_size(0,0));
        dy2_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[1][j].get_size(1,1));
        dz2_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[1][j].get_size(2,2));
        dx3_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[2][k].get_size(0,0));
        dy3_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[2][k].get_size(1,1));
        dz3_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[2][k].get_size(2,2));
        hit1[hit_counter] = i;
        hit2[hit_counter] = j;
        hit3[hit_counter] = k;
        if (hit_counter == 4) {
              z3_a, dx1_a, dy1_a, dz1_a, dx2_a, dy2_a, dz2_a,
              dx3_a, dy3_a, dz3_a, kappa_a, dkappa_a,
              ux_mid_a, uy_mid_a, ux_end_a, uy_end_a,
              dzdl_1_a, dzdl_2_a, ddzdl_1_a, ddzdl_2_a);
          for (unsigned int h = 0; h < hit_counter; ++h) {
                (dzdl_1_a[h] - dzdl_2_a[h]) /
                (ddzdl_1_a[h] + ddzdl_2_a[h] + fabs(dzdl_1_a[h] * sinang_cut));
            temp_segment.chi2 *= temp_segment.chi2;
            if (temp_segment.chi2 > 2.0) {
            temp_segment.ux = ux_end_a[h];
            temp_segment.uy = uy_end_a[h];
            temp_segment.kappa = kappa_a[h];
            temp_segment.dkappa = dkappa_a[h];
            temp_segment.hits[0] = hit1[h];
            temp_segment.hits[1] = hit2[h];
            temp_segment.hits[2] = hit3[h];
            unsigned int outer_layer =
            if ((outer_layer - 2) > allowed_missing) {
            if ((n_layers - 3) <= allowed_missing) {
              complete_segments.push_back(temp_segment);
            if (next_seg->size() == nextseg_size) {
              next_seg->push_back(temp_segment);
              (*next_seg)[nextseg_size] = temp_segment;
  if (hit_counter != 0) {
        dx1_a, dy1_a, dz1_a, dx2_a, dy2_a, dz2_a, dx3_a,
        dy3_a, dz3_a, kappa_a, dkappa_a, ux_mid_a, uy_mid_a,
        ux_end_a, uy_end_a, dzdl_1_a, dzdl_2_a, ddzdl_1_a,
    for (unsigned int h = 0; h < hit_counter; ++h) {
          (dzdl_1_a[h] - dzdl_2_a[h]) /
          (ddzdl_1_a[h] + ddzdl_2_a[h] + fabs(dzdl_1_a[h] * sinang_cut));
      temp_segment.chi2 *= temp_segment.chi2;
      if (temp_segment.chi2 > 2.0) {
      temp_segment.ux = ux_end_a[h];
      temp_segment.uy = uy_end_a[h];
      temp_segment.kappa = kappa_a[h];
      temp_segment.dkappa = dkappa_a[h];
      temp_segment.hits[0] = hit1[h];
      temp_segment.hits[1] = hit2[h];
      temp_segment.hits[2] = hit3[h];
      unsigned int outer_layer = layer_sorted[2][temp_segment.hits[2]].get_layer();
      if ((outer_layer - 2) > allowed_missing) {
      if ((n_layers - 3) <= allowed_missing) {
        complete_segments.push_back(temp_segment);
      if (next_seg->size() == nextseg_size) {
        next_seg->push_back(temp_segment);
        (*next_seg)[nextseg_size] = temp_segment;
  swap(cur_seg, next_seg);
  swap(curseg_size, nextseg_size);
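// Note (added commentary, not in the original source): seed building packs hit
// triplets from the three innermost sorted layers into batches of four and hands
// them to the SSE calculate_kappa_tangents call; a triplet survives if the squared
// dzdl mismatch, normalised by its error plus a sinang_cut term, stays below 2.0.
// Survivors either go straight to complete_segments (when few enough layers remain)
// or into next_seg for extension, and the trailing block repeats the same scoring
// for a final partially-filled batch.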
  unsigned int whichseg[4];
  for (unsigned int l = 3; l < n_layers; ++l) {
    if (l == (n_layers - 1)) {
      easy_chi2_cut *= 0.25;
    for (unsigned int i = 0, sizei = curseg_size; i < sizei; ++i) {
      for (unsigned int j = 0, sizej = layer_sorted[l].size(); j < sizej; ++j) {
        if ((layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_layer() >=
          x1_a[hit_counter] = layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_x();
          y1_a[hit_counter] = layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_y();
          z1_a[hit_counter] = layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_z();
          x2_a[hit_counter] = layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_x();
          y2_a[hit_counter] = layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_y();
          z2_a[hit_counter] = layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_z();
          dx1_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_size(0,0));
          dy1_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_size(1,1));
          dz1_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l - 2][(*cur_seg)[i].hits[l - 2]].get_size(2,2));
          dx2_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_size(0,0));
          dy2_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_size(1,1));
          dz2_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l - 1][(*cur_seg)[i].hits[l - 1]].get_size(2,2));
          dx3_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l][j].get_size(0,0));
          dy3_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l][j].get_size(1,1));
          dz3_a[hit_counter] = 0.5*sqrt(12.0)*sqrt(layer_sorted[l][j].get_size(2,2));
          cur_kappa_a[hit_counter] = (*cur_seg)[i].kappa;
          cur_dkappa_a[hit_counter] = (*cur_seg)[i].dkappa;
          cur_ux_a[hit_counter] = (*cur_seg)[i].ux;
          cur_uy_a[hit_counter] = (*cur_seg)[i].uy;
          cur_chi2_a[hit_counter] = (*cur_seg)[i].chi2;
          whichseg[hit_counter] = i;
          hit1[hit_counter] = j;
          if (hit_counter == 4) {
                x1_a, y1_a, z1_a, x2_a, y2_a, z2_a, x3_a, y3_a, z3_a, dx1_a,
                dy1_a, dz1_a, dx2_a, dy2_a, dz2_a, dx3_a, dy3_a, dz3_a, kappa_a,
                dkappa_a, ux_mid_a, uy_mid_a, ux_end_a, uy_end_a, dzdl_1_a,
                dzdl_2_a, ddzdl_1_a, ddzdl_2_a, sinang_cut, cosang_diff_inv,
                cur_kappa_a, cur_dkappa_a, cur_ux_a, cur_uy_a, cur_chi2_a,
            for (unsigned int h = 0; h < hit_counter; ++h) {
              if ((chi2_a[h]) * inv_layer[l] < easy_chi2_cut) {
                temp_segment.chi2 = chi2_a[h];
                temp_segment.ux = ux_end_a[h];
                temp_segment.uy = uy_end_a[h];
                temp_segment.kappa = kappa_a[h];
                temp_segment.dkappa = dkappa_a[h];
                for (unsigned int ll = 0; ll < l; ++ll) {
                  temp_segment.hits[ll] = (*cur_seg)[whichseg[h]].hits[ll];
                temp_segment.hits[l] = hit1[h];
                unsigned int outer_layer =
                temp_segment.n_hits = l + 1;
                if ((n_layers - (l + 1)) <= allowed_missing) {
                  complete_segments.push_back(temp_segment);
                if ((outer_layer - l) > allowed_missing) {
                if (next_seg->size() == nextseg_size) {
                  next_seg->push_back(temp_segment);
                  (*next_seg)[nextseg_size] = temp_segment;
    if (hit_counter != 0) {
          x1_a, y1_a, z1_a, x2_a, y2_a, z2_a, x3_a, y3_a, z3_a, dx1_a, dy1_a,
          dz1_a, dx2_a, dy2_a, dz2_a, dx3_a, dy3_a, dz3_a, kappa_a, dkappa_a,
          ux_mid_a, uy_mid_a, ux_end_a, uy_end_a, dzdl_1_a, dzdl_2_a, ddzdl_1_a,
          ddzdl_2_a, sinang_cut, cosang_diff_inv, cur_kappa_a, cur_dkappa_a,
          cur_ux_a, cur_uy_a, cur_chi2_a, chi2_a);
      for (unsigned int h = 0; h < hit_counter; ++h) {
        if ((chi2_a[h]) * inv_layer[l] < easy_chi2_cut) {
          temp_segment.chi2 = chi2_a[h];
          temp_segment.ux = ux_end_a[h];
          temp_segment.uy = uy_end_a[h];
          temp_segment.kappa = kappa_a[h];
          temp_segment.dkappa = dkappa_a[h];
          for (unsigned int ll = 0; ll < l; ++ll) {
            temp_segment.hits[ll] = (*cur_seg)[whichseg[h]].hits[ll];
          temp_segment.hits[l] = hit1[h];
          unsigned int outer_layer =
          temp_segment.n_hits = l + 1;
          if ((n_layers - (l + 1)) <= allowed_missing) {
            complete_segments.push_back(temp_segment);
          if ((outer_layer - l) > allowed_missing) {
          if (next_seg->size() == nextseg_size) {
            next_seg->push_back(temp_segment);
            (*next_seg)[nextseg_size] = temp_segment;
    swap(cur_seg, next_seg);
    swap(curseg_size, nextseg_size);
  for (unsigned int i = 0; i < complete_segments.size(); ++i) {
    if (cur_seg->size() == curseg_size) {
      cur_seg->push_back(complete_segments[i]);
      (*cur_seg)[curseg_size] = complete_segments[i];
  gettimeofday(&t2, NULL);
  time1 = ((double)(t1.tv_sec) + (double)(t1.tv_usec) / 1000000.);
  time2 = ((double)(t2.tv_sec) + (double)(t2.tv_usec) / 1000000.);
  CAtime += (time2 - time1);
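// Note (added commentary, not in the original source): the loop above is the
// cellular-automaton style extension: for each surviving segment, its last two hits
// plus one candidate hit on layer l are scored (again four at a time through the SSE
// helper), the accumulated chi2 is normalised per layer via inv_layer[l] and compared
// to easy_chi2_cut, which is tightened by a factor of four on the outermost layer.
// Segments that can still afford the remaining missing layers are promoted to
// complete_segments, and the gettimeofday pair accumulates the segment-finding wall
// time into CAtime.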
  vector<SimpleHit3D> temp_hits;
  for (unsigned int i = 0, sizei = curseg_size; i < sizei; ++i) {
    temp_comb.assign((*cur_seg)[i].n_hits, 0);
    for (unsigned int l = 0; l < (*cur_seg)[i].n_hits; ++l) {
    if (it != combos.end()) {
    if (combos.size() > 10000) {
    for (unsigned int l = 0; l < (*cur_seg)[i].n_hits; ++l) {
    gettimeofday(&t1, NULL);
    float init_chi2 = fitTrack(temp_track);
    gettimeofday(&t2, NULL);
    time1 = ((double)(t1.tv_sec) + (double)(t1.tv_usec) / 1000000.);
    time2 = ((double)(t2.tv_sec) + (double)(t2.tv_usec) / 1000000.);
    state.phi = temp_track.phi;
    if (state.phi < 0.) {
    state.d = temp_track.d;
    state.z0 = temp_track.z0;
    state.C = Matrix<float, 5, 5>::Zero(5, 5);
    state.C(0, 0) = pow(0.01, 2.);
    state.C(1, 1) = pow(0.01, 2.);
    state.C(2, 2) = pow(0.01 * state.nu, 2.);
    state.C(3, 3) = pow(0.05, 2.);
    state.C(4, 4) = pow(0.05, 2.);
    for (unsigned int h = 0; h < temp_track.hits.size(); ++h) {
    gettimeofday(&t2, NULL);
    time1 = ((double)(t1.tv_sec) + (double)(t1.tv_usec) / 1000000.);
    time2 = ((double)(t2.tv_sec) + (double)(t2.tv_usec) / 1000000.);
    if (!(temp_track.kappa == temp_track.kappa)) {
    if (state.chi2 / (2. * ((float)(temp_track.hits.size())) - 5.) > chi2_cut) {
    if (fabs(temp_track.d) > dca_cut) {
    tracks.push_back(temp_track);
    for (unsigned int i = 0; i < temp_track.hits.size(); ++i) {
      (*hit_used)[temp_track.hits[i].get_id()] = true;
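// Note (added commentary, not in the original source): each finished segment is
// turned into a hit combination, de-duplicated through the combos container (which is
// capped at 10000 entries), fitted with fitTrack, and then refined by the Kalman
// filter seeded with a small diagonal covariance. Candidates are dropped when kappa
// is NaN (the self-inequality test), when the Kalman chi2 per degree of freedom
// exceeds chi2_cut, or when |d| exceeds dca_cut; accepted tracks are appended and
// their hits flagged in hit_used.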
  return ((double)(x > 0.)) - ((double)(x < 0.));

                                  float& x, float& y, float& z) {
  float hitx = seed.hits.back().get_x();
  float hity = seed.hits.back().get_y();
  float cosphi = cos(phi);
  float sinphi = sin(phi);
  float kd = (d * k + 1.);
  float kcx = kd * cosphi;
  float kcy = kd * sinphi;
  float kd_inv = 1. / kd;
  float R2 = rad_det * rad_det;
  float a = 0.5 * (k * R2 + (d * d * k + 2. * d)) * kd_inv;
  float tmp1 = a * kd_inv;
  float P2x = kcx * tmp1;
  float P2y = kcy * tmp1;
  float h = sqrt(R2 - a * a);
  float ux = -kcy * kd_inv;
  float uy = kcx * kd_inv;
  float x1 = P2x + ux * h;
  float y1 = P2y + uy * h;
  float x2 = P2x - ux * h;
  float y2 = P2y - uy * h;
  float diff1 = (x1 - hitx) * (x1 - hitx) + (y1 - hity) * (y1 - hity);
  float diff2 = (x2 - hitx) * (x2 - hitx) + (y2 - hity) * (y2 - hity);
  if (diff1 < diff2) {
  x = P2x + signk * ux * h;
  y = P2y + signk * uy * h;
  double sign_dzdl = sign(dzdl);
  double startx = d * cosphi;
  double starty = d * sinphi;
  double D = sqrt((startx - x) * (startx - x) + (starty - y) * (starty - y));
  double v = 0.5 * k * D;
  if (v >= 0.999999) {
  double s = 2. * asin(v) / k;
  double dz = sqrt(s * s * dzdl * dzdl / (1. - dzdl * dzdl));
  z = z0 + sign_dzdl * dz;
  double temp1 = k * D * 0.5;
  double temp2 = D * 0.5;
  s += (3. / 20.) * temp2;
  s += (5. / 56.) * temp2;
  double dz = sqrt(s * s * dzdl * dzdl / (1. - dzdl * dzdl));
  z = z0 + sign_dzdl * dz;
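// Note (added commentary, not in the original source): this helper intersects the
// seed's circle with a detector layer of radius rad_det in the transverse plane,
// picking whichever of the two intersections lies closer to the outermost seed hit
// (signk selecting the side), then converts the chord D to an arc length via
// s = 2 * asin(0.5 * k * D) / k, falling back to a truncated series expansion of the
// same expression for small arguments, and finally extrapolates
// z = z0 +/- s * dzdl / sqrt(1 - dzdl^2) along the helix.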
                               unsigned int min_hits,

                                    vector<SimpleHit3D>& hits,
                                    unsigned int , unsigned int ,
                                    vector<SimpleTrack3D>& ) {
  unsigned int pos = 0;
  while (pos < hits.size()) {
    for (unsigned int i = 0; i < nthreads; ++i) {
      if (pos >= hits.size()) {
      for (unsigned int j = 0; j < hits_per_thread; ++j) {
        if (pos >= hits.size()) {
  for (unsigned int i = 0; i < nthreads; ++i) {
  for (unsigned int b = 0; b < nbins; ++b) {
    for (unsigned int i = 0; i < nthreads; ++i) {

                                 unsigned int min_hits,
                                 unsigned int max_hits,
                                 vector<SimpleTrack3D>& tracks) {
  for (unsigned int i = 0; i < nthreads; ++i) {
  for (unsigned int i = 0; i < nthreads; ++i) {
  for (unsigned int i = 0; i < nthreads; ++i) {
  for (unsigned int i = 0; i < nthreads; ++i) {
  vector<SimpleTrack3D> temp_tracks;
  for (unsigned int i = 0; i < nthreads; ++i) {
    vector<HelixKalmanState>* states = &(thread_trackers[i]->getKalmanStates());
    for (unsigned int j = 0; j < thread_tracks[i].size(); ++j) {

  unsigned long int w = (*((unsigned long int*)arg));

  unsigned long int w = (*((unsigned long int*)arg));
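// Note (added commentary, not in the original source): the parallel entry points
// above deal hits out to the per-thread trackers in blocks of hits_per_thread, run
// the SeamStress workers in several phases (the repeated loops over nthreads), and
// then merge each worker's tracks and Kalman states back into the caller's
// containers; the two trailing fragments recover the worker index from the thread
// argument with unsigned long int w = *((unsigned long int*)arg).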